text stringlengths 11 4.05M |
|---|
package command
import (
"fmt"
"strings"
"titan-auth/group"
"github.com/emicklei/go-restful"
"grm-service/command"
"grm-service/dbcentral/etcd"
"grm-service/dbcentral/pg"
"grm-service/service"
. "titan-auth/dbcentral/etcd"
. "titan-auth/dbcentral/pg"
"titan-auth/user"
)
// TitanAuthCommand implements the "titan-auth" CLI subcommand. It embeds
// command.Meta for shared flag/UI plumbing and stores the connection
// settings parsed from command-line flags.
type TitanAuthCommand struct {
command.Meta
AuthDB string // PostgreSQL host:port of the auth database
DBuser string // PostgreSQL user name
DBpwd string // PostgreSQL password
EtcdEndpoint string // etcd endpoint(s); semicolon-separated, see Run
}
// Help returns the detailed usage text shown for the titan-auth command.
func (c *TitanAuthCommand) Help() string {
	const usage = `
Usage: titan-grm titan-auth [registry_address] [server_address] [server_namespace] [data_dir] [config_dir]
Example: titan-grm titan-auth -registry_address consul:8500 -server_address :8080 -server_namespace titangrm
-data_dir /opt/titangrm/data -config_dir /opt/titangrm/config
`
	return strings.TrimSpace(usage)
}
// Synopsis returns the one-line description of the command.
func (c *TitanAuthCommand) Synopsis() string {
	const brief = "Titan Auth Service"
	return brief
}
// Run parses the command-line flags, connects to the auth database
// (PostgreSQL) and etcd, registers the user and group REST services, and
// starts serving. It returns 0 on success and 1 on any startup failure.
func (c *TitanAuthCommand) Run(args []string) int {
	flags := c.Meta.FlagSet(service.TitanAuthService, command.FlagSetDefault)
	flags.StringVar(&c.AuthDB, "authdb", "192.168.1.149:31771", "postgresql server address and port")
	flags.StringVar(&c.DBuser, "dbuser", "postgres", "postgresql user")
	flags.StringVar(&c.DBpwd, "dbpwd", "otitan123", "postgresql user password")
	// Bug fix: the flag was named "etcd endpoint" (with a space), which can
	// never be supplied on a command line; renamed to a usable identifier.
	flags.StringVar(&c.EtcdEndpoint, "etcd_endpoint", "192.168.1.149:31686", "etcd endpoint")
	if err := flags.Parse(args); err != nil {
		c.Ui.Error(c.Help())
		return 1
	}
	// Renamed from "service" to avoid shadowing the service package.
	svc := service.NewService(service.TitanAuthService, "v2")
	svc.Init(&c.Meta)
	// Initialize the auth database connection.
	authDB, err := pg.ConnectAuthDB(c.AuthDB, c.DBuser, c.DBpwd)
	if err != nil {
		// Do not log the password: it is sensitive.
		fmt.Println("Failed to connect auth db:", err, c.AuthDB, c.DBuser)
		return 1
	}
	defer authDB.DisConnect()
	// Initialize the etcd v3 connection (endpoints are semicolon-separated).
	dynamic := DynamicDB{etcd.DynamicEtcd{Endpoints: strings.Split(c.EtcdEndpoint, ";")}}
	if err := dynamic.Connect(); err != nil {
		fmt.Println("Failed to connect etcd v3:", err)
		return 1
	}
	defer dynamic.DisConnect()
	// TODO: initialize the system auth information.
	// Register the service routes.
	wc := restful.NewContainer()
	userSvc := user.UserSvc{AuthDB: &AuthDB{authDB}, DynamicDB: &dynamic}
	wc.Add(userSvc.WebService())
	groupSvc := group.GroupSvc{
		AuthDB:    &AuthDB{authDB},
		DynamicDB: &dynamic,
	}
	wc.Add(groupSvc.WebService())
	svc.Handle("/", wc)
	svc.Run()
	return 0
}
|
package core
// NewException wraps the given node as an exception value.
func NewException(exception Type) *Type {
	wrapped := Type{Exception: &exception}
	return &wrapped
}
// NewStringException builds an exception node that carries a string message.
func NewStringException(message string) *Type {
	payload := &Type{String: &message}
	return &Type{Exception: payload}
}
// IsException reports whether the node carries an exception payload.
func (n *Type) IsException() bool {
	if n.Exception == nil {
		return false
	}
	return true
}
// AsException returns the wrapped exception node, or nil when there is none.
func (n *Type) AsException() *Type {
	return n.Exception
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux
package cgroup
import (
"fmt"
"regexp"
"runtime"
"strconv"
"sync"
"syscall"
"testing"
"github.com/stretchr/testify/require"
)
// checkKernelVersionNewerThan reports whether the running kernel's version
// is strictly newer than major.minor (the patch level is ignored).
func checkKernelVersionNewerThan(t *testing.T, major, minor int) bool {
	var uname syscall.Utsname
	require.NoError(t, syscall.Uname(&uname))
	// Utsname.Release is a fixed-size, NUL-terminated array; turn it into a string.
	raw := make([]byte, 0, len(uname.Release))
	for _, ch := range uname.Release {
		if ch == 0 {
			break
		}
		raw = append(raw, byte(ch))
	}
	release := string(raw)
	t.Log("kernel release string:", release)
	verRE := regexp.MustCompile(`[0-9]+\.[0-9]+\.[0-9]+`)
	matches := verRE.FindAllString(release, 1)
	require.Equal(t, 1, len(matches), fmt.Sprintf("release str is %s", release))
	partRE := regexp.MustCompile(`[0-9]+`)
	parts := partRE.FindAllString(matches[0], -1)
	require.Equal(t, 3, len(parts), fmt.Sprintf("kernel verion str is %s", matches[0]))
	t.Logf("parsed kernel version parts: major %s, minor %s, patch %s",
		parts[0], parts[1], parts[2])
	toInt := func(s string) int {
		n, convErr := strconv.Atoi(s)
		require.NoError(t, convErr, s)
		return n
	}
	gotMajor, gotMinor := toInt(parts[0]), toInt(parts[1])
	switch {
	case gotMajor > major:
		return true
	case gotMajor == major && gotMinor > minor:
		return true
	default:
		return false
	}
}
// TestGetCgroupCPU spins up busy goroutines so the process accrues CPU time,
// then checks that GetCgroupCPU reports a sane CPU period. The
// errNoCPUControllerDetected case is tolerated on kernels not newer than 4.7,
// where the cgroup CPU controller may be absent (see tidb PR 41347).
func TestGetCgroupCPU(t *testing.T) {
exit := make(chan struct{})
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
wg.Add(1)
go func() {
defer wg.Done()
// Busy-loop until the test signals shutdown via close(exit).
for {
select {
case <-exit:
return
default:
runtime.Gosched()
}
}
}()
}
cpu, err := GetCgroupCPU()
if err == errNoCPUControllerDetected {
// for more information, please refer https://github.com/pingcap/tidb/pull/41347
if checkKernelVersionNewerThan(t, 4, 7) {
require.NoError(t, err, "linux version > v4.7 and err still happens")
} else {
t.Logf("the error '%s' is ignored because the kernel is too old", err)
}
} else {
require.NoError(t, err)
require.NotZero(t, cpu.Period)
require.Less(t, int64(1), cpu.Period)
}
close(exit)
wg.Wait()
}
|
package main
import (
"fmt"
)
// divide computes dividend / divisor truncated toward zero without using the
// division operator, clamping the result at the signed 32-bit maximum on
// overflow (only possible for math.MinInt32 / -1).
func divide(dividend int, divisor int) int {
	// Trivial fast paths.
	if dividend == divisor {
		return 1
	}
	if divisor == 1 {
		return dividend
	}
	// Record the quotient's sign, then work with magnitudes.
	negative := (dividend < 0) != (divisor < 0)
	if dividend < 0 {
		dividend = -dividend
	}
	if divisor < 0 {
		divisor = -divisor
	}
	if dividend < divisor {
		return 0
	}
	// Binary long division: try the largest shifted divisor first.
	quotient, acc := 0, 0
	for bit := 31; bit >= 0; bit-- {
		if chunk := divisor << uint(bit); acc+chunk <= dividend {
			acc += chunk
			quotient |= 1 << uint(bit)
		}
	}
	if negative {
		quotient = -quotient
	}
	// Clamp 32-bit overflow to MaxInt32.
	if quotient >= 1<<31 {
		quotient = 1<<31 - 1
	}
	return quotient
}
// main runs a small table of regression cases against divide and reports any
// mismatch on stdout.
func main() {
	cases := [][3]int{
		{1, 2, 0},
		{10, 3, 3},
		{5, 3, 1},
		{0, -5, 0},
		{-2147483648, -1, 2147483647},
		{-2147483647, 2, -2147483647 / 2},
	}
	for _, c := range cases {
		dividend, divisor, want := c[0], c[1], c[2]
		if got := divide(dividend, divisor); got != want {
			fmt.Printf("test failed: %d / %d should have been %d. Was %d.\n", dividend, divisor, want, got)
		}
	}
}
|
package services
import (
"github.com/exproletariy/pip-services3-containers-examples/app-process-container-example-go/logic"
"net/http"
crefer "github.com/pip-services3-go/pip-services3-commons-go/refer"
rpc "github.com/pip-services3-go/pip-services3-rpc-go/services"
)
// AppExampleRestService exposes the AppExampleController over HTTP under the
// "/example" base route.
type AppExampleRestService struct {
*rpc.RestService
controller *logic.AppExampleController // resolved in SetReferences
}
// NewAppExampleRestService builds the REST service: it wires the inherited
// RestService plumbing, sets the base route, and declares the controller
// dependency that SetReferences resolves later.
func NewAppExampleRestService() *AppExampleRestService {
c := AppExampleRestService{}
c.RestService = rpc.InheritRestService(&c)
c.BaseRoute = "/example"
c.DependencyResolver.Put("controller", crefer.NewDescriptor("app-example", "controller", "*", "*", "1.0"))
return &c
}
// SetReferences resolves the controller dependency from the reference set.
// NOTE(review): a resolution failure is silently ignored, leaving
// c.controller nil; greeting would then panic — consider surfacing depErr.
func (c *AppExampleRestService) SetReferences(references crefer.IReferences) {
c.RestService.SetReferences(references)
depRes, depErr := c.DependencyResolver.GetOneRequired("controller")
if depErr == nil && depRes != nil {
c.controller = depRes.(*logic.AppExampleController)
}
}
// greeting handles GET /example/greeting?name=... by delegating to the
// controller and sending its result (or error) as the response.
func (c *AppExampleRestService) greeting(res http.ResponseWriter, req *http.Request) {
name := req.URL.Query().Get("name")
result, err := c.controller.Greeting(name)
c.SendResult(res, req, result, err)
}
// Register wires the HTTP routes served by this service.
func (c *AppExampleRestService) Register() {
c.RegisterRoute("get", "/greeting", nil, c.greeting)
}
|
package main
import (
"go/token"
"strings"
"sync"
)
// CheckGoDocs fans the lexeme stream out to two godoc checkers — one for
// root-level declaration comments and one for identifier comments inside
// type declarations — and blocks until both have drained their input.
func CheckGoDocs(lc <-chan *Lexeme, outc chan<- *CheckedLexeme) {
	streams := LexemeMux(lc, 2)
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		checkGoDoc(Filter(streams[0], DeclRootCommentFilter), outc)
	}()
	go func() {
		defer wg.Done()
		checkGoDoc(Filter(Filter(streams[1], DeclTypeFilter), DeclIdentCommentFilter), outc)
	}()
	wg.Wait()
}
// checkGoDoc consumes ILLEGAL-terminated lexeme runs from tch and reports
// godoc style violations on outc. For each run it checks that the comment
// block is aligned with and immediately precedes the code it documents, then
// verifies the comment's leading word matches the documented identifier (or
// "Package" plus the package name for package clauses).
func checkGoDoc(tch <-chan *Lexeme, outc chan<- *CheckedLexeme) {
	for {
		// Collect one declaration's lexemes; ILLEGAL separates runs.
		ll := []*Lexeme{}
		for {
			l, ok := <-tch
			if !ok {
				return
			}
			if l.tok == token.ILLEGAL {
				break
			}
			ll = append(ll, l)
		}
		godoc := godocBlock(ll)
		after := afterGoDoc(ll)
		// Guard runs with no comment block or no trailing code token; the
		// original dereferenced godoc[0]/after unconditionally and could panic.
		if len(godoc) == 0 || after == nil {
			continue
		}
		// does the comment line up with the next line?
		if after.pos.Column != godoc[0].pos.Column {
			continue
		}
		// is the comment on the line immediately before the code?
		if after.pos.Line != godoc[len(godoc)-1].pos.Line+1 {
			continue
		}
		// does the comment have a token for documentation?
		fields := strings.Fields(godoc[0].lit)
		if len(fields) < 2 {
			continue
		}
		// is the comment a go-swagger comment? If so ignore.
		// len("swagger") == 7
		if len(fields[1]) >= 7 && fields[1][:7] == "swagger" {
			continue
		}
		// check package
		if ll[len(ll)-2].tok == token.PACKAGE {
			pkgName := ll[len(ll)-1].lit
			if pkgName == "main" {
				// main exemption for describing command line utilities
				continue
			}
			hasPkg := fields[1] == "Package"
			// Guard fields[2]: the comment may be only two words long (the
			// original indexed fields[2] here and could panic).
			hasName := len(fields) > 2 && fields[2] == pkgName
			switch {
			case !hasPkg && !hasName:
				cw := []CheckedWord{{fields[1], "// Package " + pkgName}}
				outc <- &CheckedLexeme{godoc[0], "godoc-export", cw}
			case !hasPkg:
				cw := []CheckedWord{{fields[1], "Package"}}
				outc <- &CheckedLexeme{godoc[0], "godoc-export", cw}
			case !hasName:
				word := ""
				if len(fields) > 2 {
					word = fields[2]
				}
				cw := []CheckedWord{{word, pkgName}}
				outc <- &CheckedLexeme{godoc[0], "godoc-export", cw}
			}
			continue
		}
		// what token should the documentation match?
		cmplex := ll[len(ll)-1]
		if ll[len(ll)-2].tok == token.IDENT {
			cmplex = ll[len(ll)-2]
		}
		// "A Foo ..." / "An Foo ..." style comments are accepted; guard the
		// fields[2] access for two-word comments.
		if len(fields) > 2 && (fields[1] == "A" || fields[1] == "An") && fields[2] == cmplex.lit {
			continue
		}
		if fields[1] == cmplex.lit {
			continue
		}
		// bad godoc
		label := "godoc-local"
		if strings.ToUpper(cmplex.lit)[0] == cmplex.lit[0] {
			label = "godoc-export"
		}
		cw := []CheckedWord{{fields[1], cmplex.lit}}
		outc <- &CheckedLexeme{godoc[0], label, cw}
	}
}
// godocBlock extracts the godoc comment block from a comment-prefixed token
// run: the last contiguous run of comment lines (each directly below the
// previous one) before the first non-comment token.
func godocBlock(ll []*Lexeme) []*Lexeme {
	var comm []*Lexeme
	nextLine := 0
	for _, lex := range ll {
		if lex.tok != token.COMMENT {
			break
		}
		// A gap in line numbers starts a fresh comment block.
		if lex.pos.Line != nextLine {
			comm = comm[:0]
		}
		nextLine = lex.pos.Line + 1
		comm = append(comm, lex)
	}
	return comm
}
// afterGoDoc returns the first non-comment token in the run, or nil when the
// run consists of comments only.
func afterGoDoc(ll []*Lexeme) *Lexeme {
	for i := range ll {
		if ll[i].tok != token.COMMENT {
			return ll[i]
		}
	}
	return nil
}
|
package resp
import (
"github.com/EverythingMe/meduza/client"
"github.com/EverythingMe/meduza/errors"
"github.com/EverythingMe/meduza/protocol"
"github.com/EverythingMe/meduza/transport"
"github.com/dvirsky/go-pylog/logging"
redigo "github.com/garyburd/redigo/redis"
)
// Client wraps a connection to the server and the protocol used to encode
// queries and decode responses on that connection.
type Client struct {
conn redigo.Conn
proto protocol.Protocol
}
// NewClient creates a client from an existing redis connection and the
// protocol implementation used to frame messages on it.
func NewClient(proto protocol.Protocol, conn redigo.Conn) *Client {
return &Client{
conn: conn,
proto: proto,
}
}
// Dialer creates client objects from a shared redigo connection pool.
type Dialer struct {
pool *redigo.Pool
Proto protocol.Protocol
}
// Dial checks a connection out of the pool and wraps it in a Client. It
// never returns an error itself; a failed pooled connection surfaces its
// error on first use.
func (d Dialer) Dial() (client.Client, error) {
c := NewClient(d.Proto, d.pool.Get())
return c, nil
}
// NewDialer builds a Dialer whose pool dials addr over TCP, keeping up to
// 4 idle connections.
func NewDialer(proto protocol.Protocol, addr string) Dialer {
return Dialer{
pool: redigo.NewPool(func() (redigo.Conn, error) {
return redigo.Dial("tcp", addr)
}, 4),
Proto: proto,
}
}
// Do sends a query to the server and receives its response.
// It returns an error if the query could not be encoded, the round trip
// failed, or the response could not be decoded.
func (c *Client) Do(query interface{}) (interface{}, error) {
// TODO: make sure it's a real query message - the proto will allow responses as well, and we don't want that
msg, err := c.proto.WriteMessage(query)
if err != nil {
return nil, errors.NewError("Could not send query: %s", err)
}
if msg, err = c.roundtrip(msg); err != nil {
logging.Error("Could not roundtrip: %s", errors.Sprint(err))
return nil, err
}
return c.proto.ReadMessage(msg)
}
// roundtrip performs one request/response exchange: it sends msg over the
// redis connection and reassembles the expected two-element reply
// (type, body) into a transport.Message.
func (c *Client) roundtrip(msg transport.Message) (transport.Message, error) {
var ret transport.Message
vals, err := redigo.Values(c.conn.Do(string(msg.Type), msg.Body))
if err != nil {
return ret, errors.NewError("Error receiving message: %s", err)
}
// The wire reply must be exactly [type, body].
if len(vals) != 2 {
return ret, errors.NewError("Invalid response read, expected 2 elements, got %d", len(vals))
}
msgType, ok := vals[0].([]byte)
if !ok {
return ret, errors.NewError("Invalid response: %v", vals[0])
}
data, ok := vals[1].([]byte)
if !ok {
return ret, errors.NewError("Invalid response data: %v", vals[1])
}
ret = transport.Message{
Type: transport.MessageType(msgType),
Body: data,
}
return ret, nil
}
|
package socket
// Common is the normal data for messages passed on the console socket.
type Common struct {
// Type of message being passed, used to dispatch on the receiving side.
Type string `json:"type"`
}
// TerminalRequest is the normal data for messages passing a pseudoterminal master.
type TerminalRequest struct {
Common
// Container ID for the container whose pseudoterminal master is being set.
Container string `json:"container"`
}
// Response is the normal data for response messages.
type Response struct {
Common
// Message is a phrase describing the response. Omitted when empty.
Message string `json:"message,omitempty"`
}
|
package users
import (
"crypto/aes"
"crypto/cipher"
"crypto/md5"
"crypto/rand"
"encoding/hex"
"fmt"
"io"
"log"
)
// User is an account record. Password holds the plaintext on input and is
// replaced in place by EncryptPassword with raw nonce||ciphertext bytes.
type User struct {
Name string `json:"name"`
Id string `json:"id"`
City string `json:"city"`
Age int `json:"age"`
Password string `json:"password"`
}
/*
We need to implement hashing and salting
(so that even the encrypted passwords cannot be reverse-engineered easily through rainbow tables or brute-force attacks).
For the purposes of this project, simply encrypting the passwords with a cryptographic algorithm will suffice,
whereas in practice we would use hashing and salting with more secure algorithms.
*/
//secret-key
const passphrase string = "srikanth.balakrishna511@gmail.com"
// createHash derives a 32-character hex string (the MD5 digest) from key,
// used as the AES-256 key material.
// NOTE(review): MD5 is not a secure KDF, but existing ciphertexts depend on
// this exact derivation, so changing it would break decryption of old data.
func createHash(key string) string {
	digest := md5.Sum([]byte(key))
	return hex.EncodeToString(digest[:])
}
// EncryptPassword encrypts user.Password in place with AES-256-GCM, using a
// key derived from the package passphrase. The random nonce is prepended to
// the ciphertext so it can be recovered for later decryption.
func (user *User) EncryptPassword() {
	// Bug fix: the cipher-construction error was previously discarded with _,
	// which would have caused a nil-pointer panic inside NewGCM on a bad key.
	block, err := aes.NewCipher([]byte(createHash(passphrase)))
	if err != nil {
		log.Fatal(err)
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		log.Fatal(err)
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err = io.ReadFull(rand.Reader, nonce); err != nil {
		log.Fatal(err)
	}
	user.Password = string(gcm.Seal(nonce, nonce, []byte(user.Password), nil))
	// NOTE(review): this prints raw ciphertext bytes to stdout; consider
	// removing the print or hex-encoding the value for logging.
	fmt.Println("User encrypted password:" + user.Password)
}
|
package csnotes
import (
"fmt"
"testing"
)
// Test_entryNodeOfLoop builds an 8-node list whose tail links back to node 3
// and prints the loop entry reported by entryNodeOfLoop (no assertions).
func Test_entryNodeOfLoop(t *testing.T) {
listNode := ListNode{1, nil}
listNode2 := ListNode{2, nil}
listNode3 := ListNode{3, nil}
listNode4 := ListNode{4, nil}
listNode5 := ListNode{5, nil}
listNode6 := ListNode{6, nil}
listNode7 := ListNode{7, nil}
listNode8 := ListNode{8, nil}
listNode.Next = &listNode2
listNode2.Next = &listNode3
listNode3.Next = &listNode4
listNode4.Next = &listNode5
listNode5.Next = &listNode6
listNode6.Next = &listNode7
listNode7.Next = &listNode8
// Close the cycle: 8 -> 3, so node 3 is the loop entry.
listNode8.Next = &listNode3
res := entryNodeOfLoop(&listNode)
if res == nil {
fmt.Println("no loop")
} else {
fmt.Println(res.Val)
}
}
// Test_deleteNode exercises deleteNode against the head value, the tail
// node, and a middle node of a 4-element list (no assertions).
func Test_deleteNode(t *testing.T) {
listNode := ListNode{1, nil}
listNode2 := ListNode{2, nil}
listNode3 := ListNode{3, nil}
listNode4 := ListNode{4, nil}
listNode.Next = &listNode2
listNode2.Next = &listNode3
listNode3.Next = &listNode4
// listNode5 duplicates the head's value but is not part of the list.
listNode5 := ListNode{1, nil}
// head
deleteNode(&listNode, &listNode5)
// end
deleteNode(&listNode, &listNode4)
// next
deleteNode(&listNode, &listNode3)
}
// Test_print smoke-tests the package's print helper with a small input.
func Test_print(t *testing.T) {
print(2)
}
// Test_numOf1 checks that numOf1 counts the two set bits of 0x11.
func Test_numOf1(t *testing.T) {
cnt := numOf1(0x11)
if cnt != 2 {
t.Error(cnt)
}
}
// Test_rectCover prints rectCover results for inputs 3 and 5 (no assertions).
func Test_rectCover(t *testing.T) {
res := rectCover(3)
res2 := rectCover(5)
fmt.Println(res, res2)
}
// Test_duplicate prints the duplicate reported for a sample slice that
// contains the value 2 twice (no assertions).
func Test_duplicate(t *testing.T) {
nums := []int{2, 3, 1, 0, 2, 5}
res, ok := duplicate(nums)
fmt.Println(res, ok)
}
// Test_find probes find with two sample values against a matrix whose rows
// and columns are sorted ascending (no assertions).
func Test_find(t *testing.T) {
nums := [][]int{
{1, 4, 7, 11, 15},
{2, 5, 8, 12, 19},
{3, 6, 9, 16, 22},
{10, 13, 14, 17, 24},
{18, 21, 23, 26, 30},
}
res := find(18, nums)
res1 := find(17, nums)
fmt.Println(res, res1)
}
// Test_printList prints a 3-element list manually, then via printList, so
// the two renderings can be compared by eye (no assertions).
func Test_printList(t *testing.T) {
listNode := ListNode{1, nil}
listNode2 := ListNode{2, nil}
listNode3 := ListNode{3, nil}
listNode.Next = &listNode2
listNode2.Next = &listNode3
node := &listNode
for node != nil {
fmt.Print(node.Val)
fmt.Print("\t")
node = node.Next
}
fmt.Println()
printList(&listNode)
fmt.Println()
}
// Test_printList2 prints a 3-element list before and after printList2,
// which returns a list head; output is inspected manually (no assertions).
func Test_printList2(t *testing.T) {
listNode := ListNode{1, nil}
listNode2 := ListNode{2, nil}
listNode3 := ListNode{3, nil}
listNode.Next = &listNode2
listNode2.Next = &listNode3
fmt.Println("before: ")
node := &listNode
for node != nil {
fmt.Print(node.Val)
fmt.Print("\t")
node = node.Next
}
fmt.Println()
res := printList2(&listNode)
fmt.Println("after: ")
node = res
for node != nil {
fmt.Print(node.Val)
fmt.Print("\t")
node = node.Next
}
fmt.Println()
}
// Test_rebuildBinaryTree smoke-tests tree reconstruction from matching
// pre-order and in-order traversals (no assertions).
func Test_rebuildBinaryTree(t *testing.T) {
preOrder := []int{3, 9, 20, 15, 7}
inOrder := []int{9, 3, 15, 20, 7}
rebuildBinaryTree(preOrder, inOrder)
}
// Test_inOrderNextNode builds the tree sketched below (Next points to the
// parent) and prints the in-order successors of nodes 2 and 5 (no assertions).
func Test_inOrderNextNode(t *testing.T) {
// 6
// 2 7
// 1 4
// 3 5
root := &BinaryTree2{Val: 6}
node1 := &BinaryTree2{Val: 2}
node2 := &BinaryTree2{Val: 7}
node3 := &BinaryTree2{Val: 1}
node4 := &BinaryTree2{Val: 4}
node5 := &BinaryTree2{Val: 3}
node6 := &BinaryTree2{Val: 5}
root.Left = node1
root.Right = node2
node2.Next = root
node1.Next = root
node1.Left = node3
node1.Right = node4
node3.Next = node1
node4.Next = node1
node4.Left = node5
node4.Right = node6
node5.Next = node4
node6.Next = node4
res := inOrderNextNode(node1)
res2 := inOrderNextNode(node6)
fmt.Println(res, res2)
}
// Test_hasPath checks hasPath on a 3x4 grid with one word that should be
// reachable ("bfce") and one that should not ("dddd"); results are printed
// rather than asserted.
func Test_hasPath(t *testing.T) {
chars := [][]byte{
{'a', 'b', 't', 'g'},
{'c', 'f', 'c', 's'},
{'j', 'd', 'e', 'h'},
}
res := hasPath(chars, "bfce")
res2 := hasPath(chars, "dddd")
fmt.Println(res, res2)
}
|
package ewallet
import (
"os"
goxendit "github.com/xendit/xendit-go"
"github.com/xendit/xendit-go/ewallet"
"github.com/imrenagi/go-payment/invoice"
)
// NewDana create xendit payment request for Dana, using the legacy callback
// and redirect URLs taken from the DANA_LEGACY_CALLBACK_URL and
// DANA_LEGACY_REDIRECT_URL environment variables.
func NewDana(inv *invoice.Invoice) (*ewallet.CreatePaymentParams, error) {
return newBuilder(inv).
SetPaymentMethod(goxendit.EWalletTypeDANA).
SetCallback(os.Getenv("DANA_LEGACY_CALLBACK_URL")).
SetRedirect(os.Getenv("DANA_LEGACY_REDIRECT_URL")).
Build()
}
/*
@Time : 2019/5/4 13:48
@Author : yanKoo
@File : redis_data_sync
@Software: GoLand
@Description:
*/
package server
import (
pb "api/talk_cloud"
"cache"
"database/sql"
"db"
"log"
tg "pkg/group"
tgc "pkg/group_cache"
tu "pkg/user"
tuc "pkg/user_cache"
"sync"
)
// ConcurrentEngine fans user-id sync work out to WorkerCount workers fed by
// the Scheduler.
type ConcurrentEngine struct {
Scheduler Scheduler
WorkerCount int
}
// Scheduler routes submitted user ids onto the shared worker input channel.
type Scheduler interface {
Submit(int32)
ConfigureMasterWorkerChan(chan int32)
}
// Run starts WorkerCount workers, submits every known user id to the
// scheduler, and waits until all ids have been processed.
func (e ConcurrentEngine) Run() {
	in := make(chan int32)
	var wg sync.WaitGroup
	e.Scheduler.ConfigureMasterWorkerChan(in)
	for i := 0; i < e.WorkerCount; i++ {
		createWorker(in, &wg)
	}
	// Fetch all user ids to sync. NOTE(review): the lookup error is ignored;
	// an empty id list is treated the same as success.
	uIds, _ := tu.SelectAllUserId()
	for _, v := range uIds {
		log.Printf("# uid %d", v)
		wg.Add(1)
		// Bug fix: pass v as an argument. Before Go 1.22 the loop variable is
		// shared across iterations, so the original closure could submit the
		// same (latest) id from every goroutine.
		go func(uid int32) { e.Scheduler.Submit(uid) }(v)
	}
	wg.Wait()
	log.Printf("**********************redis data sync done*****************************")
}
// createWorker starts a goroutine that receives user ids from in and syncs
// each user's data and group data into the cache. The wait group is marked
// done for every id, whether or not its sync succeeded.
func createWorker(in chan int32, wg *sync.WaitGroup) {
	go func() {
		for uId := range in {
			// Bug fix: the original skipped wg.Done() on any error, which
			// made ConcurrentEngine.Run block forever when a sync failed.
			if err := UserData(uId); err == nil {
				// Group sync errors are best-effort, matching the original
				// intent of continuing with the next id.
				_ = GroupData(uId)
			}
			wg.Done()
		}
	}()
}
// DataInit warms the cache for every known user id sequentially, ignoring
// per-user errors so one bad record does not abort the whole sweep.
func DataInit() {
uIds, _ := tu.SelectAllUserId()
for _, v := range uIds {
_ = UserData(v)
_ = GroupData(v)
}
}
// UserData loads the user record for uId from the database and stores it in
// the Redis cache as a pb.Member. A missing row (sql.ErrNoRows) is not
// treated as an error; cache write failures are logged but not returned.
func UserData(uId int32) error {
// Fetch the user's record by id so it can be placed into the cache.
res, err := tu.SelectUserByKey(int(uId))
if err != nil && err != sql.ErrNoRows {
log.Printf("UserData SelectUserByKey error : %s", err)
return err
}
userInfo := &pb.Member{
Id: int32(res.Id),
IMei: res.IMei,
UserName: res.UserName,
NickName: res.NickName,
UserType: int32(res.UserType),
LockGroupId: int32(res.LockGroupId),
Online: tuc.USER_OFFLINE, // freshly loaded users default to offline
}
log.Println("Add User Info into cache start")
if err := tuc.AddUserDataInCache(userInfo, cache.GetRedisClient()); err != nil {
log.Println("Add user information to cache with error: ", err)
}
log.Println("Add User Info into cache done")
return nil
}
// GroupData loads the user's group list from the database and refreshes the
// Redis cache: the per-group info, the user-set of each group, each member's
// cached record, and finally the membership list of every group.
func GroupData(uid int32) error {
gl, _, err := tg.GetGroupListFromDB(int32(uid), db.DBHandler)
if err != nil {
return err
}
log.Println("GroupData GetGroupListFromDB start update redis")
// Refresh the per-group data (group id and name) in the cache.
if err := tgc.AddGroupInCache(gl, cache.GetRedisClient()); err != nil {
return err
}
// Refresh the user set describing which users belong to each group.
if err := tuc.AddUserInGroupToCache(gl, cache.GetRedisClient()); err != nil {
return err
}
// Refresh each member's cached record.
for _, g := range gl.GroupList {
for _, u := range g.UsrList {
if err := tuc.AddUserDataInCache(&pb.Member{
Id: u.Uid,
IMei: u.Imei,
NickName: u.Name,
Online: u.Online,
LockGroupId: u.LockGroupId,
}, cache.GetRedisClient()); err != nil {
log.Println("Add user information to cache with error: ", err)
}
}
}
// Refresh each group's membership list.
for _, v := range gl.GroupList {
if err := tgc.AddGroupCache(v.UsrList, v, cache.GetRedisClient()); err != nil {
return err
}
}
return nil
}
|
package main
type Article struct {
ID int `json:"id" validate:"min=1"`
Title string `json:"title" validate:"nonzero"`
Content string `json"content" validate:"nonzero"`
}
var articleList = []Article{
Article{ID: 1, Title: "Article 1", Content: "Article 1 Body"},
Article{ID: 2, Title: "Article 2", Content: "Article 2 Body"},
}
func getAllArticles() []Article {
return articleList
} |
package cmd
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"os"
"strings"
awsecs "github.com/aws/aws-sdk-go/service/ecs"
"github.com/oberd/ecsy/ecs"
"github.com/spf13/cobra"
)
// envCmd represents the env command; it is a pure parent command whose
// behavior lives in the get/set/edit subcommands registered in init.
var envCmd = &cobra.Command{
Use: "env [command]",
Short: "Used to manage environment variables of service task definitions",
Long: `Getting and setting, then deploying new environment variables in ECS services
can be a little annoying due to the cumbersome web ui. Do it in one fell swoop with
ecsy env set or ecsy env edit`,
}
// getCmd represents the get command: it prints the environment of the
// service's deployed essential container. Exactly two args are enforced
// by Validate2ArgumentsCount before Run executes.
var getCmd = &cobra.Command{
Use: "get [cluster-name] [service-name]",
Short: "List environment variables for an ECS service's deployed task definition",
Long: `List environment variables for an ECS service's deployed task definition`,
Run: func(cmd *cobra.Command, args []string) {
container, err := ecs.GetDeployedEssentialContainer(args[0], args[1])
failOnError(err, "")
fmt.Print(ecs.KeyPairsToString(container.Environment))
},
PreRunE: Validate2ArgumentsCount,
}
// setCmd sets a variable: it updates (or appends) one environment variable
// on the deployed essential container and redeploys via deployEnv. Exactly
// four args are enforced by Validate4ArgumentsCount.
var setCmd = &cobra.Command{
Use: "set [cluster-name] [service-name] [env_var_name] [env_var_value]",
Short: "Set an environment variable for an ECS service's deployed task definition",
Long: `Set an environment variable for an ECS service's deployed task definition`,
Run: func(cmd *cobra.Command, args []string) {
container, err := ecs.GetDeployedEssentialContainer(args[0], args[1])
failOnError(err, "")
varName := args[2]
varValue := args[3]
var found bool
// Environment holds pointers, so SetValue mutates the container in place.
for _, val := range container.Environment {
if *val.Name == varName {
val.SetValue(varValue)
found = true
}
}
if !found {
container.Environment = append(container.Environment, &awsecs.KeyValuePair{Name: aws.String(varName), Value: aws.String(varValue)})
}
deployEnv(args[0], args[1], container.Environment)
},
PreRunE: Validate4ArgumentsCount,
}
// editCmd allows you to edit env vars of an active service's task definition
// interactively: the current environment is opened in an editor, and any
// changes are deployed as a new task definition after confirmation.
var editCmd = &cobra.Command{
	Use:   "edit [cluster-name] [service-name]",
	Short: "Interactively define environment for a task, and deploy it to the service",
	Long:  `Interactively define environment for a task, and deploy it to the service`,
	Run: func(cmd *cobra.Command, args []string) {
		cluster := args[0]
		service := args[1]
		primary, err := ecs.GetDeployedEssentialContainer(cluster, service)
		if err != nil {
			fmt.Printf("Error finding essential container:\n%v\n", err)
			os.Exit(1)
		}
		original := strings.TrimSpace(ecs.KeyPairsToString(primary.Environment))
		edited, err := EditStringBlock(original)
		// Bug fix: check the editor error before using its output; the
		// original trimmed `edited` before inspecting err.
		if err != nil {
			fmt.Printf("Error editing environment: %v\n", err)
			os.Exit(1)
		}
		edited = strings.TrimSpace(edited)
		if original == edited {
			fmt.Println("No changes made to environment. Nothing to do!")
			return
		}
		newKeyPairs, err := ecs.StringToKeyPairs(edited)
		if err != nil {
			fmt.Printf("Problem parsing new environment: %v\n", err)
			os.Exit(1)
		}
		confirm := `
Do you want to save the following environment to the task?
%s
This will also update service "%s" in "%s" to a new task definition.`
		if !AskForConfirmation(fmt.Sprintf(confirm, ecs.KeyPairsToString(newKeyPairs), cluster, service)) {
			return
		}
		deployEnv(cluster, service, newKeyPairs)
	},
	PreRunE: Validate2ArgumentsCount,
}
// init registers the env command and its get/edit/set subcommands on the
// root command.
func init() {
RootCmd.AddCommand(envCmd)
envCmd.AddCommand(getCmd)
envCmd.AddCommand(editCmd)
envCmd.AddCommand(setCmd)
}
// deployEnv creates a new task definition based on the service's current
// one with the given environment, deploys it to the service, and prints a
// deployment summary. Any failure terminates the process with exit code 1.
func deployEnv(cluster, service string, newKeyPairs []*awsecs.KeyValuePair) {
fmt.Println("Getting current task definition...")
task, err := ecs.GetCurrentTaskDefinition(cluster, service)
if err != nil {
fmt.Printf("%v\n", err)
os.Exit(1)
}
fmt.Printf("Creating new task based on %s:%d, with new environment\n", *task.Family, *task.Revision)
newTask, err := ecs.CreateNewTaskWithEnvironment(task, newKeyPairs)
if err != nil {
fmt.Printf("Problem creating new task: %v\n", err)
os.Exit(1)
}
serviceStruct, err := ecs.DeployTaskToService(cluster, service, newTask)
if err != nil {
fmt.Printf("Problem deploying task: %v\n", err)
os.Exit(1)
}
fmt.Println("\nSuccessfully deployed new task definition")
fmt.Println("=========================================")
fmt.Printf("Cluster: %s\n", cluster)
fmt.Printf("Service: %s\n", service)
fmt.Printf("Task Definition: %s:%d\n", *newTask.Family, *newTask.Revision)
fmt.Printf("Desired Count: %d\n", *serviceStruct.DesiredCount)
fmt.Printf("To view deployment status, you can visit:\n%s\n", ecs.BuildConsoleURLForService(cluster, service)+"/deployments")
}
|
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stanza
import (
"fmt"
"github.com/open-telemetry/opentelemetry-log-collection/entry"
"go.opentelemetry.io/collector/consumer/pdata"
)
// Convert translates a stanza-style entry into a single-record pdata.Logs:
// entry.Resource becomes resource attributes, Severity is mapped via
// convertSeverity, Attributes become record attributes, and Record becomes
// the log body.
func Convert(obsLog *entry.Entry) pdata.Logs {
out := pdata.NewLogs()
logs := out.ResourceLogs()
logs.Resize(1)
rls := logs.At(0)
resource := rls.Resource()
if len(obsLog.Resource) > 0 {
resourceAtts := resource.Attributes()
for k, v := range obsLog.Resource {
resourceAtts.InsertString(k, v)
}
}
rls.InstrumentationLibraryLogs().Resize(1)
ills := rls.InstrumentationLibraryLogs().At(0)
lr := pdata.NewLogRecord()
lr.SetTimestamp(pdata.TimestampFromTime(obsLog.Timestamp))
sevText, sevNum := convertSeverity(obsLog.Severity)
lr.SetSeverityText(sevText)
lr.SetSeverityNumber(sevNum)
if len(obsLog.Attributes) > 0 {
attributes := lr.Attributes()
for k, v := range obsLog.Attributes {
attributes.InsertString(k, v)
}
}
// The record payload can be any scalar/map/array shape; see insertToAttributeVal.
insertToAttributeVal(obsLog.Record, lr.Body())
ills.Logs().Append(lr)
return out
}
// insertToAttributeVal writes an arbitrary Go value into dest: booleans,
// strings, and []byte map to string/bool values, all integer widths widen to
// int64, floats widen to float64, and maps/slices recurse via
// toAttributeMap/toAttributeArray. Anything else is stringified with %v.
// Note: uint64 values above MaxInt64 wrap when converted to int64.
func insertToAttributeVal(value interface{}, dest pdata.AttributeValue) {
switch t := value.(type) {
case bool:
dest.SetBoolVal(t)
case string:
dest.SetStringVal(t)
case []byte:
dest.SetStringVal(string(t))
case int64:
dest.SetIntVal(t)
case int32:
dest.SetIntVal(int64(t))
case int16:
dest.SetIntVal(int64(t))
case int8:
dest.SetIntVal(int64(t))
case int:
dest.SetIntVal(int64(t))
case uint64:
dest.SetIntVal(int64(t))
case uint32:
dest.SetIntVal(int64(t))
case uint16:
dest.SetIntVal(int64(t))
case uint8:
dest.SetIntVal(int64(t))
case uint:
dest.SetIntVal(int64(t))
case float64:
dest.SetDoubleVal(t)
case float32:
dest.SetDoubleVal(float64(t))
case map[string]interface{}:
toAttributeMap(t).CopyTo(dest)
case []interface{}:
toAttributeArray(t).CopyTo(dest)
default:
dest.SetStringVal(fmt.Sprintf("%v", t))
}
}
// toAttributeMap converts a map[string]interface{} into a pdata map
// attribute, applying the same scalar conversions as insertToAttributeVal
// and recursing for nested maps and slices.
func toAttributeMap(obsMap map[string]interface{}) pdata.AttributeValue {
attVal := pdata.NewAttributeValueMap()
attMap := attVal.MapVal()
attMap.InitEmptyWithCapacity(len(obsMap))
for k, v := range obsMap {
switch t := v.(type) {
case bool:
attMap.InsertBool(k, t)
case string:
attMap.InsertString(k, t)
case []byte:
attMap.InsertString(k, string(t))
case int64:
attMap.InsertInt(k, t)
case int32:
attMap.InsertInt(k, int64(t))
case int16:
attMap.InsertInt(k, int64(t))
case int8:
attMap.InsertInt(k, int64(t))
case int:
attMap.InsertInt(k, int64(t))
case uint64:
attMap.InsertInt(k, int64(t))
case uint32:
attMap.InsertInt(k, int64(t))
case uint16:
attMap.InsertInt(k, int64(t))
case uint8:
attMap.InsertInt(k, int64(t))
case uint:
attMap.InsertInt(k, int64(t))
case float64:
attMap.InsertDouble(k, t)
case float32:
attMap.InsertDouble(k, float64(t))
case map[string]interface{}:
subMap := toAttributeMap(t)
attMap.Insert(k, subMap)
case []interface{}:
arr := toAttributeArray(t)
attMap.Insert(k, arr)
default:
attMap.InsertString(k, fmt.Sprintf("%v", t))
}
}
return attVal
}
// toAttributeArray converts a []interface{} into a pdata array attribute by
// recursively converting every element via insertToAttributeVal.
func toAttributeArray(obsArr []interface{}) pdata.AttributeValue {
	out := pdata.NewAttributeValueArray()
	values := out.ArrayVal()
	for _, elem := range obsArr {
		converted := pdata.NewAttributeValueNull()
		insertToAttributeVal(elem, converted)
		values.Append(converted)
	}
	return out
}
// convertSeverity maps a stanza severity to a (text, pdata severity number)
// pair. Exact standard levels are matched first; values strictly between
// standard levels fall through to the "custom" range cases below.
// NOTE(review): entry.Emergency maps to text "Error" but number FATAL, which
// looks inconsistent with the other arms — confirm the intended mapping.
func convertSeverity(s entry.Severity) (string, pdata.SeverityNumber) {
switch {
// Handle standard severity levels
case s == entry.Catastrophe:
return "Fatal", pdata.SeverityNumberFATAL4
case s == entry.Emergency:
return "Error", pdata.SeverityNumberFATAL
case s == entry.Alert:
return "Error", pdata.SeverityNumberERROR3
case s == entry.Critical:
return "Error", pdata.SeverityNumberERROR2
case s == entry.Error:
return "Error", pdata.SeverityNumberERROR
case s == entry.Warning:
return "Info", pdata.SeverityNumberINFO4
case s == entry.Notice:
return "Info", pdata.SeverityNumberINFO3
case s == entry.Info:
return "Info", pdata.SeverityNumberINFO
case s == entry.Debug:
return "Debug", pdata.SeverityNumberDEBUG
case s == entry.Trace:
return "Trace", pdata.SeverityNumberTRACE2
// Handle custom severity levels
case s > entry.Emergency:
return "Fatal", pdata.SeverityNumberFATAL2
case s > entry.Alert:
return "Error", pdata.SeverityNumberERROR4
case s > entry.Critical:
return "Error", pdata.SeverityNumberERROR3
case s > entry.Error:
return "Error", pdata.SeverityNumberERROR2
case s > entry.Warning:
return "Info", pdata.SeverityNumberINFO4
case s > entry.Notice:
return "Info", pdata.SeverityNumberINFO3
case s > entry.Info:
return "Info", pdata.SeverityNumberINFO2
case s > entry.Debug:
return "Debug", pdata.SeverityNumberDEBUG2
case s > entry.Trace:
return "Trace", pdata.SeverityNumberTRACE3
case s > entry.Default:
return "Trace", pdata.SeverityNumberTRACE
default:
return "Undefined", pdata.SeverityNumberUNDEFINED
}
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/urfave/cli"
"io/ioutil"
"net"
"os"
)
// push is the CLI entry point: it sends the file (or directory tree) named
// by the first argument to the server, which will hopefully store it.
func push(c *cli.Context) {
	// Bug fix: guard against a missing argument; indexing c.Args()[0]
	// directly panics when the command is invoked with no path.
	if len(c.Args()) == 0 {
		fmt.Println("usage: push <file-or-directory>")
		return
	}
	pushAll(c.Args()[0])
}
// pushAll pushes the named file to the server; when the name refers to a
// directory, every entry beneath it is pushed recursively.
func pushAll(filename string) {
	fileTemp, err := os.Open(filename)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer fileTemp.Close()
	// Bug fix: the Stat error was discarded, which would panic on a nil
	// FileInfo below.
	info, err := fileTemp.Stat()
	if err != nil {
		fmt.Println(err)
		return
	}
	if !info.IsDir() {
		pushF(fileTemp)
		return
	}
	// Bug fix: the Readdir error was also discarded.
	entries, err := fileTemp.Readdir(-1)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, entry := range entries {
		fmt.Println(entry.Name())
		// NOTE(review): "/" separators work on unix; filepath.Join would be portable.
		pushAll(filename + "/" + entry.Name())
	}
}
// pushF sends a single file to the configured server as a JSON "push"
// message carrying the file's name and full contents.
func pushF(file *os.File) {
	conn, err := net.Dial("tcp", configData.IP+":"+configData.PORT)
	if err != nil {
		fmt.Fprintf(os.Stdout, "Failed to connect to %v:%v because %v", configData.IP, configData.PORT, err)
		return
	}
	defer conn.Close()
	fileBytes, err := ioutil.ReadAll(file)
	if err != nil {
		// Bug fix: abort instead of pushing a truncated/empty payload; the
		// original fell through and sent the message anyway.
		fmt.Fprintf(conn, "Failed to read file %v, because %v\n", file.Name(), err)
		return
	}
	toSend := Message{
		Interaction: "push",
		Name:        file.Name(),
		Data:        fileBytes,
	}
	// Bug fix: the encode error was silently discarded.
	if err := json.NewEncoder(conn).Encode(toSend); err != nil {
		fmt.Fprintf(os.Stdout, "Failed to send file %v, because %v\n", file.Name(), err)
		return
	}
	fmt.Printf("Pushed file '%v'\n", file.Name())
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parse
import (
"testing"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/server/internal/util"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/stretchr/testify/require"
)
// TestParseExecArgs drives ExecArgs with raw COM_STMT_EXECUTE parameter
// payloads (null bitmap, per-parameter type bytes, value bytes) and checks
// the decoded constant value, or the expected decode error, for each case.
// The two type bytes per parameter are MySQL binary-protocol type codes
// (e.g. 1=TINY, 2=SHORT, 3=LONG, 7=TIMESTAMP, 10=DATE, 11=TIME,
// 12=DATETIME) with the unsigned flag in the second byte — see the MySQL
// binary protocol for the value encodings.
func TestParseExecArgs(t *testing.T) {
	type args struct {
		args        []expression.Expression
		boundParams [][]byte
		nullBitmap  []byte
		paramTypes  []byte
		paramValues []byte
	}
	tests := []struct {
		args   args
		err    error
		expect interface{}
	}{
		// Tests for int overflow
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{1, 0},
				[]byte{0xff}, // 0xff as signed TINY must decode to -1, not 255
			},
			nil,
			int64(-1),
		},
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{2, 0},
				[]byte{0xff, 0xff}, // signed SHORT
			},
			nil,
			int64(-1),
		},
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{3, 0},
				[]byte{0xff, 0xff, 0xff, 0xff}, // signed LONG
			},
			nil,
			int64(-1),
		},
		// Tests for date/datetime/timestamp
		// First value byte is the payload length (0x0b = 11: date + time +
		// microseconds; 0x07 = 7: date + time; 0x04 = 4: date only; 0x00:
		// the zero value).
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{12, 0},
				[]byte{0x0b, 0xda, 0x07, 0x0a, 0x11, 0x13, 0x1b, 0x1e, 0x01, 0x00, 0x00, 0x00},
			},
			nil,
			"2010-10-17 19:27:30.000001",
		},
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{10, 0},
				[]byte{0x04, 0xda, 0x07, 0x0a, 0x11},
			},
			nil,
			"2010-10-17",
		},
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{7, 0},
				[]byte{0x0b, 0xda, 0x07, 0x0a, 0x11, 0x13, 0x1b, 0x1e, 0x01, 0x00, 0x00, 0x00},
			},
			nil,
			"2010-10-17 19:27:30.000001",
		},
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{7, 0},
				[]byte{0x07, 0xda, 0x07, 0x0a, 0x11, 0x13, 0x1b, 0x1e},
			},
			nil,
			"2010-10-17 19:27:30",
		},
		// Length 0x0d payloads carry an extra trailing timezone offset in
		// minutes (little-endian int16: 0x02f2 = +754 = +12:34).
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{7, 0},
				[]byte{0x0d, 0xdb, 0x07, 0x02, 0x03, 0x04, 0x05, 0x06, 0x40, 0xe2, 0x01, 0x00, 0xf2, 0x02},
			},
			nil,
			"2011-02-03 04:05:06.123456+12:34",
		},
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{7, 0},
				[]byte{0x0d, 0xdb, 0x07, 0x02, 0x03, 0x04, 0x05, 0x06, 0x40, 0xe2, 0x01, 0x00, 0x0e, 0xfd},
			},
			nil,
			"2011-02-03 04:05:06.123456-12:34",
		},
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{7, 0},
				[]byte{0x00}, // zero-length payload decodes to the zero datetime
			},
			nil,
			types.ZeroDatetimeStr,
		},
		// Tests for time
		// Payload: length, negative flag, 4-byte day count, then H/M/S
		// (and optional 4-byte microseconds for length 0x0c).
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{11, 0},
				[]byte{0x0c, 0x01, 0x78, 0x00, 0x00, 0x00, 0x13, 0x1b, 0x1e, 0x01, 0x00, 0x00, 0x00},
			},
			nil,
			"-120 19:27:30.000001",
		},
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{11, 0},
				[]byte{0x08, 0x01, 0x78, 0x00, 0x00, 0x00, 0x13, 0x1b, 0x1e},
			},
			nil,
			"-120 19:27:30",
		},
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{11, 0},
				[]byte{0x00},
			},
			nil,
			"0",
		},
		// For error test
		// Truncated payloads (declared length exceeds available bytes)
		// must produce ErrMalformPacket, not a panic or a bogus value.
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{7, 0},
				[]byte{10},
			},
			mysql.ErrMalformPacket,
			nil,
		},
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{11, 0},
				[]byte{10},
			},
			mysql.ErrMalformPacket,
			nil,
		},
		{
			args{
				expression.Args2Expressions4Test(1),
				[][]byte{nil},
				[]byte{0x0},
				[]byte{11, 0},
				[]byte{8, 2},
			},
			mysql.ErrMalformPacket,
			nil,
		},
	}
	for _, tt := range tests {
		err := ExecArgs(&stmtctx.StatementContext{}, tt.args.args, tt.args.boundParams, tt.args.nullBitmap, tt.args.paramTypes, tt.args.paramValues, nil)
		require.Truef(t, terror.ErrorEqual(err, tt.err), "err %v", err)
		if err == nil {
			require.Equal(t, tt.expect, tt.args.args[0].(*expression.Constant).Value.GetValue())
		}
	}
}
// TestParseExecArgsAndEncode checks that parameter bytes are run through
// the supplied input decoder: a GBK-encoded varchar passed inline in
// paramValues, and a GBK-encoded pre-bound (long data) parameter, must
// both decode to the UTF-8 string "测试".
func TestParseExecArgsAndEncode(t *testing.T) {
	dt := expression.Args2Expressions4Test(1)
	err := ExecArgs(&stmtctx.StatementContext{},
		dt,
		[][]byte{nil},
		[]byte{0x0},
		[]byte{mysql.TypeVarchar, 0},
		[]byte{4, 178, 226, 202, 212}, // length prefix 4, then the GBK bytes for "测试"
		util.NewInputDecoder("gbk"))
	require.NoError(t, err)
	require.Equal(t, "测试", dt[0].(*expression.Constant).Value.GetValue())
	err = ExecArgs(&stmtctx.StatementContext{},
		dt,
		[][]byte{{178, 226, 202, 212}}, // bound param already holds the raw GBK bytes
		[]byte{0x0},
		[]byte{mysql.TypeString, 0},
		[]byte{},
		util.NewInputDecoder("gbk"))
	require.NoError(t, err)
	require.Equal(t, "测试", dt[0].(*expression.Constant).Value.GetString())
}
// TestParseStmtFetchCmd exercises StmtFetchCmd with well-formed and
// malformed COM_STMT_FETCH payloads. A valid payload is exactly eight
// bytes: a little-endian uint32 statement id followed by a little-endian
// uint32 fetch size (capped at maxFetchSize).
func TestParseStmtFetchCmd(t *testing.T) {
	cases := []struct {
		payload   []byte
		stmtID    uint32
		fetchSize uint32
		err       error
	}{
		{[]byte{3, 0, 0, 0, 50, 0, 0, 0}, 3, 50, nil},
		{[]byte{5, 0, 0, 0, 232, 3, 0, 0}, 5, 1000, nil},
		{[]byte{5, 0, 0, 0, 0, 8, 0, 0}, 5, maxFetchSize, nil},
		// Short, long, and empty payloads are all malformed.
		{[]byte{5, 0, 0}, 0, 0, mysql.ErrMalformPacket},
		{[]byte{1, 0, 0, 0, 3, 2, 0, 0, 3, 5, 6}, 0, 0, mysql.ErrMalformPacket},
		{[]byte{}, 0, 0, mysql.ErrMalformPacket},
	}
	for _, c := range cases {
		id, size, err := StmtFetchCmd(c.payload)
		require.Equal(t, c.stmtID, id)
		require.Equal(t, c.fetchSize, size)
		require.Equal(t, c.err, err)
	}
}
|
package main
import (
"reflect"
"fmt"
)
// Student is a demo struct for the reflection examples below; the
// `heylink` struct tag is read back via reflect in testStruct.
type Student struct {
	Name string `heylink:"hahaha"`
}
// Print writes a short description of the student to stdout. It is
// invoked reflectively (val.Method(0).Call) from testStruct.
func (s *Student) Print() {
	label := "this is a student:"
	fmt.Println(label, s.Name)
}
// main drives the reflection demos: inspecting an int and a struct,
// mutating a struct field through a pointer, and mutating an int in place.
func main() {
	testReflect(200)

	stu := Student{Name: "heylink"}
	testReflect(stu)
	testStruct(&stu)

	// Pass a pointer so the callee can modify the value in place.
	n := 200
	testInt(&n)
	fmt.Println(n)
}
// testReflect inspects an arbitrary value: its reflect.Type, its
// reflect.Value, its Kind, and — when the value is a Student — the
// concrete struct recovered through a type assertion.
func testReflect(b interface{}) {
	value := reflect.ValueOf(b)

	fmt.Println(reflect.TypeOf(b)) // e.g. main.Student
	fmt.Println(value)             // e.g. {heylink}
	fmt.Println(value.Kind())      // e.g. struct

	// Round-trip the value back to interface{} and try to recover the
	// concrete Student; silently skipped for any other type.
	if stu, ok := value.Interface().(Student); ok {
		fmt.Printf("%v %T\n", stu, stu)
	}
}
func testInt(b interface{}) {
val := reflect.ValueOf(b)
fmt.Printf("before set: %d\n", val.Elem())
//Elem()方法相当于*
val.Elem().SetInt(100)
c:= val.Elem().Int()
fmt.Printf("after set: %d\n", c)
}
// testStruct demonstrates reflection on a struct passed by pointer: it
// mutates the first field, counts fields and methods, invokes the first
// method (if any), and reads the first field's `heylink` tag.
func testStruct(b interface{}) {
	val := reflect.ValueOf(b)
	// Require a pointer to a struct; anything else cannot be mutated.
	// BUG FIX: the original check used == (…Elem().Kind() == reflect.Struct),
	// which rejected exactly the valid *struct inputs and let nothing through.
	if val.Kind() != reflect.Ptr || val.Elem().Kind() != reflect.Struct {
		fmt.Println("expect struct")
		return
	}

	elem := val.Elem()

	num := elem.NumField()
	// Mutate the first field; works because b is a pointer.
	// NOTE(review): assumes field 0 is a string — true for Student.
	elem.Field(0).SetString("hexing")
	fmt.Printf("struct has %d fields\n", num)

	// Methods are counted on the pointer type, so pointer-receiver
	// methods are included.
	num = val.NumMethod()
	fmt.Printf("struct has %d methods\n", num)
	if num > 0 {
		val.Method(0).Call(nil)
	}

	// Struct tags live on the type, not the value.
	field := reflect.TypeOf(b).Elem().Field(0)
	fmt.Println("反射获取tag:", field.Tag.Get("heylink"))
}
package testdata
import (
"github.com/frk/gosql/internal/testdata/common"
)
// InsertResultAfterScanSliceQuery is a gosql testdata fixture: presumably
// an insert over test_user (aliased u via the rel tag) whose result set is
// scanned into a slice of a different row type — confirm against the
// gosql generator tests that consume this package.
type InsertResultAfterScanSliceQuery struct {
	Users []*common.User `rel:"test_user:u"`
	Result []*common.User2
}
|
package negotiate
import (
"net/http"
"github.com/unrolled/render"
)
// Negotiator wraps render.Render and handles switching the response
// encoding based on the request's ContentType.
type Negotiator struct {
	ContentType string
	*render.Render
}
//GetNegotiator 함수는 요청(http.Request)을 인자로 받아
//콘텐트 타입 헤더(ContentType header)에서
//콘텐트 타입(ContentType)을 가져온다.
func GetNegotiator(r *http.Request) *Negotiator {
contentType := r.Header.Get("Content-Type")
return &Negotiator{
ContentType: contentType,
Render: render.New(),
}
}
|
package ir
// ENGLISH_STOP_WORDS lists lowercase English stop words — including bare
// contraction stems such as "aren", "ll", "ve" — used to filter terms.
// The original list carried many duplicate entries ("she", "they", "we",
// "you", …); each word now appears exactly once, in the original order.
var ENGLISH_STOP_WORDS = []string{
	"a", "about", "above", "after", "again", "against", "all", "am",
	"an", "and", "any", "are", "aren", "as", "at", "be", "because",
	"been", "before", "being", "below", "between", "both", "but", "by",
	"can", "cannot", "could", "couldn", "d", "did", "didn", "do",
	"does", "doesn", "doing", "don", "down", "during", "each", "few",
	"for", "from", "further", "had", "hadn", "has", "hasn", "have",
	"haven", "having", "he", "her", "here", "hers", "herself", "him",
	"himself", "his", "how", "i", "im", "in", "into", "is", "isn",
	"it", "its", "itself", "ll", "let", "m", "me", "more", "most",
	"mustn", "my", "myself", "no", "nor", "not", "of", "off", "on",
	"once", "only", "or", "other", "ought", "our", "ours", "ourselves",
	"out", "over", "own", "re", "s", "same", "shan", "she", "should",
	"shouldn", "so", "some", "such", "t", "than", "that", "the",
	"their", "theirs", "them", "themselves", "then", "there", "these",
	"they", "this", "those", "through", "to", "too", "under", "until",
	"up", "ve", "very", "was", "wasn", "we", "were", "weren", "what",
	"when", "where", "which", "who", "whom", "why", "with", "won",
	"would", "wouldn", "you", "your", "yours", "yourself", "yourselves",
}
// PORTUGUESE_STOP_WORDS lists lowercase Portuguese stop words used to
// filter terms. The original list duplicated "deste" and "la"; each word
// now appears exactly once, in the original order.
var PORTUGUESE_STOP_WORDS = []string{
	"a", "à", "agora", "ainda", "alguém", "algum", "alguma", "algumas",
	"alguns", "ampla", "amplas", "amplo", "amplos", "ante", "antes",
	"ao", "aos", "após", "aquela", "aquelas", "aquele", "aqueles",
	"aquilo", "as", "até", "através", "cada", "coisa", "coisas", "com",
	"como", "contra", "contudo", "da", "daquele", "daqueles", "das",
	"de", "dela", "delas", "dele", "deles", "depois", "dessa", "dessas",
	"desse", "desses", "desta", "destas", "deste", "destes", "deve",
	"devem", "devendo", "dever", "deverá", "deverão", "deveria",
	"deveriam", "devia", "deviam", "disse", "disso", "disto", "dito",
	"diz", "dizem", "do", "dos", "e", "é", "e'", "ela", "elas", "ele",
	"eles", "em", "enquanto", "entre", "era", "essa", "essas", "esse",
	"esses", "esta", "está", "estamos", "estão", "estas", "estava",
	"estavam", "estávamos", "este", "estes", "estou", "eu", "fazendo",
	"fazer", "feita", "feitas", "feito", "feitos", "foi", "for",
	"foram", "fosse", "fossem", "grande", "grandes", "há", "isso",
	"isto", "já", "la", "lá", "lhe", "lhes", "lo", "mas", "me",
	"mesma", "mesmas", "mesmo", "mesmos", "meu", "meus", "minha",
	"minhas", "muita", "muitas", "muito", "muitos", "na", "não", "nas",
	"nem", "nenhum", "nessa", "nessas", "nesta", "nestas", "ninguém",
	"no", "nos", "nós", "nossa", "nossas", "nosso", "nossos", "num",
	"numa", "nunca", "o", "os", "ou", "outra", "outras", "outro",
	"outros", "para", "pela", "pelas", "pelo", "pelos", "pequena",
	"pequenas", "pequeno", "pequenos", "per", "perante", "pode",
	"pôde", "podendo", "poder", "poderia", "poderiam", "podia",
	"podiam", "pois", "por", "porém", "porque", "posso", "pouca",
	"poucas", "pouco", "poucos", "primeiro", "primeiros", "própria",
	"próprias", "próprio", "próprios", "quais", "qual", "quando",
	"quanto", "quantos", "que", "quem", "são", "se", "seja", "sejam",
	"sem", "sempre", "sendo", "será", "serão", "seu", "seus", "si",
	"sido", "só", "sob", "sobre", "sua", "suas", "talvez", "também",
	"tampouco", "te", "tem", "tendo", "tenha", "ter", "teu", "teus",
	"ti", "tido", "tinha", "tinham", "toda", "todas", "todavia",
	"todo", "todos", "tu", "tua", "tuas", "tudo", "última", "últimas",
	"último", "últimos", "um", "uma", "umas", "uns", "vendo", "ver",
	"vez", "vindo", "vir", "vos", "vós",
}
|
/*
Go Language Raspberry Pi Interface
(c) Copyright David Thorpe 2016-2018
All Rights Reserved
Documentation http://djthorpe.github.io/gopi/
For Licensing and Usage information, please see LICENSE.md
*/
package rfm69
import (
"time"
// Frameworks
"github.com/djthorpe/gopi"
"github.com/djthorpe/sensors"
)
////////////////////////////////////////////////////////////////////////////////
// TYPES
type (
	// register is an RFM69 register address (0x00-0x7F); the high bit
	// is the write flag (see RFM_REG_WRITE below).
	register uint8
)
////////////////////////////////////////////////////////////////////////////////
// CONSTANTS
// Register address map for the RFM69 transceiver, one entry per register
// in the module's register table (addresses 0x00-0x7F).
const (
	// RFM69 Registers
	RFM_REG_FIFO          register = 0x00 /* FIFO Read/Write Access */
	RFM_REG_OPMODE        register = 0x01 /* Operating modes of the transceiver */
	RFM_REG_DATAMODUL     register = 0x02 /* Data operation mode and modulation settings */
	RFM_REG_BITRATEMSB    register = 0x03 /* Bit Rate setting, most significant bits */
	RFM_REG_BITRATELSB    register = 0x04 /* Bit Rate setting, least significant bits */
	RFM_REG_FDEVMSB       register = 0x05 /* Frequency deviation setting, most significant bits */
	RFM_REG_FDEVLSB       register = 0x06 /* Frequency deviation setting, least significant bits */
	RFM_REG_FRFMSB        register = 0x07 /* RF Carrier Frequency, most significant bits */
	RFM_REG_FRFMID        register = 0x08 /* RF Carrier Frequency, intermediate bits */
	RFM_REG_FRFLSB        register = 0x09 /* RF Carrier Frequency, least significant bits */
	RFM_REG_OSC1          register = 0x0A /* RC Oscillators Settings */
	RFM_REG_AFCCTRL       register = 0x0B /* AFC Control in low modulation index situations */
	RFM_REG_LISTEN1       register = 0x0D /* Listen mode settings */
	RFM_REG_LISTEN2       register = 0x0E /* Listen mode idle duration */
	RFM_REG_LISTEN3       register = 0x0F /* Listen mode Rx duration */
	RFM_REG_VERSION       register = 0x10 /* Module version */
	RFM_REG_PALEVEL       register = 0x11 /* PA selection and output power control */
	RFM_REG_PARAMP        register = 0x12 /* Control of the PA ramp time in FSK mode */
	RFM_REG_OCP           register = 0x13 /* Over Current Protection control */
	RFM_REG_LNA           register = 0x18 /* LNA Settings */
	RFM_REG_RXBW          register = 0x19 /* Channel Filter BW Control */
	RFM_REG_AFCBW         register = 0x1A // Channel Filter BW control during the AFC routine
	RFM_REG_OOKPEAK       register = 0x1B // OOK demodulator selection and control in peak mode
	RFM_REG_OOKAVG        register = 0x1C // Average threshold control of the OOK demodulator
	RFM_REG_OOKFIX        register = 0x1D // Fixed threshold control of the OOK demodulator
	RFM_REG_AFCFEI        register = 0x1E // AFC and FEI control and status
	RFM_REG_AFCMSB        register = 0x1F // MSB of the frequency correction of the AFC
	RFM_REG_AFCLSB        register = 0x20 // LSB of the frequency correction of the AFC
	RFM_REG_FEIMSB        register = 0x21 // MSB of the calculated frequency error
	RFM_REG_FEILSB        register = 0x22 // LSB of the calculated frequency error
	RFM_REG_RSSICONFIG    register = 0x23 // RSSI-related settings
	RFM_REG_RSSIVALUE     register = 0x24 // RSSI value in dBm
	RFM_REG_DIOMAPPING1   register = 0x25 // Mapping of pins DIO0 to DIO3
	RFM_REG_DIOMAPPING2   register = 0x26 // Mapping of pins DIO4 and DIO5, ClkOut frequency
	RFM_REG_IRQFLAGS1     register = 0x27 // Status register: PLL Lock state, Timeout, RSSI > Threshold...
	RFM_REG_IRQFLAGS2     register = 0x28 // Status register: FIFO handling flags...
	RFM_REG_RSSITHRESH    register = 0x29 // RSSI Threshold control
	RFM_REG_RXTIMEOUT1    register = 0x2A // Timeout duration between Rx request and RSSI detection
	RFM_REG_RXTIMEOUT2    register = 0x2B // Timeout duration between RSSI detection and PayloadReady
	RFM_REG_PREAMBLEMSB   register = 0x2C // Preamble length, MSB
	RFM_REG_PREAMBLELSB   register = 0x2D // Preamble length, LSB
	RFM_REG_SYNCCONFIG    register = 0x2E // Sync Word Recognition control
	RFM_REG_SYNCVALUE1    register = 0x2F // Sync Word bytes, 1 through 8
	RFM_REG_SYNCVALUE2    register = 0x30
	RFM_REG_SYNCVALUE3    register = 0x31
	RFM_REG_SYNCVALUE4    register = 0x32
	RFM_REG_SYNCVALUE5    register = 0x33
	RFM_REG_SYNCVALUE6    register = 0x34
	RFM_REG_SYNCVALUE7    register = 0x35
	RFM_REG_SYNCVALUE8    register = 0x36
	RFM_REG_PACKETCONFIG1 register = 0x37 // Packet mode settings
	RFM_REG_PAYLOADLENGTH register = 0x38 // Payload length setting
	RFM_REG_NODEADRS      register = 0x39 // Node address
	RFM_REG_BROADCASTADRS register = 0x3A // Broadcast address
	RFM_REG_AUTOMODES     register = 0x3B // Auto modes settings
	RFM_REG_FIFOTHRESH    register = 0x3C // Fifo threshold, Tx start condition
	RFM_REG_PACKETCONFIG2 register = 0x3D // Packet mode settings
	RFM_REG_AESKEY1       register = 0x3E // 16 bytes of the cypher key
	RFM_REG_AESKEY2       register = 0x3F
	RFM_REG_AESKEY3       register = 0x40
	RFM_REG_AESKEY4       register = 0x41
	RFM_REG_AESKEY5       register = 0x42
	RFM_REG_AESKEY6       register = 0x43
	RFM_REG_AESKEY7       register = 0x44
	RFM_REG_AESKEY8       register = 0x45
	RFM_REG_AESKEY9       register = 0x46
	RFM_REG_AESKEY10      register = 0x47
	RFM_REG_AESKEY11      register = 0x48
	RFM_REG_AESKEY12      register = 0x49
	RFM_REG_AESKEY13      register = 0x4A
	RFM_REG_AESKEY14      register = 0x4B
	RFM_REG_AESKEY15      register = 0x4C
	RFM_REG_AESKEY16      register = 0x4D
	RFM_REG_TEMP1         register = 0x4E // Temperature Sensor control
	RFM_REG_TEMP2         register = 0x4F // Temperature readout
	RFM_REG_TEST          register = 0x50 // Internal test registers
	RFM_REG_TESTLNA       register = 0x58 // Sensitivity boost
	RFM_REG_TESTPA1       register = 0x5A // High Power PA settings
	RFM_REG_TESTPA2       register = 0x5C // High Power PA settings
	RFM_REG_TESTDAGC      register = 0x6F // Fading Margin Improvement
	RFM_REG_TESTAFC       register = 0x71 // AFC offset for low modulation index AFC
	RFM_REG_MAX           register = 0x7F // Last possible register value
	RFM_REG_WRITE         register = 0x80 // Write bit
)
const (
	// RFM69 IRQ Flags
	// Bit masks for RFM_REG_IRQFLAGS1 (0x27).
	RFM_IRQFLAGS1_MODEREADY        uint8 = 0x80 // Mode has changed
	RFM_IRQFLAGS1_RXREADY          uint8 = 0x40
	RFM_IRQFLAGS1_TXREADY          uint8 = 0x20
	RFM_IRQFLAGS1_PLLLOCK          uint8 = 0x10
	RFM_IRQFLAGS1_RSSI             uint8 = 0x08
	RFM_IRQFLAGS1_TIMEOUT          uint8 = 0x04
	RFM_IRQFLAGS1_AUTOMODE         uint8 = 0x02
	RFM_IRQFLAGS1_SYNCADDRESSMATCH uint8 = 0x01
	// Bit masks for RFM_REG_IRQFLAGS2 (0x28).
	RFM_IRQFLAGS2_CRCOK        uint8 = 0x02
	RFM_IRQFLAGS2_PAYLOADREADY uint8 = 0x04
	RFM_IRQFLAGS2_PACKETSENT   uint8 = 0x08
	RFM_IRQFLAGS2_FIFOOVERRUN  uint8 = 0x10
	RFM_IRQFLAGS2_FIFOLEVEL    uint8 = 0x20
	RFM_IRQFLAGS2_FIFONOTEMPTY uint8 = 0x40
	RFM_IRQFLAGS2_FIFOFULL     uint8 = 0x80
)
////////////////////////////////////////////////////////////////////////////////
// WAIT HELPERS

// wait_for_condition polls callback roughly every 100ms until it reports
// the wanted condition, an error occurs, or timeout elapses, in which
// case sensors.ErrDeviceTimeout is returned.
func wait_for_condition(callback func() (bool, error), condition bool, timeout time.Duration) error {
	deadline := time.After(timeout)
	for {
		// Non-blocking timeout check before each poll.
		select {
		case <-deadline:
			return sensors.ErrDeviceTimeout
		default:
		}
		state, err := callback()
		if err != nil {
			return err
		}
		if state == condition {
			return nil
		}
		time.Sleep(time.Millisecond * 100)
	}
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_OPMODE
// getOpMode reads RFM_REG_OPMODE and unpacks the device mode (bits 2+,
// masked to RFM_MODE_MAX), listen_on (bit 6) and sequencer_off (bit 7).
func (this *rfm69) getOpMode() (sensors.RFMMode, bool, bool, error) {
	data, err := this.readreg_uint8(RFM_REG_OPMODE)
	if err != nil {
		return 0, false, false, err
	}
	mode := sensors.RFMMode(data>>2) & sensors.RFM_MODE_MAX
	listen_on := to_uint8_bool((data >> 6) & 0x01)
	sequencer_off := to_uint8_bool((data >> 7) & 0x01)
	return mode, listen_on, sequencer_off, nil
}
// setOpMode packs device_mode (bits 2+), listen_abort (bit 5), listen_on
// (bit 6) and sequencer_off (bit 7) and writes RFM_REG_OPMODE.
func (this *rfm69) setOpMode(device_mode sensors.RFMMode, listen_on bool, listen_abort bool, sequencer_off bool) error {
	value :=
		uint8(device_mode&sensors.RFM_MODE_MAX)<<2 |
			to_bool_uint8(listen_on)<<6 |
			to_bool_uint8(listen_abort)<<5 |
			to_bool_uint8(sequencer_off)<<7
	return this.writereg_uint8(RFM_REG_OPMODE, value)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_DATAMODUL
// getDataModul reads RFM_REG_DATAMODUL and unpacks the data mode
// (bits 5+) and the modulation (low bits).
func (this *rfm69) getDataModul() (sensors.RFMDataMode, sensors.RFMModulation, error) {
	data, err := this.readreg_uint8(RFM_REG_DATAMODUL)
	if err != nil {
		return 0, 0, err
	}
	data_mode := sensors.RFMDataMode(data>>5) & sensors.RFM_DATAMODE_MAX
	modulation := sensors.RFMModulation(data) & sensors.RFM_MODULATION_MAX
	return data_mode, modulation, nil
}
// setDataModul packs and writes the data mode and modulation into
// RFM_REG_DATAMODUL.
func (this *rfm69) setDataModul(data_mode sensors.RFMDataMode, modulation sensors.RFMModulation) error {
	value :=
		uint8(data_mode&sensors.RFM_DATAMODE_MAX)<<5 |
			uint8(modulation&sensors.RFM_MODULATION_MAX)
	return this.writereg_uint8(RFM_REG_DATAMODUL, value)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_VERSION
// getVersion reads the module version register.
func (this *rfm69) getVersion() (uint8, error) {
	return this.readreg_uint8(RFM_REG_VERSION)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_AFCMSB, RFM_REG_AFCLSB
// getAFC reads the signed 16-bit Auto Frequency Correction value,
// MSB first at RFM_REG_AFCMSB.
func (this *rfm69) getAFC() (int16, error) {
	return this.readreg_int16(RFM_REG_AFCMSB)
}
// getAFCRoutine reads RegAfcCtrl and extracts the AFC routine selection
// (bit 5, masked to RFM_AFCROUTINE_MASK).
func (this *rfm69) getAFCRoutine() (sensors.RFMAFCRoutine, error) {
	if afc_routine, err := this.readreg_uint8(RFM_REG_AFCCTRL); err != nil {
		return 0, err
	} else {
		return sensors.RFMAFCRoutine(afc_routine>>5) & sensors.RFM_AFCROUTINE_MASK, nil
	}
}
// setAFCRoutine writes the AFC routine selection into RegAfcCtrl.
func (this *rfm69) setAFCRoutine(afc_routine sensors.RFMAFCRoutine) error {
	value := uint8(afc_routine&sensors.RFM_AFCROUTINE_MASK) << 5
	return this.writereg_uint8(RFM_REG_AFCCTRL, value)
}
// getAFCControl reads RFM_REG_AFCFEI and returns the AFC mode (bits 2+),
// afc_done (bit 4) and fei_done (bit 6).
func (this *rfm69) getAFCControl() (sensors.RFMAFCMode, bool, bool, error) {
	if value, err := this.readreg_uint8(RFM_REG_AFCFEI); err != nil {
		return 0, false, false, err
	} else {
		fei_done := to_uint8_bool(value & 0x40)
		afc_done := to_uint8_bool(value & 0x10)
		afc_mode := sensors.RFMAFCMode(value>>2) & sensors.RFM_AFCMODE_MASK
		return afc_mode, afc_done, fei_done, nil
	}
}
// setAFCControl packs fei_start (bit 5), the AFC mode (bits 2+),
// afc_clear (bit 1) and afc_start (bit 0) and writes RFM_REG_AFCFEI.
func (this *rfm69) setAFCControl(afc_mode sensors.RFMAFCMode, fei_start, afc_clear, afc_start bool) error {
	value :=
		to_bool_uint8(fei_start)<<5 |
			uint8(afc_mode&sensors.RFM_AFCMODE_MASK)<<2 |
			to_bool_uint8(afc_clear)<<1 |
			to_bool_uint8(afc_start)<<0
	return this.writereg_uint8(RFM_REG_AFCFEI, value)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_BITRATE
// getBitrate reads the two-byte bit rate setting (MSB first).
func (this *rfm69) getBitrate() (uint16, error) {
	return this.readreg_uint16(RFM_REG_BITRATEMSB)
}
// setBitrate writes the two-byte bit rate setting.
func (this *rfm69) setBitrate(bitrate uint16) error {
	return this.writereg_uint16(RFM_REG_BITRATEMSB, bitrate)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_FRF
// getFreqCarrier reads the three-byte RF carrier frequency (FRF),
// masked to RFM_FRF_MAX.
func (this *rfm69) getFreqCarrier() (uint32, error) {
	if frf, err := this.readreg_uint24(RFM_REG_FRFMSB); err != nil {
		return 0, err
	} else {
		return frf & RFM_FRF_MAX, nil
	}
}
// getFreqDeviation reads the two-byte frequency deviation (FDEV),
// masked to RFM_FDEV_MAX.
func (this *rfm69) getFreqDeviation() (uint16, error) {
	if fdev, err := this.readreg_uint16(RFM_REG_FDEVMSB); err != nil {
		return 0, err
	} else {
		return fdev & RFM_FDEV_MAX, nil
	}
}
// setFreqCarrier writes the three-byte RF carrier frequency.
func (this *rfm69) setFreqCarrier(value uint32) error {
	// write MSB, MIDDLE and LSB in that order
	if err := this.writereg_uint8(RFM_REG_FRFMSB, uint8(value>>16)); err != nil {
		return err
	}
	if err := this.writereg_uint8(RFM_REG_FRFMID, uint8(value>>8)); err != nil {
		return err
	}
	if err := this.writereg_uint8(RFM_REG_FRFLSB, uint8(value)); err != nil {
		return err
	}
	return nil
}
// setFreqDeviation writes the two-byte frequency deviation.
// (The original comment said "Write FRF (three bytes)" — this writes FDEV.)
func (this *rfm69) setFreqDeviation(value uint16) error {
	return this.writereg_uint16(RFM_REG_FDEVMSB, value)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_NODEADRS, RFM_REG_BROADCASTADRS

// getNodeAddress reads the node address register.
func (this *rfm69) getNodeAddress() (uint8, error) {
	// The previous if/else merely forwarded the result; return directly.
	return this.readreg_uint8(RFM_REG_NODEADRS)
}

// getBroadcastAddress reads the broadcast address register.
func (this *rfm69) getBroadcastAddress() (uint8, error) {
	return this.readreg_uint8(RFM_REG_BROADCASTADRS)
}

// setNodeAddress writes the node address register.
func (this *rfm69) setNodeAddress(value uint8) error {
	return this.writereg_uint8(RFM_REG_NODEADRS, value)
}

// setBroadcastAddress writes the broadcast address register.
func (this *rfm69) setBroadcastAddress(value uint8) error {
	return this.writereg_uint8(RFM_REG_BROADCASTADRS, value)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_PREAMBLE
// getPreambleSize reads the two-byte preamble length (MSB first).
func (this *rfm69) getPreambleSize() (uint16, error) {
	return this.readreg_uint16(RFM_REG_PREAMBLEMSB)
}
// setPreambleSize writes the two-byte preamble length.
func (this *rfm69) setPreambleSize(preamble_size uint16) error {
	return this.writereg_uint16(RFM_REG_PREAMBLEMSB, preamble_size)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_PAYLOADLENGTH
// getPayloadSize reads the payload length register.
func (this *rfm69) getPayloadSize() (uint8, error) {
	return this.readreg_uint8(RFM_REG_PAYLOADLENGTH)
}
// setPayloadSize writes the payload length register.
func (this *rfm69) setPayloadSize(payload_size uint8) error {
	return this.writereg_uint8(RFM_REG_PAYLOADLENGTH, payload_size)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_AESKEY, RFM_REG_SYNCKEY, RFM_REG_SYNCCONFIG

// getAESKey reads the AES cipher key bytes (RFM_REG_AESKEY1 onward).
func (this *rfm69) getAESKey() ([]byte, error) {
	// The previous if/else merely forwarded the result; return directly.
	return this.readreg_uint8_array(RFM_REG_AESKEY1, RFM_AESKEY_BYTES)
}

// getSyncWord reads the sync word bytes (RFM_REG_SYNCVALUE1 onward).
func (this *rfm69) getSyncWord() ([]byte, error) {
	return this.readreg_uint8_array(RFM_REG_SYNCVALUE1, RFM_SYNCWORD_BYTES)
}

// setSyncWord writes the sync word, which may be at most
// RFM_SYNCWORD_BYTES long.
func (this *rfm69) setSyncWord(word []byte) error {
	if len(word) > RFM_SYNCWORD_BYTES {
		return gopi.ErrBadParameter
	}
	return this.writereg_uint8_array(RFM_REG_SYNCVALUE1, word)
}

// setAESKey writes the AES cipher key, which must be exactly
// RFM_AESKEY_BYTES long.
func (this *rfm69) setAESKey(aes_key []byte) error {
	if len(aes_key) != RFM_AESKEY_BYTES {
		this.log.Debug2("setAESKey: invalid AES key length (%v bytes, should be %v bytes)", len(aes_key), RFM_AESKEY_BYTES)
		return gopi.ErrBadParameter
	}
	return this.writereg_uint8_array(RFM_REG_AESKEY1, aes_key)
}
// getSyncConfig reads RFM_REG_SYNCCONFIG and returns SyncOn (bit 7),
// FifoFillCondition (bit 6), SyncSize (bits 3-5) and SyncTol (bits 0-2).
// Note sync_size is one less than the SyncSize
func (this *rfm69) getSyncConfig() (bool, bool, uint8, uint8, error) {
	if value, err := this.readreg_uint8(RFM_REG_SYNCCONFIG); err != nil {
		return false, false, 0, 0, err
	} else {
		return to_uint8_bool(value & 0x80), to_uint8_bool(value & 0x40), (uint8(value) >> 3) & 0x07, uint8(value & 0x07), nil
	}
}
// setSyncConfig packs and writes the same fields into RFM_REG_SYNCCONFIG.
// Note sync_size is one less than the SyncSize
func (this *rfm69) setSyncConfig(sync_on, fifo_fill_condition bool, sync_size, sync_tol uint8) error {
	value :=
		to_bool_uint8(sync_on)<<7 |
			to_bool_uint8(fifo_fill_condition)<<6 |
			(sync_size&0x07)<<3 |
			(sync_tol & 0x07)
	return this.writereg_uint8(RFM_REG_SYNCCONFIG, value)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_FIFOTHRESH
// getFIFOThreshold reads RFM_REG_FIFOTHRESH and returns the TX start
// condition (bit 7) and the FIFO threshold (bits 0-6).
func (this *rfm69) getFIFOThreshold() (sensors.RFMTXStart, uint8, error) {
	if value, err := this.readreg_uint8(RFM_REG_FIFOTHRESH); err != nil {
		return 0, 0, err
	} else {
		tx_start := sensors.RFMTXStart(value>>7) & sensors.RFM_TXSTART_MAX
		fifo_threshold := value & 0x7F
		return tx_start, fifo_threshold, nil
	}
}
// setFIFOThreshold packs and writes the TX start condition and FIFO
// threshold into RFM_REG_FIFOTHRESH.
func (this *rfm69) setFIFOThreshold(tx_start sensors.RFMTXStart, fifo_threshold uint8) error {
	value := uint8(tx_start)<<7 | fifo_threshold&0x7F
	return this.writereg_uint8(RFM_REG_FIFOTHRESH, value)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_PACKETCONFIG1
// getPacketConfig1 reads RegPacketConfig1 and unpacks PacketFormat
// (bit 7), PacketCoding (bits 5+), AddressFiltering (low bits), CRCOn
// (bit 4) and CRCAutoClearOff (bit 3).
func (this *rfm69) getPacketConfig1() (sensors.RFMPacketFormat, sensors.RFMPacketCoding, sensors.RFMPacketFilter, bool, bool, error) {
	if value, err := this.readreg_uint8(RFM_REG_PACKETCONFIG1); err != nil {
		return 0, 0, 0, false, false, err
	} else {
		packet_format := sensors.RFMPacketFormat((value >> 7) & 0x01)
		packet_coding := sensors.RFMPacketCoding(value>>5) & sensors.RFM_PACKET_CODING_MAX
		packet_filter := sensors.RFMPacketFilter(value) & sensors.RFM_PACKET_FILTER_MAX
		crc_on := to_uint8_bool(value & 0x10)
		crc_auto_clear_off := to_uint8_bool(value & 0x08)
		return packet_format, packet_coding, packet_filter, crc_on, crc_auto_clear_off, nil
	}
}
// setPacketConfig1 packs and writes the same fields into RegPacketConfig1.
func (this *rfm69) setPacketConfig1(packet_format sensors.RFMPacketFormat, packet_coding sensors.RFMPacketCoding, packet_filter sensors.RFMPacketFilter, crc_on bool, crc_auto_clear_off bool) error {
	value :=
		(uint8(packet_format)&0x01)<<7 |
			uint8(packet_coding&sensors.RFM_PACKET_CODING_MAX)<<5 |
			uint8(packet_filter&sensors.RFM_PACKET_FILTER_MAX) |
			to_bool_uint8(crc_on)<<4 |
			to_bool_uint8(crc_auto_clear_off)<<3
	return this.writereg_uint8(RFM_REG_PACKETCONFIG1, value)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_PACKETCONFIG2
// getPacketConfig2 reads RegPacketConfig2 and unpacks InterPacketRxDelay
// (bits 4-7), AutoRxRestartOn (bit 1) and AesOn (bit 0).
func (this *rfm69) getPacketConfig2() (uint8, bool, bool, error) {
	if value, err := this.readreg_uint8(RFM_REG_PACKETCONFIG2); err != nil {
		return 0, false, false, err
	} else {
		rx_inter_packet_delay := uint8(value&0xF0) >> 4
		rx_auto_restart := to_uint8_bool(value & 0x02)
		aes_on := to_uint8_bool(value & 0x01)
		return rx_inter_packet_delay, rx_auto_restart, aes_on, nil
	}
}
// setPacketConfig2 packs and writes the same fields into RegPacketConfig2.
func (this *rfm69) setPacketConfig2(rx_inter_packet_delay uint8, rx_auto_restart bool, aes_on bool) error {
	value := (rx_inter_packet_delay&0x0F)<<4 | to_bool_uint8(rx_auto_restart)<<1 | to_bool_uint8(aes_on)
	return this.writereg_uint8(RFM_REG_PACKETCONFIG2, value)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_IRQXFLAGS

// getIRQFlags1 reads RFM_REG_IRQFLAGS1, logs every asserted flag for
// debugging, and returns the register value masked with mask.
func (this *rfm69) getIRQFlags1(mask uint8) (uint8, error) {
	value, err := this.readreg_uint8(RFM_REG_IRQFLAGS1)
	if err != nil {
		// Fail early: the register value is meaningless on a read error.
		// (The original logged and masked the value before checking err.)
		return 0, err
	}
	for _, flag := range []struct {
		bit  uint8
		name string
	}{
		{RFM_IRQFLAGS1_MODEREADY, "RFM_IRQFLAGS1_MODEREADY"},
		{RFM_IRQFLAGS1_AUTOMODE, "RFM_IRQFLAGS1_AUTOMODE"},
		{RFM_IRQFLAGS1_PLLLOCK, "RFM_IRQFLAGS1_PLLLOCK"},
		{RFM_IRQFLAGS1_RSSI, "RFM_IRQFLAGS1_RSSI"},
		{RFM_IRQFLAGS1_RXREADY, "RFM_IRQFLAGS1_RXREADY"},
		{RFM_IRQFLAGS1_SYNCADDRESSMATCH, "RFM_IRQFLAGS1_SYNCADDRESSMATCH"},
		{RFM_IRQFLAGS1_TIMEOUT, "RFM_IRQFLAGS1_TIMEOUT"},
		{RFM_IRQFLAGS1_TXREADY, "RFM_IRQFLAGS1_TXREADY"},
	} {
		if value&flag.bit != 0 {
			this.log.Debug2(flag.name)
		}
	}
	return value & mask, nil
}
// setIRQFlags2 clears the FIFO buffer by setting the FifoOverrun flag
// in RFM_REG_IRQFLAGS2.
func (this *rfm69) setIRQFlags2() error {
	// Use the named constant instead of the magic number 0x10.
	return this.writereg_uint8(RFM_REG_IRQFLAGS2, RFM_IRQFLAGS2_FIFOOVERRUN)
}
// getIRQFlags2 reads RFM_REG_IRQFLAGS2, logs every asserted flag for
// debugging, and returns the register value masked with mask.
func (this *rfm69) getIRQFlags2(mask uint8) (uint8, error) {
	value, err := this.readreg_uint8(RFM_REG_IRQFLAGS2)
	if err != nil {
		// Fail early: the register value is meaningless on a read error.
		return 0, err
	}
	// Copy-paste fix: the original checked and logged RFM_IRQFLAGS2_CRCOK
	// twice; each flag is now listed exactly once.
	for _, flag := range []struct {
		bit  uint8
		name string
	}{
		{RFM_IRQFLAGS2_CRCOK, "RFM_IRQFLAGS2_CRCOK"},
		{RFM_IRQFLAGS2_PAYLOADREADY, "RFM_IRQFLAGS2_PAYLOADREADY"},
		{RFM_IRQFLAGS2_PACKETSENT, "RFM_IRQFLAGS2_PACKETSENT"},
		{RFM_IRQFLAGS2_FIFOOVERRUN, "RFM_IRQFLAGS2_FIFOOVERRUN"},
		{RFM_IRQFLAGS2_FIFOLEVEL, "RFM_IRQFLAGS2_FIFOLEVEL"},
		{RFM_IRQFLAGS2_FIFONOTEMPTY, "RFM_IRQFLAGS2_FIFONOTEMPTY"},
		{RFM_IRQFLAGS2_FIFOFULL, "RFM_IRQFLAGS2_FIFOFULL"},
	} {
		if value&flag.bit != 0 {
			this.log.Debug2(flag.name)
		}
	}
	return value & mask, nil
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_FIFO
func (this *rfm69) recvFIFOEmpty() (bool, error) {
if fifo_not_empty, err := this.getIRQFlags2(RFM_IRQFLAGS2_FIFONOTEMPTY); err != nil {
return false, err
} else {
return (fifo_not_empty != RFM_IRQFLAGS2_FIFONOTEMPTY), nil
}
}
func (this *rfm69) irqFIFOLevel() (bool, error) {
if fifo_level, err := this.getIRQFlags2(RFM_IRQFLAGS2_FIFOLEVEL); err != nil {
return false, err
} else {
return (fifo_level == RFM_IRQFLAGS2_FIFOLEVEL), nil
}
}
func (this *rfm69) recvCRCOk() (bool, error) {
if crc_ok, err := this.getIRQFlags2(RFM_IRQFLAGS2_CRCOK); err != nil {
return false, err
} else {
return (crc_ok == RFM_IRQFLAGS2_CRCOK), nil
}
}
func (this *rfm69) recvPayloadReady() (bool, error) {
if payload_ready, err := this.getIRQFlags2(RFM_IRQFLAGS2_PAYLOADREADY); err != nil {
return false, err
} else {
return payload_ready == RFM_IRQFLAGS2_PAYLOADREADY, nil
}
}
func (this *rfm69) recvPacketSent() (bool, error) {
if packet_sent, err := this.getIRQFlags2(RFM_IRQFLAGS2_PACKETSENT); err != nil {
return false, err
} else {
return packet_sent == RFM_IRQFLAGS2_PACKETSENT, nil
}
}
// recvFIFO drains the receive FIFO one byte at a time, stopping when the
// FIFO reports empty or RFM_FIFO_SIZE bytes have been read. Returns the
// bytes read so far (possibly empty) or the first register-access error.
func (this *rfm69) recvFIFO() ([]byte, error) {
	data := make([]byte, 0, RFM_FIFO_SIZE)
	for len(data) < RFM_FIFO_SIZE {
		empty, err := this.recvFIFOEmpty()
		if err != nil {
			return nil, err
		}
		if empty {
			break
		}
		b, err := this.readreg_uint8(RFM_REG_FIFO)
		if err != nil {
			return nil, err
		}
		data = append(data, b)
	}
	return data, nil
}
// writeFIFO writes data into the transmit FIFO via a burst register write.
func (this *rfm69) writeFIFO(data []byte) error {
	return this.writereg_uint8_array(RFM_REG_FIFO, data)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_RXBW
// writeRXBW writes a raw value to the receiver bandwidth register.
// See setRegRXBW for the structured (frequency, cutoff) variant.
func (this *rfm69) writeRXBW(value byte) error {
	return this.writereg_uint8(RFM_REG_RXBW, value)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_TEMP1, RFM_REG_TEMP2
// Get running bit
// getRegTemp1 reads RFM_REG_TEMP1 and reports whether the temperature
// measurement "running" bit (0x04) is set.
func (this *rfm69) getRegTemp1() (bool, error) {
	value, err := this.readreg_uint8(RFM_REG_TEMP1)
	if err != nil {
		return false, err
	}
	return to_uint8_bool(value & 0x04), nil
}
// Set start measurement bit high
// setRegTemp1 starts a temperature measurement by setting the
// start-measurement bit (0x08) in RFM_REG_TEMP1.
func (this *rfm69) setRegTemp1() error {
	return this.writereg_uint8(RFM_REG_TEMP1, 0x08)
}
// Read uncalibrated temperature
// getRegTemp2 returns the raw, uncalibrated temperature reading from
// RFM_REG_TEMP2.
func (this *rfm69) getRegTemp2() (uint8, error) {
	return this.readreg_uint8(RFM_REG_TEMP2)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_RSSICONFIG, RFM_REG_RSSIVALUE
// Get RSSI done
// getRegRSSIDone reports whether the RSSI measurement has completed
// (done bit 0x02 in RFM_REG_RSSICONFIG).
func (this *rfm69) getRegRSSIDone() (bool, error) {
	value, err := this.readreg_uint8(RFM_REG_RSSICONFIG)
	if err != nil {
		return false, err
	}
	return to_uint8_bool(value & 0x02), nil
}
// Set RSSI start
// setRegRSSIStart triggers an RSSI measurement by writing the start bit
// (0x01) to RFM_REG_RSSICONFIG.
func (this *rfm69) setRegRSSIStart() error {
	return this.writereg_uint8(RFM_REG_RSSICONFIG, 0x01)
}
// Return RFM_REG_RSSIVALUE
// getRegRSSIValue returns the raw RSSI reading from RFM_REG_RSSIVALUE.
func (this *rfm69) getRegRSSIValue() (uint8, error) {
	return this.readreg_uint8(RFM_REG_RSSIVALUE)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_LNA
// Read LNA settings - returns the impedance, the gain setting and the current gain value
// getRegLNA decodes RFM_REG_LNA and returns the input impedance, the
// gain selected by software, and the gain currently in effect.
func (this *rfm69) getRegLNA() (sensors.RFMLNAImpedance, sensors.RFMLNAGain, sensors.RFMLNAGain, error) {
	value, err := this.readreg_uint8(RFM_REG_LNA)
	if err != nil {
		return 0, 0, 0, err
	}
	// Bit 7: impedance; bits 5-3: current gain; bits 2-0: selected gain.
	impedance := sensors.RFMLNAImpedance(value>>7) & sensors.RFM_LNA_IMPEDANCE_MAX
	gainSelect := sensors.RFMLNAGain(value) & sensors.RFM_LNA_GAIN_MAX
	gainCurrent := sensors.RFMLNAGain(value>>3) & sensors.RFM_LNA_GAIN_MAX
	return impedance, gainSelect, gainCurrent, nil
}
// Write LNA settings
// setRegLNA writes the LNA input impedance (bit 7) and gain selection
// (bits 2-0) to RFM_REG_LNA.
func (this *rfm69) setRegLNA(impedance sensors.RFMLNAImpedance, gain sensors.RFMLNAGain) error {
	reg := uint8(impedance&sensors.RFM_LNA_IMPEDANCE_MAX) << 7
	reg |= uint8(gain & sensors.RFM_LNA_GAIN_MAX)
	return this.writereg_uint8(RFM_REG_LNA, reg)
}
////////////////////////////////////////////////////////////////////////////////
// RFM_REG_RXBW
// getRegRXBW decodes RFM_REG_RXBW and returns the channel-filter
// bandwidth setting and the DC cutoff setting.
func (this *rfm69) getRegRXBW() (sensors.RFMRXBWFrequency, sensors.RFMRXBWCutoff, error) {
	value, err := this.readreg_uint8(RFM_REG_RXBW)
	if err != nil {
		return 0, 0, err
	}
	// Bits 7-5: DC cutoff; bits 4-0: bandwidth mantissa/exponent.
	dcCutoff := sensors.RFMRXBWCutoff(value>>5) & sensors.RFM_RXBW_CUTOFF_MAX
	bandwidth := sensors.RFMRXBWFrequency(value) & sensors.RFM_RXBW_FREQUENCY_MAX
	return bandwidth, dcCutoff, nil
}
// setRegRXBW writes the receiver bandwidth (bits 4-0) and DC cutoff
// (bits 7-5) to RFM_REG_RXBW.
func (this *rfm69) setRegRXBW(frequency sensors.RFMRXBWFrequency, cutoff sensors.RFMRXBWCutoff) error {
	reg := uint8(frequency & sensors.RFM_RXBW_FREQUENCY_MAX)
	reg |= uint8(cutoff&sensors.RFM_RXBW_CUTOFF_MAX) << 5
	return this.writereg_uint8(RFM_REG_RXBW, reg)
}
|
package types
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// GitRepository represents a GitRepository Kubernetes resource.
// Fix: ApiVersion and Kind previously had no JSON tags, so they
// (de)serialized as "ApiVersion"/"Kind" instead of the Kubernetes-
// conventional lowercase "apiVersion"/"kind" keys used in manifests.
type GitRepository struct {
	ApiVersion        string            `json:"apiVersion,omitempty"`
	Kind              string            `json:"kind,omitempty"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              GitRepositorySpec `json:"spec,omitempty"`
}
// GitRepositorySpec describes the desired state of a GitRepository:
// the clone URL and optional authentication material.
type GitRepositorySpec struct {
	URL  string          `json:"url"`            // repository clone URL
	Auth *RepositoryAuth `json:"auth,omitempty"` // nil when the repo is public
}
// RepositoryAuth points at the credentials used to access a repository.
type RepositoryAuth struct {
	Type       string `json:"type"`       // auth scheme; valid values defined by the consumer -- TODO confirm
	SecretName string `json:"secretName"` // name of the Secret holding the credentials
}
|
package shbp
import (
"fmt"
"../../util"
algos "../analysis"
"../report"
"../traceReplay"
)
// Stateless event listeners. Each implements traceReplay.EventListener
// and updates the package-level detector state for one event category.
type ListenerAsyncSnd struct{}              // lock acquire
type ListenerAsyncRcv struct{}              // lock release
type ListenerSync struct{}                  // thread-to-thread synchronization
type ListenerDataAccessSHB struct{}         // SHB analysis with lockset filter
type ListenerDataAccessHB struct{}          // HB analysis with lockset filter
type ListenerDataAccessSHBNOLS struct{}     // SHB analysis without lockset filter
type ListenerDataAccessHBNOLS struct{}      // HB analysis without lockset filter
type ListenerDataAccessHBNOP struct{}       // HB analysis, no race partner recorded
type ListenerDataAccessSHBSingle struct{}   // SHB with a single access frontier
type ListenerDataAccessSHBWrites struct{}   // SHB frontier where reads never evict writes
type ListenerDataAccessSHBDefault struct{}  // epoch-based SHB (FastTrack-style)
type ListenerDataAccessSHBNoPartner struct{} // SHB, no race partner recorded
type ListenerGoFork struct{}                // goroutine fork / signal
type ListenerGoWait struct{}                // goroutine wait
type ListenerNT struct{}                    // notify
type ListenerNTWT struct{}                  // wait after notify
type ListenerPostProcess struct{}           // final pass after replay
// EventCollector bundles an ordered listener pipeline; it is the unit
// registered with the analysis framework under a detector name.
type EventCollector struct {
	listeners []traceReplay.EventListener
}
// statistics toggles statistics collection and doPostProcess enables the
// post-processing phase. NOTE(review): neither flag is read anywhere in
// this file; presumably consumed elsewhere in the package -- confirm.
var statistics = true
var doPostProcess = true
// Init resets all package-level detector state and registers the two
// active detector pipelines with the analysis registry:
//
//	"fasttrack" - HB-based data-access analysis, no race partner recorded
//	"shb"       - SHB-based data-access analysis, no race partner recorded
//
// Both pipelines share the same surrounding listeners for lock, sync,
// fork/wait and notify/wait events; only the data-access listener differs.
// (Earlier experimental configurations - shbp, hbp, their NOLS variants,
// shbpSingle and shbpWrites - were disabled and have been removed here.)
func Init() {
	threads = make(map[uint32]thread)
	locks = make(map[uint32]lock)
	signalList = make(map[uint32]signal)
	variables = make(map[uint32]variable)
	volatiles = make(map[uint32]vcepoch)
	notifies = make(map[uint32]vcepoch)

	// pipeline builds the common listener chain around one data-access
	// analysis listener.
	pipeline := func(access traceReplay.EventListener) []traceReplay.EventListener {
		return []traceReplay.EventListener{
			&ListenerAsyncSnd{},
			&ListenerAsyncRcv{},
			&ListenerSync{},
			access,
			&ListenerGoFork{},
			&ListenerGoWait{},
			&ListenerNT{},
			&ListenerNTWT{},
			&ListenerPostProcess{},
		}
	}

	algos.RegisterDetector("fasttrack", &EventCollector{pipeline(&ListenerDataAccessHBNOP{})})
	algos.RegisterDetector("shb", &EventCollector{pipeline(&ListenerDataAccessSHBNoPartner{})})
}
// Package-level detector state, keyed by trace ids. All of it is reset by
// Init and mutated single-threaded during replay (no locking).
var threads map[uint32]thread      // per-thread clock, lockset, current node
var locks map[uint32]lock          // per-lock release clock
var signalList map[uint32]signal   // fork/signal clocks awaiting a wait
var variables map[uint32]variable  // per-memory-location access state
var volatiles map[uint32]vcepoch   // per-volatile synchronization clocks
var notifies map[uint32]vcepoch    // per-notify-object clocks
// thread is the per-thread detector state.
type thread struct {
	vc   vcepoch             // the thread's vector clock
	curr *node               // most recent access node -- NOTE(review): not written in this file; confirm usage elsewhere
	ls   map[uint32]struct{} // set of lock ids currently held
}
// newThread returns fresh thread state whose clock starts at 1 for its
// own component and with an empty lockset.
func newThread(tid uint32) thread {
	return thread{
		vc:   newvc2().set(tid, 1),
		curr: nil,
		ls:   make(map[uint32]struct{}),
	}
}
// lock holds the vector clock published at the last release of a lock.
type lock struct {
	vc   vcepoch
	curr *node // most recent access node -- NOTE(review): not written in this file; confirm usage elsewhere
}

// signal carries the clock published by a fork/signal for a later wait.
type signal struct {
	vc vcepoch
}

// variable is the per-memory-location detector state.
type variable struct {
	lastWrite vcepoch    // clock of the most recent write (used by the epoch-based listener)
	lwEv      *util.Item // event of the last write -- NOTE(review): not written in this file
	rvc       vcepoch    // epoch-style read clock
	wvc       vcepoch    // epoch-style write clock
	lastEv    *util.Item // most recent access event
	hasRace   bool       // race-found marker -- NOTE(review): not written in this file
	writes    []*node    // frontier of pairwise-concurrent writes
	reads     []*node    // frontier of pairwise-concurrent reads
	races     []race     // recorded race pairs
	lwNode    *node      // node of the last write (SHB last-write synchronization)
}

// nodes -- NOTE(review): not used in the visible code; confirm before removal.
var nodes []*node

// race pairs the two access nodes of a reported data race.
type race struct {
	acc1 *node
	acc2 *node
}

// variableHistory snapshots one access (event plus clock).
type variableHistory struct {
	ev    *util.Item
	clock vcepoch
}

// node is one recorded access: held lockset, event, and clock at access.
type node struct {
	ls    map[uint32]struct{}
	ev    *util.Item
	clock vcepoch
}
// newVar returns empty per-variable state: zero clocks, zero epochs, and
// empty access frontiers.
func newVar() variable {
	return variable{
		lastWrite: newvc2(),
		lwEv:      nil,
		rvc:       newEpoch(0, 0),
		wvc:       newEpoch(0, 0),
		lastEv:    nil,
		hasRace:   false,
		writes:    make([]*node, 0),
		reads:     make([]*node, 0),
		races:     make([]race, 0),
		lwNode:    nil,
	}
}
// Put forwards the sync pair to every listener in registration order.
func (l *EventCollector) Put(p *util.SyncPair) {
	for _, listener := range l.listeners {
		listener.Put(p)
	}
}
// uniqueRaceFilter maps a "file:line" location key to the set of partner
// locations already reported; maintained symmetrically by isUnique.
var uniqueRaceFilter = make(map[string]map[string]struct{})
// isUnique records the pair of source locations of race r in the global
// filter and reports whether the pair had not been seen before. The
// filter is symmetric: (a,b) and (b,a) count as the same pair.
func isUnique(r race) bool {
	locKey := func(n *node) string {
		return fmt.Sprintf("f:%vl%v", n.ev.Ops[0].SourceRef, n.ev.Ops[0].Line)
	}
	key1 := locKey(r.acc1)
	key2 := locKey(r.acc2)
	partners1 := uniqueRaceFilter[key1]
	if partners1 == nil {
		partners1 = make(map[string]struct{})
	}
	if _, seen := partners1[key2]; seen {
		return false
	}
	partners1[key2] = struct{}{}
	partners2 := uniqueRaceFilter[key2]
	if partners2 == nil {
		partners2 = make(map[string]struct{})
	}
	partners2[key1] = struct{}{}
	uniqueRaceFilter[key1] = partners1
	uniqueRaceFilter[key2] = partners2
	return true
}
// intersect reports whether the two sets share at least one element.
// Used as the lockset filter: a race candidate is suppressed when the
// two accesses held a common lock.
func intersect(a, b map[uint32]struct{}) bool {
	for key := range a {
		_, shared := b[key]
		if shared {
			return true
		}
	}
	return false
}
// Put processes a data-access event under the happens-before relation
// with lockset filtering. A write is checked against the frontiers of
// concurrent writes and reads; a read is checked against concurrent
// writes. A candidate pair is reported only when the two accesses hold
// no lock in common. Events that are neither read nor write are treated
// as volatile accesses and act as synchronization points.
func (l *ListenerDataAccessHB) Put(p *util.SyncPair) {
	if !p.DataAccess {
		return
	}
	// Lazily create per-thread and per-variable state on first sight.
	t1, ok := threads[p.T1]
	if !ok {
		t1 = newThread(p.T1)
	}
	varstate, ok := variables[p.T2]
	if !ok {
		varstate = newVar()
	}
	// Snapshot of this access: event, current clock, held lockset.
	newNode := &node{ev: p.Ev, clock: t1.vc.clone(), ls: make(map[uint32]struct{})}
	for k := range t1.ls {
		newNode.ls[k] = struct{}{}
	}
	if p.Write {
		newWrites := make([]*node, 0)
		//if !varstate.wvc.leq(t1.vc) {
		//concurrent writes exist
		for i, w := range varstate.writes {
			// w is concurrent with this access iff w's clock entry for its
			// own thread exceeds what this thread has observed from it.
			k := w.clock.get(w.ev.Thread)
			curr := t1.vc.get(w.ev.Thread)
			if k > curr {
				// Still concurrent: keep w in the frontier and report.
				newWrites = append(newWrites, varstate.writes[i])
				r := race{varstate.writes[i], newNode}
				//if isUnique(r) {
				if !intersect(w.ls, newNode.ls) {
					report.ReportRace(
						report.Location{File: r.acc1.ev.Ops[0].SourceRef, Line: r.acc1.ev.Ops[0].Line, W: r.acc1.ev.Ops[0].Kind&util.WRITE > 0},
						report.Location{File: r.acc2.ev.Ops[0].SourceRef, Line: r.acc2.ev.Ops[0].Line, W: r.acc2.ev.Ops[0].Kind&util.WRITE > 0},
						false, 0)
				}
			}
		}
		//}
		newWrites = append(newWrites, newNode)
		varstate.wvc = varstate.wvc.set(p.T1, t1.vc.get(p.T1))
		// if !varstate.rvc.leq(t1.vc) {
		//concurrent reads exist
		for i, r := range varstate.reads {
			k := r.clock.get(r.ev.Thread)
			curr := t1.vc.get(r.ev.Thread)
			if k > curr {
				//store race
				q := race{varstate.reads[i], newNode}
				if !intersect(r.ls, newNode.ls) {
					report.ReportRace(
						report.Location{File: q.acc1.ev.Ops[0].SourceRef, Line: q.acc1.ev.Ops[0].Line, W: q.acc1.ev.Ops[0].Kind&util.WRITE > 0},
						report.Location{File: q.acc2.ev.Ops[0].SourceRef, Line: q.acc2.ev.Ops[0].Line, W: q.acc2.ev.Ops[0].Kind&util.WRITE > 0},
						false, 0)
				}
			}
		}
		//}
		varstate.writes = newWrites
		t1.vc = t1.vc.add(p.T1, 1)
	} else if p.Read {
		//find concurrent writes, add wrd and store the detected race
		// if !varstate.wvc.leq(t1.vc) {
		for i, w := range varstate.writes {
			k := w.clock.get(w.ev.Thread)
			curr := t1.vc.get(w.ev.Thread)
			if k > curr {
				r := race{varstate.writes[i], newNode}
				//if isUnique(r) {
				if !intersect(w.ls, newNode.ls) {
					report.ReportRace(
						report.Location{File: r.acc1.ev.Ops[0].SourceRef, Line: r.acc1.ev.Ops[0].Line, W: r.acc1.ev.Ops[0].Kind&util.WRITE > 0},
						report.Location{File: r.acc2.ev.Ops[0].SourceRef, Line: r.acc2.ev.Ops[0].Line, W: r.acc2.ev.Ops[0].Kind&util.WRITE > 0},
						false, 0)
				}
				// }
			}
		}
		//}
		// Rebuild the read frontier, keeping only reads still concurrent
		// with this access, then add this read.
		newReads := make([]*node, 0, len(varstate.reads))
		//if !varstate.rvc.leq(t1.vc) {
		for i, r := range varstate.reads {
			k := r.clock.get(r.ev.Thread)
			curr := t1.vc.get(r.ev.Thread)
			if k > curr {
				newReads = append(newReads, varstate.reads[i])
			}
		}
		//}
		newReads = append(newReads, newNode)
		varstate.rvc = varstate.rvc.set(p.T1, t1.vc.get(p.T1))
		varstate.reads = newReads
		t1.vc = t1.vc.add(p.T1, 1)
	} else { //volatile synchronize
		vol, ok := volatiles[p.T2]
		if !ok {
			vol = newvc2()
		}
		t1.vc = t1.vc.ssync(vol)
		vol = t1.vc.clone()
		volatiles[p.T2] = vol
	}
	varstate.lastEv = p.Ev
	variables[p.T2] = varstate
	threads[p.T1] = t1
}
// Put is the happens-before data-access analysis WITHOUT the lockset
// filter (NOLS): every pair of concurrent conflicting accesses is
// reported, and access nodes carry no lockset. Otherwise identical to
// ListenerDataAccessHB.Put.
func (l *ListenerDataAccessHBNOLS) Put(p *util.SyncPair) {
	if !p.DataAccess {
		return
	}
	t1, ok := threads[p.T1]
	if !ok {
		t1 = newThread(p.T1)
	}
	varstate, ok := variables[p.T2]
	if !ok {
		varstate = newVar()
	}
	// No lockset snapshot here: locks are ignored by this variant.
	newNode := &node{ev: p.Ev, clock: t1.vc.clone()}
	if p.Write {
		newWrites := make([]*node, 0)
		//if !varstate.wvc.leq(t1.vc) {
		//concurrent writes exist
		for i, w := range varstate.writes {
			// Concurrency test: w's own clock entry vs. what t1 has seen.
			k := w.clock.get(w.ev.Thread)
			curr := t1.vc.get(w.ev.Thread)
			if k > curr {
				newWrites = append(newWrites, varstate.writes[i])
				r := race{varstate.writes[i], newNode}
				report.ReportRace(
					report.Location{File: r.acc1.ev.Ops[0].SourceRef, Line: r.acc1.ev.Ops[0].Line, W: r.acc1.ev.Ops[0].Kind&util.WRITE > 0},
					report.Location{File: r.acc2.ev.Ops[0].SourceRef, Line: r.acc2.ev.Ops[0].Line, W: r.acc2.ev.Ops[0].Kind&util.WRITE > 0},
					false, 0)
			}
		}
		//}
		newWrites = append(newWrites, newNode)
		varstate.wvc = varstate.wvc.set(p.T1, t1.vc.get(p.T1))
		// if !varstate.rvc.leq(t1.vc) {
		//concurrent reads exist
		for i, r := range varstate.reads {
			k := r.clock.get(r.ev.Thread)
			curr := t1.vc.get(r.ev.Thread)
			if k > curr {
				//store race
				q := race{varstate.reads[i], newNode}
				report.ReportRace(
					report.Location{File: q.acc1.ev.Ops[0].SourceRef, Line: q.acc1.ev.Ops[0].Line, W: q.acc1.ev.Ops[0].Kind&util.WRITE > 0},
					report.Location{File: q.acc2.ev.Ops[0].SourceRef, Line: q.acc2.ev.Ops[0].Line, W: q.acc2.ev.Ops[0].Kind&util.WRITE > 0},
					false, 0)
			}
		}
		//}
		varstate.writes = newWrites
		t1.vc = t1.vc.add(p.T1, 1)
	} else if p.Read {
		//find concurrent writes, add wrd and store the detected race
		// if !varstate.wvc.leq(t1.vc) {
		for i, w := range varstate.writes {
			k := w.clock.get(w.ev.Thread)
			curr := t1.vc.get(w.ev.Thread)
			if k > curr {
				r := race{varstate.writes[i], newNode}
				report.ReportRace(
					report.Location{File: r.acc1.ev.Ops[0].SourceRef, Line: r.acc1.ev.Ops[0].Line, W: r.acc1.ev.Ops[0].Kind&util.WRITE > 0},
					report.Location{File: r.acc2.ev.Ops[0].SourceRef, Line: r.acc2.ev.Ops[0].Line, W: r.acc2.ev.Ops[0].Kind&util.WRITE > 0},
					false, 0)
				// }
			}
		}
		//}
		// Keep only reads still concurrent with this access, add this one.
		newReads := make([]*node, 0, len(varstate.reads))
		//if !varstate.rvc.leq(t1.vc) {
		for i, r := range varstate.reads {
			k := r.clock.get(r.ev.Thread)
			curr := t1.vc.get(r.ev.Thread)
			if k > curr {
				newReads = append(newReads, varstate.reads[i])
			}
		}
		//}
		newReads = append(newReads, newNode)
		varstate.rvc = varstate.rvc.set(p.T1, t1.vc.get(p.T1))
		varstate.reads = newReads
		t1.vc = t1.vc.add(p.T1, 1)
	} else { //volatile synchronize
		vol, ok := volatiles[p.T2]
		if !ok {
			vol = newvc2()
		}
		t1.vc = t1.vc.ssync(vol)
		vol = t1.vc.clone()
		volatiles[p.T2] = vol
	}
	varstate.lastEv = p.Ev
	variables[p.T2] = varstate
	threads[p.T1] = t1
}
// Put is the happens-before analysis used by the "fasttrack" detector
// registration. It applies the lockset filter like ListenerDataAccessHB,
// but reports only the CURRENT access location: the first Location is a
// {File: 1, Line: 1} placeholder (NOP = no race partner recorded).
func (l *ListenerDataAccessHBNOP) Put(p *util.SyncPair) {
	if !p.DataAccess {
		return
	}
	t1, ok := threads[p.T1]
	if !ok {
		t1 = newThread(p.T1)
	}
	varstate, ok := variables[p.T2]
	if !ok {
		varstate = newVar()
	}
	// Snapshot of this access: event, current clock, held lockset.
	newNode := &node{ev: p.Ev, clock: t1.vc.clone(), ls: make(map[uint32]struct{})}
	for k := range t1.ls {
		newNode.ls[k] = struct{}{}
	}
	if p.Write {
		newWrites := make([]*node, 0)
		//if !varstate.wvc.leq(t1.vc) {
		//concurrent writes exist
		for i, w := range varstate.writes {
			// Concurrency test: w's own clock entry vs. what t1 has seen.
			k := w.clock.get(w.ev.Thread)
			curr := t1.vc.get(w.ev.Thread)
			if k > curr {
				newWrites = append(newWrites, varstate.writes[i])
				r := race{varstate.writes[i], newNode}
				//if isUnique(r) {
				if !intersect(w.ls, newNode.ls) {
					// First location is a placeholder; partner not recorded.
					report.ReportRace(
						report.Location{File: 1, Line: 1, W: true},
						report.Location{File: r.acc2.ev.Ops[0].SourceRef, Line: r.acc2.ev.Ops[0].Line, W: r.acc2.ev.Ops[0].Kind&util.WRITE > 0},
						false, 0)
				}
			}
		}
		//}
		newWrites = append(newWrites, newNode)
		varstate.wvc = varstate.wvc.set(p.T1, t1.vc.get(p.T1))
		// if !varstate.rvc.leq(t1.vc) {
		//concurrent reads exist
		for i, r := range varstate.reads {
			k := r.clock.get(r.ev.Thread)
			curr := t1.vc.get(r.ev.Thread)
			if k > curr {
				//store race
				q := race{varstate.reads[i], newNode}
				if !intersect(r.ls, newNode.ls) {
					report.ReportRace(
						report.Location{File: 1, Line: 1, W: false},
						report.Location{File: q.acc2.ev.Ops[0].SourceRef, Line: q.acc2.ev.Ops[0].Line, W: q.acc2.ev.Ops[0].Kind&util.WRITE > 0},
						false, 0)
				}
			}
		}
		//}
		varstate.writes = newWrites
		t1.vc = t1.vc.add(p.T1, 1)
	} else if p.Read {
		//find concurrent writes, add wrd and store the detected race
		// if !varstate.wvc.leq(t1.vc) {
		for i, w := range varstate.writes {
			k := w.clock.get(w.ev.Thread)
			curr := t1.vc.get(w.ev.Thread)
			if k > curr {
				r := race{varstate.writes[i], newNode}
				//if isUnique(r) {
				if !intersect(w.ls, newNode.ls) {
					report.ReportRace(
						report.Location{File: 1, Line: 1, W: true},
						report.Location{File: r.acc2.ev.Ops[0].SourceRef, Line: r.acc2.ev.Ops[0].Line, W: r.acc2.ev.Ops[0].Kind&util.WRITE > 0},
						false, 0)
				}
				// }
			}
		}
		//}
		// Keep only reads still concurrent with this access, add this one.
		newReads := make([]*node, 0, len(varstate.reads))
		//if !varstate.rvc.leq(t1.vc) {
		for i, r := range varstate.reads {
			k := r.clock.get(r.ev.Thread)
			curr := t1.vc.get(r.ev.Thread)
			if k > curr {
				newReads = append(newReads, varstate.reads[i])
			}
		}
		//}
		newReads = append(newReads, newNode)
		varstate.rvc = varstate.rvc.set(p.T1, t1.vc.get(p.T1))
		varstate.reads = newReads
		t1.vc = t1.vc.add(p.T1, 1)
	} else { //volatile synchronize
		vol, ok := volatiles[p.T2]
		if !ok {
			vol = newvc2()
		}
		t1.vc = t1.vc.ssync(vol)
		vol = t1.vc.clone()
		volatiles[p.T2] = vol
	}
	varstate.lastEv = p.Ev
	variables[p.T2] = varstate
	threads[p.T1] = t1
}
// countFP counts race candidates suppressed by the lockset filter
// (false positives under the pure clock-based relation); shbCountMap
// collects the distinct "file:line" locations of confirmed races.
var countFP = 0
var shbCountMap = make(map[string]struct{})
// Put processes a data-access event under the schedulable-happens-before
// (SHB) relation with lockset filtering. Differs from the HB listener in
// that a read first races against the write frontier and THEN joins the
// clock of the variable's last write (lwNode) before being recorded in
// the read frontier - the SHB write-read dependency. Writes update lwNode.
func (l *ListenerDataAccessSHB) Put(p *util.SyncPair) {
	if !p.DataAccess {
		return
	}
	t1, ok := threads[p.T1]
	if !ok {
		t1 = newThread(p.T1)
	}
	varstate, ok := variables[p.T2]
	if !ok {
		varstate = newVar()
	}
	// Snapshot of this access: event, current clock, held lockset.
	newNode := &node{ev: p.Ev, clock: t1.vc.clone(), ls: make(map[uint32]struct{})}
	for k := range t1.ls {
		newNode.ls[k] = struct{}{}
	}
	if p.Write {
		// This write becomes the variable's last write.
		varstate.lwNode = newNode
		newWrites := make([]*node, 0)
		// if !varstate.wvc.leq(t1.vc) {
		//concurrent writes exist
		for i, w := range varstate.writes {
			// Concurrency test: w's own clock entry vs. what t1 has seen.
			k := w.clock.get(w.ev.Thread)
			curr := t1.vc.get(w.ev.Thread)
			if k > curr {
				newWrites = append(newWrites, varstate.writes[i])
				r := race{varstate.writes[i], newNode}
				if !intersect(w.ls, newNode.ls) {
					report.ReportRace(
						report.Location{File: r.acc1.ev.Ops[0].SourceRef, Line: r.acc1.ev.Ops[0].Line, W: r.acc1.ev.Ops[0].Kind&util.WRITE > 0},
						report.Location{File: r.acc2.ev.Ops[0].SourceRef, Line: r.acc2.ev.Ops[0].Line, W: r.acc2.ev.Ops[0].Kind&util.WRITE > 0},
						false, 0)
				}
				// if b {
				// b = intersect(w.ls, newNode.ls)
				// if b {
				// countFP++
				// } else {
				// s := fmt.Sprintf("%v:%v", newNode.ev.Ops[0].SourceRef, newNode.ev.Ops[0].Line)
				// shbCountMap[s] = struct{}{}
				// }
				// fmt.Println(">>>", b, countFP)
				// }
			}
		}
		// }
		newWrites = append(newWrites, newNode)
		//varstate.wvc = varstate.wvc.set(p.T1, t1.vc.get(p.T1))
		// if !varstate.rvc.leq(t1.vc) {
		//concurrent reads exist
		for i, r := range varstate.reads {
			k := r.clock.get(r.ev.Thread)
			curr := t1.vc.get(r.ev.Thread)
			if k > curr {
				//store race
				q := race{varstate.reads[i], newNode}
				if !intersect(r.ls, newNode.ls) {
					report.ReportRace(
						report.Location{File: q.acc1.ev.Ops[0].SourceRef, Line: q.acc1.ev.Ops[0].Line, W: q.acc1.ev.Ops[0].Kind&util.WRITE > 0},
						report.Location{File: q.acc2.ev.Ops[0].SourceRef, Line: q.acc2.ev.Ops[0].Line, W: q.acc2.ev.Ops[0].Kind&util.WRITE > 0},
						false, 0)
				}
				// b := report.ReportRace(
				// report.Location{File: q.acc1.ev.Ops[0].SourceRef, Line: q.acc1.ev.Ops[0].Line, W: q.acc1.ev.Ops[0].Kind&util.WRITE > 0},
				// report.Location{File: q.acc2.ev.Ops[0].SourceRef, Line: q.acc2.ev.Ops[0].Line, W: q.acc2.ev.Ops[0].Kind&util.WRITE > 0},
				// false, 0)
				// if b {
				// b = intersect(r.ls, newNode.ls)
				// if b {
				// countFP++
				// } else {
				// s := fmt.Sprintf("%v:%v", newNode.ev.Ops[0].SourceRef, newNode.ev.Ops[0].Line)
				// shbCountMap[s] = struct{}{}
				// }
				// fmt.Println(">>>", b, countFP)
				// }
			}
		}
		// }
		varstate.writes = newWrites
		t1.vc = t1.vc.add(p.T1, 1)
	} else if p.Read {
		//find concurrent writes, add wrd and store the detected race
		// if !varstate.wvc.leq(t1.vc) {
		for i, w := range varstate.writes {
			k := w.clock.get(w.ev.Thread)
			curr := t1.vc.get(w.ev.Thread)
			if k > curr {
				r := race{varstate.writes[i], newNode}
				// if isUnique(r) {
				if !intersect(w.ls, newNode.ls) {
					report.ReportRace(
						report.Location{File: r.acc1.ev.Ops[0].SourceRef, Line: r.acc1.ev.Ops[0].Line, W: r.acc1.ev.Ops[0].Kind&util.WRITE > 0},
						report.Location{File: r.acc2.ev.Ops[0].SourceRef, Line: r.acc2.ev.Ops[0].Line, W: r.acc2.ev.Ops[0].Kind&util.WRITE > 0},
						false, 0)
				}
				// b := report.ReportRace(
				// report.Location{File: r.acc1.ev.Ops[0].SourceRef, Line: r.acc1.ev.Ops[0].Line, W: r.acc1.ev.Ops[0].Kind&util.WRITE > 0},
				// report.Location{File: r.acc2.ev.Ops[0].SourceRef, Line: r.acc2.ev.Ops[0].Line, W: r.acc2.ev.Ops[0].Kind&util.WRITE > 0},
				// false, 0)
				// if b {
				// b = intersect(w.ls, newNode.ls)
				// if b {
				// countFP++
				// } else {
				// s := fmt.Sprintf("%v:%v", newNode.ev.Ops[0].SourceRef, newNode.ev.Ops[0].Line)
				// shbCountMap[s] = struct{}{}
				// }
				// fmt.Println(">>>", b, countFP)
				// }
				// }
			}
		}
		// }
		// SHB write-read dependency: join the last write's clock BEFORE
		// recording this read, and refresh the node's clock snapshot.
		if varstate.lwNode != nil {
			t1.vc = t1.vc.ssync(varstate.lwNode.clock)
			newNode.clock = t1.vc.clone()
		}
		newReads := make([]*node, 0, len(varstate.reads))
		// if !varstate.rvc.leq(t1.vc) {
		for i, r := range varstate.reads {
			k := r.clock.get(r.ev.Thread)
			curr := t1.vc.get(r.ev.Thread)
			if k > curr {
				newReads = append(newReads, varstate.reads[i])
			}
		}
		// }
		newReads = append(newReads, newNode)
		//varstate.rvc = varstate.rvc.set(p.T1, t1.vc.get(p.T1))
		varstate.reads = newReads
		t1.vc = t1.vc.add(p.T1, 1)
	} else { //volatile synchronize
		vol, ok := volatiles[p.T2]
		if !ok {
			vol = newvc2()
		}
		t1.vc = t1.vc.ssync(vol)
		vol = t1.vc.clone()
		volatiles[p.T2] = vol
	}
	varstate.lastEv = p.Ev
	variables[p.T2] = varstate
	threads[p.T1] = t1
}
// Put is the SHB data-access analysis WITHOUT the lockset filter (NOLS):
// every concurrent conflicting pair is reported. Like the SHB listener,
// a read joins the last write's clock (lwNode) before being recorded.
// Note the node still carries a lockset snapshot here, but it is unused.
func (l *ListenerDataAccessSHBNOLS) Put(p *util.SyncPair) {
	if !p.DataAccess {
		return
	}
	t1, ok := threads[p.T1]
	if !ok {
		t1 = newThread(p.T1)
	}
	varstate, ok := variables[p.T2]
	if !ok {
		varstate = newVar()
	}
	newNode := &node{ev: p.Ev, clock: t1.vc.clone(), ls: make(map[uint32]struct{})}
	for k := range t1.ls {
		newNode.ls[k] = struct{}{}
	}
	if p.Write {
		// This write becomes the variable's last write.
		varstate.lwNode = newNode
		newWrites := make([]*node, 0)
		// if !varstate.wvc.leq(t1.vc) {
		//concurrent writes exist
		for i, w := range varstate.writes {
			// Concurrency test: w's own clock entry vs. what t1 has seen.
			k := w.clock.get(w.ev.Thread)
			curr := t1.vc.get(w.ev.Thread)
			if k > curr {
				newWrites = append(newWrites, varstate.writes[i])
				r := race{varstate.writes[i], newNode}
				report.ReportRace(
					report.Location{File: r.acc1.ev.Ops[0].SourceRef, Line: r.acc1.ev.Ops[0].Line, W: r.acc1.ev.Ops[0].Kind&util.WRITE > 0},
					report.Location{File: r.acc2.ev.Ops[0].SourceRef, Line: r.acc2.ev.Ops[0].Line, W: r.acc2.ev.Ops[0].Kind&util.WRITE > 0},
					false, 0)
			}
		}
		// }
		newWrites = append(newWrites, newNode)
		//varstate.wvc = varstate.wvc.set(p.T1, t1.vc.get(p.T1))
		// if !varstate.rvc.leq(t1.vc) {
		//concurrent reads exist
		for i, r := range varstate.reads {
			k := r.clock.get(r.ev.Thread)
			curr := t1.vc.get(r.ev.Thread)
			if k > curr {
				//store race
				q := race{varstate.reads[i], newNode}
				report.ReportRace(
					report.Location{File: q.acc1.ev.Ops[0].SourceRef, Line: q.acc1.ev.Ops[0].Line, W: q.acc1.ev.Ops[0].Kind&util.WRITE > 0},
					report.Location{File: q.acc2.ev.Ops[0].SourceRef, Line: q.acc2.ev.Ops[0].Line, W: q.acc2.ev.Ops[0].Kind&util.WRITE > 0},
					false, 0)
			}
		}
		// }
		varstate.writes = newWrites
		t1.vc = t1.vc.add(p.T1, 1)
	} else if p.Read {
		//find concurrent writes, add wrd and store the detected race
		// if !varstate.wvc.leq(t1.vc) {
		for i, w := range varstate.writes {
			k := w.clock.get(w.ev.Thread)
			curr := t1.vc.get(w.ev.Thread)
			if k > curr {
				r := race{varstate.writes[i], newNode}
				// if isUnique(r) {
				report.ReportRace(
					report.Location{File: r.acc1.ev.Ops[0].SourceRef, Line: r.acc1.ev.Ops[0].Line, W: r.acc1.ev.Ops[0].Kind&util.WRITE > 0},
					report.Location{File: r.acc2.ev.Ops[0].SourceRef, Line: r.acc2.ev.Ops[0].Line, W: r.acc2.ev.Ops[0].Kind&util.WRITE > 0},
					false, 0)
			}
		}
		// }
		// SHB write-read dependency: join the last write's clock BEFORE
		// recording this read, and refresh the node's clock snapshot.
		if varstate.lwNode != nil {
			t1.vc = t1.vc.ssync(varstate.lwNode.clock)
			newNode.clock = t1.vc.clone()
		}
		newReads := make([]*node, 0, len(varstate.reads))
		// if !varstate.rvc.leq(t1.vc) {
		for i, r := range varstate.reads {
			k := r.clock.get(r.ev.Thread)
			curr := t1.vc.get(r.ev.Thread)
			if k > curr {
				newReads = append(newReads, varstate.reads[i])
			}
		}
		// }
		newReads = append(newReads, newNode)
		//varstate.rvc = varstate.rvc.set(p.T1, t1.vc.get(p.T1))
		varstate.reads = newReads
		t1.vc = t1.vc.add(p.T1, 1)
	} else { //volatile synchronize
		vol, ok := volatiles[p.T2]
		if !ok {
			vol = newvc2()
		}
		t1.vc = t1.vc.ssync(vol)
		vol = t1.vc.clone()
		volatiles[p.T2] = vol
	}
	varstate.lastEv = p.Ev
	variables[p.T2] = varstate
	threads[p.T1] = t1
}
// Put is an SHB variant keeping a SINGLE frontier (varstate.writes) that
// mixes reads and writes; a pair is raced only when at least one side is
// a write. Lockset-filtered candidates are counted in countFP and traced
// via fmt.Println (debug instrumentation kept as-is). Atomic/volatile
// accesses (neither read nor write) synchronize and return early.
func (l *ListenerDataAccessSHBSingle) Put(p *util.SyncPair) {
	if !p.DataAccess {
		return
	}
	t1, ok := threads[p.T1]
	if !ok {
		t1 = newThread(p.T1)
	}
	varstate, ok := variables[p.T2]
	if !ok {
		varstate = newVar()
	}
	if !p.Write && !p.Read {
		//atomic op
		vol, ok := volatiles[p.T2]
		if !ok {
			vol = newvc2()
		}
		t1.vc = t1.vc.ssync(vol)
		vol = t1.vc.clone()
		volatiles[p.T2] = vol
		t1.vc = t1.vc.add(p.T1, 1)
		threads[p.T1] = t1
		return
	}
	// Snapshot of this access: event, current clock, held lockset.
	newNode := &node{ev: p.Ev, clock: t1.vc.clone(), ls: make(map[uint32]struct{})}
	for k := range t1.ls {
		newNode.ls[k] = struct{}{}
	}
	newFrontier := make([]*node, 0, len(varstate.writes))
	for i, w := range varstate.writes {
		// Concurrency test: w's own clock entry vs. what t1 has seen.
		k := w.clock.get(w.ev.Thread)
		curr := t1.vc.get(w.ev.Thread)
		if k > curr {
			newFrontier = append(newFrontier, varstate.writes[i])
			// Only report when at least one access is a write.
			if w.ev.Ops[0].Kind&util.WRITE > 0 || newNode.ev.Ops[0].Kind&util.WRITE > 0 {
				r := race{varstate.writes[i], newNode}
				b := report.ReportRace(
					report.Location{File: r.acc1.ev.Ops[0].SourceRef, Line: r.acc1.ev.Ops[0].Line, W: r.acc1.ev.Ops[0].Kind&util.WRITE > 0},
					report.Location{File: r.acc2.ev.Ops[0].SourceRef, Line: r.acc2.ev.Ops[0].Line, W: r.acc2.ev.Ops[0].Kind&util.WRITE > 0},
					false, 0)
				if b {
					// Lockset overlap marks the report as a likely FP.
					b = intersect(w.ls, newNode.ls)
					if b {
						countFP++
					}
					fmt.Println(">>>", b, countFP)
				}
			}
		}
	}
	if p.Write {
		varstate.lwNode = newNode
	} else if p.Read {
		// SHB write-read dependency: join the last write's clock.
		if varstate.lwNode != nil {
			t1.vc = t1.vc.ssync(varstate.lwNode.clock)
			newNode.clock = t1.vc.clone()
		}
	}
	newFrontier = append(newFrontier, newNode)
	varstate.writes = newFrontier
	t1.vc = t1.vc.add(p.T1, 1)
	variables[p.T2] = varstate
	threads[p.T1] = t1
}
// Put is like ListenerDataAccessSHBSingle (single mixed frontier) with
// one extra rule: a READ never evicts a happened-before WRITE from the
// frontier, so earlier writes stay available as race partners for later
// accesses. Debug instrumentation (countFP / fmt.Println) kept as-is.
func (l *ListenerDataAccessSHBWrites) Put(p *util.SyncPair) {
	if !p.DataAccess {
		return
	}
	t1, ok := threads[p.T1]
	if !ok {
		t1 = newThread(p.T1)
	}
	varstate, ok := variables[p.T2]
	if !ok {
		varstate = newVar()
	}
	if !p.Write && !p.Read {
		//atomic op
		vol, ok := volatiles[p.T2]
		if !ok {
			vol = newvc2()
		}
		t1.vc = t1.vc.ssync(vol)
		vol = t1.vc.clone()
		volatiles[p.T2] = vol
		t1.vc = t1.vc.add(p.T1, 1)
		threads[p.T1] = t1
		return
	}
	// Snapshot of this access: event, current clock, held lockset.
	newNode := &node{ev: p.Ev, clock: t1.vc.clone(), ls: make(map[uint32]struct{})}
	for k := range t1.ls {
		newNode.ls[k] = struct{}{}
	}
	newFrontier := make([]*node, 0, len(varstate.writes))
	for i, w := range varstate.writes {
		// Concurrency test: w's own clock entry vs. what t1 has seen.
		k := w.clock.get(w.ev.Thread)
		curr := t1.vc.get(w.ev.Thread)
		if k > curr {
			newFrontier = append(newFrontier, varstate.writes[i])
			// Only report when at least one access is a write.
			if w.ev.Ops[0].Kind&util.WRITE > 0 || newNode.ev.Ops[0].Kind&util.WRITE > 0 {
				r := race{varstate.writes[i], newNode}
				b := report.ReportRace(
					report.Location{File: r.acc1.ev.Ops[0].SourceRef, Line: r.acc1.ev.Ops[0].Line, W: r.acc1.ev.Ops[0].Kind&util.WRITE > 0},
					report.Location{File: r.acc2.ev.Ops[0].SourceRef, Line: r.acc2.ev.Ops[0].Line, W: r.acc2.ev.Ops[0].Kind&util.WRITE > 0},
					false, 0)
				if b {
					// Lockset overlap marks the report as a likely FP.
					b = intersect(w.ls, newNode.ls)
					if b {
						countFP++
					}
					fmt.Println(">>>", b, countFP)
				}
			}
		} else if w.ev.Ops[0].Kind&util.WRITE > 0 && p.Read { //read events do not replace writes!
			newFrontier = append(newFrontier, varstate.writes[i])
		}
	}
	if p.Write {
		varstate.lwNode = newNode
	} else if p.Read {
		// SHB write-read dependency: join the last write's clock.
		if varstate.lwNode != nil {
			t1.vc = t1.vc.ssync(varstate.lwNode.clock)
			newNode.clock = t1.vc.clone()
		}
	}
	newFrontier = append(newFrontier, newNode)
	varstate.writes = newFrontier
	t1.vc = t1.vc.add(p.T1, 1)
	variables[p.T2] = varstate
	threads[p.T1] = t1
}
// Put is the epoch-based SHB analysis (FastTrack-style): instead of
// access frontiers it keeps per-variable read/write epoch clocks (rvc,
// wvc) and flags a race whenever the relevant epoch clock is not ordered
// before the current thread clock. Reports use a {File: 1, Line: 1}
// placeholder for the partner access. Reads join the last write's clock.
func (l *ListenerDataAccessSHBDefault) Put(p *util.SyncPair) {
	if !p.DataAccess {
		return
	}
	t1, ok := threads[p.T1]
	if !ok {
		t1 = newThread(p.T1)
	}
	varstate, ok := variables[p.T2]
	if !ok {
		varstate = newVar()
	}
	if !p.Write && !p.Read {
		//atomic op
		vol, ok := volatiles[p.T2]
		if !ok {
			vol = newvc2()
		}
		t1.vc = t1.vc.ssync(vol)
		vol = t1.vc.clone()
		volatiles[p.T2] = vol
		t1.vc = t1.vc.add(p.T1, 1)
		threads[p.T1] = t1
		return
	}
	if p.Write {
		// Write-write race: some prior write is not ordered before us.
		if !varstate.wvc.leq(t1.vc) {
			report.ReportRace(report.Location{File: 1, Line: 1, W: true},
				report.Location{File: p.Ev.Ops[0].SourceRef, Line: p.Ev.Ops[0].Line, W: true}, false, 0)
		}
		// Read-write race: some prior read is not ordered before us.
		if !varstate.rvc.leq(t1.vc) {
			report.ReportRace(report.Location{File: 1, Line: 1, W: true},
				report.Location{File: p.Ev.Ops[0].SourceRef, Line: p.Ev.Ops[0].Line, W: true}, false, 0)
		}
		varstate.lastWrite = t1.vc.clone()
		varstate.wvc = varstate.wvc.set(p.T1, t1.vc.get(p.T1))
	} else if p.Read {
		// Write-read race: some prior write is not ordered before us.
		if !varstate.wvc.leq(t1.vc) {
			report.ReportRace(report.Location{File: 1, Line: 1, W: true},
				report.Location{File: p.Ev.Ops[0].SourceRef, Line: p.Ev.Ops[0].Line, W: true}, false, 0)
		}
		// SHB write-read dependency: join the last write's clock.
		t1.vc = t1.vc.ssync(varstate.lastWrite)
		varstate.rvc = varstate.rvc.set(p.T1, t1.vc.get(p.T1))
	}
	t1.vc = t1.vc.add(p.T1, 1)
	variables[p.T2] = varstate
	threads[p.T1] = t1
}
// Put handles a lock-acquire event (async send on a lock): the acquiring
// thread adds the lock to its held lockset and joins the clock published
// at the lock's last release.
func (l *ListenerAsyncSnd) Put(p *util.SyncPair) {
	if !p.AsyncSend {
		return
	}
	if !p.Lock {
		return
	}
	lk, ok := locks[p.T2]
	if !ok {
		lk.vc = newvc2()
	}
	th, ok := threads[p.T1]
	if !ok {
		th = newThread(p.T1)
	}
	th.ls[p.T2] = struct{}{}
	th.vc = th.vc.ssync(lk.vc)
	threads[p.T1] = th
}
// Put handles a lock-release event (async receive on a lock): the lock
// stores a snapshot of the releasing thread's clock, the thread's clock
// ticks, and the lock leaves the thread's held lockset.
func (l *ListenerAsyncRcv) Put(p *util.SyncPair) {
	if !p.AsyncRcv {
		return
	}
	if !p.Unlock {
		return
	}
	lk, ok := locks[p.T2]
	if !ok {
		lk.vc = newvc2()
	}
	th, ok := threads[p.T1]
	if !ok {
		th = newThread(p.T1)
	}
	lk.vc = th.vc.clone()
	th.vc = th.vc.add(p.T1, 1)
	delete(th.ls, p.T2)
	threads[p.T1] = th
	locks[p.T2] = lk
}
// Put handles a thread-to-thread synchronization: both clocks tick, then
// T1 joins T2's clock (one-directional join into T1).
func (l *ListenerSync) Put(p *util.SyncPair) {
	if !p.Sync {
		return
	}
	src, ok := threads[p.T1]
	if !ok {
		src = newThread(p.T1)
	}
	dst, ok := threads[p.T2]
	if !ok {
		dst = newThread(p.T2)
	}
	src.vc = src.vc.add(p.T1, 1)
	dst.vc = dst.vc.add(p.T2, 1)
	src.vc = src.vc.ssync(dst.vc)
	threads[p.T1] = src
	threads[p.T2] = dst
}
// Put handles a goroutine fork (also carries signal events): the forking
// thread publishes a snapshot of its clock under T2 for the forked or
// waiting thread to join later, then its own clock ticks.
func (l *ListenerGoFork) Put(p *util.SyncPair) {
	if !p.IsFork { //used for sig - wait too
		return
	}
	th, ok := threads[p.T1]
	if !ok {
		th = newThread(p.T1)
	}
	signalList[p.T2] = signal{th.vc.clone()}
	th.vc = th.vc.add(p.T1, 1)
	threads[p.T1] = th
}
// Put handles a goroutine wait: if a matching fork/signal clock was
// published under T2, the waiting thread joins it and its clock ticks.
// Without a published clock the event is ignored.
func (l *ListenerGoWait) Put(p *util.SyncPair) {
	if !p.IsWait {
		return
	}
	sig, found := signalList[p.T2]
	if !found {
		return
	}
	th, ok := threads[p.T1]
	if !ok {
		th = newThread(p.T1)
	}
	th.vc = th.vc.ssync(sig.vc)
	th.vc = th.vc.add(p.T1, 1)
	threads[p.T1] = th
}
// Put handles a notify event: the notify object's clock (keyed by T2)
// absorbs the notifying thread's clock, then the thread's clock ticks.
func (l *ListenerNT) Put(p *util.SyncPair) {
	if !p.IsNT {
		return
	}
	th, ok := threads[p.T1]
	if !ok {
		th = newThread(p.T1)
	}
	nvc, ok := notifies[p.T2]
	if !ok {
		nvc = newvc2()
	}
	nvc = nvc.ssync(th.vc)
	th.vc = th.vc.add(p.T1, 1)
	notifies[p.T2] = nvc
	threads[p.T1] = th
}
// Put handles a wait-after-notify event: if the notify object has a
// clock, the waiting thread joins it, ticks, and writes its own clock
// snapshot back to the notify object.
func (l *ListenerNTWT) Put(p *util.SyncPair) {
	if !p.IsNTWT {
		return
	}
	//post wait event, so notify is already synchronized
	nvc, found := notifies[p.T2]
	if !found {
		return
	}
	th, ok := threads[p.T1]
	if !ok {
		th = newThread(p.T1)
	}
	th.vc = th.vc.ssync(nvc)
	th.vc = th.vc.add(p.T1, 1)
	threads[p.T1] = th
	notifies[p.T2] = th.vc.clone()
}
// Put processes a data-access event under the SHB/FT analysis without partner
// reporting. Per variable it keeps the set of pairwise-concurrent reads and
// writes, each with the lockset held at access time, and reports a race when
// a new access is concurrent with a stored one and their locksets are
// disjoint.
// NOTE(review): the first report.Location is a placeholder (File: 1, Line: 1)
// rather than the partner access's real location — confirm this is intended
// for the "NoPartner" variant.
func (l *ListenerDataAccessSHBNoPartner) Put(p *util.SyncPair) {
	if !p.DataAccess {
		return
	}
	// Fetch (or lazily create) the acting thread and the variable's state.
	t1, ok := threads[p.T1]
	if !ok {
		t1 = newThread(p.T1)
	}
	varstate, ok := variables[p.T2]
	if !ok {
		varstate = newVar()
	}
	// Snapshot this access: event, current vector clock, and a copy of the
	// thread's current lockset.
	newNode := &node{ev: p.Ev, clock: t1.vc.clone(), ls: make(map[uint32]struct{})}
	for k := range t1.ls {
		newNode.ls[k] = struct{}{}
	}
	if p.Write {
		varstate.lwNode = newNode
		newWrites := make([]*node, 0)
		// if !varstate.wvc.leq(t1.vc) {
		//concurrent writes exist
		for _, w := range varstate.writes {
			// w is concurrent with the new write iff w's clock entry for its
			// own thread exceeds what t1 has observed for that thread.
			k := w.clock.get(w.ev.Thread)
			curr := t1.vc.get(w.ev.Thread)
			if k > curr {
				newWrites = append(newWrites, w)
				// Concurrent writes with disjoint locksets: write-write race.
				if !intersect(w.ls, newNode.ls) {
					report.ReportRace(
						report.Location{File: 1, Line: 1, W: true},
						report.Location{File: newNode.ev.Ops[0].SourceRef, Line: newNode.ev.Ops[0].Line, W: newNode.ev.Ops[0].Kind&util.WRITE > 0},
						false, 0)
				}
				// if b {
				// 	b = intersect(w.ls, newNode.ls)
				// 	if b {
				// 		countFP++
				// 	} else {
				// 		s := fmt.Sprintf("%v:%v", newNode.ev.Ops[0].SourceRef, newNode.ev.Ops[0].Line)
				// 		shbCountMap[s] = struct{}{}
				// 	}
				// 	fmt.Println(">>>", b, countFP)
				// }
			}
		}
		// }
		newWrites = append(newWrites, newNode)
		//varstate.wvc = varstate.wvc.set(p.T1, t1.vc.get(p.T1))
		// if !varstate.rvc.leq(t1.vc) {
		//concurrent reads exist
		for _, r := range varstate.reads {
			// Same concurrency test against stored reads: read-write race.
			k := r.clock.get(r.ev.Thread)
			curr := t1.vc.get(r.ev.Thread)
			if k > curr {
				if !intersect(r.ls, newNode.ls) {
					report.ReportRace(
						report.Location{File: 1, Line: 1, W: true},
						report.Location{File: newNode.ev.Ops[0].SourceRef, Line: newNode.ev.Ops[0].Line, W: newNode.ev.Ops[0].Kind&util.WRITE > 0},
						false, 0)
				}
				// if b {
				// 	b = intersect(r.ls, newNode.ls)
				// 	if b {
				// 		countFP++
				// 	} else {
				// 		s := fmt.Sprintf("%v:%v", newNode.ev.Ops[0].SourceRef, newNode.ev.Ops[0].Line)
				// 		shbCountMap[s] = struct{}{}
				// 	}
				// 	fmt.Println(">>>", b, countFP)
				// }
			}
		}
		// }
		varstate.writes = newWrites
		t1.vc = t1.vc.add(p.T1, 1)
	} else if p.Read {
		//find concurrent writes, add wrd and store the detected race
		// if !varstate.wvc.leq(t1.vc) {
		for _, w := range varstate.writes {
			k := w.clock.get(w.ev.Thread)
			curr := t1.vc.get(w.ev.Thread)
			if k > curr {
				// if isUnique(r) {
				// Concurrent write vs. this read with disjoint locksets: race.
				if !intersect(w.ls, newNode.ls) {
					report.ReportRace(
						report.Location{File: 1, Line: 1, W: true},
						report.Location{File: newNode.ev.Ops[0].SourceRef, Line: newNode.ev.Ops[0].Line, W: newNode.ev.Ops[0].Kind&util.WRITE > 0},
						false, 0)
				}
				// if b {
				// 	b = intersect(w.ls, newNode.ls)
				// 	if b {
				// 		countFP++
				// 	} else {
				// 		s := fmt.Sprintf("%v:%v", newNode.ev.Ops[0].SourceRef, newNode.ev.Ops[0].Line)
				// 		shbCountMap[s] = struct{}{}
				// 	}
				// 	fmt.Println(">>>", b, countFP)
				// }
				// // }
			}
		}
		// }
		// SHB: the read happens-after the last write; merge its clock and
		// refresh this node's snapshot accordingly.
		if varstate.lwNode != nil {
			t1.vc = t1.vc.ssync(varstate.lwNode.clock)
			newNode.clock = t1.vc.clone()
		}
		// Keep only reads still concurrent with the (possibly merged) clock.
		newReads := make([]*node, 0, len(varstate.reads))
		// if !varstate.rvc.leq(t1.vc) {
		for i, r := range varstate.reads {
			k := r.clock.get(r.ev.Thread)
			curr := t1.vc.get(r.ev.Thread)
			if k > curr {
				newReads = append(newReads, varstate.reads[i])
			}
		}
		// }
		newReads = append(newReads, newNode)
		//varstate.rvc = varstate.rvc.set(p.T1, t1.vc.get(p.T1))
		varstate.reads = newReads
		t1.vc = t1.vc.add(p.T1, 1)
	} else { //volatile synchronize
		// Volatile accesses act as synchronization: merge with the volatile
		// clock and publish the merged clock back. Note the thread clock is
		// not ticked here, unlike the read/write branches.
		vol, ok := volatiles[p.T2]
		if !ok {
			vol = newvc2()
		}
		t1.vc = t1.vc.ssync(vol)
		vol = t1.vc.clone()
		volatiles[p.T2] = vol
	}
	varstate.lastEv = p.Ev
	variables[p.T2] = varstate
	threads[p.T1] = t1
}
// Put prints the final SHB/FT race count once trace processing finishes.
func (l *ListenerPostProcess) Put(p *util.SyncPair) {
	if p.PostProcess {
		fmt.Println("SHB/FT #races:", len(shbCountMap))
	}
}
|
package domain
import (
"testing"
"github.com/gofrs/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
// UserServiceMock is a testify mock for the user-lookup dependency consumed
// by CreateUser in these tests.
type UserServiceMock struct {
	mock.Mock
}
// FindUserByUsername returns the UserServiceResult configured via On(...).Return(...).
// NOTE(review): the error is hard-coded to nil (args.Error(1) is never read),
// so service error paths cannot be exercised through this mock.
func (m *UserServiceMock) FindUserByUsername(username string) (UserServiceResult, error) {
	args := m.Called(username)
	return args.Get(0).(UserServiceResult), nil
}
// TestCreateUser covers the success path (username free) and the
// duplicate-username failure path of CreateUser.
func TestCreateUser(t *testing.T) {
	// Success: lookup yields an empty result, so the username is available.
	freeSvc := new(UserServiceMock)
	freeSvc.On("FindUserByUsername", "username").Return(UserServiceResult{})

	user, err := CreateUser(freeSvc, "username", "password", "password")

	assert.Nil(t, err)
	assert.Equal(t, "username", user.Username)
	assert.NotNil(t, user.Password)

	// Failure: lookup reports an existing user with the same username.
	takenSvc := new(UserServiceMock)
	existingUID, _ := uuid.NewV4()
	takenSvc.On("FindUserByUsername", "username").Return(UserServiceResult{
		UID:      existingUID,
		Username: "username",
	})

	_, err = CreateUser(takenSvc, "username", "password", "password")

	assert.NotNil(t, err)
	assert.Equal(t, UserError{UserErrorUsernameExistsCode}, err)
}
// TestChangePassword verifies that supplying the correct old password changes
// the password and that the new password validates afterwards.
func TestChangePassword(t *testing.T) {
	svc := new(UserServiceMock)
	svc.On("FindUserByUsername", "username").Return(UserServiceResult{})
	user, err := CreateUser(svc, "username", "password", "password")

	errPwd := user.ChangePassword("password", "newpassword", "newpassword")
	isValid, errValid := user.IsPasswordValid("newpassword")

	assert.Nil(t, err)
	assert.Nil(t, errPwd)
	assert.Nil(t, errValid)
	assert.Equal(t, true, isValid)
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
statuspb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
conreg "github.com/GoogleCloudPlatform/declarative-resource-client-library/connector/server_registration"
connectorpb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/connector_go_proto"
)
const (
	// CredentialsMetadataKey contains the user-provided GCP credentials.
	CredentialsMetadataKey = "X-Call-Credentials"
	// UserAgentMetadataKey is an optional value used to override the default when making GCP API calls.
	UserAgentMetadataKey = "User-Agent"
)

// grpcServer is the in-process gRPC server. Set by InitializeServer().
var grpcServer *grpc.Server
// BodyWriter implements io.Writer and io.Flusher as required by
// http.ResponseWriter. It buffers the response entirely in memory so the
// gRPC server can be driven without a network connection.
type BodyWriter struct {
	header http.Header  // headers written by the handler
	b      bytes.Buffer // accumulated response body
	code   int          // status code recorded by WriteHeader
}
// Header returns the current http headers for writing.
func (b *BodyWriter) Header() http.Header {
	return b.header
}

// Write appends data to the in-memory response body; it never fails.
func (b *BodyWriter) Write(data []byte) (int, error) {
	return b.b.Write(data)
}

// WriteHeader records the http status code without emitting anything.
func (b *BodyWriter) WriteHeader(statusCode int) {
	b.code = statusCode
}

// Flush is required by the io.Flusher interface but is unused here.
func (b *BodyWriter) Flush() {}

// InitializeServer prepares the server for future RPC requests. It must be
// called before attempting to respond to any requests (see UnaryCall).
func InitializeServer() *connectorpb.InitializeResponse {
	grpcServer = grpc.NewServer()
	return conreg.InitializeServer(grpcServer)
}
// UnaryCall directs the gRPC call from the client library to its handler by
// synthesizing an HTTP/2 request, serving it in-process, and unwrapping the
// length-prefixed gRPC response. Requires a prior call to InitializeServer().
func UnaryCall(request *connectorpb.UnaryCallRequest) *connectorpb.UnaryCallResponse {
	if grpcServer == nil {
		return makeErrorResponse(codes.Unavailable, "call the initialize function first")
	}
	r, err := makeHTTPRequest(request)
	if err != nil {
		return makeErrorResponse(codes.Internal, "could not prepare an HTTP request")
	}
	w := &BodyWriter{
		header: make(http.Header),
	}
	grpcServer.ServeHTTP(w, r)
	// While the server sets common HTTP headers (e.g., Content-Type) we only
	// care about Grpc-Status and Grpc-Message. Header names are matched
	// case-insensitively since canonicalization is not guaranteed here.
	grpcStatus := codes.Internal
	grpcMessage := "grpc-message field not found"
	for h, v := range w.header {
		if len(v) == 1 {
			if strings.ToLower(h) == "grpc-status" {
				i, err := strconv.Atoi(v[0])
				if err != nil {
					return makeErrorResponse(codes.Internal, "could not parse Grpc-Status header")
				}
				grpcStatus = codes.Code((i))
			}
			if strings.ToLower(h) == "grpc-message" {
				grpcMessage = v[0]
			}
		}
	}
	if grpcStatus != codes.OK {
		return makeErrorResponse(grpcStatus, grpcMessage)
	}
	// Parse the length-prefixed message response body:
	// <1 byte compression flag><4 byte big-endian length><serialized response>
	lpm := w.b.Bytes()
	if len(lpm) < 5 {
		return makeErrorResponse(codes.Internal, "missing response body")
	}
	length := (int(lpm[1]) << 24) | (int(lpm[2]) << 16) | (int(lpm[3]) << 8) | int(lpm[4])
	if len(lpm) < 5+length {
		return makeErrorResponse(codes.Internal, "truncated response body")
	}
	return connectorpb.UnaryCallResponse_builder{
		Response: lpm[5:],
		Status: &statuspb.Status{
			Code:    int32(codes.OK),
			Message: grpcMessage,
		},
	}.Build()
}
// makeErrorResponse builds a UnaryCallResponse that carries only a status
// with the given gRPC code and message (no response payload).
func makeErrorResponse(code codes.Code, message string) *connectorpb.UnaryCallResponse {
	return connectorpb.UnaryCallResponse_builder{
		Status: &statuspb.Status{
			Code:    int32(code),
			Message: message,
		},
	}.Build()
}
// makeHTTPRequest wraps the serialized request proto in an HTTP/2 POST
// request suitable for grpcServer.ServeHTTP, using the gRPC length-prefixed
// message framing
// (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md).
func makeHTTPRequest(request *connectorpb.UnaryCallRequest) (*http.Request, error) {
	payload := request.GetRequest()
	n := len(payload)
	// Length-prefixed message: 1-byte compression flag (0 = uncompressed)
	// followed by a 4-byte big-endian payload length.
	framed := make([]byte, 0, 5+n)
	framed = append(framed, 0, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	framed = append(framed, payload...)

	u, err := url.Parse("https://localhost" + request.GetMethod())
	if err != nil {
		return nil, err
	}
	req := &http.Request{
		Method:     "POST",
		RequestURI: request.GetMethod(),
		URL:        u,
		Proto:      "HTTP/2",
		ProtoMajor: 2,
		ProtoMinor: 0,
		Header: http.Header{
			"Content-Type":         {"application/grpc+proto"},
			CredentialsMetadataKey: {request.GetCredentials()},
			UserAgentMetadataKey:   {request.GetUserAgent()},
		},
		Trailer:       make(http.Header),
		ContentLength: -1,
		Body:          ioutil.NopCloser(bytes.NewReader(framed)),
	}
	return req, nil
}
|
package knative
import (
"strings"
"knative.dev/serving/pkg/apis/networking/v1alpha1"
)
// ExternalDomains returns every host of the rule that does NOT belong to the
// local domain, each paired with a ":*" variant.
//
// Somehow envoy doesn't match properly gRPC authorities with ports; the fix
// is to include ":*" in the domains, for both internal and external domains.
// More info https://github.com/envoyproxy/envoy/issues/886
func ExternalDomains(rule v1alpha1.IngressRule, localDomainName string) []string {
	var domains []string
	for _, h := range rule.Hosts {
		if strings.Contains(h, localDomainName) {
			continue
		}
		domains = append(domains, h, h+":*")
	}
	return domains
}
// InternalDomains returns domains with the following formats:
//   - sub-route_host
//   - sub-route_host.namespace
//   - sub-route_host.namespace.svc
//   - Each of the previous ones with ":*" appended
//
// The ":*" variants work around envoy not matching gRPC authorities that
// carry ports (https://github.com/envoyproxy/envoy/issues/886).
func InternalDomains(rule v1alpha1.IngressRule, localDomainName string) []string {
	var res []string
	for _, host := range rule.Hosts {
		if !strings.Contains(host, localDomainName) {
			continue
		}
		res = append(res, host, host+":*")
		splits := strings.Split(host, ".")
		// Guard: a host with fewer than two dot-separated labels would make
		// the unconditional splits[1] access below panic.
		if len(splits) < 2 {
			continue
		}
		domain := splits[0] + "." + splits[1]
		res = append(res, domain, domain+":*")
		domain += ".svc"
		res = append(res, domain, domain+":*")
	}
	return res
}
// RuleIsExternal reports whether a rule should be treated as externally
// visible. When the rule itself carries no visibility, the ingress-level
// visibility decides (Knative defaults to "external" when nothing is set).
func RuleIsExternal(rule v1alpha1.IngressRule, ingressVisibility v1alpha1.IngressVisibility) bool {
	if rule.Visibility == v1alpha1.IngressVisibilityExternalIP {
		return true
	}
	if rule.Visibility == v1alpha1.IngressVisibilityClusterLocal {
		return false
	}
	return ingressVisibility != v1alpha1.IngressVisibilityClusterLocal
}
|
package main
import "fmt"
// main is a hands-on tutorial walking through Go variable basics: typed
// declaration with var, short declaration (:=), reassignment (=),
// multi-assignment, and value swapping. The commentary is the lesson; the
// prints show each step's effect.
func main() {
	var test int
	test = 1
	fmt.Println(test)
	test = 2
	fmt.Println(test)
	ExampleLol := "Chvfefe"
	// fmt.Println(ExampleLol)
	/*In line 11, it replaces `var YourName string` to something like `x := "y"`
	And since := assigns, for example in line 11, ExampleLol as a "Chvfefe",
	don't have to re-type :=, since the string has been already assigned.
	If we want to change it's value, we simply type =. For example:
	Oh, and don't forget to print it again, like in line 20. I forgot it already.
	Edit 2: And remove line 12, so it doesn't print the value two times.*/
	ExampleLol = "Chvfefe 2.0"
	fmt.Println(ExampleLol)
	/* Give it a run on cmd (go run variables.go), and see it for yourself!
	lol :)
	Another reminder, since ExampleLol has been declared as a string,
	if you make any mistakes while typing, the cmd will tell you that
	ExampleLol is a stringtype. Because, you declared it as a string,
	in line 11. Good to know, lol!
	Short reminder to myself:
	Always change the variable itself, like ExampleLol2, and give it a new string,
	if you simply want a new string, with a new variable.
	Only use line 7, 9 or 20 as an example, if you want to change the variable value.
	It can be confusing at times, but it's very basic.
	Now, if we want to declare the variable from line 9 and line 20, here's an example:
	*/
	// Multi-assignment: both variables change in one statement.
	ExampleLol, test = "Chvfefe is always", 100
	fmt.Println(ExampleLol, test)
	NewVariableHaha := "Amazing new variable wow!"
	// Parallel assignment swaps the two string values without a temporary.
	ExampleLol, NewVariableHaha = NewVariableHaha, ExampleLol
	fmt.Println(ExampleLol, NewVariableHaha, test)
	/* Lol, I think I get it now from time to time. It's kinda cool!*/
	TheFourthExample := "This should comes at the end, wow it's getting confusing"
	fmt.Println(test, NewVariableHaha, ExampleLol, TheFourthExample)
	VarIntroduction := "So, I basically changed ExampleLol to NewVariable, and NewVariable to ExampleLol."
	TheFourthExample = "So, I could basically add more and more variables, but decided that this will be the last text. Oh, I btw just changed the value from this one aswell, gosh I'm so cool. Vote for chvfefe"
	fmt.Println(test, ExampleLol, NewVariableHaha, test, VarIntroduction, TheFourthExample)
}
/*Oh, and something to add before closing this tab and forgetting everything,
you need to use everything you type in. If you declare something without using it,
go will try to throw out an error, because they want you to remove it.
It's good and bad, but I think we should take advantatge of that.
Specially because in JavaScript, I was kinda lost.*/
|
package main
import "testing"
// TestP81 checks the Project Euler 81 solver against the small example
// matrix and the full input file.
func TestP81(t *testing.T) {
	tests := []struct {
		file string
		want int
	}{
		{"./p081_matrix_small.txt", 2427},
		{"./p081_matrix.txt", 427337},
	}
	for _, tc := range tests {
		if got := solve(tc.file); got != tc.want {
			t.Errorf("P81: %v\tExpected: %v", got, tc.want)
		}
	}
}
|
package gosnowth
import (
"bytes"
"encoding/xml"
"io"
"net/http"
"net/url"
"strings"
"testing"
"time"
)
// float64Ptr returns a pointer to a copy of f, handy for literal values.
func float64Ptr(f float64) *float64 {
	v := f
	return &v
}
// stringPtr returns a pointer to a copy of s, handy for literal values.
func stringPtr(s string) *string {
	v := s
	return &v
}
// noOpReadCloser adapts a bytes.Buffer into an io.ReadCloser whose Close
// only records that it was called — used as a fake http.Response body.
type noOpReadCloser struct {
	*bytes.Buffer
	WasClosed bool // set once Close has been called
}
// Close marks the reader as closed; it never fails and discards no data.
func (n *noOpReadCloser) Close() error {
	n.WasClosed = true
	return nil
}
// TestResolveURL verifies that a resource path is joined onto a base URL.
func TestResolveURL(t *testing.T) {
	t.Parallel()
	base, _ := url.Parse("http://localhost:1234")
	got := resolveURL(base, "/a/resource/path")
	if want := "http://localhost:1234/a/resource/path"; got != want {
		t.Errorf("Expected result: %v, got: %v", want, got)
	}
}
// TestDecodeJSON decodes a small JSON body into a map and checks both keys.
func TestDecodeJSON(t *testing.T) {
	t.Parallel()
	body := &noOpReadCloser{
		bytes.NewBufferString(`{
			"something": 1,
			"something_else": 2
		}`),
		false,
	}
	resp := &http.Response{Body: body}
	decoded := make(map[string]int)
	if err := decodeJSON(resp.Body, &decoded); err != nil {
		t.Error("error encountered from decode function: ", err)
	}
	if decoded["something"] != 1 {
		t.Error("something should be 1")
	}
	if decoded["something_else"] != 2 {
		t.Error("something_else should be 2")
	}
}
// TestDecodeXML decodes a small XML body into a struct and checks both fields.
func TestDecodeXML(t *testing.T) {
	t.Parallel()
	type data struct {
		XMLName       xml.Name `xml:"data"`
		Something     int      `xml:"something"`
		SomethingElse int      `xml:"somethingelse"`
	}
	body := &noOpReadCloser{
		bytes.NewBufferString(`<data><something>1</something><somethingelse>2</somethingelse></data>`),
		false,
	}
	resp := &http.Response{Body: body}
	decoded := &data{}
	if err := decodeXML(resp.Body, decoded); err != nil {
		t.Error("error encountered from decode function: ", err)
	}
	if decoded.Something != 1 {
		t.Error("something should be 1")
	}
	if decoded.SomethingElse != 2 {
		t.Error("something else should be 2")
	}
}
// TestEncodeXML encodes a struct and checks the output names its elements.
func TestEncodeXML(t *testing.T) {
	t.Parallel()
	type data struct {
		XMLName       xml.Name `xml:"data"`
		Something     int      `xml:"something"`
		SomethingElse int      `xml:"somethingelse"`
	}
	reader, err := encodeXML(&data{Something: 1, SomethingElse: 2})
	if err != nil {
		t.Error("error encountered encoding: ", err)
	}
	out, _ := io.ReadAll(reader)
	if !strings.Contains(string(out), "somethingelse") {
		t.Error("Should contain somethingelse")
	}
}
// TestFormatTimestamp checks millisecond and whole-second formatting.
func TestFormatTimestamp(t *testing.T) {
	t.Parallel()
	cases := []struct {
		in  time.Time
		exp string
	}{
		{time.Unix(123456789, int64(time.Millisecond)), "123456789.001"},
		{time.Unix(123456789, 0), "123456789"},
	}
	for _, c := range cases {
		if res := formatTimestamp(c.in); res != c.exp {
			t.Errorf("Expected string: %v, got: %v", c.exp, res)
		}
	}
}
// TestParseTimestamp parses a fractional-second timestamp string and
// compares the resulting instant.
func TestParseTimestamp(t *testing.T) {
	t.Parallel()
	res, err := parseTimestamp("123456789.001")
	if err != nil {
		t.Fatal(err)
	}
	if exp := time.Unix(123456789, int64(time.Millisecond)); !res.Equal(exp) {
		t.Errorf("Expected time: %v, got: %v", exp, res)
	}
}
// TestParseDuration checks that a bare number parses as that many seconds.
func TestParseDuration(t *testing.T) {
	t.Parallel()
	got, err := parseDuration("1")
	if err != nil {
		t.Fatal(err)
	}
	if exp := time.Second; got != exp {
		t.Errorf("Expected duration: %v, got: %v", exp, got)
	}
}
|
package random
import (
"fmt"
"strconv"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/irisnet/irismod/modules/random/keeper"
"github.com/irisnet/irismod/modules/random/types"
)
// InitGenesis stores genesis data, re-enqueueing every pending random-number
// request at its recorded height. It panics on invalid genesis state, which
// is the SDK convention for unrecoverable genesis errors.
func InitGenesis(ctx sdk.Context, k keeper.Keeper, data types.GenesisState) {
	if err := types.ValidateGenesis(data); err != nil {
		panic(fmt.Errorf("failed to initialize random genesis state: %s", err.Error()))
	}
	for height, requests := range data.PendingRandomRequests {
		// Parse once per height (invariant across the inner loop). The parse
		// error was previously discarded, which would silently enqueue every
		// request of a malformed key at height 0 — surface it instead.
		h, err := strconv.ParseInt(height, 10, 64)
		if err != nil {
			panic(fmt.Errorf("failed to initialize random genesis state: invalid height %q: %s", height, err.Error()))
		}
		for _, request := range requests.Requests {
			k.EnqueueRandomRequest(ctx, h, types.GenerateRequestID(request), request)
		}
	}
}
// ExportGenesis outputs genesis data, grouping queued random requests by the
// number of blocks left until they fire (relative to the current height).
func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState {
	pending := make(map[string]types.Requests)
	k.IterateRandomRequestQueue(ctx, func(height int64, request types.Request) bool {
		left := fmt.Sprintf("%d", height-ctx.BlockHeight()+1)
		// The zero value of types.Requests has a nil slice, so appending to
		// a missing bucket behaves exactly like creating a fresh one.
		bucket := pending[left]
		bucket.Requests = append(bucket.Requests, request)
		pending[left] = bucket
		return false
	})
	return &types.GenesisState{PendingRandomRequests: pending}
}
|
package utils
import (
"fmt"
"time"
)
// U produces sequential string IDs for a single one-second time window.
// The prefix is the window's timestamp; a feeder goroutine streams an
// incrementing counter through c until a signal arrives on d.
type U struct {
	prefix string        // timestamp prefix ("060102150405") shared by all IDs of this window
	c      chan int      // buffered stream of counter values
	d      chan struct{} // signal that stops the feeder goroutine
}
// NewU builds a generator prefixed with timestamp t (formatted
// "060102150405") and starts its counter feeder with a buffer of n values.
func NewU(t int64, n int) *U {
	gen := &U{
		prefix: time.Unix(t, 0).Format("060102150405"),
		c:      make(chan int, n),
		d:      make(chan struct{}),
	}
	gen.start()
	return gen
}
// start launches the feeder goroutine, which pushes 0, 1, 2, ... into c
// until a value arrives on d.
func (u *U) start() {
	go func() {
		next := 0
		for {
			select {
			case u.c <- next:
				next++
			case <-u.d:
				return
			}
		}
	}()
}
// stop terminates the feeder goroutine and closes the counter channel.
// NOTE(review): calling stop twice panics on the second close, and a Next
// racing with stop can read from the closed channel (yielding 0) — confirm
// callers serialize rotation as TimeID.Start does.
func (u *U) stop() {
	u.d <- struct{}{}
	close(u.c)
}
// Next returns the window's timestamp prefix followed by the next counter value.
func (u *U) Next() string {
	return fmt.Sprintf("%s%d", u.prefix, <-u.c)
}
// TimeID rotates U generators once per second so IDs stay unique over time.
type TimeID struct {
	o *U  // outgoing window, stopped after rotation
	c *U  // current window served by Next
	n *U  // next window, prepared one second in advance
	l int // buffer size for each window's counter channel
}
// NewTimeID returns an ID source whose per-window counter buffer holds l
// values. Start must be called before Next.
func NewTimeID(l int) *TimeID {
	id := new(TimeID)
	id.l = l
	return id
}
// Start begins the once-per-second window rotation and returns immediately.
//
// The current and next generators are created synchronously before the
// rotation goroutine launches, so Next is safe as soon as Start returns.
// (Previously u.c stayed nil until the goroutine's first iteration ran,
// so an early Next could dereference nil — e.g. via init() below.)
//
// NOTE(review): u.c/u.n/u.o are still mutated by the goroutine without
// synchronization; confirm whether concurrent Next callers need a mutex.
func (u *TimeID) Start() error {
	u.c = NewU(time.Now().Unix(), u.l)
	u.n = NewU(time.Now().Unix()+1, u.l)
	go func() {
		t := time.NewTicker(time.Second)
		for {
			<-t.C
			// Rotate: current becomes outgoing, next becomes current, and a
			// fresh next window is prepared; the outgoing window is stopped.
			u.o = u.c
			u.c = u.n
			u.n = NewU(time.Now().Unix()+1, u.l)
			if u.o != nil {
				u.o.stop()
			}
		}
	}()
	return nil
}
// Next returns the next ID from the current one-second window.
// NOTE(review): panics with a nil dereference if Start has not yet
// populated the current window — confirm callers always Start first.
func (u *TimeID) Next() string {
	return u.c.Next()
}
// _systemID is the process-wide ID source backing RandomTimeString.
var _systemID *TimeID

// init eagerly starts the shared TimeID source so RandomTimeString works
// without explicit setup. The previous debug banner ("---------------")
// printed to stdout of every importing program and has been removed.
func init() {
	_systemID = NewTimeID(10)
	_systemID.Start()
}
// RandomTimeString returns the next ID from the shared time-based source.
// NOTE(review): despite the name the values are sequential within each
// second, not random — consider renaming in a future API revision.
func RandomTimeString() string {
	return _systemID.Next()
}
|
package dbBeans
import (
"strings"
"github.com/kinwyb/go/db"
)
//CREATE TABLE `bank` (
// `id` int(10) unsigned NOT NULL AUTO_INCREMENT COMMENT '银行ID',
// `bank_name` varchar(255) NOT NULL COMMENT '银行名称',
// `bank_type` tinyint(3) unsigned NOT NULL DEFAULT '1' COMMENT '银行类型',
// `bank_account` varchar(255) NOT NULL DEFAULT '' COMMENT '银行帐号',
// `bank_people` varchar(255) NOT NULL COMMENT '联系人',
// `bank_phone` varchar(20) NOT NULL DEFAULT '' COMMENT '联系电话',
// `bank_money` decimal(20,3) NOT NULL DEFAULT '0.000' COMMENT '初期余额',
// `bank_money_usa` decimal(20,3) NOT NULL DEFAULT '0.000',
// PRIMARY KEY (`id`),
// KEY `type` (`bank_type`)
//) ENGINE=InnoDB AUTO_INCREMENT=29 DEFAULT CHARSET=utf8 COMMENT='银行数据'
// TableBank is the name of the bank table.
const TableBank = "bank"

// BankColumns lists every column selected by the Bank query helpers below.
const BankColumns = "`id`,`bank_name`,`bank_type`,`bank_account`,`bank_people`,`bank_phone`,`bank_money`,`bank_money_usa`"
// BankDB mirrors one row of the `bank` table (see the CREATE TABLE above).
// NOTE(review): field-for-field identical to Bank below — confirm whether
// the duplication is required by the db layer or can be collapsed.
type BankDB struct {
	Id           int64   `description:"银行ID" db:"id" primary:"true"`
	BankName     string  `description:"银行名称" db:"bank_name"`
	BankType     int     `description:"银行类型" db:"bank_type"`
	BankAccount  string  `description:"银行帐号" db:"bank_account"`
	BankPeople   string  `description:"联系人" db:"bank_people"`
	BankPhone    string  `description:"联系电话" db:"bank_phone"`
	BankMoney    float64 `description:"初期余额" db:"bank_money"`
	BankMoneyUsa float64 `description:"" db:"bank_money_usa"`
}
// Bank is the query-result model for one row of the `bank` table, populated
// from raw result maps via SetMapValue.
type Bank struct {
	Id           int64   `description:"银行ID" db:"id" primary:"true"`
	BankName     string  `description:"银行名称" db:"bank_name"`
	BankType     int     `description:"银行类型" db:"bank_type"`
	BankAccount  string  `description:"银行帐号" db:"bank_account"`
	BankPeople   string  `description:"联系人" db:"bank_people"`
	BankPhone    string  `description:"联系电话" db:"bank_phone"`
	BankMoney    float64 `description:"初期余额" db:"bank_money"`
	BankMoneyUsa float64 `description:"" db:"bank_money_usa"`
}
// SetMapValue populates b from a raw query-result map; each db helper falls
// back to the column's zero value when the key is absent or mistyped.
func (b *Bank) SetMapValue(m map[string]interface{}) {
	b.Id = db.Int64Default(m["id"])
	b.BankName = db.StringDefault(m["bank_name"])
	b.BankType = db.IntDefault(m["bank_type"])
	b.BankAccount = db.StringDefault(m["bank_account"])
	b.BankPeople = db.StringDefault(m["bank_people"])
	b.BankPhone = db.StringDefault(m["bank_phone"])
	b.BankMoney = db.Float64Default(m["bank_money"])
	b.BankMoneyUsa = db.Float64Default(m["bank_money_usa"])
}
// BankGetOne fetches the single bank row matching the optional WHERE clause,
// or nil when no row matches.
// NOTE(review): `where` is concatenated into the SQL verbatim — callers must
// pass only trusted clause text and bind user values through args.
func BankGetOne(where string, q db.Query, args ...interface{}) *Bank {
	var sb strings.Builder
	sb.WriteString("SELECT ")
	sb.WriteString(BankColumns)
	sb.WriteString(" FROM ")
	sb.WriteString(q.Table(TableBank))
	if where != "" {
		sb.WriteString(" WHERE ")
		sb.WriteString(where)
	}
	row := q.QueryRow(sb.String(), args...).GetMap()
	if row == nil {
		return nil
	}
	bank := &Bank{}
	bank.SetMapValue(row)
	return bank
}
// BankGetList fetches every bank row matching the optional WHERE clause.
// NOTE(review): `where` is concatenated into the SQL verbatim — callers must
// pass only trusted clause text and bind user values through args.
func BankGetList(where string, q db.Query, args ...interface{}) []*Bank {
	var sb strings.Builder
	sb.WriteString("SELECT ")
	sb.WriteString(BankColumns)
	sb.WriteString(" FROM ")
	sb.WriteString(q.Table(TableBank))
	if where != "" {
		sb.WriteString(" WHERE ")
		sb.WriteString(where)
	}
	var banks []*Bank
	q.QueryRows(sb.String(), args...).ForEach(func(row map[string]interface{}) bool {
		bank := &Bank{}
		bank.SetMapValue(row)
		banks = append(banks, bank)
		return true
	})
	return banks
}
// BankGetPageList fetches one page of bank rows matching the optional WHERE
// clause, with pagination driven by pg.
// NOTE(review): `where` is concatenated into the SQL verbatim — callers must
// pass only trusted clause text and bind user values through args.
func BankGetPageList(where string, q db.Query, pg *db.PageObj, args ...interface{}) []*Bank {
	var sb strings.Builder
	sb.WriteString("SELECT ")
	sb.WriteString(BankColumns)
	sb.WriteString(" FROM ")
	sb.WriteString(q.Table(TableBank))
	if where != "" {
		sb.WriteString(" WHERE ")
		sb.WriteString(where)
	}
	var banks []*Bank
	q.QueryWithPage(sb.String(), pg, args...).ForEach(func(row map[string]interface{}) bool {
		bank := &Bank{}
		bank.SetMapValue(row)
		banks = append(banks, bank)
		return true
	})
	return banks
}
|
package user
// UserInformation carries a user's profile fields as exchanged with the API.
// NOTE(review): Password is serialized under json:"password" — confirm this
// struct is never written to responses or logs, or the credential will leak.
type UserInformation struct {
	ID           string `json:"id"`
	Name         string `json:"name"`
	Email        string `json:"email"`
	Phone        string `json:"phone"`
	ProfileImage string `json:"profileImage"`
	Password     string `json:"password"`
}
// LoginDetails is the credential pair submitted with a login request.
type LoginDetails struct {
	Email    string `json:"email"`
	Password string `json:"password"`
}
|
package ecs
import "sort"
// State is an Entity-Component-System.
// It is simply a slice of systems, kept ordered by priority (see Less).
type State []System
// AddSystem adds a system to the State and re-sorts systems by priority.
// NOTE(review): sort.Sort is unstable, so systems sharing a priority may be
// reordered on each insertion — confirm callers don't rely on insertion order.
func (s *State) AddSystem(system System) {
	*s = append(*s, system)
	sort.Sort(s)
}
// Update calls Update on every system in priority order.
// u can be ignored, or used for run-levels, a time-delta, or anything; it is
// passed through to all systems unchanged.
func (s State) Update(u interface{}) {
	for _, sys := range s {
		sys.Update(u)
	}
}
// Add adds an entity to all systems.
// If any system rejects the entity, it is removed again from every system
// that already accepted it, and the error is returned.
// NOTE(review): the original comment claimed a zero Entity ID "will be
// generated", but no generation happens here — confirm where IDs are minted.
func (s State) Add(e Entity) error {
	for i := range s {
		err := s[i].Add(e)
		if err != nil {
			// Roll back: remove from the systems added before the failure.
			for i--; i >= 0; i-- {
				s[i].Remove(e)
			}
			return err
		}
	}
	return nil
}
// Remove removes an entity from every system.
func (s State) Remove(e Entity) {
	for _, sys := range s {
		sys.Remove(e)
	}
}
// These functions implement sort.Interface so State can be ordered by priority.
func (s State) Len() int      { return len(s) }
func (s State) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// Less orders systems by ascending priority; systems that do not implement
// Prioritizer default to priority 0.
func (s State) Less(i, j int) bool {
	priority := func(sys System) int {
		if p, ok := sys.(Prioritizer); ok {
			return p.GetPriority()
		}
		return 0
	}
	return priority(s[i]) < priority(s[j])
}
|
package core
import (
"context"
"fmt"
"sort"
"strconv"
"github.com/borchero/switchboard/api/v1alpha1"
"github.com/borchero/switchboard/backends"
"github.com/borchero/switchboard/core/utils"
"go.borchero.com/typewriter"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// zoneRecordReconciler reconciles DNSZoneRecord objects into the DNSResource
// records they describe, reusing the shared Reconciler machinery.
type zoneRecordReconciler struct {
	*Reconciler
	log typewriter.Logger
}
// RegisterZoneRecordReconciler adds a new reconciliation loop to watch for
// changes of DNSZoneRecord, as well as of the DNSResource objects it owns.
func RegisterZoneRecordReconciler(
	base *Reconciler, manager ctrl.Manager, log typewriter.Logger,
) error {
	reconciler := &zoneRecordReconciler{base, log.With("zonerecords")}
	return ctrl.
		NewControllerManagedBy(manager).
		For(&v1alpha1.DNSZoneRecord{}).
		Owns(&v1alpha1.DNSResource{}).
		Complete(reconciler)
}
// Reconcile handles a single reconciliation request for a DNSZoneRecord,
// delegating shared bookkeeping to doReconcile. No delete handling is
// registered: owned DNSResources are garbage-collected via owner references.
func (r *zoneRecordReconciler) Reconcile(request ctrl.Request) (ctrl.Result, error) {
	var record v1alpha1.DNSZoneRecord
	return r.doReconcile(
		request, &record, r.log,
		func(log typewriter.Logger) error { return r.update(&record, log) },
		noDelete, noEmptyDelete,
	)
}
// update drives the record towards its desired state: it computes the set of
// DNSResources the record should own, diffs it against the resources that
// currently exist, and creates/deletes the difference.
func (r *zoneRecordReconciler) update(
	record *v1alpha1.DNSZoneRecord, logger typewriter.Logger,
) error {
	ctx := context.Background()
	logger.Info("updating")
	// 1) Ensure DNS resource update
	// 1.1) Get zone (needed for the domain)
	backend, ok := r.cache.Get(record.Spec.ZoneName)
	if !ok {
		return fmt.Errorf("Backend zone '%s' not found in cache", record.Spec.ZoneName)
	}
	// 1.2) Get expected IP of DNS resources
	ip, err := r.getIP(record.Spec.IPSource)
	if err != nil {
		return fmt.Errorf("Failed getting IP: %s", err)
	}
	// 1.4) Get expected status, i.e. list all expected DNS resources
	expectedResources, err := r.listExpectedResources(*record, backend, ip)
	if err != nil {
		return err
	}
	// 1.5) Get current status, i.e. list existing DNS resources
	ownedResources, err := r.listOwnedResources(*record)
	if err != nil {
		return err
	}
	// 1.6) Compute diff
	insertset, deletionset := utils.ResourceDiff(expectedResources, ownedResources)
	// 1.7) Create non-existing resources
	if len(insertset) > 0 {
		logger.
			WithV(typewriter.KV("count", strconv.Itoa(len(insertset)))).
			Info("Creating resources")
	}
	for _, insert := range insertset {
		if err := r.client.Create(ctx, &insert); err != nil {
			return fmt.Errorf("Failed creating DNS resource: %s", err)
		}
	}
	// 1.8) Delete falsely existing resources
	if len(deletionset) > 0 {
		// BUGFIX: previously logged len(insertset) here, so the deletion log
		// line reported the wrong count.
		logger.
			WithV(typewriter.KV("count", strconv.Itoa(len(deletionset)))).
			Info("Deleting resources")
	}
	for _, delete := range deletionset {
		if err := r.client.Delete(ctx, &delete); err != nil {
			return fmt.Errorf("Failed deleting DNS resource: %s", err)
		}
	}
	// 2) Finally, everything is up-to-date. Note that we do not need any finalizers as all this
	// record does is creating other records - which will be deleted by Kubernetes' garbage
	// collection.
	return nil
}
// getIP resolves the record's target IP from whichever source is configured;
// exactly one of Static, Service, or Node is expected to be set (checked in
// that order).
func (r *zoneRecordReconciler) getIP(source v1alpha1.IPSource) (string, error) {
	switch {
	case source.Static != nil:
		return source.Static.IP, nil
	case source.Service != nil:
		ip, err := r.getServiceIP(*source.Service)
		if err != nil {
			return "", fmt.Errorf("Failed getting service IP: %s", err)
		}
		return ip, nil
	case source.Node != nil:
		ip, err := r.getNodeIP(*source.Node)
		if err != nil {
			return "", fmt.Errorf("Failed getting node IP: %s", err)
		}
		return ip, nil
	default:
		return "", fmt.Errorf("IP source not existing")
	}
}
// getServiceIP looks up the referenced Service and extracts either its
// cluster IP or the first load-balancer ingress IP, per the source type.
func (r *zoneRecordReconciler) getServiceIP(svc v1alpha1.ServiceIPSource) (string, error) {
	ctx := context.Background()
	// 1) Get service
	var service v1.Service
	if err := r.client.Get(ctx, svc.NamespacedName(), &service); err != nil {
		return "", fmt.Errorf("Failed getting service: %s", err)
	}
	// 2) Extract IP
	switch svc.Type {
	case v1alpha1.ServiceIPTypeCluster:
		if service.Spec.ClusterIP == "" {
			return "", fmt.Errorf("Cluster IP not available")
		}
		return service.Spec.ClusterIP, nil
	case v1alpha1.ServiceIPTypeExternal:
		// Only the first ingress is considered; multi-ingress services are
		// not distinguished here.
		ingresses := service.Status.LoadBalancer.Ingress
		if len(ingresses) == 0 {
			return "", fmt.Errorf("Load balancer not available")
		}
		return ingresses[0].IP, nil
	default:
		return "", fmt.Errorf("Unknown service type '%s'", svc.Type)
	}
}
// getNodeIP returns the lexicographically smallest node IP of the requested
// type among nodes matching the source's label selectors (smallest for a
// stable choice across reconciliations).
func (r *zoneRecordReconciler) getNodeIP(source v1alpha1.NodeIPSource) (string, error) {
	ctx := context.Background()
	// 1) Build the label selector for the requested nodes.
	selector := labels.NewSelector()
	if source.LabelSelectors != nil {
		for k, v := range source.LabelSelectors {
			requirement, err := labels.NewRequirement(k, selection.Equals, []string{v})
			if err != nil {
				return "", fmt.Errorf("Failed building label selector: %s", err)
			}
			// BUGFIX: Selector.Add returns a NEW selector; the previous code
			// discarded the result, silently dropping every requirement.
			selector = selector.Add(*requirement)
		}
	}
	// 2) List nodes restricted to the selector.
	// BUGFIX: the selector was previously never passed to List, so ALL nodes
	// were considered regardless of the configured labels.
	var nodes v1.NodeList
	if err := r.client.List(ctx, &nodes, &client.ListOptions{LabelSelector: selector}); err != nil {
		return "", fmt.Errorf("Failed listing nodes: %s", err)
	}
	if len(nodes.Items) == 0 {
		return "", fmt.Errorf("Unable to find any node with specified labels")
	}
	// 3) Collect each node's first matching address and choose the smallest.
	ips := make([]string, 0)
	for _, item := range nodes.Items {
		for _, address := range item.Status.Addresses {
			if (source.Type == v1alpha1.NodeIPTypeExternal && address.Type == v1.NodeExternalIP) ||
				(source.Type == v1alpha1.NodeIPTypeInternal && address.Type == v1.NodeInternalIP) {
				ips = append(ips, address.Address)
				break
			}
		}
	}
	if len(ips) == 0 {
		return "", fmt.Errorf("Unable to find any matching node specifying %s", source.Type)
	}
	sort.Strings(ips)
	return ips[0], nil
}
// listExpectedResources builds the full set of DNSResources the record
// should own: one A record per host and one CNAME per cname, each CNAME
// pointing at the first host's fully-qualified domain.
func (r *zoneRecordReconciler) listExpectedResources(
	record v1alpha1.DNSZoneRecord, zone backends.DNSZone, ip string,
) ([]v1alpha1.DNSResource, error) {
	expectedResources := make([]v1alpha1.DNSResource, 0)
	// 1) A records
	for _, host := range record.Spec.Hosts {
		resource, err := r.resource(record, zone, v1alpha1.DNSTypeA, host, ip)
		if err != nil {
			return nil, fmt.Errorf("Failed listing expected hosts: %s", err)
		}
		expectedResources = append(expectedResources, resource)
	}
	// 2) CNAME records, targeting the first A record's domain.
	// BUGFIX: expectedResources[0] was previously indexed unconditionally,
	// panicking whenever the record declared no hosts.
	if len(record.Spec.Cnames) > 0 {
		if len(expectedResources) == 0 {
			return nil, fmt.Errorf("Cannot create CNAME records without at least one host")
		}
		primaryDomain := expectedResources[0].Spec.Domain
		for _, cname := range record.Spec.Cnames {
			resource, err := r.resource(record, zone, v1alpha1.DNSTypeCname, cname, primaryDomain)
			if err != nil {
				return nil, fmt.Errorf("Failed listing expected hosts: %s", err)
			}
			expectedResources = append(expectedResources, resource)
		}
	}
	return expectedResources, nil
}
// listOwnedResources lists the DNSResources in the record's namespace whose
// owner index field points at this record.
func (r *zoneRecordReconciler) listOwnedResources(
	record v1alpha1.DNSZoneRecord,
) ([]v1alpha1.DNSResource, error) {
	ctx := context.Background()
	var existingResources v1alpha1.DNSResourceList
	option := &client.ListOptions{
		Namespace:     record.Namespace,
		FieldSelector: fields.OneTermEqualSelector(indexFieldOwner, record.Name),
	}
	if err := r.client.List(ctx, &existingResources, option); err != nil {
		return nil, fmt.Errorf("Failed listing owned resources: %s", err)
	}
	return existingResources.Items, nil
}
// resource builds a DNSResource for one (type, host, data) triple of the
// record, fully qualifying the host with the zone's domain and setting the
// record as controller owner so garbage collection removes orphans.
func (r *zoneRecordReconciler) resource(
	record v1alpha1.DNSZoneRecord, zone backends.DNSZone,
	kind v1alpha1.DNSType, host string, data string,
) (v1alpha1.DNSResource, error) {
	// 1) Get key values
	host = utils.FullHost(host, zone)
	// 2) Generate resource
	resource := v1alpha1.DNSResource{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: fmt.Sprintf("%s-", record.Name),
			Namespace:    record.Namespace,
		},
		Spec: v1alpha1.DNSResourceSpec{
			ZoneName: record.Spec.ZoneName,
			Domain:   host,
			Type:     kind,
			Data:     data,
			TTL:      record.Spec.TTL,
		},
	}
	// 3) Set owner
	if err := ctrl.SetControllerReference(&record, &resource, r.scheme); err != nil {
		return v1alpha1.DNSResource{}, fmt.Errorf("Failed setting owner: %s", err)
	}
	return resource, nil
}
|
package main
import (
"context"
"errors"
"fmt"
"log"
"os"
"github.com/jmoiron/sqlx"
"github.com/urfave/cli"
"github.com/xo/dburl"
"github.com/akito0107/xmigrate"
"github.com/akito0107/xmigrate/cmd"
"github.com/akito0107/xmigrate/toposort"
)
// main wires up the pgmigrate CLI. The database URL is expected as the
// first positional argument; all work is delegated to syncAction.
func main() {
	app := cli.NewApp()
	app.Name = "pgmigrate"
	app.Usage = "postgres db migration utility"
	app.UsageText = "pgmigrate [db url] [OPTIONS]"
	app.Flags = []cli.Flag{
		cli.StringFlag{Name: "schemapath, f", Value: "schema.sql", Usage: "schema sql path"},
		cli.BoolFlag{Name: "apply, a", Usage: "apply migration"},
	}
	app.Action = func(c *cli.Context) error {
		src := c.Args().Get(0)
		if src == "" {
			return errors.New("db url is required")
		}
		parsed, err := dburl.Parse(src)
		if err != nil {
			return err
		}
		return syncAction(c, parsed)
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
// syncAction diffs the schema file against the live database, prints each
// resulting query, and executes them only when --apply was given
// (otherwise it is a dry run).
func syncAction(c *cli.Context, u *dburl.URL) error {
	ctx := context.Background()
	diffs, _, err := cmd.GetDiff(ctx, c.String("schemapath"), u)
	if err != nil {
		return err
	}
	apply := c.Bool("apply")
	var db *sqlx.DB
	if apply {
		conn, err := sqlx.Open(u.Driver, u.DSN)
		if err != nil {
			return err
		}
		db = conn
		defer db.Close()
	} else {
		fmt.Println("dry-run mode (with --apply flag will be exec below queries)")
	}
	// Order the diffs so dependencies are applied first.
	resolved, err := toposort.ResolveGraph(xmigrate.CalcGraph(diffs))
	if err != nil {
		return err
	}
	for _, node := range resolved.Nodes {
		query := node.(*xmigrate.DiffNode).Diff.Spec.ToSQLString()
		fmt.Printf("applying: %s\n", query)
		if apply {
			if _, err := db.Exec(query); err != nil {
				return err
			}
		}
	}
	return nil
}
|
package main
import (
"fmt"
"github.com/bijaythapaa/MakaluGo/lcr-game-packaged/lcr"
)
// main runs an interactive LCR dice game on stdin/stdout: it collects a
// player count (minimum 3), registers the players, then loops over turns
// until someone wins or the user types "Exit".
func main() {
	fmt.Println("Welcome to LCR dice game :D")
	g := lcr.NewGame()
	fmt.Println("Please enter how many players will play the game?")
	fmt.Println("Note: enter number more than 2.")
	// Read the player count, re-prompting until it is at least 3.
	var playersCount int
	for {
		fmt.Scanln(&playersCount)
		if playersCount < 3 {
			fmt.Println("Note: enter number more than 2.")
			continue
		}
		break
	}
	for i := 0; i < playersCount; i++ {
		playerName := fmt.Sprintf("P%v", i)
		p := g.Join(playerName)
		// Idiom fix: fmt.Printf instead of fmt.Println(fmt.Sprintf(...)).
		fmt.Printf("player: %v joined.\n", p.Name())
	}
	for {
		turn := g.Turn()
		// Players with no tokens are skipped for this turn.
		if turn.Tokens() == 0 {
			fmt.Printf("\nplayer %v, you have 0 tokens.\n", turn.Name())
			continue
		}
		// Otherwise prompt the player to roll; typing "Exit" quits the game.
		fmt.Printf("\nplayer %v, you have %v tokens. hit any key to roll dices\n", turn.Name(), turn.Tokens())
		var playerInput string
		fmt.Scanln(&playerInput)
		if playerInput == "Exit" {
			fmt.Println("You killed the game :(")
			return
		}
		// Roll the dice and apply the changes to every player's tokens.
		dicesResult := turn.RollDice()
		fmt.Println("You got:", dicesResult)
		for _, p := range g.Players() {
			fmt.Printf("player %v, have %v tokens\n", p.Name(), p.Tokens())
		}
		// Stop as soon as a winner is detected.
		winner := g.Finished()
		if winner != nil {
			fmt.Println("\nWinner: ", winner.Name())
			return
		}
	}
}
|
package main
import "encoding/json"
// h is the singleton hub that owns all websocket connections.
var h = hub{
	connections: make(map[*connection]bool), // registered connections
	broadcast:   make(chan []byte),          // messages fanned out from connections
	register:    make(chan *connection),     // registration requests from connections
	unregister:  make(chan *connection),     // teardown requests
}
// run is the hub's event loop. It is intended to run in its own goroutine
// and serializes all access to h.connections by handling register,
// unregister, and broadcast events one at a time.
func (h *hub) run() {
	// Loop forever, dispatching on whichever channel has data.
	for {
		select {
		case c := <-h.register:
			// Mark the connection as registered and send it a handshake
			// message containing its remote address and the user list.
			h.connections[c] = true
			c.data.Ip = c.ws.RemoteAddr().String()
			c.data.Type = "handshake"
			c.data.UserList = userList
			dataB, _ := json.Marshal(c.data)
			c.send <- dataB
		case c := <-h.unregister:
			// Only tear down connections we are still tracking.
			if _, ok := h.connections[c]; ok {
				delete(h.connections, c)
				close(c.send)
			}
		case data := <-h.broadcast:
			// Fan the message out to every registered connection.
			for c := range h.connections {
				select {
				case c.send <- data:
				default:
					// The connection's send buffer is full: drop it rather
					// than block the whole hub.
					delete(h.connections, c)
					close(c.send)
				}
			}
		}
	}
}
|
/*
* Copyright 2018, CS Systemes d'Information, http://www.c-s.fr
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package metadata
import (
"bytes"
"fmt"
"strings"
log "github.com/sirupsen/logrus"
"github.com/CS-SI/SafeScale/providers"
"github.com/CS-SI/SafeScale/providers/api"
"github.com/CS-SI/SafeScale/providers/objectstorage"
)
// bucketName is the name of the Object Storage bucket that holds all metadata.
// NOTE(review): never assigned in this file — presumably set elsewhere in the package.
var bucketName string
// InitializeBucket creates the Object Storage Bucket that will store the metadata
func InitializeBucket(location objectstorage.Location) error {
	if _, err := location.CreateBucket(bucketName); err != nil {
		return fmt.Errorf("failed to create Object Storage Bucket '%s': %s", bucketName, err.Error())
	}
	return nil
}
//Folder describes a metadata folder
type Folder struct {
	//path contains the base path where to read/write record in Object Storage
	path    string
	service *providers.Service
	// crypt indicates whether payloads are encrypted before being written.
	crypt bool
	// cryptKey is the symmetric key used when crypt is true.
	cryptKey []byte
}

// FolderDecoderCallback is the prototype of the function that will decode data read from Metadata
type FolderDecoderCallback func([]byte) error
// NewFolder creates a new Metadata Folder object, ready to help access the metadata inside it
func NewFolder(svc *providers.Service, path string) *Folder {
	if svc == nil {
		panic("svc is nil!")
	}
	cfg, err := svc.GetCfgOpts()
	if err != nil {
		panic(fmt.Sprintf("config options are not available! %s", err.Error()))
	}
	// When a MetadataKey is configured, payloads are encrypted with it.
	key, encrypted := cfg.Get("MetadataKey")
	folder := Folder{
		path:    strings.Trim(path, "/"),
		service: svc,
		crypt:   encrypted,
	}
	if encrypted {
		folder.cryptKey = []byte(key.(string))
	}
	return &folder
}
// GetService returns the service used by the folder
func (f *Folder) GetService() *providers.Service {
	return f.service
}

// GetClient returns the api.ClientAPI used by the folder
func (f *Folder) GetClient() api.ClientAPI {
	return f.service.ClientAPI
}

// GetBucket returns the bucket used by the folder to store Object Storage
func (f *Folder) GetBucket() objectstorage.Bucket {
	return f.service.MetadataBucket
}

// GetPath returns the base path of the folder
func (f *Folder) GetPath() string {
	return f.path
}
// absolutePath returns the fullpath to reach the 'path'+'name' starting from the folder path
func (f *Folder) absolutePath(path ...string) string {
	// Skip leading empty or "." components.
	for len(path) > 0 && (path[0] == "" || path[0] == ".") {
		path = path[1:]
	}
	// Collect the remaining non-empty components.
	parts := make([]string, 0, len(path))
	for _, item := range path {
		if item != "" {
			parts = append(parts, item)
		}
	}
	rel := strings.Trim(strings.Join(parts, "/"), "/")
	return strings.Join([]string{f.path, rel}, "/")
}
// Search tells if the object named 'name' is inside the ObjectStorage folder
func (f *Folder) Search(path string, name string) (bool, error) {
	prefix := strings.Trim(f.absolutePath(path), "/")
	entries, err := f.service.MetadataBucket.List(prefix, objectstorage.NoPrefix)
	if err != nil {
		return false, err
	}
	// Build the exact key we are looking for.
	target := name
	if prefix != "" {
		target = prefix + "/" + name
	}
	for _, entry := range entries {
		if entry == target {
			return true, nil
		}
	}
	return false, nil
}
// Delete removes metadata passed as parameter
func (f *Folder) Delete(path string, name string) error {
	if err := f.service.MetadataBucket.DeleteObject(f.absolutePath(path, name)); err != nil {
		return fmt.Errorf("failed to remove metadata in Object Storage: %s", err.Error())
	}
	return nil
}
// Read loads the content of the object stored in metadata bucket
// returns false, nil if the object is not found
// returns false, err if an error occurred
// returns true, nil if the object has been found
// The callback function has to know how to decode it and where to store the result
func (f *Folder) Read(path string, name string, callback FolderDecoderCallback) (bool, error) {
	found, err := f.Search(path, name)
	if err != nil {
		return false, err
	}
	if found {
		var buffer bytes.Buffer
		// NOTE(review): 0, 0 presumably means "read the whole object" — confirm against the objectstorage API.
		_, err := f.service.MetadataBucket.ReadObject(f.absolutePath(path, name), &buffer, 0, 0)
		if err != nil {
			return false, err
		}
		data := buffer.Bytes()
		if f.crypt {
			// Transparently decrypt when the folder was configured with a crypt key.
			data, err = decrypt(f.cryptKey, data)
			if err != nil {
				return false, err
			}
		}
		return true, callback(data)
	}
	return false, nil
}
// Write writes the content in Object Storage
func (f *Folder) Write(path string, name string, content []byte) error {
	// Encrypt the payload first when the folder was configured with a key.
	payload := content
	if f.crypt {
		var err error
		payload, err = encrypt(f.cryptKey, content)
		if err != nil {
			return err
		}
	}
	buf := bytes.NewBuffer(payload)
	_, err := f.service.MetadataBucket.WriteObject(f.absolutePath(path, name), buf, int64(buf.Len()), nil)
	return err
}
// Browse browses the content of a specific path in Metadata and executes 'cb' on each entry
func (f *Folder) Browse(path string, callback FolderDecoderCallback) error {
	list, err := f.service.MetadataBucket.List(f.absolutePath(path), objectstorage.NoPrefix)
	if err != nil {
		log.Errorf("Error browsing metadata: listing objects: %+v", err)
		return err
	}
	// Read every listed object, decrypt when needed, and hand it to the callback.
	for _, i := range list {
		var buffer bytes.Buffer
		_, err = f.service.MetadataBucket.ReadObject(i, &buffer, 0, 0)
		if err != nil {
			log.Errorf("Error browsing metadata: reading from buffer: %+v", err)
			return err
		}
		data := buffer.Bytes()
		if f.crypt {
			data, err = decrypt(f.cryptKey, data)
			if err != nil {
				return err
			}
		}
		// Stop browsing at the first callback failure.
		err = callback(data)
		if err != nil {
			log.Errorf("Error browsing metadata: running callback: %+v", err)
			return err
		}
	}
	return nil
}
|
package main
// P1Channel sums every integer below param that is divisible by 3 or 5,
// streaming candidates through a channel from a producer goroutine.
func P1Channel(param int) int {
	values := make(chan int)
	go func() {
		defer close(values)
		for n := 1; n < param; n++ {
			if n%3 == 0 || n%5 == 0 {
				values <- n
			}
		}
	}()
	total := 0
	for v := range values {
		total += v
	}
	return total
}
// P1Normal sums every integer below param that is divisible by 3 or 5
// using a plain sequential loop.
func P1Normal(param int) int {
	total := 0
	for n := 1; n < param; n++ {
		switch {
		case n%3 == 0, n%5 == 0:
			total += n
		}
	}
	return total
}
|
package catm
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00100102 is the ISO 20022 document wrapper for the catm.001.001.02
// (StatusReport) message.
type Document00100102 struct {
	XMLName xml.Name         `xml:"urn:iso:std:iso:20022:tech:xsd:catm.001.001.02 Document"`
	Message *StatusReportV02 `xml:"StsRpt"`
}

// AddMessage allocates the document's StatusReport message and returns it for population.
func (d *Document00100102) AddMessage() *StatusReportV02 {
	d.Message = new(StatusReportV02)
	return d.Message
}
// Informs the master terminal manager (MTM) or the terminal manager (TM) about the status of the acceptor system including the identification of the POI, its components and their installed versions.
type StatusReportV02 struct {
	// Set of characteristics related to the transfer of the status report.
	Header *iso20022.Header4 `xml:"Hdr"`
	// Status of the point of interaction (POI), its components and their installed versions.
	StatusReport *iso20022.StatusReport2 `xml:"StsRpt"`
	// Trailer of the message containing a MAC or a digital signature.
	SecurityTrailer *iso20022.ContentInformationType4 `xml:"SctyTrlr"`
}

// AddHeader allocates and returns the message header.
func (s *StatusReportV02) AddHeader() *iso20022.Header4 {
	s.Header = new(iso20022.Header4)
	return s.Header
}

// AddStatusReport allocates and returns the status report body.
func (s *StatusReportV02) AddStatusReport() *iso20022.StatusReport2 {
	s.StatusReport = new(iso20022.StatusReport2)
	return s.StatusReport
}

// AddSecurityTrailer allocates and returns the security trailer.
func (s *StatusReportV02) AddSecurityTrailer() *iso20022.ContentInformationType4 {
	s.SecurityTrailer = new(iso20022.ContentInformationType4)
	return s.SecurityTrailer
}
|
package auth
import "dmicro/gate/micro/plugin"
// Options holds this plugin's parameters; currently only the skipper
// function, which decides whether a request bypasses the middleware.
type Options struct {
	skipperFunc plugin.SkipperFunc
}

// Option mutates an Options value (functional-options pattern).
type Option func(*Options)
// newOptions builds an Options value, starting from the default skipper
// and applying each provided Option in order.
func newOptions(opts ...Option) Options {
	result := Options{skipperFunc: plugin.DefaultSkipperFunc}
	for _, apply := range opts {
		apply(&result)
	}
	return result
}
// WithSkipperFunc returns an Option that installs a custom skipper function.
func WithSkipperFunc(skipperFunc plugin.SkipperFunc) Option {
	return func(opt *Options) {
		opt.skipperFunc = skipperFunc
	}
}
|
package main
import "os"
const (
	// exitOK is the process exit code for success.
	exitOK = iota
	// exitError is the process exit code for failure.
	exitError
)

var (
	// Version is semantic version of the tool, set by goreleaser
	Version = ""
	// Revision is commit hash of the build, set by goreleaser
	Revision = ""
)

// main delegates to realMain so that deferred calls (if any are added)
// run before os.Exit terminates the process.
func main() {
	os.Exit(realMain())
}

// realMain holds the program logic and returns the process exit code.
// NOTE(review): currently a stub that always succeeds.
func realMain() int {
	return exitOK
}
|
package api
import (
"bytes"
"encoding/json"
"github.com/go-errors/errors"
"fmt"
"io"
"net/http"
"os"
"tezos-contests.izibi.com/backend/signing"
)
// Server is a client for the contest API; it holds connection settings
// and records the most recent API-level error for inspection by callers.
type Server struct {
	Base        string           // base URL of the API
	ApiKey      string           // API key included in signed requests
	teamKeyPair *signing.KeyPair // keypair used to sign requests
	client      *http.Client
	LastError   string /* last error */
	LastDetails string /* details of last error */
}

// ServerResponse is the generic JSON envelope returned by the API.
type ServerResponse struct {
	Result  interface{} `json:"result"`
	Error   string      `json:"error"`
	Details string      `json:"details"`
}
// New constructs a Server client for the given API base URL, API key,
// and team keypair used for signing requests.
// (gofmt fix: removed the space after New and the parenthesized result.)
func New(base string, apiKey string, teamKeyPair *signing.KeyPair) *Server {
	return &Server{
		Base:        base,
		ApiKey:      apiKey,
		teamKeyPair: teamKeyPair,
		client:      new(http.Client),
	}
}
// Author returns the team's public key formatted as an "@"-prefixed author handle.
func (s *Server) Author() string {
	return "@" + s.teamKeyPair.Public
}
// GetRequest performs a GET on Base+path and decodes the response
// envelope into result. API-level errors are stored in
// LastError/LastDetails and reported as a generic "API error".
func (s *Server) GetRequest(path string, result interface{}) (err error) {
	var req *http.Request
	req, err = http.NewRequest("GET", s.Base+path, nil)
	if err != nil { err = errors.Wrap(err, 0); return }
	req.Header.Add("X-API-Version", Version)
	var resp *http.Response
	resp, err = s.client.Do(req)
	if err != nil { err = errors.Wrap(err, 0); return }
	// BUG FIX: the response body was never closed, leaking the connection.
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 299 {
		buf := new(bytes.Buffer)
		buf.ReadFrom(resp.Body)
		// BUG FIX: the original interpolated err here, which is always nil on
		// this path; report the HTTP status instead.
		err = errors.Errorf("Failed to GET %s: %s\n%s", s.Base+path, resp.Status, buf.String())
		return
	}
	sr := ServerResponse{result, "", ""}
	err = json.NewDecoder(resp.Body).Decode(&sr)
	if sr.Error != "" {
		s.LastError = sr.Error
		s.LastDetails = sr.Details
		return errors.New("API error")
	}
	return
}
// postRequest POSTs body as JSON to Base+path. Non-2xx responses are
// echoed to stderr and reported as an error; a 200 response is decoded
// into result.
func (s *Server) postRequest(path string, body io.Reader, result interface{}) (err error) {
	var resp *http.Response
	resp, err = http.Post(s.Base + path,
		"application/json; charset=utf-8", body)
	if err != nil { return }
	// BUG FIX: the response body was never closed, leaking the connection.
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 299 {
		buf := new(bytes.Buffer)
		buf.ReadFrom(resp.Body)
		fmt.Fprintln(os.Stderr, buf.String()) // XXX output to stderr
		err = errors.New(resp.Status)
		return
	}
	// NOTE(review): only an exact 200 is decoded; other 2xx bodies are ignored — confirm intended.
	if resp.StatusCode == 200 {
		err = json.NewDecoder(resp.Body).Decode(&result)
	}
	return
}
// PlainRequest POSTs msg as JSON to path without signing and decodes
// the response into result.
func (s *Server) PlainRequest(path string, msg interface{}, result interface{}) error {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(msg); err != nil {
		return err
	}
	return s.postRequest(path, &buf, result)
}
// SignedRequest encodes msg, signs it with the team's private key and the
// API key, POSTs it to path, and decodes the wrapped response into result.
// API-level errors are recorded in LastError/LastDetails.
func (s *Server) SignedRequest(path string, msg interface{}, result interface{}) error {
	s.LastError = ""
	s.LastDetails = ""
	if s.teamKeyPair == nil {
		return errors.Errorf("team keypair is missing")
	}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(msg); err != nil {
		return errors.Errorf("malformed message in request: %s", err)
	}
	signed, err := signing.Sign(s.teamKeyPair.Private, s.ApiKey, buf.Bytes())
	if err != nil {
		return errors.Errorf("failed to sign message: %s", err)
	}
	wrapped := ServerResponse{result, "", ""}
	if err := s.postRequest(path, bytes.NewReader(signed), &wrapped); err != nil {
		return errors.Errorf("failed to contact API: %s", err)
	}
	if wrapped.Error != "" {
		s.LastError = wrapped.Error
		s.LastDetails = wrapped.Details
		return errors.New("API error")
	}
	return nil
}
|
package failure
import (
"fmt"
"net/http"
"github.com/bborbe/server/renderer"
"github.com/bborbe/server/renderer/content"
)
// failureView renders an error to an HTTP response as plain text with a
// 500 Internal Server Error status.
type failureView struct {
	renderer renderer.Renderer
}
// NewFailureView builds a view whose body is the given error's string form.
func NewFailureView(err error) *failureView {
	body := content.NewContentRenderer()
	body.SetContentString(fmt.Sprintf("%v", err))
	return &failureView{renderer: body}
}
// Render writes the error content with a text/plain content type and a
// 500 status code.
func (v *failureView) Render(responseWriter http.ResponseWriter) error {
	header := responseWriter.Header()
	header.Set("Content-Type", "text/plain")
	responseWriter.WriteHeader(http.StatusInternalServerError)
	return v.renderer.Render(responseWriter)
}
|
package lox
// exprVisitor is the visitor interface over the expression AST; each
// method handles one concrete node type and returns an arbitrary result.
type exprVisitor interface {
	visitBinary(eb *ExprBinary) interface{}
	visitGrouping(eg *ExprGrouping) interface{}
	visitLiteral(el *ExprLiteral) interface{}
	visitUnary(eu *ExprUnary) interface{}
}
|
package pie_test
import (
"github.com/elliotchance/pie/v2"
"github.com/stretchr/testify/assert"
"testing"
)
// TestFloat64s checks that pie.Float64s preserves a nil slice and returns
// the elements of a non-empty float64 slice unchanged.
func TestFloat64s(t *testing.T) {
	assert.Equal(t, []float64(nil), pie.Float64s([]float64(nil)))
	assert.Equal(t,
		[]float64{92.384, 823.324, 453},
		pie.Float64s([]float64{92.384, 823.324, 453}))
}
|
package main
import (
"bufio"
"encoding/csv"
"flag"
"fmt"
"io"
"log"
"os"
"strconv"
"strings"
)
// selector picks zero or more columns out of a row.
type selector interface {
	choose(cols []string) []string
}
// parseSelector turns a 1-based column spec ("3", "2-5", "4-") into a
// selector; the second return value reports whether parsing succeeded.
func parseSelector(str string) (selector, bool) {
	tokens := strings.Split(str, "-")
	switch len(tokens) {
	case 1:
		// Single column, e.g. "3".
		col, err := strconv.Atoi(tokens[0])
		if err != nil {
			return nil, false
		}
		return singleColumn{column: col - 1}, true
	case 2:
		start, err := strconv.Atoi(tokens[0])
		if err != nil {
			return nil, false
		}
		// Open-ended range, e.g. "4-".
		if tokens[1] == "" {
			return fromColumn{column: start - 1}, true
		}
		// Closed range, e.g. "2-5".
		stop, err := strconv.Atoi(tokens[1])
		if err != nil {
			return nil, false
		}
		return columnRange{start: start - 1, stop: stop - 1}, true
	}
	return nil, false
}
// singleColumn selects exactly one 0-based column.
type singleColumn struct {
	column int
}

// choose returns a one-element slice with the chosen column, or an empty
// slice when the row is too short.
func (c singleColumn) choose(cols []string) []string {
	if c.column >= len(cols) {
		return []string{}
	}
	return []string{cols[c.column]}
}
// fromColumn selects every column from a 0-based starting index to the
// end of the row.
type fromColumn struct {
	column int
}

// choose returns the tail of the row starting at the configured column,
// or an empty slice when the row is too short.
func (c fromColumn) choose(cols []string) []string {
	if c.column >= len(cols) {
		return []string{}
	}
	return cols[c.column:]
}
// columnRange selects an inclusive range of 0-based columns; when start
// is greater than stop the columns are produced in reverse order.
type columnRange struct {
	start, stop int
}

// choose returns the columns between start and stop (inclusive), clamped
// to the row, reversed when the range was given backwards.
// NOTE(review): the reversal swaps elements of the row's backing array in place.
func (c columnRange) choose(cols []string) []string {
	lo, hi := c.start, c.stop
	reversed := lo > hi
	if reversed {
		lo, hi = hi, lo
	}
	if lo < 0 {
		lo = 0
	}
	if hi >= len(cols) {
		hi = len(cols) - 1
	}
	result := cols[lo : hi+1]
	if reversed {
		for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
			result[i], result[j] = result[j], result[i]
		}
	}
	return result
}
// options aggregates the command-line configuration.
type options struct {
	selectors []selector // column selectors parsed from -c
	printToc  bool       // dump the header row with index values instead of data
	squash    bool       // omit rows whose selected columns are all empty
	tsv       bool       // emit tab-separated output
	raw       bool       // emit space-joined raw output
}
// initOptions registers and parses the command-line flags, converts the
// -c column spec into selectors, and returns the resulting options.
// A bad selector aborts the program. With no -c, all columns are selected.
func initOptions() options {
	splice := flag.String("c", "", "Comma separated list of columns to include in result")
	printToc := flag.Bool("header", false, "Dump the header row w/ index values")
	squash := flag.Bool("trim", false, "Trim rows that have no data to output")
	tsv := flag.Bool("tsv", false, "Output in tsv format")
	raw := flag.Bool("raw", false, "Output raw data")
	flag.Parse()
	// splice := flag.Arg(0)
	opts := options{}
	if len(*splice) > 0 {
		// Each comma-separated token becomes one selector.
		for _, arg := range strings.Split(*splice, ",") {
			if sel, ok := parseSelector(arg); ok {
				opts.selectors = append(opts.selectors, sel)
				continue
			}
			log.Fatalf("Bad selector: %s\n", arg)
		}
	}
	opts.printToc = *printToc
	opts.squash = *squash
	opts.tsv = *tsv
	opts.raw = *raw
	// Default: select every column starting at the first.
	if len(opts.selectors) == 0 {
		opts.selectors = append(opts.selectors, fromColumn{0})
	}
	return opts
}
// dumpRows streams rows from r, projects each through the selectors, and
// emits the result via write. When squash is true, rows whose selected
// columns are all empty are omitted.
// BUG FIX: the original loop also silently dropped a row whose projection
// was empty even when squash was false; now such rows are still written.
func dumpRows(r csv.Reader, write func([]string), selectors []selector, squash bool) {
	for {
		row, err := r.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		outRow := []string{}
		for _, sel := range selectors {
			outRow = append(outRow, sel.choose(row)...)
		}
		if squash && !hasData(outRow) {
			continue
		}
		write(outRow)
	}
}

// hasData reports whether any column in the row is non-empty.
func hasData(cols []string) bool {
	for _, v := range cols {
		if len(v) > 0 {
			return true
		}
	}
	return false
}
// printToc reads the first row of r and prints each header cell with its
// 1-based column index.
func printToc(r csv.Reader) {
	header, err := r.Read()
	if err != nil {
		log.Fatal(err)
	}
	for i, name := range header {
		fmt.Printf("%4d %s\n", i+1, name)
	}
}
// main wires stdin CSV through the configured selectors to stdout in raw,
// tsv, or csv form, or dumps the header when -header is given.
func main() {
	opts := initOptions()
	var outfn func([]string)
	if opts.raw {
		// Raw mode: columns joined by spaces, flushed per row.
		writer := bufio.NewWriter(os.Stdout)
		outfn = func(cols []string) {
			_, err := writer.WriteString(fmt.Sprintln(strings.Join(cols, " ")))
			if err != nil {
				log.Fatal(err)
			}
			err = writer.Flush()
			if err != nil {
				log.Fatal(err)
			}
		}
	} else {
		writer := csv.NewWriter(os.Stdout)
		if opts.tsv {
			writer.Comma = '\t'
		}
		outfn = func(cols []string) {
			err := writer.Write(cols)
			if err != nil {
				// BUG FIX: previously log.Fatal(nil), which discarded the actual write error.
				log.Fatal(err)
			}
			writer.Flush()
		}
	}
	reader := csv.NewReader(os.Stdin)
	if opts.printToc {
		printToc(*reader)
	} else {
		dumpRows(*reader, outfn, opts.selectors, opts.squash)
	}
}
|
// Copyright 2020 The Operator-SDK Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package definitions
import (
"fmt"
"reflect"
"sort"
"strings"
"github.com/fatih/structtag"
"github.com/markbates/inflect"
"github.com/operator-framework/api/pkg/operators/v1alpha1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/version"
crdmarkers "sigs.k8s.io/controller-tools/pkg/crd/markers"
"sigs.k8s.io/controller-tools/pkg/markers"
"github.com/operator-framework/operator-sdk/internal/util/k8sutil"
)
// MakeFullGroupFromName returns everything but the first element of a CRD name,
// which by definition is <resource>.<full group>.
// Example: "memcacheds.cache.example.com" -> "cache.example.com".
func MakeFullGroupFromName(name string) string {
	return getHalfBySep(name, ".", 1)
}
// MakeGroupFromFullGroup returns the first element of an API group, ex. "foo" of "foo.example.com".
func MakeGroupFromFullGroup(group string) string {
	return getHalfBySep(group, ".", 0)
}
// getHalfBySep splits s by the first sep encountered and returns the first
// (half = 0) or second (half = 1) element of the result; s is returned
// unchanged when sep is absent or half is out of range.
func getHalfBySep(s, sep string, half uint) string {
	split := strings.SplitN(s, sep, 2)
	if len(split) != 2 || half >= 2 {
		return s
	}
	return split[half]
}
// buildCRDDescriptionFromType builds a crdDescription for the Go API defined
// by key from markers and type information in g.types.
func (g generator) buildCRDDescriptionFromType(gvk schema.GroupVersionKind, kindType *markers.TypeInfo) (v1alpha1.CRDDescription, error) {
	// Initialize the description.
	description := v1alpha1.CRDDescription{
		Description: kindType.Doc,
		DisplayName: k8sutil.GetDisplayName(gvk.Kind),
		Version:     gvk.Version,
		Kind:        gvk.Kind,
	}
	// Parse resources and displayName from the kind type's markers.
	for _, markers := range kindType.Markers {
		for _, marker := range markers {
			switch d := marker.(type) {
			case Description:
				// Marker-provided display name overrides the derived one.
				if d.DisplayName != "" {
					description.DisplayName = d.DisplayName
				}
				if len(d.Resources) != 0 {
					refs, err := d.Resources.toResourceReferences()
					if err != nil {
						return v1alpha1.CRDDescription{}, err
					}
					description.Resources = append(description.Resources, refs...)
				}
			case crdmarkers.Resource:
				// The +kubebuilder:resource path marker determines the CRD name.
				if d.Path != "" {
					description.Name = fmt.Sprintf("%s.%s", d.Path, gvk.Group)
				}
			}
		}
	}
	// The default, if the resource marker's path value is not set, is to use a pluralized form of lowercase kind.
	if description.Name == "" {
		description.Name = fmt.Sprintf("%s.%s", inflect.Pluralize(strings.ToLower(gvk.Kind)), gvk.Group)
	}
	sortDescription(description.Resources)
	// Find spec and status in the kind type.
	spec, err := findChildForDescType(kindType, specDescType)
	if err != nil {
		return v1alpha1.CRDDescription{}, err
	}
	status, err := findChildForDescType(kindType, statusDescType)
	if err != nil {
		return v1alpha1.CRDDescription{}, err
	}
	// Find annotated fields of spec and parse them into specDescriptors.
	markedFields, err := g.getMarkedChildrenOfField(spec)
	if err != nil {
		return v1alpha1.CRDDescription{}, err
	}
	specDescriptors := []v1alpha1.SpecDescriptor{}
	for _, fields := range markedFields {
		for _, field := range fields {
			if descriptor, include := field.toSpecDescriptor(); include {
				specDescriptors = append(specDescriptors, descriptor)
			}
		}
	}
	sortDescriptors(specDescriptors)
	description.SpecDescriptors = specDescriptors
	// Find annotated fields of status and parse them into statusDescriptors.
	markedFields, err = g.getMarkedChildrenOfField(status)
	if err != nil {
		return v1alpha1.CRDDescription{}, err
	}
	statusDescriptors := []v1alpha1.StatusDescriptor{}
	for _, fields := range markedFields {
		for _, field := range fields {
			if descriptor, include := field.toStatusDescriptor(); include {
				statusDescriptors = append(statusDescriptors, descriptor)
			}
		}
	}
	sortDescriptors(statusDescriptors)
	description.StatusDescriptors = statusDescriptors
	return description, nil
}
// findChildForDescType returns a field with a tag matching string(typ) by searching all top-level fields in info.
// If no field is found, an error is returned.
func findChildForDescType(info *markers.TypeInfo, typ descType) (markers.FieldInfo, error) {
	want := string(typ)
	for _, field := range info.Fields {
		tags, err := structtag.Parse(string(field.Tag))
		if err != nil {
			return markers.FieldInfo{}, err
		}
		if jsonTag, err := tags.Get("json"); err == nil && jsonTag.Name == want {
			return field, nil
		}
	}
	return markers.FieldInfo{}, fmt.Errorf("no %s found for type %s", typ, info.Name)
}
// sortDescriptors sorts a slice of structs with a Path field by comparing Path strings naturally.
// v must be a slice; the sort is performed in place via reflection.
func sortDescriptors(v interface{}) {
	slice := reflect.ValueOf(v)
	values := toValueSlice(slice)
	sort.Slice(values, func(i, j int) bool {
		return values[i].FieldByName("Path").String() < values[j].FieldByName("Path").String()
	})
	// Write the sorted copies back into the original slice.
	for i := 0; i < slice.Len(); i++ {
		slice.Index(i).Set(values[i])
	}
}
// sortDescription sorts a slice of structs with Name, Kind, and Version fields
// by comparing those field's strings in natural order.
// Ties on Name fall through to Kind, then to a Kubernetes-aware Version
// comparison (newer versions first).
func sortDescription(v interface{}) {
	slice := reflect.ValueOf(v)
	values := toValueSlice(slice)
	sort.Slice(values, func(i, j int) bool {
		nameI := values[i].FieldByName("Name").String()
		nameJ := values[j].FieldByName("Name").String()
		if nameI == nameJ {
			kindI := values[i].FieldByName("Kind").String()
			kindJ := values[j].FieldByName("Kind").String()
			if kindI == kindJ {
				versionI := values[i].FieldByName("Version").String()
				versionJ := values[j].FieldByName("Version").String()
				return version.CompareKubeAwareVersionStrings(versionI, versionJ) > 0
			}
			return kindI < kindJ
		}
		return nameI < nameJ
	})
	// Write the sorted copies back into the original slice.
	for i := 0; i < slice.Len(); i++ {
		slice.Index(i).Set(values[i])
	}
}
// toValueSlice creates a slice of values that can be sorted by arbitrary fields.
// The input slice is copied first so sorting the returned values does not
// disturb the original until they are written back.
func toValueSlice(slice reflect.Value) []reflect.Value {
	dup := reflect.MakeSlice(slice.Type(), slice.Len(), slice.Len())
	reflect.Copy(dup, slice)
	values := make([]reflect.Value, 0, dup.Len())
	for i := 0; i < dup.Len(); i++ {
		values = append(values, dup.Index(i))
	}
	return values
}
|
package architecture
// Func describes a function declaration: where it lives and the types it
// accepts and returns.
type Func struct {
	Name        string // function name
	Package     string // declaring package
	Filename    string // file the function was found in
	ParmTypes   []Type // parameter types, in order
	ReturnTypes []Type // return types, in order
}
|
package runner
import (
pb "github.com/tradingAI/proto/gen/go/scheduler"
"github.com/tradingAI/runner/plugins"
)
// creatTestRunner builds a Runner from the default configuration for tests.
// NOTE(review): the name has a typo ("creat"); renaming would break callers
// elsewhere in the package. Errors are deliberately ignored — test-only helper.
func creatTestRunner() (r *Runner) {
	conf, _ := LoadConf()
	r, _ = New(conf)
	return
}
// createTestJob returns a fixed TRAIN job fixture with a default tbase input.
func createTestJob() (job *pb.Job) {
	job = &pb.Job{
		Id:       uint64(123456789),
		RunnerId: "test_runner_id",
		Type:     pb.JobType_TRAIN,
		Input:    plugins.CreateDefaultTbaseTrainJobInput(),
	}
	return
}
// createTestContainer returns a Container fixture wrapping createTestJob's
// job and the plugin derived from it.
func createTestContainer() (container *Container) {
	job := createTestJob()
	container = &Container{
		Name:    "123456789",
		ID:      "123456789",
		ShortID: "123",
		Job:     job,
		Plugin:  plugins.New(job),
	}
	return
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
"strconv"
)
// ExtendedMemoryStats type.
//
// Generated from the elasticsearch-specification; all fields are optional
// pointers so absent JSON keys stay nil.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/nodes/_types/Stats.ts#L261-L264
type ExtendedMemoryStats struct {
	AdjustedTotalInBytes *int64  `json:"adjusted_total_in_bytes,omitempty"`
	FreeInBytes          *int64  `json:"free_in_bytes,omitempty"`
	FreePercent          *int    `json:"free_percent,omitempty"`
	Resident             *string `json:"resident,omitempty"`
	ResidentInBytes      *int64  `json:"resident_in_bytes,omitempty"`
	Share                *string `json:"share,omitempty"`
	ShareInBytes         *int64  `json:"share_in_bytes,omitempty"`
	TotalInBytes         *int64  `json:"total_in_bytes,omitempty"`
	TotalVirtual         *string `json:"total_virtual,omitempty"`
	TotalVirtualInBytes  *int64  `json:"total_virtual_in_bytes,omitempty"`
	UsedInBytes          *int64  `json:"used_in_bytes,omitempty"`
	UsedPercent          *int    `json:"used_percent,omitempty"`
}
// UnmarshalJSON decodes ExtendedMemoryStats from JSON, accepting both numeric
// and quoted-string representations for the numeric fields (Elasticsearch may
// emit either). Generated code — the per-field cases are intentionally uniform.
func (s *ExtendedMemoryStats) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	// Walk the token stream; each recognized key token is followed by its value.
	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}
		switch t {
		case "adjusted_total_in_bytes":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.AdjustedTotalInBytes = &value
			case float64:
				f := int64(v)
				s.AdjustedTotalInBytes = &f
			}
		case "free_in_bytes":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.FreeInBytes = &value
			case float64:
				f := int64(v)
				s.FreeInBytes = &f
			}
		case "free_percent":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.Atoi(v)
				if err != nil {
					return err
				}
				s.FreePercent = &value
			case float64:
				f := int(v)
				s.FreePercent = &f
			}
		case "resident":
			// String fields: unquote when possible, else keep the raw text.
			var tmp json.RawMessage
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			o := string(tmp[:])
			o, err = strconv.Unquote(o)
			if err != nil {
				o = string(tmp[:])
			}
			s.Resident = &o
		case "resident_in_bytes":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.ResidentInBytes = &value
			case float64:
				f := int64(v)
				s.ResidentInBytes = &f
			}
		case "share":
			var tmp json.RawMessage
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			o := string(tmp[:])
			o, err = strconv.Unquote(o)
			if err != nil {
				o = string(tmp[:])
			}
			s.Share = &o
		case "share_in_bytes":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.ShareInBytes = &value
			case float64:
				f := int64(v)
				s.ShareInBytes = &f
			}
		case "total_in_bytes":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.TotalInBytes = &value
			case float64:
				f := int64(v)
				s.TotalInBytes = &f
			}
		case "total_virtual":
			var tmp json.RawMessage
			if err := dec.Decode(&tmp); err != nil {
				return err
			}
			o := string(tmp[:])
			o, err = strconv.Unquote(o)
			if err != nil {
				o = string(tmp[:])
			}
			s.TotalVirtual = &o
		case "total_virtual_in_bytes":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.TotalVirtualInBytes = &value
			case float64:
				f := int64(v)
				s.TotalVirtualInBytes = &f
			}
		case "used_in_bytes":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.UsedInBytes = &value
			case float64:
				f := int64(v)
				s.UsedInBytes = &f
			}
		case "used_percent":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.Atoi(v)
				if err != nil {
					return err
				}
				s.UsedPercent = &value
			case float64:
				f := int(v)
				s.UsedPercent = &f
			}
		}
	}
	return nil
}
// NewExtendedMemoryStats returns a ExtendedMemoryStats.
// All fields start nil; populate via UnmarshalJSON or direct assignment.
func NewExtendedMemoryStats() *ExtendedMemoryStats {
	r := &ExtendedMemoryStats{}
	return r
}
|
package db
import (
"fmt"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/helloferdie/stdgo/libslice"
"github.com/helloferdie/stdgo/logger"
"github.com/go-sql-driver/mysql"
_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
)
// ConnectionString -
func ConnectionString() string {
conn := "default:default@(127.0.0.1:3306)/golang?timeout=10s&charset=utf8mb4&parseTime=true"
dbHost := os.Getenv("db_host")
if dbHost != "" {
host := os.Getenv("db_host")
user := os.Getenv("db_user")
pass := os.Getenv("db_pass")
port := os.Getenv("db_port")
dbName := os.Getenv("db_name")
conn = user + ":" + pass + "@(" + host + ":" + port + ")/" + dbName + "?timeout=10s&charset=utf8mb4&parseTime=true"
}
return conn
}
// CustomConnectionString builds a MySQL DSN from the supplied config map
// (keys: db_host, db_user, db_pass, db_port, db_name).
// NOTE(review): this uses charset=utf8 while ConnectionString uses
// utf8mb4 — confirm the difference is intentional.
func CustomConnectionString(config map[string]string) string {
	return config["db_user"] + ":" + config["db_pass"] +
		"@(" + config["db_host"] + ":" + config["db_port"] + ")/" +
		config["db_name"] + "?timeout=10s&charset=utf8&parseTime=true"
}
// Open -
// Open establishes a MySQL connection using conn (or, when conn is
// empty, the environment-derived DSN), retrying up to 3 times via
// OpenRetry.
func Open(conn string) (*sqlx.DB, error) {
	return OpenRetry(conn, 3)
}
// OpenRetry -
// OpenRetry opens a MySQL connection, retrying up to maxRetry times with
// a 3 second pause between attempts. An empty conn falls back to
// ConnectionString(); maxRetry <= 0 falls back to 3.
// Fix: the last-attempt check was hardcoded to `retry == 2`, so for
// maxRetry < 3 the loop fell through and returned (nil, nil) — a nil DB
// with no error — and for maxRetry > 3 the extra retries never ran.
func OpenRetry(conn string, maxRetry int) (*sqlx.DB, error) {
	if conn == "" {
		conn = ConnectionString()
	}
	if maxRetry <= 0 {
		maxRetry = 3
	}
	var (
		db  *sqlx.DB
		err error
	)
	for retry := 0; retry < maxRetry; retry++ {
		db, err = sqlx.Connect("mysql", conn)
		if err == nil {
			return db, nil
		}
		if retry == maxRetry-1 {
			// Stop retrying after maxRetry attempts.
			logger.PrintLogEntry("error", fmt.Sprintf("Error establish database connection after retry %d times %v", maxRetry, err), true)
			return db, err
		}
		errType := reflect.TypeOf(err).String()
		logger.PrintLogEntry("error", fmt.Sprintf("Error establish database connection %v type: %s", err, errType), true)
		// Sleep and wait for connection availability.
		time.Sleep(time.Second * 3)
	}
	return db, err
}
// Exec -
// Exec runs a named statement and reports the last insert id and the
// number of affected rows.
func Exec(db *sqlx.DB, query string, values map[string]interface{}) (int64, int64, error) {
	result, err := db.NamedExec(query, values)
	if err != nil {
		logger.MakeLogEntry(nil, true).Errorf("Error execute query %v", err)
		return 0, 0, err
	}
	lastID, idErr := result.LastInsertId()
	if idErr != nil {
		return 0, 0, idErr
	}
	affected, rowsErr := result.RowsAffected()
	if rowsErr != nil {
		return 0, 0, rowsErr
	}
	return lastID, affected, nil
}
// ExecList -
// ExecList executes a list of named statements inside a single
// transaction. Each element must be a map carrying a "query" (string)
// and "values" (map[string]interface{}) entry.
// Fixes: the function previously returned nil even after a failed
// statement forced a rollback (errors were swallowed), the validation
// early-returns left the transaction open (leak), and the commit error
// was ignored.
func ExecList(db *sqlx.DB, list []interface{}) error {
	tx, err := db.Beginx()
	if err != nil {
		logger.MakeLogEntry(nil, true).Errorf("Error begin query transactions %v", err)
		return err
	}
	for k, data := range list {
		d, ok := data.(map[string]interface{})
		if !ok {
			logger.MakeLogEntry(nil, true).Errorf("Failed to convert interface{} for statement %v", strconv.Itoa(k))
			tx.Rollback()
			return fmt.Errorf("%s", "general.error_query_transaction")
		}
		q, qExist := d["query"].(string)
		if !qExist {
			logger.MakeLogEntry(nil, true).Errorf("Failed to find query for statement %v", strconv.Itoa(k))
			tx.Rollback()
			return fmt.Errorf("%s", "general.error_query_transaction")
		}
		v, vExist := d["values"].(map[string]interface{})
		if !vExist {
			logger.MakeLogEntry(nil, true).Errorf("Failed to find argument values for statement %v", strconv.Itoa(k))
			tx.Rollback()
			return fmt.Errorf("%s", "general.error_query_transaction")
		}
		if _, err = tx.NamedExec(q, v); err != nil {
			logger.MakeLogEntry(nil, true).Errorf("Failed to find execute statement %v", strconv.Itoa(k))
			break
		}
	}
	if err != nil {
		logger.MakeLogEntry(nil, true).Errorf("Error execute list query transactions, operation has been rollback %v", err)
		tx.Rollback()
		return err
	}
	return tx.Commit()
}
// Get -
// Get runs a named query and scans the first row (if any) into list,
// reporting whether a row was found.
// Fix: rows were closed twice (defer plus an explicit Close); the
// redundant explicit Close has been removed.
func Get(db *sqlx.DB, list interface{}, query string, values map[string]interface{}) (bool, error) {
	exist := false
	rows, err := db.NamedQuery(query, values)
	if err != nil {
		logger.MakeLogEntry(nil, true).Errorf("Error get query %v", err)
		return exist, err
	}
	defer rows.Close()
	if rows.Next() {
		err = rows.StructScan(list)
		if err != nil {
			logger.MakeLogEntry(nil, true).Errorf("Error scan row %v", err)
			return exist, err
		}
		exist = true
	}
	return exist, nil
}
// Select -
// Select runs a named query and scans all result rows into list.
// Fix: the prepared named statement was never closed, leaking a
// server-side prepared statement per call; it is now closed via defer.
func Select(db *sqlx.DB, list interface{}, query string, values map[string]interface{}) error {
	nstmt, err := db.PrepareNamed(query)
	if err != nil {
		logger.MakeLogEntry(nil, true).Errorf("Error select prepare named query %v", err)
		return err
	}
	defer nstmt.Close()
	if err := nstmt.Select(list, values); err != nil {
		logger.MakeLogEntry(nil, true).Errorf("Error select query %v", err)
		return err
	}
	return nil
}
// PrepareInsert -
// PrepareInsert builds a named INSERT statement over data's db-tagged
// fields, excluding any tag listed in skip. With an empty skip list the
// standard audit columns are excluded automatically and
// created_at/updated_at are populated with the current UTC time.
// Returns the query and its named-argument map.
func PrepareInsert(table string, data interface{}, skip []string) (string, map[string]interface{}) {
	val := reflect.ValueOf(data)
	if val.Kind() == reflect.Ptr {
		val = val.Elem()
	}
	typ := val.Type()

	// An empty skip list means "default behavior": audit columns are
	// managed here rather than by the caller.
	autoTimestamps := false
	if len(skip) == 0 {
		skip = []string{"id", "created_at", "updated_at", "deleted_at"}
		autoTimestamps = true
	}

	cols, params := "", ""
	args := map[string]interface{}{}
	for i := 0; i < val.NumField(); i++ {
		tag := typ.Field(i).Tag.Get("db")
		if tag == "" {
			continue
		}
		if _, skipped := libslice.Contains(tag, skip); skipped {
			continue
		}
		if cols != "" {
			cols += ", "
			params += ", "
		}
		cols += "`" + tag + "`"
		params += ":" + tag
		args[tag] = val.Field(i).Interface()
	}
	if autoTimestamps {
		cols += ", `created_at`, `updated_at`"
		params += ", :created_at, :updated_at"
		args["created_at"] = time.Now().UTC()
		args["updated_at"] = time.Now().UTC()
	}
	return "INSERT INTO " + table + " (" + cols + ") VALUES (" + params + ")", args
}
// PrepareInsertOnly -
// PrepareInsertOnly builds a named INSERT statement containing only the
// db-tagged fields whose tags appear in the `only` whitelist. Returns
// the query and its named-argument map.
func PrepareInsertOnly(table string, data interface{}, only []string) (string, map[string]interface{}) {
	val := reflect.ValueOf(data)
	if val.Kind() == reflect.Ptr {
		val = val.Elem()
	}
	typ := val.Type()

	cols, params := "", ""
	args := map[string]interface{}{}
	for i := 0; i < val.NumField(); i++ {
		tag := typ.Field(i).Tag.Get("db")
		if tag == "" {
			continue
		}
		if _, wanted := libslice.Contains(tag, only); !wanted {
			continue
		}
		if cols != "" {
			cols += ", "
			params += ", "
		}
		cols += "`" + tag + "`"
		params += ":" + tag
		args[tag] = val.Field(i).Interface()
	}
	return "INSERT INTO " + table + " (" + cols + ") VALUES (" + params + ")", args
}
// PrepareUpdate -
// PrepareUpdate builds a named UPDATE statement containing only the
// db-tagged fields whose values differ between old and new. It returns
// the query, the named-argument map, and a diff map recording, per
// changed tag, the old ("o") and new ("n") values.
// With an empty skip list, id/created_at/updated_at are excluded,
// updated_at is refreshed, and deleted_at is reset to NULL (un-soft-
// delete on update). An empty condition defaults to "AND id = :id" —
// NOTE(review): in that case the caller must supply :id through
// conditionVal, since it is not added here; confirm callers do so.
func PrepareUpdate(table string, old interface{}, new interface{}, skip []string, condition string, conditionVal map[string]interface{}) (string, map[string]interface{}, map[string]interface{}) {
	rVal := reflect.ValueOf(old)
	if rVal.Kind() == reflect.Ptr {
		rVal = rVal.Elem()
	}
	rType := rVal.Type()
	nVal := reflect.ValueOf(new)
	if nVal.Kind() == reflect.Ptr {
		nVal = nVal.Elem()
	}
	col := ""
	// isCustom tracks whether the caller provided its own skip list; the
	// default list triggers automatic audit-column handling below.
	isCustom := true
	if len(skip) == 0 {
		skip = []string{"id", "created_at", "updated_at"}
		isCustom = false
	}
	v := map[string]interface{}{}
	diff := map[string]interface{}{}
	for i := 0; i < rVal.NumField(); i++ {
		tag := rType.Field(i).Tag.Get("db")
		if tag == "" {
			continue
		}
		_, exist := libslice.Contains(tag, skip)
		if exist {
			continue
		}
		// Unchanged fields are omitted from both SET clause and diff.
		if rVal.Field(i).Interface() == nVal.Field(i).Interface() {
			continue
		}
		if col != "" {
			col += ", "
		}
		col += "`" + tag + "` = :" + tag
		v[tag] = nVal.Field(i).Interface()
		diff[tag] = map[string]interface{}{
			"o": rVal.Field(i).Interface(),
			"n": nVal.Field(i).Interface(),
		}
	}
	if !isCustom {
		// Default behavior: bump updated_at and clear any soft delete.
		col += ", updated_at = :updated_at, deleted_at = NULL"
		v["updated_at"] = time.Now().UTC()
	}
	if condition == "" {
		condition += "AND id = :id "
	}
	// Merge caller-supplied condition arguments into the named args.
	for k, c := range conditionVal {
		v[k] = c
	}
	// "WHERE 1=1" lets the condition always begin with "AND".
	query := "UPDATE " + table + " SET " + col + " WHERE 1=1 " + condition
	return query, v, diff
}
// PrepareDelete -
// PrepareDelete builds the statement deleting the row whose primary key
// equals pk: a real DELETE, or — when softDelete is set — an UPDATE that
// stamps updated_at/deleted_at instead. Returns the query and its
// named-argument map.
func PrepareDelete(table string, pk interface{}, softDelete bool) (string, map[string]interface{}) {
	args := map[string]interface{}{"id": pk}
	if softDelete {
		args["updated_at"] = time.Now().UTC()
		args["deleted_at"] = time.Now().UTC()
		return "UPDATE " + table + " SET updated_at = :updated_at, deleted_at = :deleted_at WHERE id = :id", args
	}
	return "DELETE FROM " + table + " WHERE id = :id", args
}
// PrepareOrder -
// PrepareOrder builds the ORDER BY (and, unless params["show"] is true,
// LIMIT) clause from request params, falling back to the supplied
// defaults; def["custom"] supplies a verbatim ordering clause and then
// suppresses the direction.
// NOTE(review): field and direction values are concatenated directly
// into SQL — callers must whitelist them, as this is injection-prone.
func PrepareOrder(params map[string]interface{}, def map[string]interface{}) string {
	clause := ""
	field, hasField := params["field"].(string)
	defField, _ := def["field"].(string)
	customOrder := false
	if hasField && field != "" {
		clause += "ORDER BY `" + field + "` "
	} else {
		defCustom, _ := def["custom"].(string)
		if defCustom == "" {
			clause += "ORDER BY `" + defField + "` "
		} else {
			clause += defCustom + " "
			customOrder = true
		}
	}
	if !customOrder {
		dir, hasDir := params["direction"].(string)
		defDir, _ := def["direction"].(string)
		if hasDir && dir != "" {
			clause += dir + " "
		} else {
			clause += defDir + " "
		}
	}
	if show, ok := params["show"].(bool); !ok || !show {
		clause += "LIMIT "
		if start, ok := params["start"].(int64); ok {
			clause += strconv.FormatInt(start, 10) + ", "
		} else {
			defStart, _ := def["start"].(int64)
			clause += strconv.FormatInt(defStart, 10) + ", "
		}
		if limit, ok := params["limit"].(int64); ok {
			clause += strconv.FormatInt(limit, 10) + " "
		} else {
			defLimit, _ := def["limit"].(int64)
			clause += strconv.FormatInt(defLimit, 10) + " "
		}
	}
	return clause
}
// CheckTableExists -
// CheckTableExists reports whether table exists in schema dbName,
// falling back to the db_name environment variable when dbName is empty.
func CheckTableExists(db *sqlx.DB, dbName string, table string) (bool, error) {
	if dbName == "" {
		dbName = os.Getenv("db_name")
	}
	// Minimal struct to scan the COUNT(*) result into.
	type pagination struct {
		TotalItems int64 `db:"total"`
	}
	row := new(pagination)
	query := "SELECT COUNT(*) AS total FROM information_schema.tables WHERE table_schema = :db AND table_name = :table LIMIT 1;"
	_, err := Get(db, row, query, map[string]interface{}{
		"table": table,
		"db":    dbName,
	})
	return err == nil && row.TotalItems == 1, err
}
// Query -
// Query runs a positional-parameter query and returns the raw rows.
// The caller is responsible for closing the returned rows.
func Query(db *sqlx.DB, query string, args []interface{}) (*sqlx.Rows, error) {
	result, err := db.Queryx(query, args...)
	if err != nil {
		logger.MakeLogEntry(nil, true).Errorf("Error select query %v", err)
		return nil, err
	}
	return result, nil
}
// InsertMultiple -
// InsertMultiple bulk-inserts the rows in value (each a []interface{}
// aligned with data's non-skipped db-tagged fields) with one INSERT
// statement. Returns the last insert id and the number of rows affected.
// Fix: the prepared statement was never closed (server-side statement
// leak); it is now closed via defer.
func InsertMultiple(db *sqlx.DB, table string, data interface{}, value []interface{}, skip []string) (int64, int64, error) {
	rVal := reflect.ValueOf(data)
	if rVal.Kind() == reflect.Ptr {
		rVal = rVal.Elem()
	}
	rType := rVal.Type()
	col := ""
	if len(skip) == 0 {
		skip = []string{"id", "created_at", "updated_at", "deleted_at"}
	}
	// Collect the column list from data's db tags.
	for i := 0; i < rVal.NumField(); i++ {
		tag := rType.Field(i).Tag.Get("db")
		if tag == "" {
			continue
		}
		if _, exist := libslice.Contains(tag, skip); exist {
			continue
		}
		if col != "" {
			col += ", "
		}
		col += "`" + tag + "`"
	}
	// Build one (?,...,?) placeholder group per row and flatten values.
	var vals []interface{}
	query := "INSERT INTO " + table + " (" + col + ") VALUES "
	cols := strings.Split(col, ",")
	for _, row := range value {
		query += `(?` + strings.Repeat(",?", len(cols)-1) + `),`
		v := row.([]interface{})
		vals = append(vals, v...)
	}
	// trim the last ,
	query = query[0 : len(query)-1]
	// prepare the statement
	stmt, err := db.Prepare(query)
	if err != nil {
		logger.MakeLogEntry(nil, true).Errorf("Error prepare query %v", err)
		return 0, 0, err
	}
	defer stmt.Close()
	// format all vals at once
	res, err := stmt.Exec(vals...)
	if err != nil {
		logger.MakeLogEntry(nil, true).Errorf("Error exec query %v", err)
		return 0, 0, err
	}
	id, err := res.LastInsertId()
	if err != nil {
		return 0, 0, err
	}
	rows, err := res.RowsAffected()
	if err != nil {
		return 0, 0, err
	}
	return id, rows, nil
}
// regexDuplicateKey extracts single-quoted tokens from a MySQL error
// message (e.g. the entry and the key name of a 1062 duplicate-key
// error). Fix: the character class previously contained a doubled quote
// (`[^'']`), which is equivalent but misleading; simplified to `[^']`.
var regexDuplicateKey = regexp.MustCompile(`'([^']*)'`)

// ParseError -
// ParseError maps a driver error to (code, detail, err). Non-MySQL
// errors yield (-1, "", err). For error 1062 (duplicate key) the detail
// is the offending key name (the second quoted token of the message);
// otherwise the detail is the raw MySQL message.
func ParseError(err error) (int, string, error) {
	p, ok := err.(*mysql.MySQLError)
	if !ok {
		return -1, "", err
	}
	if p.Number == 1062 {
		key := ""
		matches := regexDuplicateKey.FindAllStringSubmatch(p.Message, -1)
		if len(matches) >= 2 {
			key = matches[1][1]
		}
		return 1062, key, err
	}
	return int(p.Number), p.Message, err
}
|
package PV
import (
"DataApi.Go/lib/common"
)
// SumPV holds an aggregated page-view total.
type SumPV struct {
	Total int
}
// StatPagePV is a gorm model holding per-page page-view statistics.
// Column semantics are inferred from the names (pv = page views,
// ypa_* = audience attributes, stay_*_count = dwell-time buckets) —
// confirm against the table definition.
type StatPagePV struct {
	ID              uint   `gorm:"primary_key"`
	DatetimeIntid   int    `gorm:"type:int(11);column:datetime_intid;"`
	PageId          string `gorm:"type:varchar(64);column:page_id;"`
	PageTitle       string `gorm:"type:varchar(2048);column:page_title;"`
	PageAuthor      string `gorm:"type:varchar(45);column:page_author;"`
	PageUrl         string `gorm:"type:varchar(8192);column:page_url;"`
	PageHostname    string `gorm:"type:varchar(512);column:page_hostname;"`
	PvAlgonum       string `gorm:"type:varchar(45);column:pv_algonum;"`
	Pv              int    `gorm:"type:int(11);column:pv;"`
	PvValid         int    `gorm:"type:int(11);column:pv_valid;"`
	PvInvalid       int    `gorm:"type:int(11);column:pv_invalid;"`
	YpaAge          int    `gorm:"type:int(11);column:ypa_age;"`
	YpaGender       int    `gorm:"type:int(11);column:ypa_gender;"`
	FbId            int    `gorm:"type:int(11);column:fb_id;"`
	LineId          int    `gorm:"type:int(11);column:line_id;"`
	Highlightedtext int    `gorm:"type:int(11);column:highlightedtext;"`
	Openlink        int    `gorm:"type:int(11);column:openlink;"`
	PvCount         int    `gorm:"type:int(11);column:pv_count;"`
	Stay0Count      int    `gorm:"type:int(11);column:stay_0_count;"`
	Stay1Count      int    `gorm:"type:int(11);column:stay_1_count;"`
}
// Serialize serializes user data
func (u *StatPagePV) Serialize() common.JSON {
	out := common.JSON{}
	out["id"] = u.ID
	out["pv"] = u.Pv
	out["page_id"] = u.PageId
	return out
}
// Read populates the struct from a generic JSON map.
// Fix: a pv that went through encoding/json arrives as float64, which
// made the plain int assertion panic; both numeric forms are accepted
// now (any other type still panics, as before).
func (u *StatPagePV) Read(m common.JSON) {
	u.ID = uint(m["id"].(float64))
	if f, ok := m["pv"].(float64); ok {
		u.Pv = int(f)
	} else {
		u.Pv = m["pv"].(int)
	}
	u.PageId = m["page_id"].(string)
}
// Serialize converts the total into a generic JSON map.
func (u *SumPV) Serialize() common.JSON {
	out := common.JSON{}
	out["total"] = u.Total
	return out
}

// Read populates the total from a generic JSON map.
func (u *SumPV) Read(m common.JSON) {
	u.Total = m["total"].(int)
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
)
// DB is an interface that interacts with the addressBook database
type DB struct{}
func (d *DB) create(filename string) error {
file, err := os.Create(filename)
defer file.Close()
return err
}
// writeToFile writes data to the file at location with 0777 permissions,
// terminating the process on failure.
func (d *DB) writeToFile(location string, data string) {
	if err := ioutil.WriteFile(location, []byte(data), 0777); err != nil {
		log.Fatal(err)
	}
}
// readFromFile reads and returns the entire contents of the file at
// location, terminating the process on failure.
// Fix: the local variable was named `byte`, shadowing the builtin type.
func (d *DB) readFromFile(location string) []byte {
	content, err := ioutil.ReadFile(location)
	if err != nil {
		log.Fatal(err)
	}
	return content
}
// ByteToString converts an array of bytes to string
func ByteToString(b *[]byte) string {
	return string(*b)
}
// DeleteFile removes file in storage.
// Fix: the success message was printed even when os.Remove failed; it is
// now printed only after a successful removal.
func (d *DB) DeleteFile(path string) error {
	if err := os.Remove(path); err != nil {
		return err
	}
	fmt.Println("==> done deleting file -> " + path)
	return nil
}
|
// Package historicalbeat is a Metricbeat module that contains MetricSets.
package historicalbeat
|
package store
import (
"github.com/johnwyles/vrddt-reboot/pkg/reddit"
"github.com/johnwyles/vrddt-reboot/pkg/vrddt"
)
// Selector is a generic set of field/value constraints used by the
// store methods to match records.
type Selector map[string]interface{}

// Store is the generic interface for a persistence store
type Store interface {
	// Cleanup releases resources held by the store.
	Cleanup() (err error)
	// Create* persist a new video record.
	CreateRedditVideo(redditVideo *reddit.Video) (err error)
	CreateVrddtVideo(vrddtVideo *vrddt.Video) (err error)
	// Delete* remove the record(s) matching selector; the singular and
	// plural variants differ in how many records they target.
	DeleteRedditVideo(selector Selector) (err error)
	DeleteRedditVideos(selector Selector) (err error)
	DeleteVrddtVideo(selector Selector) (err error)
	DeleteVrddtVideos(selector Selector) (err error)
	// Get* fetch the record(s) matching selector.
	GetRedditVideo(selector Selector) (redditVideo *reddit.Video, err error)
	GetRedditVideos(selector Selector) (redditVideo []*reddit.Video, err error)
	GetVrddtVideo(selector Selector) (vrddtVideo *vrddt.Video, err error)
	GetVrddtVideos(selector Selector) (vrddtVideo []*vrddt.Video, err error)
	// Init prepares the store for use; exact duties depend on the
	// backend implementation — confirm per implementation.
	Init() (err error)
}
|
package keeper
import (
"context"
"errors"
"fmt"
"time"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
"github.com/octalmage/gitgood/x/gitgood/types"
"github.com/tendermint/tendermint/crypto"
)
// CreateStat records a stat, minting and paying out `exp` coins to the
// owner's derived team account when the referenced goal is met.
func (k msgServer) CreateStat(goCtx context.Context, msg *types.MsgCreateStat) (*types.MsgCreateStatResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)

	// Resolve the goal referenced by StatType; as in the original scan,
	// a later matching goal overrides an earlier one.
	var (
		goal  types.Goal
		found bool
	)
	for _, g := range k.GetAllGoal(ctx) {
		if int32(g.Id) == msg.StatType {
			goal = g
			found = true
		}
	}
	if !found {
		return nil, errors.New("bad goal")
	}

	// Decide whether the reported progress satisfies the goal.
	met := false
	switch goal.Comparison {
	case 0:
		met = msg.Final >= msg.Initial
	case 1:
		met = msg.Final > msg.Initial
	}

	if met {
		feeCoins, err := sdk.ParseCoinsNormalized(fmt.Sprintf("%dexp", goal.Exp))
		if err != nil {
			return nil, err
		}
		// mint fresh exp to the mint module.
		if err := k.bankKeeper.MintCoins(ctx, minttypes.ModuleName, feeCoins); err != nil {
			return nil, err
		}
		// TODO: Use team name instead of team ID.
		teamAcct := sdk.AccAddress(crypto.AddressHash([]byte(msg.Owner)))
		// exp to team
		if err := k.bankKeeper.SendCoinsFromModuleToAccount(ctx, minttypes.ModuleName, teamAcct, feeCoins); err != nil {
			return nil, err
		}
	}

	id := k.AppendStat(ctx, types.Stat{
		Creator:   msg.Creator,
		StatType:  msg.StatType,
		Initial:   msg.Initial,
		Final:     msg.Final,
		Owner:     msg.Owner,
		CreatedAt: time.Now().Unix(),
	})
	return &types.MsgCreateStatResponse{Id: id}, nil
}
// UpdateStat replaces an existing stat after verifying it exists and the
// message sender is its creator.
func (k msgServer) UpdateStat(goCtx context.Context, msg *types.MsgUpdateStat) (*types.MsgUpdateStatResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)

	// Checks that the element exists.
	if !k.HasStat(ctx, msg.Id) {
		return nil, sdkerrors.Wrap(sdkerrors.ErrKeyNotFound, fmt.Sprintf("key %d doesn't exist", msg.Id))
	}
	// Checks that the msg sender is the same as the current owner.
	if msg.Creator != k.GetStatOwner(ctx, msg.Id) {
		return nil, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "incorrect owner")
	}

	k.SetStat(ctx, types.Stat{
		Creator:  msg.Creator,
		Id:       msg.Id,
		StatType: msg.StatType,
		Initial:  msg.Initial,
		Final:    msg.Final,
		Owner:    msg.Owner,
	})
	return &types.MsgUpdateStatResponse{}, nil
}
// DeleteStat removes a stat after verifying it exists and the message
// sender is its creator.
func (k msgServer) DeleteStat(goCtx context.Context, msg *types.MsgDeleteStat) (*types.MsgDeleteStatResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	if !k.HasStat(ctx, msg.Id) {
		return nil, sdkerrors.Wrap(sdkerrors.ErrKeyNotFound, fmt.Sprintf("key %d doesn't exist", msg.Id))
	}
	if msg.Creator != k.GetStatOwner(ctx, msg.Id) {
		return nil, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "incorrect owner")
	}
	k.RemoveStat(ctx, msg.Id)
	return &types.MsgDeleteStatResponse{}, nil
}
|
package alertmanager
import (
"context"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
"github.com/go-openapi/strfmt"
"github.com/prometheus/alertmanager/api/v2/models"
"github.com/prometheus/alertmanager/types"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
)
// Canned Alertmanager v2 API response bodies served by the stub server
// in TestClient.
const (
	// jsonStatus mirrors GET /api/v2/status.
	jsonStatus = `
{
  "cluster": {
    "name": "01EXA9YHW49D5MR2K45MX69408",
    "peers": [
      {
        "address": "100.64.3.143:9094",
        "name": "01EXA9YHW49D5MR2K45MX69408"
      }
    ],
    "status": "ready"
  },
  "config": {
    "original": "global"
  },
  "uptime": "2021-01-30T18:47:40",
  "versionInfo": {
    "branch": "HEAD",
    "buildDate": "20200617-08:54:02",
    "buildUser": "root@dee35927357f",
    "goVersion": "go1.14.4",
    "revision": "4c6c03ebfe21009c546e4d1e9b92c371d67c021d",
    "version": "0.21.0"
  }
}
`
	// jsonAlerts mirrors GET /api/v2/alerts (a single active alert).
	jsonAlerts = `
[
  {
    "annotations": {
      "message": "This is an alert meant to ensure that the entire alerting pipeline is functional."
    },
    "endsAt": "2021-02-22T00:52:37.000Z",
    "fingerprint": "7a90bbdd1d39f61b",
    "receivers": [{"name": "healthcheck"}],
    "startsAt": "2021-01-27T16:56:37.000Z",
    "status": {
      "inhibitedBy": [],
      "silencedBy": [],
      "state": "active"
    },
    "updatedAt": "2021-02-22T00:48:37.000Z",
    "generatorURL": "https://prometheus.io/graph?g0.expr=vector%281%29\u0026g0.tab=1",
    "labels": {
      "alertname": "Watchdog",
      "prometheus": "monitoring/k8s",
      "severity": "none"
    }
  }
]`
	// jsonSilences mirrors GET /api/v2/silences (a single active silence).
	jsonSilences = `[
  {
    "id": "34f5f82b-b66f-456b-aff7-b556a7eafe81",
    "status": {"state": "active"},
    "updatedAt": "2021-01-11T16:10:11.000Z",
    "comment": "foo",
    "createdBy": "metalmatze",
    "endsAt": "2022-01-11T16:10:02.000Z",
    "matchers": [
      {
        "isRegex": false,
        "name": "alertname",
        "value": "KubeMemoryOvercommit"
      },
      {
        "isRegex": false,
        "name": "prometheus",
        "value": "monitoring/metalmatze"
      },
      {
        "isRegex": false,
        "name": "severity",
        "value": "warning"
      }
    ],
    "startsAt": "2021-01-11T16:10:11.000Z"
  }
]
`
)
// TestClient spins up a stub Alertmanager HTTP API serving the canned
// fixtures above and verifies that the client decodes status, alerts
// and silences into the expected model values.
func TestClient(t *testing.T) {
	m := http.NewServeMux()
	m.HandleFunc("/api/v2/status", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(jsonStatus))
	})
	m.HandleFunc("/api/v2/alerts", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(jsonAlerts))
	})
	m.HandleFunc("/api/v2/silences", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(jsonSilences))
	})
	s := httptest.NewServer(m)
	defer s.Close()
	u, _ := url.Parse(s.URL)
	client, err := NewClient(u)
	require.NoError(t, err)
	// Status: every field of the fixture must round-trip. The models use
	// pointer fields, hence the local variables.
	{
		cs := "ready"
		pa := "100.64.3.143:9094"
		pn := "01EXA9YHW49D5MR2K45MX69408"
		config := "global"
		uptime := strfmt.DateTime(time.Date(2021, 01, 30, 18, 47, 40, 0, time.UTC))
		branch := "HEAD"
		buildDate := "20200617-08:54:02"
		buildUser := "root@dee35927357f"
		goVersion := "go1.14.4"
		revision := "4c6c03ebfe21009c546e4d1e9b92c371d67c021d"
		version := "0.21.0"
		expected := &models.AlertmanagerStatus{
			Cluster: &models.ClusterStatus{
				Name: "01EXA9YHW49D5MR2K45MX69408",
				Peers: []*models.PeerStatus{{
					Address: &pa,
					Name:    &pn,
				}},
				Status: &cs,
			},
			Config: &models.AlertmanagerConfig{Original: &config},
			Uptime: &uptime,
			VersionInfo: &models.VersionInfo{
				Branch:    &branch,
				BuildDate: &buildDate,
				BuildUser: &buildUser,
				GoVersion: &goVersion,
				Revision:  &revision,
				Version:   &version,
			},
		}
		status, err := client.Status(context.Background())
		require.NoError(t, err)
		require.Equal(t, expected, status)
	}
	// Alerts: the JSON-escaped \u0026 in generatorURL must decode to '&'.
	{
		expected := []*types.Alert{{
			Alert: model.Alert{
				Labels: model.LabelSet{
					model.LabelName("alertname"):  model.LabelValue("Watchdog"),
					model.LabelName("prometheus"): model.LabelValue("monitoring/k8s"),
					model.LabelName("severity"):   model.LabelValue("none"),
				},
				Annotations: model.LabelSet{
					model.LabelName("message"): model.LabelValue("This is an alert meant to ensure that the entire alerting pipeline is functional."),
				},
				StartsAt:     time.Date(2021, 01, 27, 16, 56, 37, 0, time.UTC),
				EndsAt:       time.Date(2021, 02, 22, 0, 52, 37, 0, time.UTC),
				GeneratorURL: "https://prometheus.io/graph?g0.expr=vector%281%29&g0.tab=1",
			},
			UpdatedAt: time.Date(2021, 02, 22, 0, 48, 37, 0, time.UTC),
			Timeout:   false,
		}}
		alerts, err := client.ListAlerts(context.Background(), "", false)
		require.NoError(t, err)
		require.Equal(t, expected, alerts)
	}
	// Silences: note Matchers decodes to an empty (non-nil) slice here.
	{
		expected := []*types.Silence{{
			ID:        "34f5f82b-b66f-456b-aff7-b556a7eafe81",
			CreatedBy: "metalmatze",
			Comment:   "foo",
			StartsAt:  time.Date(2021, 01, 11, 16, 10, 11, 0, time.UTC),
			EndsAt:    time.Date(2022, 01, 11, 16, 10, 02, 0, time.UTC),
			UpdatedAt: time.Date(2021, 01, 11, 16, 10, 11, 0, time.UTC),
			Matchers:  types.Matchers{},
			Status: types.SilenceStatus{
				State: types.SilenceStateActive,
			},
		}}
		alerts, err := client.ListSilences(context.Background())
		require.NoError(t, err)
		require.Equal(t, expected, alerts)
	}
}
|
package payments
import (
"encoding/json"
"io/ioutil"
"net/http"
"github.com/gorilla/mux"
"github.com/jinzhu/gorm"
"github.com/loubard/sfapi/models"
"github.com/loubard/sfapi/sql"
)
// Fetch returns a payment resource based on the id
// (404 for an empty/unknown id, 502 on serialization/write failure).
// Fix: the id is validated before hitting the database; previously the
// query ran first and the empty-id check happened afterwards.
func Fetch(db *gorm.DB) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		id := mux.Vars(r)["id"]
		if id == "" {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		p, err := sql.GetByID(db, id)
		if err != nil {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		d := models.FetchResponse{Data: p}
		j, err := json.Marshal(d)
		if err != nil {
			w.WriteHeader(http.StatusBadGateway)
			return
		}
		if _, err = w.Write(j); err != nil {
			w.WriteHeader(http.StatusBadGateway)
		}
	}
}
// List returns all payment resources
// (502 on serialization/write failure).
func List(db *gorm.DB) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		payload, err := json.Marshal(models.ListResponse{Data: sql.GetAll(db)})
		if err != nil {
			w.WriteHeader(http.StatusBadGateway)
			return
		}
		if _, err := w.Write(payload); err != nil {
			w.WriteHeader(http.StatusBadGateway)
		}
	}
}
// Delete a payment resource
// (502 on failure; an empty body with 200 on success).
func Delete(db *gorm.DB) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		id := mux.Vars(r)["id"]
		if err := sql.Delete(db, id); err != nil {
			w.WriteHeader(http.StatusBadGateway)
		}
	}
}
// Create a payment resource
// (400 for an unreadable/invalid body, 502 on storage failure, 201 on
// success).
func Create(db *gorm.DB) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		var payment models.Payment
		if err := json.Unmarshal(body, &payment); err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		if err := sql.Create(db, &payment); err != nil {
			w.WriteHeader(http.StatusBadGateway)
			return
		}
		w.WriteHeader(http.StatusCreated)
	}
}
// Update a payment resource
// (400 for an unreadable/invalid body or failed update, 201 on success).
// NOTE(review): a failed sql.Update yields 400 here while Create yields
// 502 for its storage failure — confirm which status is intended.
func Update(db *gorm.DB) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		var payment models.Payment
		if err := json.Unmarshal(body, &payment); err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		if err := sql.Update(db, mux.Vars(r)["id"], &payment); err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		w.WriteHeader(http.StatusCreated)
	}
}
|
package iamiam
// Profile identifiers selecting how much of a user's info is exposed.
const (
	// EmailProfile is a profile for returning the email.
	EmailProfile string = "email"
	// SimpleProfile is a profile for returning email, firstname and lastname.
	SimpleProfile string = "simple"
)
// UserInfo contains info for profile creation.
type UserInfo struct {
	Email     string `json:"email"`
	FirstName string `json:"firstName"`
	LastName  string `json:"lastName"`
}

// CreateEmailProfile creates email user profile.
func (u *UserInfo) CreateEmailProfile() *UserInfo {
	profile := new(UserInfo)
	profile.Email = u.Email
	return profile
}

// CreateSimpleProfile creates simple user profile.
func (u *UserInfo) CreateSimpleProfile() *UserInfo {
	profile := new(UserInfo)
	profile.Email = u.Email
	profile.FirstName = u.FirstName
	profile.LastName = u.LastName
	return profile
}
|
package _76_Minimum_Window_Substring
// minWindow returns the smallest substring of s containing every
// character of t (with multiplicity), or "" when none exists
// (LeetCode 76). Delegates to the optimized sliding-window variant.
func minWindow(s string, t string) string {
	//return minWindowWithSlidingWindow(s, t)
	return minWindowWithSlidingWindowFast(s, t)
}
// minWindowWithSlidingWindowFast finds the minimum window of s that
// covers all characters of t (with multiplicity) using a sliding window
// plus a match counter, so validity is checked in O(1) instead of
// rescanning the count map at every step.
func minWindowWithSlidingWindowFast(s, t string) string {
	if len(s) < len(t) { // bad case
		return ""
	}
	// need holds, per character of t, how many occurrences are still
	// missing from the current window (may go negative on surplus).
	need := make(map[int32]int)
	for _, r := range t {
		need[r]++
	}
	var (
		matched int    // characters of t currently covered; window valid when matched == len(t)
		lo      int    // left edge of the window
		best    string // shortest valid window found so far
		bestLen = len(s) + 1 // sentinel: longer than any possible window
	)
	// Expand the right edge; whenever the window becomes valid, shrink
	// from the left as far as validity allows.
	for hi := 0; hi < len(s); hi++ {
		r := int32(s[hi])
		if _, ok := need[r]; ok {
			need[r]--
			if need[r] >= 0 { // this occurrence was actually needed
				matched++
			}
			for matched == len(t) {
				if hi-lo+1 < bestLen {
					bestLen = hi - lo + 1
					best = s[lo : lo+bestLen]
				}
				left := int32(s[lo])
				if _, ok := need[left]; ok {
					need[left]++
					if need[left] > 0 { // dropping it breaks coverage
						matched--
					}
				}
				lo++
			}
		}
	}
	return best
}
// minWindowWithSlidingWindow is the naive sliding-window variant: it
// revalidates each candidate window from scratch, which repeats a lot of
// work and can time out on large inputs — kept for reference.
func minWindowWithSlidingWindow(s, t string) string {
	if len(s) < len(t) { // bad case
		return ""
	}
	best := s + t // sentinel longer than any real answer
	lo, hi := 0, 0 // window is s[lo:hi)
	for hi < len(s) || lo < len(s) {
		if hi-lo < len(t) {
			// Window shorter than t: grow it, or stop if we cannot.
			if hi == len(s) {
				break
			}
			hi++
			continue
		}
		window := s[lo:hi]
		if isValid(window, t) {
			// Valid: record if shorter, then try trimming from the left.
			if len(window) < len(best) {
				best = window
			}
			lo++
			continue
		}
		// Invalid: extend to the right, or stop if exhausted.
		if hi == len(s) {
			break
		}
		hi++
	}
	if len(best) > len(s) {
		return ""
	}
	return best
}
// isValid reports whether s contains every character of t with at least
// the same multiplicity.
func isValid(s, t string) bool {
	counts := make(map[int32]int)
	for _, r := range s {
		counts[r]++
	}
	for _, r := range t {
		counts[r]--
		if counts[r] < 0 {
			return false
		}
	}
	return true
}
|
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
package wasmlib
// Hnames and parameter keys of the chain's core contracts. Each group
// lists the contract hname, its function/view entry points, and the
// parameter keys those entry points accept.

// Core "accounts" contract.
const CoreAccounts = ScHname(0x3c4b5e02)
const CoreAccountsFuncDeposit = ScHname(0xbdc9102d)
const CoreAccountsFuncWithdrawToAddress = ScHname(0x26608cb5)
const CoreAccountsFuncWithdrawToChain = ScHname(0x437bc026)
const CoreAccountsViewAccounts = ScHname(0x3c4b5e02)
const CoreAccountsViewBalance = ScHname(0x84168cb4)
const CoreAccountsViewTotalAssets = ScHname(0xfab0f8d2)
const CoreAccountsParamAgentID = Key("a")

// Core "blob" contract.
const CoreBlob = ScHname(0xfd91bc63)
const CoreBlobFuncStoreBlob = ScHname(0xddd4c281)
const CoreBlobViewGetBlobField = ScHname(0x1f448130)
const CoreBlobViewGetBlobInfo = ScHname(0xfde4ab46)
const CoreBlobViewListBlobs = ScHname(0x62ca7990)
const CoreBlobParamField = Key("field")
const CoreBlobParamHash = Key("hash")

// Core "eventlog" contract.
const CoreEventlog = ScHname(0x661aa7d8)
const CoreEventlogViewGetNumRecords = ScHname(0x2f4b4a8c)
const CoreEventlogViewGetRecords = ScHname(0xd01a8085)
const CoreEventlogParamContractHname = Key("contractHname")
const CoreEventlogParamFromTs = Key("fromTs")
const CoreEventlogParamMaxLastRecords = Key("maxLastRecords")
const CoreEventlogParamToTs = Key("toTs")

// Core "root" contract (chain administration and contract deployment).
const CoreRoot = ScHname(0xcebf5908)
const CoreRootFuncClaimChainOwnership = ScHname(0x03ff0fc0)
const CoreRootFuncDelegateChainOwnership = ScHname(0x93ecb6ad)
const CoreRootFuncDeployContract = ScHname(0x28232c27)
const CoreRootFuncGrantDeployPermission = ScHname(0xf440263a)
const CoreRootFuncRevokeDeployPermission = ScHname(0x850744f1)
const CoreRootFuncSetContractFee = ScHname(0x8421a42b)
const CoreRootFuncSetDefaultFee = ScHname(0x3310ecd0)
const CoreRootViewFindContract = ScHname(0xc145ca00)
const CoreRootViewGetChainInfo = ScHname(0x434477e2)
const CoreRootViewGetFeeInfo = ScHname(0x9fe54b48)
const CoreRootParamChainOwner = Key("$$owner$$")
const CoreRootParamDeployer = Key("$$deployer$$")
const CoreRootParamDescription = Key("$$description$$")
const CoreRootParamHname = Key("$$hname$$")
const CoreRootParamName = Key("$$name$$")
const CoreRootParamOwnerFee = Key("$$ownerfee$$")
const CoreRootParamProgramHash = Key("$$proghash$$")
const CoreRootParamValidatorFee = Key("$$validatorfee$$")
|
package main
import (
"fmt"
"log"
"os"
)
// Go standard library
// log: the built-in simple logging package
// main demonstrates the standard log package: prefix/flag configuration
// and redirecting output to a file.
// Fix: the opened log file is now closed on exit.
func main() {
	// 4. Logger configuration.
	log.SetPrefix("Test: ") // prefix prepended to every log line
	log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)
	file, err := os.OpenFile("./cli.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0755)
	if err != nil {
		log.Fatalln(err)
	}
	defer file.Close()
	// 5. Redirect log output to the file.
	log.SetOutput(file)
	// 1. Basic printing.
	log.Print("Hello")
	log.Printf("Hello %s", "world") // formatted print
	log.Println("hello bob")        // print with a trailing newline
	// 2. Fatal: logs, then exits the process.
	//log.Fatalln("打印日志后退出")
	// 3. Panic: logs with details, then panics.
	//log.Panicln("hello")
	fmt.Println("小小雪也可以改动了数据")
	fmt.Println("小小雪也可以改动了数据")
	fmt.Println("小小雪也可以改动了数据")
	fmt.Println("小小雪也可以改动了数据")
	fmt.Print("1231")
	fmt.Println("12314")
	fmt.Println("小小雪发现,合并到master的分支的功能,还不完善,继续修改了文件")
	fmt.Println("master 有被其他同事修改了")
	fmt.Println("小小雪太菜了,做一个功能,第三次修改。")
	fmt.Println("小小雪第四次提交了自己修改")
	fmt.Println("修改了log")
	fmt.Println("修改了log2---")
	fmt.Println("修改了log3++++++")
	fmt.Println("修改了log")
	fmt.Println("修改了log2---")
	fmt.Println("修改了log3++++++")
	fmt.Println("修改了log")
	fmt.Println("修改了log2---")
	fmt.Println("修改了log3++++++")
	fmt.Println("修改了log")
	fmt.Println("修改了log2---")
	fmt.Println("修改了log3++++++")
}
|
package ens
import (
"fmt"
"time"
"github.com/imroc/req"
"github.com/sirupsen/logrus"
"github.com/imsilence/gocmdb/agent/entity"
"github.com/imsilence/gocmdb/agent/gconf"
)
// ENS forwards agent events to the server's REST endpoints and polls the
// task endpoint periodically (see Start).
type ENS struct {
	config     *gconf.Config    // endpoint, token and UUID used to build requests
	Heartbeat  chan interface{} // carries entity.Heartbeat payloads
	Register   chan interface{} // carries entity.Register payloads
	Task       chan interface{} // presumably server-dispatched tasks; not written to in Start — confirm
	TaskResult chan interface{} // carries entity.TaskResult payloads
	Log        chan interface{} // carries entity.Log payloads
}
// NewENS builds an ENS with buffered event channels, sized roughly by
// expected event volume (logs being by far the most frequent).
func NewENS(c *gconf.Config) *ENS {
	ens := &ENS{config: c}
	ens.Heartbeat = make(chan interface{}, 16)
	ens.Register = make(chan interface{}, 16)
	ens.Task = make(chan interface{}, 64)
	ens.TaskResult = make(chan interface{}, 128)
	ens.Log = make(chan interface{}, 10240)
	return ens
}
// Start launches one goroutine per event channel. Each consumer drains its
// channel and POSTs the payload as JSON to the matching server endpoint; a
// final goroutine polls the task endpoint every 10 seconds.
// NOTE(review): there is no shutdown mechanism — the consumers exit only
// when their channels are closed and the poller runs forever. Confirm that
// is intended.
func (e *ENS) Start() {
	headers := req.Header{"Token": e.config.Token}

	// post sends body as JSON to endpoint, logging failures at Error and
	// successes at Debug — exactly what each loop previously did inline.
	// Extracted to remove the four copies of the same request/log code.
	post := func(endpoint string, body interface{}) {
		response, err := req.New().Post(endpoint, req.BodyJSON(body), headers)
		if err != nil {
			logrus.Error(response, err)
		} else {
			logrus.Debug(response, err)
		}
	}

	go func() {
		endpoint := fmt.Sprintf("%s/heartbeat/%s/", e.config.Endpoint, e.config.UUID)
		for evt := range e.Heartbeat {
			if body, ok := evt.(entity.Heartbeat); ok {
				post(endpoint, body)
			}
		}
	}()
	go func() {
		endpoint := fmt.Sprintf("%s/register/%s/", e.config.Endpoint, e.config.UUID)
		for evt := range e.Register {
			if body, ok := evt.(entity.Register); ok {
				post(endpoint, body)
			}
		}
	}()
	go func() {
		endpoint := fmt.Sprintf("%s/log/%s/", e.config.Endpoint, e.config.UUID)
		for evt := range e.Log {
			if body, ok := evt.(entity.Log); ok {
				post(endpoint, body)
			}
		}
	}()
	go func() {
		endpoint := fmt.Sprintf("%s/result/%s/", e.config.Endpoint, e.config.UUID)
		for evt := range e.TaskResult {
			if body, ok := evt.(entity.TaskResult); ok {
				post(endpoint, body)
			}
		}
	}()
	go func() {
		// Poll for new tasks every 10s, passing the current time as a cursor.
		endpoint := fmt.Sprintf("%s/task/%s/", e.config.Endpoint, e.config.UUID)
		for now := range time.Tick(10 * time.Second) {
			response, err := req.New().Get(endpoint, req.QueryParam{"time": now.Unix()}, headers)
			if err != nil {
				logrus.Error(response, err)
			} else {
				logrus.Debug(response, err)
			}
		}
	}()
}
|
package wordbreak
import (
"golang/helper"
"testing"
)
// Test drives wordBreak through the classic word-break cases, including two
// pathological all-'a' inputs that defeat naive exponential backtracking.
func Test(t *testing.T) {
	cases := []struct {
		s    string
		dict []string
		want bool
	}{
		{"leetcode", []string{"leet", "code"}, true},
		{"applepenapple", []string{"apple", "pen"}, true},
		{"catsandog", []string{"cats", "dog", "sand", "and", "cat"}, false},
		{"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab", []string{"a", "aa", "aaa", "aaaa", "aaaaa", "aaaaaa", "aaaaaaa", "aaaaaaaa", "aaaaaaaaa", "aaaaaaaaaa"}, false},
		{"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab", []string{"a", "aa", "aaa", "aaaa", "aaaaa", "aaaaaa", "aaaaaaa", "aaaaaaaa", "aaaaaaaaa", "aaaaaaaaaa"}, false},
	}
	for _, c := range cases {
		helper.Assert(wordBreak(c.s, c.dict), c.want, t)
	}
}
|
package oauth20
import "time"
// Config holds the OAuth 2.0 client settings, populated from environment
// variables via the envconfig tags.
type Config struct {
	// Endpoint used for client (credential) operations.
	ClientEndpoint string `envconfig:"APP_OAUTH20_CLIENT_ENDPOINT"`
	// Endpoint for obtaining public access tokens.
	PublicAccessTokenEndpoint string `envconfig:"APP_OAUTH20_PUBLIC_ACCESS_TOKEN_ENDPOINT"`
	// Timeout applied to the underlying HTTP client (defaults to 105s).
	HTTPClientTimeout time.Duration `envconfig:"default=105s,APP_OAUTH20_HTTP_CLIENT_TIMEOUT"`
}
|
package graphql_test
import (
"net/http/httptest"
"testing"
"github.com/99designs/gqlgen/client"
"github.com/Sirupsen/logrus"
"github.com/Tinee/go-graphql-chat/graphql"
"github.com/Tinee/go-graphql-chat/inmemory"
)
// Test_graphql_mutationResolver spins up an httptest server backed by the
// in-memory repositories (seeded from mock_data.json) and exercises the
// register, login, postMessage and postProfile mutations end to end.
// NOTE(review): srv is never closed; consider `defer srv.Close()`.
func Test_graphql_mutationResolver(t *testing.T) {
	inmem := inmemory.NewClient()
	err := inmem.FillWithMockData("../inmemory/mock_data.json")
	if err != nil {
		t.Fatal("Error: We need to have mock data inserted.")
	}
	// Handler under test; "localSecret" signs the auth tokens.
	srv := httptest.NewServer(graphql.NewGraphQLHandlerFunc(
		inmem.UserRepository(),
		inmem.MessageRepository(),
		inmem.ProfileRepository(),
		logrus.New(),
		"localSecret",
	))
	c := client.New(srv.URL)
	// Registering a new user must return a non-empty id and echo the username.
	t.Run("Mutation Register", func(t *testing.T) {
		var resp struct {
			Register struct {
				ID       string
				Username string
			}
		}
		c.MustPost(`mutation { register(input: { username: "Marcus", password: "admin" }) { id, username } }`, &resp)
		if resp.Register.ID == "" {
			t.Error("Didn't expect this to be empty.")
		}
		if resp.Register.Username != "Marcus" {
			t.Errorf("Expected username to be (%v) but got (%v)", "Marcus", resp.Register.Username)
		}
	})
	// Login with credentials present in the mock data.
	t.Run("Mutation Login", func(t *testing.T) {
		var resp struct {
			Login struct {
				ID       string
				Username string
				Token    string
			}
		}
		c.MustPost(`mutation { login(input: { username: "tine", password: "test1" }) { username } }`, &resp)
		if resp.Login.Username != "tine" {
			t.Errorf("Expected (%v) but got (%v)", "tine", resp.Login.Username)
		}
	})
	// Posting a message echoes the sender id back.
	t.Run("Mutation PostMessage", func(t *testing.T) {
		var resp struct {
			PostMessage struct {
				SenderID string
			}
		}
		c.MustPost(`mutation { postMessage(input:{text:"Foo", senderId:"2", receiverId:"1"}) { senderId } }`, &resp)
		if resp.PostMessage.SenderID != "2" {
			t.Errorf("Expected (%v) but got (%v)", "2", resp.PostMessage.SenderID)
		}
	})
	// Posting a profile echoes the user id back.
	t.Run("Mutation PostProfile", func(t *testing.T) {
		var resp struct {
			PostProfile struct {
				UserID string
			}
		}
		c.MustPost(`mutation { postProfile(input:{ userId:"Foo", firstName:"Foo", lastName:"Bar", age: 25 }) { userId } }`, &resp)
		if resp.PostProfile.UserID != "Foo" {
			t.Errorf("Expected (%v) but got (%v)", "Foo", resp.PostProfile.UserID)
		}
	})
}
|
package worker
import osm "github.com/JesseleDuran/gograph/osm/pbf"
//go:generate mockery --name S3Client

// S3Client abstracts the object-store operations the worker performs.
type S3Client interface {
	// Get downloads objectName from bucketName into the local file fileName.
	Get(bucketName, objectName, fileName string) error
	// Put uploads the file at filePath as objectName and returns bytes written.
	Put(bucketName, objectName, filePath string) (int64, error)
	// GetAllObjectKeys lists every object key in bucketName.
	GetAllObjectKeys(bucketName string) []string
}

//go:generate mockery --name S3FileManager

// FileManager moves files between local and remote storage.
// NOTE(review): the generate directive above names "S3FileManager" but the
// interface is "FileManager" — confirm the intended mockery target.
type FileManager interface {
	Download(source, destination string) (string, error)
	Upload(source, destination string) error
	AllKeys() []string
}

// FileProvider produces a local file path for the worker to consume.
type FileProvider interface {
	Fetch() (string, error)
}

// Graph builds a graph from OSM content using the given filter.
type Graph interface {
	Create(filter osm.Filter, content string) error
}
|
// Package clause.
package gigasecond
import (
"math"
"time"
)
// testVersion pins the exercism test-suite version this solution targets.
const testVersion = 4 // find the value in gigasecond_test.go
// API function. It uses a type from the Go standard library.
func AddGigasecond(t time.Time) time.Time {
return t.Add(time.Duration(math.Pow(10, 9)) * time.Duration(time.Second))
}
|
package main
import "fmt"
// Stack is a LIFO container of ints backed by a slice.
type Stack struct {
	items []int
}

// Push adds i to the top of the stack.
func (s *Stack) Push(i int) {
	s.items = append(s.items, i)
}

// Pop removes and returns the top value, or 0 when the stack is empty.
func (s *Stack) Pop() int {
	// Robustness: the original panicked (index out of range) on an empty stack.
	if len(s.items) == 0 {
		return 0
	}
	top := len(s.items) - 1
	value := s.items[top]
	// Fix: the previous slicing s.items[1:len-1] also discarded the BOTTOM
	// element on every pop; only the top element must be removed.
	s.items = s.items[:top]
	return value
}
// main demos the Stack: print it empty, push three values, pop once, print.
func main() {
	st := Stack{}
	fmt.Println(st)
	for _, v := range []int{3, 10, 20} {
		st.Push(v)
	}
	fmt.Println(st.Pop())
	fmt.Println(st)
}
|
package main
import "fmt"
// main runs a tiny interactive expense manager: show a menu on stdin/stdout
// until the user chooses to quit.
func main() {
	// Expenses collected during this session (in memory only).
	var expenses []Expense
	for {
		fmt.Println("Expenses Manager")
		fmt.Println("Choose one:")
		fmt.Println("1. Add expense")
		fmt.Println("2. Display expenses")
		fmt.Println("3. Quit")
		choice := 0
		fmt.Scanf("%d", &choice)
		switch choice {
		case 1:
			expenses = addExpense(expenses)
		case 2:
			displayExpense(expenses)
		case 3:
			return
		}
	}
}
// addExpense prompts for one expense (title, category, cost) on stdin and
// returns the slice with the new entry appended.
func addExpense(expenses []Expense) []Expense {
	fmt.Println("Add expense <title> <category> <cost>")
	var (
		title    string
		category string
		amount   float32
	)
	fmt.Scanf("%s %s %f", &title, &category, &amount)
	return append(expenses, *CreateExpense(title, category, amount))
}
// displayExpense prints a count followed by each expense on its own line.
func displayExpense(expenses []Expense) {
	fmt.Printf("Number of expenses: %d\n", len(expenses))
	for i := range expenses {
		fmt.Println(expenses[i])
	}
}
|
package mysort
import (
"fmt"
"math/rand"
"sort"
"testing"
)
// go语言的slice() 不仅可以对int类型的数组进行排序,也可以对struct类型的数组进行排序
// 排序函数如下
// 1.Slice() 排序不稳定
// 2.SliceStable() 稳定排序
// 3.SlicesSorted()判断是否已排序
// test is a small value/label pair used to exercise struct-slice sorting.
type test struct {
	value int
	str   string
}

// TestSortSlices demonstrates sort.SliceStable and sort.SliceIsSorted on a
// slice of structs: print, shuffle, stable-sort ascending by value, verify.
func TestSortSlices(t *testing.T) {
	s := []test{
		{value: 4, str: "test1"},
		{value: 2, str: "test2"},
		{value: 3, str: "test3"},
		{value: 5, str: "test5"},
		{value: 1, str: "test4"},
	}
	fmt.Println("初始化的结果")
	fmt.Println(s)
	// Shuffle so the sort below has real work to do.
	rand.Shuffle(len(s), func(i, j int) {
		s[i], s[j] = s[j], s[i]
	})
	fmt.Println("打乱的结果")
	fmt.Println(s)
	// Stable ascending sort. Fix: a sort "less" function must implement a
	// strict weak ordering and return false for equal elements; the previous
	// comparator returned true when s[i] == s[j], violating sort's contract
	// (and printed debug noise from inside the comparator).
	sort.SliceStable(s, func(i, j int) bool {
		return s[i].value < s[j].value
	})
	fmt.Println("从小到大排序的结果")
	fmt.Println(s)
	// Verify the result and actually fail the test if it is wrong — the
	// original only printed the boolean.
	sorted := sort.SliceIsSorted(s, func(i, j int) bool {
		return s[i].value < s[j].value
	})
	fmt.Printf("S 是否已完成排序 %v\n", sorted)
	if !sorted {
		t.Error("slice is not sorted after sort.SliceStable")
	}
}
|
package internal
import "testing"
// TestDeque exercises the generic Deque[int]: Pop (LIFO end), Shift (FIFO
// end), interleaved push/shift across an internal wrap-around, and Grow.
// The zero Deque is used directly, so the zero value must be usable.
func TestDeque(t *testing.T) {
	// Pop removes from the back; an empty deque yields the zero value.
	t.Run("pop", func(t *testing.T) {
		var dq Deque[int]
		dq.Push(1)
		dq.Push(2)
		if dq.Pop() != 2 {
			t.Error("Didn't pop 2 first")
		}
		if dq.Pop() != 1 {
			t.Error("Didn't pop 1 second")
		}
		if dq.Pop() != 0 {
			t.Error("Didn't pop zero")
		}
	})
	// Shift removes from the front; again zero value when empty.
	// NOTE(review): the message "Didn't shift b second" looks like a typo
	// for "2" carried over from a string-based version of this test.
	t.Run("shift", func(t *testing.T) {
		var td Deque[int]
		td.Push(1)
		td.Push(2)
		if td.Shift() != 1 {
			t.Error("Didn't shift 1 first")
		}
		if td.Shift() != 2 {
			t.Error("Didn't shift b second")
		}
		if td.Shift() != 0 {
			t.Error("Didn't shift zero")
		}
	})
	// Interleave a shift with further pushes to force internal wrap/growth,
	// then drain and check FIFO order is preserved.
	t.Run("push", func(t *testing.T) {
		var td Deque[int]
		td.Push(1)
		td.Push(2)
		td.Shift()
		for i := 1; i <= 12; i++ {
			td.Push(i)
		}
		if td.Shift() != 2 {
			t.Error("Didn't shift 2 first")
		}
		for i := 1; i <= 12; i++ {
			if v := td.Shift(); v != i {
				t.Fatalf("Shifted %d at pos %d", v, i)
			}
		}
	})
	// Grow must reserve capacity (power-of-two) without disturbing contents.
	// This subtest peeks at the unexported elems field, so it is coupled to
	// the Deque implementation.
	t.Run("grow", func(t *testing.T) {
		var td Deque[int]
		td.Push(1)
		td.Push(2)
		td.Push(3)
		td.Shift()
		td.Grow(7)
		if len(td.elems) < 9 {
			t.Fatal("Expected at least 9 elements, got", len(td.elems))
		}
		if cap(td.elems)&(cap(td.elems)-1) != 0 {
			t.Fatalf("Capacity %d is not a power of two", cap(td.elems))
		}
		if td.Shift() != 2 || td.Shift() != 3 {
			t.Fatal("Elements don't match after grow")
		}
	})
}
|
package cgroup
import (
"bufio"
"os"
"path"
"strconv"
"strings"
)
// Info reads the cgroup mount info from /proc/cgroups
type Info struct {
Hierarchy int
NumCgroups int
Enabled bool
}
// GetCgroupV1Info parses /proc/cgroups and returns a map from subsystem
// name to its Info. Comment lines and malformed rows are skipped.
func GetCgroupV1Info() (map[string]Info, error) {
	f, err := os.Open(procCgroupsPath)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	rt := make(map[string]Info)
	s := bufio.NewScanner(f)
	for s.Scan() {
		text := s.Text()
		// Fix: guard empty lines before indexing text[0], which previously
		// panicked with "index out of range" on a blank line.
		if len(text) == 0 || text[0] == '#' {
			continue
		}
		parts := strings.Fields(text)
		if len(parts) < 4 {
			continue
		}
		// format: subsys_name hierarchy num_cgroups enabled
		name := parts[0]
		hierarchy, err := strconv.Atoi(parts[1])
		if err != nil {
			return nil, err
		}
		numCgroups, err := strconv.Atoi(parts[2])
		if err != nil {
			return nil, err
		}
		enabled := parts[3] != "0"
		rt[name] = Info{
			Hierarchy:  hierarchy,
			NumCgroups: numCgroups,
			Enabled:    enabled,
		}
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	return rt, nil
}
// GetAvailableControllerV1 returns the set of cgroup v1 controllers that
// are present in /proc/cgroups AND enabled.
func GetAvailableControllerV1() (map[string]bool, error) {
	info, err := GetCgroupV1Info()
	if err != nil {
		return nil, err
	}
	set := make(map[string]bool, len(info))
	for name, ctrl := range info {
		if ctrl.Enabled {
			set[name] = true
		}
	}
	return set, nil
}
// GetAvailableControllerV2 returns the set of cgroup v2 controllers listed
// in <basePath>/cgroup.controllers (a single whitespace-separated line).
func GetAvailableControllerV2() (map[string]bool, error) {
	content, err := readFile(path.Join(basePath, cgroupControllers))
	if err != nil {
		return nil, err
	}
	set := make(map[string]bool)
	for _, name := range strings.Fields(string(content)) {
		set[name] = true
	}
	return set, nil
}
|
// Package address contains utilities for handling moonbeam addresses.
package address
import (
"errors"
"strings"
"github.com/btcsuite/btcutil/base58"
)
// Encode a moonbeam address for the given bitcoin address and domain.
// The result has the form <addr>+mb<version><checksum>@<domain>, where the
// version and 4-char checksum come from base58check-encoding "<addr>+mb@<domain>".
func Encode(bitcoinAddr, domain string) (string, error) {
	// The bitcoin address must itself be valid base58check.
	if _, _, err := base58.CheckDecode(bitcoinAddr); err != nil {
		return "", err
	}
	// An '@' in the domain would make the final address ambiguous.
	if strings.Contains(domain, "@") {
		return "", errors.New("invalid domain")
	}
	payload := bitcoinAddr + "+mb@" + domain
	checked := base58.CheckEncode([]byte(payload), 1)
	version := checked[:1]
	checksum := checked[len(checked)-4:]
	return bitcoinAddr + "+mb" + version + checksum + "@" + domain, nil
}
// Decode a moonbeam address into its constituent bitcoin address and domain.
// Validity is checked by re-encoding the extracted parts and requiring an
// exact round-trip match.
func Decode(addr string) (bitcoinAddr, domain string, valid bool) {
	at := strings.Index(addr, "@")
	if at < 0 {
		return "", "", false
	}
	local, host := addr[:at], addr[at+1:]
	plus := strings.Index(local, "+")
	if plus < 0 {
		return "", "", false
	}
	candidate := local[:plus]
	expected, err := Encode(candidate, host)
	if err != nil || addr != expected {
		return "", "", false
	}
	return candidate, host, true
}
|
package musictheory
import (
"fmt"
"math"
)
// Quality types: the five interval qualities of common-practice theory.
const (
	PerfectType QualityType = iota
	MajorType
	MinorType
	AugmentedType
	DiminishedType
)

// IntervalFunc creates an interval at a specific step/degree.
type IntervalFunc func(int) Interval
// Perfect interval
func Perfect(step int) Interval {
	return qualityInterval(step, Quality{Type: PerfectType, Size: 0})
}

// Major interval
func Major(step int) Interval {
	return qualityInterval(step, Quality{Type: MajorType, Size: 0})
}

// Minor interval
func Minor(step int) Interval {
	return qualityInterval(step, Quality{Type: MinorType, Size: 0})
}

// Augmented interval
func Augmented(step int) Interval {
	return qualityInterval(step, Quality{Type: AugmentedType, Size: 1})
}

// DoublyAugmented interval
func DoublyAugmented(step int) Interval {
	return qualityInterval(step, Quality{Type: AugmentedType, Size: 2})
}

// Diminished interval
func Diminished(step int) Interval {
	return qualityInterval(step, Quality{Type: DiminishedType, Size: 1})
}

// DoublyDiminished interval
func DoublyDiminished(step int) Interval {
	return qualityInterval(step, Quality{Type: DiminishedType, Size: 2})
}

// Octave interval: step whole octaves with no diatonic/chromatic remainder.
func Octave(step int) Interval {
	return Interval{Octaves: step, Diatonic: 0, Chromatic: 0}
}

// Semitones is an interval built directly from a semitone count.
func Semitones(step int) Interval {
	return Interval{
		Octaves:   chromaticOctaves(step),
		Diatonic:  chromaticToDiatonic(step),
		Chromatic: step,
	}
}
// qualityInterval builds the interval for a signed 1-based step with the
// given quality; negative steps yield the negated (descending) interval.
func qualityInterval(step int, quality Quality) Interval {
	magnitude := step
	if magnitude < 0 {
		magnitude = -magnitude
	}
	degree := normalizeDiatonic(magnitude - 1)
	offset := qualityDiff(quality, canBePerfect(degree))
	iv := NewInterval(magnitude, diatonicOctaves(magnitude-1), offset)
	if step > 0 {
		return iv
	}
	return iv.Negate()
}
// NewInterval builds a new Interval from a 1-based step, an octave count,
// and a chromatic offset relative to the major/perfect degree.
func NewInterval(step, octaves, offset int) Interval {
	degree := normalizeDiatonic(step - 1)
	return Interval{
		Octaves:   octaves,
		Diatonic:  degree,
		Chromatic: diatonicToChromatic(degree) + offset,
	}
}
// Interval represents an interval in 12-tone equal temperament as whole
// octaves plus a diatonic degree and chromatic (semitone) remainder.
type Interval struct {
	Octaves   int
	Diatonic  int
	Chromatic int
}

// String renders the three components for debugging.
func (i Interval) String() string {
	return fmt.Sprintf("(octaves: %d, diatonic: %d, chromatic: %d)", i.Octaves, i.Diatonic, i.Chromatic)
}

// Semitones returns the total number of semitones that make up the interval.
func (i Interval) Semitones() int {
	return 12*i.Octaves + i.Chromatic
}
// Quality returns the Quality of the interval, inverted for descending
// (negative-octave) intervals.
func (i Interval) Quality() Quality {
	diff := i.Chromatic - diatonicToChromatic(i.Diatonic)
	quality := diffQuality(diff, canBePerfect(i.Diatonic))
	if i.Octaves < 0 {
		quality = quality.Invert()
	}
	return quality
}

// Ratio returns the equal-temperament frequency ratio, 2^(semitones/12).
func (i Interval) Ratio() float64 {
	semis := float64(i.Semitones())
	return math.Exp2(semis / 12.0)
}
// Transpose returns a new Interval that has been transposed by the given Interval.
// TODO: Accomodate weird behavior of sequential minor second transpositions. We don't need to advance the diatonic
// every transposition. We're currently modeling things as integers, but maybe we need to model as floats and
// accumulate over time; whole numbers trigger a move.
func (i Interval) Transpose(o Interval) Transposer {
	var diatonic int
	// When o is "chromatically pure" (diatonic == chromatic), only advance
	// the diatonic degree if i itself sits exactly on its diatonic pitch;
	// otherwise the diatonic degree stays put. This is the workaround the
	// TODO above refers to.
	if o.Diatonic == o.Chromatic {
		if diatonicToChromatic(i.Diatonic) == i.Chromatic {
			diatonic = i.Diatonic + o.Diatonic
		} else {
			diatonic = i.Diatonic
		}
	} else {
		diatonic = i.Diatonic + o.Diatonic
	}
	// Carry diatonic overflow into whole octaves, then normalize both
	// components back into range.
	diatonicOctaves := diatonicOctaves(diatonic)
	diatonicRemainder := normalizeDiatonic(diatonic)
	octaves := i.Octaves + o.Octaves + diatonicOctaves
	chromatic := normalizeChromatic(i.Chromatic + o.Chromatic)
	return Interval{octaves, diatonicRemainder, chromatic}
}
// Negate returns a new, negated Interval. Pure octaves/unisons flip by
// negating the octave count; anything else inverts its diatonic and
// chromatic components and borrows an octave.
func (i Interval) Negate() Interval {
	if i.Chromatic == 0 && i.Diatonic == 0 {
		return Interval{Octaves: -i.Octaves, Diatonic: i.Diatonic, Chromatic: i.Chromatic}
	}
	return Interval{
		Octaves:   -(i.Octaves + 1),
		Diatonic:  inverseDiatonic(i.Diatonic),
		Chromatic: inverseChromatic(i.Chromatic),
	}
}

// Eq determines if another interval spans the same number of semitones.
func (i Interval) Eq(o Interval) bool {
	return i.Semitones() == o.Semitones()
}
// QualityType represents the type a Quality can take.
type QualityType int

// String returns the lowercase English name of the quality type, or
// "unknown" for values outside the declared constants.
func (q QualityType) String() string {
	switch q {
	case PerfectType:
		return "perfect"
	case MajorType:
		return "major"
	case MinorType:
		return "minor"
	case AugmentedType:
		return "augmented"
	case DiminishedType:
		return "diminished"
	default:
		return "unknown"
	}
}
// Quality describes the quality of an interval: its type plus a size for
// (multiply) augmented/diminished qualities.
type Quality struct {
	Type QualityType
	Size int
}

// Invert returns a new, inverted Quality: perfect stays perfect, major and
// minor swap, augmented and diminished swap; size is preserved. Unknown
// types panic.
func (q Quality) Invert() Quality {
	inverse := map[QualityType]QualityType{
		PerfectType:    PerfectType,
		MajorType:      MinorType,
		MinorType:      MajorType,
		AugmentedType:  DiminishedType,
		DiminishedType: AugmentedType,
	}
	it, ok := inverse[q.Type]
	if !ok {
		panic(fmt.Sprintf("invalid type: %d", q.Type))
	}
	return Quality{it, q.Size}
}

// Eq checks two Qualities for equality.
func (q Quality) Eq(o Quality) bool {
	return q.Type == o.Type && q.Size == o.Size
}

// String renders the quality; sized qualities include their size.
func (q Quality) String() string {
	switch q.Type {
	case AugmentedType, DiminishedType:
		return fmt.Sprintf("%s(%d)", q.Type, q.Size)
	case PerfectType, MajorType, MinorType:
		return q.Type.String()
	default:
		return "unknown"
	}
}
// diatonicToChromatic maps a zero-based diatonic degree (0..6) to its
// semitone offset in the major scale; degrees past the table panic.
func diatonicToChromatic(interval int) int {
	if interval >= len(diatonicToChromaticLookup) {
		panic(fmt.Sprintf("interval out of range: %d", interval))
	}
	return diatonicToChromaticLookup[interval]
}

// diatonicToChromaticLookup holds the semitone offset of each major-scale degree.
var diatonicToChromaticLookup = []int{0, 2, 4, 5, 7, 9, 11}
// chromaticToDiatonic maps a (signed) semitone count to the nearest-or-next
// diatonic degree, preserving the sign of the input.
func chromaticToDiatonic(v int) int {
	sign := 1
	if v < 0 {
		sign = -1
		v = -v
	}
	v = normalizeChromatic(v)
	for degree, semis := range diatonicToChromaticLookup {
		if v <= semis {
			return degree * sign
		}
	}
	return 6 * sign
}
// qualityDiff converts a Quality into a chromatic offset relative to the
// major (or, when perfect is true, perfect) interval of the same degree.
// Unknown quality types panic.
func qualityDiff(q Quality, perfect bool) int {
	switch q.Type {
	case PerfectType, MajorType:
		return 0
	case MinorType:
		return -1
	case AugmentedType:
		return q.Size
	case DiminishedType:
		// Diminishing a perfect degree steps down by size; on a major
		// degree there is an extra half step through minor first.
		if perfect {
			return -q.Size
		}
		return -(q.Size + 1)
	}
	panic("invalid quality")
}
// diffQuality is the inverse of qualityDiff: it recovers a Quality from a
// chromatic offset, on either the perfect or major/minor degree family.
func diffQuality(diff int, perfect bool) Quality {
	switch {
	case diff > 0:
		return Quality{AugmentedType, diff}
	case diff == 0:
		if perfect {
			return Quality{PerfectType, 0}
		}
		return Quality{MajorType, 0}
	case diff == -1 && !perfect:
		return Quality{MinorType, 0}
	}
	if perfect {
		return Quality{DiminishedType, -diff}
	}
	return Quality{DiminishedType, -(diff + 1)}
}
// canBePerfect reports whether the zero-based diatonic degree is a unison,
// fourth or fifth — the degrees that take a perfect quality.
func canBePerfect(interval int) bool {
	switch interval {
	case 0, 3, 4:
		return true
	default:
		return false
	}
}
|
package mysql
import (
_ "github.com/go-sql-driver/mysql"
"github.com/go-xorm/xorm"
"cms/config"
"fmt"
"log"
"strings"
."cms/structs"
)
// BatchSize caps rows per bulk operation — NOTE(review): no user visible
// here; confirm against the batch writers elsewhere in this package.
const BatchSize int = 500
// engine is the shared xorm engine, created once in init below.
var engine *xorm.Engine
// init connects to MySQL using the application config, syncs the schema for
// the tables this service maintains, and stores the engine in the package
// variable `engine`. Any failure is fatal.
func init() {
	conf := config.AppConfig.MySQL
	dataSourceName := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True&loc=Local", conf.Username, conf.Password, conf.Url, conf.Database)
	e, err := xorm.NewEngine("mysql", dataSourceName)
	// Fix: check the error before touching e. Previously SetMaxIdleConns,
	// SetMaxOpenConns and ShowSQL ran first and would panic on a nil engine
	// if NewEngine failed.
	if err != nil {
		log.Fatalf("mysql connection failed: %q", err)
	}
	// Optional: map struct fields to lower-first column names.
	// e.SetMapper(LowerFirstMapper{})
	e.SetMaxIdleConns(conf.MaxIdle)
	e.SetMaxOpenConns(conf.MaxActive)
	// Echo SQL statements only when log mode is enabled.
	e.ShowSQL(config.AppConfig.Server.LogModelEnable)
	// Tables maintained by this service go into this Sync call.
	if err = e.Sync(new(User)); err != nil {
		log.Fatalf("Fail to sync struct to table schema : %v", err)
	}
	fmt.Println("Succ sync struct to table schema")
	//if config.AppConfig.Server.LogModelEnable {
	//	engine.Logger().SetLevel(core.LOG_DEBUG)
	//} else {
	//	engine.Logger().SetLevel(core.LOG_ERR)
	//}
	engine = e
}
// LowerFirstMapper is an xorm name mapper that lowercases only the first
// letter when mapping Go names to table/column names, and leaves table
// names untouched in the reverse direction.
type LowerFirstMapper struct {
}

// Obj2Table lowercases the first character of o.
// Fix: guard the empty string, which previously panicked on o[:1].
func (m LowerFirstMapper) Obj2Table(o string) string {
	if o == "" {
		return o
	}
	return strings.ToLower(o[:1]) + o[1:]
}

// Table2Obj returns the table name unchanged.
func (m LowerFirstMapper) Table2Obj(t string) string {
	return t
}
package parseBoolExpr
// parseBoolExpr evaluates a boolean expression in the grammar
//
//	t | f | !(expr) | &(expr,expr,...) | |(expr,expr,...)
//
// and returns its truth value. Malformed input degrades best-effort
// (as before) rather than reporting an error.
func parseBoolExpr(expression string) bool {
	if expression == "" {
		return false
	}
	switch expression[0] {
	case 't':
		return true
	case 'f':
		return false
	case '!':
		subs, _ := getContentInBracket(expression, 1)
		if len(subs) != 1 {
			return false
		}
		return !parseBoolExpr(subs[0])
	case '&':
		subs, _ := getContentInBracket(expression, 1)
		for _, sub := range subs {
			if !parseBoolExpr(sub) {
				return false // short-circuit on the first false operand
			}
		}
		return true
	case '|':
		subs, _ := getContentInBracket(expression, 1)
		for _, sub := range subs {
			if parseBoolExpr(sub) {
				return true // short-circuit on the first true operand
			}
		}
		return false
	default:
		return true
	}
}

// getContentInBracket splits the top-level comma-separated operands of the
// parenthesized group starting at s[start] (which must be '(') and returns
// them along with the index just past the matching ')'.
func getContentInBracket(s string, start int) (expressions []string, end int) {
	if s == "" || start < 0 || start >= len(s) || s[start] != '(' {
		return
	}
	depth := 1
	for i := start + 1; i < len(s); i++ {
		switch s[i] {
		case '(':
			depth++
		case ')':
			depth--
			if depth == 0 {
				// Fix: the previous `break` here only exited the switch,
				// not the loop, so scanning continued past the matching ')'.
				expressions = append(expressions, s[start+1:i])
				end = i + 1
				return
			}
		case ',':
			if depth == 1 {
				expressions = append(expressions, s[start+1:i])
				start = i
			}
		}
	}
	return
}
|
package lexer
import (
"github.com/BOBO1997/monkey/token"
)
// Lexer holds the whole source being tokenized plus the reading cursors.
type Lexer struct {
	input string // the whole source
	position int // the position currently read (already consumed)
	readPosition int // the next position to be read
	ch rune // the character at position (0 means EOF)
}

// New makes a *Lexer for input and primes it by reading the first character,
// so NextToken can be called immediately.
func New(input string) *Lexer {
	l := &Lexer{input: input}
	l.readChar()
	return l
}
// readChar advances the lexer one character: it loads the character at
// readPosition into ch (0 signals EOF) and moves both cursors forward.
// NOTE(review): this converts the whole input to []rune on every call
// (O(n) per character) and indexes the rune slice by readPosition, while
// peekChar and the read* helpers index the string by byte. The two schemes
// agree only for ASCII input — confirm inputs are ASCII or unify indexing.
func (l *Lexer) readChar() {
	if l.readPosition >= len(l.input) {
		l.ch = 0 // 0 represents EOF
	} else {
		l.ch = []rune(l.input)[l.readPosition] // read a new rune
	}
	l.position = l.readPosition
	l.readPosition++
}
// readIdentifier consumes letters/underscores from the current position and
// returns the identifier as a byte-sliced substring of input.
func (l *Lexer) readIdentifier() string {
	startPos := l.position
	for isLetter(l.ch) {
		l.readChar()
	}
	return l.input[startPos:l.position]
}

// readNumber consumes consecutive ASCII digits and returns them.
func (l *Lexer) readNumber() string {
	startPos := l.position
	for isDigit(l.ch) {
		l.readChar()
	}
	return l.input[startPos:l.position]
}

// readString consumes characters up to the closing '"' (or EOF) and returns
// the contents without the quotes. There is no escape handling.
func (l *Lexer) readString() string {
	startPos := l.position + 1
	for {
		l.readChar()
		if l.ch == '"' || l.ch == 0 {
			break
		}
	}
	return l.input[startPos:l.position]
}
// peekChar returns the next character without consuming it, used to detect
// two-character operators (==, <=, >=, !=); 0 means EOF.
// NOTE(review): this indexes the string by BYTE while readChar indexes a
// []rune — inconsistent for non-ASCII input (see readChar).
func (l *Lexer) peekChar() rune {
	if len([]rune(l.input)) <= l.readPosition {
		return 0
	} else {
		return rune(l.input[l.readPosition])
	}
}
// skipWhitespace consumes spaces, tabs, newlines and carriage returns so the
// next token starts on a meaningful character.
func (l *Lexer) skipWhitespace() {
	for {
		switch l.ch {
		case ' ', '\n', '\t', '\r':
			l.readChar()
		default:
			return
		}
	}
}
// NextToken consumes and returns the next token from the input, advancing
// the lexer cursors past it. Identifiers/numbers return early because their
// read* helpers already consumed the trailing character.
func (l *Lexer) NextToken() token.Token {
	var tok token.Token
	l.skipWhitespace()
	switch l.ch {
	case '=':
		// '==' (equality) vs '=' (assignment): peek one character ahead.
		if l.peekChar() == '=' {
			ch := l.ch
			l.readChar()
			tok = token.Token{Type: token.EQ, Literal: string(ch) + string(l.ch)}
		} else {
			tok = newToken(token.ASSIGN, l.ch)
		}
	case ';':
		tok = newToken(token.SEMICOLON, l.ch)
	case ':':
		tok = newToken(token.COLON, l.ch)
	case '(':
		tok = newToken(token.LPAREN, l.ch)
	case ')':
		tok = newToken(token.RPAREN, l.ch)
	case ',':
		tok = newToken(token.COMMA, l.ch)
	case '+':
		tok = newToken(token.PLUS, l.ch)
	case '-':
		tok = newToken(token.MINUS, l.ch)
	case '*':
		tok = newToken(token.ASTERISK, l.ch)
	case '/':
		tok = newToken(token.SLASH, l.ch)
	case '<':
		// '<=' vs '<'.
		if l.peekChar() == '=' {
			ch := l.ch
			l.readChar()
			tok = token.Token{Type: token.LEQ, Literal: string(ch) + string(l.ch)}
		} else {
			tok = newToken(token.LT, l.ch)
		}
	case '>':
		// '>=' vs '>'.
		if l.peekChar() == '=' {
			ch := l.ch
			l.readChar()
			tok = token.Token{Type: token.GEQ, Literal: string(ch) + string(l.ch)}
		} else {
			tok = newToken(token.GT, l.ch)
		}
	case '!':
		// '!=' vs '!'.
		if l.peekChar() == '=' {
			ch := l.ch
			l.readChar()
			tok = token.Token{Type: token.NEQ, Literal: string(ch) + string(l.ch)}
		} else {
			tok = newToken(token.BANG, l.ch)
		}
	case '{':
		tok = newToken(token.LBRACE, l.ch)
	case '}':
		tok = newToken(token.RBRACE, l.ch)
	case '[':
		tok = newToken(token.LBRACKET, l.ch)
	case ']':
		tok = newToken(token.RBRACKET, l.ch)
	case '"':
		// String literal: readString consumes through the closing quote.
		tok.Type = token.STRING
		tok.Literal = l.readString()
	case 0:
		// 0 is the sentinel readChar stores at end of input.
		tok.Literal = ""
		tok.Type = "EOF"
	default:
		if isLetter(l.ch) {
			// Keyword or identifier; LookupIdent decides which.
			tok.Literal = l.readIdentifier()
			tok.Type = token.LookupIdent(tok.Literal)
			return tok
		} else if isDigit(l.ch) {
			tok.Literal = l.readNumber()
			tok.Type = token.INT
			return tok
		} else {
			tok = newToken(token.ILLEGAL, l.ch)
		}
	}
	l.readChar()
	return tok
}
// newToken creates a single-character token.Token of the given type.
func newToken(tokenType token.TokenType, ch rune) token.Token {
	return token.Token{Type: tokenType, Literal: string(ch)} // make and return a new token
}
// isLetter reports whether ch can appear in an identifier:
// ASCII letters and underscore only.
func isLetter(ch rune) bool {
	switch {
	case ch == '_':
		return true
	case 'a' <= ch && ch <= 'z':
		return true
	case 'A' <= ch && ch <= 'Z':
		return true
	default:
		return false
	}
}

// isDigit reports whether ch is an ASCII digit.
func isDigit(ch rune) bool {
	return ch >= '0' && ch <= '9'
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-08-31 15:31
# @File : lt_95_Unique_Binary_Search_Trees_II.go
# @Description :
# @Attention :
*/
package v0
// generateTrees returns all structurally unique BSTs storing 1..n.
// Fix: guard n <= 0 (not just n == 0); a negative n previously fell through
// to helper(1, n) and returned []*TreeNode{nil} instead of no trees.
func generateTrees(n int) []*TreeNode {
	if n <= 0 {
		return nil
	}
	return helper(1, n)
}
// helper builds every unique BST over the value range [start, end].
// An empty range yields the single "tree" nil, which lets the cartesian
// product below attach empty subtrees naturally.
func helper(start int, end int) []*TreeNode {
	if start > end {
		return []*TreeNode{nil}
	}
	var trees []*TreeNode
	for root := start; root <= end; root++ {
		// Everything below root goes left, everything above goes right.
		leftSubtrees := helper(start, root-1)
		rightSubtrees := helper(root+1, end)
		// Pair every left shape with every right shape.
		for _, l := range leftSubtrees {
			for _, r := range rightSubtrees {
				trees = append(trees, &TreeNode{
					Val:   root,
					Left:  l,
					Right: r,
				})
			}
		}
	}
	return trees
}
|
/*
Copyright 2020 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"context"
"github.com/gravitational/teleport/lib/backend"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/trace"
)
// GetAppSession gets an application web session by its session ID.
func (s *IdentityService) GetAppSession(ctx context.Context, req services.GetAppSessionRequest) (services.WebSession, error) {
	if err := req.Check(); err != nil {
		return nil, trace.Wrap(err)
	}
	key := backend.Key(appsPrefix, sessionsPrefix, req.SessionID)
	item, err := s.Get(ctx, key)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	session, err := services.GetWebSessionMarshaler().UnmarshalWebSession(item.Value, services.SkipValidation())
	if err != nil {
		return nil, trace.Wrap(err)
	}
	return session, nil
}
// GetAppSessions gets all application web sessions in the backend range.
func (s *IdentityService) GetAppSessions(ctx context.Context) ([]services.WebSession, error) {
	startKey := backend.Key(appsPrefix, sessionsPrefix)
	result, err := s.GetRange(ctx, startKey, backend.RangeEnd(startKey), backend.NoLimit)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	out := make([]services.WebSession, 0, len(result.Items))
	for _, item := range result.Items {
		session, err := services.GetWebSessionMarshaler().UnmarshalWebSession(item.Value, services.SkipValidation())
		if err != nil {
			return nil, trace.Wrap(err)
		}
		out = append(out, session)
	}
	return out, nil
}
// UpsertAppSession creates or replaces an application web session, keyed by
// the session name and expiring with the session itself.
func (s *IdentityService) UpsertAppSession(ctx context.Context, session services.WebSession) error {
	value, err := services.GetWebSessionMarshaler().MarshalWebSession(session)
	if err != nil {
		return trace.Wrap(err)
	}
	if _, err := s.Put(ctx, backend.Item{
		Key:     backend.Key(appsPrefix, sessionsPrefix, session.GetName()),
		Value:   value,
		Expires: session.GetExpiryTime(),
	}); err != nil {
		return trace.Wrap(err)
	}
	return nil
}
// DeleteAppSession removes a single application web session by its ID.
func (s *IdentityService) DeleteAppSession(ctx context.Context, req services.DeleteAppSessionRequest) error {
	key := backend.Key(appsPrefix, sessionsPrefix, req.SessionID)
	if err := s.Delete(ctx, key); err != nil {
		return trace.Wrap(err)
	}
	return nil
}
// DeleteAllAppSessions removes every application web session in the range.
func (s *IdentityService) DeleteAllAppSessions(ctx context.Context) error {
	startKey := backend.Key(appsPrefix, sessionsPrefix)
	if err := s.DeleteRange(ctx, startKey, backend.RangeEnd(startKey)); err != nil {
		return trace.Wrap(err)
	}
	return nil
}
|
package study_avltree
import (
"fmt"
"strings"
)
// toString renders one tree node as "[key:value:height]"; nil renders as "".
func toString(n *node) string {
	if n == nil {
		return ""
	}
	return fmt.Sprintf("[%v:%v:%v]", n.key, n.value, n.height)
}
// allEntryIsNotNil reports whether AT LEAST ONE entry in list is non-nil.
// NOTE(review): despite the "all" in the name this is an ANY check — it
// returns true at the first non-nil entry. Print relies on exactly this
// semantics to keep descending while a level still holds real nodes, so
// renaming would need a coordinated change.
func allEntryIsNotNil(list []*node) bool {
	for _, e := range list {
		if e != nil {
			return true
		}
	}
	return false
}
// Print writes an ASCII rendering of the tree to stdout, one line per node,
// deepest level first. It does a breadth-first walk, padding each node's
// text with the widest string seen on the previous level so columns roughly
// line up. Children are queued right-before-left, and nil children are
// carried along as placeholders so positions stay aligned.
func (t *Tree) Print() {
	if t.root == nil {
		return
	}
	// Single-node tree: print it directly, no layout needed.
	if t.root.left == nil && t.root.right == nil {
		fmt.Printf("%v\n", toString(t.root))
		return
	}
	queue := []*node{ t.root }
	next_queue := []*node{}
	// output accumulates the lines built so far; each level interleaves its
	// node text with the previously built lines (hence the seed "").
	output := []string{ "" }
	next_output := []string{}
	space := 0
	next_space := 0
	for allEntryIsNotNil(queue) {
		for _, node := range queue {
			if node == nil {
				// Keep placeholders so positions stay aligned below.
				next_queue = append(next_queue, nil, nil)
			} else {
				next_queue = append(next_queue, node.right, node.left)
			}
			// Indent by the widest entry of the previous level.
			s := strings.Repeat(" ", space) + toString(node)
			next_output = append(next_output, s)
			next_output = append(next_output, output[0])
			output = output[1:]
			if len(s) > next_space {
				next_space = len(s)
			}
		}
		queue = next_queue
		output = next_output
		space = next_space
		next_queue = []*node{}
		next_output = []string{}
	}
	for _, o := range output {
		fmt.Printf("%s\n", o)
	}
}
|
package teesdk
// TrustClient is a minimal client for a trusted-execution service
// (presumably a TEE sidecar, given the package name — confirm): Submit
// sends an encrypted payload for the named method and returns the response;
// Close releases the underlying connection.
type TrustClient interface {
	Close()
	Submit(method string, cipher string) (string, error)
}
|
package main
import "fmt"
// myFloat is a float64 with a mutating Scale method.
type myFloat float64

// Scale multiplies the receiver in place by s. The pointer receiver is what
// makes the mutation visible to the caller.
func (f *myFloat) Scale(s float64) {
	*f *= myFloat(s)
}
// main demonstrates that Scale's pointer receiver mutates the value.
func main() {
	val := myFloat(3.14159265)
	val.Scale(100.00)
	fmt.Println(val)
}
|
package adapter
import (
"io"
"mqtt-adapter/src/logger"
"github.com/sirupsen/logrus"
"github.com/surgemq/surgemq/service"
)
// TestSubscriber is a stub subscriber for unit tests.
type TestSubscriber struct {
	needPanic bool // when set, SubscribeBridge panics to exercise recovery paths
}

// Subscribe is a no-op stub.
func (s TestSubscriber) Subscribe(topic string, writer io.Writer) {}

// SubscribeBridge pushes two canned payloads — one valid JSON message and
// one with a non-string topic — then closes the channel.
func (s TestSubscriber) SubscribeBridge(topic string, msgChan chan<- string) {
	if s.needPanic {
		panic("test Panic")
	}
	for _, msg := range []string{`{"topic":"test"}`, `{"topic":123}`} {
		msgChan <- msg
	}
	close(msgChan)
}

// Disconnect is a no-op stub.
func (s TestSubscriber) Disconnect() {}
// TestPublisher is a stub publisher whose Publish always succeeds.
type TestPublisher struct{}
// Publish is a no-op that reports success.
func (p TestPublisher) Publish(msg string) error { return nil }
// Disconnect is a no-op stub.
func (p TestPublisher) Disconnect() {}
// writer is an io.Writer test double that records the most recent payload.
// Note each Write OVERWRITES data; only the last write is retained.
type writer struct {
	data string
}

// Write captures p as a string and reports it fully consumed.
func (w *writer) Write(p []byte) (n int, err error) {
	w.data = string(p)
	n = len(p)
	return n, nil
}
// setLog points the package logger at wr with a plain-text formatter so
// tests can capture and inspect log output.
func setLog(wr io.Writer) {
	logger.Log = logrus.New()
	logger.Log.SetFormatter(new(logrus.TextFormatter))
	logger.Log.SetOutput(wr)
}
const mockURL = "tcp://:15352"
// runMockServer creates mock server for testing
func getMockServer()*service.Server {
return &service.Server{
KeepAlive: 300, // seconds
ConnectTimeout: 2, // seconds
SessionsProvider: "mem", // keeps sessions in memory
Authenticator: "mockSuccess", // always succeed
TopicsProvider: "mem", // keeps topic subscriptions in memory
}
} |
package main
import (
"studentDetails/gomodule/routes"
"github.com/gin-gonic/gin"
)
// main wires up the student REST endpoints and starts the HTTP server on
// gin's default listen address.
func main() {
	// Router with gin's default middleware stack (logger and recovery).
	router := gin.Default()
	router.GET("/student", routes.GetStudentDetails)
	router.POST("/student", routes.PostStudentDetails)
	// NOTE(review): the same handler serves both /student and /student/:id —
	// presumably it branches on the :id param; confirm in routes.GetStudentDetails.
	router.GET("/student/:id", routes.GetStudentDetails)
	// Connect to Database
	// database.ConnectToDB()
	// Starting the GIN server
	router.Run()
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
)
// main loads config.dev.json and exports every entry as a process
// environment variable; a malformed config aborts the program.
func main() {
	envVars, err := parseJSONFile("config.dev.json")
	if err != nil {
		log.Fatal("Failed to parse json config", err)
	}
	for key, value := range envVars {
		log.Printf("Set key %s and value %s", key, value)
		if err := os.Setenv(key, value); err != nil {
			log.Println("Failed to set env var")
		}
	}
}
// all parsed data will converted into map[string]string so it can be processed fruther using os.SetEnv
func parseJSONFile(filepath string) (map[string]string, error) {
jsonContent, err := ioutil.ReadFile(filepath)
if err != nil {
return nil, err
}
vars := make(map[string]interface{})
err = json.Unmarshal(jsonContent, &vars)
if err != nil {
return nil, err
}
sanitzedVars := make(map[string]string)
for key := range vars {
switch vars[key].(type) {
case string:
// do something with string
sanitzedVars[key] = vars[key].(string)
default:
// do something if not string
return nil, fmt.Errorf("Key %s have non 'string' value data type. Value is: %v", key, vars[key])
}
}
return sanitzedVars, nil
}
|
package sheets
import (
"strings"
"testing"
)
// configTests drives TestNewServiceAccountClient: each entry is a raw
// service-account JSON config plus whether building a client from it
// should fail. The private key below is deliberately fake.
var configTests = []struct {
	config string // raw JSON service-account config
	errExpected bool // true if the constructor should reject it
}{
	// Empty object: required fields are missing, so an error is expected.
	{"{}", true},
	// Structurally complete (but non-functional) config: should parse cleanly.
	{`{
	"type": "service_account",
	"project_id": "testproject-123456",
	"private_key_id": "abcdef",
	"private_key": "-----BEGIN PRIVATE KEY-----\nnotarealkey\n-----END PRIVATE KEY-----\n",
	"client_email": "robot@testproject-123456.iam.gserviceaccount.com",
	"client_id": "115842414406405072982",
	"auth_uri": "https://accounts.google.com/o/oauth2/auth",
	"token_uri": "https://accounts.google.com/o/oauth2/token",
	"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
	"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/robot%40testproject-123456.iam.gserviceaccount.com"
}`, false},
}
// TestNewServiceAccountClient runs the configTests table through
// NewServiceAccountClientFromReader and checks each error expectation.
func TestNewServiceAccountClient(t *testing.T) {
	for _, tc := range configTests {
		_, err := NewServiceAccountClientFromReader(strings.NewReader(tc.config))
		switch {
		case tc.errExpected && err == nil:
			t.Error("Expected error, but got none")
		case !tc.errExpected && err != nil:
			t.Errorf("Unexpected error: %v", err)
		}
	}
}
|
package controllers
import (
"lili_style_test/src/models"
"lili_style_test/src/utils"
)
// GetBusinessStanceData scores the business-stance section of the survey.
// It takes the 13 answers at userdata[70:83], splits them into the group
// where "yes" adds a point (questions 1-11 and 13) and the group where
// "no" adds a point (question 12), converts them to points via utils, and
// attaches the resulting rate to the raw BusinessStance answers.
func GetBusinessStanceData(userdata []string) models.BusinessStance {
	answer := userdata[70:83]
	// Answers where "yes" adds a point (はいで加点).
	// The full slice expression caps yesAdd at its own length so the append
	// below allocates a fresh array instead of overwriting answer[11] — the
	// "no adds a point" answer — in the shared backing array (the previous
	// code clobbered answer[11] with answer[12] before noAdd read it).
	yesAdd := answer[0:11:11]
	yesAdd = append(yesAdd, answer[12])
	// Answer where "no" adds a point (いいえで加点).
	var noAdd []string
	noAdd = append(noAdd, answer[11])
	yesPointAnswer := utils.ParsedAnswerYesAdd(yesAdd)
	noPointAnswer := utils.ParsedAnswerNoAdd(noAdd)
	ParsedAnswer := append(yesPointAnswer, noPointAnswer...)
	answerInt := utils.ParsedRate(ParsedAnswer, 13)
	BusinessStance := GetBusinessStanceAnswer(userdata)
	BusinessStance.Rate = answerInt
	return BusinessStance
}
// GetBusinessStanceAnswer copies the 13 raw business-stance answers
// (user[70] through user[82]) into a BusinessStance struct, one field
// per question in order.
func GetBusinessStanceAnswer(user []string) models.BusinessStance {
	return models.BusinessStance{
		One: user[70],
		Two: user[71],
		Three: user[72],
		Four: user[73],
		Five: user[74],
		Six: user[75],
		Seven: user[76],
		Eight: user[77],
		Nine: user[78],
		Ten: user[79],
		Eleven: user[80],
		Twelve: user[81],
		Thirteen: user[82],
	}
}
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package globalconn_test
import (
"fmt"
"math"
"runtime"
"testing"
"github.com/pingcap/tidb/util/globalconn"
"github.com/stretchr/testify/assert"
)
// TestToConnID checks GCID.ToConnID's bit packing for the 64-bit and
// 32-bit connection-ID layouts, and that it panics when ServerID or
// LocalConnID overflow the bits available in the chosen layout.
func TestToConnID(t *testing.T) {
	assert := assert.New(t)
	type Case struct {
		gcid globalconn.GCID
		shouldPanic bool
		expected uint64
	}
	cases := []Case{
		// 64-bit layout: serverID<<41 | localConnID<<1 | 1 (low bit marks 64-bit IDs).
		{
			gcid: globalconn.GCID{
				Is64bits: true,
				ServerID: 1001,
				LocalConnID: 123,
			},
			shouldPanic: false,
			expected: (uint64(1001) << 41) | (uint64(123) << 1) | 1,
		},
		// 64-bit layout: ServerID of 1<<22 overflows its field -> panic.
		{
			gcid: globalconn.GCID{
				Is64bits: true,
				ServerID: 1 << 22,
				LocalConnID: 123,
			},
			shouldPanic: true,
			expected: 0,
		},
		// 64-bit layout: LocalConnID of 1<<40 overflows its field -> panic.
		{
			gcid: globalconn.GCID{
				Is64bits: true,
				ServerID: 1001,
				LocalConnID: 1 << 40,
			},
			shouldPanic: true,
			expected: 0,
		},
		// 32-bit layout: serverID<<21 | localConnID<<1 (low bit clear).
		{
			gcid: globalconn.GCID{
				Is64bits: false,
				ServerID: 1001,
				LocalConnID: 123,
			},
			shouldPanic: false,
			expected: (uint64(1001) << 21) | (uint64(123) << 1),
		},
		// 32-bit layout: ServerID of 1<<11 overflows its field -> panic.
		{
			gcid: globalconn.GCID{
				Is64bits: false,
				ServerID: 1 << 11,
				LocalConnID: 123,
			},
			shouldPanic: true,
			expected: 0,
		},
		// 32-bit layout: LocalConnID of 1<<20 overflows its field -> panic.
		{
			gcid: globalconn.GCID{
				Is64bits: false,
				ServerID: 1001,
				LocalConnID: 1 << 20,
			},
			shouldPanic: true,
			expected: 0,
		},
	}
	for _, c := range cases {
		if c.shouldPanic {
			assert.Panics(func() {
				c.gcid.ToConnID()
			})
		} else {
			assert.Equal(c.expected, c.gcid.ToConnID())
		}
	}
}
// TestGlobalConnID checks ParseConnID: rejection of IDs outside the valid
// ranges, truncation detection for small 64-bit IDs, and round-tripping of
// well-formed 64-bit (low bit set) and 32-bit IDs back to their fields.
func TestGlobalConnID(t *testing.T) {
	assert := assert.New(t)
	var (
		err error
		isTruncated bool
	)
	// IDs with the sign bit set (exceeding int64) are rejected.
	_, _, err = globalconn.ParseConnID(0x80000000_00000321)
	assert.NotNil(err)
	// A small ID with the 64-bit marker bit parses but is flagged truncated.
	_, isTruncated, err = globalconn.ParseConnID(101)
	assert.Nil(err)
	assert.True(isTruncated)
	// Well-formed 64-bit ID: serverID<<41 | localConnID<<1 | 1.
	id1 := (uint64(1001) << 41) | (uint64(123) << 1) | 1
	gcid1, isTruncated, err := globalconn.ParseConnID(id1)
	assert.Nil(err)
	assert.False(isTruncated)
	assert.Equal(uint64(1001), gcid1.ServerID)
	assert.Equal(uint64(123), gcid1.LocalConnID)
	assert.True(gcid1.Is64bits)
	// 32-bit IDs exceeding uint32 are rejected.
	_, _, err = globalconn.ParseConnID(0x1_00000320)
	assert.NotNil(err)
	// Well-formed 32-bit ID: serverID<<21 | localConnID<<1 (low bit clear).
	id2 := (uint64(2002) << 21) | (uint64(321) << 1)
	gcid2, isTruncated, err := globalconn.ParseConnID(id2)
	assert.Nil(err)
	assert.False(isTruncated)
	assert.Equal(uint64(2002), gcid2.ServerID)
	assert.Equal(uint64(321), gcid2.LocalConnID)
	assert.False(gcid2.Is64bits)
	// Round trip: re-encoding the parsed GCID yields the original ID.
	assert.Equal(gcid2.ToConnID(), id2)
}
// TestGetReservedConnID checks that reserved connection IDs are handed out
// from the top of the ID space downwards, for both the simple allocator
// (full uint64 space) and the 64-bit global allocator (top of the
// 40-bit local-conn-ID range, packed with the server ID).
func TestGetReservedConnID(t *testing.T) {
	assert := assert.New(t)
	simpleAlloc := globalconn.NewSimpleAllocator()
	assert.Equal(math.MaxUint64-uint64(0), simpleAlloc.GetReservedConnID(0))
	assert.Equal(math.MaxUint64-uint64(1), simpleAlloc.GetReservedConnID(1))
	serverID := func() uint64 {
		return 1001
	}
	globalAlloc := globalconn.NewGlobalAllocator(serverID, true)
	// Largest value representable in the 40-bit local-conn-ID field.
	var maxLocalConnID uint64 = 1<<40 - 1
	assert.Equal(uint64(1001)<<41|(maxLocalConnID)<<1|1, globalAlloc.GetReservedConnID(0))
	assert.Equal(uint64(1001)<<41|(maxLocalConnID-1)<<1|1, globalAlloc.GetReservedConnID(1))
}
// benchmarkLocalConnIDAllocator32 performs one allocate/release cycle
// against pool, spinning (yielding with Gosched) until an ID is available.
func benchmarkLocalConnIDAllocator32(b *testing.B, pool globalconn.IDPool) {
	var (
		id uint64
		ok bool
	)
	// allocate local conn ID.
	for {
		if id, ok = pool.Get(); ok {
			break
		}
		// Pool temporarily exhausted under parallelism; let others release.
		runtime.Gosched()
	}
	// deallocate local conn ID.
	if ok = pool.Put(id); !ok {
		b.Fatal("pool unexpected full")
	}
}
// BenchmarkLocalConnIDAllocator compares local-conn-ID pool implementations
// (64-bit auto-increment pool vs. lock-based and lock-free 32-bit circular
// pools) across several parallelism levels.
// LockBasedCircularPool is declared elsewhere in this package.
func BenchmarkLocalConnIDAllocator(b *testing.B) {
	b.ReportAllocs()
	concurrencyCases := []int{1, 3, 10, 20, 100}
	for _, concurrency := range concurrencyCases {
		b.Run(fmt.Sprintf("Allocator 64 x%v", concurrency), func(b *testing.B) {
			pool := globalconn.AutoIncPool{}
			pool.InitExt(1<<globalconn.LocalConnIDBits64, true, globalconn.LocalConnIDAllocator64TryCount)
			b.SetParallelism(concurrency)
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				for pb.Next() {
					id, ok := pool.Get()
					if !ok {
						b.Fatal("AutoIncPool.Get() failed.")
					}
					pool.Put(id)
				}
			})
		})
		b.Run(fmt.Sprintf("Allocator 32(LockBased) x%v", concurrency), func(b *testing.B) {
			pool := LockBasedCircularPool{}
			pool.InitExt(1<<globalconn.LocalConnIDBits32, math.MaxUint32)
			b.SetParallelism(concurrency)
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				for pb.Next() {
					benchmarkLocalConnIDAllocator32(b, &pool)
				}
			})
		})
		b.Run(fmt.Sprintf("Allocator 32(LockFreeCircularPool) x%v", concurrency), func(b *testing.B) {
			pool := globalconn.LockFreeCircularPool{}
			pool.InitExt(1<<globalconn.LocalConnIDBits32, math.MaxUint32)
			b.SetParallelism(concurrency)
			b.ResetTimer()
			b.RunParallel(func(pb *testing.PB) {
				for pb.Next() {
					benchmarkLocalConnIDAllocator32(b, &pool)
				}
			})
		})
	}
}
|
package slack
import (
"context"
"net/url"
)
// RotateTokens exchanges a refresh token for a new app configuration token.
// Empty arguments fall back to the tokens cached on the client; the cache
// itself is not updated — pass the result to UpdateConfigTokens for that.
func (api *Client) RotateTokens(configToken string, refreshToken string) (*TokenResponse, error) {
	return api.RotateTokensContext(context.Background(), configToken, refreshToken)
}
// RotateTokensContext exchanges a refresh token for a new app configuration
// token with a custom context. When configToken or refreshToken is empty,
// the values cached on the client are used instead. The client's cached
// tokens are NOT updated automatically; call UpdateConfigTokens with the
// response to persist them.
func (api *Client) RotateTokensContext(ctx context.Context, configToken string, refreshToken string) (*TokenResponse, error) {
	// Fall back to the cached credentials for any argument left empty.
	if configToken == "" {
		configToken = api.configToken
	}
	if refreshToken == "" {
		refreshToken = api.configRefreshToken
	}
	values := url.Values{
		"refresh_token": {refreshToken},
	}
	response := &TokenResponse{}
	err := api.getMethod(ctx, "tooling.tokens.rotate", configToken, values, response)
	if err != nil {
		return nil, err
	}
	// Surface API-level errors (response.Err) as well as transport errors.
	return response, response.Err()
}
// UpdateConfigTokens replaces the configuration tokens cached in the client
// with those returned by the API (see RotateTokens / RotateTokensContext).
func (api *Client) UpdateConfigTokens(response *TokenResponse) {
	api.configToken = response.Token
	api.configRefreshToken = response.RefreshToken
}
// TokenResponse is the Slack API response for tooling.tokens.rotate,
// carrying the new configuration token pair and its validity window.
type TokenResponse struct {
	Token string `json:"token,omitempty"` // new app configuration token
	RefreshToken string `json:"refresh_token,omitempty"` // token for the next rotation
	TeamId string `json:"team_id,omitempty"`
	UserId string `json:"user_id,omitempty"`
	IssuedAt uint64 `json:"iat,omitempty"` // issue time (Unix seconds, per JWT "iat" naming)
	ExpiresAt uint64 `json:"exp,omitempty"` // expiry time (Unix seconds, per JWT "exp" naming)
	SlackResponse
}
|
package main
import (
"bufio"
"fmt"
"log"
"net"
"strconv"
"sync"
"time"
)
// main starts the chat server: one broadcaster goroutine fans messages out
// to all connected users, and each accepted TCP connection (port 6430) is
// served by its own handleConn goroutine.
func main() {
	listener, err := net.Listen("tcp", ":6430")
	if err != nil {
		panic(err)
	}
	go broadcaster()
	for {
		conn, err := listener.Accept()
		if err != nil {
			// Accept failure: log and keep serving other connections.
			log.Println(err)
			continue
		}
		go handleConn(conn)
	}
}
// User is one connected chat participant.
type User struct {
	ID uint64 // unique ID assigned by GenUserID
	Addr string // remote TCP address
	EnterAt time.Time // time the connection was accepted
	MessageChannel chan string // outbound messages for this user
}

// Message is a chat message tagged with its author's ID so the broadcaster
// can avoid echoing it back to the sender.
type Message struct {
	OwnerID uint64
	Content string
}
// String renders the user as "addr, UID:n, Enter At:timestamp".
func (u *User) String() string {
	// The previous layout "2006-01-01 15:04:05+8000" was not a valid Go
	// reference layout: the day slot held "01" (the month code), so the day
	// of month was printed as the month, and "+8000" is not a recognized
	// numeric-zone element. "2006-01-02 15:04:05 -0700" is the correct form.
	return u.Addr + ", UID:" + strconv.FormatUint(u.ID, 10) + ", Enter At:" +
		u.EnterAt.Format("2006-01-02 15:04:05 -0700")
}
// Coordination channels owned by the broadcaster goroutine.
var (
	enteringChannel = make(chan *User) // a new user has joined
	leavingChannel = make(chan *User) // a user has left
	messageChannel = make(chan Message, 8) // broadcast queue (small buffer decouples senders)
)
// broadcaster is the single goroutine that owns the chat-room user registry
// and performs message fan-out. It serializes three events:
//   - a new user entering,
//   - a user leaving (its outbound channel is closed here),
//   - an ordinary message, forwarded to everyone except its author.
func broadcaster() {
	users := make(map[*User]struct{})
	for {
		select {
		case user := <-enteringChannel:
			users[user] = struct{}{}
		case user := <-leavingChannel:
			delete(users, user)
			// Closing the channel stops the user's sendMessage goroutine.
			close(user.MessageChannel)
		case msg := <-messageChannel:
			for user := range users {
				if user.ID == msg.OwnerID { // don't echo the message back to its sender
					continue
				}
				// NOTE(review): this send blocks once a user's 8-slot buffer
				// is full, stalling broadcasts to everyone — confirm whether
				// slow clients should be dropped instead.
				user.MessageChannel <- msg.Content
			}
		}
	}
}
// handleConn serves one chat connection: it registers the user with the
// broadcaster, relays each input line as a chat message, and enforces a
// one-minute inactivity timeout.
func handleConn(conn net.Conn) {
	defer conn.Close()
	user := &User{
		ID: GenUserID(),
		Addr: conn.RemoteAddr().String(),
		EnterAt: time.Now(),
		MessageChannel: make(chan string, 8),
	}
	// Writer goroutine: drains the user's outbound channel onto the socket.
	// It exits when the broadcaster closes MessageChannel after the user leaves.
	go sendMessage(conn, user.MessageChannel)
	// Welcome message goes directly to this user's own channel.
	user.MessageChannel <- "Welcome to ECHO, " + user.String()
	msg := Message{
		OwnerID: user.ID,
		Content: fmt.Sprintf("[%d] has enter", user.ID),
	}
	messageChannel <- msg
	enteringChannel <- user
	// Inactivity watchdog: closes the connection after one minute without
	// input. done lets the watchdog goroutine exit when this handler
	// returns — previously it had no exit path and leaked one goroutine
	// per connection for the life of the process.
	done := make(chan struct{})
	defer close(done)
	userActive := make(chan struct{})
	go func() {
		d := time.Minute
		timer := time.NewTimer(d)
		defer timer.Stop()
		for {
			select {
			case <-timer.C:
				conn.Close()
			case <-userActive:
				timer.Reset(d)
			case <-done:
				return
			}
		}
	}()
	input := bufio.NewScanner(conn)
	for input.Scan() {
		msg.Content = strconv.FormatUint(user.ID, 10) + ":" + input.Text()
		messageChannel <- msg
		// Mark the user active so the watchdog resets its timer.
		userActive <- struct{}{}
	}
	if err := input.Err(); err != nil {
		log.Println("读取错误", err)
	}
	leavingChannel <- user
	msg.Content = fmt.Sprintf("[%d] has left", user.ID)
	messageChannel <- msg
}
func sendMessage(conn net.Conn, ch <-chan string) {
for msg := range ch {
fmt.Fprintln(conn, msg)
}
}
// idLock guards globalID, the last user ID handed out.
var (
	idLock sync.Mutex
	globalID uint64
)

// GenUserID returns the next user ID (1, 2, 3, ...); safe for concurrent use.
func GenUserID() uint64 {
	idLock.Lock()
	id := globalID + 1
	globalID = id
	idLock.Unlock()
	return id
}
|
package leetcode
// twoSum returns the indices [j, i] (j < i) of the first pair of elements
// summing to target, scanning i in ascending order and picking the smallest
// matching j. If no pair exists it returns [0, 0], preserving the original
// contract. Runs in O(n) time and O(n) space via a value→first-index map,
// replacing the previous O(n²) double loop.
func twoSum(nums []int, target int) []int {
	index := []int{0, 0}
	// seen maps each value to the smallest index at which it occurs.
	seen := make(map[int]int, len(nums))
	for i, v := range nums {
		if j, ok := seen[target-v]; ok {
			index[0] = j
			index[1] = i
			return index
		}
		// Record only the first occurrence so j is always the smallest match.
		if _, dup := seen[v]; !dup {
			seen[v] = i
		}
	}
	return index
}
|
package main
import "fmt"
// main prints the element that appears exactly once in the sample slice.
func main() {
	fmt.Println(singleNumber([]int{0, 1, 0, 1, 0, 1, 99}))
}
// singleNumber returns the element of nums that occurs exactly once
// (every other element is assumed to occur more than once). It returns 0
// when no such element exists, matching the previous behavior.
func singleNumber(nums []int) int {
	// Count occurrences of each value.
	counts := make(map[int]int, len(nums))
	for _, v := range nums {
		counts[v]++
	}
	for k, v := range counts {
		if v == 1 {
			// Early return: the old version kept scanning the whole map
			// after the answer was already found.
			return k
		}
	}
	return 0
}
|
package service
import (
"context"
"fmt"
"io"
"net/http"
"time"
pbCQRS "github.com/go-ocf/cloud/resource-aggregate/pb"
pbRA "github.com/go-ocf/cloud/resource-aggregate/pb"
pbRD "github.com/go-ocf/cloud/resource-directory/pb/resource-directory"
"github.com/go-ocf/kit/log"
kitNetGrpc "github.com/go-ocf/kit/net/grpc"
"github.com/valyala/fasthttp"
)
// DeviceResources groups one device's resources, keyed by resource ID.
type DeviceResources struct {
	Resources map[string]*pbRA.Resource `json:"resources"`
}

// GetResourceLinksResponse is the JSON payload written by listResources:
// resources grouped per device ID.
type GetResourceLinksResponse struct {
	Devices map[string]*DeviceResources `json:"devices"`
}
// listResources streams every resource link owned by user sub from the
// resource directory (authorized with token) and writes them back as JSON,
// grouped by device ID. Any failure is reported as HTTP 400.
func (r *RequestHandler) listResources(ctx *fasthttp.RequestCtx, token, sub string) {
	log.Debugf("RequestHandler.listResources start")
	t := time.Now()
	defer func() {
		log.Debugf("RequestHandler.listResources takes %v", time.Since(t))
	}()
	// Open the server-side stream; the token travels in the gRPC context.
	getResourceLinksClient, err := r.rdClient.GetResourceLinks(kitNetGrpc.CtxWithToken(context.Background(), token), &pbRD.GetResourceLinksRequest{
		AuthorizationContext: &pbCQRS.AuthorizationContext{
			UserId: sub,
		},
	})
	if err != nil {
		logAndWriteErrorResponse(fmt.Errorf("cannot list resource directory: %v", err), http.StatusBadRequest, ctx)
		return
	}
	defer getResourceLinksClient.CloseSend()
	response := GetResourceLinksResponse{
		Devices: make(map[string]*DeviceResources),
	}
	// Drain the stream, bucketing each resource under its device ID.
	for {
		resLink, err := getResourceLinksClient.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			logAndWriteErrorResponse(fmt.Errorf("cannot list device directory: %v", err), http.StatusBadRequest, ctx)
			return
		}
		device, ok := response.Devices[resLink.Resource.DeviceId]
		if !ok {
			device = &DeviceResources{
				Resources: make(map[string]*pbRA.Resource),
			}
			response.Devices[resLink.Resource.DeviceId] = device
		}
		device.Resources[resLink.Resource.Id] = resLink.Resource
	}
	writeJson(response, fasthttp.StatusOK, ctx)
}
|
package main
import (
"context"
"flag"
"fmt"
"net/http"
_ "net/http/pprof"
"os"
"os/signal"
"path"
"syscall"
"github.com/gin-gonic/gin"
"github.com/lizhaoliu/konsen/v2/core"
"github.com/lizhaoliu/konsen/v2/rpc"
"github.com/lizhaoliu/konsen/v2/store"
"github.com/lizhaoliu/konsen/v2/web/httpserver"
"github.com/sirupsen/logrus"
)
var (
clusterConfigPath string
dbDir string
)
// init parses the command-line flags and configures global logging and gin
// before main runs. Both flags are required; missing values are fatal.
// NOTE(review): flag.Parse in init also fires under `go test`; consider
// moving flag handling into main.
func init() {
	flag.StringVar(&clusterConfigPath, "cluster_config_path", "", "Cluster configuration file path.")
	flag.StringVar(&dbDir, "db_dir", "db", "Local database directory path.")
	flag.Parse()
	if clusterConfigPath == "" {
		logrus.Fatalf("cluster_config_path is unspecified.")
	}
	if dbDir == "" {
		logrus.Fatalf("db_dir is unspecified.")
	}
	logrus.SetOutput(os.Stdout)
	logrus.SetFormatter(&logrus.TextFormatter{
		FullTimestamp: true,
	})
	logrus.SetLevel(logrus.InfoLevel)
	gin.SetMode(gin.ReleaseMode)
}
// createClients builds one Raft gRPC client per remote server in the
// cluster, keyed by server name; the local server itself is skipped.
func createClients(cluster *core.ClusterConfig) (map[string]core.RaftService, error) {
	clients := make(map[string]core.RaftService)
	for server, endpoint := range cluster.Servers {
		if server == cluster.LocalServerName {
			continue
		}
		client, err := rpc.NewRaftGRPCClient(rpc.RaftGRPCClientConfig{
			Endpoint: endpoint,
		})
		if err != nil {
			return nil, fmt.Errorf("failed to create GRPC client: %v", err)
		}
		clients[server] = client
	}
	return clients, nil
}
// main assembles and runs a konsen Raft node: badger-backed storage, gRPC
// clients to peers, the state machine, a Raft gRPC server, an HTTP server,
// and a pprof endpoint. It then blocks until SIGINT/SIGTERM and shuts the
// components down in order.
func main() {
	ctx := context.Background()
	cluster, err := core.ParseClusterConfig(clusterConfigPath)
	if err != nil {
		logrus.Fatalf("%v", err)
	}
	if err := os.MkdirAll(dbDir, 0755); err != nil {
		logrus.Fatalf("Failed to create dir: %v", err)
	}
	// Persistent log and state live in separate subdirectories.
	storage, err := store.NewBadger(store.BadgerConfig{
		LogDir: path.Join(dbDir, "logs"),
		StateDir: path.Join(dbDir, "state"),
	})
	if err != nil {
		logrus.Fatalf("%v", err)
	}
	// NOTE(review): Fatalf below this point exits without closing storage;
	// badger presumably recovers on restart — confirm.
	clients, err := createClients(cluster)
	if err != nil {
		logrus.Fatalf("%v", err)
	}
	sm, err := core.NewStateMachine(core.StateMachineConfig{
		Storage: storage,
		Cluster: cluster,
		Clients: clients,
	})
	if err != nil {
		logrus.Fatalf("Failed to create state machine: %v", err)
	}
	raftServer := rpc.NewRaftGRPCServer(rpc.RaftGRPCServerConfig{
		Endpoint: cluster.Servers[cluster.LocalServerName],
		StateMachine: sm,
	})
	httpSrv := httpserver.NewServer(httpserver.ServerConfig{
		StateMachine: sm,
		Address: cluster.HttpServers[cluster.LocalServerName],
	})
	// Raft RPC server.
	go func() {
		if err := raftServer.Serve(); err != nil {
			logrus.Fatalf("%v", err)
		}
	}()
	// State machine main loop.
	go func() {
		sm.Run(ctx)
	}()
	// Web/API server.
	go func() {
		if err := httpSrv.Run(); err != nil {
			logrus.Fatalf("Failed to start HTTP server: %v", err)
		}
	}()
	// Starts pprof server.
	go func() {
		if err := http.ListenAndServe(":6060", nil); err != nil {
			logrus.Errorf("Failed to start pprof server: %v", err)
		}
	}()
	// Block until the process is asked to terminate, then shut down.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
	<-sigCh
	sm.Close()
	raftServer.Stop()
	storage.Close()
}
|
package memrepo
import (
"github.com/scjalliance/drivestream/commit"
"github.com/scjalliance/drivestream/resource"
)
// Compile-time check that CommitState implements commit.StateReference.
var _ commit.StateReference = (*CommitState)(nil)

// CommitState is a reference to a commit state: it addresses one state
// (drive, commit, state number) within an in-memory Repository.
type CommitState struct {
	repo *Repository
	drive resource.ID
	commit commit.SeqNum
	state commit.StateNum
}
// StateNum returns the state number of the reference.
func (ref CommitState) StateNum() commit.StateNum {
	return ref.state
}
// Create creates the commit state with the given data. The referenced drive
// and commit must exist, and the reference's state number must be exactly
// the next unused one; otherwise a typed error (NotFound / StateOutOfOrder)
// is returned and nothing is modified.
func (ref CommitState) Create(data commit.State) error {
	drv, ok := ref.repo.drives[ref.drive]
	if !ok {
		return commit.NotFound{Drive: ref.drive, Commit: ref.commit}
	}
	if ref.commit >= commit.SeqNum(len(drv.Commits)) {
		return commit.NotFound{Drive: ref.drive, Commit: ref.commit}
	}
	// States must be appended strictly in order.
	expected := commit.StateNum(len(drv.Commits[ref.commit].States))
	if ref.state != expected {
		return commit.StateOutOfOrder{Drive: ref.drive, Commit: ref.commit, State: ref.state, Expected: expected}
	}
	drv.Commits[ref.commit].States = append(drv.Commits[ref.commit].States, data)
	// drv is a value copied out of the map, so write it back to persist the
	// (possibly reallocated) States slice header.
	ref.repo.drives[ref.drive] = drv
	return nil
}
// Data returns the commit state data, or a typed error (NotFound /
// StateNotFound) when the drive, commit, or state does not exist.
func (ref CommitState) Data() (data commit.State, err error) {
	drv, ok := ref.repo.drives[ref.drive]
	// The cases are checked in order, so later index expressions are only
	// evaluated once the earlier existence checks have passed.
	switch {
	case !ok:
		return commit.State{}, commit.NotFound{Drive: ref.drive, Commit: ref.commit}
	case ref.commit >= commit.SeqNum(len(drv.Commits)):
		return commit.State{}, commit.NotFound{Drive: ref.drive, Commit: ref.commit}
	case ref.state >= commit.StateNum(len(drv.Commits[ref.commit].States)):
		return commit.State{}, commit.StateNotFound{Drive: ref.drive, Commit: ref.commit, State: ref.state}
	}
	return drv.Commits[ref.commit].States[ref.state], nil
}
|
package riak
import (
"github.com/bmizerany/assert"
"testing"
"time"
)
// DocumentModel is the basic test model: two tagged fields (renamed in the
// stored JSON via riak tags) and one untagged field that keeps its Go name.
type DocumentModel struct {
	FieldS string `riak:"string_field"`
	FieldF float64 `riak:"float_field"`
	FieldB bool
	Model
}
// TestModel exercises the document-model lifecycle against a running Riak
// node: create, save, stored-JSON check, load, delete, and key management.
func TestModel(t *testing.T) {
	// Preparations
	client := setupConnection(t)
	assert.T(t, client != nil)
	// Create a new "DocumentModel" and save it
	doc := DocumentModel{FieldS: "text", FieldF: 1.2, FieldB: true}
	err := client.New("testmodel.go", "TestModelKey", &doc)
	assert.T(t, err == nil)
	err = doc.Save()
	assert.T(t, err == nil)
	// Check that the JSON is correct (tagged names for FieldS/FieldF, Go name for FieldB)
	t.Logf(string(doc.robject.Data))
	assert.T(t, `{"string_field":"text","float_field":1.2,"FieldB":true}` == string(doc.robject.Data))
	// Load it from Riak and check that the fields of the DocumentModel struct are set correctly
	doc2 := DocumentModel{}
	err = client.Load("testmodel.go", "TestModelKey", &doc2)
	assert.T(t, err == nil)
	assert.T(t, doc2.FieldS == doc.FieldS)
	assert.T(t, doc2.FieldF == doc.FieldF)
	assert.T(t, doc2.FieldB == doc.FieldB)
	// Cleanup
	err = doc2.Delete()
	assert.T(t, err == nil)
	// Get the key
	key, err := client.Key(&doc2)
	assert.T(t, err == nil)
	assert.T(t, key == "TestModelKey")
	// Set it differently
	err = client.SetKey("newTestModelKey", &doc2)
	assert.T(t, err == nil)
	// And test that it changed by getting it again
	key, err = client.Key(&doc2)
	assert.T(t, err == nil)
	assert.T(t, key == "newTestModelKey")
	// Test Delete(): loading the deleted key must yield NotFound
	doc3 := DocumentModel{}
	err = client.Load("testmodel.go", "TestModelKey", &doc3)
	assert.T(t, err == NotFound)
}
// DocumentModelWithLinks tests link handling: ALink carries an explicit
// link tag, while BLink falls back to its own field name as the tag.
type DocumentModelWithLinks struct {
	FieldS string
	ALink One `riak:"tag_as_parent"`
	BLink One // Will automatically use own name as a tag when linking
	Model
}
// TestModelWithLinks saves a parent document, links a child to it twice
// (tagged and untagged), and verifies the links round-trip and that the
// parent can be fetched back through a loaded link.
func TestModelWithLinks(t *testing.T) {
	// Preparations
	client := setupConnection(t)
	assert.T(t, client != nil)
	// Create a new "DocumentModel" to use as a parent and save it
	parent := DocumentModel{FieldS: "text", FieldF: 1.2, FieldB: true}
	err := client.New("testmodel.go", "TestModelKey", &parent)
	assert.T(t, err == nil)
	//err = client.Save(&doc)
	err = parent.Save()
	assert.T(t, err == nil)
	// Create a new DocumentModelWithLinks and save it, adding a link to the parent
	doc := DocumentModelWithLinks{FieldS: "textinlinked", ALink: One{model: &parent}}
	err = doc.BLink.Set(&parent) // testing One.Set while we're at it
	assert.T(t, err == nil)
	err = client.New("testmodellinks.go", "TestModelKey", &doc)
	assert.T(t, err == nil)
	//err = client.Save(&doc)
	err = doc.Save()
	assert.T(t, err == nil)
	// Load it from Riak and check that the fields of the struct are set correctly, including the link to the parent
	doc2 := DocumentModelWithLinks{}
	err = client.Load("testmodellinks.go", "TestModelKey", &doc2)
	assert.T(t, err == nil)
	assert.T(t, doc2.FieldS == doc.FieldS)
	assert.T(t, doc2.ALink.model == nil) // Related documents are not loaded automatically, only the link is populated
	assert.T(t, doc2.ALink.link.Tag == "tag_as_parent")
	assert.T(t, doc2.BLink.link.Tag == "BLink")
	t.Logf("Testing DocumentModelWithLinks - One - %v - %v\n", doc2.ALink.model, doc2.ALink.link)
	t.Logf("Testing DocumentModelWithLinks - One - %v - %v\n", doc2.BLink.model, doc2.BLink.link)
	// Load the parent from the link
	parent2 := DocumentModel{}
	err = doc2.ALink.Get(&parent2)
	assert.T(t, err == nil)
	assert.T(t, parent.FieldS == parent2.FieldS)
	assert.T(t, parent.FieldF == parent2.FieldF)
	assert.T(t, parent.FieldB == parent2.FieldB)
	assert.T(t, parent.Key() == parent2.Key())
	// Cleanup
	bucket, _ := client.Bucket("testmodel.go")
	err = bucket.Delete("TestModelKey")
	assert.T(t, err == nil)
	bucket, _ = client.Bucket("testmodellinks.go")
	err = bucket.Delete("TestModelKey")
	assert.T(t, err == nil)
}
// FriendLinks tests one-to-many links; the (old-style string) struct tag
// "friend" is the link tag shared by every entry in Friends.
type FriendLinks struct {
	Friends Many "friend"
	Model
}
// TestModelWithManyLinks saves two friend documents, links both from a
// FriendLinks document (one via literal, one via Many.Add), and verifies
// that a reload yields both links and that each target can be fetched.
func TestModelWithManyLinks(t *testing.T) {
	// Preparations
	client := setupConnection(t)
	assert.T(t, client != nil)
	// Create two new "DocumentModel"s to use as friends and save it
	f1 := DocumentModel{FieldS: "friend1", FieldF: 1.0, FieldB: true}
	err := client.New("testmodel.go", "f1", &f1)
	assert.T(t, err == nil)
	err = f1.Save()
	assert.T(t, err == nil)
	f2 := DocumentModel{FieldS: "friend2", FieldF: 2.0, FieldB: true}
	err = client.New("testmodel.go", "f2", &f2)
	assert.T(t, err == nil)
	err = f2.Save()
	assert.T(t, err == nil)
	// Create a new "FriendLinks" to and save it
	doc := FriendLinks{Friends: Many{One{model: &f1}}}
	// Testing Many.Add while we're at it.
	err = doc.Friends.Add(&f2)
	assert.T(t, err == nil)
	err = client.New("testmodel.go", "TestMany", &doc)
	assert.T(t, err == nil)
	// NOTE(review): the error from this Save is never asserted on.
	err = doc.Save()
	t.Logf("Friends json - %v\n", string(doc.robject.Data))
	// Now load a new document and verify it has two links
	var doc2 FriendLinks
	err = client.Load("testmodel.go", "TestMany", &doc2)
	assert.T(t, err == nil)
	assert.T(t, len(doc2.Friends) == 2)
	for i, v := range doc2.Friends {
		var f DocumentModel
		err = v.Get(&f)
		assert.T(t, err == nil)
		t.Logf("TestingModelWithManyLinks - %v - %v - %v\n", i, v, f)
	}
}
// ComplexModel carries a flag map and resolves sibling conflicts by OR-ing
// the flags of all siblings together.
type ComplexModel struct {
	Flags map[string]bool `riak:"a"`
	Model
}

// Resolve merges count conflicting siblings into the receiver: any flag
// that is true in any sibling becomes true in c.Flags. Presumably invoked
// by the model layer when allow_mult produces siblings — see
// TestConflictingModel. Assumes c.Flags is non-nil (writes would panic
// otherwise — TODO confirm the loader guarantees this).
func (c *ComplexModel) Resolve(count int) (err error) {
	siblings := make([]ComplexModel, count)
	err = c.GetSiblings(siblings)
	if err != nil {
		return err
	}
	for _, s := range siblings {
		for key, flag := range s.Flags {
			if flag {
				c.Flags[key] = true
			}
		}
	}
	return
}
// TestConflictingModel writes two conflicting values under one key in an
// allow_mult bucket and verifies that loading triggers Resolve, which ORs
// the two flag maps together.
func TestConflictingModel(t *testing.T) {
	// Preparations
	client := setupConnection(t)
	assert.T(t, client != nil)
	// Create a bucket where siblings are allowed
	bucket, err := client.Bucket("testconflict.go")
	assert.T(t, err == nil)
	err = bucket.SetAllowMult(true)
	assert.T(t, err == nil)
	// Delete earlier work ...
	err = bucket.Delete("TestModelKey")
	assert.T(t, err == nil)
	// Create a new "ComplexModel" and save it
	m1 := ComplexModel{Flags: map[string]bool{"a": true, "b": false}}
	err = client.New("testconflict.go", "TestModelKey", &m1)
	assert.T(t, err == nil)
	err = m1.Save()
	assert.T(t, err == nil)
	// Create the same again (with the same key) to force siblings
	m2 := ComplexModel{Flags: map[string]bool{"a": false, "b": true, "c": true}}
	err = client.New("testconflict.go", "TestModelKey", &m2)
	assert.T(t, err == nil)
	err = m2.Save()
	assert.T(t, err == nil)
	// Now load it from Riak to test conflicts
	m3 := ComplexModel{}
	err = client.Load("testconflict.go", "TestModelKey", &m3)
	t.Logf("Loading model - %v\n", err)
	t.Logf("ComplexModel = %v\n", m3)
	assert.T(t, err == nil)
	// OR-merge of the two siblings: every flag set anywhere must be true.
	assert.T(t, m3.Flags["a"])
	assert.T(t, m3.Flags["b"])
	assert.T(t, m3.Flags["c"])
	// Cleanup
	err = bucket.Delete("TestModelKey")
	assert.T(t, err == nil)
}
// DMTime is a test model carrying a time.Time field, to verify timestamps
// survive a save/load round trip.
type DMTime struct {
	FieldS string
	FieldT time.Time
	Model
}

// TestModelTime saves and reloads a DMTime and checks the timestamp with
// time.Equal (not ==, which would also compare location/monotonic data).
func TestModelTime(t *testing.T) {
	// Preparations
	client := setupConnection(t)
	assert.T(t, client != nil)
	// Create and save
	doc := DMTime{FieldS: "text", FieldT: time.Now()}
	err := client.New("testmodel.go", "TestTime", &doc)
	assert.T(t, err == nil)
	//err = client.Save(&doc)
	err = doc.Save()
	assert.T(t, err == nil)
	// Load it from Riak and check that the fields of the DocumentModel struct are set correctly
	doc2 := DMTime{}
	err = client.Load("testmodel.go", "TestTime", &doc2)
	assert.T(t, err == nil)
	assert.T(t, doc2.FieldS == doc.FieldS)
	t.Logf("FieldT= %v ? %v\n", doc2.FieldT, doc.FieldT)
	assert.T(t, doc2.FieldT.Equal(doc.FieldT))
}
// SubStruct is a plain nested struct (no embedded Model of its own).
type SubStruct struct {
	Value string `riak:"value"`
}

// DMInclude is a test model embedding SubStruct as a value field, to verify
// nested structs are serialized as nested JSON objects.
type DMInclude struct {
	Name string `riak:"name"`
	Sub SubStruct `riak:"sub"`
	Model
}

// TestModelIncludingOtherStruct saves and reloads a DMInclude and checks
// both the nested JSON layout and the decoded nested field.
func TestModelIncludingOtherStruct(t *testing.T) {
	// Preparations
	client := setupConnection(t)
	assert.T(t, client != nil)
	// Create and save
	doc := DMInclude{Name: "some name", Sub: SubStruct{Value: "some value"}}
	err := client.New("testmodel.go", "TestModelIncludingOtherStruct", &doc)
	assert.T(t, err == nil)
	//err = client.Save(&doc)
	err = doc.Save()
	assert.T(t, err == nil)
	// Load it from Riak and check that the fields of the DocumentModel struct are set correctly
	doc2 := DMInclude{}
	err = client.Load("testmodel.go", "TestModelIncludingOtherStruct", &doc2)
	t.Logf("doc2 json = %v\n", string(doc2.robject.Data))
	assert.T(t, err == nil)
	// The nested struct must appear as a nested JSON object under "sub".
	assert.T(t, string(doc2.robject.Data) == `{"name":"some name","sub":{"value":"some value"}}`)
	assert.T(t, doc2.Name == doc.Name)
	t.Logf("Sub struct = %v ? %v\n", doc2.Sub.Value, doc.Sub.Value)
	assert.T(t, doc2.Sub.Value == doc.Sub.Value)
}
// TestModelReload saves a document, overwrites it under the same key via a
// second model, then Reload()s the first model and checks that its vclock
// and field values now match the second write.
func TestModelReload(t *testing.T) {
	// Preparations
	client := setupConnection(t)
	assert.T(t, client != nil)
	// Create a new "DocumentModel" and save it
	doc := DocumentModel{FieldS: "text", FieldF: 1.2, FieldB: true}
	err := client.New("testmodel.go", "TestModelKey", &doc)
	assert.T(t, err == nil)
	err = doc.Save()
	assert.T(t, err == nil)
	// Overwrite the same key with different content.
	doc2 := DocumentModel{FieldS: "text22", FieldF: 1.4, FieldB: true}
	// NOTE(review): the error from this client.New is never checked before
	// the Save below overwrites err.
	err = client.New("testmodel.go", "TestModelKey", &doc2)
	err = doc2.Save()
	assert.T(t, err == nil)
	// Reload must advance doc's vclock to match the second write.
	vclock := string(doc.robject.Vclock)
	err = (&doc).Reload()
	assert.T(t, err == nil)
	assert.T(t, string(doc.robject.Vclock) != vclock)
	assert.T(t, string(doc.robject.Vclock) == string(doc2.robject.Vclock))
	assert.T(t, doc.FieldS == doc2.FieldS)
	assert.T(t, doc.FieldF == doc2.FieldF)
	assert.T(t, doc.FieldB == doc2.FieldB)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.