text stringlengths 11 4.05M |
|---|
package gen
import (
"context"
"errors"
)
// ErrContinue can be returned from a Planner if there is no match. Other planners may then be tried.
// Callers should detect it with errors.Is(err, ErrContinue).
var ErrContinue = errors.New("continue")
// Plan is the contract that must be filled for a type to be rendered.
type Plan interface {
	// Type returns the TypeInfo for the current type.
	Type() TypeInfo
	// Deps returns any dependent types for the current type (i.e. any types requiring import).
	Deps() []TypeInfo
	// Execute is provided the resolved imports and should return the type rendered as a string.
	Execute(imports *Imports) (string, error)
}
// Planner is a strategy for generating a Plan from a Schema.
type Planner interface {
	// Plan generates a Plan from a Schema. If the error matches `errors.Is(err, ErrContinue)`,
	// processing may continue with the next planner.
	Plan(ctx context.Context, helper Helper, schema *Schema) (Plan, error)
}
// Helper is an interface provided to each planner.
type Helper interface {
	Loader
	// Dep registers schemas as dependencies — presumably so their types get
	// planned as well; confirm against the implementation.
	Dep(ctx context.Context, schemas ...*Schema) error
	// TypeInfo resolves the TypeInfo for a schema.
	TypeInfo(s *Schema) (TypeInfo, error)
	// ErrSimpleTypeUnknown reports whether err indicates the simple type could
	// not be detected.
	ErrSimpleTypeUnknown(err error) bool
	// DetectSimpleType determines the JSON type of a schema.
	DetectSimpleType(ctx context.Context, s *Schema) (JSONType, error)
	// DetectGoBaseType determines the Go base type for a schema.
	DetectGoBaseType(ctx context.Context, s *Schema) (GoBaseType, error)
	// TypeInfoHinted resolves TypeInfo for a schema using an explicit JSON type hint.
	TypeInfoHinted(s *Schema, t JSONType) TypeInfo
	// JSONPropertyExported converts a JSON property name into an exported Go identifier.
	JSONPropertyExported(name string) string
	// Primitive returns the Go primitive type name for a JSON type.
	Primitive(s JSONType) string
}
|
// This file contains Protobuf and JSON serialization/deserialization methods for peer IDs.
package peer
import (
"encoding/json"
)
// Interface assertions commented out to avoid introducing hard dependencies to protobuf.
// var _ proto.Marshaler = (*ID)(nil)
// var _ proto.Unmarshaler = (*ID)(nil)

// Compile-time checks that ID satisfies the JSON marshaling interfaces.
var _ json.Marshaler = (*ID)(nil)
var _ json.Unmarshaler = (*ID)(nil)
// Marshal returns the raw bytes of the ID. It matches Gogo's
// proto.Marshaler shape (see the commented-out assertion above).
func (id ID) Marshal() ([]byte, error) {
	return []byte(id), nil
}
// MarshalTo copies the ID's bytes into data and reports how many bytes were
// written. NOTE(review): if data is shorter than the ID, the copy silently
// truncates — confirm callers always size data via Size().
func (id ID) MarshalTo(data []byte) (n int, err error) {
	return copy(data, []byte(id)), nil
}
// Unmarshal parses data into the receiver via IDFromBytes, which is also
// where any validation happens.
func (id *ID) Unmarshal(data []byte) (err error) {
	*id, err = IDFromBytes(data)
	return err
}
// Size implements Gogo's proto.Sizer; the compile-time assertion is omitted
// to avoid introducing a hard dependency on gogo. It reports the ID's
// length in bytes.
func (id ID) Size() int {
	return len(id)
}
// MarshalJSON encodes the ID as a JSON string in base58 form.
func (id ID) MarshalJSON() ([]byte, error) {
	return json.Marshal(IDB58Encode(id))
}
// UnmarshalJSON decodes a JSON base58 string into the receiver.
func (id *ID) UnmarshalJSON(data []byte) (err error) {
	var v string
	if err = json.Unmarshal(data, &v); err != nil {
		return err
	}
	*id, err = IDB58Decode(v)
	return err
}
|
package main
import "fmt"
// person is a small demo struct used by the pointer examples below.
type person struct {
	id   int    // numeric identifier
	name string // display name
	age  int    // age in years
}
// main2401 demonstrates mutating struct fields through a pointer, both with
// an explicit dereference and with Go's automatic pointer indirection.
func main2401() {
	// Initialize a person value.
	per := person{101, "李宁", 40}
	// Take its address.
	p := &per
	fmt.Printf("%T\n", p) // *person
	// Explicit dereference to modify a member.
	(*p).age = 50
	fmt.Println(per)
	// Pointers also allow direct member access (implicit dereference).
	p.id = 110
	fmt.Println(per)
}
// test24 mutates the person behind p, demonstrating that passing a struct
// pointer lets the callee change the caller's value.
func test24(p *person) {
	(*p).age = 57
}
// main2402 shows pass-by-address: test24 modifies the caller's struct.
func main2402() {
	hero := person{101, "李宁", 40}
	test24(&hero) // pass the address so the callee can mutate it
	fmt.Println(hero)
}
// main2403 works with a pointer to a [3]person array: element access through
// the pointer, explicit dereference, and iteration.
func main2403() {
	heroes := [3]person{
		{101, "钢铁侠", 34},
		{102, "绿巨人", 40},
		{103, "黑寡妇", 28},
	}
	// Pointer to the whole array.
	ptr := &heroes
	// The two assignments below are equivalent ways to reach an element.
	ptr[0].age = 40
	(*ptr)[0].age = 39
	fmt.Println(heroes[0])
	for idx := 0; idx < len(ptr); idx++ {
		fmt.Println(ptr[idx])
	}
}
func main() {
//map类型变量
m := make(map[int]*[3]person)
fmt.Printf("%T\n",m)
m[1] = new([3]person)
m[1] = &[3]person{{101,"钢铁侠",34},
{102,"绿巨人",40},
{103,"黑寡妇",28}}
m[2] = new([3]person)
m[2] = &[3]person{{101,"美队",34},
{102,"黑豹",40},
{103,"女巫",28}}
for k,v := range m{
fmt.Println(k,*v)
}
//数组指针
var p *[3]int
//创建内存空间 存储 [3]int
p = new([3]int)
p[0] = 123
p[1] = 456
p[2] = 789
fmt.Println(p)
} |
package db
import (
_ "github.com/Go-SQL-Driver/MySQL"
//"time"
"fmt"
)
// Search returns every video whose title contains keyword. Each row is
// returned as a map with "id", "title" and "path" keys.
//
// Uses the package-level db handle and the checkErr helper declared
// elsewhere in this package.
func Search(keyword string) interface{} {
	// Parameterized query instead of string concatenation: the original
	// interpolated keyword directly into the SQL, which is injectable.
	rows, err := db.Query("SELECT * FROM video WHERE `title` LIKE ?", "%"+keyword+"%")
	checkErr(err)
	defer rows.Close()
	var a []map[string]interface{}
	for rows.Next() {
		var id int
		var title string
		var path string
		// assumes the video table has exactly (id, title, path) — TODO confirm
		err = rows.Scan(&id, &title, &path)
		checkErr(err)
		fmt.Println(id)
		fmt.Println(title)
		fmt.Println(path)
		m := make(map[string]interface{})
		m["id"] = id
		m["title"] = title
		m["path"] = path
		a = append(a, m)
	}
	// Surface any error that ended the iteration early.
	checkErr(rows.Err())
	return a
}
|
package main
import (
"chat/codec"
"encoding/json"
"strings"
"time"
)
// Message is a single chat message.
type Message struct {
	From    string   // sender
	Time    int64    // unix timestamp when the message was created
	Text    string   // message body
	isGm    bool     // whether the text is a GM (game-master) command
	gmOrder []string // GM command arguments, split on spaces
}
// NewMessage builds a plain (non-GM) chat message from a sender and text,
// stamped with the current unix time.
func NewMessage(from string, text string) *Message {
	return &Message{
		From: from,
		Time: time.Now().Unix(),
		Text: text,
		isGm: false,
	}
}
// ToJson serializes the message to JSON, then encodes the JSON string with
// the chat codec for transmission.
// Note: the unexported fields (isGm, gmOrder) are skipped by json.Marshal.
func (msg *Message) ToJson() ([]byte, error) {
	bytes, err := json.Marshal(msg)
	if err != nil {
		return nil, err
	}
	return codec.Encode(string(bytes))
}
// ToMessage converts raw payload bytes into a Message. The text is run
// through Filter, the timestamp is set to now, and when the payload is a GM
// command its space-separated arguments are collected into gmOrder.
//
// NOTE(review): msg.From is never populated here — presumably the caller is
// responsible; confirm.
func ToMessage(data []byte) *Message {
	var msg Message
	str := string(data)
	msg.Text = Filter(str)
	msg.Time = time.Now().Unix()
	// Renamed from `IsGm`: the original local variable shadowed the IsGm
	// function, which was confusing even though it happened to work.
	isGmCmd := IsGm(str)
	msg.isGm = isGmCmd
	if isGmCmd && len(str) > 0 {
		msg.gmOrder = make([]string, 0)
		strList := strings.Split(str, " ")
		for _, s := range strList {
			s = strings.Trim(s, " ")
			if len(s) > 0 {
				msg.gmOrder = append(msg.gmOrder, s)
			}
		}
	}
	return &msg
}
|
package printer
import (
"encoding/json"
"github.com/davyxu/tabtoy/v2/i18n"
"github.com/davyxu/tabtoy/v2/model"
)
// typePrinter renders descriptor type information as JSON; registered under
// the "type" printer name in init below.
type typePrinter struct {
}
// typeFieldModel describes one column field of a table type.
type typeFieldModel struct {
	Name       string                 // field name
	Type       string                 // declared type as a string
	Kind       string                 // kind of the type (struct/enum/primitive)
	IsRepeated bool                   // whether the field is repeated (array)
	Meta       map[string]interface{} // raw metadata attached to the field
	Comment    string                 // source comment
	Value      int                    // enum value; 0 for struct fields
}
// typeStructModel is the type information of one table (struct or enum).
type typeStructModel struct {
	Name   string
	Fields []*typeFieldModel
}
// typeFileModel is the type information of the whole file: tool metadata
// plus all exported structs and enums.
type typeFileModel struct {
	Tool    string
	Version string
	Structs []*typeStructModel
	Enums   []*typeStructModel
}
// Run collects the type information of every descriptor in g into a
// typeFileModel and returns it serialized as indented JSON in a Stream.
func (self *typePrinter) Run(g *Globals, outputClass int) *Stream {
	bf := NewStream()
	var fm typeFileModel
	fm.Tool = "github.com/davyxu/tabtoy"
	fm.Version = g.Version
	// Walk every descriptor (type).
	for _, d := range g.FileDescriptor.Descriptors {
		// This descriptor is excluded from output by the ".type" tag.
		if !d.File.MatchTag(".type") {
			log.Infof("%s: %s", i18n.String(i18n.Printer_IgnoredByOutputTag), d.Name)
			continue
		}
		structM := &typeStructModel{
			Name: d.Name,
		}
		// Walk the descriptor's fields.
		for _, fd := range d.Fields {
			// Fields corresponding to an XXDefine of a CombineStruct.
			if d.Usage == model.DescriptorUsage_CombineStruct {
				// This field's source file is excluded from output.
				if fd.Complex != nil && !fd.Complex.File.MatchTag(".type") {
					continue
				}
			}
			field := &typeFieldModel{
				Name:       fd.Name,
				Type:       fd.TypeString(),
				Kind:       fd.KindString(),
				IsRepeated: fd.IsRepeated,
				Comment:    fd.Comment,
				Meta:       fd.Meta.Raw(),
			}
			switch d.Kind {
			case model.DescriptorKind_Struct:
				field.Value = 0
			case model.DescriptorKind_Enum:
				field.Value = int(fd.EnumValue)
			}
			structM.Fields = append(structM.Fields, field)
		}
		switch d.Kind {
		case model.DescriptorKind_Struct:
			fm.Structs = append(fm.Structs, structM)
		case model.DescriptorKind_Enum:
			fm.Enums = append(fm.Enums, structM)
		}
	}
	data, err := json.MarshalIndent(&fm, "", " ")
	// Marshal errors are logged but not fatal; the stream then gets nil data.
	if err != nil {
		log.Errorln(err)
	}
	bf.WriteRawBytes(data)
	return bf
}
// init registers this printer under the "type" output class.
func init() {
	RegisterPrinter("type", &typePrinter{})
}
|
package router
import (
"github.com/astaxie/beego"
"beegoApi/controller"
"beegoApi/middleware"
)
// init wires up the /v1 namespace routes and the token-checking middleware.
func init() {
	ns := beego.NewNamespace("/v1",
		beego.NSRouter("/user", &controller.IndexController{}, "Get:Get"),
	)
	// NOTE(review): "/user" is registered both inside the /v1 namespace and
	// here at the root — confirm the root-level registration is intentional.
	beego.Router("/user", &controller.IndexController{}, "Get:Get")
	// All /v1 requests pass through the token middleware before routing.
	beego.InsertFilter("/v1/*", beego.BeforeRouter, middleware.Token)
	beego.AddNamespace(ns)
}
|
package trie
import (
"fmt"
"github.com/openacid/slim/encode"
)
// ExampleSlimTrie_RangeGet demonstrates RangeGet: keys sharing a value form
// a range, and lookups inside (or falsely matching) a range return that
// range's value. The Output comment below is checked by `go test`.
func ExampleSlimTrie_RangeGet() {
	// To index a map of key range to value with SlimTrie is very simple:
	//
	// Gives a set of key the same value, and use RangeGet() instead of Get().
	// SlimTrie does not store branches for adjacent leaves with the same value.
	keys := []string{
		"abc",
		"abcd",
		"bc",
		"bcd",
		"bce",
	}
	// values[i] corresponds to keys[i]; equal adjacent values form one range.
	values := []int{
		1, 1,
		2,
		3, 3,
	}
	st, err := NewSlimTrie(encode.Int{}, keys, values)
	if err != nil {
		panic(err)
	}
	// Lookup cases, including expected false positives inherent to SlimTrie.
	cases := []struct {
		key string
		msg string
	}{
		{"ab", "out of range"},
		{"abc", "in range"},
		{"abc1", "FALSE POSITIVE"},
		{"abc2", "FALSE POSITIVE"},
		{"abcd", "in range"},
		{"abcde", "FALSE POSITIVE: a suffix of abcd"},
		{"acc", "FALSE POSITIVE"},
		{"bc", "in single key range [bc]"},
		{"bc1", "FALSE POSITIVE"},
		{"bcd1", "FALSE POSITIVE"},
		// {"def", "FALSE POSITIVE"},
	}
	for _, c := range cases {
		v, found := st.RangeGet(c.key)
		fmt.Printf("%-10s %-5v %-5t: %s\n", c.key, v, found, c.msg)
	}
	// Output:
	// ab <nil> false: out of range
	// abc 1 true : in range
	// abc1 1 true : FALSE POSITIVE
	// abc2 1 true : FALSE POSITIVE
	// abcd 1 true : in range
	// abcde 1 true : FALSE POSITIVE: a suffix of abcd
	// acc 1 true : FALSE POSITIVE
	// bc 2 true : in single key range [bc]
	// bc1 2 true : FALSE POSITIVE
	// bcd1 3 true : FALSE POSITIVE
}
|
package controllers
import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strconv"

	"github.com/astaxie/beego"
	"github.com/gorilla/websocket"
)
// IndexController is the default beego controller for the chat pages.
type IndexController struct {
	beego.Controller
}

// Room tracks the current member count of each room, keyed by room id.
// NOTE(review): accessed without synchronization — confirm all access is
// single-goroutine.
var Room = make(map[int64]int)
// Roomlist pairs a room id with its member count, for sending to the
// front end as JSON.
type Roomlist struct {
	Roomid int64
	PerNum int
}
// Get renders the chat index page for a named user; requests without a
// uname are redirected back to the entry page.
func (this *IndexController) Get() {
	uname := this.GetString("uname")
	if uname == "" {
		this.Redirect("/", 302)
		return
	}
	this.TplName = "index.html"
	this.Data["username"] = uname
}
// Join handles a request to join a chat room: it validates uname and roomid
// and redirects the client to the websocket endpoint for that room.
func (this *IndexController) Join() {
	uname := this.GetString("uname")
	roomid, err := this.GetInt64("roomid")
	if err != nil {
		// The original silently ignored a bad room id (treating it as 0);
		// send the user back to the entry page instead.
		this.Redirect("/", 302)
		return
	}
	fmt.Println(uname)
	fmt.Println(roomid)
	if len(uname) == 0 {
		this.Redirect("/", 302)
		return
	}
	// Escape uname so names containing '&', '=' or spaces cannot corrupt
	// the query string.
	target := "/ws?uname=" + url.QueryEscape(uname) + "&roomid=" + strconv.FormatInt(roomid, 10)
	this.Redirect(target, 302)
}
// Check upgrades the request to a websocket connection and pushes the list
// of currently non-empty rooms to the client.
func (this *IndexController) Check() {
	ws, err := websocket.Upgrade(this.Ctx.ResponseWriter, this.Ctx.Request, nil, 1024, 1024)
	if _, ok := err.(websocket.HandshakeError); ok {
		http.Error(this.Ctx.ResponseWriter, "Not a websocket handshake", 400)
		return
	} else if err != nil {
		beego.Error("Cannot setup websocket connection:", err)
		return
	}
	// The original leaked the connection; nothing is read or written after
	// this handler returns, so close it when done. (Removed the stray
	// `fmt.Printf("error")` debug line as well.)
	defer ws.Close()
	// Send every room that currently has members to the client.
	for k, v := range Room {
		fmt.Printf("%d %d", k, v)
		if v == 0 {
			continue
		}
		data, err := json.Marshal(Roomlist{k, v})
		if err != nil {
			beego.Error("Fail to marshal event:", err)
			return
		}
		// The write error was silently dropped before.
		if err := ws.WriteMessage(websocket.TextMessage, data); err != nil {
			beego.Error("Fail to write message:", err)
			return
		}
	}
}
|
package main
import (
"context"
"log"
)
// main wires up the HTTP routes and serves on :8080.
func main() {
	ctx := context.TODO()
	routes, err := injectRoutes(ctx)
	if err != nil {
		// Include the underlying error so startup failures are diagnosable;
		// the original discarded it.
		log.Fatalln("inject routes failed:", err)
	}
	if err := routes.Run(":8080"); err != nil {
		log.Fatalln("start server failed:", err)
	}
}
|
package client
import (
"fmt"
"github.com/33cn/chain33/common"
"github.com/33cn/chain33/types"
"math/rand"
"strconv"
"testing"
"time"
)
// TestJSONClient_GetPeerList fetches and prints the peer list from a live
// node. NOTE(review): depends on a hardcoded external endpoint being
// reachable — this is an integration probe, not a unit test.
func TestJSONClient_GetPeerList(t *testing.T) {
	jsonclient := NewJSONClient("", "http://123.60.25.80:8801")
	peerList, err := jsonclient.GetPeerList()
	if err != nil {
		t.Error(err)
	}
	fmt.Println(peerList)
}
// TestJSONClient_SendTx decodes a canned hex transaction and reports how
// long the decode took. The actual send is commented out.
// NOTE(review): the types.Decode error is ignored — confirm that is
// acceptable for this benchmark-style check.
func TestJSONClient_SendTx(t *testing.T) {
	tx := "0x0a0a757365722e77726974651a6d0801122103068eb379d486ad923f57c7983a1ed32227fc9fa09355412ed9709d7e9739bb6d1a46304402207f379436e2ee009effd43e67256102fc5fa5c97750aec989236824ec25d7ee9302205b949c77fe4011f2f3364011ac575b74d579a0df7b477cc0f6b8bd32808ecbb6208cb4ff9df6cea6f36330b88588cedd829ecd163a2231444e615344524739524431397335396d65416f654e34613246365248393766536f"
	//jsonclient := NewJSONClient("", "http://123.60.25.80:8801")
	//txhash, err := jsonclient.SendTransaction(tx)
	//if err != nil {
	//	t.Error(err)
	//}
	//t.Log(txhash)
	start := time.Now().UnixNano()
	data, _ := common.FromHex(tx)
	var tr types.Transaction
	types.Decode(data, &tr)
	end := time.Now().UnixNano()
	fmt.Printf("执行消耗的时间为 %d纳秒", end-start)
	fmt.Println(tr)
}
// TestRand logs rand.Intn(1) one hundred times — always 0, since the
// half-open range [0,1) contains only zero — and a formatted int64.
func TestRand(t *testing.T) {
	for n := 0; n < 100; n++ {
		t.Log(rand.Intn(1))
	}
	t.Log(strconv.FormatInt(10*1e9, 10))
}
|
package user
import (
"context"
"encoding/base64"
"encoding/json"
"net/http"
"time"
"github.com/damocles217/server/models"
"github.com/damocles217/server/router/user/config"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
)
// MakeUser creates a user from the request body, stores it in MongoDB, sets
// an encrypted auth cookie, and writes a JSON response containing the
// stored user and any accumulated errors.
func MakeUser(
	w http.ResponseWriter,
	r *http.Request,
	collection *mongo.Collection) {
	// Set headers
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("limit", "50mb")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	// Define all variables
	var userReq models.UserRequest // User catch request and get response
	var user models.User           // User sent to mongodb
	var errors []models.Error      // Error accumulator
	var setError models.Error      // Error scratch value
	var response models.ResponseUser
	// Decode the request body; on failure we record the error and continue
	// with zero values, matching the original accumulate-errors behavior.
	err := json.NewDecoder(r.Body).Decode(&userReq)
	if err != nil {
		setError.Message = "Por favor envie datos validos"
		setError.Value = "[Body]"
		errors = append(errors, setError)
	}
	// Copy request fields onto the mongodb document.
	user.Name = userReq.Name
	user.Lastname = userReq.Lastname
	user.Email = userReq.Email
	user.BornOn = userReq.BornOn
	user.Password, _ = config.HashPassword(userReq.Password)
	user.Logged = true
	user.Admin = false
	user.CreatedAt = time.Now()
	user.UpdatedAt = time.Now()
	user.CodeAuth = config.CodeAuth(15)
	user.UserID = config.GetUserID(user.Name, user.Lastname, collection)
	// Encrypt the auth code and set it as a cookie.
	// NOTE(review): hardcoded AES key — should come from configuration.
	key := []byte("0123456789abcdef")
	result, err := config.AesEncrypt([]byte(user.CodeAuth), key)
	if err != nil {
		panic(err)
	}
	cryptedCode := base64.StdEncoding.EncodeToString(result)
	config.SetCookie(&w, "c_user", string(cryptedCode))
	// Insert the user, then read it back for the response.
	newUserID, err := collection.InsertOne(context.TODO(), user)
	if err != nil {
		setError.Message = "Internal server error!"
		setError.Value = "[Database]"
		errors = append(errors, setError)
	} else if userID, ok := newUserID.InsertedID.(primitive.ObjectID); ok {
		// Guard added: the original dereferenced newUserID unconditionally,
		// which panics with a nil result when InsertOne fails.
		filter := bson.D{primitive.E{Key: "_id", Value: userID}}
		collection.FindOne(context.TODO(), filter).Decode(&response.User)
	}
	response.Errors = &errors
	if len(*response.Errors) > 0 {
		response.Sucess = false
	} else {
		response.Sucess = true
	}
	json.NewEncoder(w).Encode(response)
}
|
package sdk
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
rmTesting "github.com/brigadecore/brigade/sdk/v3/internal/restmachinery/testing" // nolint: lll
metaTesting "github.com/brigadecore/brigade/sdk/v3/meta/testing"
"github.com/stretchr/testify/require"
)
// TestProjectRoleAssignmentMarshalJSON verifies that ProjectRoleAssignment
// marshals with the expected API version and kind metadata.
func TestProjectRoleAssignmentMarshalJSON(t *testing.T) {
	metaTesting.RequireAPIVersionAndType(
		t,
		ProjectRoleAssignment{},
		ProjectRoleAssignmentKind,
	)
}
// TestProjectRoleAssignmentListMarshalJSON verifies that the list type
// marshals with the expected API version and kind metadata.
func TestProjectRoleAssignmentListMarshalJSON(t *testing.T) {
	metaTesting.RequireAPIVersionAndType(
		t,
		ProjectRoleAssignmentList{},
		ProjectRoleAssignmentListKind,
	)
}
// TestNewProjectRoleAssignmentsClient verifies the constructor returns the
// concrete client type with a properly configured base client.
func TestNewProjectRoleAssignmentsClient(t *testing.T) {
	client, ok := NewProjectRoleAssignmentsClient(
		rmTesting.TestAPIAddress,
		rmTesting.TestAPIToken,
		nil,
	).(*projectRoleAssignmentsClient)
	require.True(t, ok)
	rmTesting.RequireBaseClient(t, client.BaseClient)
}
// TestProjectRoleAssignmentsClientGrant verifies that Grant POSTs the role
// assignment to the project's role-assignments endpoint with the expected
// body.
func TestProjectRoleAssignmentsClientGrant(t *testing.T) {
	const testProjectID = "bluebook"
	testProjectRoleAssignment := ProjectRoleAssignment{
		Role: Role("ceo"),
		Principal: PrincipalReference{
			Type: PrincipalTypeUser,
			ID:   "tony@starkindustries.com",
		},
	}
	// Fake API server that asserts on the request it receives.
	server := httptest.NewServer(
		http.HandlerFunc(
			func(w http.ResponseWriter, r *http.Request) {
				defer r.Body.Close()
				require.Equal(t, http.MethodPost, r.Method)
				require.Equal(
					t,
					fmt.Sprintf("/v2/projects/%s/role-assignments", testProjectID),
					r.URL.Path,
				)
				bodyBytes, err := ioutil.ReadAll(r.Body)
				require.NoError(t, err)
				projectRoleAssignment := ProjectRoleAssignment{}
				err = json.Unmarshal(bodyBytes, &projectRoleAssignment)
				require.NoError(t, err)
				require.Equal(t, testProjectRoleAssignment, projectRoleAssignment)
				w.WriteHeader(http.StatusOK)
			},
		),
	)
	defer server.Close()
	client := NewProjectRoleAssignmentsClient(
		server.URL,
		rmTesting.TestAPIToken,
		nil,
	)
	err :=
		client.Grant(
			context.Background(),
			testProjectID,
			testProjectRoleAssignment,
			nil,
		)
	require.NoError(t, err)
}
// TestProjectRoleAssignmentsClientList verifies that List hits the correct
// endpoint (project-scoped or global depending on the selector) with the
// selector encoded in the query string, and decodes the response.
func TestProjectRoleAssignmentsClientList(t *testing.T) {
	const testProjectID = "bluebook"
	const testUserID = "tony@starkindustries.com"
	const testRole = Role("ceo")
	testProjectRoleAssignments := ProjectRoleAssignmentList{
		Items: []ProjectRoleAssignment{
			{
				ProjectID: testProjectID,
				Principal: PrincipalReference{
					Type: PrincipalTypeUser,
					ID:   testUserID,
				},
				Role: testRole,
			},
		},
	}
	// Each case differs only in the selector and the path it should produce.
	testCases := []struct {
		name       string
		selector   ProjectRoleAssignmentsSelector
		assertions func(*testing.T, *http.Request)
	}{
		{
			name: "with project ID specified",
			selector: ProjectRoleAssignmentsSelector{
				ProjectID: testProjectID,
				Principal: &PrincipalReference{
					Type: PrincipalTypeUser,
					ID:   testUserID,
				},
				Role: testRole,
			},
			assertions: func(t *testing.T, r *http.Request) {
				require.Equal(
					t,
					fmt.Sprintf("/v2/projects/%s/role-assignments", testProjectID),
					r.URL.Path,
				)
			},
		},
		{
			name: "without project ID specified",
			selector: ProjectRoleAssignmentsSelector{
				Principal: &PrincipalReference{
					Type: PrincipalTypeUser,
					ID:   testUserID,
				},
				Role: testRole,
			},
			assertions: func(t *testing.T, r *http.Request) {
				require.Equal(
					t,
					"/v2/project-role-assignments",
					r.URL.Path,
				)
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			// Fake API server asserting the query parameters and returning
			// the canned assignment list.
			server := httptest.NewServer(
				http.HandlerFunc(
					func(w http.ResponseWriter, r *http.Request) {
						defer r.Body.Close()
						require.Equal(t, http.MethodGet, r.Method)
						require.Equal(
							t,
							testCase.selector.Principal.Type,
							PrincipalType(r.URL.Query().Get("principalType")),
						)
						require.Equal(
							t,
							testCase.selector.Principal.ID,
							r.URL.Query().Get("principalID"),
						)
						require.Equal(
							t,
							testCase.selector.Role,
							Role(r.URL.Query().Get("role")),
						)
						testCase.assertions(t, r)
						bodyBytes, err := json.Marshal(testProjectRoleAssignments)
						require.NoError(t, err)
						w.WriteHeader(http.StatusOK)
						fmt.Fprintln(w, string(bodyBytes))
					},
				),
			)
			defer server.Close()
			client := NewProjectRoleAssignmentsClient(
				server.URL,
				rmTesting.TestAPIToken,
				nil,
			)
			projectRoleAssignments, err := client.List(
				context.Background(),
				&testCase.selector,
				nil,
			)
			require.NoError(t, err)
			require.Equal(t, testProjectRoleAssignments, projectRoleAssignments)
		})
	}
}
// TestProjectRoleAssignmentsClientRevoke verifies that Revoke issues a
// DELETE against the project's role-assignments endpoint with the role and
// principal encoded in the query string.
func TestProjectRoleAssignmentsClientRevoke(t *testing.T) {
	const testProjectID = "bluebook"
	testProjectRoleAssignment := ProjectRoleAssignment{
		Role: Role("ceo"),
		Principal: PrincipalReference{
			Type: PrincipalTypeUser,
			ID:   "tony@starkindustries.com",
		},
	}
	// Fake API server that asserts on the request it receives.
	server := httptest.NewServer(
		http.HandlerFunc(
			func(w http.ResponseWriter, r *http.Request) {
				require.Equal(t, http.MethodDelete, r.Method)
				require.Equal(
					t,
					fmt.Sprintf("/v2/projects/%s/role-assignments", testProjectID),
					r.URL.Path,
				)
				require.Equal(
					t,
					testProjectRoleAssignment.Role,
					Role(r.URL.Query().Get("role")),
				)
				require.Equal(
					t,
					testProjectRoleAssignment.Principal.Type,
					PrincipalType(r.URL.Query().Get("principalType")),
				)
				require.Equal(
					t,
					testProjectRoleAssignment.Principal.ID,
					r.URL.Query().Get("principalID"),
				)
				w.WriteHeader(http.StatusOK)
			},
		),
	)
	defer server.Close()
	client := NewProjectRoleAssignmentsClient(
		server.URL,
		rmTesting.TestAPIToken,
		nil,
	)
	err := client.Revoke(
		context.Background(),
		testProjectID,
		testProjectRoleAssignment,
		nil,
	)
	require.NoError(t, err)
}
|
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package main
import (
"io/ioutil"
"log"
"os"
"testing"
"time"
"golang.org/x/net/context"
"github.com/GoogleCloudPlatform/golang-samples/internal/testutil"
)
// TestMain silences the standard logger while the tests run (the functions
// under test are noisy) and restores it afterwards.
func TestMain(m *testing.M) {
	log.SetOutput(ioutil.Discard)
	code := m.Run()
	log.SetOutput(os.Stderr)
	os.Exit(code)
}
// TestListResources exercises the list helpers against a real project with
// a 30-second deadline.
func TestListResources(t *testing.T) {
	hc := testutil.SystemTest(t)
	// Keep and defer the cancel func; the original discarded it, which
	// `go vet` flags as a context leak.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	s, err := createService(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// Use t.Fatal instead of log.Fatal: log.Fatal calls os.Exit, skipping
	// test reporting and cleanup.
	if err := listMonitoredResourceDescriptors(s, hc.ProjectID); err != nil {
		t.Fatal(err)
	}
	if err := listMetricDescriptors(s, hc.ProjectID); err != nil {
		t.Fatal(err)
	}
	if err := listTimeSeries(s, hc.ProjectID); err != nil {
		t.Fatal(err)
	}
}
|
// Package main implements the ledger backed oasis-node signer plugin.
package main
import (
"flag"
"fmt"
"strconv"
"strings"
"github.com/oasisprotocol/oasis-core/go/common/crypto/signature"
pluginSigner "github.com/oasisprotocol/oasis-core/go/common/crypto/signature/signers/plugin"
"github.com/oasisprotocol/oasis-core-ledger/common"
"github.com/oasisprotocol/oasis-core-ledger/common/wallet"
"github.com/oasisprotocol/oasis-core-ledger/internal"
)
var (
	// signerPathCoinType is set to 474, the number associated with Oasis ROSE.
	signerPathCoinType uint32 = 474
	// signerPathAccount is the account index used to sign transactions.
	signerPathAccount uint32 = 0
	// signerPathChange indicates an external chain.
	signerPathChange uint32 = 0
	// signerEntityDerivationRootPath is the BIP-0032 path prefix used for generating
	// an Entity signer.
	signerEntityDerivationRootPath = []uint32{
		internal.PathPurposeBIP44,
		signerPathCoinType,
		signerPathAccount,
		signerPathChange,
	}
	// signerConsensusDerivationRootPath is the derivation path prefix used for
	// generating a consensus signer.
	signerConsensusDerivationRootPath = []uint32{
		internal.PathPurposeConsensus,
		signerPathCoinType,
		internal.PathSubPurposeConsensus,
		signerPathAccount,
	}
	// roleDerivationRootPaths maps each supported signer role to its
	// derivation path prefix.
	roleDerivationRootPaths = map[signature.SignerRole][]uint32{
		signature.SignerEntity:    signerEntityDerivationRootPath,
		signature.SignerConsensus: signerConsensusDerivationRootPath,
	}
	// versionFlag makes the binary print version info and exit.
	versionFlag = flag.Bool("version", false, "Print version and exit")
)
// pluginConfig holds the parsed plugin configuration: the Ledger wallet to
// connect to and the account index appended to each derivation path.
type pluginConfig struct {
	walletID wallet.ID
	index    uint32
}
// newPluginConfig parses a comma-separated list of "key:value" pairs.
// Recognized keys are "wallet_id" (hex-encoded) and "index" (uint32); both
// are required and may appear at most once.
func newPluginConfig(cfgStr string) (*pluginConfig, error) {
	var pairs []string
	// An empty string means no pairs at all (Split would yield [""]).
	if cfgStr != "" {
		pairs = strings.Split(cfgStr, ",")
	}
	cfg := new(pluginConfig)
	var haveWallet, haveIndex bool
	for _, pair := range pairs {
		parts := strings.Split(pair, ":")
		if len(parts) != 2 {
			return nil, fmt.Errorf("malformed k/v pair: '%s'", pair)
		}
		switch strings.ToLower(parts[0]) {
		case "wallet_id":
			if haveWallet {
				return nil, fmt.Errorf("wallet ID already configured")
			}
			if err := cfg.walletID.UnmarshalHex(parts[1]); err != nil {
				return nil, err
			}
			haveWallet = true
		case "index":
			if haveIndex {
				return nil, fmt.Errorf("index already configured")
			}
			idx, err := strconv.ParseUint(parts[1], 10, 32)
			if err != nil {
				return nil, fmt.Errorf("malformed index: %w", err)
			}
			cfg.index = uint32(idx)
			haveIndex = true
		default:
			return nil, fmt.Errorf("unknown configuration option: '%v'", parts[0])
		}
	}
	if !haveWallet {
		return nil, fmt.Errorf("wallet ID not configured")
	}
	if !haveIndex {
		return nil, fmt.Errorf("index not configured")
	}
	return cfg, nil
}
// ledgerPlugin implements the oasis-node signer plugin interface backed by
// a Ledger device, with one ledgerSigner per configured role.
type ledgerPlugin struct {
	walletID wallet.ID
	inner    map[signature.SignerRole]*ledgerSigner
}

// ledgerSigner holds one role's derivation path, its device connection
// (nil until Load connects), and a cached public key (nil until Public).
type ledgerSigner struct {
	path      []uint32
	device    *internal.LedgerOasis
	publicKey *signature.PublicKey
}
// Initialize parses the plugin configuration and prepares a signer (with
// its full derivation path) for each requested role. No device connection
// is made here; that happens in Load.
func (pl *ledgerPlugin) Initialize(config string, roles ...signature.SignerRole) error {
	cfg, err := newPluginConfig(config)
	if err != nil {
		return fmt.Errorf("ledger: failed to parse configuration: %w", err)
	}
	pl.walletID = cfg.walletID
	pl.inner = make(map[signature.SignerRole]*ledgerSigner)
	for _, role := range roles {
		var signer ledgerSigner
		pathPrefix, ok := roleDerivationRootPaths[role]
		if !ok {
			return fmt.Errorf("ledger: role %d is not supported by signer", role)
		}
		// Full path = role's root path prefix + configured account index.
		signer.path = append(signer.path, pathPrefix...)
		signer.path = append(signer.path, cfg.index)
		pl.inner[role] = &signer
	}
	return nil
}
// Load connects to the Ledger device for the given role's derivation path.
// It is idempotent: an already-connected signer is left alone.
func (pl *ledgerPlugin) Load(role signature.SignerRole, _mustGenerate bool) error {
	// Note: `mustGenerate` is ignored as all keys are generated on the
	// Ledger device.
	signer, device, err := pl.signerForRole(role)
	if err != nil {
		return err
	}
	if device != nil {
		// Already connected to device with this key's path.
		return nil
	}
	dev, err := internal.ConnectApp(pl.walletID, signer.path)
	if err != nil {
		return fmt.Errorf("ledger: failed to connect to device: %w", err)
	}
	signer.device = dev
	return nil
}
// Public returns the Ed25519 public key for the given role, querying the
// Ledger device on first use and caching the result on the signer.
func (pl *ledgerPlugin) Public(role signature.SignerRole) (signature.PublicKey, error) {
	var pubKey signature.PublicKey
	signer, device, err := pl.signerForRole(role)
	if err != nil {
		return pubKey, err
	}
	if signer.publicKey != nil {
		// Already have retrieved the public key.
		return *signer.publicKey, nil
	}
	if device == nil {
		return pubKey, fmt.Errorf("ledger: BUG: device for key unavailable: %d", role)
	}
	// Query the public key from the device.
	rawPubKey, err := device.GetPublicKeyEd25519(signer.path)
	if err != nil {
		// Fixed typo in the user-visible error message ("retrieive").
		return pubKey, fmt.Errorf("ledger: failed to retrieve public key from device: %w", err)
	}
	if err = pubKey.UnmarshalBinary(rawPubKey); err != nil {
		return pubKey, fmt.Errorf("ledger: device returned malformed public key: %w", err)
	}
	signer.publicKey = &pubKey
	return pubKey, nil
}
// ContextSign signs message under the prepared signing context using the
// Ledger device associated with role.
func (pl *ledgerPlugin) ContextSign(
	role signature.SignerRole,
	rawContext signature.Context,
	message []byte,
) ([]byte, error) {
	signer, device, err := pl.signerForRole(role)
	if err != nil {
		return nil, err
	}
	if device == nil {
		return nil, fmt.Errorf("ledger: BUG: device for key unavailable: %d", role)
	}
	preparedContext, err := signature.PrepareSignerContext(rawContext)
	if err != nil {
		return nil, fmt.Errorf("ledger: failed to prepare signing context: %w", err)
	}
	// Renamed from `signature`: the original local shadowed the imported
	// signature package.
	sig, err := device.SignEd25519(signer.path, preparedContext, message)
	if err != nil {
		return nil, fmt.Errorf("ledger: failed to sign message: %w", err)
	}
	return sig, nil
}
// signerForRole returns the signer registered for role and its device
// handle (nil until Load has connected). signature.ErrRoleMismatch is
// returned when the plugin was not initialized with the role.
func (pl *ledgerPlugin) signerForRole(role signature.SignerRole) (*ledgerSigner, *internal.LedgerOasis, error) {
	s, ok := pl.inner[role]
	if !ok || s == nil {
		return nil, nil, signature.ErrRoleMismatch
	}
	return s, s.device, nil
}
// main parses flags, optionally prints version info, then serves the
// ledger signer plugin.
func main() {
	flag.Parse()
	if *versionFlag {
		fmt.Printf("Software version: %s\n", common.SoftwareVersion)
		fmt.Printf("Go toolchain version: %s\n", common.ToolchainVersion)
		return
	}
	// Signer plugins use raw contexts.
	signature.UnsafeAllowUnregisteredContexts()
	var impl ledgerPlugin
	pluginSigner.Serve("ledger", &impl)
}
|
package sol
import (
"testing"
)
// TestSol checks restoreString against a few known permutations.
func TestSol(t *testing.T) {
	cases := []struct {
		s       string
		indices []int
		want    string
	}{
		{s: "codeleet", indices: []int{4, 5, 6, 7, 0, 2, 1, 3}, want: "leetcode"},
		{s: "abc", indices: []int{0, 1, 2}, want: "abc"},
		{s: "art", indices: []int{1, 0, 2}, want: "rat"},
	}
	for _, tc := range cases {
		if got := restoreString(tc.s, tc.indices); got != tc.want {
			t.Fatal("expected:", tc.want, "but got:", got)
		}
	}
	t.Log("passed")
}
|
package users
import (
"admigo/common"
"admigo/model"
"admigo/model/roles"
"errors"
"fmt"
"time"
)
// userRequest is the JSON shape of a signup/update request, including the
// password confirmation field.
type userRequest struct {
	Name      string `json:"name"`
	Email     string `json:"email"`
	Password  string `json:"password"`
	Cpassword string `json:"cpassword"`
}
// UserModel is a user record plus its attached attributes and role.
type UserModel struct {
	Id        int              `json:"id,omitempty"`
	Name      string           `json:"name"`
	Email     string           `json:"email"`
	Password  string           `json:"password,omitempty"` // encrypted before storage
	CreatedAt *time.Time       `json:"created_at,omitempty"`
	Confirmed int              `json:"confirmed"` // 1 once the account is confirmed
	Thumb     string           `json:"thumb"`     // thumbnail file name under img_path
	Phones    []UserAttr       `json:"phones,omitempty"` // attr_id 1 rows in users_attr
	Emails    []UserAttr       `json:"emails,omitempty"` // attr_id 2 rows in users_attr
	Role      *roles.RoleModel `json:"role,omitempty"`
}
// UserAttr is a single user attribute value (phone or email) from users_attr.
type UserAttr struct {
	Id  int    `json:"id"`
	Val string `json:"val"`
}
const (
	// img_path is the web root under which user thumbnail images are served.
	img_path = "/static/images/users/"
)
// GetThumb returns the web path of the user's thumbnail image.
func (user *UserModel) GetThumb() string {
	return img_path + user.Thumb
}
// CanWrite reports whether the user may create or edit content: only
// admins and writers can.
func (user *UserModel) CanWrite() bool {
	return user.Role.Id == roles.ADMIN || user.Role.Id == roles.WRITER
}
// IsAdmin reports whether the user holds the admin role.
func (user *UserModel) IsAdmin() bool {
	return user.Role.Id == roles.ADMIN
}
// insertAttributes inserts every non-empty value from ar into users_attr
// with attribute type tp (1 = phone, 2 = email).
func (user *UserModel) insertAttributes(ar *[]UserAttr, tp int) (err error) {
	for _, at := range *ar {
		// Skip blanks silently.
		if len(at.Val) == 0 {
			continue
		}
		sql := "insert into users_attr(user_id, attr_id, val) values($1, $2, $3)"
		if _, err = model.Db.Exec(sql, user.Id, tp, at.Val); err != nil {
			return
		}
	}
	return
}
// updateAttributes replaces all of the user's attributes: it deletes the
// existing rows, then re-inserts the current phones (type 1) and
// emails (type 2).
func (user *UserModel) updateAttributes() (err error) {
	if err = user.deleteAttributes(); err != nil {
		return
	}
	if err = user.insertAttributes(&user.Phones, 1); err != nil {
		return
	}
	if err = user.insertAttributes(&user.Emails, 2); err != nil {
		return
	}
	return
}
// create inserts the user row (encrypting the password first), reads back
// the generated id and created_at, then stores the user's attributes and
// role.
func (user *UserModel) create() (err error) {
	statement := fmt.Sprintf(model.GetFormat(3),
		"insert into users (name, email, password, created_at, confirmed, thumb)",
		"values ($1, $2, $3, $4, $5, $6)",
		"returning id, created_at",
	)
	stmt, err := model.Db.Prepare(statement)
	if err != nil {
		return
	}
	defer stmt.Close()
	err = stmt.QueryRow(user.Name,
		user.Email, common.Encrypt(user.Password),
		time.Now(), user.Confirmed, user.Thumb,
	).Scan(&user.Id, &user.CreatedAt)
	if err != nil {
		return
	}
	err = user.updateAttributes()
	if err != nil {
		return
	}
	err = user.updateRole()
	return
}
// update saves the user's basic fields; the password is updated only when
// one was supplied, and the confirmed flag only when the logged-in user is
// an admin. Attributes are rewritten, and the role too for admins.
//
// NOTE(review): the optional password/confirmed/id parts are spliced into
// the SQL with Sprintf rather than bound as parameters. The values come
// from common.Encrypt output and ints, so injection risk looks low, but
// binding them would be safer — confirm and consider refactoring.
func (user *UserModel) update(logged *UserModel) (err error) {
	sql := "update users set name = $1, email = $2, thumb = $3%s%s where id = %d"
	var setpas string
	var conf string
	if len(user.Password) > 0 {
		user.Password = common.Encrypt(user.Password)
		setpas = fmt.Sprintf(", password = '%s'", user.Password)
	}
	if logged.IsAdmin() {
		conf = fmt.Sprintf(", confirmed = %d", user.Confirmed)
	}
	sql = fmt.Sprintf(sql, conf, setpas, user.Id)
	_, err = model.Db.Exec(sql, user.Name, user.Email, user.Thumb)
	if err != nil {
		return
	}
	err = user.updateAttributes()
	if err != nil {
		return
	}
	if logged.IsAdmin() {
		err = user.updateRole()
	}
	return
}
// setConfirmed marks the user's account as confirmed in the database.
func (user *UserModel) setConfirmed() error {
	query := "update users set confirmed = 1 where id = $1"
	_, err := model.Db.Exec(query, user.Id)
	return err
}
// userSession loads the user's existing session row, if any; err is
// non-nil (sql.ErrNoRows) when the user has no session.
func (user *UserModel) userSession() (session *SessionModel, err error) {
	se := SessionModel{}
	sql := fmt.Sprintf(model.GetFormat(3),
		"select id, uuid, user_id, created_at",
		"from sessions",
		"where user_id = $1",
	)
	err = model.Db.QueryRow(sql, user.Id).Scan(&se.Id, &se.Uuid, &se.UserId, &se.CreatedAt)
	session = &se
	return
}
// fill loads the user's attribute values of type tp (1 = phones,
// 2 = emails) into *ar, ordered by id.
func (user *UserModel) fill(ar *[]UserAttr, tp int) (err error) {
	sql := fmt.Sprintf(model.GetFormat(4),
		"select id, val",
		"from users_attr",
		"where user_id = $1 and attr_id = $2",
		"order by id",
	)
	rows, err := model.Db.Query(sql, user.Id, tp)
	if err != nil {
		return
	}
	// Close via defer: the original leaked the rows when Scan failed and
	// the function returned early.
	defer rows.Close()
	for rows.Next() {
		at := UserAttr{}
		if err = rows.Scan(&at.Id, &at.Val); err != nil {
			return
		}
		*ar = append(*ar, at)
	}
	// Report any error that terminated the iteration.
	err = rows.Err()
	return
}
// fillAttr loads the user's phones and emails.
// The original silently discarded both fill errors; propagate them so
// callers can tell a partial load from a complete one.
func (user *UserModel) fillAttr() (err error) {
	if err = user.fill(&user.Phones, 1); err != nil {
		return
	}
	err = user.fill(&user.Emails, 2)
	return
}
// fillRole loads the user's role (first matching row) into user.Role;
// user.Role stays nil when the user has no role row.
func (user *UserModel) fillRole() (err error) {
	sql := fmt.Sprintf(model.GetFormat(4),
		"select r.id, r.nm",
		"from users_roles u",
		"join roles r on r.id = u.role_id",
		"where u.user_id = $1",
	)
	rows, err := model.Db.Query(sql, user.Id)
	if err != nil {
		return
	}
	// Close via defer: the original leaked the rows when Scan failed and
	// the function returned early.
	defer rows.Close()
	if rows.Next() {
		role := roles.RoleModel{}
		if err = rows.Scan(&role.Id, &role.Name); err != nil {
			return
		}
		user.Role = &role
	}
	// Report any error that terminated the iteration.
	err = rows.Err()
	return
}
// createSession returns the user's existing session when one is found;
// otherwise it inserts a fresh session row with a new UUID and returns it.
func (user *UserModel) createSession() (session *SessionModel, err error) {
	// An existing session is reused rather than duplicated.
	if session, err = user.userSession(); err == nil {
		return
	}
	statement := fmt.Sprintf(model.GetFormat(3),
		"insert into sessions (uuid, user_id, created_at)",
		"values ($1, $2, $3)",
		"returning id, uuid, user_id, created_at",
	)
	stmt, err := model.Db.Prepare(statement)
	if err != nil {
		return
	}
	defer stmt.Close()
	se := SessionModel{}
	err = stmt.QueryRow(common.CreateUUID(),
		user.Id, time.Now(),
	).Scan(&se.Id, &se.Uuid, &se.UserId, &se.CreatedAt)
	session = &se
	return
}
// DeleteSessions removes every session row belonging to the user.
func (user *UserModel) DeleteSessions() error {
	query := "delete from sessions where user_id = $1"
	_, err := model.Db.Exec(query, user.Id)
	return err
}
// deleteAttributes removes all of the user's users_attr rows.
func (user *UserModel) deleteAttributes() error {
	query := "delete from users_attr where user_id = $1"
	_, err := model.Db.Exec(query, user.Id)
	return err
}
// insertRole adds a users_roles row for the user with new_role, unless an
// identical row already exists (guarded by the NOT EXISTS subquery).
func (user *UserModel) insertRole(new_role int) (err error) {
	sql := fmt.Sprintf(model.GetFormat(7),
		"insert into users_roles(user_id, role_id)",
		"select $1, $2",
		"where not exists(",
		"	select 1",
		"	from users_roles uu",
		"	where uu.user_id = $1 and uu.role_id = $2",
		")",
	)
	_, err = model.Db.Exec(sql, user.Id, new_role)
	return
}
// updateRole synchronizes the users_roles row with user.Role. Users
// without an explicit role get the default READER role; if no mapping
// row exists yet for the chosen role, one is inserted.
func (user *UserModel) updateRole() error {
	if user.Role == nil {
		return user.insertRole(roles.READER)
	}
	sql := fmt.Sprintf(model.GetFormat(3),
		"update users_roles set",
		"role_id = $1",
		"where user_id = $2",
	)
	res, err := model.Db.Exec(sql, user.Role.Id, user.Id)
	if err != nil {
		return err
	}
	ra, err := res.RowsAffected()
	if err != nil {
		return err
	}
	if ra > 0 {
		// An existing mapping was updated; nothing more to do.
		return nil
	}
	// No row was updated, so the mapping does not exist yet: insert it.
	return user.insertRole(user.Role.Id)
}
// deleteRole removes the user's users_roles mapping rows.
func (user *UserModel) deleteRole() error {
	_, err := model.Db.Exec("delete from users_roles where user_id = $1", user.Id)
	return err
}
// Delete removes the user together with all dependent rows (sessions,
// attributes, role links). Only an admin or the user themselves may
// perform the deletion.
func (user *UserModel) Delete(logged *UserModel) error {
	if !logged.IsAdmin() && logged.Id != user.Id {
		return errors.New(common.Mess().InsufRights)
	}
	// Dependent rows first, then the user row itself.
	for _, step := range []func() error{
		user.DeleteSessions,
		user.deleteAttributes,
		user.deleteRole,
	} {
		if stepErr := step(); stepErr != nil {
			return stepErr
		}
	}
	_, err := model.Db.Exec("delete from users where id = $1", user.Id)
	return err
}
|
package main
import (
"fmt"
)
// Message templates for the admin info report.
const (
	USERS_TOTAL_AND_STRAVA    = "Users: %d (unique strava users: %d)"
	USERS_TOTAL               = "Users: %d"
	USERS_UNLOCKED            = "Unlocked users: %d"
	TEAMS_TOTAL_AND_MONITORED = "Teams: %d (monitored: %d)"
	TEAMS_TOTAL               = "Teams: %d"
	CLUBS_TOTAL               = "Clubs: %d"
	// Fixed grammar in the user-facing message: "hasn't be" -> "hasn't been".
	RATE_LIMITS_UNKNOWN = "Rate usage/limits are unknown - Strava API hasn't been called yet"
	RATE_LIMITS         = "Rate usage/limits at %s: 15-minute usage - %d/%d requests, day usage - %d/%d requests"
)
// AdminInfoCommand reports bot statistics (user/team/club counts and
// Strava API rate-limit usage) to administrators.
type AdminInfoCommand struct {
}
// Name returns the identifier used to dispatch this command.
func (cmd *AdminInfoCommand) Name() string {
	const commandName = "admin_info"
	return commandName
}
// Execute gathers user, team, club and Strava rate-limit statistics and
// returns them as a multi-line report. Only admins may run this command.
func (cmd *AdminInfoCommand) Execute(params []string, message *IncomingSlackMessage, executor *CommandExecutor) (string, error) {
	adminCheck, err := executor.checkFromAdmin(message)
	if err != nil {
		return "", err
	}
	if adminCheck != "" {
		return adminCheck, nil
	}
	usersTotal, err := executor.repo.AccessDetails.Count()
	if err != nil {
		return "", err
	}
	usersStrava, err := executor.repo.AccessDetails.CountStravaUsers()
	if err != nil {
		return "", err
	}
	usersUnlocked, err := executor.repo.UserDetails.CountUnlocked()
	if err != nil {
		return "", err
	}
	teamsTotal, err := executor.repo.AccessDetails.CountTeams()
	if err != nil {
		return "", err
	}
	teamsMonitored, err := executor.repo.JobDetails.CountTeams()
	if err != nil {
		return "", err
	}
	clubsTotal, err := executor.repo.JobDetails.CountClubs()
	if err != nil {
		return "", err
	}
	usersLine := fmt.Sprintf(USERS_TOTAL, usersTotal)
	if usersStrava != usersTotal {
		usersLine = fmt.Sprintf(USERS_TOTAL_AND_STRAVA, usersTotal, usersStrava)
	}
	unlockedLine := fmt.Sprintf(USERS_UNLOCKED, usersUnlocked)
	teamsLine := fmt.Sprintf(TEAMS_TOTAL, teamsTotal)
	if teamsMonitored != teamsTotal {
		teamsLine = fmt.Sprintf(TEAMS_TOTAL_AND_MONITORED, teamsTotal, teamsMonitored)
	}
	clubsLine := fmt.Sprintf(CLUBS_TOTAL, clubsTotal)
	// A zero requestTime means the Strava API has not been hit yet.
	requestTime, limitShort, limitLong, usageShort, usageLong := executor.strava.GetRateLimits()
	rateLine := RATE_LIMITS_UNKNOWN
	if !requestTime.IsZero() {
		rateLine = fmt.Sprintf(RATE_LIMITS,
			requestTime.Format("02-Jan-06 15:04:05"), usageShort, limitShort, usageLong, limitLong)
	}
	return usersLine + "\n" + unlockedLine + "\n" + teamsLine + "\n" + clubsLine + "\n\n" + rateLine, nil
}
|
package main
import (
"encoding/json"
_ "fmt"
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/prometheus/common/version"
"gopkg.in/alecthomas/kingpin.v2"
)
// clouderaOpts bundles the connection settings for the Cloudera Manager API.
type clouderaOpts struct {
	uri string // base address, e.g. "host:7180"
	username string // basic-auth user
	password string // basic-auth password
	clusterName string // URL-encoded cluster name, e.g. "Cluster%201"
}
// namespace is the Prometheus metric namespace prefix for all exporter metrics.
const (
	namespace = "cloudera"
)
// gaugeVec builds a GaugeVec named cloudera_services_<name>, labelled by
// service name.
func gaugeVec(name string) *prometheus.GaugeVec {
	opts := prometheus.GaugeOpts{
		Namespace: namespace,
		Subsystem: "services",
		Name:      name,
		Help:      "Health of the services.",
	}
	return prometheus.NewGaugeVec(opts, []string{"name"})
}
var (
	// servicesHealthGauge tracks per-service health (1 = GOOD, 0 = anything else).
	servicesHealthGauge = gaugeVec("health")
)
// init registers the exporter's collectors with the default Prometheus
// registry before main runs.
func init() {
	prometheus.MustRegister(version.NewCollector("cloudera_exporter"))
	prometheus.MustRegister(servicesHealthGauge)
}
// ClouderaHealthCheck is a single named health check result for a service.
type ClouderaHealthCheck struct {
	Name string
	Summary string
}
// ClusterRef identifies the cluster a service belongs to.
type ClusterRef struct {
	ClusterName string
}
// ClouderaItem is one service entry from the Cloudera Manager services API.
type ClouderaItem struct {
	Name string
	// Type must be exported for encoding/json to populate it; the original
	// unexported field ("thetype") was silently ignored by Unmarshal even
	// though it carried a json tag.
	Type string `json:"type"`
	ClusterRef ClusterRef
	ServiceUrl string
	ServiceState string
	HealthSummary string
	HealthChecks []ClouderaHealthCheck
	ConfigStale bool
}
// ClouderaResponse is the top-level payload of the services API call.
type ClouderaResponse struct {
	Items []ClouderaItem
}
// getMetrics fetches the service list of the configured cluster from the
// Cloudera Manager REST API using basic auth.
func getMetrics(opts clouderaOpts) (ClouderaResponse, error) {
	path := "/api/v1/clusters/" + opts.clusterName + "/services/"
	clouderaResponse := ClouderaResponse{}
	req, err := http.NewRequest("GET", opts.uri+path, nil)
	if err != nil {
		return clouderaResponse, err
	}
	req.SetBasicAuth(opts.username, opts.password)
	// A timeout keeps a wedged API from blocking the poll loop forever.
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return clouderaResponse, err
	}
	// The body must always be closed; the original leaked the connection.
	defer resp.Body.Close()
	err = json.NewDecoder(resp.Body).Decode(&clouderaResponse)
	return clouderaResponse, err
}
// updateMetric sets the health gauge for one service: 1 for GOOD, else 0.
func updateMetric(name string, healthSummary string) {
	status := 0.0
	if healthSummary == "GOOD" {
		status = 1.0
	}
	// Debugln does not interpolate format verbs; Debugf is required here.
	log.Debugf("Updating metric %s = %f", name, status)
	servicesHealthGauge.WithLabelValues(name).Set(status)
}
func main() {
var (
listenAddress = kingpin.Flag("web.listen-address", "Address to listen on for web interface and telemetry.").Default(":9107").String()
metricsPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").String()
opts = clouderaOpts{}
)
kingpin.Flag("cloudera.uri", "Address and port of the cloudera api").Default(":7180").StringVar(&opts.uri)
kingpin.Flag("cloudera.user", "Username").Default("admin").StringVar(&opts.username)
kingpin.Flag("cloudera.password", "Password").Default("").StringVar(&opts.password)
kingpin.Flag("cloudera.clustername", "apui path").Default("Cluster%201").StringVar(&opts.clusterName)
log.AddFlags(kingpin.CommandLine)
kingpin.Version(version.Print("cloudera_exporter"))
kingpin.HelpFlag.Short('h')
kingpin.Parse()
log.Infoln("Starting cloudera_exporter", version.Info())
log.Infoln("Build context", version.BuildContext())
go func() {
for {
clouderaResponse, err := getMetrics(opts)
if err != nil {
log.Fatalln(err)
}
for _, item := range clouderaResponse.Items {
updateMetric(item.Name, item.HealthSummary)
}
time.Sleep(time.Duration(10000 * time.Millisecond))
}
}()
http.Handle(*metricsPath, prometheus.Handler())
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(`<html>
<head><title>Cloudera Exporter</title></head>
<body>
<h1>Cloudera Exporter</h1>
<p><a href='` + *metricsPath + `'>Metrics</a></p>
<h2>Build</h2>
<pre>` + version.Info() + ` ` + version.BuildContext() + `</pre>
</body>
</html>`))
})
log.Infoln("Listening on", *listenAddress)
log.Fatal(http.ListenAndServe(*listenAddress, nil))
} |
package main
/**
This app will contain multilevel bootstrap event
*/
import (
"context"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"github.com/gin-gonic/gin"
)
// Modloc and Libloc hold module/library locations; presumably filled in
// during bootstrap — confirm against BootstrapAll.
var Modloc string = ""
var Libloc string = ""
// Routers is the gin engine used as the HTTP handler for the server.
var Routers *gin.Engine
// main bootstraps the application, serves HTTP in the background, and shuts
// down gracefully on SIGINT/SIGTERM with a 30-second drain timeout.
func main() {
	BootstrapAll()
	srv := &http.Server{
		Addr:    ListenAddr,
		Handler: Routers,
	}
	fmt.Printf("Listening at: %s", ListenAddr)
	// Serve in a goroutine. The original also called srv.ListenAndServe()
	// synchronously first, which blocked forever and made every line of
	// the graceful-shutdown code below unreachable.
	go func() {
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatalf("listen: %s\n", err)
		}
	}()
	// Buffered so the signal is not lost if it arrives before we receive.
	quit := make(chan os.Signal, 1)
	// kill (no param) sends syscall.SIGTERM; kill -2 is syscall.SIGINT.
	// SIGKILL cannot be caught, so it is not registered.
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
	<-quit
	log.Println("Shutdown Server ...")
	os.Remove("/tmp/shinyRuntimeFile")
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Fatal("Server Shutdown:", err)
	}
	// The original blocked on <-ctx.Done() here after Shutdown had already
	// returned, delaying exit by up to the full timeout; removed.
	log.Println("Server exiting")
}
|
package main
import "fmt"
// printMap dumps every key/value pair of cityMap. Maps share underlying
// storage, so no copy of the data is made when it is passed in.
func printMap(cityMap map[string]string) {
	for k, v := range cityMap {
		fmt.Println("key =", k, "value =", v)
	}
}
// changeValue mutates the caller's map, demonstrating that maps are
// reference-like: the change is visible outside this function.
func changeValue(cityMap map[string]string) {
	const capital = "London"
	cityMap["England"] = capital
}
// main demonstrates map creation, insertion, deletion, update and the
// shared-storage behaviour of maps.
func main() {
	cityMap := map[string]string{
		"China": "Beijing",
		"Japan": "Tokyo",
		"USA":   "NewYork",
	}
	printMap(cityMap)
	delete(cityMap, "China") // remove an entry
	cityMap["USA"] = "DC"    // overwrite an entry
	changeValue(cityMap)     // mutate through another function
	fmt.Println("----------")
	printMap(cityMap)
}
|
package sim
import (
"time"
)
// Portfolio models an investment portfolio with one return target per risk
// band, and caches the last computed monthly return per date.
type Portfolio struct {
	*Simulacrum
	speculative float64 // highest-risk band
	aggressive float64
	moderate float64
	conservative float64
	lastDate *time.Time // date of the cached return; nil until first draw
	lastRet *float64 // cached monthly return for lastDate
}
// NewPortfolio builds a Portfolio with return targets for each risk band.
func NewPortfolio(sim *Simulation, speculative, aggressive, moderate, conservative float64) *Portfolio {
	p := &Portfolio{Simulacrum: NewSimulacrum(sim)}
	p.speculative = speculative
	p.aggressive = aggressive
	p.moderate = moderate
	p.conservative = conservative
	return p
}
// riskTarget maps years-until-retirement to a risk band: conservative at or
// after retirement, moderate within 5 years, aggressive within 40 years,
// speculative beyond that.
func (p *Portfolio) riskTarget(date time.Time) float64 {
	years := p.Sim().RetirementAge() - p.Age(date)
	switch {
	case years <= 0.0:
		return p.conservative
	case years <= 5.0:
		return p.moderate
	case years <= 40.0:
		return p.aggressive
	default:
		return p.speculative
	}
}
// PortfolioReturn draws a random monthly return for the given date, scaled
// by the age-dependent risk target.
//
// NOTE(review): on a repeated call with the same date, the cached return is
// re-perturbed with a small Gauss factor rather than returned unchanged,
// and lastRet itself is not updated — confirm this is intentional.
func (p *Portfolio) PortfolioReturn(date time.Time) float64 {
	risk := p.riskTarget(date)
	if p.lastDate != nil && p.lastRet != nil && date.Equal(*p.lastDate) {
		return *p.lastRet * (1.0 + (p.Gauss(0.0, risk * 0.05) / 12.0))
	}
	// Fresh draw: mean scales with the market return and risk; stddev with risk.
	mkt := p.Sim().Economy.MarketReturn(date)
	mean := risk * mkt / 10.0
	stddev := risk * 0.25
	ret := p.Gauss(mean, stddev) / 12.0
	p.lastDate = &date
	p.lastRet = &ret
	return ret
}
|
package server
import (
"fmt"
"github.com/asaskevich/govalidator"
"github.com/go-chi/chi/v5"
"github.com/go-chi/render"
"github.com/profiralex/go-bootstrap-redis/pkg/bl"
"net/http"
)
// entityResponse is the JSON shape returned to clients for one Entity.
// swagger:model
type entityResponse struct {
	UUID string `json:"uuid"`
	Field1 string `json:"field_1"`
	Field2 int `json:"field_2"`
	Field3 bool `json:"field_3"`
	Field4 string `json:"field_4"`
}
// convertToEntityResponse maps a business-layer Entity onto the API
// response shape.
func convertToEntityResponse(e bl.Entity) entityResponse {
	var resp entityResponse
	resp.UUID = e.UUID
	resp.Field1 = e.Field1
	resp.Field2 = e.Field2
	resp.Field3 = e.Field3
	resp.Field4 = e.Field4
	return resp
}
// entitiesController groups the HTTP handlers for the /entities resource.
type entitiesController struct {
	entitiesRepo bl.EntitiesRepository
}
// newEntitiesController wires the controller to the Redis-backed repository.
func newEntitiesController() *entitiesController {
	repo := &bl.RedisEntitiesRepository{}
	return &entitiesController{entitiesRepo: repo}
}
// createEntityRequest is the JSON body accepted by POST /entities.
// Field1 and Field4 are required per the govalidator tags.
// swagger:model
type createEntityRequest struct {
	Field1 string `json:"field_1" valid:"required"`
	Field2 int `json:"field_2"`
	Field3 bool `json:"field_3"`
	Field4 string `json:"field_4" valid:"required"`
}
// Bind validates the decoded request body against the struct's valid tags.
func (d *createEntityRequest) Bind(*http.Request) error {
	if _, err := govalidator.ValidateStruct(d); err != nil {
		return err
	}
	return nil
}
// swagger:operation POST /entities createEntity
// Create Entity
//
// Create an Entity
// ---
// parameters:
// - { name: data, in: body, schema: { "$ref": "#/definitions/createEntityRequest" }, required: true, description: entity creation params}
// responses:
//   201: { schema: { "$ref": "#/definitions/entityResponse" } }
func (c *entitiesController) createEntity(w http.ResponseWriter, r *http.Request) {
	req := &createEntityRequest{}
	if err := render.Bind(r, req); err != nil {
		respondValidationErrors(w, r, err, http.StatusBadRequest)
		return
	}
	entity := bl.Entity{
		Field1: req.Field1,
		Field2: req.Field2,
		Field3: req.Field3,
		Field4: req.Field4,
	}
	if err := c.entitiesRepo.Save(r.Context(), &entity); err != nil {
		respondError(w, r, fmt.Errorf("failed to save entity: %w", err))
		return
	}
	respondSuccess(w, r, convertToEntityResponse(entity), http.StatusCreated)
}
// swagger:operation GET /entities/{uuid} getEntity
// Get Entity by uuid
//
// Get an Entity by uuid
// ---
// parameters:
// - { name: uuid, in: path, type: string, required: true, description: entity uuid}
// responses:
//   200: { schema: { "$ref": "#/definitions/entityResponse" } }
func (c *entitiesController) getEntity(w http.ResponseWriter, r *http.Request) {
	uuid := chi.URLParam(r, "uuid")
	entity, err := c.entitiesRepo.FindByUUID(r.Context(), uuid)
	if err != nil {
		respondError(w, r, fmt.Errorf("failed to get entity: %w", err))
		return
	}
	respondSuccess(w, r, convertToEntityResponse(entity), http.StatusOK)
}
|
package go_ntskem
import (
"testing"
)
// TestGenerateKey exercises NTS-KEM key generation with parameter 12; it is
// a smoke test that only verifies the call chain does not panic.
func TestGenerateKey(t *testing.T) {
	nts := NTSKEM{}
	nts.New(12)
	nts.GenerateKey()
}
// TestEncapsulate is an unimplemented placeholder.
// TODO: encapsulate against a generated public key and check the result.
func TestEncapsulate(t *testing.T) {
}
// TestDecapsulate is an unimplemented placeholder.
// TODO: verify decapsulation recovers the encapsulated shared secret.
func TestDecapsulate(t *testing.T) {
}
|
package main
import (
"fmt"
)
// arr is a fixed-size array; slc is a slice with the same initial elements.
var arr [5]int = [5]int{1, 2, 3, 4, 5}
var slc []int = []int{1, 2, 3, 4, 5}
// main prints the array and slice, grows the slice, and prints it again.
func main() {
	fmt.Println(arr)
	fmt.Println(slc)
	// Assign with = rather than := so we extend the package-level slice
	// instead of accidentally shadowing it with a new local variable.
	slc = append(slc, 6)
	fmt.Println(slc)
	// slc[20] = 19 // would panic: index out of range
}
|
package main
import "fmt"
// Suite enumerates the four playing-card suites.
type Suite int

const (
	Spades Suite = iota
	Hearts
	Diamonds
	Clubs
)

// String implements fmt.Stringer. Out-of-range values yield "Unknown"
// instead of panicking with an index-out-of-range error as before.
func (s Suite) String() string {
	names := [...]string{"Spades", "Hearts", "Diamonds", "Clubs"}
	if s < 0 || int(s) >= len(names) {
		return "Unknown"
	}
	return names[s]
}
/**
 * created: 2019/7/15 13:01
 * By Will Fan
 */
// main prints a suite name followed by an opinionated ranking comment.
func main() {
	s := Hearts
	fmt.Print(s)
	var verdict string
	switch s {
	case Spades:
		verdict = " are best"
	case Hearts:
		verdict = " are second best"
	default:
		verdict = " aren't very good"
	}
	fmt.Println(verdict)
}
|
package printer
import (
"github.com/davyxu/tabtoy/util"
"github.com/davyxu/tabtoy/v2/i18n"
"github.com/davyxu/tabtoy/v2/model"
)
// valueWrapperPbt renders a cell value for pbt output, escaping strings.
func valueWrapperPbt(t model.FieldType, node *model.Node) string {
	if t == model.FieldType_String {
		return util.StringEscape(node.Value)
	}
	return node.Value
}
// pbtPrinter emits tables in the text protobuf (pbt) format.
type pbtPrinter struct {
}
// Run renders every table tagged ".pbt" into a single output stream; tables
// without the tag are logged and skipped.
func (self *pbtPrinter) Run(g *Globals, outputClass int) *Stream {
	out := NewStream()
	out.Printf("# Generated by github.com/davyxu/tabtoy\n")
	out.Printf("# Version: %s\n", g.Version)
	for _, tab := range g.Tables {
		if !tab.LocalFD.MatchTag(".pbt") {
			log.Infof("%s: %s", i18n.String(i18n.Printer_IgnoredByOutputTag), tab.Name())
			continue
		}
		if !printTablePBT(out, tab) {
			return nil
		}
	}
	return out
}
// printTablePBT writes one table as a pbt block: `<name>: [ {rec}, ... ]`.
// It currently always returns true for non-empty tables.
func printTablePBT(bf *Stream, tab *model.Table) bool {
	if len(tab.Recs) == 0 {
		return true
	}
	bf.Printf("%s: [\n", tab.LocalFD.Name)
	// Iterate over every record (row).
	for recIndex, r := range tab.Recs {
		bf.Printf(" {")
		// Iterate over every root field (column).
		for rootFieldIndex, node := range r.Nodes {
			if node.SugguestIgnore && !node.IsRepeated {
				continue
			}
			if node.IsRepeated {
				bf.Printf("%s:[ ", node.Name)
			} else {
				bf.Printf("%s: ", node.Name)
			}
			// Plain (non-struct) value.
			if node.Type != model.FieldType_Struct {
				if node.IsRepeated {
					// Repeated value sequence.
					for arrIndex, valueNode := range node.Child {
						bf.Printf("%s", valueWrapperPbt(node.Type, valueNode))
						// Separator between values.
						if arrIndex < len(node.Child)-1 {
							bf.Printf(", ")
						}
					}
				} else {
					// Single value.
					valueNode := node.Child[0]
					bf.Printf("%s", valueWrapperPbt(node.Type, valueNode))
				}
			} else {
				// Iterate over the repeated struct values.
				for structIndex, structNode := range node.Child {
					// Struct opening.
					bf.Printf("{ ")
					// Iterate over one struct's fields.
					for structFieldIndex, fieldNode := range structNode.Child {
						// The value node is always the first child.
						valueNode := fieldNode.Child[0]
						bf.Printf("%s: %s", fieldNode.Name, valueWrapperPbt(fieldNode.Type, valueNode))
						// Separator between struct fields.
						if structFieldIndex < len(structNode.Child)-1 {
							bf.Printf(", ")
						}
					}
					// Struct closing.
					bf.Printf(" }")
					// Separator between structs.
					if structIndex < len(node.Child)-1 {
						bf.Printf(", ")
					}
				}
			}
			if node.IsRepeated {
				bf.Printf(" ]")
			}
			// Separator between root fields.
			if rootFieldIndex < len(r.Nodes)-1 {
				bf.Printf(", ")
			}
		}
		bf.Printf(" }")
		// Separator between records.
		if recIndex < len(tab.Recs)-1 {
			bf.Printf(", ")
		}
		bf.Printf("\n")
	}
	bf.Printf("]\n\n")
	return true
}
// init registers this printer under the "pbt" output name.
func init() {
	RegisterPrinter("pbt", &pbtPrinter{})
}
|
package runner
import (
"context"
)
// Runner is the interface for anything that can be started and produces a
// Result when it finishes.
type Runner interface {
	// Run executes until completion or ctx cancellation and returns a Result.
	Run(context.Context) Result
}
|
package render
import (
"github.com/tanema/amore/gfx"
)
// increment is the per-layer vertical offset (in pixels) used by Draw.
const increment float32 = 1
// Fake3D renders a stack of sprite-sheet quads with a small vertical offset
// per layer to fake a 3D effect.
type Fake3D struct {
	img *gfx.Image
	quads []*gfx.Quad
	ox, oy float32 // rotation origin: half a frame in each dimension
}
// New loads a sprite sheet and slices it into frameWidth x frameHeight
// quads, one per stacked layer of the fake-3D sprite.
func New(filepath string, frameWidth, frameHeight int32) (*Fake3D, error) {
	img, err := gfx.NewImage(filepath)
	if err != nil {
		return nil, err
	}
	imgW, imgH := img.GetWidth(), img.GetHeight()
	cols, rows := imgW/frameWidth, imgH/frameHeight
	quads := make([]*gfx.Quad, 0, cols*rows)
	for row := int32(0); row < rows; row++ {
		for col := int32(0); col < cols; col++ {
			quads = append(quads, gfx.NewQuad(col*frameWidth, row*frameHeight, frameWidth, frameHeight, imgW, imgH))
		}
	}
	f := &Fake3D{img: img, quads: quads}
	f.ox = float32(frameWidth) / 2
	f.oy = float32(frameHeight) / 2
	return f, nil
}
// Draw renders each quad one increment higher than the previous, producing
// the stacked fake-3D effect at (x, y) with the given rotation.
func (f3d *Fake3D) Draw(x, y, angle float32) {
	for i := range f3d.quads {
		gfx.Drawq(f3d.img, f3d.quads[i], x, y, angle, 1, 1, f3d.ox, f3d.oy)
		y -= increment
	}
}
|
package main
import "fmt"
// ListNode 19.
//给你一个链表,删除链表的倒数第 n 个结点,并且返回链表的头结点。
//
//
//
// 示例 1:
//
//
//输入:head = [1,2,3,4,5], n = 2
//输出:[1,2,3,5]
//
//
// 示例 2:
//
//
//输入:head = [1], n = 1
//输出:[]
//
//
// 示例 3:
//
//
//输入:head = [1,2], n = 1
//输出:[1]
//
//
//
//
// 提示:
//
//
// 链表中结点的数目为 sz
// 1 <= sz <= 30
// 0 <= Node.val <= 100
// 1 <= n <= sz
//
//
//
//
// 进阶:你能尝试使用一趟扫描实现吗?
// Related Topics 链表 双指针 👍 1772 👎 0
// ListNode is a singly linked list node (LeetCode problem 19).
type ListNode struct {
	Val int
	Next *ListNode
}
// main builds the list 1->2->3->4->5, removes the 1st node from the end and
// prints the resulting head.
func main() {
	var head *ListNode
	// Build back to front so each new node prepends the rest of the list.
	for v := 5; v >= 1; v-- {
		head = &ListNode{Val: v, Next: head}
	}
	end := removeNthFromEnd(head, 1)
	fmt.Println(end)
}
// removeNthFromEnd deletes the n-th node counted from the end and returns
// the (possibly new) head. Two pointers kept n nodes apart let the list be
// scanned in a single pass.
func removeNthFromEnd(head *ListNode, n int) *ListNode {
	lead, trail := head, head
	// Advance the lead pointer n nodes ahead.
	for i := 0; i < n; i++ {
		lead = lead.Next
	}
	// Lead ran off the end: the head itself is the n-th from the end.
	if lead == nil {
		return head.Next
	}
	// Walk both pointers until lead reaches the last node; trail then sits
	// immediately before the node to delete.
	for lead.Next != nil {
		trail = trail.Next
		lead = lead.Next
	}
	trail.Next = trail.Next.Next
	return head
}
|
package compose
import (
"fmt"
"strconv"
"github.com/kudrykv/latex-yearly-planner/app/components/calendar"
"github.com/kudrykv/latex-yearly-planner/app/components/header"
"github.com/kudrykv/latex-yearly-planner/app/components/page"
"github.com/kudrykv/latex-yearly-planner/app/config"
)
// HeaderTodosIndexed builds the todos index page header plus headers for
// 100 numbered todo pages, each cross-linking its neighbours.
func HeaderTodosIndexed(cfg config.Config, tpls []string) (page.Modules, error) {
	if len(tpls) != 1 {
		// Fixed message: exactly one template is required (the original
		// said "exppected two tpls" while checking for one).
		return nil, fmt.Errorf("expected one tpl, got %d %v", len(tpls), tpls)
	}
	modules := make(page.Modules, 0, 101)
	modules = append(modules, page.Module{
		Cfg: cfg,
		Tpl: tpls[0],
		Body: header.Header{
			Left: header.Items{
				header.NewIntItem(cfg.Year),
				header.NewTextItem("Todos Index").Ref(true),
			},
			Right: header.Items{
				header.NewTextItem("Notes").RefText("Notes Index"),
			},
		},
	})
	for i := 1; i <= 100; i++ {
		right := header.Items{}
		// NOTE(review): i > 2 means page 2 gets no "previous" link;
		// confirm whether i > 1 was intended.
		if i > 2 {
			right = append(right, header.NewTextItem("Todo "+strconv.Itoa(i-1)))
		}
		if i < 100 {
			right = append(right, header.NewTextItem("Todo "+strconv.Itoa(i+1)))
		}
		modules = append(modules, page.Module{
			Cfg: cfg,
			Tpl: tpls[0],
			Body: header.Header{
				Left: header.Items{
					header.NewIntItem(cfg.Year),
					header.NewTextItem("Todos Index"),
					header.NewTextItem("Todo " + strconv.Itoa(i)).Ref(true),
				},
				Right: right,
			},
		})
	}
	return modules, nil
}
// HeaderTodosIndexed2 builds the cell-style todos index header plus headers
// for 100 numbered todo pages, with month/quarter strips on every page.
func HeaderTodosIndexed2(cfg config.Config, tpls []string) (page.Modules, error) {
	if len(tpls) != 1 {
		// Fixed message: exactly one template is required (the original
		// said "exppected two tpls" while checking for one).
		return nil, fmt.Errorf("expected one tpl, got %d %v", len(tpls), tpls)
	}
	modules := make(page.Modules, 0, 101)
	modules = append(modules, page.Module{
		Cfg: cfg,
		Tpl: tpls[0],
		Body: map[string]interface{}{
			"Todos": "Todos Index",
			"Cells": header.Items{
				header.NewCellItem("Calendar"),
				header.NewCellItem("To Do").Refer("Todos Index").Select(),
				header.NewCellItem("Notes").Refer("Notes Index"),
			},
			"Months":   MonthsToCellItems(cfg.WeekStart, calendar.NewYearInMonths(cfg.Year).Reverse()),
			"Quarters": QuartersToCellItems(calendar.NewYearInQuarters(cfg.Year).Reverse()),
		},
	})
	for i := 1; i <= 100; i++ {
		modules = append(modules, page.Module{
			Cfg: cfg,
			Tpl: tpls[0],
			Body: map[string]interface{}{
				"Todos": "Todo " + strconv.Itoa(i),
				"Cells": header.Items{
					header.NewCellItem("Calendar"),
					header.NewCellItem("To Do").Refer("Todos Index"),
					header.NewCellItem("Notes").Refer("Notes Index"),
				},
				"Months":   MonthsToCellItems(cfg.WeekStart, calendar.NewYearInMonths(cfg.Year).Reverse()),
				"Quarters": QuartersToCellItems(calendar.NewYearInQuarters(cfg.Year).Reverse()),
			},
		})
	}
	return modules, nil
}
|
package test2
import (
"fmt"
_ "fmt"
)
// Human is a base type embedded by Student.
type Human struct {
	Age int
	Name string
}
// Say prints a short introduction line for the human.
// Fixed output typo: the original printed "humam".
func (h *Human) Say() {
	fmt.Println("human " + h.Name + " is say")
}
// Say prints a short introduction line for the student, shadowing the
// embedded Human's method of the same name.
func (s *Student) Say() {
	msg := "Student " + s.Name + " is say"
	fmt.Println(msg)
}
// Student embeds Human plus an anonymous int field.
// Name shadows the embedded Human.Name; use s.Human.Name for the outer one.
type Student struct {
	Human
	int
	Name string
	Value string
	Score int
	Id string
}
// Println dumps selected fields using the builtin println (writes to stderr).
func (s *Student) Println() {
	println("********")
	println(s.Score)
	println(s.Value)
	println(s.Human.Name) // the embedded Human's name, not the shadowing field
}
|
package test
import (
"fmt"
"log"
"net/http"
)
// ValidateUserLogin logs the parsed url/body form fields of the request and
// echoes the parsed PostForm back to the client (x-www-form-urlencoded demo).
func ValidateUserLogin(w http.ResponseWriter, r *http.Request) {
	err := r.ParseForm()
	if err != nil {
		w.Write([]byte(err.Error()))
		// Stop here: the original fell through and kept reading the
		// (possibly empty) form after reporting the parse failure.
		return
	}
	log.Println(r.Form.Get("hello"))
	log.Println(r.Form.Get("post"))
	log.Println(r.PostForm.Get("hello") + "PostForm")
	log.Println(r.PostForm.Get("post") + "PostForm")
	log.Println(r.Form)
	log.Println(r.Form.Get("thread") + "thread")
	/*
		log.Println(r.MultipartForm.Value["hello"][0])
		log.Println(r.PostFormValue("hello"))
		fmt.Fprintln(w, r.PostFormValue("hello"))
	*/
	// File-upload handling example (disabled):
	/*
		fileHeader := r.MultipartForm.File["upload"][0]
		log.Println(fileHeader)
		file, _ := fileHeader.Open()
		s, _ := ioutil.ReadAll(file)
		//log.Println(r.PostFormValue("hello"))
		fmt.Fprintln(w, string(s))
	*/
	fmt.Fprintln(w, r.PostForm)
}
|
package modA
import (
"testing"
)
// TestA logs the package-level value A; it only confirms A is accessible
// from tests in the same package.
func TestA(t *testing.T) {
	t.Log(A)
}
|
package main
import (
"encoding/json"
"log"
"net/http"
"html/template"
"gopkg.in/mgo.v2/bson"
"fmt"
//"io"
//"strings"
"github.com/gorilla/mux"
. "github.com/cboornaz17/pallas/src/config"
. "github.com/cboornaz17/pallas/src/dao"
. "github.com/cboornaz17/pallas/src/models"
)
// config holds settings parsed from config.toml; dao is the shared Mongo
// data-access object. Both are initialized in init().
var config = Config{}
var dao = ImagesDAO{}
// IndexHandler serves gui/index.html, the user-input page.
func IndexHandler(w http.ResponseWriter, r *http.Request) {
	tmpl, err := template.ParseFiles("gui/index.html")
	if err != nil {
		log.Println(err.Error()) // keep the detailed error server-side
		// Clients only see a generic Internal Server Error.
		http.Error(w, http.StatusText(500), 500)
		return
	}
	if execErr := tmpl.ExecuteTemplate(w, "index.html", nil); execErr != nil {
		log.Println(execErr.Error())
		http.Error(w, http.StatusText(500), 500)
	}
}
// ConvertImage is a placeholder endpoint; conversion is not implemented yet.
func ConvertImage(w http.ResponseWriter, r *http.Request) {
}
// AllImagesEndPoint handles GET /images and returns every stored image.
func AllImagesEndPoint(w http.ResponseWriter, r *http.Request) {
	images, err := dao.FindAll()
	if err == nil {
		respondWithJson(w, http.StatusOK, images)
		return
	}
	respondWithError(w, http.StatusInternalServerError, err.Error())
}
// FindImageEndpoint handles GET /images/{id}, looking one image up by id.
func FindImageEndpoint(w http.ResponseWriter, r *http.Request) {
	id := mux.Vars(r)["id"]
	image, err := dao.FindById(id)
	if err != nil {
		respondWithError(w, http.StatusBadRequest, "Invalid Image ID")
		return
	}
	respondWithJson(w, http.StatusOK, image)
}
// CreateImageEndPoint handles POST /images: decodes the payload, assigns a
// fresh ObjectId and stores the image.
func CreateImageEndPoint(w http.ResponseWriter, r *http.Request) {
	fmt.Println("hit")
	defer r.Body.Close()
	var image Image
	if err := json.NewDecoder(r.Body).Decode(&image); err != nil {
		// The original called log.Fatal here, which terminated the whole
		// server on any malformed payload; log and return a 400 instead.
		log.Println(err)
		respondWithError(w, http.StatusBadRequest, "Invalid request payload")
		return
	}
	image.ID = bson.NewObjectId()
	if err := dao.Insert(image); err != nil {
		respondWithError(w, http.StatusInternalServerError, err.Error())
		return
	}
	respondWithJson(w, http.StatusCreated, image)
	log.Println("Finished creating an image")
}
// UpdateImageEndPoint handles PUT /images, replacing an existing image.
func UpdateImageEndPoint(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	image := Image{}
	if decodeErr := json.NewDecoder(r.Body).Decode(&image); decodeErr != nil {
		respondWithError(w, http.StatusBadRequest, "Invalid request payload")
		return
	}
	if updateErr := dao.Update(image); updateErr != nil {
		respondWithError(w, http.StatusInternalServerError, updateErr.Error())
		return
	}
	respondWithJson(w, http.StatusOK, map[string]string{"result": "success"})
}
// DeleteImageEndPoint handles DELETE /images, removing the posted image.
func DeleteImageEndPoint(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	image := Image{}
	if decodeErr := json.NewDecoder(r.Body).Decode(&image); decodeErr != nil {
		respondWithError(w, http.StatusBadRequest, "Invalid request payload")
		return
	}
	if deleteErr := dao.Delete(image); deleteErr != nil {
		respondWithError(w, http.StatusInternalServerError, deleteErr.Error())
		return
	}
	respondWithJson(w, http.StatusOK, map[string]string{"result": "success"})
}
// respondWithError writes {"error": msg} with the given status code.
func respondWithError(w http.ResponseWriter, code int, msg string) {
	payload := map[string]string{"error": msg}
	respondWithJson(w, code, payload)
}
// respondWithJson writes payload as a JSON response with the given status.
func respondWithJson(w http.ResponseWriter, code int, payload interface{}) {
	response, err := json.Marshal(payload)
	if err != nil {
		// The original discarded this error and silently wrote an empty
		// body; fall back to a plain 500 instead.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	w.Write(response)
}
// init parses config.toml and establishes the database connection before
// main registers the routes.
func init() {
	config.Read()
	dao.Server = config.Server
	dao.Database = config.Database
	dao.Connect()
}
// main registers the HTTP routes and serves on :3000.
func main() {
	r := mux.NewRouter()
	//r.PathPrefix("../gui/").Handler(http.StripPrefix("../gui/", http.FileServer(http.Dir("../gui"))))
	r.HandleFunc("/", IndexHandler)
	//r.Handle("/gui/", http.StripPrefix("/gui/", http.FileServer(http.Dir("gui"))))
	r.HandleFunc("/images", CreateImageEndPoint).Methods("POST")
	r.HandleFunc("/images", AllImagesEndPoint).Methods("GET")
	//r.HandleFunc("/images", CreateImageEndPoint).Methods("POST")
	// NOTE(review): update/delete take the image in the request body rather
	// than an {id} path segment — confirm this matches the client.
	r.HandleFunc("/images", UpdateImageEndPoint).Methods("PUT")
	r.HandleFunc("/images", DeleteImageEndPoint).Methods("DELETE")
	r.HandleFunc("/images/{id}", FindImageEndpoint).Methods("GET")
	if err := http.ListenAndServe(":3000", r); err != nil {
		log.Fatal(err)
	}
}
|
// +build !linux
package mptcp
import "testing"
// TestOthers_checkMPTCP verifies that checkMPTCP reports ErrNotImplemented
// on platforms other than Linux (this file builds with !linux).
func TestOthers_checkMPTCP(t *testing.T) {
	ok, err := checkMPTCP("localhost", 8080)
	if ok || err != ErrNotImplemented {
		t.Fatalf("checkMPTCP is not implemented, but returned: (%v, %v)", ok, err)
	}
}
// TestOthers_mptcpEnabled verifies that mptcpEnabled reports (false, nil)
// on platforms without explicit MPTCP support.
func TestOthers_mptcpEnabled(t *testing.T) {
	ok, err := mptcpEnabled()
	if ok || err != nil {
		t.Fatalf("mptcpEnabled should return (false, nil), but returned: (%v, %v)", ok, err)
	}
}
|
package api
import (
"fmt"
"log"
//"time"
"net/http"
//"strconv"
"github.com/gorilla/mux"
//"github.com/robfig/cron"
"github.com/acmakhoa/smsapp/db"
"github.com/acmakhoa/smsapp/worker"
)
// ListSmsAPI groups the HTTP handlers for SMS list management.
type ListSmsAPI struct{}
// FindAllHandler returns every SMS list as JSON.
func (_ *ListSmsAPI) FindAllHandler(w http.ResponseWriter, r *http.Request) {
	listModel := db.List{}
	lists := listModel.FindAll()
	Response(w, map[string]interface{}{"lists": lists})
}
// FindByIdHandler returns one list plus the SMS messages that belong to it.
func (_ *ListSmsAPI) FindByIdHandler(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	fmt.Println("id:", params["id"])
	listModel := db.List{}
	list := listModel.FindById(params["id"])
	smsModel := db.SMS{}
	smses := smsModel.FindByList(params["id"])
	Response(w, map[string]interface{}{"list": list, "sms": smses})
}
// DeleteHandler removes a list by id. Allowed methods: POST.
func (_ *ListSmsAPI) DeleteHandler(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	list := db.List{Id: params["id"]}
	// "err" replaces the original variable named "error", which shadowed
	// the predeclared error type.
	if err := list.Delete(); err != nil {
		ErrorResponse(w, "error", 1000)
		return
	}
	Response(w, map[string]interface{}{})
}
// DeleteSmsHandler removes one SMS from a list. Allowed methods: POST.
func (_ *ListSmsAPI) DeleteSmsHandler(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	log.Println("params;;;;;;:", params)
	list := db.List{Id: params["id"]}
	// "err" replaces the original variable named "error", which shadowed
	// the predeclared error type.
	if err := list.DeleteSms(params["sms_id"]); err != nil {
		ErrorResponse(w, "error", 1000)
		return
	}
	Response(w, map[string]interface{}{})
}
// SendSMSHandler queues a single SMS from a list for sending.
func (_ *ListSmsAPI) SendSMSHandler(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	listModel := db.List{}
	list := listModel.FindById(params["id"])
	log.Println("send message in list:", list)
	// Queue the list body for the target phone.
	worker.AddSMS(params["sms_id"], list.Body)
	log.Println("send message to phone:", params["sms_id"])
	Response(w, map[string]interface{}{})
}
// send a sms in ListSMS
func (_ *ListSmsAPI) SendHandler(w http.ResponseWriter, r *http.Request) {
params := mux.Vars(r)
model:=db.List{}
l:=model.FindById(params["id"])
modelSms:=db.SMS{}
smses:=modelSms.FindByList(params["id"])
queue:=SmsQueueAPI{}
for i:=0;i<len(smses);i++{
//log.Println("send message to phone:",smses[i].Id,l.Body)
go queue.Push(l,smses[i],i)
}
Response(w,map[string]interface{}{})
} |
package processor
// Processor3 is a trivial processor used to demonstrate pluggable processors.
type Processor3 struct {
}

// NewProcessor3 returns a ready-to-use Processor3 value.
func NewProcessor3() Processor3 {
	var p Processor3
	return p
}

// Process reports which processor handled the work.
func (p3 Processor3) Process() string {
	const result = "Processing 3"
	return result
}
package dcp
import (
"fmt"
"strings"
)
// Given the root to a binary tree, implement serialize(root), which serializes the tree into a string, and deserialize(s), which deserializes the string back into the tree.
// For example, given the following Node class
// class Node:
// def __init__(self, val, left=None, right=None):
// self.val = val
// self.left = left
// self.right = right
// The following test should pass:
// node = Node('root', Node('left', Node('left.left')), Node('right'))
// assert deserialize(serialize(node)).left.left.val == 'left.left'
// node is a binary tree node holding a string value.
type node struct {
	val string
	left *node
	right *node
}
// serializeNode appends a preorder traversal of n to s, writing the
// sentinel token "nil" for absent children.
func serializeNode(n *node, s *[]string) {
	if n == nil {
		*s = append(*s, "nil")
		return
	}
	*s = append(*s, n.val)
	serializeNode(n.left, s)
	serializeNode(n.right, s)
}
// serialize flattens the tree to a bracketed space-separated preorder
// string, e.g. "[root left nil nil right]".
func serialize(root *node) string {
	var tokens []string
	serializeNode(root, &tokens)
	return fmt.Sprint(tokens)
}
// deserializeNodes rebuilds a subtree from the preorder token stream.
// Invariant: each call consumes every token of its subtree EXCEPT the last
// one; the caller performs the final *s = (*s)[1:] after each recursive
// call. A "nil" token is therefore returned without being consumed here.
func deserializeNodes(s *[]string) *node {
	if len(*s) < 1 {
		return nil
	}
	val := (*s)[0]
	if val == "nil" {
		// Leave the "nil" token in place; the caller consumes it.
		return nil
	}
	*s = (*s)[1:] // consume this node's value token
	left := deserializeNodes(s)
	*s = (*s)[1:] // consume the left subtree's trailing token
	right := deserializeNodes(s)
	return &node{val: val, left: left, right: right}
}
// deserialize parses the bracketed token string produced by serialize back
// into a tree.
func deserialize(input string) *node {
	trimmed := strings.ReplaceAll(strings.ReplaceAll(input, "[", ""), "]", "")
	tokens := strings.Split(trimmed, " ")
	return deserializeNodes(&tokens)
}
|
package nats
import (
"github.com/atymkiv/echo_frame_learning/blog/pkg/utl/config"
"github.com/nats-io/go-nats"
)
// New opens a NATS connection to the host configured in cfg.
func New(cfg *config.Nats) (*nats.Conn, error) {
	return nats.Connect(cfg.Host)
}
|
package main
import (
"bufio"
"encoding/json"
"flag"
"fmt"
"log"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/JustAdam/streamingtwitter"
"github.com/tg/gosortmap"
"menteslibres.net/gosexy/redis"
)
// File names for the stopword list, the JSON output, and the Twitter API tokens.
const STOPWORDS_FILE_NAME = "stopwords.txt"
const TOPWORDS_FILE_NAME = "topwords.json"
const TOKEN_FILE_NAME = "tokens.json"
var (
	stopword = make(map[string]bool) // words excluded from counting
	wordcount = make(map[string]int) // in-memory word frequency counter
	redisClient *redis.Client
	// Redis address taken from docker-link style environment variables.
	redisHost = os.Getenv("REDIS_PORT_6379_TCP_ADDR")
	redisPort, _ = strconv.Atoi(os.Getenv("REDIS_PORT_6379_TCP_PORT"))
	duration = flag.Int("t", 5, "Number of seconds before closing the stream")
	tagcloudSize = flag.Int("n", 0, "Print top 'n' words.")
	printToFileFlag = flag.Bool("f", false, "Print output to file in addition to terminal")
	stopPrintScreenFlag = flag.Bool("s", false, "Suppress printing the output to terminal")
	redisFlag = flag.Bool("r", false, "Use Redis to store word frequency")
)
// JSONTag is one word/count pair in the emitted tag-cloud JSON.
type JSONTag struct {
	Word string `json:"word"`
	Count int `json:"count"`
}
// JSONOtherTag aggregates the words that fall outside the top-n cut.
type JSONOtherTag struct {
	Other []JSONTag `json:"other"`
}
// getWordFreqFromRedis copies every word counter from Redis into the
// in-memory wordcount map, aborting the process on Redis errors.
func getWordFreqFromRedis() {
	keys, err := redisClient.Keys("*")
	if err != nil {
		cleanupRedis()
		log.Fatalf("Redis KEYS error: %s\n", err)
	}
	for _, key := range keys {
		count, getErr := redisClient.Get(key)
		if getErr == redis.ErrNilReply {
			// The key expired between KEYS and GET; skip it.
			continue
		}
		if getErr != nil {
			cleanupRedis()
			log.Fatalf("Redis GET error on word '%v': %s\n", key, getErr.Error())
		}
		n, convErr := strconv.Atoi(count)
		if convErr != nil {
			log.Fatal(convErr)
		}
		wordcount[key] = n
	}
}
func printWordFreq() {
words := make([]JSONTag, 0)
if *redisFlag {
getWordFreqFromRedis()
}
for k, v := range wordcount {
words = append(words, JSONTag{k, v})
}
j, _ := json.MarshalIndent(words, "", " ")
if !*stopPrintScreenFlag {
fmt.Println(string(j))
}
if *printToFileFlag {
printToFile(j)
}
}
func printTopWordsFreq() {
output := make([]interface{}, 0)
jot := &JSONOtherTag{}
if *redisFlag {
getWordFreqFromRedis()
}
nr := 0
for _, e := range sortmap.ByValDesc(wordcount) {
if nr < *tagcloudSize {
output = append(output, JSONTag{e.K.(string), e.V.(int)})
} else {
jot.Other = append(jot.Other, JSONTag{e.K.(string), e.V.(int)})
}
nr++
}
if len(jot.Other) > 0 {
output = append(output, jot)
}
j, _ := json.MarshalIndent(output, "", " ")
if !*stopPrintScreenFlag {
fmt.Println(string(j))
}
if *printToFileFlag {
printToFile(j)
}
}
// printToFile writes o to TOPWORDS_FILE_NAME, truncating any previous
// contents. Exits the process on I/O errors.
func printToFile(o []byte) {
	f, err := os.Create(TOPWORDS_FILE_NAME)
	if err != nil {
		// fix: was panic(err); use log.Fatal for consistency with the rest
		// of this file's error handling.
		log.Fatal(err)
	}
	defer f.Close()
	if _, err := f.Write(o); err != nil {
		log.Fatal(err)
	}
}
// isWord reports whether s looks like a countable word. It assumes s has
// already been lower-cased and trimmed by the caller (see countWordFreq).
func isWord(s string) bool {
	// Too short to be a word.
	if len(s) < 2 {
		return false
	}
	// Twitter handles are not words.
	if s[0] == '@' {
		return false
	}
	// Links are not words. (strings.Contains replaces the previous
	// strings.Count(...) > 0, which scans the whole string needlessly.)
	if strings.Contains(s, "http://") || strings.Contains(s, "https://") {
		return false
	}
	// Require at least one ASCII lowercase letter so pure digit/punctuation
	// runs are rejected; input is lower-cased upstream.
	for _, c := range s {
		if c >= 'a' && c <= 'z' {
			return true
		}
	}
	return false
}
// trimWord strips surrounding whitespace and common punctuation from w,
// leaving interior characters (e.g. apostrophes in contractions) intact.
func trimWord(w string) string {
	const cutset = "!,.?;!$%^&*()[]{}'/|><~`+-=\\\""
	return strings.Trim(strings.TrimSpace(w), cutset)
}
// splitText breaks txt into tokens on whitespace and a small set of
// punctuation separators; empty fields are dropped by FieldsFunc.
func splitText(txt string) []string {
	return strings.FieldsFunc(txt, func(r rune) bool {
		return strings.ContainsRune(" \n\t,.:;|", r)
	})
}
// countWordFreq lower-cases txt, tokenizes it, and increments the frequency
// of every token that is neither a stopword nor rejected by isWord. Counts
// go to Redis when -r is set, otherwise to the in-memory wordcount map.
func countWordFreq(txt string) {
	txt = strings.ToLower(txt)
	ws := splitText(txt)
	for _, w := range ws {
		w = trimWord(w)
		if !stopword[w] && isWord(w) {
			if *redisFlag {
				// INCR creates the key with value 1 on first use.
				_, err := redisClient.Incr(w)
				if err != nil {
					log.Fatalf("Redis INCR error on word '%v': %s\n", w, err)
				}
			} else {
				wordcount[w]++
			}
		}
	}
}
// cleanupRedis clears every key written during this run and closes the
// connection. Called via defer from main and before fatal exits.
// NOTE(review): FlushAll wipes the whole Redis instance, not only keys
// created by this program — confirm the instance is dedicated.
func cleanupRedis() {
	redisClient.FlushAll()
	redisClient.Close()
}
// loadStopwords reads STOPWORDS_FILE_NAME (one word per line) into the
// stopword set. Exits the process if the file cannot be opened or read.
func loadStopwords() {
	file, err := os.Open(STOPWORDS_FILE_NAME)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		stopword[scanner.Text()] = true
	}
	if err := scanner.Err(); err != nil {
		// fix: scan errors (e.g. read failures) were silently ignored.
		log.Fatal(err)
	}
}
// init loads the stopword list before main runs.
// NOTE(review): init with file I/O side effects makes this binary harder to
// test; consider calling loadStopwords explicitly from main instead.
func init() {
	loadStopwords()
}
// main connects to Twitter's sample stream, counts word frequencies for
// -t seconds, and prints either the full frequency table or the top -n
// words (see printWordFreq / printTopWordsFreq).
func main() {
	flag.Parse()
	// Create new Redis client (if -r flag was used)
	if *redisFlag {
		redisClient = redis.New()
		if redisHost == "" {
			// No Docker link env vars present; fall back to a local Redis.
			redisHost = "127.0.0.1"
			redisPort = 6379
		}
		err := redisClient.Connect(redisHost, uint(redisPort))
		if err != nil {
			log.Fatalf("Redis connection failed: %s\n", err)
		}
		defer cleanupRedis()
	}
	// Create new streaming API client
	twitterClient := streamingtwitter.NewClient()
	err := twitterClient.Authenticate(&streamingtwitter.ClientTokens{
		TokenFile: TOKEN_FILE_NAME,
	})
	if err != nil {
		log.Fatalf("Twitter connection failed: %s\n", err)
	}
	// Filter the stream by language
	args := &url.Values{}
	args.Add("language", "en")
	// Launch the stream
	tweets := make(chan *streamingtwitter.TwitterStatus)
	go twitterClient.Stream(tweets, streamingtwitter.Streams["Sample"], args)
	// Stream runs for <*duration> seconds
	timer := time.NewTimer(time.Second * time.Duration(*duration))
	// Streaming
stream:
	for {
		select {
		case status := <-tweets:
			countWordFreq(status.Text)
		case err := <-twitterClient.Errors:
			fmt.Printf("Twitter client error: %s\n", err)
		case <-twitterClient.Finished:
			// NOTE(review): returning here skips printing the results below;
			// deferred cleanupRedis still runs. Confirm this is intended.
			return
		}
		// The timer is only checked after an event arrives above, so on a
		// silent stream the program can run longer than -t seconds.
		select {
		case <-timer.C:
			break stream
		default:
		}
	}
	// Print results
	if *tagcloudSize == 0 {
		printWordFreq()
	} else {
		printTopWordsFreq()
	}
}
|
package main
import "fmt"
// main demonstrates a non-blocking channel operation: a select statement
// with a default clause.
func main() {
	messages := make(chan string)
	signals := make(chan bool)
	// Neither channel has a sender, so both receives would block; the
	// default case therefore runs immediately and "no activity" is printed.
	select {
	case msg := <-messages:
		fmt.Println("received message", msg)
	case sig := <-signals:
		fmt.Println("received signal", sig)
	default:
		fmt.Println("no activity")
	}
}
|
package main
// Certificate models a certificate registration record as exchanged in JSON.
// Dates are carried as strings; their expected format is not visible here —
// confirm against the producer before parsing.
type Certificate struct {
	SerialNumber string `json:"serialNumber"`
	RegistrationNumber int `json:"registrationNumber"`
	RegistrationDate string `json:"registrationDate"`
	CertificateHash string `json:"certificateHash"`
	MetaDataHash string `json:"metaDataHash"`
	PublicationDate string `json:"publicationDate"`
}
|
package dbserver
import (
"database/sql"
_ "github.com/go-sql-driver/mysql"
"github.com/labstack/echo"
"net/http"
)
// CreateDb handles an HTTP request that creates a MySQL database named by the
// "dbname" query parameter.
func CreateDb(c echo.Context) error {
	dbName := c.QueryParam("dbname")
	// fix: the name was previously interpolated into the DDL statement
	// unchecked, allowing SQL injection. Identifiers cannot be bound as
	// parameters in DDL, so validate the name strictly instead.
	if !isSQLIdentifier(dbName) {
		return c.String(http.StatusBadRequest, "invalid database name")
	}
	db, err := sql.Open("mysql", "root:lei123@/lei")
	if err != nil {
		// fix: was panic(err); return the error so echo can respond with 500
		// instead of crashing the process.
		return err
	}
	// fix: the connection pool was never closed (resource leak).
	defer db.Close()
	if _, err = db.Exec("create database " + dbName + ";"); err != nil {
		return err
	}
	return c.String(http.StatusOK, "you have create a database")
}

// isSQLIdentifier reports whether s is a non-empty string of ASCII letters,
// digits, and underscores — safe to splice into a DDL statement.
func isSQLIdentifier(s string) bool {
	if s == "" {
		return false
	}
	for _, r := range s {
		ok := r == '_' ||
			(r >= '0' && r <= '9') ||
			(r >= 'a' && r <= 'z') ||
			(r >= 'A' && r <= 'Z')
		if !ok {
			return false
		}
	}
	return true
}
|
package main
import (
"bytes"
"testing"
)
var (
	// usersData is a small CSV fixture: a header row plus nine user records
	// (name,age), shared by the test and benchmarks below.
	usersData = `name,age
F1 L1,30
F2 L2,20
F3 L3,30
F4 L4,20
F5 L5,30
F6 L6,20
F7 L7,30
F8 L8,20
F9 L9,70`
)
// Test_countRecords verifies that countRecords sees all nine data rows of
// the fixture (the header line is not counted as a record).
func Test_countRecords(t *testing.T) {
	cnt, err := countRecords(bytes.NewBufferString(usersData), &UserCounter{})
	if err != nil {
		t.Error(err)
	}
	if cnt != 9 {
		t.Errorf("expected: %d, actual: %d", 9, cnt)
	}
}
// Benchmark_countRecords measures the counter-based implementation; a fresh
// buffer is built per iteration so each run reads the full fixture.
func Benchmark_countRecords(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_, err := countRecords(bytes.NewBufferString(usersData), &UserCounter{})
		if err != nil {
			b.Error(err)
		}
	}
}
// Benchmark_countRecordsTheOldWay measures the legacy implementation for
// comparison against Benchmark_countRecords.
func Benchmark_countRecordsTheOldWay(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_, err := countRecordsTheOldWay(bytes.NewBufferString(usersData))
		if err != nil {
			b.Error(err)
		}
	}
}
|
//01 go语言关键字 标识符
//程序所属包
package main
//import printer "fmt"
import (//. "fmt"
_ "golangPractice/practice00/learn02" //只初始化 不引用
"fmt"
)
// Constant definition; an uppercase first letter makes the name exported.
const NAME = "heylink\n"
// Global variable.
var mainName = "main name\n"
// Global variable with an explicit type.
var a string = "nihao\n"
// Plain named-type declaration.
type myInt int
// Struct declaration.
type Learn struct {
}
// Interface declaration.
type iLearn interface {
}
// Function definition (body intentionally left as commented-out demo code).
func learn() {
	//printer.Print("test\n")
}
// Explicitly typed constant.
const golang string = "golang"
// Implicitly typed constant.
const goland = "goland"
// Grouped constant definition.
const (
	cat string = "猫"
	dog = "狗"
)
// Several constants declared on one line.
const apple, banana string = "苹果", "香蕉"
const b, c = 1, "测试"
// len of a constant string is itself a constant expression.
const cLen = len(c)
// User-defined function with a return type.
func getString() string {
	return "getString"
}
//const getStringLen = len(getString()) // invalid: a function call is not a constant expression
const value1 = iota
//const value2 = iota
// iota resets to 0 at the start of each const block and advances by one per
// declaration line; the usages below demonstrate the common patterns.
const (
	value2 = iota
	_ // skip a value with the blank identifier
	_
	//value2_5 = 1.5 // "insertion" usage: would make value3 = value2_5's line + 1
	value3 = iota
	value4 = iota * 2
	// implicit repetition of the previous expression
	value5
	value6
	// several constants on one line share the same iota value
	value7, value8 = iota, iota + 3
	value9, value10
	value11 = iota
)
func main() {
/*********learn01***********/
//learn()
//fmt.Print("hello learn01\n")
//fmt.Print(mainName)
//fmt.Print(NAME)
/*********learn02***********/
/*验证import的加载原理
import 的基本原理:
1.如果一个main导入其他包,包将被顺序导入
2.如果导入的包中依赖其他包(包B),会首先导入B包,然后初始化B包中的常量与变量,最后如果B中有init,会自动执行ini();
3.所有包到入完成之后才会对main中常量和变量进行初始化,然后执行main中的init函数(如果存在),最后执行main函数;
4.如果一个包被导入多次则该包只会被导入一次;
注意:我在这里遇到坑
以下两个方法调用一直报错,就是没有办法导入learn02和show02两个包
原因是我的GOPATH路径下没有设置src文件夹,然后还要把当前项目放在src文件夹下
go默认到GOPATH的src路径下去找源码,所以环境还是很重要的
*/
//show02.Show02()
//learn02.Learn02()
/*********learn03***********/
/*
import别名,“.”,"_"
别名操作的含义是:将导入的包命名为另一个容易记忆的别名
点(.)操作的含义是:点(.)标识的包导入后,调用该包中函数时可以省略前缀包名;
下划线(_)操作的含义是:导入该包,但不导入整个包,而是执行该包中的init函数,
因此无法通过包名来调用包中其他的函数。使用下划线(_)操作往往是为了注册包里的引擎,让外部可以方便地使用;
*/
//printer.Print("this is a alias for fmt!")
//Print("this is a . for fmt!")
/*********learn04***********/
/*常量和变量*/
//orange := "橘子"
//fmt.Print(cat)
//fmt.Print(dog)
//fmt.Print(apple)
//fmt.Print(banana)
//fmt.Print(orange)
//fmt.Print("\n")
//fmt.Print(reflect.TypeOf(b))
//fmt.Print("\n")
//fmt.Print(b)
//fmt.Print(c)
//fmt.Print(cLen)
/*********learn05***********/
/*go语言的iota关键字
iota在const关键字出现时将被重置为0
const中新增一行常量声明将使iota计数一次
iota常见使用法:
1,跳值使用法;
2,插队使用法;
3,表达式隐式使用法;
4,单行使用法。
*/
/*fmt.Print("value1:")
fmt.Print(value1)
fmt.Print("\n")
fmt.Print("value2:")
fmt.Print(value2)
fmt.Print("\n")
fmt.Print("value3:")
fmt.Print(value3)
fmt.Print("\n")
//表达式隐式使用法
fmt.Print("value4:")
fmt.Print(value4)
fmt.Print("\n")
fmt.Print("value5:")
fmt.Print(value5)
fmt.Print("\n")
fmt.Print("value6:")
fmt.Print(value6)
fmt.Print("\n")
//单行使用法
fmt.Print("value7:")
fmt.Print(value7)
fmt.Print("\n")
fmt.Print("value8:")
fmt.Print(value8)
fmt.Print("\n")
fmt.Print("value9:")
fmt.Print(value9)
fmt.Print("\n")
fmt.Print("value10:")
fmt.Print(value10)
fmt.Print("\n")
fmt.Print("value11:")
fmt.Print(value11)
fmt.Print("\n")*/
/*********learn06***********/
/*运算符等*/
//只有a++,没有++a
value := 0
value++
//fmt.Print(value)
//if-else
if value > 1 {
fmt.Print("value > 1\n")
} else if value < 7{
fmt.Print("value < 7\n")
} else {
fmt.Print("value >= 7\n")
}
//switch
switch value {
case 1:
fmt.Println("value = 1")
case 2:
fmt.Println("value = 2")
default:
fmt.Println("value = 3")
}
//for
/*for {
fmt.Println("死循环")
}*/
for i:=1; i < 10; i++{
fmt.Println("for...")
//time.Sleep(1*time.Second)
}
array := [] string{"苹果", "香蕉", "雪梨"}
for key, valueNum := range array {
fmt.Print("key的值为:")
fmt.Println(key)
fmt.Print("value的值为:")
fmt.Println(valueNum)
}
//只取得value
for _, valueNum := range array {
fmt.Print("value的值为:")
fmt.Println(valueNum)
}
//goto
goto One
fmt.Println("中间代码块")
One:
fmt.Println("goto代码块")
}
|
package postgres
import (
"context"
"fmt"
"strings"
"github.com/georgysavva/scany/pgxscan"
"github.com/jackc/pgx/v4"
"github.com/odpf/stencil/models"
)
// Store is the repository DB access layer for snapshots and protobuf files.
type Store struct {
	db *DB
}
// Close releases the underlying database connection pool.
func (r *Store) Close() {
	r.db.Close()
}
// ListSnapshots returns list of snapshots. Each non-zero field of queryFields
// adds an ANDed equality condition; a zero-valued filter returns all rows.
func (r *Store) ListSnapshots(ctx context.Context, queryFields *models.Snapshot) ([]*models.Snapshot, error) {
	var snapshots []*models.Snapshot
	var query strings.Builder
	var args []interface{}
	var conditions []string
	query.WriteString(`SELECT * from snapshots`)
	// Latest is a bool, so false simply means "don't filter on it".
	if queryFields.Latest {
		conditions = append(conditions, "latest=true")
	}
	// For each filter the placeholder number must track len(args)+1 so the
	// positional parameters stay aligned regardless of which filters are set.
	if queryFields.Namespace != "" {
		conditions = append(conditions, fmt.Sprintf("namespace=$%d", len(args)+1))
		args = append(args, queryFields.Namespace)
	}
	if queryFields.Name != "" {
		conditions = append(conditions, fmt.Sprintf("name=$%d", len(args)+1))
		args = append(args, queryFields.Name)
	}
	if queryFields.Version != "" {
		conditions = append(conditions, fmt.Sprintf("version=$%d", len(args)+1))
		args = append(args, queryFields.Version)
	}
	if queryFields.ID != 0 {
		conditions = append(conditions, fmt.Sprintf("id=$%d", len(args)+1))
		args = append(args, queryFields.ID)
	}
	if len(conditions) > 0 {
		condition := strings.Join(conditions, " AND ")
		query.WriteString(fmt.Sprintf(` WHERE %s`, condition))
	}
	err := pgxscan.Select(ctx, r.db, &snapshots, query.String(), args...)
	return snapshots, err
}
// UpdateSnapshotLatestVersion marks snapshot as the latest version of its
// (namespace, name) pair, clearing the flag on the previously-latest row.
// All statements run in one transaction so at most one row stays flagged.
func (r *Store) UpdateSnapshotLatestVersion(ctx context.Context, snapshot *models.Snapshot) error {
	return r.db.BeginFunc(ctx, func(t pgx.Tx) error {
		var previousLatestSnapshotID int64
		// pgx.ErrNoRows is tolerated: there may be no previous latest row,
		// in which case the UPDATE below matches id 0 and is a no-op.
		err := t.QueryRow(ctx, `SELECT id from snapshots where namespace=$1 and name=$2 and latest=true`, snapshot.Namespace, snapshot.Name).Scan(&previousLatestSnapshotID)
		if err != nil && err != pgx.ErrNoRows {
			return err
		}
		_, err = t.Exec(ctx, `UPDATE snapshots set latest=false where id=$1`, previousLatestSnapshotID)
		if err != nil {
			return err
		}
		_, err = t.Exec(ctx, `UPDATE snapshots set latest=true where id=$1`, snapshot.ID)
		if err != nil {
			return err
		}
		// Mirror the DB state on the in-memory model only after both
		// updates succeed.
		snapshot.Latest = true
		return nil
	})
}
// GetSnapshotByFields returns full snapshot data for the row matching
// namespace, name, and the latest flag; a non-empty version narrows the
// match further. Returns models.ErrNotFound when no row matches.
func (r *Store) GetSnapshotByFields(ctx context.Context, namespace, name, version string, latest bool) (*models.Snapshot, error) {
	sh := &models.Snapshot{
		Namespace: namespace,
		Name:      name,
	}
	var query strings.Builder
	var args []interface{}
	query.WriteString(`SELECT id, version, latest from snapshots where namespace=$1 and name=$2 and latest=$3`)
	args = append(args, namespace, name, latest)
	if version != "" {
		// $4 is correct because exactly three args precede this branch.
		query.WriteString(` and version=$4`)
		args = append(args, version)
	}
	err := r.db.QueryRow(ctx, query.String(), args...).Scan(&sh.ID, &sh.Version, &sh.Latest)
	if err == pgx.ErrNoRows {
		return sh, models.ErrNotFound
	}
	return sh, err
}
// GetSnapshotByID get snapshot by ID, returning models.ErrNotFound when the
// id does not exist.
// NOTE(review): relies on `SELECT *` yielding columns in the order
// (id, namespace, name, version, latest); a schema change that reorders
// columns would silently mis-scan — confirm against the migrations.
func (r *Store) GetSnapshotByID(ctx context.Context, id int64) (*models.Snapshot, error) {
	var s models.Snapshot
	err := r.db.QueryRow(ctx, `SELECT * FROM snapshots where id=$1`, id).Scan(&s.ID, &s.Namespace, &s.Name, &s.Version, &s.Latest)
	if err == pgx.ErrNoRows {
		return &s, models.ErrNotFound
	}
	return &s, err
}
// ExistsSnapshot checks if snapshot exits in DB or not, treating st's
// non-zero fields as filters (see ListSnapshots). Query errors are reported
// as "does not exist".
func (r *Store) ExistsSnapshot(ctx context.Context, st *models.Snapshot) bool {
	l, err := r.ListSnapshots(ctx, st)
	return err == nil && len(l) > 0
}
// CreateSnapshot inserts snapshot data (or resolves the existing row on
// conflict — see snapshotInsertQuery) and populates snapshot.ID.
func (r *Store) CreateSnapshot(ctx context.Context, snapshot *models.Snapshot) error {
	return r.db.QueryRow(ctx, snapshotInsertQuery, snapshot.Namespace, snapshot.Name, snapshot.Version).Scan(&snapshot.ID)
}
// DeleteSnapshot deletes snapshot data matched by namespace, name, and
// version. Deleting a non-existent snapshot is not an error.
func (r *Store) DeleteSnapshot(ctx context.Context, snapshot *models.Snapshot) error {
	_, err := r.db.Exec(ctx, `DELETE from snapshots where namespace=$1 and name=$2 and version=$3`, snapshot.Namespace, snapshot.Name, snapshot.Version)
	return err
}
// PutSchema inserts Schema information in DB: the snapshot row plus one row
// per protobuf file, all within a single transaction. snapshot.ID is
// populated from the inserted (or pre-existing) snapshot row.
func (r *Store) PutSchema(ctx context.Context, snapshot *models.Snapshot, dbFiles []*models.ProtobufDBFile) error {
	return r.db.Pool.BeginFunc(ctx, func(t pgx.Tx) error {
		err := t.QueryRow(ctx, snapshotInsertQuery, snapshot.Namespace, snapshot.Name, snapshot.Version).Scan(&snapshot.ID)
		if err != nil {
			return err
		}
		// Queue one insert per file and send them in a single round trip.
		batch := &pgx.Batch{}
		for _, file := range dbFiles {
			batch.Queue(fileInsertQuery, snapshot.ID, file.SearchData, file.Data)
		}
		res := t.SendBatch(ctx, batch)
		for i := 0; i < len(dbFiles); i++ {
			if _, err = res.Exec(); err != nil {
				// fix: the batch results were not closed on this early
				// return, leaving the connection in an unusable state.
				res.Close()
				return err
			}
		}
		return res.Close()
	})
}
// GetSchema Fullycontained descriptorset file given list of fully qualified message names.
// If message names are empty then whole fileDescriptorSet data returned.
// Each returned element is one file's serialized descriptor data.
func (r *Store) GetSchema(ctx context.Context, snapshot *models.Snapshot, names []string) ([][]byte, error) {
	var totalData [][]byte
	var err error
	if len(names) > 0 {
		// Restrict to the files (and their dependencies) declaring the
		// requested messages.
		err = pgxscan.Select(ctx, r.db, &totalData, getDataForSpecificMessages, snapshot.ID, names)
	} else {
		err = pgxscan.Select(ctx, r.db, &totalData, getWholeFDS, snapshot.ID)
	}
	return totalData, err
}
// fileInsertQuery inserts a protobuf file (deduplicating identical
// search_data+data pairs via ON CONFLICT DO NOTHING + COALESCE lookup) and
// links the resulting file id to a snapshot. $1=snapshot id, $2=search_data,
// $3=data.
const fileInsertQuery = `
WITH file_insert(id) as (
	INSERT INTO protobuf_files (search_data, data)
	VALUES ($2, $3) ON CONFLICT DO NOTHING
	RETURNING id
),
file(id) as (
	SELECT COALESCE(
		(
			SELECT id
			FROM file_insert
		),
		(
			select id
			from protobuf_files
			where search_data = $2
			and data = $3
		)
	)
)
INSERT INTO snapshots_protobuf_files(snapshot_id, file_id)
SELECT $1,file.id from file`
// getDataForSpecificMessages selects descriptor data for the dependency
// closure of the files declaring any of the message names in $2, limited to
// snapshot $1.
const getDataForSpecificMessages = `
WITH files as (
	SELECT distinct(jsonb_array_elements_text(pf.search_data->'dependencies'))
	from protobuf_files as pf
	join snapshots_protobuf_files as spf on pf.id = spf.file_id
	join snapshots s on s.id = spf.snapshot_id
	WHERE spf.snapshot_id = $1 AND pf.search_data->'messages' ?| $2
)
SELECT pf.data
from protobuf_files as pf
join snapshots_protobuf_files as spf on pf.id = spf.file_id
join snapshots s on s.id = spf.snapshot_id
WHERE spf.snapshot_id = $1 and pf.search_data->>'path' in (select * from files);
`
// getWholeFDS selects descriptor data for every file of snapshot $1.
const getWholeFDS = `
SELECT pf.data
from protobuf_files as pf
join snapshots_protobuf_files as spf on pf.id = spf.file_id
join snapshots s on s.id = spf.snapshot_id
WHERE spf.snapshot_id = $1
`
// snapshotInsertQuery inserts a (namespace, name, version) snapshot row and
// returns its id; on conflict the id of the existing row is returned instead.
const snapshotInsertQuery = `
WITH ss(id) as (
	INSERT INTO snapshots (namespace, name, version)
	VALUES ($1, $2, $3) ON CONFLICT DO NOTHING
	RETURNING snapshots.id
)
SELECT COALESCE(
	(
		select ss.id
		from ss
	),
	(
		select id
		from snapshots
		where namespace = $1
		and name = $2
		and version = $3
	)
)`
|
package main
import (
"context"
"fmt"
"os"
"os/signal"
"syscall"
"github.com/pteich/configstruct"
"github.com/pteich/elastic-query-export/export"
"github.com/pteich/elastic-query-export/flags"
)
// Version is the build version string; presumably injected at build time via
// -ldflags — confirm against the release scripts.
var Version string
// main configures and runs the ElasticSearch export CLI. Defaults below can
// be overridden by flags/environment parsed by configstruct.
func main() {
	conf := flags.Flags{
		ElasticURL:       "http://localhost:9200",
		ElasticVerifySSL: true,
		Index:            "logs-*",
		Query:            "*",
		OutFormat:        flags.FormatCSV,
		Outfile:          "output.csv",
		ScrollSize:       1000,
		Timefield:        "@timestamp",
	}
	// Cancel the context on termination signals so the export shuts down
	// gracefully. fix: SIGKILL was listed here, but SIGKILL can never be
	// caught or handled by a process, so registering it was a no-op.
	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT)
	defer cancel()
	cmd := configstruct.NewCommand(
		"",
		"CLI tool to export data from ElasticSearch into a CSV or JSON file. https://github.com/pteich/elastic-query-export",
		&conf,
		func(c *configstruct.Command, cfg interface{}) error {
			export.Run(ctx, cfg.(*flags.Flags))
			return nil
		},
	)
	err := cmd.ParseAndRun(os.Args)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
|
package model
import (
"encoding/json"
valuetransaction "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
)
// ValueTxID is the base58 representation of a transaction ID.
type ValueTxID string
// NewValueTxID wraps the base58 string form of id.
func NewValueTxID(id *valuetransaction.ID) ValueTxID {
	return ValueTxID(id.String())
}
// MarshalJSON encodes the ID as a plain JSON string.
func (id ValueTxID) MarshalJSON() ([]byte, error) {
	return json.Marshal(string(id))
}
// UnmarshalJSON decodes a JSON string and validates that it is a well-formed
// base58 transaction ID before storing it. On any error the receiver is left
// unchanged.
func (id *ValueTxID) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	// fix: the receiver was previously assigned before the validation error
	// was checked, leaving an invalid ID stored on failure.
	if _, err := valuetransaction.IDFromBase58(s); err != nil {
		return err
	}
	*id = ValueTxID(s)
	return nil
}
// ID parses the stored base58 string back into a transaction ID.
// Panics if the stored value is malformed; this cannot happen for values
// produced by NewValueTxID or a successful UnmarshalJSON.
func (id ValueTxID) ID() valuetransaction.ID {
	r, err := valuetransaction.IDFromBase58(string(id))
	if err != nil {
		panic(err)
	}
	return r
}
|
package generator
import (
"bytes"
"fmt"
"go/format"
"io/ioutil"
"log"
"path/filepath"
"testing"
"github.com/frk/compare"
"github.com/frk/gosql/internal/analysis"
"github.com/frk/gosql/internal/config"
"github.com/frk/gosql/internal/postgres"
"github.com/frk/gosql/internal/search"
)
// TestGenerator runs the code generator over every *_in.go testdata file and
// compares the output against the corresponding *_out.go golden file.
// Test cases are grouped per testdata directory; set skip to true on a group
// to exclude it while debugging.
func TestGenerator(t *testing.T) {
	db := &postgres.TestDB{}
	if err := db.Init(); err != nil {
		// fix: was panic(err); t.Fatal reports the failure through the
		// testing framework (and still runs deferred cleanup).
		t.Fatal(err)
	}
	defer db.Close()
	type testcase struct {
		filename string
		withCfg  func(*config.Config)
	}
	tests := []struct {
		skip      bool
		dirname   string
		testcases []testcase
	}{{
		//skip: true,
		dirname: "delete",
		testcases: []testcase{
			{filename: "all_directive"},
			{filename: "datatype_1"},
			{filename: "datatype_2"},
			{filename: "filter"},
			{filename: "result_iterator_afterscan"},
			{filename: "result_iterator_errorhandler"},
			{filename: "result_iterator_errorinfohandler"},
			{filename: "result_iterator"},
			{filename: "result_single_afterscan"},
			{filename: "result_single_errorhandler"},
			{filename: "result_single_errorinfohandler"},
			{filename: "result_single"},
			{filename: "result_slice_afterscan"},
			{filename: "result_slice"},
			{filename: "returning_iterator_afterscan"},
			{filename: "returning_iterator"},
			{filename: "returning_single_afterscan"},
			{filename: "returning_single_all"},
			{filename: "returning_single_collist"},
			{filename: "returning_slice_all"},
			{filename: "returning_slice_afterscan"},
			{filename: "returning_slice_collist"},
			{filename: "returning_slice_context"},
			{filename: "returning_slice_errorhandler"},
			{filename: "returning_slice_errorinfohandler"},
			{filename: "rowsaffected"},
			{filename: "rowsaffected_errorhandler"},
			{filename: "rowsaffected_errorinfohandler"},
			{filename: "using_join_block_1"},
			{filename: "using_join_block_2"},
			{filename: "where_block_1"},
			{filename: "where_block_2", withCfg: func(cfg *config.Config) {
				cfg.MethodName.Value = "ExecQuery"
			}},
		},
	}, {
		//skip: true,
		dirname: "select",
		testcases: []testcase{
			{filename: "afterscan_single"},
			{filename: "afterscan_slice"},
			{filename: "coalesce_table"},
			{filename: "count_basic"},
			{filename: "count_filter", withCfg: func(cfg *config.Config) {
				cfg.MethodWithContext.Value = true
			}},
			{filename: "count_where"},
			{filename: "exists_filter"},
			{filename: "exists_where"},
			{filename: "iterator_func"},
			{filename: "iterator_func_errorhandler"},
			{filename: "iterator_iface"},
			{filename: "filter_slice"},
			{filename: "filter_iterator"},
			{filename: "joinblock_slice"},
			{filename: "limit_directive"},
			{filename: "limit_field_default"},
			{filename: "limit_field"},
			{filename: "notexists_where"},
			{filename: "notexists_filter"},
			{filename: "offset_directive"},
			{filename: "offset_field_default"},
			{filename: "offset_field"},
			{filename: "orderby_directive"},
			{filename: "record_nested_single"},
			{filename: "record_nested_slice"},
			{filename: "whereblock_array_comparison1"},
			{filename: "whereblock_array_comparison2"},
			{filename: "whereblock_array_comparison3"},
			{filename: "whereblock_between"},
			{filename: "whereblock_isin"},
			{filename: "whereblock_isin2"},
			{filename: "whereblock_isin3"},
			{filename: "whereblock_modifierfunc_single"},
			{filename: "whereblock_nested"},
			{filename: "whereblock_single"},
			{filename: "whereblock_single2"},
			{filename: "whereblock_slice"},
		},
	}, {
		//skip: true,
		dirname: "insert",
		testcases: []testcase{
			{filename: "basic_single"},
			{filename: "basic_single2"},
			{filename: "basic_slice"},
			{filename: "default_all_returning_single"},
			{filename: "default_all_returning_slice"},
			{filename: "default_all_single"},
			{filename: "default_all_slice"},
			{filename: "default_single"},
			{filename: "default_slice"},
			{filename: "json_single"},
			{filename: "json_slice"},
			{filename: "onconflict_column_ignore_single_1"},
			{filename: "onconflict_column_ignore_single_2"},
			{filename: "onconflict_column_update_single_1"},
			{filename: "onconflict_column_update_returning_slice"},
			{filename: "onconflict_constraint_ignore_single_1"},
			{filename: "onconflict_ignore_single"},
			{filename: "onconflict_ignore_slice"},
			{filename: "onconflict_index_ignore_single_1"},
			{filename: "onconflict_index_ignore_single_2"},
			{filename: "onconflict_index_update_single_1"},
			{filename: "onconflict_index_update_returning_slice"},
			{filename: "result_afterscan_iterator"},
			{filename: "result_afterscan_single"},
			{filename: "result_afterscan_slice"},
			{filename: "result_basic_iterator"},
			{filename: "result_basic_single"},
			{filename: "result_basic_slice"},
			{filename: "result_errorhandler_iterator"},
			{filename: "result_errorhandler_single"},
			{filename: "result_errorinfohandler_iterator"},
			{filename: "result_errorinfohandler_single"},
			{filename: "result_json_single"},
			{filename: "result_json_slice"},
			{filename: "returning_afterscan_single"},
			{filename: "returning_afterscan_slice"},
			{filename: "returning_all_json_single"},
			{filename: "returning_all_json_slice"},
			{filename: "returning_all_single"},
			{filename: "returning_all_slice"},
			{filename: "returning_collist_single"},
			{filename: "returning_collist_slice"},
			{filename: "returning_context_single"},
			{filename: "returning_context_slice"},
			{filename: "returning_errorhandler_slice"},
			{filename: "returning_errorinfohandler_slice"},
			{filename: "rowsaffected_errorhandler_single"},
			{filename: "rowsaffected_errorinfohandler_single"},
			{filename: "rowsaffected_single"},
		},
	}, {
		//skip: true,
		dirname: "update",
		testcases: []testcase{
			{filename: "all_single"},
			{filename: "filter_single"},
			{filename: "filter_result_slice"},
			{filename: "fromblock_basic_single"},
			{filename: "fromblock_join_single"},
			{filename: "pkey_composite_single"},
			{filename: "pkey_composite_slice"},
			{filename: "pkey_single"},
			{filename: "pkey_slice"},
			{filename: "pkey_returning_all_single"},
			{filename: "whereblock_basic_single_1"},
			{filename: "whereblock_basic_single_2"},
			{filename: "whereblock_result_slice"},
			{filename: "whereblock_returning_all_single"},
		},
	}, {
		//skip: true,
		dirname: "filter",
		testcases: []testcase{
			{filename: "alias"},
			{filename: "basic"},
			{filename: "basic2", withCfg: func(cfg *config.Config) {
				cfg.FilterColumnKeyTag.Value = "json"
				cfg.FilterColumnKeySeparator.Value = "."
				cfg.QuoteIdentifiers.Value = true
			}},
			{filename: "nested"},
			{filename: "embedded", withCfg: func(cfg *config.Config) {
				cfg.FilterColumnKeyTag.Value = "json"
				cfg.FilterColumnKeySeparator.Value = "."
				cfg.QuoteIdentifiers.Value = true
			}},
			{filename: "textsearch"},
		},
	}, {
		//skip: true,
		dirname: "pgsql",
		testcases: []testcase{
			{filename: "insert_basic"},
			{filename: "insert_array"},
		},
	}}
	for _, tt := range tests {
		if tt.skip {
			continue
		}
		pkgs, err := search.Search("../testdata/generator/"+tt.dirname, false, nil)
		if err != nil {
			t.Fatal(err)
		}
		pkg := pkgs[0]
		for _, tc := range tt.testcases {
			t.Run(tt.dirname+"/"+tc.filename, func(t *testing.T) {
				tinfos := []*postgres.TargetInfo{}
				fileprefix := "../testdata/generator/" + tt.dirname + "/" + tc.filename
				f, err := getFile(pkg, fileprefix+"_in.go")
				if err != nil {
					t.Fatal(err)
				}
				// default config for tests; per-case withCfg may override.
				cfg := config.DefaultConfig
				cfg.FilterColumnKeyTag.Value = ""
				cfg.FilterColumnKeySeparator.Value = "."
				cfg.QuoteIdentifiers.Value = true
				if tc.withCfg != nil {
					tc.withCfg(&cfg)
				}
				for _, match := range f.Matches {
					// analyze
					ainfo, err := analysis.Run(pkg.Fset, match.Named, match.Pos, cfg)
					if err != nil {
						t.Error(err)
						return
					}
					// type check
					targInfo, err := postgres.Check(db.DB, ainfo.Struct, ainfo)
					if err != nil {
						t.Error(err)
						return
					}
					tinfos = append(tinfos, targInfo)
				}
				buf := new(bytes.Buffer)
				if err := Write(buf, pkg.Name, tinfos, cfg); err != nil {
					t.Error(err)
					return
				}
				got := string(formatBytes(buf))
				out, err := ioutil.ReadFile(fileprefix + "_out.go")
				if err != nil {
					t.Fatal(err)
				}
				want := string(out)
				// compare
				if err := compare.Compare(got, want); err != nil {
					t.Error(err)
				}
			})
		}
	}
}
// getFile returns the package file whose absolute path matches filename, or
// an error when the file is not part of p.
func getFile(p *search.Package, filename string) (*search.File, error) {
	abs, err := filepath.Abs(filename)
	if err != nil {
		return nil, err
	}
	for _, file := range p.Files {
		if file.Path == abs {
			return file, nil
		}
	}
	return nil, fmt.Errorf("file not found: %q", abs)
}
func formatBytes(buf *bytes.Buffer) []byte {
src, err := format.Source(buf.Bytes())
if err != nil {
log.Printf("format error: %s", err)
return buf.Bytes()
}
return src
}
|
package fixture
import (
"math"
"time"
)
// @pi
const Pi = 3.14 // NOTE(review): "@..." comments in this file look like annotation-parser fixtures; left untouched.
const StringConstant = "qwer"
// X is a sample struct for the annotation parser; the @-comments attached to
// it below are fixture data — do not edit them.

// @dao --asdf "val poi" --qwer 654
// @test -r="q w e r"
type X struct {
	Val int // @field
	SliceVal []string `gorm:"index"`
	MapVal map[string]int `json:"map_val"`
}
// @func --name add
func (x *X) Add(y *X) {
	// Accumulate the absolute value of y.Val into x.Val.
	x.Val += int(math.Abs(float64(y.Val)))
}
// @func --name copy
func (x X) Copy(y *X) {
	// Copy x's value into y; value receiver, so x itself is never modified.
	y.Val = x.Val
}
// @func --name addtwo --nomethod
func AddTwo(a X, b X) X {
	// Positional construction: summed Val, empty slice, empty map.
	return X{a.Val + b.Val, []string{}, make(map[string]int)}
}
// @interface --name y
type Y interface {
	quack() // unexported method: only types within this package can satisfy Y
}
// The types below are additional parser fixtures exercising named basic
// types, pointers, and (possibly nested) struct embedding.
type I int
type S string
type A []int
type Model struct {
	ID string
	Ptr *int
	DeletedAt *time.Time // nil-able timestamp, gorm soft-delete style — confirm against consumer
}
type Z struct {
	Model
	Stuff string
}
type W struct {
	Z
}
// V embeds both W (which itself embeds Z) and Z directly.
type V struct {
	W
	Z
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
"regexp"
"strconv"
"strings"
)
// Password is one parsed puzzle line: a policy range (min/max), the policy
// letter (substr), and the password string itself.
type Password struct {
	min, max int
	substr string
	str string
}
// ReadInput parses one Password per line from r. It stops at the first
// malformed line, returning the entries parsed so far together with the
// error; otherwise it returns all entries and any scanner error.
func ReadInput(r io.Reader) ([]Password, error) {
	var result []Password
	sc := bufio.NewScanner(r)
	sc.Split(bufio.ScanLines)
	for sc.Scan() {
		entry, err := StringConvertPassword(sc.Text())
		if err != nil {
			return result, err
		}
		result = append(result, *entry)
	}
	return result, sc.Err()
}
// passwordLineRe matches puzzle lines of the form "1-3 a: abcde".
// fix: compiled once at package level instead of on every call.
var passwordLineRe = regexp.MustCompile(`^(\d+)-(\d+) (\w): (\w*)$`)

// StringConvertPassword parses one input line into a Password, returning an
// error when the line does not match the expected format.
func StringConvertPassword(s string) (*Password, error) {
	matches := passwordLineRe.FindStringSubmatch(s)
	if len(matches) == 0 {
		return nil, fmt.Errorf("\"%v\" does not match the password regex", s)
	}
	// Both groups matched `\d+`, so Atoi can only fail on overflow of an
	// absurdly long digit run; puzzle inputs keep these small.
	min, _ := strconv.Atoi(matches[1])
	max, _ := strconv.Atoi(matches[2])
	result := Password{
		min:    min,
		max:    max,
		substr: matches[3],
		str:    matches[4],
	}
	return &result, nil
}
// ValidatePasswordPartOne reports whether en.substr occurs between en.min
// and en.max times (inclusive) in en.str.
func ValidatePasswordPartOne(en Password) bool {
	substrCount := strings.Count(en.str, en.substr)
	return substrCount >= en.min && substrCount <= en.max
}
// ValidatePasswordPartTwo reports whether exactly one of the 1-indexed
// positions min and max of en.str holds the policy letter.
// Panics if min or max exceeds len(en.str) — assumes well-formed input.
func ValidatePasswordPartTwo(en Password) bool {
	var a, b = en.str[en.min-1], en.str[en.max-1]
	return (string(a) == en.substr) != (string(b) == en.substr) // xor between booleans is not equals
}
// main reads the puzzle input, counts passwords valid under each policy,
// and prints both counts.
func main() {
	file, err := os.Open("2/input.txt")
	if err != nil {
		// fix: the open error was discarded, so a missing input file would
		// have silently produced zero counts (or worse).
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// fix: the file handle was never closed.
	defer file.Close()
	input, err := ReadInput(file)
	if err != nil {
		// fix: parse/scan errors were discarded as well.
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	var countPartOne, countPartTwo int
	for _, el := range input {
		if ValidatePasswordPartOne(el) {
			countPartOne++
		}
		if ValidatePasswordPartTwo(el) {
			countPartTwo++
		}
	}
	fmt.Println("part one", countPartOne, "part two", countPartTwo)
}
|
package oidc_test
import (
"context"
"fmt"
"regexp"
"testing"
"time"
"github.com/ory/fosite"
"github.com/ory/fosite/token/hmac"
"github.com/stretchr/testify/assert"
"github.com/authelia/authelia/v4/internal/configuration/schema"
"github.com/authelia/authelia/v4/internal/oidc"
)
// TestHMACCoreStrategy exercises HMACCoreStrategy for all three token
// classes (authorize code, refresh token, access token): secret-length
// validation, token prefixes, signature lookup, expiry handling, and a
// config whose global secret cannot be loaded at all.
func TestHMACCoreStrategy(t *testing.T) {
	goodsecret := []byte("R7VCSUfnKc7Y5zE84q6GstYqfMGjL4wM")
	secreta := []byte("a") // deliberately too short for HMAC-SHA512/256
	config := &oidc.Config{
		TokenEntropy: 10,
		GlobalSecret: secreta,
		Lifespans: schema.IdentityProvidersOpenIDConnectLifespanToken{
			AccessToken: time.Hour,
			RefreshToken: time.Hour,
			AuthorizeCode: time.Minute,
		},
	}
	strategy := &oidc.HMACCoreStrategy{
		Enigma: &hmac.HMACStrategy{Config: config},
		Config: config,
	}
	var (
		token, signature string
		err error
	)
	ctx := context.Background()
	// A 1-byte global secret must be rejected outright.
	token, signature, err = strategy.GenerateAuthorizeCode(ctx, &fosite.Request{})
	assert.EqualError(t, err, "secret for signing HMAC-SHA512/256 is expected to be 32 byte long, got 1 byte")
	assert.Empty(t, token)
	assert.Empty(t, signature)
	config.GlobalSecret = goodsecret
	// Authorize codes: generation, prefix, and expiry behavior. A request
	// far in the past is invalid unless the session carries an explicit,
	// still-future expiry for the token type.
	token, signature, err = strategy.GenerateAuthorizeCode(ctx, &fosite.Request{})
	assert.NoError(t, err)
	assert.NotEmpty(t, token)
	assert.NotEmpty(t, signature)
	assert.Equal(t, signature, strategy.AuthorizeCodeSignature(ctx, token))
	assert.Regexp(t, regexp.MustCompile(`^authelia_ac_`), token)
	assert.NoError(t, strategy.ValidateAuthorizeCode(ctx, &fosite.Request{RequestedAt: time.Now(), Session: &fosite.DefaultSession{}}, token))
	assert.NoError(t, strategy.ValidateAuthorizeCode(ctx, &fosite.Request{RequestedAt: time.Now(), Session: &fosite.DefaultSession{}}, token))
	assert.EqualError(t, strategy.ValidateAuthorizeCode(ctx, &fosite.Request{RequestedAt: time.Now().Add(time.Hour * -2400), Session: &fosite.DefaultSession{}}, token), "invalid_token")
	assert.NoError(t, strategy.ValidateAuthorizeCode(ctx, &fosite.Request{RequestedAt: time.Now().Add(time.Hour * -2400), Session: &fosite.DefaultSession{ExpiresAt: map[fosite.TokenType]time.Time{fosite.AuthorizeCode: time.Now().Add(100 * time.Hour)}}}, token))
	assert.EqualError(t, strategy.ValidateAuthorizeCode(ctx, &fosite.Request{RequestedAt: time.Now(), Session: &fosite.DefaultSession{ExpiresAt: map[fosite.TokenType]time.Time{fosite.AuthorizeCode: time.Now().Add(-100 * time.Second)}}}, token), "invalid_token")
	// Refresh tokens: same checks with the authelia_rt_ prefix.
	token, signature, err = strategy.GenerateRefreshToken(ctx, &fosite.Request{})
	assert.NoError(t, err)
	assert.NotEmpty(t, token)
	assert.NotEmpty(t, signature)
	assert.Equal(t, signature, strategy.RefreshTokenSignature(ctx, token))
	assert.Regexp(t, regexp.MustCompile(`^authelia_rt_`), token)
	assert.NoError(t, strategy.ValidateRefreshToken(ctx, &fosite.Request{RequestedAt: time.Now(), Session: &fosite.DefaultSession{}}, token))
	assert.NoError(t, strategy.ValidateRefreshToken(ctx, &fosite.Request{RequestedAt: time.Now(), Session: &fosite.DefaultSession{}}, token))
	assert.NoError(t, strategy.ValidateRefreshToken(ctx, &fosite.Request{RequestedAt: time.Now().Add(time.Hour * -2400), Session: &fosite.DefaultSession{ExpiresAt: map[fosite.TokenType]time.Time{fosite.RefreshToken: time.Now().Add(100 * time.Hour)}}}, token))
	assert.EqualError(t, strategy.ValidateRefreshToken(ctx, &fosite.Request{RequestedAt: time.Now(), Session: &fosite.DefaultSession{ExpiresAt: map[fosite.TokenType]time.Time{fosite.RefreshToken: time.Now().Add(-100 * time.Second)}}}, token), "invalid_token")
	// Access tokens: same checks with the authelia_at_ prefix.
	token, signature, err = strategy.GenerateAccessToken(ctx, &fosite.Request{})
	assert.NoError(t, err)
	assert.NotEmpty(t, token)
	assert.NotEmpty(t, signature)
	assert.Equal(t, signature, strategy.AccessTokenSignature(ctx, token))
	assert.Regexp(t, regexp.MustCompile(`^authelia_at_`), token)
	assert.NoError(t, strategy.ValidateAccessToken(ctx, &fosite.Request{RequestedAt: time.Now(), Session: &fosite.DefaultSession{}}, token))
	assert.NoError(t, strategy.ValidateAccessToken(ctx, &fosite.Request{RequestedAt: time.Now(), Session: &fosite.DefaultSession{}}, token))
	assert.EqualError(t, strategy.ValidateAccessToken(ctx, &fosite.Request{RequestedAt: time.Now().Add(time.Hour * -2400), Session: &fosite.DefaultSession{}}, token), "invalid_token")
	assert.NoError(t, strategy.ValidateAccessToken(ctx, &fosite.Request{RequestedAt: time.Now().Add(time.Hour * -2400), Session: &fosite.DefaultSession{ExpiresAt: map[fosite.TokenType]time.Time{fosite.AccessToken: time.Now().Add(100 * time.Hour)}}}, token))
	assert.EqualError(t, strategy.ValidateAccessToken(ctx, &fosite.Request{RequestedAt: time.Now(), Session: &fosite.DefaultSession{ExpiresAt: map[fosite.TokenType]time.Time{fosite.AccessToken: time.Now().Add(-100 * time.Second)}}}, token), "invalid_token")
	// When the global secret cannot be loaded at all, every generator must
	// fail with the underlying error and return empty outputs.
	badconfig := &BadGlobalSecretConfig{
		Config: config,
	}
	badstrategy := &oidc.HMACCoreStrategy{
		Enigma: &hmac.HMACStrategy{Config: badconfig},
		Config: badconfig,
	}
	token, signature, err = badstrategy.GenerateRefreshToken(ctx, &fosite.Request{})
	assert.Equal(t, "", token)
	assert.Equal(t, "", signature)
	assert.EqualError(t, oidc.ErrorToDebugRFC6749Error(err), "bad secret")
	token, signature, err = badstrategy.GenerateAccessToken(ctx, &fosite.Request{})
	assert.Equal(t, "", token)
	assert.Equal(t, "", signature)
	assert.EqualError(t, oidc.ErrorToDebugRFC6749Error(err), "bad secret")
	token, signature, err = badstrategy.GenerateAuthorizeCode(ctx, &fosite.Request{})
	assert.Equal(t, "", token)
	assert.Equal(t, "", signature)
	assert.EqualError(t, oidc.ErrorToDebugRFC6749Error(err), "bad secret")
}
// BadGlobalSecretConfig wraps an oidc.Config so GetGlobalSecret can be
// overridden to always fail, exercising the error paths of the HMAC strategy.
type BadGlobalSecretConfig struct {
	*oidc.Config
}

// GetGlobalSecret always fails with "bad secret" so tests can assert that
// token generation surfaces secret-loading errors.
func (*BadGlobalSecretConfig) GetGlobalSecret(ctx context.Context) ([]byte, error) {
	return nil, fmt.Errorf("bad secret")
}
|
package templatecode
import (
"fmt"
"os"
"regexp"
"strings"
)
// CreateController generates a controller source file.
// name: file name (without extension)
// path: directory the file is created in
func CreateController(name, path string) {
	create(name, path, 1)
}

// CreateServices generates a services source file.
// name: file name (without extension)
// path: directory the file is created in
func CreateServices(name, path string) {
	create(name, path, 2)
}
// create generates a Go source file from the package template.
// name: file name (without extension)
// path: directory the file is created in
// fType: kind of file to create: 1 = controller, 2 = services
func create(name, path string, fType int) {
	var filePath = path + "/" + name + ".go"
	// NOTE(review): O_APPEND means re-running for an existing file appends a
	// second copy of the template instead of overwriting — confirm intended.
	var f1, er = os.OpenFile(filePath, os.O_APPEND|os.O_CREATE, 0666)
	if er != nil {
		fmt.Println("openfile__er打开文件失败:", er)
		return
	}
	defer f1.Close()
	var fileType = ""
	var target = ""
	var targetChild = ""
	switch fType {
	case 1: // controllers: FooController embedding BaseController
		fileType = "controllers"
		target = initName(name) + "Controller"
		targetChild = "BaseController"
	case 2: // services: FooServices with no parent struct
		fileType = "services"
		target = initName(name) + "Services"
		targetChild = ""
	}
	// Fill the template placeholders: package name, struct name, parent struct.
	var result = replaceType(fileType, getTemplateString())
	result = replaceTarget(target, result)
	result = replaceParents(targetChild, result)
	n, err := f1.WriteString(result)
	fmt.Println("文件写入:----------", n, ",", err)
}
// initName normalizes a snake_case table name into an exported CamelCase
// identifier: the first letter and each letter after an underscore are
// upper-cased, then all underscores are stripped.
// name: table name, e.g. "user_name"
// return: the CamelCase name, e.g. "UserName"
func initName(name string) string {
	capitalizer := regexp.MustCompile(`^([a-z])|(_[a-z])+`)
	camel := capitalizer.ReplaceAllStringFunc(name, strings.ToUpper)
	return regexp.MustCompile(`(_)+`).ReplaceAllString(camel, "")
}
// replaceType substitutes the template's package-name placeholder ("filetype").
// typeName: package name to insert
// strModel: template contents
// return: the template with every placeholder occurrence replaced
//
// The placeholder is a plain literal, so strings.ReplaceAll is used instead of
// compiling a regexp on every call.
func replaceType(typeName, strModel string) string {
	return strings.ReplaceAll(strModel, "filetype", typeName)
}
// replaceTarget 替换template的结构体名称
// targetName: 结构体名称
// strModel: template字符串内容
// return: string-替换后的数据
func replaceTarget(targetName, strModel string) string {
var reg = regexp.MustCompile(`target`)
return reg.ReplaceAllString(strModel, initName(targetName))
}
// replaceChild 替换template的结构体所继承的结构体名称
// name: 结构体名称
// strModel: template字符串内容
// return: string-替换后的数据
func replaceParents(name, strModel string) string {
var reg = regexp.MustCompile(`child`)
return reg.ReplaceAllString(strModel, initName(name))
}
// getTemplateString returns the raw source template filled in by create();
// templateController is a package-level string declared elsewhere in this package.
func getTemplateString() string {
	return templateController
}
|
package config
import (
"bytes"
"os"
"github.com/BurntSushi/toml"
"github.com/golang/glog"
"sub_account_service/finance/lib"
)
// zhifubaoConfig holds the Alipay (zhifubao) gateway settings loaded from
// conf/zhifubao_config.toml; the field names double as the TOML keys.
type zhifubaoConfig struct {
	AlipayAppID string
	AlipayUrl string
	AlipayPrivateKey string
	AlipayPublicKey string
	Format string
	Charset string
	SignType string
	GoodsType string
	AlipaySeller string
	RefundUrl string
}

// private: cached configuration instance, populated once by init().
var zfbConfig = zhifubaoConfig{}

// ZfbConfig returns a copy of the loaded Alipay configuration.
func ZfbConfig() zhifubaoConfig {
	return zfbConfig
}
// init loads ../conf/zhifubao_config.toml into zfbConfig at package load time.
// Decode failures are only logged; zfbConfig then stays zero-valued.
func init() {
	file := "../conf/zhifubao_config.toml"
	glog.Infoln(lib.Log("initing", "", "finding config ..."), file)
	// If the config file does not exist:
	if _, err := os.Stat(file); os.IsNotExist(err) {
		// NOTE(review): the defaults are encoded into buf but never written to
		// disk, so despite the log message no new file is actually created —
		// confirm whether a write was intended here.
		buf := new(bytes.Buffer)
		if err := toml.NewEncoder(buf).Encode(Opts()); err != nil {
			glog.Infoln("如果配置文件不存在 ...")
		}
		glog.Infoln("没有找到配置文件,创建新文件 ...")
	}
	var conf zhifubaoConfig
	_, err := toml.DecodeFile(file, &conf)
	if err != nil {
		glog.Infoln("DecodeFile Error ...", err)
	}
	zfbConfig = conf
	glog.Infoln(lib.Log("initing", "", "config.zhifubaoConfig()"), zfbConfig)
}
|
package size
import (
"fmt"
"strconv"
)
// Size is a byte count (1 byte = 8 bits) that formats itself in
// human-readable units.
type Size int64

// Measure is the unit base: 1024 for binary units (KiB, MiB, ...) or 1000
// for decimal units (KB, MB, ...).
var Measure = Size(1 << 10)

// Precision is the number of digits after the decimal point in String.
var Precision = 1

// HaveSpace controls whether a space separates the number from the unit.
var HaveSpace = true

// String renders the size with its largest fitting unit, e.g.
// 1 TiB = 1024 GiB. Negative sizes render as "unknown"; sizes below one
// unit render as plain bytes ("5 B").
func (s *Size) String() string {
	if *s < 0 {
		return "unknown"
	}
	p := strconv.Itoa(Precision)
	sp := ""
	f := "%ciB"
	if Measure == 1000 {
		f = "%cB"
	}
	if HaveSpace {
		sp = " "
	}
	if *s < Measure {
		// BUG FIX: the old code reused f here, whose %c verb had no matching
		// argument and produced "%!c(MISSING)" in the output.
		return fmt.Sprintf("%d"+sp+"B", *s)
	}
	div, exp := Measure, 0
	for n := *s / Measure; n >= Measure; n /= Measure {
		div *= Measure
		exp++
	}
	return fmt.Sprintf("%."+p+"f"+sp+f, float64(*s)/float64(div), "KMGTPE"[exp])
}
|
package pack
import (
"bytes"
"crypto"
"crypto/rsa"
"crypto/sha256"
"encoding/binary"
"encoding/json"
"fmt"
"math"
"time"
"github.com/syndtr/goleveldb/leveldb"
)
// DIFF is the proof-of-work difficulty: the number of leading zero bytes
// required in a block hash.
const DIFF = 3

// Mine performs proof-of-work over the given transactions and previous block
// hash, returning a new block whose hash has DIFF leading zero bytes.
// tran: transactions to include; phash: hash of the previous block.
func Mine(tran []Transaction, phash [32]byte) Block {
	// The nonce local was formerly "n" and the timestamp local was named
	// "time", shadowing the time package; both renamed for clarity.
	var nonce int64 = 0
	mroot := MakeMRoot(tran)
	timestamp := time.Now().Unix()
	var hash [32]byte
	for {
		// Block header: timestamp | merkle root | previous hash | nonce.
		header := bytes.Join([][]byte{Convert(timestamp), mroot[0:32], phash[0:32], Convert(nonce)}, []byte{})
		hash = sha256.Sum256(header)
		if CheckPOW(hash, DIFF) {
			return Block{
				timestamp,
				tran,
				hash,
				phash,
				nonce,
				mroot,
			}
		}
		nonce++
	}
}
// CheckTranPool validates the transaction pool against the stored chain and
// returns the spendable transactions, with a coinbase reward prepended.
// tran: candidate transactions; db: LevelDB handle holding the chain.
func CheckTranPool(tran []Transaction, db *leveldb.DB) []Transaction {
	// "heigh" stores the current chain height as a big-endian uint64.
	// NOTE(review): db errors here and in the block loads are ignored.
	heigh, _ := db.Get([]byte("heigh"), nil)
	h := int64(binary.BigEndian.Uint64(heigh))
	var i int64
	// Load every block: index -> block hash -> serialized block.
	chain := make([]Block, h+1)
	for i = 0; i < h+1; i++ {
		b, _ := db.Get(Convert(i), nil)
		c, _ := db.Get(b, nil)
		json.Unmarshal(c, &chain[i])
	}
	lenTran := len(tran)
	TranResult := make([]Transaction, 0)
	// Per-candidate totals of coins received (CoinIn) and spent (CoinOut) by
	// the sender address, accumulated over the whole chain.
	CoinIn := make([]int64, lenTran)
	CoinOut := make([]int64, lenTran)
	for i = 0; i < h+1; i++ {
		n := len(chain[i].Data)
		for j := 0; j < n; j++ {
			for k := 0; k < lenTran; k++ {
				if chain[i].Data[j].ReceiveAdd == tran[k].SendAdd {
					CoinIn[k] = CoinIn[k] + chain[i].Data[j].Count
				} else {
					if chain[i].Data[j].SendAdd == tran[k].SendAdd {
						CoinOut[k] = CoinOut[k] + chain[i].Data[j].Count
					}
				}
			}
		}
	}
	// Coinbase: award 10 coins to the hard-coded miner address.
	admine := []byte("FdKmobTRmXbKlfrcDdDQZHLsbmjhjQbs")
	var addr [32]byte
	copy(addr[:], admine[0:32])
	tranbase := Transaction{
		[32]byte{},
		addr,
		10,
		nil,
		rsa.PublicKey{},
	}
	TranResult = append(TranResult, tranbase)
	// Keep only transactions whose balance strictly exceeds the amount.
	// NOTE(review): ">" rejects spending the exact full balance — confirm.
	for k := 0; k < lenTran; k++ {
		if (CoinIn[k] - CoinOut[k]) > tran[k].Count {
			TranResult = append(TranResult, tran[k])
		}
	}
	return TranResult
}
// CheckPOW reports whether hash b meets the proof-of-work target, i.e. its
// first diff bytes are all zero.
func CheckPOW(b [32]byte, diff int) bool {
	for pos := 0; pos < diff; pos++ {
		if b[pos] != 0 {
			return false
		}
	}
	return true
}
// CheckTran validates a single transaction: the sender address must match the
// address derived from the public key, the RSA-PSS signature must verify, and
// the sender's on-chain balance must strictly exceed the amount.
// tran: transaction to check; db: LevelDB handle holding the chain.
func CheckTran(tran Transaction, db *leveldb.DB) bool {
	// Signed message: sender address | receiver address | amount.
	mid := bytes.Join([][]byte{tran.SendAdd[0:32], tran.ReceiveAdd[0:32], Convert(tran.Count)}, []byte{})
	hash := sha256.Sum256(mid)
	// NOTE(review): this Marshal error is never checked — err is overwritten
	// by VerifyPSS below, so a bad Publickey is not caught here.
	pub, err := json.Marshal(tran.Publickey)
	ad := sha256.Sum256(pub)
	// Remap each hash byte outside the ASCII letter ranges; this appears to
	// fold bytes into the wallet's address alphabet — verify against the
	// address-generation code before relying on the details.
	for i := 0; i < 32; i++ {
		if ad[i] < 65 || ad[i] > 122 || (90 < ad[i] && ad[i] < 97) {
			if 26 <= byte(math.Mod(float64(ad[i]), 52)) && byte(math.Mod(float64(ad[i]), 52)) <= 31 {
				ad[i] = byte(math.Mod(float64(ad[i]), 52)) + 65 + 10
			} else {
				ad[i] = byte(math.Mod(float64(ad[i]), 52)) + 65
			}
		}
	}
	// The derived address must equal the claimed sender address.
	if ad != tran.SendAdd {
		return false
	}
	err = rsa.VerifyPSS(&tran.Publickey, crypto.SHA256, hash[:], tran.Signature, nil)
	if err != nil {
		return false
	}
	// Replay the whole chain to compute the sender's balance.
	heigh, _ := db.Get([]byte("heigh"), nil)
	h := int64(binary.BigEndian.Uint64(heigh))
	var i int64
	chain := make([]Block, h+1)
	for i = 0; i < h+1; i++ {
		b, err := db.Get(Convert(i), nil)
		if err != nil {
			fmt.Println("loi doc du lieu tu db minning--> CheckTran() :", err)
			return false
		}
		c, err := db.Get(b, nil)
		if err != nil {
			fmt.Println("loi doc du lieu tu db minning--> CheckTran() :", err)
			return false
		}
		json.Unmarshal(c, &chain[i])
	}
	CoinIn := int64(0)
	CoinOut := int64(0)
	for i = 0; i < h+1; i++ {
		n := len(chain[i].Data)
		for j := 0; j < n; j++ {
			if chain[i].Data[j].ReceiveAdd == tran.SendAdd {
				CoinIn = CoinIn + chain[i].Data[j].Count
			} else {
				if chain[i].Data[j].SendAdd == tran.SendAdd {
					CoinOut = CoinOut + chain[i].Data[j].Count
				}
			}
		}
	}
	// NOTE(review): ">" rejects spending the exact full balance — confirm.
	if (CoinIn - CoinOut) > tran.Count {
		return true
	} else {
		return false
	}
}
|
package LinkedList
// Node is one element of a singly linked list holding an arbitrary value.
type Node struct {
	data interface{} // payload stored at this node
	next *Node // following node, or nil at the tail
}

// Next returns the node after n, or nil if n is the last node.
func (n *Node) Next() *Node {
	return n.next
}
|
package bom
// BOM Mongodb Mongo builder of (go.mongodb.org/mongo-driver)
import (
"context"
"encoding/json"
"fmt"
"math"
"reflect"
"strings"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
type (
	// Bom is a fluent query builder for the official mongo driver; filters,
	// sorting, pagination and driver options accumulate on it until a
	// terminal method (List, FindOne, Update, ...) executes the query.
	Bom struct {
		client *mongo.Client
		dbName string
		dbCollection string
		queryTimeout time.Duration
		condition interface{}
		skipWhenUpdating map[string]bool
		whereConditions []map[string]interface{}
		orConditions []map[string]interface{}
		inConditions []map[string]interface{}
		notInConditions []map[string]interface{}
		// NOTE(review): notConditions is filled by Not() but never read by
		// buildCondition — confirm whether Not() should have an effect.
		notConditions []map[string]interface{}
		aggregateOptions []*options.AggregateOptions
		updateOptions []*options.UpdateOptions
		insertOptions []*options.InsertOneOptions
		findOneOptions []*options.FindOneOptions
		findOneAndUpdateOptions []*options.FindOneAndUpdateOptions
		pagination *Pagination
		limit *Limit
		sort []*Sort
		lastId string
		useAggrigate bool // (sic) set by Select/AddSelect
		selectArg []interface{}
	}
	// Pagination describes one page of a paginated listing.
	Pagination struct {
		TotalCount int32
		TotalPages int32
		CurrentPage int32
		Size int32
	}
	// Sort is a single sort directive; Type is "asc" or "desc".
	Sort struct {
		Field string
		Type string
	}
	// Limit selects a 1-based page number and a page size.
	Limit struct {
		Page int32
		Size int32
	}
	Size int32
	// Option is a functional option applied by New.
	Option func(*Bom) error
	// ElemMatch is a key/value pair used to build $elemMatch projections.
	ElemMatch struct {
		Key string
		Val interface{}
	}
)

// Defaults used by New when no overriding options are supplied.
const (
	DefaultQueryTimeout = 5 * time.Second
	DefaultSize = 20
)

var (
	// mType maps sort-type names to mongo sort directions.
	mType = map[string]int32{"asc": 1, "desc": -1}
	// skipWhenUpdating lists field names Update never writes.
	skipWhenUpdating = map[string]bool{"id": true, "createdat": true, "updatedat": true}
)
// New builds a Bom with sane defaults (5s query timeout, page size 20, page 1)
// and applies the given functional options in order. A mongo client option
// (SetMongoClient) is mandatory.
func New(options ...Option) (*Bom, error) {
	b := &Bom{
		queryTimeout: DefaultQueryTimeout,
		pagination: &Pagination{
			Size: DefaultSize,
			CurrentPage: 1,
		},
		skipWhenUpdating: skipWhenUpdating,
		limit: &Limit{Page: 1, Size: DefaultSize},
	}
	for _, option := range options {
		if err := option(b); err != nil {
			return nil, err
		}
	}
	if b.client == nil {
		// Fixed typo in the error message ("mondodb" -> "mongodb").
		return nil, fmt.Errorf("mongodb client is required")
	}
	return b, nil
}
// ElMatch is a convenience constructor for an ElemMatch pair.
func ElMatch(key string, val interface{}) ElemMatch {
	return ElemMatch{Key: key, Val: val}
}

// ToObj converts a hex string into an ObjectID.
// NOTE(review): the parse error is discarded — an invalid id yields the zero
// ObjectID rather than an error.
func ToObj(id string) primitive.ObjectID {
	objectID, _ := primitive.ObjectIDFromHex(id)
	return objectID
}

// ToObjects converts hex strings into ObjectIDs, silently mapping invalid
// entries to the zero ObjectID.
func ToObjects(ids []string) []primitive.ObjectID {
	var objectIds []primitive.ObjectID
	for _, id := range ids {
		objectId, _ := primitive.ObjectIDFromHex(id)
		objectIds = append(objectIds, objectId)
	}
	return objectIds
}
// SetMongoClient supplies the mongo client; New fails without it.
func SetMongoClient(client *mongo.Client) Option {
	return func(b *Bom) error {
		b.client = client
		return nil
	}
}

// SetDatabaseName selects the database used by Mongo().
func SetDatabaseName(dbName string) Option {
	return func(b *Bom) error {
		b.dbName = dbName
		return nil
	}
}

// SetSkipWhenUpdating replaces the set of field names Update never writes.
func SetSkipWhenUpdating(fieldsMap map[string]bool) Option {
	return func(b *Bom) error {
		b.skipWhenUpdating = fieldsMap
		return nil
	}
}

// SetCollection selects the collection used by Mongo().
func SetCollection(collection string) Option {
	return func(b *Bom) error {
		b.dbCollection = collection
		return nil
	}
}

// SetQueryTimeout overrides the per-query timeout.
// NOTE(review): several query methods pass DefaultQueryTimeout directly and
// ignore this field — confirm which timeout should win.
func SetQueryTimeout(time time.Duration) Option {
	return func(b *Bom) error {
		b.queryTimeout = time
		return nil
	}
}
// WithDB switches the target database and returns b for chaining.
func (b *Bom) WithDB(dbName string) *Bom {
	b.dbName = dbName
	return b
}

// WithColl switches the target collection.
func (b *Bom) WithColl(collection string) *Bom {
	b.dbCollection = collection
	return b
}

// WithTimeout sets the per-query timeout.
// NOTE(review): several query methods ignore this and use DefaultQueryTimeout.
func (b *Bom) WithTimeout(time time.Duration) *Bom {
	b.queryTimeout = time
	return b
}

// WithCondition installs a raw filter that overrides all accumulated clauses.
func (b *Bom) WithCondition(condition interface{}) *Bom {
	b.condition = condition
	return b
}

// WithLimit overrides page and size; non-positive values are ignored.
func (b *Bom) WithLimit(limit *Limit) *Bom {
	if limit.Page > 0 {
		b.limit.Page = limit.Page
	}
	if limit.Size > 0 {
		b.limit.Size = limit.Size
	}
	return b
}

// WithLastId seeds cursor-style paging for ListWithLastId.
func (b *Bom) WithLastId(lastId string) *Bom {
	b.lastId = lastId
	return b
}

// WithSort appends a sort directive.
func (b *Bom) WithSort(sort *Sort) *Bom {
	b.sort = append(b.sort, sort)
	return b
}

// WithSize overrides the page size; non-positive values are ignored.
func (b *Bom) WithSize(size int32) *Bom {
	if size > 0 {
		b.limit.Size = size
	}
	return b
}
// Where adds an equality AND filter.
//
// Deprecated: should use WhereConditions or WhereEq.
func (b *Bom) Where(field string, value interface{}) *Bom {
	b = b.WhereConditions(field, "=", value)
	return b
}

// WhereEq adds an equality AND filter (field == value).
func (b *Bom) WhereEq(field string, value interface{}) *Bom {
	b = b.WhereConditions(field, "=", value)
	return b
}

// WhereNotEq adds a $ne AND filter (field != value).
func (b *Bom) WhereNotEq(field string, value interface{}) *Bom {
	b = b.WhereConditions(field, "!=", value)
	return b
}

// WhereGt adds a $gt AND filter (field > value).
func (b *Bom) WhereGt(field string, value interface{}) *Bom {
	b = b.WhereConditions(field, ">", value)
	return b
}

// WhereGte adds a $gte AND filter (field >= value).
func (b *Bom) WhereGte(field string, value interface{}) *Bom {
	b = b.WhereConditions(field, ">=", value)
	return b
}

// WhereLt adds a $lt AND filter (field < value).
func (b *Bom) WhereLt(field string, value interface{}) *Bom {
	b = b.WhereConditions(field, "<", value)
	return b
}

// WhereLte adds a $lte AND filter (field <= value).
func (b *Bom) WhereLte(field string, value interface{}) *Bom {
	b = b.WhereConditions(field, "<=", value)
	return b
}

// AddSelect appends one projection argument (string field name or ElemMatch)
// and switches the builder into aggregate mode.
func (b *Bom) AddSelect(arg interface{}) *Bom {
	b.useAggrigate = true
	b.selectArg = append(b.selectArg, arg)
	return b
}

// Select replaces the projection argument list and switches the builder into
// aggregate mode.
func (b *Bom) Select(arg ...interface{}) *Bom {
	b.useAggrigate = true
	b.selectArg = arg
	return b
}
// WhereConditions appends an AND filter on field. The conditions argument
// selects the comparison (">", ">=", "<", "<=", "!="); any other value is
// treated as plain equality.
func (b *Bom) WhereConditions(field string, conditions string, value interface{}) *Bom {
	operators := map[string]string{
		">":  "$gt",
		">=": "$gte",
		"<":  "$lt",
		"<=": "$lte",
		"!=": "$ne",
	}
	filter := value
	if op, known := operators[conditions]; known {
		filter = primitive.D{{Key: op, Value: value}}
	}
	b.whereConditions = append(b.whereConditions, map[string]interface{}{"field": field, "value": filter})
	return b
}
// OrWhereConditions appends an OR filter on field. The conditions argument
// selects the comparison (">", ">=", "<", "<=", "!="); any other value is
// treated as plain equality.
func (b *Bom) OrWhereConditions(field string, conditions string, value interface{}) *Bom {
	switch conditions {
	case ">":
		b.orConditions = append(b.orConditions, map[string]interface{}{"field": field, "value": primitive.D{{Key: "$gt", Value: value}}})
	case ">=":
		b.orConditions = append(b.orConditions, map[string]interface{}{"field": field, "value": primitive.D{{Key: "$gte", Value: value}}})
	case "<":
		b.orConditions = append(b.orConditions, map[string]interface{}{"field": field, "value": primitive.D{{Key: "$lt", Value: value}}})
	case "<=":
		b.orConditions = append(b.orConditions, map[string]interface{}{"field": field, "value": primitive.D{{Key: "$lte", Value: value}}})
	case "!=":
		// Fix: this case was missing, so "!=" silently fell through to the
		// default equality branch, diverging from WhereConditions.
		b.orConditions = append(b.orConditions, map[string]interface{}{"field": field, "value": primitive.D{{Key: "$ne", Value: value}}})
	default:
		b.orConditions = append(b.orConditions, map[string]interface{}{"field": field, "value": value})
	}
	return b
}
// SetUpdateOptions appends driver options applied to subsequent update calls.
func (b *Bom) SetUpdateOptions(opts ...*options.UpdateOptions) *Bom {
	b.updateOptions = append(b.updateOptions, opts...)
	return b
}

// SetAggrigateOptions replaces the driver options used for aggregation calls.
func (b *Bom) SetAggrigateOptions(opts ...*options.AggregateOptions) *Bom {
	b.aggregateOptions = opts
	return b
}

// SetInsertOptions appends driver options applied to subsequent InsertOne calls.
func (b *Bom) SetInsertOptions(opts ...*options.InsertOneOptions) *Bom {
	b.insertOptions = append(b.insertOptions, opts...)
	return b
}

// SetFindOneOptions appends driver options applied to subsequent FindOne calls.
func (b *Bom) SetFindOneOptions(opts ...*options.FindOneOptions) *Bom {
	b.findOneOptions = append(b.findOneOptions, opts...)
	return b
}

// SetFindOnEndUpdateOptions appends driver options applied to subsequent
// FindOneAndUpdate calls. (Name kept as-is for compatibility.)
func (b *Bom) SetFindOnEndUpdateOptions(opts ...*options.FindOneAndUpdateOptions) *Bom {
	b.findOneAndUpdateOptions = append(b.findOneAndUpdateOptions, opts...)
	return b
}
// OrWhereEq adds an equality OR filter (field == value).
func (b *Bom) OrWhereEq(field string, value interface{}) *Bom {
	b = b.OrWhereConditions(field, "=", value)
	return b
}

// OrWhereGt adds a $gt OR filter (field > value).
func (b *Bom) OrWhereGt(field string, value interface{}) *Bom {
	b = b.OrWhereConditions(field, ">", value)
	return b
}

// OrWhereGte adds a $gte OR filter (field >= value).
func (b *Bom) OrWhereGte(field string, value interface{}) *Bom {
	b = b.OrWhereConditions(field, ">=", value)
	return b
}

// OrWhereLt adds a $lt OR filter (field < value).
func (b *Bom) OrWhereLt(field string, value interface{}) *Bom {
	b = b.OrWhereConditions(field, "<", value)
	return b
}

// OrWhereLte adds a $lte OR filter (field <= value).
func (b *Bom) OrWhereLte(field string, value interface{}) *Bom {
	b = b.OrWhereConditions(field, "<=", value)
	return b
}

// Not records a negated equality pair.
// NOTE(review): notConditions is never consulted by buildCondition, so this
// filter currently has no effect on queries — confirm intended behavior.
func (b *Bom) Not(field string, value interface{}) *Bom {
	b.notConditions = append(b.notConditions, map[string]interface{}{"field": field, "value": value})
	return b
}

// InWhere adds an $in filter on field.
func (b *Bom) InWhere(field string, value interface{}) *Bom {
	b.inConditions = append(b.inConditions, map[string]interface{}{"field": field, "value": value})
	return b
}

// NotInWhere adds a $nin filter on field.
func (b *Bom) NotInWhere(field string, value interface{}) *Bom {
	b.notInConditions = append(b.notInConditions, map[string]interface{}{"field": field, "value": value})
	return b
}

// OrWhere adds an equality OR filter.
//
// Deprecated: should use OrWhereConditions or OrWhereEq.
func (b *Bom) OrWhere(field string, value interface{}) *Bom {
	b.OrWhereEq(field, value)
	return b
}
// buildProjection turns the Select/AddSelect arguments into a projection
// document: a string s becomes {s: 1}; an ElemMatch whose Val is itself an
// ElemMatch becomes {key: {$elemMatch: {innerKey: innerVal}}}. The boolean
// reports whether any projection was produced.
// NOTE(review): an ElemMatch whose Val is NOT a nested ElemMatch is silently
// dropped — confirm the required double nesting is intentional.
func (b *Bom) buildProjection() (interface{}, bool) {
	var result = make(primitive.M)
	for _, item := range b.selectArg {
		switch v := item.(type) {
		case string:
			result[v] = 1
		case ElemMatch:
			if vo, ok := v.Val.(ElemMatch); ok {
				var sub = make(primitive.M)
				sub["$elemMatch"] = primitive.M{vo.Key: vo.Val}
				result[v.Key] = sub
			}
		}
	}
	if len(result) > 0 {
		return result, true
	}
	return nil, false
}

// buildCondition assembles the accumulated clauses into a single filter:
// whereConditions AND-ed under "$and", orConditions grouped under "$or",
// and in/notIn conditions as per-field "$in"/"$nin" documents.
// NOTE(review): notConditions (filled by Not) is never consulted here, so
// Not() currently has no effect — confirm.
func (b *Bom) buildCondition() interface{} {
	result := make(primitive.M)
	if len(b.whereConditions) > 0 {
		var query []primitive.M
		for _, cnd := range b.whereConditions {
			field := cnd["field"]
			value := cnd["value"]
			query = append(query, primitive.M{field.(string): value})
		}
		result["$and"] = query
	}
	if len(b.orConditions) > 0 {
		var query []primitive.M
		for _, cnd := range b.orConditions {
			field := cnd["field"]
			value := cnd["value"]
			query = append(query, primitive.M{field.(string): value})
		}
		result["$or"] = query
	}
	if len(b.inConditions) > 0 {
		for _, cnd := range b.inConditions {
			field := cnd["field"]
			value := cnd["value"]
			result[field.(string)] = primitive.M{"$in": value}
		}
	}
	if len(b.notInConditions) > 0 {
		for _, cnd := range b.notInConditions {
			field := cnd["field"]
			value := cnd["value"]
			result[field.(string)] = primitive.M{"$nin": value}
		}
	}
	return result
}
// Mongo returns the driver collection handle for the configured database and
// collection.
func (b *Bom) Mongo() *mongo.Collection {
	return b.client.Database(b.dbName).Collection(b.dbCollection)
}

// getTotalPages computes ceil(TotalCount / Size) from the pagination state.
// NOTE(review): the d < 0 guard is unreachable for non-negative counts, and a
// zero Size would yield NaN — confirm Size is always positive here.
func (b *Bom) getTotalPages() int32 {
	d := float64(b.pagination.TotalCount) / float64(b.pagination.Size)
	if d < 0 {
		d = 1
	}
	return int32(math.Ceil(d))
}

// getPagination records total/page/size (positive values only) into the
// shared pagination state, recomputes the page count, and returns the state.
func (b *Bom) getPagination(total int32, page int32, size int32) *Pagination {
	b.pagination.TotalCount = total
	if page > 0 {
		b.pagination.CurrentPage = page
	}
	if size > 0 {
		b.pagination.Size = size
	}
	b.pagination.TotalPages = b.getTotalPages()
	return b.pagination
}

// readFieldName resolves a struct field's mongo key: the first segment of its
// json tag lower-cased, or the lower-cased Go field name when untagged.
// NOTE(review): a json:"-" tag comes back as the literal key "-" — confirm.
func (b *Bom) readFieldName(f reflect.StructField) string {
	val, ok := f.Tag.Lookup("json")
	if !ok {
		return strings.ToLower(f.Name)
	}
	opts := strings.Split(val, ",")
	return strings.ToLower(opts[0])
}
// structToMap flattens struct i into mongo key/value pairs for Update: only
// exported fields tagged `update:"true"` are taken, empty strings are
// skipped, keys come from readFieldName, and keys listed in skipWhenUpdating
// are dropped. Non-struct input yields an error and an empty map.
func (b *Bom) structToMap(i interface{}) (map[string]interface{}, error) {
	result := make(map[string]interface{})
	v := reflect.ValueOf(i)
	t := v.Type()
	if t.Kind() != reflect.Struct {
		return result, fmt.Errorf("type %s is not supported", t.Kind())
	}
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if f.PkgPath != "" {
			// Unexported field: not settable via reflection, skip.
			continue
		}
		if val, exist := f.Tag.Lookup("update"); exist {
			if strings.ToLower(val) != "true" {
				continue
			}
		} else {
			continue
		}
		fv := v.Field(i)
		key := b.readFieldName(f)
		tp := fv.Type().String()
		value := fv.Interface()
		switch tp {
		case "string":
			value = fv.String()
			if fv.String() == "" {
				// Empty strings are treated as "no change".
				continue
			}
		case "interface {}":
			value = fv.Interface()
		case "int", "int8", "int16", "int32", "int64":
			value = fv.Int()
		case "float64", "float32":
			value = fv.Float()
		}
		if _, ok := b.skipWhenUpdating[key]; !ok {
			result[key] = value
		}
	}
	return result, nil
}

// calculateOffset converts a 1-based page and a size into mongo limit/skip
// values; page 0 is treated as page 1, and size 0 falls back to limit.Size.
func (b *Bom) calculateOffset(page, size int32) (limit int32, offset int32) {
	limit = b.limit.Size
	if page == 0 {
		page = 1
	}
	if size > 0 {
		limit = size
	}
	o := float64(page-1) * float64(limit)
	offset = int32(math.Ceil(o))
	return
}
// getSort converts the accumulated Sort directives into a mongo sort map
// (field -> 1 for "asc", -1 for "desc"; unknown or absent types default to
// ascending). The boolean reports whether at least one sortable field exists.
//
// Fix: the old implementation returned from inside the loop, so every Sort
// after the first one with a non-empty Field was silently ignored.
// NOTE: with more than one entry, map iteration order is random, so the
// relative priority of multiple sort keys is not guaranteed.
func (b *Bom) getSort(sorts []*Sort) (map[string]interface{}, bool) {
	sortMap := make(map[string]interface{})
	for _, sort := range sorts {
		if len(sort.Field) == 0 {
			continue
		}
		sortMap[strings.ToLower(sort.Field)] = 1
		if len(sort.Type) > 0 {
			if val, ok := mType[strings.ToLower(sort.Type)]; ok {
				sortMap[strings.ToLower(sort.Field)] = val
			}
		}
	}
	return sortMap, len(sortMap) > 0
}
// getCondition returns the filter for the next query: an explicitly set
// condition wins, then the built-up where/or/in clauses, then an empty
// match-all document.
func (b *Bom) getCondition() interface{} {
	if b.condition != nil {
		return b.condition
	}
	if built, ok := b.buildCondition().(primitive.M); ok {
		return built
	}
	return primitive.M{}
}
// Update converts entity (via its `update` struct tags) into a $set document,
// stamps "updatedat" with $currentDate, and applies it with UpdateRaw under
// the current condition.
//
// Deprecated: method works not correctly user bom generator (https://github.com/cjp2600/protoc-gen-bom)
func (b *Bom) Update(entity interface{}) (*mongo.UpdateResult, error) {
	// NOTE(review): structToMap's error is discarded; a non-struct entity
	// silently becomes an empty $set.
	mp, _ := b.structToMap(entity)
	var eRes []primitive.E
	if len(mp) > 0 {
		for key, val := range mp {
			if val != nil {
				eRes = append(eRes, primitive.E{Key: key, Value: val})
			}
		}
	}
	upResult := primitive.D{
		{"$set", eRes},
		{"$currentDate", primitive.D{{"updatedat", true}}},
	}
	return b.UpdateRaw(upResult)
}
// UpdateRaw applies the given update document to all documents matching the
// current condition. Fixes for this whole group of query methods: the cancel
// func from context.WithTimeout is now released via defer (it was discarded,
// leaking a context per call), and the configured queryTimeout is honoured
// instead of always using the package default.
func (b *Bom) UpdateRaw(update interface{}) (*mongo.UpdateResult, error) {
	ctx, cancel := context.WithTimeout(context.Background(), b.queryTimeout)
	defer cancel()
	return b.Mongo().UpdateOne(ctx, b.getCondition(), update, b.updateOptions...)
}

// InsertOne inserts a single document using the configured insert options.
func (b *Bom) InsertOne(document interface{}) (*mongo.InsertOneResult, error) {
	ctx, cancel := context.WithTimeout(context.Background(), b.queryTimeout)
	defer cancel()
	return b.Mongo().InsertOne(ctx, document, b.insertOptions...)
}

// ConvertJsonToBson round-trips a value through JSON into a BSON document
// (extended JSON, canonical mode). The local buffer was renamed so it no
// longer shadows an identifier named "bytes".
func (b *Bom) ConvertJsonToBson(document interface{}) (interface{}, error) {
	raw, err := json.Marshal(document)
	if err != nil {
		return nil, err
	}
	var bsonDocument interface{}
	if err = bson.UnmarshalExtJSON(raw, true, &bsonDocument); err != nil {
		return nil, err
	}
	return bsonDocument, nil
}

// InsertMany inserts the given documents. The old implementation built a
// bsonDocuments slice it never used; that dead code is removed.
func (b *Bom) InsertMany(documents []interface{}) (*mongo.InsertManyResult, error) {
	ctx, cancel := context.WithTimeout(context.Background(), b.queryTimeout)
	defer cancel()
	return b.Mongo().InsertMany(ctx, documents)
}

// FindOne runs a single-document query under the current condition and hands
// the raw result to callback.
func (b *Bom) FindOne(callback func(s *mongo.SingleResult) error) error {
	ctx, cancel := context.WithTimeout(context.Background(), b.queryTimeout)
	defer cancel()
	s := b.Mongo().FindOne(ctx, b.getCondition(), b.findOneOptions...)
	return callback(s)
}

// FindOneAndUpdate atomically updates the first matching document and returns
// the single-result handle.
func (b *Bom) FindOneAndUpdate(update interface{}) *mongo.SingleResult {
	ctx, cancel := context.WithTimeout(context.Background(), b.queryTimeout)
	defer cancel()
	return b.Mongo().FindOneAndUpdate(ctx, b.getCondition(), update, b.findOneAndUpdateOptions...)
}

// FindOneAndDelete atomically removes the first matching document and returns
// the single-result handle.
func (b *Bom) FindOneAndDelete() *mongo.SingleResult {
	ctx, cancel := context.WithTimeout(context.Background(), b.queryTimeout)
	defer cancel()
	return b.Mongo().FindOneAndDelete(ctx, b.getCondition())
}

// DeleteMany removes every document matching the current condition.
func (b *Bom) DeleteMany() (*mongo.DeleteResult, error) {
	ctx, cancel := context.WithTimeout(context.Background(), b.queryTimeout)
	defer cancel()
	return b.Mongo().DeleteMany(ctx, b.getCondition())
}
// ListWithPagination runs the built query one page at a time: it counts the
// matching documents (estimated count when there is no filter), fetches the
// configured page, invokes callback once per document, and returns the
// resulting pagination state.
// NOTE(review): the cancel func from context.WithTimeout is discarded
// (context leak) and only the final callback error is observed — confirm both.
func (b *Bom) ListWithPagination(callback func(cursor *mongo.Cursor) error) (*Pagination, error) {
	ctx, _ := context.WithTimeout(context.Background(), DefaultQueryTimeout)
	findOptions := options.Find()
	limit, offset := b.calculateOffset(b.limit.Page, b.limit.Size)
	findOptions.SetLimit(int64(limit)).SetSkip(int64(offset))
	if sm, ok := b.getSort(b.sort); ok {
		findOptions.SetSort(sm)
	}
	condition := b.getCondition()
	if projection, ok := b.buildProjection(); ok {
		findOptions.SetProjection(projection)
	}
	var count int64
	var err error
	if condition != nil {
		if bs, ok := condition.(primitive.M); ok {
			if len(bs) > 0 {
				count, err = b.Mongo().CountDocuments(ctx, condition)
			} else {
				// No filter: the cheap estimated count is sufficient.
				count, err = b.Mongo().EstimatedDocumentCount(ctx)
			}
		}
	}
	if err != nil {
		return &Pagination{}, err
	}
	cur, err := b.Mongo().Find(ctx, condition, findOptions)
	if err != nil {
		return &Pagination{}, err
	}
	defer cur.Close(ctx)
	for cur.Next(ctx) {
		err = callback(cur)
	}
	if err := cur.Err(); err != nil {
		return &Pagination{}, err
	}
	pagination := b.getPagination(int32(count), b.limit.Page, b.limit.Size)
	return pagination, err
}

// ListWithLastId pages with a cursor: when lastId is set, the query is
// restricted to _id > lastId; up to limit.Size documents are streamed to
// callback, and the _id of the last document seen is returned so the caller
// can resume. An empty string signals that no further page exists.
// NOTE(review): the context cancel func is leaked, the initial
// cur := &mongo.Cursor{} assignment is dead, and the trailing CountDocuments
// counts only documents after lastId — confirm the "has more" check.
func (b *Bom) ListWithLastId(callback func(cursor *mongo.Cursor) error) (lastId string, err error) {
	ctx, _ := context.WithTimeout(context.Background(), DefaultQueryTimeout)
	lastId = b.lastId
	findOptions := options.Find()
	findOptions.SetLimit(int64(b.limit.Size))
	cur := &mongo.Cursor{}
	if projection, ok := b.buildProjection(); ok {
		findOptions.SetProjection(projection)
	}
	if lastId != "" {
		b.WhereConditions("_id", ">", ToObj(lastId))
	}
	cur, err = b.Mongo().Find(ctx, b.getCondition(), findOptions)
	if err != nil {
		return "", err
	}
	defer cur.Close(ctx)
	var lastElement primitive.ObjectID
	for cur.Next(ctx) {
		err = callback(cur)
		lastElement = cur.Current.Lookup("_id").ObjectID()
	}
	if err := cur.Err(); err != nil {
		return "", err
	}
	count, err := b.Mongo().CountDocuments(ctx, b.getCondition())
	if err != nil {
		return "", err
	}
	if count > int64(b.limit.Size) {
		return lastElement.Hex(), err
	} else {
		return "", err
	}
}
func (b *Bom) List(callback func(cursor *mongo.Cursor) error) error {
ctx, _ := context.WithTimeout(context.Background(), DefaultQueryTimeout)
findOptions := options.Find()
if projection, ok := b.buildProjection(); ok {
findOptions.SetProjection(projection)
}
cur, err := b.Mongo().Find(ctx, b.getCondition(), findOptions)
if err != nil {
return err
}
defer cur.Close(ctx)
for cur.Next(ctx) {
err = callback(cur)
}
if err := cur.Err(); err != nil {
return err
}
return err
} |
// Copyright 2013 Walter Schulze
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import (
"fmt"
)
// HasVar returns whether there exists a variable somewhere in the expression
// tree. This function is executed recursively. It panics when none of the
// expression's four branches is set.
func (this *Expr) HasVar() bool {
	if this.Terminal != nil {
		return this.GetTerminal().Variable != nil
	}
	if this.List != nil {
		return this.GetList().HasVar()
	}
	if this.Function != nil {
		return this.GetFunction().HasVar()
	}
	if this.BuiltIn != nil {
		return true
	}
	panic(fmt.Sprintf("unknown expr %#v", this))
}

// HasVar returns whether there exists a variable somewhere in the typed list.
// This function is executed recursively.
func (this *List) HasVar() bool {
	for _, e := range this.GetElems() {
		if e.HasVar() {
			return true
		}
	}
	return false
}

// HasVar returns whether there exists a variable somewhere in the function
// parameters. This function is executed recursively.
func (this *Function) HasVar() bool {
	for _, p := range this.GetParams() {
		if p.HasVar() {
			return true
		}
	}
	return false
}
|
package resources
import (
DaoClusterTypes "github.com/containers-ai/alameda/datahub/pkg/dao/interfaces/clusterstatus/types"
"github.com/containers-ai/alameda/datahub/pkg/formatconversion/requests/common"
ApiResources "github.com/containers-ai/api/alameda_api/v1alpha1/datahub/resources"
)
// CreateNodesRequestExtended wraps the API create-nodes request with
// validation and conversion helpers.
type CreateNodesRequestExtended struct {
	ApiResources.CreateNodesRequest
}

// ListNodesRequestExtended wraps the API list-nodes request with validation
// and conversion helpers.
type ListNodesRequestExtended struct {
	*ApiResources.ListNodesRequest
}

// DeleteNodesRequestExtended wraps the API delete-nodes request with
// validation and conversion helpers.
type DeleteNodesRequestExtended struct {
	*ApiResources.DeleteNodesRequest
}
// NewNode converts an API node into its DAO representation, clearing the
// namespace and node-name fields of the normalized object meta. A nil input
// yields a nil result.
func NewNode(node *ApiResources.Node) *DaoClusterTypes.Node {
	if node == nil {
		return nil
	}
	// Normalize request
	objectMeta := NewObjectMeta(node.GetObjectMeta())
	objectMeta.Namespace = ""
	objectMeta.NodeName = ""
	result := DaoClusterTypes.Node{}
	result.ObjectMeta = &objectMeta
	result.CreateTime = node.GetStartTime()
	result.MachineCreateTime = node.GetMachineStartTime()
	result.Capacity = NewCapacity(node.GetCapacity())
	result.AlamedaNodeSpec = NewAlamedaNodeSpec(node.GetAlamedaNodeSpec())
	return &result
}
// Validate checks the create-nodes request; there is currently nothing to verify.
func (p *CreateNodesRequestExtended) Validate() error {
	return nil
}

// ProduceNodes converts every API node in the request into its DAO form.
func (p *CreateNodesRequestExtended) ProduceNodes() []*DaoClusterTypes.Node {
	nodes := make([]*DaoClusterTypes.Node, 0)
	for _, node := range p.GetNodes() {
		nodes = append(nodes, NewNode(node))
	}
	return nodes
}

// Validate checks the list-nodes request; there is currently nothing to verify.
func (p *ListNodesRequestExtended) Validate() error {
	return nil
}

// ProduceRequest builds the DAO list request. Every object meta is normalized
// (namespace and node name cleared); if any meta normalizes to empty, the
// accumulated filters are discarded and an unfiltered request (query
// condition only) is returned, i.e. "match everything".
func (p *ListNodesRequestExtended) ProduceRequest() *DaoClusterTypes.ListNodesRequest {
	request := DaoClusterTypes.NewListNodesRequest()
	request.QueryCondition = common.QueryConditionExtend{Condition: p.GetQueryCondition()}.QueryCondition()
	if p.GetObjectMeta() != nil {
		for _, meta := range p.GetObjectMeta() {
			// Normalize request
			objectMeta := NewObjectMeta(meta)
			objectMeta.Namespace = ""
			objectMeta.NodeName = ""
			if objectMeta.IsEmpty() {
				// An empty meta matches all nodes: drop accumulated filters.
				request := DaoClusterTypes.NewListNodesRequest()
				request.QueryCondition = common.QueryConditionExtend{Condition: p.GetQueryCondition()}.QueryCondition()
				return request
			}
			request.ObjectMeta = append(request.ObjectMeta, &objectMeta)
		}
	}
	return request
}
// Validate checks the delete-nodes request; there is currently nothing to verify.
func (p *DeleteNodesRequestExtended) Validate() error {
	return nil
}

// ProduceRequest builds the DAO delete request, normalizing each object meta
// (namespace and node name cleared); if any meta normalizes to empty, the
// filters are discarded and an unfiltered "delete all" request is returned.
func (p *DeleteNodesRequestExtended) ProduceRequest() *DaoClusterTypes.DeleteNodesRequest {
	request := DaoClusterTypes.NewDeleteNodesRequest()
	if p.GetObjectMeta() != nil {
		for _, meta := range p.GetObjectMeta() {
			// Normalize request
			objectMeta := NewObjectMeta(meta)
			objectMeta.Namespace = ""
			objectMeta.NodeName = ""
			if objectMeta.IsEmpty() {
				// An empty meta matches all nodes: drop accumulated filters.
				request := DaoClusterTypes.NewDeleteNodesRequest()
				return request
			}
			request.ObjectMeta = append(request.ObjectMeta, &objectMeta)
		}
	}
	return request
}
|
package control
import (
"net/http"
"encoding/json"
"fmt"
)
//状态为返回
func CodeReturn(w http.ResponseWriter,httpCode int ){
w.Header().Add("Content-type","text/html;charset=utf-8")
w.WriteHeader(httpCode);
return;
}
//json返回
func JsonReturn(w http.ResponseWriter,data interface{}){
w.Header().Add("Content-type","text/json;charset=utf-8");
json, err := json.MarshalIndent(data,"","\t")
if(err!=nil){
fmt.Println("<control.JsonReturn>")
}
w.WriteHeader(200);
w.Write(json);
return;
}
func htmlReturn(w http.ResponseWriter,data string){
w.Header().Add("Content-type","text/html;charset=utf-8");
w.WriteHeader(200);
reData:=[]byte(data);
w.Write(reData);
return ;
} |
package linkeddata
import (
"encoding/json"
"github.com/google/uuid"
)
// ObjectCapabilityInvocation is a linked-data capability invocation: a unique
// id, the action being invoked, and an optional linked-data proof.
type ObjectCapabilityInvocation struct {
	Id uuid.UUID `json:"id"`
	Action string `json:"action"`
	Proof *Proof `json:"proof,omitempty"`
}
// Clone returns a deep copy of the invocation via a JSON round-trip.
// Fix: the copy was previously decoded into a DidDocument (copy-paste from a
// sibling type), so Clone returned the wrong concrete type and dropped every
// invocation field.
func (d *ObjectCapabilityInvocation) Clone() Signable {
	b, _ := json.Marshal(d)
	var clone ObjectCapabilityInvocation
	json.Unmarshal(b, &clone)
	return &clone
}
// GetProof returns the invocation's proof, or nil when unsigned.
func (d *ObjectCapabilityInvocation) GetProof() *Proof {
	return d.Proof
}

// SetProof attaches (or clears) the invocation's proof.
func (d *ObjectCapabilityInvocation) SetProof(p *Proof) {
	d.Proof = p
}
|
package services
import (
"github.com/nu7hatch/gouuid"
"log"
)
// BetService creates bets and hands them to a BetPublisher.
type BetService struct {
	betPublisher BetPublisher
}

// NewBetService wires a BetService to the given publisher.
func NewBetService(publisher BetPublisher) *BetService {
	return &BetService{
		betPublisher: publisher,
	}
}
// Publisher gives the bet a fresh UUID and sends the bet message to the queue.
// Fix: a uuid failure used to call log.Fatalf, killing the whole process from
// library code; it is now logged and the error is returned to the caller.
func (e BetService) Publisher(customerId string, selectionId string, selectionCoefficient float64, payment float64) error {
	id, err := uuid.NewV4()
	if err != nil {
		log.Printf("%s: %s", "failed to create uuid", err)
		return err
	}
	return e.betPublisher.Publish(id.String(), customerId, selectionId, selectionCoefficient, payment)
}
|
/*
* macky - Simple MU* Non-Client
*
* See README.md for usage
*
* See LICENSE for licensing info
*
* Written Sept 2013 Kutani
*/
package main
import (
"bufio"
"container/list"
"encoding/json"
"fmt"
"net"
"os"
"strings"
"syscall"
)
// sList holds every active Server session.
var sList = list.New()

// sListAdd and sListDel feed ListHandler, which owns all mutation of sList.
var sListAdd chan *Server = make(chan *Server, 1)
var sListDel chan *Server = make(chan *Server, 1)

// ListHandler serializes all additions to and removals from the session list;
// it is intended to run as a single goroutine for the life of the process.
func ListHandler(add <-chan *Server, del <-chan *Server) {
	for {
		select {
		case svr := <-add:
			svr.e = sList.PushFront(svr)
		case svr := <-del:
			sList.Remove(svr.e)
		}
	}
}
// SessionExists reports whether the session directory name s is already
// attached to a live server in the global session list.
// The target path is now computed once before the loop (it was rebuilt with
// fmt.Sprint on every iteration).
func SessionExists(s string) bool {
	target := fmt.Sprint("connections/", s)
	for e := sList.Front(); e != nil; e = e.Next() {
		if e.Value.(*Server).path == target {
			return true
		}
	}
	return false
}
// CloseAllSessions shuts down every connected session.
// NOTE(review): Server.Close pushes to sListDel, so ListHandler may remove
// elements while this loop iterates the same list — confirm this cannot race.
func CloseAllSessions() {
	fmt.Println("Closing all sessions")
	for e := sList.Front(); e != nil; e = e.Next() {
		e.Value.(*Server).Close()
	}
}

// Server is one MU* connection: its on-disk session directory (path), the
// dial/login parameters decoded from the conf file (exported fields), the
// live TCP connection, a control channel for injected commands, and the
// session's element in sList.
type Server struct {
	path string
	Address string
	Port int
	Tls bool
	Login string
	User string
	Pass string
	netConn net.Conn
	Control chan string
	e *list.Element
}
// ReadConf loads connections/<dir>/conf, decodes its JSON into the server's
// exported fields, and records the session directory in s.path. Errors are
// printed and returned.
func (s *Server) ReadConf(dir string) error {
	confFile, err := os.Open(fmt.Sprintf("connections/%s/conf", dir))
	if err != nil {
		fmt.Println(err)
		return err
	}
	defer confFile.Close()
	s.path = fmt.Sprint("connections/", dir)
	if err := json.NewDecoder(bufio.NewReader(confFile)).Decode(s); err != nil {
		fmt.Println(err)
		return err
	}
	return nil
}
// Connect dials the server's TCP address, creates the session's input
// FIFO, and registers the session in the global list.
func (s *Server) Connect() error {
	conn, err := net.Dial("tcp", fmt.Sprint(s.Address, ":", s.Port))
	if err != nil {
		fmt.Println(err)
		return err
	}
	s.netConn = conn
	if err := s.build_fifo(); err != nil {
		fmt.Println(err)
		return err
	}
	sListAdd <- s
	return nil
}
// build_fifo creates the session's "in" FIFO. A stale FIFO left over
// from a previous run is removed and creation is retried.
func (s *Server) build_fifo() error {
	name := fmt.Sprint(s.path, "/in")
	for {
		err := syscall.Mkfifo(name, syscall.S_IRWXU)
		if err == nil {
			return nil
		}
		if !os.IsExist(err) {
			return err
		}
		if err := os.Remove(name); err != nil {
			return err
		}
	}
}
// clean_fifo removes the session's "in" FIFO, printing (but otherwise
// ignoring) any removal error.
func (s *Server) clean_fifo() {
	if err := os.Remove(fmt.Sprint(s.path, "/in")); err != nil {
		fmt.Println(err)
	}
}

// Close tears a session down in order: network connection, FIFO,
// deregistration from the global list, then a cleanup message to the
// session's Process loop.
func (s *Server) Close() {
	fmt.Println(s.path, ".Close()")
	s.netConn.Close()
	s.clean_fifo()
	sListDel <- s
	s.Control <- "_CTL_CLEANUP"
}

// LogIn sends the configured login command with %u and %p replaced by
// the stored username and password (in that order).
func (s *Server) LogIn() {
	cmd := strings.Replace(s.Login, "%u", s.User, -1)
	s.Control <- strings.Replace(cmd, "%p", s.Pass, -1)
}
// ReadIn repeatedly opens the session's "in" FIFO, reads one
// newline-terminated message per open, and forwards it on out. It
// returns when the FIFO has been removed (session shutting down) or
// when a "CTL_CLOSE" message is read, which is forwarded before
// returning.
func (s *Server) ReadIn(out chan<- string) {
	defer fmt.Println(s.path, ".ReadIn() Closing")
	for {
		// Open blocks until a writer appears; the FIFO is re-opened for
		// each message so every external writer gets a fresh stream.
		infile, err := os.OpenFile(fmt.Sprint(s.path, "/in"), os.O_RDONLY, 0666)
		if err != nil {
			if os.IsNotExist(err) {
				return
			}
			fmt.Println(err)
			return
		}
		inbuf := bufio.NewReader(infile)
		if msg, err := inbuf.ReadString('\n'); err == nil {
			msg = msg[:len(msg)-1] // strip trailing newline
			if msg == "CTL_CLOSE" {
				infile.Close()
				out <- msg
				return
			}
			out <- msg
		}
		infile.Close()
	}
}
// WriteOut appends messages from in to the session's "out" file, one
// per line, flushing after each. When in is closed it writes a final
// "***CLOSED***" marker and returns.
func (s *Server) WriteOut(in <-chan string) {
	defer fmt.Println(s.path, ".WriteOut() Closing")
	outfile, err := os.OpenFile(fmt.Sprint(s.path, "/out"), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer outfile.Close()
	outbuf := bufio.NewWriter(outfile)
	for {
		out, ok := <-in
		if !ok {
			_, _ = outbuf.WriteString("***CLOSED***\n")
			outbuf.Flush()
			return
		}
		_, err := outbuf.WriteString(fmt.Sprintln(out))
		if err == nil {
			// Flush can fail too (e.g. reader gone); surface it.
			err = outbuf.Flush()
		}
		if err != nil {
			// Previously errors were reported only when the returned
			// count was also short of len(out); since Sprintln appends
			// a newline, an error on the final byte was silently
			// dropped. Report any write or flush error.
			fmt.Println(err)
			break
		}
	}
}
// Process is the per-session event loop. It starts the four I/O
// goroutines (network receive/send, FIFO read/write) and routes
// messages between them until a "_CTL_CLEANUP" control message
// arrives, at which point it closes the outbound channels and returns.
func (s *Server) Process() {
	defer fmt.Println(s.path, ".Process() Closing")
	var rec chan string = make(chan string, 1) // network -> this loop
	var snd chan string = make(chan string, 1) // this loop -> network
	var in chan string = make(chan string, 1)  // FIFO "in" -> this loop
	var out chan string = make(chan string, 1) // this loop -> "out" file
	go s.Recieve(rec)
	go s.Send(snd)
	go s.ReadIn(in)
	go s.WriteOut(out)
	for {
		select {
		case msg := <-s.Control:
			if msg == "_CTL_CLEANUP" {
				close(snd)
				close(out)
				return
			}
			snd <- msg
		case msg := <-in:
			// Parse command messages here!
			if strings.HasPrefix(msg, "CTL_") {
				if msg == "CTL_CLOSE" {
					// Close blocks on sListDel and s.Control, so run it
					// in its own goroutine to avoid deadlocking here.
					go s.Close()
					break
				}
				mainControl <- msg
				break
			}
			// Echo local input to the log file and send to the server.
			out <- msg
			snd <- msg
		case msg := <-rec:
			// Format output here!
			out <- msg
		}
	}
}
// Send writes each message from in to the network connection, one per
// line, flushing after every message. It returns when in is closed.
func (s *Server) Send(in <-chan string) {
	defer fmt.Println(s.path, ".Send() Closing")
	conbuf := bufio.NewWriter(s.netConn)
	for {
		msg, ok := <-in
		if !ok {
			conbuf.Flush()
			return
		}
		_, err := conbuf.WriteString(fmt.Sprintln(msg))
		if err == nil {
			// Flush errors (e.g. connection reset) were previously
			// ignored entirely; surface them like write errors.
			err = conbuf.Flush()
		}
		if err != nil {
			// Previously the error was reported only when the written
			// count was also short of len(msg); since Sprintln appends
			// a newline, an error on the final byte was dropped.
			fmt.Println(err)
			break
		}
	}
}
// Recieve (sic — the misspelled name is kept because callers use it)
// reads newline-terminated lines from the network connection and
// forwards them, newline stripped, on out. It returns on any read
// error; note that a final partial line without a newline is dropped.
func (s *Server) Recieve(out chan<- string) {
	defer fmt.Println(s.path, ".Recieve() Closing")
	conbuf := bufio.NewReader(s.netConn)
	for {
		if ret, err := conbuf.ReadString('\n'); err == nil {
			ret = ret[:len(ret)-1] // strip trailing newline
			out <- ret
		} else {
			break
		}
	}
}
// mainControl carries supervisor-level CTL_* commands — read from the
// top-level "in" FIFO and forwarded by sessions — to main's loop.
var mainControl chan string = make(chan string, 1)
// readControl repeatedly opens the supervisor "in" FIFO, reads one
// newline-terminated command per open, and forwards it on mainControl.
// It returns once the FIFO has been removed (shutdown).
func readControl() {
	for {
		infile, err := os.OpenFile("in", os.O_RDONLY, 0666)
		if err != nil {
			if os.IsNotExist(err) {
				return
			}
			fmt.Println(err)
			return
		}
		inbuf := bufio.NewReader(infile)
		if msg, err := inbuf.ReadString('\n'); err == nil {
			msg = msg[:len(msg)-1] // strip trailing newline
			mainControl <- msg
		}
		infile.Close()
	}
}
// main sets up the supervisor "in" FIFO and then processes CTL_*
// commands until CTL_QUIT. "CTL_CONNECT <name>..." starts one session
// per named connection directory. The previously present `return`
// after the command loop was unreachable and has been removed.
func main() {
	if _, err := os.Stat("connections"); os.IsNotExist(err) {
		fmt.Println("connections directory does not exist; please see README.md")
		return
	}
	fmt.Println("macky - v0.1 - Starting Up")
	go ListHandler(sListAdd, sListDel)
	// Set up our supervisor `in` FIFO, removing any stale one first.
	for {
		if err := syscall.Mkfifo("in", syscall.S_IRWXU); err != nil {
			if os.IsExist(err) {
				if err := os.Remove("in"); err != nil {
					fmt.Println(err)
					return
				}
				continue
			}
			fmt.Println(err)
			return
		}
		break
	}
	defer func() {
		if err := os.Remove("in"); err != nil {
			fmt.Println(err)
		}
	}()
	defer CloseAllSessions() // Ensure we clean up
	go readControl()
	for {
		msg := <-mainControl
		fmt.Println("> ", msg)
		prs := strings.Split(msg, " ")
		switch prs[0] {
		case "CTL_QUIT":
			fmt.Println("Exiting, goodbye")
			return
		case "CTL_CONNECT":
			if len(prs) < 2 {
				fmt.Println("CTL_CONNECT: Missing arguments")
				break
			}
			for i := 1; i < len(prs); i++ {
				if SessionExists(prs[i]) {
					// TODO - Check if disconnected and if so, connect
					continue
				}
				s := new(Server)
				if err := s.ReadConf(prs[i]); err != nil {
					continue
				}
				if err := s.Connect(); err != nil {
					continue
				}
				s.Control = make(chan string)
				go s.Process()
				s.LogIn()
			}
		}
	}
}
|
package services
import (
"time"
"github.com/ne7ermore/gRBAC/common"
"github.com/ne7ermore/gRBAC/plugin"
)
// Permission is the service-layer view of a permission, flattened from
// the storage plugin's model for JSON serialization.
type Permission struct {
	Id string `json:"id"`
	Name string `json:"name"`
	Descrip string `json:"descrip"`
	Sep string `json:"sep"`
	CreateTime time.Time `json:"createTime"`
	UpdateTime time.Time `json:"updateTime"`
}
// NewPermissionFromModel converts a plugin-level permission model into
// the service-layer Permission representation.
func NewPermissionFromModel(m plugin.Permission) *Permission {
	p := new(Permission)
	p.Id = m.Getid()
	p.Name = m.GetName()
	p.Descrip = m.GetDescrip()
	p.Sep = m.GetSep()
	p.CreateTime = m.GetCreateTime()
	p.UpdateTime = m.GetUpdateTime()
	return p
}
// CreatePermisson (sic — name kept for existing callers) stores a new
// permission in the pool, registers it with the common permission
// registry, and returns the stored record.
func CreatePermisson(name, des string, p plugin.PermissionPools) (*Permission, error) {
	id, err := p.New(name, des)
	if err != nil {
		return nil, err
	}
	perm, err := GetPermById(id, p)
	if err != nil {
		return nil, err
	}
	common.Get().NewPerm(common.NewFirstP(id, des))
	return perm, nil
}
// GetPermById fetches a permission from the pool by id.
func GetPermById(id string, p plugin.PermissionPools) (*Permission, error) {
	m, err := p.Get(id)
	if err != nil {
		return nil, err
	}
	return NewPermissionFromModel(m), nil
}

// GetPermByDesc fetches a permission from the pool by description.
func GetPermByDesc(descrip string, p plugin.PermissionPools) (*Permission, error) {
	m, err := p.GetByDesc(descrip)
	if err != nil {
		return nil, err
	}
	return NewPermissionFromModel(m), nil
}
// UpdatePerm applies the given field updates to a stored permission
// and returns the refreshed record.
func UpdatePerm(id string, update map[string]string, p plugin.PermissionPools) (*Permission, error) {
	err := p.Update(id, update)
	if err != nil {
		return nil, err
	}
	return GetPermById(id, p)
}

// GetPerms lists permissions from the pool with paging (skip/limit)
// and ordering by field.
func GetPerms(skip, limit int, field string, p plugin.PermissionPools) ([]*Permission, error) {
	models, err := p.Gets(skip, limit, field)
	if err != nil {
		return nil, err
	}
	out := make([]*Permission, 0, limit)
	for _, m := range models {
		out = append(out, NewPermissionFromModel(m))
	}
	return out, nil
}
// GetPermissionsCount returns the number of permissions in the pool.
func GetPermissionsCount(pp plugin.PermissionPools) int {
	return pp.Counts()
}
|
package printer
import (
"fmt"
"text/template"
"strings"
"github.com/davyxu/tabtoy/v2/i18n"
"github.com/davyxu/tabtoy/v2/model"
)
// cppTemplate is the text/template used to render the generated C++
// header: enum definitions, plain data classes, and the combine-struct
// class with its index-lookup and Deserialize code regions. The string
// body is emitted verbatim into generated files — do not reformat it.
const cppTemplate = `// Generated by github.com/davyxu/tabtoy
// Version: {{.ToolVersion}}
// DO NOT EDIT!!
#include <vector>
#include <map>
#include <string>
namespace {{.Namespace}}{{$globalIndex:=.Indexes}}{{$verticalFields:=.VerticalFields}}
{
{{range .Enums}}
// Defined in table: {{.DefinedTable}}
enum class {{.Name}}
{
{{range .Fields}}
{{.Comment}}
{{.FieldDescriptor.Name}} = {{.FieldDescriptor.EnumValue}}, {{.Alias}}
{{end}}
};
{{end}}
{{range .Classes}}
{{if not .IsCombine}}
// Defined in table: {{.DefinedTable}}
class {{.Name}}
{
public:
{{range .Fields}}
{{.Comment}}
{{.TypeCode}} {{.Alias}}
{{end}}
}; {{end}}
{{end}}
{{range .Classes}}
{{if .IsCombine}}
// Defined in table: {{.DefinedTable}}
class {{.Name}}
{
{{if .IsCombine}}
public:
tabtoy::Logger TableLogger;
{{end}}
{{range .Fields}}
{{.Comment}}
{{.TypeCode}} {{.Alias}}
{{end}}
{{if .IsCombine}}
//#region Index code
{{range $globalIndex}}std::map<{{.IndexType}}, {{.RowType}}> _{{.RowName}}By{{.IndexName}};
public:
class {{.RowType}}* Get{{.RowName}}By{{.IndexName}}({{.IndexType}} {{.IndexName}}, {{.RowType}}* def = nullptr)
{
auto ret = _{{.RowName}}By{{.IndexName}}.find( {{.IndexName}} );
if ( ret != _{{.RowName}}By{{.IndexName}}.end() )
{
return &ret->second;
}
if ( def == nullptr )
{
TableLogger.ErrorLine("Get{{.RowName}}By{{.IndexName}} failed, {{.IndexName}}: %s", {{.IndexName}});
}
return def;
}
{{end}}
{{range $verticalFields}}
public:
class {{.StructName}}* Get{{.Name}}( )
{
return &{{.Name}}_[0];
}
{{end}}
//#endregion
//#region Deserialize code
{{range $.Classes}}
public:
static void Deserialize( {{.Name}}& ins, tabtoy::DataReader& reader )
{
int tag = -1;
while ( -1 != (tag = reader.ReadTag()))
{
switch (tag)
{ {{range .Fields}}
case {{.Tag}}:
{
{{.ReadCode}}
}
break; {{end}}
}
}
{{range $a, $row :=.IndexedFields}}
// Build {{$row.FieldDescriptor.Name}} Index
for( size_t i = 0;i< ins.{{$row.FieldDescriptor.Name}}_.size();i++)
{
auto element = ins.{{$row.FieldDescriptor.Name}}_[i];
{{range $b, $key := .IndexKeys}}
ins._{{$row.FieldDescriptor.Name}}By{{$key.Name}}.emplace(std::make_pair(element.{{$key.Name}}_, element));
{{end}}
}
{{end}}
}{{end}}
//#endregion
{{end}}
};
{{end}}
{{end}}
}
`
// cppIndexField adapts a TableIndex for the C++ template, exposing the
// index/row names and the C++-mapped key type.
type cppIndexField struct {
	TableIndex
}

// IndexName returns the name of the indexing field.
func (self cppIndexField) IndexName() string {
	return self.Index.Name
}

// RowType returns the C++ class name of the indexed row type.
func (self cppIndexField) RowType() string {
	return self.Row.Complex.Name
}

// RowName returns the field name of the indexed row.
func (self cppIndexField) RowName() string {
	return self.Row.Name
}
// IndexType maps the index field's type to the C++ type used as the
// std::map key; unsupported types are logged and rendered as
// "unknown".
func (self cppIndexField) IndexType() string {
	switch self.Index.Type {
	case model.FieldType_Enum:
		return self.Index.Complex.Name
	case model.FieldType_String:
		return "std::string"
	case model.FieldType_Bool:
		return "bool"
	case model.FieldType_Float:
		return "float"
	case model.FieldType_Int32:
		return "int"
	case model.FieldType_UInt32:
		return "unsigned int"
	case model.FieldType_Int64:
		return "long long"
	case model.FieldType_UInt64:
		return "unsigned long long"
	}
	log.Errorf("%s can not be index ", self.Index.String())
	return "unknown"
}
// cppField wraps a FieldDescriptor with the C++-printer helpers the
// template calls (type mapping, tags, comments, index keys).
type cppField struct {
	*model.FieldDescriptor
	IndexKeys []*model.FieldDescriptor // index key fields when this field's struct is indexed
	parentStruct *cppStructModel // the struct model this field belongs to
}
// Alias renders the field's "Alias" meta value as a trailing C++ line
// comment, or an empty string when no alias is set.
func (self cppField) Alias() string {
	alias := self.FieldDescriptor.Meta.GetString("Alias")
	if alias == "" {
		return ""
	}
	return "// " + alias
}

// Comment renders the field comment as a C++ XML-doc summary block, or
// an empty string when there is no comment. (Formatting per reviewer
// suggestion.)
func (self cppField) Comment() string {
	raw := self.FieldDescriptor.Comment
	if raw == "" {
		return ""
	}
	return "/// <summary> \n /// " + strings.Replace(raw, "\n", "\n ///", -1) + "\n /// </summary>"
}
// ReadCode emits the C++ statement that reads this field from the
// DataReader: scalars are assigned, repeated fields emplace_back, and
// struct fields pass the "Deserialize" handler through to the reader.
func (self cppField) ReadCode() string {
	var baseType string
	var descHandlerCode string
	switch self.Type {
	case model.FieldType_Int32:
		baseType = "Int32"
	case model.FieldType_UInt32:
		baseType = "UInt32"
	case model.FieldType_Int64:
		baseType = "Int64"
	case model.FieldType_UInt64:
		baseType = "UInt64"
	case model.FieldType_String:
		baseType = "String"
	case model.FieldType_Float:
		baseType = "Float"
	case model.FieldType_Bool:
		baseType = "Bool"
	case model.FieldType_Enum:
		// Enum/struct fields need a resolved complex type to name.
		if self.Complex == nil {
			return "unknown"
		}
		baseType = fmt.Sprintf("Enum<%s>", self.Complex.Name)
	case model.FieldType_Struct:
		if self.Complex == nil {
			return "unknown"
		}
		baseType = fmt.Sprintf("Struct<%s>", self.Complex.Name)
	}
	if self.Type == model.FieldType_Struct {
		descHandlerCode = "Deserialize"
		//descHandlerCode = fmt.Sprintf("%sDeserializeHandler", self.Complex.Name)
	}
	if self.IsRepeated {
		return fmt.Sprintf("ins.%s_.emplace_back( reader.Read%s(%s) );", self.Name, baseType, descHandlerCode)
	} else {
		return fmt.Sprintf("ins.%s_ = reader.Read%s(%s);", self.Name, baseType, descHandlerCode)
	}
}
// Tag returns the field's wire tag in hex. Fields of a combine struct
// are re-tagged as table-typed entries.
func (self cppField) Tag() string {
	if self.parentStruct.IsCombine() {
		return fmt.Sprintf("0x%x", model.MakeTag(int32(model.FieldType_Table), self.Order))
	}
	return fmt.Sprintf("0x%x", self.FieldDescriptor.Tag())
}

// StructName returns the C++ class name of the field's complex type,
// or a placeholder when the field has no complex type.
func (self cppField) StructName() string {
	if self.Complex != nil {
		return self.Complex.Name
	}
	return "[NotComplex]"
}

// IsVerticalStruct reports whether the field's complex type comes from
// a table marked with the Vertical pragma.
func (self cppField) IsVerticalStruct() bool {
	c := self.FieldDescriptor.Complex
	return c != nil && c.File.Pragma.GetBool("Vertical")
}
// TypeCode renders the full C++ member declaration for this field:
// visibility, std::vector wrapping for repeated fields, and a
// default-value initializer for plain scalar members.
func (self cppField) TypeCode() string {
	var raw string
	switch self.Type {
	case model.FieldType_Int32:
		raw = "int"
	case model.FieldType_UInt32:
		raw = "unsigned int"
	case model.FieldType_Int64:
		raw = "long long"
	case model.FieldType_UInt64:
		raw = "unsigned long long"
	case model.FieldType_String:
		raw = "std::string"
	case model.FieldType_Float:
		raw = "float"
	case model.FieldType_Bool:
		raw = "bool"
	case model.FieldType_Enum:
		if self.Complex == nil {
			log.Errorln("unknown enum type ", self.Type)
			return "unknown"
		}
		raw = self.Complex.Name
	case model.FieldType_Struct:
		if self.Complex == nil {
			log.Errorln("unknown struct type ", self.Type, self.FieldDescriptor.Name, self.FieldDescriptor.Parent.Name)
			return "unknown"
		}
		raw = self.Complex.Name
		// Non-repeated struct member: declared without an initializer.
		if !self.IsRepeated {
			return fmt.Sprintf("public:\r\n \t\t%s %s_;", raw, self.Name)
		}
	default:
		raw = "unknown"
	}
	if self.IsRepeated {
		return fmt.Sprintf("public:\r\n \t\tstd::vector<%s> %s_;", raw, self.Name)
	}
	return fmt.Sprintf("public:\r\n \t\t%s %s_ = %s;", raw, self.Name, wrapCppDefaultValue(self.FieldDescriptor))
}
// wrapCppDefaultValue renders a field's default value as a C++
// initializer literal: enums are namespace-qualified, strings quoted,
// and floats given an explicit "f" suffix (with ".0" inserted when the
// default has no decimal point). Other types pass through unchanged.
func wrapCppDefaultValue(fd *model.FieldDescriptor) string {
	switch fd.Type {
	case model.FieldType_Enum:
		return fmt.Sprintf("%s::%s", fd.Complex.Name, fd.DefaultValue())
	case model.FieldType_String:
		return fmt.Sprintf("\"%s\"", fd.DefaultValue())
	case model.FieldType_Float:
		defValue := fd.DefaultValue()
		// strings.Contains, not ContainsAny: we are testing for the
		// single "." substring; ContainsAny with a one-character set is
		// the same operation spelled misleadingly.
		if !strings.Contains(defValue, ".") {
			return fmt.Sprintf("%s.0f", defValue)
		}
		return fmt.Sprintf("%sf", defValue)
	}
	return fd.DefaultValue()
}
// cppStructModel pairs a type descriptor with its printer-ready
// fields.
type cppStructModel struct {
	*model.Descriptor
	Fields []cppField
	IndexedFields []cppField // forms a tree-shaped index hierarchy together with cppField.IndexKeys
}

// DefinedTable returns the name of the table the type was defined in.
func (self *cppStructModel) DefinedTable() string {
	return self.File.Name
}

// Name returns the type's (and generated C++ class's) name.
func (self *cppStructModel) Name() string {
	return self.Descriptor.Name
}

// IsCombine reports whether this descriptor is the combine struct.
func (self *cppStructModel) IsCombine() bool {
	return self.Descriptor.Usage == model.DescriptorUsage_CombineStruct
}

// cppFileModel is the root data handed to the C++ template.
type cppFileModel struct {
	Namespace string
	ToolVersion string
	Classes []*cppStructModel
	Enums []*cppStructModel
	Indexes []cppIndexField // global indexes
	VerticalFields []cppField
}

// cppPrinter renders a file descriptor as a C++ header.
type cppPrinter struct {
}
// Run renders the whole file descriptor through the C++ template:
// global indexes first, then each ".cpp"-tagged type with its fields,
// indexed fields, and vertical fields. It returns nil if the template
// fails to parse or execute (the error is logged).
func (self *cppPrinter) Run(g *Globals, outputClass int) *Stream {
	tpl, err := template.New("cpp").Parse(cppTemplate)
	if err != nil {
		log.Errorln(err)
		return nil
	}
	var m cppFileModel
	m.Namespace = g.FileDescriptor.Pragma.GetString("Package")
	m.ToolVersion = g.Version
	// Global indexes for the combine struct.
	for _, ti := range g.GlobalIndexes {
		// Indexes honor the per-file output-tag restriction too.
		if !ti.Index.Parent.File.MatchTag(".cpp") {
			continue
		}
		m.Indexes = append(m.Indexes, cppIndexField{TableIndex: ti})
	}
	// Walk every type in the file.
	for _, d := range g.FileDescriptor.Descriptors {
		// This type is excluded from .cpp output.
		if !d.File.MatchTag(".cpp") {
			log.Infof("%s: %s", i18n.String(i18n.Printer_IgnoredByOutputTag), d.Name)
			continue
		}
		var sm cppStructModel
		sm.Descriptor = d
		switch d.Kind {
		case model.DescriptorKind_Struct:
			m.Classes = append(m.Classes, &sm)
		case model.DescriptorKind_Enum:
			m.Enums = append(m.Enums, &sm)
		}
		// Walk the type's fields.
		for _, fd := range d.Fields {
			// Fields of the CombineStruct that correspond to XXDefine.
			if d.Usage == model.DescriptorUsage_CombineStruct {
				// This field's type is excluded from output.
				if fd.Complex != nil && !fd.Complex.File.MatchTag(".cpp") {
					continue
				}
				// Only build lookup entries when the struct is indexed.
				if fd.Complex != nil && len(fd.Complex.Indexes) > 0 {
					// The indexed struct.
					indexedField := cppField{FieldDescriptor: fd, parentStruct: &sm}
					// Its index key fields.
					for _, key := range fd.Complex.Indexes {
						indexedField.IndexKeys = append(indexedField.IndexKeys, key)
					}
					sm.IndexedFields = append(sm.IndexedFields, indexedField)
				}
				if fd.Complex != nil && fd.Complex.File.Pragma.GetBool("Vertical") {
					m.VerticalFields = append(m.VerticalFields, cppField{FieldDescriptor: fd, parentStruct: &sm})
				}
			}
			csField := cppField{FieldDescriptor: fd, parentStruct: &sm}
			sm.Fields = append(sm.Fields, csField)
		}
	}
	bf := NewStream()
	err = tpl.Execute(bf.Buffer(), &m)
	if err != nil {
		log.Errorln(err)
		return nil
	}
	return bf
}
// init registers this printer under the "cpp" output kind.
func init() {
	RegisterPrinter("cpp", &cppPrinter{})
}
|
package types
import (
sdk "github.com/cosmos/cosmos-sdk/types"
)
// Module is implemented by application modules that expose an SDK
// message handler.
type Module interface {
	Handler() sdk.Handler
}
|
package main
// displayWidgets registers a demo page for each supported QML widget
// by passing its display name and QML snippet to createWidget. The QML
// source strings are passed through verbatim.
func displayWidgets() {
	//Label
	createWidget("Label", `
Item {
Label {
anchors.centerIn: parent
text: "This Is A Label"
}
}`)
	//Text
	createWidget("Text", `
Item {
Text {
anchors.centerIn: parent
text: "This Is A Text Item"
}
}`)
	//Calendar Widget
	createWidget("Calendar Widget", `
Calendar {
onClicked: console.log("Calender Widget Selected Date Changed To:", date)
}`)
	//Progress Bar
	createWidget("Progress Bar", `
Item {
ProgressBar {
id: progressBar
anchors.centerIn: parent
minimumValue: 0
maximumValue: 1000
value: 500
NumberAnimation on value {
from: 0
to: 1000
duration: 4000
loops: Animation.Infinite
}
}
}`)
	//Image
	createWidget("Image", `
Image {
fillMode: Image.PreserveAspectFit
source: "qrc:/qml/earth.png"
}`)
	//BusyIndicator
	createWidget("Busy Indicator", `
Item {
BusyIndicator {
anchors.centerIn: parent
}
}`)
}
|
package backend
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/sirupsen/logrus"
"github.com/jim-minter/rp/pkg/api"
"github.com/jim-minter/rp/pkg/database"
"github.com/jim-minter/rp/pkg/env"
)
// Worker-pool tuning for the backend.
const (
	maxWorkers = 100 // maximum concurrently running cluster operations
	maxDequeueCount = 5 // documents dequeued more often than this are failed
)

// backend dequeues OpenShift cluster documents and drives their
// provisioning operations on bounded worker goroutines.
type backend struct {
	baseLog *logrus.Entry
	db database.OpenShiftClusters
	authorizer autorest.Authorizer
	mu sync.Mutex
	cond *sync.Cond // signaled when a worker finishes or stop is requested
	workers int32 // number of running workers; updated atomically
	stopping atomic.Value // holds bool; true once stop has been requested
	domain string // DNS domain obtained from env.DNS
}

// Runnable represents a runnable object
type Runnable interface {
	// Run processes work until the stop channel is closed.
	Run(stop <-chan struct{})
}
// NewBackend returns a new runnable backend wired to the given
// database, with its DNS domain and first-party authorizer resolved
// from env.
func NewBackend(ctx context.Context, log *logrus.Entry, env env.Interface, db database.OpenShiftClusters) (Runnable, error) {
	b := &backend{
		baseLog: log,
		db: db,
	}
	domain, err := env.DNS(ctx)
	if err != nil {
		return nil, err
	}
	b.domain = domain
	authorizer, err := env.FirstPartyAuthorizer(ctx)
	if err != nil {
		return nil, err
	}
	b.authorizer = authorizer
	b.cond = sync.NewCond(&b.mu)
	b.stopping.Store(false)
	return b, nil
}
// Run dequeues cluster documents and dispatches each to a worker
// goroutine until stop is closed. At most maxWorkers workers are
// intended to run at once, and documents dequeued more than
// maxDequeueCount times are marked failed instead of retried.
func (b *backend) Run(stop <-chan struct{}) {
	t := time.NewTicker(time.Second)
	defer t.Stop()
	go func() {
		<-stop
		b.baseLog.Print("stopping")
		b.stopping.Store(true)
		b.cond.Signal()
	}()
	for {
		// Wait for a free worker slot (or a stop request).
		b.mu.Lock()
		for atomic.LoadInt32(&b.workers) == maxWorkers && !b.stopping.Load().(bool) {
			b.cond.Wait()
		}
		b.mu.Unlock()
		if b.stopping.Load().(bool) {
			break
		}
		doc, err := b.db.Dequeue()
		if err != nil || doc == nil {
			if err != nil {
				b.baseLog.Error(err)
			}
			// Nothing to do (or a transient error); wait for the next
			// ticker interval before polling again.
			<-t.C
			continue
		}
		log := b.baseLog.WithField("resource", doc.OpenShiftCluster.ID)
		if doc.Dequeues > maxDequeueCount {
			log.Warnf("dequeued %d times, failing", doc.Dequeues)
			err = b.setTerminalState(doc, api.ProvisioningStateFailed)
			if err != nil {
				log.Error(err)
			}
		} else {
			log.Print("dequeued")
			go func() {
				// NOTE(review): the worker counter is incremented here,
				// after the capacity check above, so several dispatches
				// could race past maxWorkers before the adds land —
				// confirm whether a strict cap is required.
				atomic.AddInt32(&b.workers, 1)
				defer func() {
					atomic.AddInt32(&b.workers, -1)
					b.cond.Signal()
				}()
				t := time.Now()
				err := b.handle(context.Background(), log, doc)
				if err != nil {
					log.Error(err)
				}
				log.WithField("durationMs", int(time.Now().Sub(t)/time.Millisecond)).Print("done")
			}()
		}
	}
}
// handle drives one document through its in-flight provisioning state
// (update or delete) while a heartbeat goroutine keeps the lease
// renewed, then records the terminal state — or removes the document
// entirely after a successful delete.
func (b *backend) handle(ctx context.Context, log *logrus.Entry, doc *api.OpenShiftClusterDocument) error {
	stop := b.heartbeat(log, doc)
	defer stop()
	var err error
	switch doc.OpenShiftCluster.Properties.ProvisioningState {
	case api.ProvisioningStateUpdating:
		log.Print("updating")
		err = b.update(ctx, log, doc)
	case api.ProvisioningStateDeleting:
		log.Print("deleting")
		err = b.delete(ctx, log, doc)
	}
	// Stop the heartbeat before touching terminal state; stop is
	// idempotent, so the deferred call above becomes a no-op.
	stop()
	if err != nil {
		log.Error(err)
		return b.setTerminalState(doc, api.ProvisioningStateFailed)
	}
	switch doc.OpenShiftCluster.Properties.ProvisioningState {
	case api.ProvisioningStateUpdating:
		return b.setTerminalState(doc, api.ProvisioningStateSucceeded)
	case api.ProvisioningStateDeleting:
		return b.db.Delete(doc.OpenShiftCluster.ID)
	default:
		return fmt.Errorf("unexpected state %q", doc.OpenShiftCluster.Properties.ProvisioningState)
	}
}
// heartbeat renews the document's lease every 10 seconds on a
// background goroutine and returns an idempotent function that stops
// the renewal and waits for the goroutine to exit. The goroutine also
// exits on its own if a lease renewal fails.
func (b *backend) heartbeat(log *logrus.Entry, doc *api.OpenShiftClusterDocument) func() {
	var stopped bool
	stop, done := make(chan struct{}), make(chan struct{})
	go func() {
		defer close(done)
		t := time.NewTicker(10 * time.Second)
		defer t.Stop()
		for {
			_, err := b.db.Lease(doc.OpenShiftCluster.ID)
			if err != nil {
				log.Error(err)
				return
			}
			select {
			case <-t.C:
			case <-stop:
				return
			}
		}
	}()
	return func() {
		// Guard with stopped so callers can invoke this twice safely
		// (handle calls it explicitly and via defer).
		if !stopped {
			close(stop)
			<-done
			stopped = true
		}
	}
}
// setTerminalState patches the cluster document into the given
// terminal provisioning state, clearing its lease and dequeue
// bookkeeping at the same time.
func (b *backend) setTerminalState(doc *api.OpenShiftClusterDocument, state api.ProvisioningState) error {
	_, err := b.db.Patch(doc.OpenShiftCluster.ID, func(d *api.OpenShiftClusterDocument) error {
		d.LeaseOwner = nil
		d.LeaseExpires = 0
		d.Dequeues = 0
		d.OpenShiftCluster.Properties.ProvisioningState = state
		return nil
	})
	return err
}
|
package main
import (
"flag"
"fmt"
"io"
"log"
"net/url"
"os"
"strconv"
"time"
"github.com/gorilla/websocket"
)
// Command-line flags controlling the websocket load generator.
var (
	ip = flag.String("ip", "127.0.0.1", "server IP")
	connection = flag.Int("conn", 1, "number of socket connections")
)
// main dials the configured number of websocket connections and then
// pings each of them in a round-robin loop forever. Connections are
// closed gracefully when main returns.
func main() {
	flag.Usage = func() {
		io.WriteString(os.Stderr, `Web socket client generator
Example usage: ./client -ip=127.0.0.1 -conn=10
`)
		flag.PrintDefaults()
	}
	flag.Parse()
	fmt.Printf("ip = %s\n", *ip)
	fmt.Printf("conn = %d\n", *connection)
	u := url.URL{Scheme: "ws", Host: "127.0.0.1:40404", Path: "/"}
	log.Printf("Connecting to %s", u.String())
	var conns []*websocket.Conn
	for i := 0; i < *connection; i++ {
		c, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
		if err != nil {
			// Previously a failed dial still called c.Close() on the
			// nil conn (panic) and appended nil to conns, which the
			// ping loop and deferred close would then dereference.
			// Skip failed connections instead.
			fmt.Println("Fail to connect", i, err)
			continue
		}
		conns = append(conns, c)
		defer func(c *websocket.Conn, i int) {
			c.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), time.Now().Add(time.Second))
			time.Sleep(time.Second)
			c.Close()
			fmt.Println("closed for " + strconv.Itoa(i) + " conn")
		}(c, i)
	}
	log.Printf("Finish initializing %d connections", *connection)
	tts := time.Second
	if *connection > 100 {
		tts = time.Microsecond * 5
	}
	for {
		for i := 0; i < len(conns); i++ {
			time.Sleep(tts)
			conn := conns[i]
			log.Printf("Conn %d sending msg", i)
			if err := conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(time.Second*5)); err != nil {
				fmt.Printf("Fail to receive pong: %v", err)
			}
			conn.WriteMessage(websocket.TextMessage, []byte(fmt.Sprintf("hello from conn %v", i)))
		}
	}
}
|
package application
import (
"fmt"
"github.com/go-gl/glfw/v3.3/glfw"
"github.com/go-gl/mathgl/mgl32"
)
const (
	// DEBUG is the key that dumps the application state log to stdout.
	DEBUG = glfw.KeyH
)

// Drawable is an item the application can update and render, either
// with its own state or with externally supplied view/projection
// uniforms.
type Drawable interface {
	Draw()
	DrawWithUniforms(mgl32.Mat4, mgl32.Mat4)
	Update(float64)
	Log() string
}

// Camera supplies the view/projection matrices and movement controls.
type Camera interface {
	Log() string
	GetViewMatrix() mgl32.Mat4
	GetProjectionMatrix() mgl32.Mat4
	Walk(float32)
	Strafe(float32)
	Lift(float32)
	UpdateDirection(float32, float32)
	GetPosition() mgl32.Vec3
}

// Application owns the window, optional camera, keyboard/mouse state
// maps, and the list of drawable items.
type Application struct {
	window Window
	camera Camera
	cameraSet bool // true once SetCamera has been called
	keyDowns map[glfw.Key]bool
	mouseDowns map[glfw.MouseButton]bool
	MousePosX float64
	MousePosY float64
	items []Drawable
}

// Window is the subset of the GLFW window API the application uses.
type Window interface {
	GetCursorPos() (float64, float64)
	SetKeyCallback(glfw.KeyCallback) glfw.KeyCallback
	SetMouseButtonCallback(glfw.MouseButtonCallback) glfw.MouseButtonCallback
	ShouldClose() bool
	SwapBuffers()
}
// New returns an application instance with empty key/button state maps
// and no camera or drawable items set.
func New() *Application {
	app := Application{
		keyDowns: make(map[glfw.Key]bool),
		mouseDowns: make(map[glfw.MouseButton]bool),
		items: []Drawable{},
		cameraSet: false,
	}
	return &app
}
// Log returns a human-readable dump of the application: the camera's
// log (when one is set) followed by every item's log.
func (a *Application) Log() string {
	out := "Application:\n"
	if a.cameraSet {
		out += " - camera : " + a.camera.Log() + "\n"
	}
	out += " - items :\n"
	for _, it := range a.items {
		out += it.Log()
	}
	return out
}
// SetWindow updates the window with the new one.
func (a *Application) SetWindow(w Window) {
	a.window = w
}
// GetWindow returns the current window of the application.
func (a *Application) GetWindow() Window {
	return a.window
}
// SetCamera updates the camera with the new one and marks the camera
// as set (enabling camera-driven uniforms and logging).
func (a *Application) SetCamera(c Camera) {
	a.cameraSet = true
	a.camera = c
}
// GetCamera returns the current camera of the application.
func (a *Application) GetCamera() Camera {
	return a.camera
}
// SetMouseButtons replaces the mouse button state map with the given one.
func (a *Application) SetMouseButtons(m map[glfw.MouseButton]bool) {
	a.mouseDowns = m
}
// GetMouseButtons returns the current mouse button state map.
func (a *Application) GetMouseButtons() map[glfw.MouseButton]bool {
	return a.mouseDowns
}
// SetKeys replaces the keyboard state map with the given one.
func (a *Application) SetKeys(m map[glfw.Key]bool) {
	a.keyDowns = m
}
// GetKeys returns the current keyboard state map.
func (a *Application) GetKeys() map[glfw.Key]bool {
	return a.keyDowns
}
// AddItem inserts a new drawable item
func (a *Application) AddItem(d Drawable) {
	a.items = append(a.items, d)
}
// Draw calls the Draw function on every drawable item.
func (a *Application) Draw() {
	// `for i := range`: the two-variable form with a blank identifier
	// (`for i, _ :=`) is redundant (staticcheck S1005).
	for index := range a.items {
		a.items[index].Draw()
	}
}

// Update calls the Update function on every drawable item, passing
// the elapsed time dt through.
func (a *Application) Update(dt float64) {
	for index := range a.items {
		a.items[index].Update(dt)
	}
}
// DrawWithUniforms calls DrawWithUniforms on every drawable item with
// the camera's view and projection matrices; when no camera has been
// set, identity matrices are used for both.
func (a *Application) DrawWithUniforms() {
	view := mgl32.Ident4()
	projection := mgl32.Ident4()
	if a.cameraSet {
		view = a.camera.GetViewMatrix()
		projection = a.camera.GetProjectionMatrix()
	}
	for _, item := range a.items {
		item.DrawWithUniforms(view, projection)
	}
}
// KeyCallback is responsible for the keyboard event handling: the
// DEBUG key dumps the application log, every other key updates the
// key state map.
func (a *Application) KeyCallback(w *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) {
	// Go switch cases do not fall through, so the explicit `break`
	// statements the original carried were redundant.
	switch key {
	case DEBUG:
		if action != glfw.Release {
			fmt.Printf("%s\n", a.Log())
		}
	default:
		a.SetKeyState(key, action)
	}
}

// MouseButtonCallback is responsible for the mouse button event
// handling: it records the cursor position and the button state.
func (a *Application) MouseButtonCallback(w *glfw.Window, button glfw.MouseButton, action glfw.Action, mods glfw.ModifierKey) {
	a.MousePosX, a.MousePosY = w.GetCursorPos()
	// The original switch contained only a default case; calling the
	// state setter directly is equivalent.
	a.SetButtonState(button, action)
}
// SetKeyState records in the key state map whether the key is
// currently down (press or repeat) or released.
func (a *Application) SetKeyState(key glfw.Key, action glfw.Action) {
	// Direct boolean assignment replaces the original if/else that set
	// a temporary flag to true or false.
	a.keyDowns[key] = action != glfw.Release
}

// SetButtonState records in the mouse button state map whether the
// button is currently down (press or repeat) or released. (The
// original doc comment was a copy-paste of SetKeyState's.)
func (a *Application) SetButtonState(button glfw.MouseButton, action glfw.Action) {
	a.mouseDowns[button] = action != glfw.Release
}
// GetMouseButtonState returns the state of the given button (false for
// buttons never seen).
func (a *Application) GetMouseButtonState(button glfw.MouseButton) bool {
	return a.mouseDowns[button]
}
// GetKeyState returns the state of the given key (false for keys never
// seen).
func (a *Application) GetKeyState(key glfw.Key) bool {
	return a.keyDowns[key]
}
|
package turnstile
import (
"database/sql"
"reflect"
"testing"
"time"
_ "github.com/mattn/go-sqlite3"
)
// eventWithNulls mirrors the events table schema, using sql.Null*
// wrappers for the columns that may be stored as NULL.
type eventWithNulls struct {
	Time time.Time
	Type string
	UserID int64
	LanguageCode sql.NullString
	ChatID sql.NullInt64
	ChatType sql.NullString
}
// scanEvents reads all rows of the events result set into
// eventWithNulls values. It returns the rows collected so far together
// with the first scan or iteration error, if any.
func scanEvents(rows *sql.Rows) ([]eventWithNulls, error) {
	var events []eventWithNulls
	for rows.Next() {
		var e eventWithNulls
		err := rows.Scan(&e.Time, &e.Type, &e.UserID, &e.LanguageCode, &e.ChatID, &e.ChatType)
		if err != nil {
			return events, err
		}
		events = append(events, e)
	}
	// rows.Next returns false both at end-of-set and on error; the
	// original never checked rows.Err, silently truncating results on
	// a failed iteration.
	return events, rows.Err()
}
// TestSQLiteCollector inserts events through an in-memory SQLite
// collector and verifies they round-trip, including NULL handling for
// the optional columns.
func TestSQLiteCollector(t *testing.T) {
	c, err := NewSQLiteCollector(":memory:")
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()
	events := []Event{
		{
			Type: "message",
			UserID: 1,
			LanguageCode: "en",
			ChatID: 1,
			ChatType: "private",
		},
		{
			// Zero-valued optional fields should come back as NULLs.
			Type: "message",
			UserID: 2,
		},
	}
	for _, event := range events {
		err = c.Collect(event)
		if err != nil {
			t.Fatal(err)
		}
	}
	rows, err := c.db.Query(`select * from events`)
	if err != nil {
		t.Fatal(err)
	}
	defer rows.Close()
	got, err := scanEvents(rows)
	if err != nil {
		t.Fatal(err)
	}
	want := []eventWithNulls{
		{
			Type: "message",
			UserID: 1,
			LanguageCode: sql.NullString{
				String: "en",
				Valid: true,
			},
			ChatID: sql.NullInt64{
				Int64: 1,
				Valid: true,
			},
			ChatType: sql.NullString{
				String: "private",
				Valid: true,
			},
		},
		{
			Type: "message",
			UserID: 2,
		},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("ExtractEvent() = %v, want %v", got, want)
	}
}
|
package leetcode
import (
"reflect"
"sort"
"testing"
)
// TestCases maps a digit-string input to its expected phone-keypad
// letter combinations, in sorted order.
type TestCases []struct {
	Input string
	Output []string
}
// Input: "23"
// Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
var testcases = TestCases{
	{Input: "23", Output: []string{"ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"}},
}
// TestLetterCombinationsRealloc checks letterCombinationsRealloc
// against the shared test cases; results are sorted before comparison
// since ordering is not part of the contract.
func TestLetterCombinationsRealloc(t *testing.T) {
	for _, tc := range testcases {
		got := letterCombinationsRealloc(tc.Input)
		sort.Strings(got)
		if !reflect.DeepEqual(got, tc.Output) {
			t.Fatalf("Actual %v; expecting %v\n", got, tc.Output)
		}
	}
}

// BenchmarkLetterCombinationsRealloc measures letterCombinationsRealloc
// on a three-digit input.
func BenchmarkLetterCombinationsRealloc(b *testing.B) {
	for i := 0; i < b.N; i++ {
		letterCombinationsRealloc("234")
	}
}
// TestLetterCombinationsReduced checks letterCombinationsReduced
// against the shared test cases; results are sorted before comparison
// since ordering is not part of the contract.
func TestLetterCombinationsReduced(t *testing.T) {
	for _, tc := range testcases {
		got := letterCombinationsReduced(tc.Input)
		sort.Strings(got)
		if !reflect.DeepEqual(got, tc.Output) {
			t.Fatalf("Actual %v; expecting %v\n", got, tc.Output)
		}
	}
}

// BenchmarkLetterCombinationsReduced measures letterCombinationsReduced
// on a three-digit input.
func BenchmarkLetterCombinationsReduced(b *testing.B) {
	for i := 0; i < b.N; i++ {
		letterCombinationsReduced("234")
	}
}
// TestLetterCombinations checks letterCombinations against the shared
// test cases; results are sorted before comparison since ordering is
// not part of the contract.
func TestLetterCombinations(t *testing.T) {
	for _, tc := range testcases {
		got := letterCombinations(tc.Input)
		sort.Strings(got)
		if !reflect.DeepEqual(got, tc.Output) {
			t.Fatalf("Actual %v; expecting %v\n", got, tc.Output)
		}
	}
}

// BenchmarkLetterCombinations measures letterCombinations on a
// three-digit input.
func BenchmarkLetterCombinations(b *testing.B) {
	for i := 0; i < b.N; i++ {
		letterCombinations("234")
	}
}
|
package main
import (
"net/http"
"github.com/ggalihpp/go-backend-ggalihpp/minio"
example "github.com/ggalihpp/go-backend-ggalihpp/route-example"
"github.com/labstack/echo"
)
// setupHandlers wires the HTTP routes onto the echo instance: a /ping
// healthcheck plus the /example and /file (minio) route groups.
func setupHandlers(e *echo.Echo) {
	e.GET("/ping", func(c echo.Context) error {
		return c.String(http.StatusOK, "pong")
	})
	exampleRoute := e.Group("/example")
	example.SetupHandler(exampleRoute)
	minioRoute := e.Group("/file")
	minio.SetupHandler(minioRoute)
}
|
package main
import "fmt"
// main demonstrates Go's bool type: the zero value, explicit
// assignment, type inference, and comparison results.
func main() {
	// 1. A declared-but-uninitialized bool has the zero value false.
	var a bool
	fmt.Println(a)
	a = false
	fmt.Println(a)
	// 2. bool accepts no assignments from other types and supports no
	// implicit or explicit numeric conversion:
	//a = 1
	//a = bool(1)
	//fmt.Println(a)
	// 3. Type inference also works for booleans.
	var b = true
	fmt.Println(b)
	c := false
	fmt.Println(c)
	v2 := (1 == 2)
	fmt.Println(v2)
	fmt.Printf("%T", v2)
}
|
// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
// This file was generated by swaggo/swag at
// 2019-11-27 11:02:57.290336153 +0100 CET m=+0.082305077
package docs
import (
"bytes"
"encoding/json"
"strings"
"github.com/alecthomas/template"
"github.com/swaggo/swag"
)
var doc = `{
"schemes": {{ marshal .Schemes }},
"swagger": "2.0",
"info": {
"description": "{{.Description}}",
"title": "{{.Title}}",
"contact": {},
"license": {},
"version": "{{.Version}}"
},
"host": "{{.Host}}",
"basePath": "{{.BasePath}}",
"paths": {
"/api/patch/v1/advisories": {
"get": {
"description": "Show me all applicable errata for all my systems",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"summary": "Show me all applicable errata for all my systems",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controllers.AdvisoriesResponse"
}
}
}
}
},
"/api/patch/v1/advisories/{advisory_id}": {
"get": {
"description": "Show me details an advisory by given advisory name",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"summary": "Show me details an advisory by given advisory name",
"parameters": [
{
"type": "string",
"description": "Advisory ID",
"name": "advisory_id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controllers.AdvisoryDetailResponse"
}
}
}
}
},
"/api/patch/v1/advisories/{advisory_id}/systems": {
"get": {
"description": "Show me systems on which the given advisory is applicable",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"summary": "Show me systems on which the given advisory is applicable",
"parameters": [
{
"type": "string",
"description": "Advisory ID",
"name": "advisory_id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controllers.AdvisorySystemsResponse"
}
}
}
}
},
"/api/patch/v1/systems": {
"get": {
"description": "Show me all my systems",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"summary": "Show me all my systems",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controllers.SystemsResponse"
}
}
}
}
},
"/api/patch/v1/systems/{inventory_id}": {
"get": {
"description": "Show me details about a system by given inventory id",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"summary": "Show me details about a system by given inventory id",
"parameters": [
{
"type": "string",
"description": "Inventory ID",
"name": "inventory_id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controllers.SystemDetailResponse"
}
}
}
}
},
"/api/patch/v1/systems/{inventory_id}/advisories": {
"get": {
"description": "Show me advisories for a system by given inventory id",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"summary": "Show me advisories for a system by given inventory id",
"parameters": [
{
"type": "string",
"description": "Inventory ID",
"name": "inventory_id",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/controllers.SystemAdvisoriesResponse"
}
}
}
}
}
},
"definitions": {
"controllers.AdvisoriesResponse": {
"type": "object",
"properties": {
"data": {
"description": "advisories items",
"type": "array",
"items": {
"$ref": "#/definitions/controllers.AdvisoryItem"
}
},
"links": {
"type": "object",
"$ref": "#/definitions/controllers.Links"
},
"meta": {
"type": "object",
"$ref": "#/definitions/controllers.AdvisoryMeta"
}
}
},
"controllers.AdvisoryDetailAttributes": {
"type": "object",
"properties": {
"cves": {
"type": "array",
"items": {
"type": "string"
}
},
"description": {
"type": "string"
},
"fixes": {
"type": "string"
},
"modified_date": {
"type": "string"
},
"public_date": {
"type": "string"
},
"references": {
"type": "array",
"items": {
"type": "string"
}
},
"severity": {
"type": "string"
},
"solution": {
"type": "string"
},
"synopsis": {
"type": "string"
},
"topic": {
"type": "string"
}
}
},
"controllers.AdvisoryDetailItem": {
"type": "object",
"properties": {
"attributes": {
"type": "object",
"$ref": "#/definitions/controllers.AdvisoryDetailAttributes"
},
"id": {
"type": "string"
},
"type": {
"type": "string"
}
}
},
"controllers.AdvisoryDetailResponse": {
"type": "object",
"properties": {
"data": {
"type": "object",
"$ref": "#/definitions/controllers.AdvisoryDetailItem"
}
}
},
"controllers.AdvisoryItem": {
"type": "object",
"properties": {
"attributes": {
"type": "object",
"$ref": "#/definitions/controllers.AdvisoryItemAttributes"
},
"id": {
"type": "string"
},
"type": {
"type": "string"
}
}
},
"controllers.AdvisoryItemAttributes": {
"type": "object",
"properties": {
"advisory_type": {
"type": "integer"
},
"applicable_systems": {
"type": "integer"
},
"description": {
"type": "string"
},
"public_date": {
"type": "string"
},
"severity": {
"type": "string"
},
"synopsis": {
"type": "string"
}
}
},
"controllers.AdvisoryMeta": {
"type": "object",
"properties": {
"data_format": {
"type": "string"
},
"filter": {
"type": "string"
},
"limit": {
"type": "integer"
},
"offset": {
"type": "integer"
},
"page": {
"type": "integer"
},
"page_size": {
"type": "integer"
},
"pages": {
"type": "integer"
},
"public_from": {
"type": "integer"
},
"public_to": {
"type": "integer"
},
"severity": {
"type": "string"
},
"show_all": {
"type": "boolean"
},
"sort": {
"type": "boolean"
},
"total_items": {
"type": "integer"
}
}
},
"controllers.AdvisorySystemsMeta": {
"type": "object",
"properties": {
"advisory": {
"type": "string"
},
"data_format": {
"type": "string"
},
"enabled": {
"type": "boolean"
},
"filter": {
"type": "string"
},
"limit": {
"type": "integer"
},
"offset": {
"type": "integer"
},
"page": {
"type": "integer"
},
"page_size": {
"type": "integer"
},
"pages": {
"type": "integer"
},
"total_items": {
"type": "integer"
}
}
},
"controllers.AdvisorySystemsResponse": {
"type": "object",
"properties": {
"data": {
"type": "array",
"items": {
"$ref": "#/definitions/controllers.SystemItem"
}
},
"links": {
"type": "object",
"$ref": "#/definitions/controllers.Links"
},
"meta": {
"type": "object",
"$ref": "#/definitions/controllers.AdvisorySystemsMeta"
}
}
},
"controllers.Links": {
"type": "object",
"properties": {
"first": {
"type": "string"
},
"last": {
"type": "string"
},
"next": {
"type": "string"
},
"previous": {
"type": "string"
}
}
},
"controllers.SystemAdvisoriesResponse": {
"type": "object",
"properties": {
"data": {
"description": "advisories items",
"type": "array",
"items": {
"$ref": "#/definitions/controllers.AdvisoryItem"
}
},
"links": {
"type": "object",
"$ref": "#/definitions/controllers.Links"
},
"meta": {
"type": "object",
"$ref": "#/definitions/controllers.AdvisoryMeta"
}
}
},
"controllers.SystemDetailResponse": {
"type": "object",
"properties": {
"data": {
"type": "object",
"$ref": "#/definitions/controllers.SystemItem"
}
}
},
"controllers.SystemItem": {
"type": "object",
"properties": {
"attributes": {
"type": "object",
"$ref": "#/definitions/controllers.SystemItemAttributes"
},
"id": {
"type": "string"
},
"type": {
"type": "string"
}
}
},
"controllers.SystemItemAttributes": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
},
"last_evaluation": {
"type": "string"
},
"last_upload": {
"type": "string"
},
"rhba_count": {
"type": "integer"
},
"rhea_count": {
"type": "integer"
},
"rhsa_count": {
"type": "integer"
}
}
},
"controllers.SystemsMeta": {
"type": "object",
"properties": {
"data_format": {
"type": "string"
},
"enabled": {
"type": "boolean"
},
"filter": {
"type": "string"
},
"limit": {
"type": "integer"
},
"offset": {
"type": "integer"
},
"page": {
"type": "integer"
},
"page_size": {
"type": "integer"
},
"pages": {
"type": "integer"
},
"total_items": {
"type": "integer"
}
}
},
"controllers.SystemsResponse": {
"type": "object",
"properties": {
"data": {
"type": "array",
"items": {
"$ref": "#/definitions/controllers.SystemItem"
}
},
"links": {
"type": "object",
"$ref": "#/definitions/controllers.Links"
},
"meta": {
"type": "object",
"$ref": "#/definitions/controllers.SystemsMeta"
}
}
}
}
}`
// swaggerInfo carries the values that are injected into the swagger doc
// template (schemes, host, base path, title, description, version).
type swaggerInfo struct {
	Version     string
	Host        string
	BasePath    string
	Schemes     []string
	Title       string
	Description string
}
// SwaggerInfo holds exported Swagger Info so clients can modify it
var SwaggerInfo = swaggerInfo{
Version: "",
Host: "",
BasePath: "",
Schemes: []string{},
Title: "",
Description: "",
}
type s struct{}
// ReadDoc renders the swagger template (var doc) with the values currently
// stored in SwaggerInfo and returns the resulting JSON document. On any
// template parse/execute error it falls back to returning the raw template.
func (s *s) ReadDoc() string {
	sInfo := SwaggerInfo
	// Escape newlines so Description stays a valid single-line JSON string.
	sInfo.Description = strings.Replace(sInfo.Description, "\n", "\\n", -1)
	// "marshal" lets the template embed arbitrary values as JSON
	// (used for the Schemes slice).
	t, err := template.New("swagger_info").Funcs(template.FuncMap{
		"marshal": func(v interface{}) string {
			a, _ := json.Marshal(v)
			return string(a)
		},
	}).Parse(doc)
	if err != nil {
		return doc
	}
	var tpl bytes.Buffer
	if err := t.Execute(&tpl, sInfo); err != nil {
		return doc
	}
	return tpl.String()
}
// init registers the rendered spec with the swag runtime under the default
// name so swagger tooling can serve it.
func init() {
	swag.Register(swag.Name, &s{})
}
|
/*
* Copyright 2018-present Open Networking Foundation
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package device
import (
"context"
"strconv"
"github.com/gogo/protobuf/proto"
coreutils "github.com/opencord/voltha-go/rw_core/utils"
"github.com/opencord/voltha-lib-go/v4/pkg/log"
ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
"github.com/opencord/voltha-protos/v4/go/voltha"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// listDeviceGroups returns logical device flow groups
func (agent *Agent) listDeviceGroups() map[uint32]*ofp.OfpGroupEntry {
groupIDs := agent.groupLoader.ListIDs()
groups := make(map[uint32]*ofp.OfpGroupEntry, len(groupIDs))
for groupID := range groupIDs {
if groupHandle, have := agent.groupLoader.Lock(groupID); have {
groups[groupID] = groupHandle.GetReadOnly()
groupHandle.Unlock()
}
}
return groups
}
// addGroupsToAdapter persists the supplied flow groups for this device and
// pushes the resulting changes down to the device's adapter. Newly created
// groups are sent as additions; groups that already exist with different
// contents are updated in the store and reported to the adapter as a
// delete-old + add-new pair. The returned Response completes asynchronously
// once the adapter has answered (see waitForAdapterFlowResponse).
func (agent *Agent) addGroupsToAdapter(ctx context.Context, newGroups []*ofp.OfpGroupEntry, flowMetadata *voltha.FlowMetadata) (coreutils.Response, error) {
	logger.Debugw(ctx, "add-groups-to-adapters", log.Fields{"device-id": agent.deviceID, "groups": newGroups, "flow-metadata": flowMetadata})
	// Nothing to do for an empty batch.
	if (len(newGroups)) == 0 {
		logger.Debugw(ctx, "nothing-to-update", log.Fields{"device-id": agent.deviceID, "groups": newGroups})
		return coreutils.DoneResponse(), nil
	}
	device, err := agent.getDeviceReadOnly(ctx)
	if err != nil {
		return coreutils.DoneResponse(), status.Errorf(codes.Aborted, "%s", err)
	}
	// The device type decides below whether the adapter accepts incremental
	// flow updates or needs the full table re-sent.
	dType, err := agent.adapterMgr.GetDeviceType(ctx, &voltha.ID{Id: device.Type})
	if err != nil {
		return coreutils.DoneResponse(), status.Errorf(codes.FailedPrecondition, "non-existent-device-type-%s", device.Type)
	}
	groupsToAdd := make([]*ofp.OfpGroupEntry, 0)
	groupsToDelete := make([]*ofp.OfpGroupEntry, 0)
	for _, group := range newGroups {
		// LockOrCreate returns created=true when the group did not exist yet.
		groupHandle, created, err := agent.groupLoader.LockOrCreate(ctx, group)
		if err != nil {
			return coreutils.DoneResponse(), err
		}
		if created {
			groupsToAdd = append(groupsToAdd, group)
		} else {
			groupToChange := groupHandle.GetReadOnly()
			if !proto.Equal(groupToChange, group) {
				//Group needs to be updated.
				if err := groupHandle.Update(ctx, group); err != nil {
					groupHandle.Unlock()
					return coreutils.DoneResponse(), status.Errorf(codes.Internal, "failure-updating-group-%s-to-device-%s", strconv.Itoa(int(group.Desc.GroupId)), agent.deviceID)
				}
				// A modified group is reported to the adapter as
				// remove-old-contents + add-new-contents.
				groupsToDelete = append(groupsToDelete, groupToChange)
				groupsToAdd = append(groupsToAdd, group)
			} else {
				//No need to change the group. It is already exist.
				logger.Debugw(ctx, "No-need-to-change-already-existing-group", log.Fields{"device-id": agent.deviceID, "group": newGroups, "flow-metadata": flowMetadata})
			}
		}
		groupHandle.Unlock()
	}
	// Sanity check
	if (len(groupsToAdd)) == 0 {
		logger.Debugw(ctx, "no-groups-to-update", log.Fields{"device-id": agent.deviceID, "groups": newGroups})
		return coreutils.DoneResponse(), nil
	}
	// Send update to adapters
	// The adapter call runs on a detached (background) context so it can
	// outlive ctx; cancel is handed to waitForAdapterFlowResponse for cleanup.
	subCtx, cancel := context.WithTimeout(log.WithSpanFromContext(context.Background(), ctx), agent.defaultTimeout)
	response := coreutils.NewResponse()
	if !dType.AcceptsAddRemoveFlowUpdates {
		// Bulk mode: re-send the device's full group table.
		updatedAllGroups := agent.listDeviceGroups()
		rpcResponse, err := agent.adapterProxy.UpdateFlowsBulk(subCtx, device, nil, updatedAllGroups, flowMetadata)
		if err != nil {
			cancel()
			return coreutils.DoneResponse(), err
		}
		go agent.waitForAdapterFlowResponse(subCtx, cancel, rpcResponse, response)
	} else {
		// Incremental mode: send only the computed add/remove deltas.
		flowChanges := &ofp.FlowChanges{
			ToAdd:    &voltha.Flows{Items: []*ofp.OfpFlowStats{}},
			ToRemove: &voltha.Flows{Items: []*ofp.OfpFlowStats{}},
		}
		groupChanges := &ofp.FlowGroupChanges{
			ToAdd:    &voltha.FlowGroups{Items: groupsToAdd},
			ToRemove: &voltha.FlowGroups{Items: groupsToDelete},
			ToUpdate: &voltha.FlowGroups{Items: []*ofp.OfpGroupEntry{}},
		}
		rpcResponse, err := agent.adapterProxy.UpdateFlowsIncremental(subCtx, device, flowChanges, groupChanges, flowMetadata)
		if err != nil {
			cancel()
			return coreutils.DoneResponse(), err
		}
		go agent.waitForAdapterFlowResponse(subCtx, cancel, rpcResponse, response)
	}
	return response, nil
}
// deleteGroupsFromAdapter removes the supplied flow groups from this device's
// store/cache and notifies the adapter. Groups not present in the loader are
// skipped silently. The returned Response completes asynchronously once the
// adapter has answered (see waitForAdapterFlowResponse).
func (agent *Agent) deleteGroupsFromAdapter(ctx context.Context, groupsToDel []*ofp.OfpGroupEntry, flowMetadata *voltha.FlowMetadata) (coreutils.Response, error) {
	logger.Debugw(ctx, "delete-groups-from-adapter", log.Fields{"device-id": agent.deviceID, "groups": groupsToDel})
	// Nothing to do for an empty batch.
	if (len(groupsToDel)) == 0 {
		logger.Debugw(ctx, "nothing-to-delete", log.Fields{"device-id": agent.deviceID})
		return coreutils.DoneResponse(), nil
	}
	device, err := agent.getDeviceReadOnly(ctx)
	if err != nil {
		return coreutils.DoneResponse(), status.Errorf(codes.Aborted, "%s", err)
	}
	// The device type decides below whether the adapter accepts incremental
	// flow updates or needs the full table re-sent.
	dType, err := agent.adapterMgr.GetDeviceType(ctx, &voltha.ID{Id: device.Type})
	if err != nil {
		return coreutils.DoneResponse(), status.Errorf(codes.FailedPrecondition, "non-existent-device-type-%s", device.Type)
	}
	for _, group := range groupsToDel {
		if groupHandle, have := agent.groupLoader.Lock(group.Desc.GroupId); have {
			// Update the store and cache
			if err := groupHandle.Delete(ctx); err != nil {
				groupHandle.Unlock()
				return coreutils.DoneResponse(), err
			}
			groupHandle.Unlock()
		}
	}
	// Send update to adapters
	// The adapter call runs on a detached (background) context so it can
	// outlive ctx; cancel is handed to waitForAdapterFlowResponse for cleanup.
	subCtx, cancel := context.WithTimeout(log.WithSpanFromContext(context.Background(), ctx), agent.defaultTimeout)
	response := coreutils.NewResponse()
	if !dType.AcceptsAddRemoveFlowUpdates {
		// Bulk mode: re-send the device's full (post-delete) group table.
		updatedAllGroups := agent.listDeviceGroups()
		rpcResponse, err := agent.adapterProxy.UpdateFlowsBulk(subCtx, device, nil, updatedAllGroups, flowMetadata)
		if err != nil {
			cancel()
			return coreutils.DoneResponse(), err
		}
		go agent.waitForAdapterFlowResponse(subCtx, cancel, rpcResponse, response)
	} else {
		// Incremental mode: report only the removed groups.
		flowChanges := &ofp.FlowChanges{
			ToAdd:    &voltha.Flows{Items: []*ofp.OfpFlowStats{}},
			ToRemove: &voltha.Flows{Items: []*ofp.OfpFlowStats{}},
		}
		groupChanges := &ofp.FlowGroupChanges{
			ToAdd:    &voltha.FlowGroups{Items: []*ofp.OfpGroupEntry{}},
			ToRemove: &voltha.FlowGroups{Items: groupsToDel},
			ToUpdate: &voltha.FlowGroups{Items: []*ofp.OfpGroupEntry{}},
		}
		rpcResponse, err := agent.adapterProxy.UpdateFlowsIncremental(subCtx, device, flowChanges, groupChanges, flowMetadata)
		if err != nil {
			cancel()
			return coreutils.DoneResponse(), err
		}
		go agent.waitForAdapterFlowResponse(subCtx, cancel, rpcResponse, response)
	}
	return response, nil
}
// updateGroupsToAdapter applies in-place modifications to existing flow groups
// and pushes them to the adapter as a ToUpdate delta. Groups not already in
// the store are silently skipped. Unlike add/delete, updates are refused
// unless the device is active, reachable and enabled. The returned Response
// completes asynchronously once the adapter has answered.
func (agent *Agent) updateGroupsToAdapter(ctx context.Context, updatedGroups []*ofp.OfpGroupEntry, flowMetadata *voltha.FlowMetadata) (coreutils.Response, error) {
	logger.Debugw(ctx, "updateGroupsToAdapter", log.Fields{"device-id": agent.deviceID, "groups": updatedGroups})
	// Nothing to do for an empty batch.
	if (len(updatedGroups)) == 0 {
		logger.Debugw(ctx, "nothing-to-update", log.Fields{"device-id": agent.deviceID, "groups": updatedGroups})
		return coreutils.DoneResponse(), nil
	}
	device, err := agent.getDeviceReadOnly(ctx)
	if err != nil {
		return coreutils.DoneResponse(), status.Errorf(codes.Aborted, "%s", err)
	}
	// Guard: only fully-up devices may receive group updates.
	if device.OperStatus != voltha.OperStatus_ACTIVE || device.ConnectStatus != voltha.ConnectStatus_REACHABLE || device.AdminState != voltha.AdminState_ENABLED {
		return coreutils.DoneResponse(), status.Errorf(codes.FailedPrecondition, "invalid device states-oper-%s-connect-%s-admin-%s", device.OperStatus, device.ConnectStatus, device.AdminState)
	}
	dType, err := agent.adapterMgr.GetDeviceType(ctx, &voltha.ID{Id: device.Type})
	if err != nil {
		return coreutils.DoneResponse(), status.Errorf(codes.FailedPrecondition, "non-existent-device-type-%s", device.Type)
	}
	groupsToUpdate := make([]*ofp.OfpGroupEntry, 0)
	for _, group := range updatedGroups {
		if groupHandle, have := agent.groupLoader.Lock(group.Desc.GroupId); have {
			// Update the store and cache
			if err := groupHandle.Update(ctx, group); err != nil {
				groupHandle.Unlock()
				return coreutils.DoneResponse(), err
			}
			groupsToUpdate = append(groupsToUpdate, group)
			groupHandle.Unlock()
		}
	}
	// The adapter call runs on a detached (background) context so it can
	// outlive ctx; cancel is handed to waitForAdapterFlowResponse for cleanup.
	subCtx, cancel := context.WithTimeout(log.WithSpanFromContext(context.Background(), ctx), agent.defaultTimeout)
	response := coreutils.NewResponse()
	// Process bulk flow update differently than incremental update
	if !dType.AcceptsAddRemoveFlowUpdates {
		updatedAllGroups := agent.listDeviceGroups()
		// NOTE(review): flowMetadata is passed as nil here, unlike the bulk
		// paths in addGroupsToAdapter/deleteGroupsFromAdapter — confirm this
		// inconsistency is intentional.
		rpcResponse, err := agent.adapterProxy.UpdateFlowsBulk(subCtx, device, nil, updatedAllGroups, nil)
		if err != nil {
			cancel()
			return coreutils.DoneResponse(), err
		}
		go agent.waitForAdapterFlowResponse(subCtx, cancel, rpcResponse, response)
	} else {
		logger.Debugw(ctx, "updating-groups",
			log.Fields{
				"device-id":        agent.deviceID,
				"groups-to-update": groupsToUpdate,
			})
		// Sanity check
		if (len(groupsToUpdate)) == 0 {
			logger.Debugw(ctx, "nothing-to-update", log.Fields{"device-id": agent.deviceID, "groups": groupsToUpdate})
			cancel()
			return coreutils.DoneResponse(), nil
		}
		flowChanges := &ofp.FlowChanges{
			ToAdd:    &voltha.Flows{Items: []*ofp.OfpFlowStats{}},
			ToRemove: &voltha.Flows{Items: []*ofp.OfpFlowStats{}},
		}
		groupChanges := &ofp.FlowGroupChanges{
			ToAdd:    &voltha.FlowGroups{Items: []*ofp.OfpGroupEntry{}},
			ToRemove: &voltha.FlowGroups{Items: []*ofp.OfpGroupEntry{}},
			ToUpdate: &voltha.FlowGroups{Items: groupsToUpdate},
		}
		rpcResponse, err := agent.adapterProxy.UpdateFlowsIncremental(subCtx, device, flowChanges, groupChanges, flowMetadata)
		if err != nil {
			cancel()
			return coreutils.DoneResponse(), err
		}
		go agent.waitForAdapterFlowResponse(subCtx, cancel, rpcResponse, response)
	}
	return response, nil
}
|
//go:generate go run github.com/alvaroloes/enumer -type InstallationPhase -output zz_generated_installationphase_enumer.go
package api
|
package service
// BaseService is the common base type for services; it embeds
// TrackableLogService so every service carries a log context.
type BaseService struct {
	TrackableLogService
}

// TrackableLogService holds a context string for log output.
// NOTE(review): exact semantics of LogContext inferred from the name —
// confirm against the code that reads it.
type TrackableLogService struct {
	LogContext string
}
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"net"
"net/http"
"net/http/pprof"
"os"
"os/signal"
"path/filepath"
"syscall"
"time"
"github.com/msiebuhr/ucs"
"github.com/msiebuhr/ucs/cache"
"github.com/msiebuhr/ucs/customflags"
"github.com/msiebuhr/ucs/frontend"
"github.com/namsral/flag"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// Runtime configuration, populated from flags in init below.
var (
	cacheBackend    string // which cache implementation to use ("fs" or "memory")
	fsCacheBasepath string // on-disk location for the fs backend
	HTTPAddress     string // listen address for the metrics/admin HTTP server
	quota           = customflags.NewSize(1024 * 1024 * 1024) // storage quota, default 1 GiB
	verbose         bool // enable per-server logging
	ports           = &customflags.Namespaces{} // port -> namespace mapping
)
// init registers all command-line flags.
// NOTE(review): this uses github.com/namsral/flag, which (per its import
// path) may also source values from the environment — confirm.
func init() {
	flag.StringVar(&cacheBackend, "cache-backend", "fs", "Cache backend (fs or memory)")
	flag.StringVar(&fsCacheBasepath, "cache-path", "./unity-cache", "Where FS cache should store data")
	flag.StringVar(&HTTPAddress, "http-address", ":9126", "Address and port for HTTP metrics/admin interface")
	flag.BoolVar(&verbose, "verbose", false, "Spew more info")
	flag.Var(quota, "quota", "Storage quota (ex. 10GB, 1TB, ...)")
	flag.Var(ports, "port", "Namespaces/ports to open (ex: zombie-zebras:5000) May be used multiple times")
}
// main wires up the configured cache backend, starts one cache server per
// namespace/port plus an HTTP admin/metrics server (pprof, Prometheus,
// frontend, /api/info), then blocks until SIGINT/SIGTERM and shuts down
// gracefully.
func main() {
	flag.Parse()

	// Set a default port if the user doesn't set anything.
	if len(*ports) == 0 {
		// Previously the error was silently discarded.
		if err := ports.Set("default:8126"); err != nil {
			log.Fatalln("Setting default port:", err)
		}
	}

	fsCacheBasepath, _ = filepath.Abs(fsCacheBasepath)

	log.Printf(
		"Starting quota=%s ports=%s httpAddress=%s fsCacheBasepath=%s\n",
		quota, ports, HTTPAddress, fsCacheBasepath,
	)

	// Figure out a cache
	var c cache.Cacher
	switch cacheBackend {
	case "fs":
		var err error
		c, err = cache.NewFS(func(f *cache.FS) {
			f.Quota = quota.Int64()
			f.Basepath = fsCacheBasepath
		})
		if err != nil {
			panic(err)
		}
	case "memory":
		c = cache.NewMemory(quota.Int64())
	default:
		// UNKNOWN BACKEND - BAIL/CRASH/QUIT
		panic("Unknown backend " + cacheBackend)
	}

	// Create a server per namespace
	servers := make([]*ucs.Server, 0, len(*ports))
	for port, ns := range *ports {
		server := ucs.NewServer(
			func(s *ucs.Server) { s.Cache = c },
			func(s *ucs.Server) {
				if verbose {
					s.Log = log.New(os.Stdout, "server: ", 0)
				}
			},
			func(s *ucs.Server) { s.Namespace = ns },
		)
		servers = append(servers, server)

		// Listen blocks for the life of the server; reaching Fatalln means
		// the listener failed or terminated.
		go func(port uint) {
			err := server.Listen(context.Background(), fmt.Sprintf(":%d", port))
			log.Fatalln("Listen:", err)
		}(port)
	}

	// Set up web-server mux
	mux := http.NewServeMux()

	// Copy/paste from https://golang.org/src/net/http/pprof/pprof.go?s=6729:6767#L208
	mux.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
	mux.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
	mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
	mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
	mux.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
	mux.Handle("/metrics", promhttp.Handler())
	mux.Handle("/", http.FileServer(frontend.FS(false)))

	// /api/info reports the quota, backend and per-namespace addresses.
	mux.HandleFunc("/api/info", func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()

		// Figure out our IP
		var ip net.IP
		addrs, _ := net.InterfaceAddrs()
		for _, addr := range addrs {
			var i net.IP
			switch v := addr.(type) {
			case *net.IPNet:
				i = v.IP
			case *net.IPAddr:
				i = v.IP
			}
			// Is this the right way to detect the IP?
			if i.IsGlobalUnicast() {
				ip = i
				break
			}
		}

		servers := map[string][]string{}
		for port, ns := range *ports {
			// Parse address to figure out what port/ip we're bound to
			tcpAddr := net.TCPAddr{
				IP:   ip,
				Port: int(port),
			}
			if _, ok := servers[ns]; !ok {
				servers[ns] = []string{}
			}
			servers[ns] = append(servers[ns], tcpAddr.String())
		}

		data := struct {
			QuotaBytes   int64
			Servers      map[string][]string
			CacheBackend string
		}{
			QuotaBytes:   quota.Int64(),
			Servers:      servers,
			CacheBackend: cacheBackend,
		}

		// Surface (rather than silently drop) encoding failures.
		if err := json.NewEncoder(w).Encode(data); err != nil {
			log.Println("Encoding /api/info response:", err)
		}
	})

	// Create the web-server itself
	h := &http.Server{Addr: HTTPAddress, Handler: mux}

	// Start it
	go func() {
		// http.ErrServerClosed is the expected result of the graceful
		// Shutdown below; the original treated it as fatal, which aborted
		// the process before the cache servers were stopped.
		if err := h.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatalln("ListenAndServe: ", err)
		}
	}()

	// Handle SIGINT and SIGTERM. signal.Notify requires a buffered channel;
	// with an unbuffered one a signal can be dropped if it arrives before we
	// block on the receive below.
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
	log.Println(<-ch)

	// Stop web interface gracefully (best-effort within a 5s budget).
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	h.Shutdown(ctx)

	// Stop the service gracefully.
	for _, server := range servers {
		server.Stop()
	}
}
|
package cpu
import "path/filepath"
import "testing"
// TestParsingInstructions parses the sample input and spot-checks the
// instruction count plus the first and third parsed instructions.
func TestParsingInstructions(t *testing.T) {
	absPath, _ := filepath.Abs("../../test_input.txt")
	list, err := ParseInstructions(absPath)
	if err != nil {
		t.Errorf("Failed to parse test_input.txt :: %q", err)
		return
	}
	if list.Len() != 4 {
		t.Errorf("Parsed the wrong number of instructions from test_input.txt :: %d", list.Len())
	}
	expected := Instruction{"b", "inc", 5, "a", ">", 1}
	if list.Front().Value != expected {
		t.Errorf("Failed to parse first instruction correctly :: %q", list.Front().Value)
	}
	// Fixed copy-paste bug: this assertion targets the THIRD instruction, but
	// the original failure message said "first" and printed the first
	// instruction's value, hiding the actual mismatch.
	expected = Instruction{"c", "dec", -10, "a", ">=", 1}
	if third := list.Front().Next().Next().Value; third != expected {
		t.Errorf("Failed to parse third instruction correctly :: %q", third)
	}
}
// TestRunningInstructions executes the sample program and checks the final
// values of registers a, b and c.
func TestRunningInstructions(t *testing.T) {
	inputPath, _ := filepath.Abs("../../test_input.txt")
	program, _ := ParseInstructions(inputPath)
	regs := RunInstructions(program)
	if regs["a"] != 1 {
		t.Errorf("expected register a == 1, but saw %d", regs["a"])
	}
	if regs["b"] != 0 {
		t.Errorf("expected register b == 0, but saw %d", regs["b"])
	}
	if regs["c"] != -10 {
		t.Errorf("expected register c == -10, but saw %d", regs["c"])
	}
}
|
package main
import (
"html/template"
"net/http"
"fmt"
)
// t holds the redirect template, parsed once at startup; template.Must
// panics on a parse failure so the server never runs without it.
// (Fixed: the declaration was not gofmt-clean — "var t*template.Template".)
var t *template.Template

func init() {
	t = template.Must(template.ParseFiles("redirect1.gohtml"))
}
// main registers the demo handlers and serves on :8080.
func main() {
	http.HandleFunc("/", foo)
	http.HandleFunc("/bar", bar)
	http.HandleFunc("/barred", barred)
	http.Handle("/favicon.ico", http.NotFoundHandler())
	// Report why the server stopped (e.g. port already in use); the
	// original silently discarded this error.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Println("ListenAndServe:", err)
	}
}
// foo handles "/" and logs the HTTP method of each request.
func foo(w http.ResponseWriter, r *http.Request) {
	fmt.Println("method(foo) is:", r.Method)
}
// bar logs the request method and redirects the client to "/" by hand.
func bar(w http.ResponseWriter, r *http.Request) {
	fmt.Println("method (bar) is:", r.Method)
	//http.Redirect(w,r,"/",303)
	//(above 1 line) OR (below 2 lines)
	// Manual redirect: set the target, then emit 303 See Other so the client
	// re-requests "/" with GET; code 307 would instead preserve the method.
	w.Header().Set("Location", "/")
	w.WriteHeader(http.StatusSeeOther)
}
// barred logs the request method and renders the redirect template.
// The template-execution error, previously discarded, is now surfaced to
// the client as a 500.
func barred(w http.ResponseWriter, r *http.Request) {
	fmt.Println("method (barred) is:", r.Method)
	if err := t.ExecuteTemplate(w, "redirect1.gohtml", nil); err != nil {
		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
	}
}
|
package testdata
import (
"github.com/frk/gosql"
"github.com/frk/gosql/internal/testdata/common"
)
// UpdateFromblockBasicSingleQuery is a test fixture describing an UPDATE with
// a FROM clause: update test_user rows (aliased u) joined against test_post
// (aliased p) where u.id = p.user_id and p.is_spam holds.
type UpdateFromblockBasicSingleQuery struct {
	User *common.User4 `rel:"test_user:u"`
	From struct {
		_ gosql.Relation `sql:"test_post:p"`
	}
	Where struct {
		_ gosql.Column `sql:"u.id=p.user_id"`
		_ gosql.Column `sql:"p.is_spam"`
	}
}
|
package uuid
import "github.com/lithammer/shortuuid"
// NewString returns a new short unique identifier string, delegating to
// github.com/lithammer/shortuuid.
func NewString() string {
	return shortuuid.New()
}
|
package lnroll
import (
"testing"
"github.com/apg/ln"
)
// mockClient counts invocations of each severity-level callback, standing in
// for a real error-reporting client in tests.
type mockClient struct {
	C int // number of Critical calls
	E int // number of Error calls
}

// Critical records one critical-level invocation and returns zero values.
func (c *mockClient) Critical(err error, extras map[string]string) (uuid string, e error) {
	c.C++
	return "", nil
}

// Error records one error-level invocation and returns zero values.
func (c *mockClient) Error(err error, extras map[string]string) (uuid string, e error) {
	c.E++
	return "", nil
}
// TestFilter verifies that logging at Error priority through the filter
// produced by New routes into the mock client's Error callback.
func TestFilter(t *testing.T) {
	client := &mockClient{}
	filter := New(client)
	logger := &ln.Logger{
		Pri:     ln.PriError,
		Filters: []ln.Filter{filter},
	}
	logger.Error(ln.F{"err": "Here's an error"})
	if client.E == 0 {
		t.Fatalf("Filter didn't fire on Error")
	}
}
|
package prometheuscustomexporter
import (
"context"
metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
"github.com/orijtech/prometheus-go-metrics-exporter"
commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
)
// PrometheusContribExporter wraps the contrib prometheus exporter, embedding
// it so that only ExportMetric needs to be overridden.
type PrometheusContribExporter struct {
	*prometheus.Exporter
}
// ExportMetric is the method that the exporter uses to convert OpenCensus Proto-Metrics to Prometheus metrics.
// Resource labels are folded into the metric as extra label key/value pairs
// before delegating to the embedded contrib exporter.
// NOTE(review): label values are appended only to Timeseries[0]; if a metric
// carries multiple timeseries the later ones end up with fewer values than
// the descriptor has keys — confirm this is intended.
// NOTE(review): rsc.Labels is read without a nil check on rsc — confirm
// callers always supply a resource.
func (exp *PrometheusContribExporter) ExportMetric(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metric *metricspb.Metric) error {
	// Nothing to export without data points.
	if metric == nil || len(metric.Timeseries) == 0 {
		return nil
	}
	for k, v := range rsc.Labels {
		metric.MetricDescriptor.LabelKeys = append(metric.MetricDescriptor.LabelKeys, &metricspb.LabelKey{
			Key: k,
		})
		metric.Timeseries[0].LabelValues = append(metric.Timeseries[0].LabelValues, &metricspb.LabelValue{
			Value: v,
		})
	}
	return exp.Exporter.ExportMetric(ctx, node, rsc, metric)
}
// NewPrometheusExporter builds a PrometheusContribExporter on top of the
// contrib prometheus exporter created from opts. The error from
// prometheus.New is now propagated instead of silently dropped — previously
// a construction failure returned a wrapper around a nil *Exporter, which
// would only surface later as a panic.
func NewPrometheusExporter(opts prometheus.Options) (*PrometheusContribExporter, error) {
	p, err := prometheus.New(opts)
	if err != nil {
		return nil, err
	}
	return &PrometheusContribExporter{
		Exporter: p,
	}, nil
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package monitoring provides methods and types for managing monitoring GCP resources.
package alpha
import (
"bytes"
"context"
"io/ioutil"
"time"
"google.golang.org/api/googleapi"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager"
)
// equalsMetricDescriptorValueType compares two value-type enums, treating a
// nil/empty value on either side as the default "STRING".
func equalsMetricDescriptorValueType(m, n *MetricDescriptorValueTypeEnum) bool {
	normalize := func(v *MetricDescriptorValueTypeEnum) string {
		s := dcl.ValueOrEmptyString(v)
		if s == "" {
			return "STRING"
		}
		return s
	}
	return normalize(m) == normalize(n)
}
// equalsMetricDescriptorLabelsValueType compares two label value-type enums,
// treating a nil/empty value on either side as the default "STRING".
func equalsMetricDescriptorLabelsValueType(m, n *MetricDescriptorLabelsValueTypeEnum) bool {
	normalize := func(v *MetricDescriptorLabelsValueTypeEnum) string {
		s := dcl.ValueOrEmptyString(v)
		if s == "" {
			return "STRING"
		}
		return s
	}
	return normalize(m) == normalize(n)
}
// canonicalizeMetricDescriptorValueType is the diff-suppress wrapper around
// equalsMetricDescriptorValueType for untyped (interface{}) comparison.
func canonicalizeMetricDescriptorValueType(m, n interface{}) bool {
	if m == nil && n == nil {
		return true
	}
	mEnum, _ := m.(*MetricDescriptorValueTypeEnum)
	nEnum, _ := n.(*MetricDescriptorValueTypeEnum)
	return equalsMetricDescriptorValueType(mEnum, nEnum)
}
// canonicalizeMetricDescriptorLabelsValueType is the diff-suppress wrapper
// around equalsMetricDescriptorLabelsValueType for untyped comparison.
func canonicalizeMetricDescriptorLabelsValueType(m, n interface{}) bool {
	if m == nil && n == nil {
		return true
	}
	mEnum, _ := m.(*MetricDescriptorLabelsValueTypeEnum)
	nEnum, _ := n.(*MetricDescriptorLabelsValueTypeEnum)
	return equalsMetricDescriptorLabelsValueType(mEnum, nEnum)
}
// GetMonitoredProject is a custom method because projects are returned as project numbers instead of project ids.
func (c *Client) GetMonitoredProject(ctx context.Context, r *MonitoredProject) (*MonitoredProject, error) {
ctx, cancel := context.WithTimeout(ctx, c.Config.TimeoutOr(0*time.Second))
defer cancel()
b, err := c.getMonitoredProjectRaw(ctx, r)
if err != nil {
if dcl.IsNotFound(err) {
return nil, &googleapi.Error{
Code: 404,
Message: err.Error(),
}
}
return nil, err
}
result, err := unmarshalMonitoredProject(b, c, r)
if err != nil {
return nil, err
}
result.MetricsScope = r.MetricsScope
result.Name = r.Name
c.Config.Logger.Infof("Retrieved raw result state: %v", result)
c.Config.Logger.Infof("Canonicalizing with specified state: %v", r)
result, err = canonicalizeMonitoredProjectNewState(c, result, r)
if err != nil {
return nil, err
}
c.Config.Logger.Infof("Created result state: %v", result)
return result, nil
}
// getMonitoredProjectRaw fetches the raw JSON bytes for a single monitored
// project by listing the "monitoredProjects" collection and extracting the
// element selected by the resource's custom matcher (which converts between
// project ids and project numbers).
func (c *Client) getMonitoredProjectRaw(ctx context.Context, r *MonitoredProject) ([]byte, error) {
	u, err := r.getURL(c.Config.BasePath)
	if err != nil {
		return nil, err
	}
	resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
	if err != nil {
		return nil, err
	}
	defer resp.Response.Body.Close()
	b, err := ioutil.ReadAll(resp.Response.Body)
	if err != nil {
		return nil, err
	}
	// Pick the single matching element out of the listed collection.
	b, err = dcl.ExtractElementFromList(b, "monitoredProjects", r.customMatcher(ctx, c))
	if err != nil {
		return nil, err
	}
	return b, nil
}
// This resource has a custom matcher to do a lookup and convert between project ids and project numbers.
func (r *MonitoredProject) customMatcher(ctx context.Context, c *Client) func([]byte) bool {
return func(b []byte) bool {
cr, err := unmarshalMonitoredProject(b, c, r)
if err != nil {
c.Config.Logger.Warning("Failed to unmarshal provided resource in matcher.")
return false
}
// URL Normalize both resources to compare only the short names.
nr := r.urlNormalized()
ncr := cr.urlNormalized()
if nr.Name == nil && ncr.Name == nil {
c.Config.Logger.Info("Both Name fields null - considering equal.")
return true
}
if nr.Name == nil || ncr.Name == nil {
c.Config.Logger.Info("Only one Name field is null - considering unequal.")
return false
}
// Create a client with an empty base path so that it doesn't inherit the base path from the
// monitoring client.
cloudresourcemanagerCl := cloudresourcemanager.NewClient(c.Config.Clone(dcl.WithBasePath("")))
project, err := cloudresourcemanagerCl.GetProject(ctx, &cloudresourcemanager.Project{
Name: nr.Name,
})
if err != nil {
c.Config.Logger.Warningf("Could not look up project %s", *nr.Name)
return false
}
projectNumber := dcl.ValueOrEmptyString(project.ProjectNumber)
c.Config.Logger.Infof("Attempting to match %v with %v.", projectNumber, ncr.Name)
return projectNumber == *ncr.Name
}
}
|
package upstream
import (
"context"
"fmt"
"strconv"
"sync/atomic"
"testing"
"time"
v2 "github.com/envoyproxy/go-control-plane/envoy/api/v2"
core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
discoveryv3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"github.com/envoyproxy/go-control-plane/pkg/resource/v2"
resourcev3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3"
"github.com/envoyproxy/xds-relay/internal/app/transport"
"github.com/envoyproxy/xds-relay/internal/pkg/log"
"github.com/envoyproxy/xds-relay/internal/pkg/stats"
"github.com/stretchr/testify/assert"
"github.com/uber-go/tally"
"go.uber.org/goleak"
"google.golang.org/genproto/googleapis/rpc/status"
)
// TestMain wires in goleak so every test in this package fails if it leaves
// a goroutine running. goleak.VerifyTestMain runs m.Run and exits itself;
// the defer merely delays that until TestMain returns, which is equivalent
// here since nothing else happens in this function.
func TestMain(m *testing.M) {
	defer goleak.VerifyTestMain(m)
}
// TestOpenStreamShouldReturnErrorForInvalidTypeUrl opens a v2 stream with a
// request carrying no type URL and expects the response channel to be closed
// without ever delivering a value.
func TestOpenStreamShouldReturnErrorForInvalidTypeUrl(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	client := createMockClient(ctx)
	respCh, done := client.OpenStream(transport.NewRequestV2(&v2.DiscoveryRequest{}), "aggregated_key")
	defer done()
	_, open := <-respCh
	assert.False(t, open)
}
// TestOpenStreamShouldReturnErrorForInvalidTypeUrlV3 opens a v3 stream with a
// request carrying no type URL and expects the response channel to be closed
// without ever delivering a value.
func TestOpenStreamShouldReturnErrorForInvalidTypeUrlV3(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	client := createMockClientV3(ctx)
	respCh, done := client.OpenStream(transport.NewRequestV3(&discoveryv3.DiscoveryRequest{}), "aggregated_key")
	defer done()
	_, open := <-respCh
	assert.False(t, open)
}
// TestOpenStreamShouldRetryOnStreamCreationFailure verifies that, for every
// v2 xDS type, a stream creation error is counted and the client keeps
// retrying until a stream is eventually opened.
func TestOpenStreamShouldRetryOnStreamCreationFailure(t *testing.T) {
	scope := stats.NewMockScope("mock")
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// The mock fails the first stream creation only (a single error is queued).
	client := createMockClientWithError(ctx, scope)
	// Per type URL: [0] = expected failure counter, [1] = expected open counter.
	typeURLs := map[string][]string{
		resource.ListenerType: {"mock.lds.stream_failure+key=aggregated_key", "mock.lds.stream_opened+key=aggregated_key"},
		resource.ClusterType:  {"mock.cds.stream_failure+key=aggregated_key", "mock.cds.stream_opened+key=aggregated_key"},
		resource.RouteType:    {"mock.rds.stream_failure+key=aggregated_key", "mock.rds.stream_opened+key=aggregated_key"},
		resource.EndpointType: {"mock.eds.stream_failure+key=aggregated_key", "mock.eds.stream_opened+key=aggregated_key"},
	}
	for url, stats := range typeURLs {
		t.Run(url, func(t *testing.T) {
			respCh, done := client.OpenStream(
				transport.NewRequestV2(&v2.DiscoveryRequest{
					TypeUrl: url,
					Node:    &core.Node{},
				}), "aggregated_key")
			assert.NotNil(t, respCh)
			// Busy-wait until exactly one stream failure has been recorded.
			for {
				if v, ok := scope.Snapshot().Counters()[stats[0]]; ok && v.Value() == 1 {
					break
				}
			}
			// Then wait until at least one successful stream open follows.
			for {
				if v, ok := scope.Snapshot().Counters()[stats[1]]; ok && v.Value() != 0 {
					break
				}
			}
			done()
			// Drain the channel so no producer goroutine outlives the test.
			blockUntilClean(respCh, func() {})
		})
	}
}
// TestOpenStreamShouldRetryOnStreamCreationFailureV3 is the v3 counterpart of
// TestOpenStreamShouldRetryOnStreamCreationFailure: a stream creation error
// is counted, then the client retries until a stream is opened.
func TestOpenStreamShouldRetryOnStreamCreationFailureV3(t *testing.T) {
	scope := stats.NewMockScope("mock")
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// The mock fails the first stream creation only (a single error is queued).
	client := createMockClientWithErrorV3(ctx, scope)
	// Per type URL: [0] = expected failure counter, [1] = expected open counter.
	typeURLs := map[string][]string{
		resourcev3.ListenerType: {"mock.lds.stream_failure+key=aggregated_key", "mock.lds.stream_opened+key=aggregated_key"},
		resourcev3.ClusterType:  {"mock.cds.stream_failure+key=aggregated_key", "mock.cds.stream_opened+key=aggregated_key"},
		resourcev3.RouteType:    {"mock.rds.stream_failure+key=aggregated_key", "mock.rds.stream_opened+key=aggregated_key"},
		resourcev3.EndpointType: {"mock.eds.stream_failure+key=aggregated_key", "mock.eds.stream_opened+key=aggregated_key"},
	}
	for url, stats := range typeURLs {
		t.Run(url, func(t *testing.T) {
			respCh, done := client.OpenStream(
				transport.NewRequestV3(&discoveryv3.DiscoveryRequest{
					TypeUrl: url,
					Node:    &corev3.Node{},
				}), "aggregated_key")
			assert.NotNil(t, respCh)
			// Busy-wait until exactly one stream failure has been recorded.
			for {
				if v, ok := scope.Snapshot().Counters()[stats[0]]; ok && v.Value() == 1 {
					break
				}
			}
			// Then wait until at least one successful stream open follows.
			for {
				if v, ok := scope.Snapshot().Counters()[stats[1]]; ok && v.Value() != 0 {
					break
				}
			}
			done()
			// Drain the channel so no producer goroutine outlives the test.
			blockUntilClean(respCh, func() {})
		})
	}
}
func TestOpenStreamShouldReturnNonEmptyResponseChannel(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
client := createMockClient(ctx)
respCh, done := client.OpenStream(
transport.NewRequestV2(&v2.DiscoveryRequest{
TypeUrl: resource.ListenerType,
Node: &core.Node{},
}), "aggregated_key")
assert.NotNil(t, respCh)
done()
cancel()
blockUntilClean(respCh, func() {})
}
func TestOpenStreamShouldReturnNonEmptyResponseChannelV3(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
client := createMockClientV3(ctx)
respCh, done := client.OpenStream(
transport.NewRequestV3(&discoveryv3.DiscoveryRequest{
TypeUrl: resourcev3.ListenerType,
Node: &corev3.Node{},
}), "aggregated_key")
assert.NotNil(t, respCh)
done()
cancel()
blockUntilClean(respCh, func() {})
}
// TestOpenStreamShouldSendTheFirstRequestToOriginServer verifies that opening
// a v2 stream immediately forwards a first DiscoveryRequest carrying the
// caller's node and type URL to the origin server.
func TestOpenStreamShouldSendTheFirstRequestToOriginServer(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	var message *v2.DiscoveryRequest
	responseChan := make(chan *v2.DiscoveryResponse)
	wait := make(chan bool)
	// CAS guard: the send callback may run more than once (retries); close
	// wait exactly once, on the first invocation.
	var first int32 = 0
	client := NewMock(
		ctx,
		CallOptions{SendTimeout: time.Nanosecond},
		nil,
		responseChan,
		responseChan,
		responseChan,
		responseChan,
		func(m interface{}) error {
			message = m.(*v2.DiscoveryRequest)
			if atomic.CompareAndSwapInt32(&first, 0, 1) {
				close(wait)
			}
			return nil
		},
		stats.NewMockScope("mock"),
		0,
	)
	node := &core.Node{}
	resp, done := client.OpenStream(
		transport.NewRequestV2(&v2.DiscoveryRequest{
			TypeUrl: resource.ListenerType,
			Node:    node,
		}), "aggregated_key")
	// Block until the mock has observed the first outgoing request.
	<-wait
	assert.NotNil(t, message)
	assert.Equal(t, message.GetNode(), node)
	assert.Equal(t, message.GetTypeUrl(), resource.ListenerType)
	done()
	cancel()
	blockUntilClean(resp, func() {})
}
// TestOpenStreamShouldSendTheFirstRequestToOriginServerV3 verifies that
// opening a v3 stream immediately forwards a first DiscoveryRequest carrying
// the caller's node and type URL to the origin server.
func TestOpenStreamShouldSendTheFirstRequestToOriginServerV3(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	var message *discoveryv3.DiscoveryRequest
	responseChan := make(chan *discoveryv3.DiscoveryResponse)
	wait := make(chan bool)
	// Use an atomic CAS like the v2 variant of this test: the send callback
	// runs on the client's goroutine and may be invoked again on retries, so
	// a plain bool would race and could close wait twice.
	var first int32 = 0
	client := NewMockV3(
		ctx,
		CallOptions{SendTimeout: time.Nanosecond},
		nil,
		responseChan,
		responseChan,
		responseChan,
		responseChan,
		func(m interface{}) error {
			message = m.(*discoveryv3.DiscoveryRequest)
			if atomic.CompareAndSwapInt32(&first, 0, 1) {
				close(wait)
			}
			return nil
		},
		stats.NewMockScope("mock"),
		0,
	)
	node := &corev3.Node{}
	resp, done := client.OpenStream(
		transport.NewRequestV3(&discoveryv3.DiscoveryRequest{
			TypeUrl: resourcev3.ListenerType,
			Node:    node,
		}), "aggregated_key")
	// Block until the mock has observed the first outgoing request.
	<-wait
	assert.NotNil(t, message)
	assert.Equal(t, message.GetNode(), node)
	assert.Equal(t, message.GetTypeUrl(), resourcev3.ListenerType)
	done()
	cancel()
	blockUntilClean(resp, func() {})
}
// TestOpenStreamShouldClearNackFromRequestInTheFirstRequestToOriginServer
// verifies that a NACK (ErrorDetail) on the caller's request is stripped
// before the first v2 request is sent upstream.
func TestOpenStreamShouldClearNackFromRequestInTheFirstRequestToOriginServer(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	var message *v2.DiscoveryRequest
	responseChan := make(chan *v2.DiscoveryResponse)
	wait := make(chan bool)
	// Use an atomic CAS (as the first-request test above does): the send
	// callback runs on the client's goroutine and may fire again on retries,
	// so a plain bool would race and could close wait twice.
	var first int32 = 0
	client := NewMock(
		ctx,
		CallOptions{SendTimeout: time.Nanosecond},
		nil,
		responseChan,
		responseChan,
		responseChan,
		responseChan,
		func(m interface{}) error {
			message = m.(*v2.DiscoveryRequest)
			if atomic.CompareAndSwapInt32(&first, 0, 1) {
				close(wait)
			}
			return nil
		},
		stats.NewMockScope("mock"),
		0,
	)
	node := &core.Node{}
	resp, done := client.OpenStream(
		transport.NewRequestV2(&v2.DiscoveryRequest{
			TypeUrl:     resource.ListenerType,
			Node:        node,
			ErrorDetail: &status.Status{Message: "message", Code: 1},
		}), "aggregated_key")
	<-wait
	assert.NotNil(t, message)
	assert.Equal(t, message.GetNode(), node)
	assert.Equal(t, message.GetTypeUrl(), resource.ListenerType)
	// The NACK must not be forwarded on a fresh stream.
	assert.Nil(t, message.GetErrorDetail())
	done()
	cancel()
	blockUntilClean(resp, func() {})
}
// TestOpenStreamShouldClearNackFromRequestInTheFirstRequestToOriginServerV3
// verifies that a NACK (ErrorDetail) on the caller's request is stripped
// before the first v3 request is sent upstream.
func TestOpenStreamShouldClearNackFromRequestInTheFirstRequestToOriginServerV3(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	var message *discoveryv3.DiscoveryRequest
	responseChan := make(chan *discoveryv3.DiscoveryResponse)
	wait := make(chan bool)
	// Use an atomic CAS (as the v2 first-request test does): the send
	// callback runs on the client's goroutine and may fire again on retries,
	// so a plain bool would race and could close wait twice.
	var first int32 = 0
	client := NewMockV3(
		ctx,
		CallOptions{SendTimeout: time.Nanosecond},
		nil,
		responseChan,
		responseChan,
		responseChan,
		responseChan,
		func(m interface{}) error {
			message = m.(*discoveryv3.DiscoveryRequest)
			if atomic.CompareAndSwapInt32(&first, 0, 1) {
				close(wait)
			}
			return nil
		},
		stats.NewMockScope("mock"),
		0,
	)
	node := &corev3.Node{}
	resp, done := client.OpenStream(
		transport.NewRequestV3(&discoveryv3.DiscoveryRequest{
			TypeUrl:     resourcev3.ListenerType,
			Node:        node,
			ErrorDetail: &status.Status{Message: "message", Code: 1},
		}), "aggregated_key")
	<-wait
	assert.NotNil(t, message)
	assert.Equal(t, message.GetNode(), node)
	assert.Equal(t, message.GetTypeUrl(), resourcev3.ListenerType)
	// The NACK must not be forwarded on a fresh stream.
	assert.Nil(t, message.GetErrorDetail())
	done()
	cancel()
	blockUntilClean(resp, func() {})
}
// TestOpenStreamShouldRetryIfSendFails fails the first v2 send, then checks
// that the client retries the stream (retry counter == 1) and still delivers
// a response on the new stream.
func TestOpenStreamShouldRetryIfSendFails(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	responseChan := make(chan *v2.DiscoveryResponse)
	sendError := fmt.Errorf("")
	// errResp is flipped inside the send callback only, so it is not shared
	// across goroutines: first call errors, subsequent calls respond.
	errResp := true
	response := &v2.DiscoveryResponse{}
	scope := stats.NewMockScope("mock")
	client := createMockClientWithResponse(ctx, time.Second, responseChan, func(m interface{}) error {
		if errResp {
			errResp = false
			return sendError
		}
		select {
		case <-ctx.Done():
			return nil
		default:
			responseChan <- response
			return nil
		}
	}, scope, 0)
	resp, done := client.OpenStream(
		transport.NewRequestV2(&v2.DiscoveryRequest{
			TypeUrl: resource.ListenerType,
			Node:    &core.Node{},
		}), "aggregated_key")
	// done() is also called explicitly below; the defer is a safety net if
	// an assertion panics first (assumes done is safe to call twice).
	defer done()
	_, more := <-resp
	assert.True(t, more)
	assert.Equal(t, int64(1), scope.Snapshot().Counters()["mock.lds.stream_retry+key=aggregated_key"].Value())
	done()
	cancel()
	blockUntilClean(resp, func() {
		close(responseChan)
	})
}
// TestStreamShouldRetryWhenTimeoutMet configures a 1-unit stream timeout and
// polls until the v2 retry counter increments, failing after five seconds.
func TestStreamShouldRetryWhenTimeoutMet(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	responseChan := make(chan *v2.DiscoveryResponse)
	scope := stats.NewMockScope("mock")
	client := createMockClientWithResponse(ctx, time.Second, responseChan, func(m interface{}) error {
		return nil
	}, scope, 1)
	_, done := client.OpenStream(
		transport.NewRequestV2(&v2.DiscoveryRequest{
			TypeUrl: resource.ListenerType,
			Node:    &core.Node{},
		}), "aggregated_key")
	defer done()
	for start := time.Now(); ; {
		var val int64 = 0
		if scope.Snapshot().Counters() != nil &&
			scope.Snapshot().Counters()["mock.lds.stream_retry+key=aggregated_key"] != nil {
			val = scope.Snapshot().Counters()["mock.lds.stream_retry+key=aggregated_key"].Value()
		}
		if val > 0 {
			break
		}
		if time.Since(start) >= time.Second*5 {
			t.Fail()
			break
		}
		// Sleep briefly between polls; the original spun flat out and
		// pegged a CPU for up to five seconds.
		time.Sleep(5 * time.Millisecond)
	}
	cancel()
}
// TestStreamShouldRetryWhenTimeoutMetV3 configures a 1-unit stream timeout and
// polls until the retry counter increments, failing after five seconds.
func TestStreamShouldRetryWhenTimeoutMetV3(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	responseChan := make(chan *discoveryv3.DiscoveryResponse)
	scope := stats.NewMockScope("mock")
	client := createMockClientWithResponseV3(ctx, time.Second, responseChan, func(m interface{}) error {
		return nil
	}, scope, 1)
	_, done := client.OpenStream(
		transport.NewRequestV3(&discoveryv3.DiscoveryRequest{
			TypeUrl: resourcev3.ListenerType,
			Node:    &corev3.Node{},
		}), "aggregated_key")
	defer done()
	for start := time.Now(); ; {
		var val int64 = 0
		if scope.Snapshot().Counters() != nil &&
			scope.Snapshot().Counters()["mock.lds.stream_retry+key=aggregated_key"] != nil {
			val = scope.Snapshot().Counters()["mock.lds.stream_retry+key=aggregated_key"].Value()
		}
		if val > 0 {
			break
		}
		if time.Since(start) >= time.Second*5 {
			t.Fail()
			break
		}
		// Sleep briefly between polls; the original spun flat out and
		// pegged a CPU for up to five seconds.
		time.Sleep(5 * time.Millisecond)
	}
	cancel()
}
// TestOpenStreamShouldRetryIfSendFailsV3 fails the first v3 send, then checks
// that the client retries the stream (retry counter == 1) and still delivers
// a response on the new stream.
func TestOpenStreamShouldRetryIfSendFailsV3(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	responseChan := make(chan *discoveryv3.DiscoveryResponse)
	sendError := fmt.Errorf("")
	// errResp is flipped inside the send callback only: first call errors,
	// subsequent calls push a response.
	errResp := true
	response := &discoveryv3.DiscoveryResponse{}
	scope := stats.NewMockScope("mock")
	client := createMockClientWithResponseV3(ctx, time.Second, responseChan, func(m interface{}) error {
		if errResp {
			errResp = false
			return sendError
		}
		select {
		case <-ctx.Done():
			return nil
		default:
			responseChan <- response
			return nil
		}
	}, scope, 0)
	resp, done := client.OpenStream(
		transport.NewRequestV3(&discoveryv3.DiscoveryRequest{
			TypeUrl: resourcev3.ListenerType,
			Node:    &corev3.Node{},
		}), "aggregated_key")
	_, more := <-resp
	assert.True(t, more)
	assert.Equal(t, int64(1), scope.Snapshot().Counters()["mock.lds.stream_retry+key=aggregated_key"].Value())
	done()
	cancel()
	blockUntilClean(resp, func() {
		close(responseChan)
	})
}
// TestOpenStreamShouldSendTheResponseOnTheChannel checks that a response fed
// by the origin mock is delivered to the caller's v2 response channel.
func TestOpenStreamShouldSendTheResponseOnTheChannel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	responseChan := make(chan *v2.DiscoveryResponse)
	response := &v2.DiscoveryResponse{}
	// Every outgoing request is answered with the canned response, unless
	// the context is already cancelled (avoids blocking on a dead stream).
	client := createMockClientWithResponse(ctx, time.Second, responseChan, func(m interface{}) error {
		select {
		case <-ctx.Done():
			return nil
		default:
			responseChan <- response
			return nil
		}
	}, stats.NewMockScope("mock"), 0)
	resp, done := client.OpenStream(
		transport.NewRequestV2(&v2.DiscoveryRequest{
			TypeUrl: resource.ListenerType,
			Node:    &core.Node{},
		}), "aggregated_key")
	assert.NotNil(t, resp)
	val := <-resp
	// The delivered payload must be the exact response object the mock sent.
	assert.Equal(t, val.Get().V2, response)
	done()
	cancel()
	blockUntilClean(resp, func() {
		close(responseChan)
	})
}
// TestOpenStreamShouldSendTheResponseOnTheChannelV3 checks that a response fed
// by the origin mock is delivered to the caller's v3 response channel.
func TestOpenStreamShouldSendTheResponseOnTheChannelV3(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	responseChan := make(chan *discoveryv3.DiscoveryResponse)
	response := &discoveryv3.DiscoveryResponse{}
	// Every outgoing request is answered with the canned response, unless
	// the context is already cancelled (avoids blocking on a dead stream).
	client := createMockClientWithResponseV3(ctx, time.Second, responseChan, func(m interface{}) error {
		select {
		case <-ctx.Done():
			return nil
		default:
			responseChan <- response
			return nil
		}
	}, stats.NewMockScope("mock"), 0)
	resp, done := client.OpenStream(
		transport.NewRequestV3(&discoveryv3.DiscoveryRequest{
			TypeUrl: resourcev3.ListenerType,
			Node:    &corev3.Node{},
		}), "aggregated_key")
	assert.NotNil(t, resp)
	val := <-resp
	// The delivered payload must be the exact response object the mock sent.
	assert.Equal(t, val.Get().V3, response)
	done()
	cancel()
	blockUntilClean(resp, func() {
		close(responseChan)
	})
}
// TestOpenStreamShouldSendTheNextRequestWithUpdatedVersionAndNonce drives
// five response/ack round trips and checks that every outgoing v2 request
// echoes the version and nonce of the previously delivered response.
func TestOpenStreamShouldSendTheNextRequestWithUpdatedVersionAndNonce(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	responseChan := make(chan *v2.DiscoveryResponse)
	lastAppliedVersion := ""
	index := 0
	client := createMockClientWithResponse(ctx, time.Second, responseChan, func(m interface{}) error {
		select {
		case <-ctx.Done():
			return nil
		default:
		}
		message := m.(*v2.DiscoveryRequest)
		// Each request must ack the previous response's version and nonce.
		assert.Equal(t, message.GetVersionInfo(), lastAppliedVersion)
		assert.Equal(t, message.GetResponseNonce(), lastAppliedVersion)
		response := &v2.DiscoveryResponse{
			VersionInfo: strconv.Itoa(index),
			Nonce:       strconv.Itoa(index),
			TypeUrl:     resource.ListenerType,
		}
		lastAppliedVersion = strconv.Itoa(index)
		index++
		// Send the next response unless the stream is being torn down.
		select {
		case responseChan <- response:
		case <-ctx.Done():
			return nil
		}
		return nil
	}, stats.NewMockScope("mock"), 0)
	resp, done := client.OpenStream(
		transport.NewRequestV2(&v2.DiscoveryRequest{
			TypeUrl: resource.ListenerType,
			Node:    &core.Node{},
		}), "aggregated_key")
	// done() is also called explicitly below; the defer is a safety net if
	// an assertion panics first (assumes done is safe to call twice).
	defer done()
	assert.NotNil(t, resp)
	for i := 0; i < 5; i++ {
		val := <-resp
		assert.Equal(t, val.GetPayloadVersion(), strconv.Itoa(i))
		assert.Equal(t, val.GetNonce(), strconv.Itoa(i))
	}
	done()
	cancel()
	blockUntilClean(resp, func() {
		close(responseChan)
	})
}
// TestOpenStreamShouldSendTheNextRequestWithUpdatedVersionAndNonceV3 drives
// five response/ack round trips and checks that every outgoing v3 request
// echoes the version and nonce of the previously delivered response.
func TestOpenStreamShouldSendTheNextRequestWithUpdatedVersionAndNonceV3(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	responseChan := make(chan *discoveryv3.DiscoveryResponse)
	lastAppliedVersion := ""
	index := 0
	client := createMockClientWithResponseV3(ctx, time.Second, responseChan, func(m interface{}) error {
		select {
		case <-ctx.Done():
			return nil
		default:
		}
		message := m.(*discoveryv3.DiscoveryRequest)
		// Each request must ack the previous response's version and nonce.
		assert.Equal(t, message.GetVersionInfo(), lastAppliedVersion)
		assert.Equal(t, message.GetResponseNonce(), lastAppliedVersion)
		response := &discoveryv3.DiscoveryResponse{
			VersionInfo: strconv.Itoa(index),
			Nonce:       strconv.Itoa(index),
			// Use the v3 listener type URL on this v3 stream; the original
			// mistakenly used the v2 constant (resource.ListenerType).
			TypeUrl: resourcev3.ListenerType,
		}
		lastAppliedVersion = strconv.Itoa(index)
		index++
		// Send the next response unless the stream is being torn down.
		select {
		case responseChan <- response:
		case <-ctx.Done():
			return nil
		}
		return nil
	}, stats.NewMockScope("mock"), 0)
	resp, done := client.OpenStream(
		transport.NewRequestV3(&discoveryv3.DiscoveryRequest{
			TypeUrl: resourcev3.ListenerType,
			Node:    &corev3.Node{},
		}), "aggregated_key")
	assert.NotNil(t, resp)
	for i := 0; i < 5; i++ {
		val := <-resp
		assert.Equal(t, val.GetPayloadVersion(), strconv.Itoa(i))
		assert.Equal(t, val.GetNonce(), strconv.Itoa(i))
	}
	done()
	cancel()
	blockUntilClean(resp, func() {
		close(responseChan)
	})
}
// TestOpenStreamShouldRetryWhenSendMsgBlocks makes the first v2 send block
// until shutdown; with a nanosecond SendTimeout the client must abandon that
// stream, retry, and deliver response2 from the second stream.
func TestOpenStreamShouldRetryWhenSendMsgBlocks(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	responseChan := make(chan *v2.DiscoveryResponse)
	// CAS distinguishes the first (blocking) send from later sends and is
	// safe if callbacks overlap across stream retries.
	var first int32 = 0
	response2 := &v2.DiscoveryResponse{VersionInfo: "2"}
	client := createMockClientWithResponse(ctx, time.Nanosecond, responseChan, func(m interface{}) error {
		if atomic.CompareAndSwapInt32(&first, 0, 1) {
			// Simulate a wedged stream: block until the test cancels ctx.
			<-ctx.Done()
			return nil
		}
		select {
		case <-ctx.Done():
			return nil
		default:
			responseChan <- response2
			return nil
		}
	}, stats.NewMockScope("mock"), 0)
	respCh, done := client.OpenStream(transport.NewRequestV2(&v2.DiscoveryRequest{
		TypeUrl: resource.ListenerType,
		Node:    &core.Node{},
	}), "aggregated_key")
	resp, ok := <-respCh
	assert.True(t, ok)
	// The response must come from the retried stream.
	assert.Equal(t, resp.Get().V2.VersionInfo, response2.VersionInfo)
	done()
	cancel()
	blockUntilClean(respCh, func() {})
}
// TestOpenStreamShouldRetryWhenSendMsgBlocksV3 makes the first v3 send block
// until shutdown; with a nanosecond SendTimeout the client must abandon that
// stream, retry, and deliver response2 from the second stream.
func TestOpenStreamShouldRetryWhenSendMsgBlocksV3(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	// cancel is called explicitly below too; cancelling twice is harmless.
	defer cancel()
	responseChan := make(chan *discoveryv3.DiscoveryResponse)
	// CAS distinguishes the first (blocking) send from later sends and is
	// safe if callbacks overlap across stream retries.
	var first int32 = 0
	response2 := &discoveryv3.DiscoveryResponse{VersionInfo: "2"}
	client := createMockClientWithResponseV3(ctx, time.Nanosecond, responseChan, func(m interface{}) error {
		if atomic.CompareAndSwapInt32(&first, 0, 1) {
			// Simulate a wedged stream: block until the test cancels ctx.
			<-ctx.Done()
			return nil
		}
		select {
		case <-ctx.Done():
			return nil
		default:
			responseChan <- response2
			return nil
		}
	}, stats.NewMockScope("mock"), 0)
	respCh, done := client.OpenStream(transport.NewRequestV3(&discoveryv3.DiscoveryRequest{
		TypeUrl: resourcev3.ListenerType,
		Node:    &corev3.Node{},
	}), "aggregated_key")
	resp, ok := <-respCh
	assert.True(t, ok)
	// The response must come from the retried stream.
	assert.Equal(t, response2.VersionInfo, resp.Get().V3.VersionInfo)
	done()
	cancel()
	blockUntilClean(respCh, func() {})
}
func TestKeepaliveSettingsUnset(t *testing.T) {
params := getKeepaliveParams(context.Background(), log.MockLogger, CallOptions{})
assert.Equal(t, 5*time.Minute, params.Time)
assert.Equal(t, 0*time.Second, params.Timeout)
assert.True(t, params.PermitWithoutStream)
}
func TestKeepaliveSettingsSet(t *testing.T) {
params := getKeepaliveParams(context.Background(), log.MockLogger, CallOptions{
UpstreamKeepaliveTimeout: "10m",
})
assert.Equal(t, 10*time.Minute, params.Time)
assert.Equal(t, 0*time.Second, params.Timeout)
assert.True(t, params.PermitWithoutStream)
}
// createMockClient returns a v2 mock upstream client whose send callback is a
// no-op and whose per-xDS response channels are distinct and never fed.
func createMockClient(ctx context.Context) Client {
	noopSend := func(m interface{}) error { return nil }
	return NewMock(
		ctx,
		CallOptions{SendTimeout: time.Nanosecond},
		nil,
		make(chan *v2.DiscoveryResponse),
		make(chan *v2.DiscoveryResponse),
		make(chan *v2.DiscoveryResponse),
		make(chan *v2.DiscoveryResponse),
		noopSend,
		stats.NewMockScope("mock"),
		0)
}
// createMockClientWithError returns a v2 mock client that fails exactly one
// stream creation before succeeding, reporting stats to the given scope.
func createMockClientWithError(ctx context.Context, scope tally.Scope) Client {
	streamErrs := []error{fmt.Errorf("error")}
	noopSend := func(m interface{}) error { return nil }
	return NewMock(
		ctx,
		CallOptions{SendTimeout: time.Nanosecond},
		streamErrs,
		make(chan *v2.DiscoveryResponse),
		make(chan *v2.DiscoveryResponse),
		make(chan *v2.DiscoveryResponse),
		make(chan *v2.DiscoveryResponse),
		noopSend,
		scope,
		0)
}
// createMockClientWithResponse returns a v2 mock client that serves every xDS
// type from the single channel r and invokes sendCb for each outgoing request.
func createMockClientWithResponse(
	ctx context.Context,
	sendTimeout time.Duration,
	r chan *v2.DiscoveryResponse,
	sendCb func(m interface{}) error,
	scope tally.Scope,
	timeout int64) Client {
	opts := CallOptions{SendTimeout: sendTimeout}
	return NewMock(ctx, opts, nil, r, r, r, r, sendCb, scope, timeout)
}
// createMockClientV3 returns a v3 mock upstream client whose send callback is
// a no-op and whose per-xDS response channels are distinct and never fed.
func createMockClientV3(ctx context.Context) Client {
	noopSend := func(m interface{}) error { return nil }
	return NewMockV3(
		ctx,
		CallOptions{SendTimeout: time.Nanosecond},
		nil,
		make(chan *discoveryv3.DiscoveryResponse),
		make(chan *discoveryv3.DiscoveryResponse),
		make(chan *discoveryv3.DiscoveryResponse),
		make(chan *discoveryv3.DiscoveryResponse),
		noopSend,
		stats.NewMockScope("mock"),
		0)
}
// createMockClientWithErrorV3 returns a v3 mock client that fails exactly one
// stream creation before succeeding, reporting stats to the given scope.
func createMockClientWithErrorV3(ctx context.Context, scope tally.Scope) Client {
	streamErrs := []error{fmt.Errorf("error")}
	noopSend := func(m interface{}) error { return nil }
	return NewMockV3(
		ctx,
		CallOptions{SendTimeout: time.Nanosecond},
		streamErrs,
		make(chan *discoveryv3.DiscoveryResponse),
		make(chan *discoveryv3.DiscoveryResponse),
		make(chan *discoveryv3.DiscoveryResponse),
		make(chan *discoveryv3.DiscoveryResponse),
		noopSend,
		scope,
		0)
}
// createMockClientWithResponseV3 returns a v3 mock client that serves every
// xDS type from the single channel r and invokes sendCb for each outgoing
// request.
func createMockClientWithResponseV3(
	ctx context.Context,
	sendTimeout time.Duration,
	r chan *discoveryv3.DiscoveryResponse,
	sendCb func(m interface{}) error,
	scope tally.Scope,
	timeout int64) Client {
	opts := CallOptions{SendTimeout: sendTimeout}
	return NewMockV3(ctx, opts, nil, r, r, r, r, sendCb, scope, timeout)
}
// blockUntilClean drains resp until the upstream client closes it, then runs
// tearDown; this guarantees no producer goroutine is still writing before
// goleak verification runs.
func blockUntilClean(resp <-chan transport.Response, tearDown func()) {
	for {
		if _, open := <-resp; !open {
			break
		}
	}
	tearDown()
}
|
package main
// addStrings returns the decimal sum of two non-negative integers given as
// digit strings, without converting them to numeric types.
func addStrings(num1 string, num2 string) string {
	var digits []byte
	p, q, carry := len(num1)-1, len(num2)-1, 0
	for p >= 0 || q >= 0 || carry > 0 {
		if p >= 0 {
			carry += int(num1[p] - '0')
			p--
		}
		if q >= 0 {
			carry += int(num2[q] - '0')
			q--
		}
		digits = append(digits, byte(carry%10+'0'))
		carry /= 10
	}
	// Digits were produced least-significant first; flip them in place.
	for l, r := 0, len(digits)-1; l < r; l, r = l+1, r-1 {
		digits[l], digits[r] = digits[r], digits[l]
	}
	return string(digits)
}
// reverse flips bytes[l..r] (inclusive) in place.
func reverse(bytes []byte, l, r int) {
	for l < r {
		bytes[l], bytes[r] = bytes[r], bytes[l]
		l++
		r--
	}
}
/*
Summary
1. Written after studying a top-voted solution; this add-with-carry template is
   elegant and also applies directly to linked-list addition problems.
*/
|
package main
import "fmt"
// main demonstrates rotateRight on the list 120 -> 110 -> 101 -> 10,
// rotating it right by 3 places and printing the four resulting values.
func main() {
	list1 := ListNode{10, nil}
	list2 := ListNode{101, &list1}
	list3 := ListNode{110, &list2}
	list4 := ListNode{120, &list3}
	ret := rotateRight(&list4, 3)
	for node := ret; node != nil; node = node.Next {
		fmt.Println(node.Val)
	}
}
// ListNode is a node of a singly-linked list (the standard LeetCode shape).
type ListNode struct {
	Val  int       // payload value
	Next *ListNode // next node; nil at the tail
}
// rotateRight rotates the list right by k places and returns the new head.
// The node k positions from the tail becomes the head; the old tail links
// back to the old head.
func rotateRight(head *ListNode, k int) *ListNode {
	// Compute the length inline (the package-level len helper does the same).
	length := 0
	for n := head; n != nil; n = n.Next {
		length++
	}
	if length == 0 {
		return head
	}
	k %= length
	if k == 0 {
		return head
	}
	// Advance fast k nodes ahead; then walk both pointers until fast hits
	// the tail, leaving slow just before the new head.
	fast, slow := head, head
	for i := 0; i < k; i++ {
		fast = fast.Next
	}
	for fast.Next != nil {
		fast = fast.Next
		slow = slow.Next
	}
	// Close the ring at the old tail, cut it just before the new head.
	fast.Next = head
	newHead := slow.Next
	slow.Next = nil
	return newHead
}
// len reports the number of nodes in the list.
// NOTE(review): this shadows the builtin len within the package; consider
// renaming (e.g. listLen) together with its callers.
func len(head *ListNode) int {
	count := 0
	for node := head; node != nil; node = node.Next {
		count++
	}
	return count
}
//Rotate List
//Given the head of a linked list, rotate the list to the right by k places.
//f(n) = f(n-1).Next
//Approach: since this is a linked list, after rotating right by k the k-th node from the tail becomes the new head; find that node.
|
/*
* Copyright (c) 2014 Michael Wendland
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Michael Wendland <michael@michiwend.com>
*/
/*
package goplaceholder implements a simple library to generate placeholder
images using freetype-go.
*/
package goplaceholder
import (
"errors"
"image"
"image/color"
"image/draw"
"io/ioutil"
"math"
"strconv"
"code.google.com/p/freetype-go/freetype"
"code.google.com/p/freetype-go/freetype/raster"
)
// Layout constants for placeholder rendering.
const (
	maxTextBoundsToImageRatioY = 0.23  // max text height as a fraction of image height
	maxTextBoundsToImageRatioX = 0.64  // max text width as a fraction of image width
	dpi                        = 72.00 // render resolution in dots per inch
	testFontSize               = 1.00  // use higher values (>=100) when hinting is enabled
)
// Placeholder returns a placeholder image with the given text or, if text was
// an empty string, with the image bounds in the form "800 x 600". The text is
// rendered centered in the foreground color over the background color; if
// only one of width/height is positive, the image is square.
func Placeholder(text, ttfPath string, foreground, background color.RGBA, width, height int) (image.Image, error) {
	if width < 0 || height < 0 {
		return nil, errors.New("negative values not allowed")
	}
	if width == 0 && height == 0 {
		return nil, errors.New("either width or height needs to be > 0")
	}
	// A single non-zero dimension produces a square image.
	if width == 0 {
		width = height
	} else if height == 0 {
		height = width
	}
	if text == "" {
		text = strconv.Itoa(width) + " x " + strconv.Itoa(height)
	}
	fontBytes, err := ioutil.ReadFile(ttfPath)
	if err != nil {
		return nil, err
	}
	font, err := freetype.ParseFont(fontBytes)
	if err != nil {
		return nil, err
	}
	// Renamed fg_img/bg_img -> fgImg/bgImg: Go naming uses mixedCaps.
	fgImg := image.NewUniform(foreground)
	bgImg := image.NewUniform(background)
	testImg := image.NewRGBA(image.Rect(0, 0, 0, 0))
	c := freetype.NewContext()
	c.SetDPI(dpi)
	c.SetFont(font)
	c.SetFontSize(testFontSize)
	c.SetSrc(fgImg)
	c.SetDst(testImg)
	c.SetClip(testImg.Bounds())
	c.SetHinting(freetype.NoHinting)
	// first draw with testFontSize to get the text extent
	var textExtent raster.Point
	drawPoint := freetype.Pt(0, int(c.PointToFix32(testFontSize)>>8))
	textExtent, err = c.DrawString(text, drawPoint)
	if err != nil {
		return nil, err
	}
	// calculate font scales to stay within the bounds
	scaleX := float64(c.PointToFix32(float64(width)*maxTextBoundsToImageRatioX)) / float64(textExtent.X)
	scaleY := float64(c.PointToFix32(float64(height)*maxTextBoundsToImageRatioY)) / float64(textExtent.Y)
	fontsize := testFontSize * math.Min(scaleX, scaleY)
	// draw with scaled fontsize to get the real text extent. This could also be
	// done by scaling up the textExtent from the previous drawing but it's less
	// precise.
	c.SetFontSize(fontsize)
	drawPoint = freetype.Pt(0, 0)
	textExtent, err = c.DrawString(text, drawPoint)
	if err != nil {
		return nil, err
	}
	// finally draw the centered text (the /2.6 offset approximates vertical
	// centering of the baseline for typical fonts).
	drawPoint = freetype.Pt(
		int(c.PointToFix32(float64(width)/2.0)-textExtent.X/2)>>8,
		int(c.PointToFix32(float64(height)/2.0+fontsize/2.6))>>8)
	img := image.NewRGBA(image.Rect(0, 0, width, height))
	draw.Draw(img, img.Bounds(), bgImg, image.ZP, draw.Src)
	c.SetDst(img)
	c.SetClip(img.Bounds())
	_, err = c.DrawString(text, drawPoint)
	if err != nil {
		return nil, err
	}
	return img, nil
}
|
package usecase
import (
"HttpBigFilesServer/MainApplication/internal/files/model"
"HttpBigFilesServer/MainApplication/internal/files/repository"
"HttpBigFilesServer/MainApplication/pkg/logger"
"io"
"os"
"time"
)
// Interface is the file usecase contract: serve a stored file for download
// (optionally from a byte offset) and persist an uploaded stream.
type Interface interface {
	// Download returns the stored metadata for id together with an open
	// *os.File positioned seeker bytes from the start.
	Download(id uint64, seeker uint64) (model.File, *os.File, error)
	// Upload stores the stream under a freshly generated id and returns the
	// recorded metadata. chunk is forwarded to the file store (presumably
	// the read chunk size — confirm against repository.InterfaceFile).
	Upload(file io.ReadCloser, name string, size uint64, chunk int) (model.File, error)
}
// usecase wires the metadata store, the file store and a logger together to
// implement Interface.
type usecase struct {
	info repository.InterfaceDataBase // file metadata persistence
	file repository.InterfaceFile     // file content storage
	log  logger.Interface             // application logger
}
// New builds a usecase backed by the given metadata store, file store and
// logger.
func New(db repository.InterfaceDataBase, sys repository.InterfaceFile, l logger.Interface) Interface {
	u := usecase{info: db, file: sys, log: l}
	return u
}
// Download looks up the stored metadata for id and opens the underlying file
// positioned at seeker; both lookups must succeed for a non-error result.
func (u usecase) Download(id uint64, seeker uint64) (model.File, *os.File, error) {
	meta, err := u.info.Get(id)
	if err != nil {
		return model.File{}, nil, err
	}
	fd, err := u.file.Get(id, seeker)
	if err != nil {
		return model.File{}, nil, err
	}
	return meta, fd, nil
}
// Upload stores the stream under a freshly generated id, records its
// metadata, and returns the stored file's description. A failed save is
// cleaned up best-effort before the error is returned.
func (u usecase) Upload(file io.ReadCloser, name string, size uint64, chunk int) (model.File, error) {
	fid, err := u.info.GenID()
	if err != nil {
		return model.File{}, ErrorCouldNotGenID
	}
	path, err := u.file.Save(fid, file, size, chunk)
	if err != nil {
		// Remove whatever was partially written, if anything.
		if path != "" {
			u.file.Remove(path)
		}
		return model.File{}, err
	}
	fileInfo := model.File{
		Id:       fid,
		Name:     name,
		Path:     path,
		Uploaded: uint64(time.Now().Unix()),
		Size:     size,
	}
	if err := u.info.Save(fileInfo); err != nil {
		return model.File{}, ErrorCouldNotSaveFileInfo
	}
	return fileInfo, nil
}
|
package main
import "fmt"
// main exercises isNumber against a list of inputs with their expected
// verdicts; each output line prints true when the implementation agrees.
func main() {
	cases := []struct {
		in   string
		want bool
	}{
		{"1.1+", false},
		{".-4", false},
		{"-1E-16", true},
		{"1+2", false},
		{"3 .", false},
		{".", false},
		{". 1", false},
		{"1 .", false},
		{"1 ", true},
		{"3.", true},
		{"3. ", true},
		{".3", true},
		{" .3", true},
		{"e9", false},
		{"0", true},
		{"e", false},
		{".", false},
		{" .1 ", true},
		{" ", false},
		{"+100", true},
		{"5e2", true},
		{"-123", true},
		{"3.1416", true},
		{"0123", true},
		{"12e", false},
		{"1a3.14", false},
		{"1.2.3", false},
		{"+-5", false},
		{"12e+5.4", false},
	}
	for _, c := range cases {
		fmt.Println(isNumber(c.in) == c.want)
	}
}
// isNumber reports whether s is a valid decimal number: optional sign,
// optional fraction, optional 'e'/'E' exponent with its own optional sign,
// and leading/trailing spaces allowed (interior spaces are not).
//
// Flag convention: -1 = not seen, 1 = seen but not yet "completed" by a
// digit, 2 = completed.
//	opflag - sign of the current part
//	eflag  - exponent marker
//	pflag  - decimal point
//	nflag  - digits of the current part (starts at 1 = none seen)
func isNumber(s string) bool {
	opflag := -1
	eflag := -1
	pflag := -1
	nflag := 1
	for i := 0; i < len(s); i++ {
		v := s[i]
		if v >= '0' && v <= '9' {
			// A digit "completes" any pending sign, exponent, or point.
			nflag = 2
			if opflag != -1 {
				opflag = 2
			}
			if eflag != -1 {
				eflag = 2
			}
			if pflag != -1 {
				pflag = 2
			}
		} else if v == ' ' {
			// Space right after an unfinished exponent or sign is invalid.
			if eflag == 1 {
				return false
			}
			if opflag == 1 {
				return false
			}
			// Once the number proper has started (a digit or point seen),
			// only spaces may follow to the end of the string.
			if nflag == 2 || pflag == 2 || pflag == 1 {
				for j := i; j < len(s); j++ {
					if s[j] != ' ' {
						return false
					}
				}
			}
		} else if v == '+' || v == '-' {
			// One sign per part, and only before any digit or point.
			if opflag != -1 {
				return false
			}
			if nflag == 2 {
				return false
			}
			if pflag == 1 {
				return false
			}
			opflag = 1
		} else if v == 'e' || v == 'E' {
			// A single exponent marker, only after at least one mantissa digit.
			if eflag != -1 {
				return false
			}
			if nflag != 2 {
				return false
			}
			eflag = 1
			// Reset part state: a new sign is allowed, no point may follow
			// (pflag forced to "completed"), and exponent digits are required.
			opflag = -1
			pflag = 2
			nflag = -1
		} else if v == '.' {
			// At most one point, and never after the exponent marker.
			if pflag != -1 {
				return false
			}
			// Deliberately disabled: a leading point is legal (".3" is a
			// number); a lone "." is rejected by the final nflag check.
			//if nflag != 2 {
			//	return false
			//}
			pflag = 1
		} else {
			return false
		}
	}
	// Reject a dangling sign, a dangling exponent, or a string with no digits.
	if opflag == 1 || eflag == 1 || nflag == 1 {
		return false
	}
	return true
}
|
package main
import (
"context"
"database/sql"
"flag"
"log"
_ "github.com/lib/pq"
)
var (
	// planFile is the path of the benchmark plan, set via the -plan flag.
	planFile string
)
// main reads the benchmark plan, connects to the database, and runs every
// query benchmark in order, aborting on the first failure.
func main() {
	flag.StringVar(&planFile, "plan", "plan.yaml", "gosqlbencher plan file")
	flag.Parse()
	pl, err := readPlan(planFile)
	if err != nil {
		log.Fatalf("failed to read plan: %v", err)
	}
	log.Printf("Benchmarking\ndsn: %v\nNumWorker:%v\n",
		pl.DataSourceName, pl.NumWorker)
	db := initDB(pl)
	defer db.Close()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	for i, query := range pl.Queries {
		err = benchmarQuery(ctx, db, pl, query)
		if err != nil {
			// Fixed message typo ("benchmarck" -> "benchmark") and use %d
			// for the integer index.
			log.Fatalf("benchmark query #%d failed: %v", i, err)
		}
	}
}
// initDB opens the postgres database named by the plan's DSN and verifies
// connectivity with a ping; any failure terminates the process.
func initDB(pl plan) *sql.DB {
	log.Println("Open DB")
	database, err := sql.Open("postgres", pl.DataSourceName)
	if err != nil {
		log.Fatalf("failed to open db: %v", err)
	}
	log.Println("Ping DB")
	if err = database.Ping(); err != nil {
		log.Fatalf("failed to ping db: %v", err)
	}
	return database
}
|
package dingtalk
import (
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"github.com/weiqiang333/infra-skywalking-webhook/configs"
"log"
"net/http"
"net/url"
"strings"
"time"
)
/*
[{
"scopeId": 2,
"name": "growing-segmentation-pid:15149@seg3",
"id0": 47,
"id1": 0,
"alarmMessage": "Response time of service instance growing-segmentation-pid:15149@seg3 is more than 1000ms in 2 minutes of last 10 minutes",
"startTime": 1568888544862
}, {
"scopeId": 2,
"name": "growing-segmentation-pid:11847@seg2",
"id0": 46,
"id1": 0,
"alarmMessage": "Response time of service instance growing-segmentation-pid:11847@seg2 is more than 1000ms in 2 minutes of last 10 minutes",
"startTime": 1568888544862
}]
*/
// message is one SkyWalking alarm entry from the webhook payload (see the
// sample JSON above).
type message struct {
	ScopeId      int    // SkyWalking scope identifier
	Name         string // alerting entity, e.g. service instance name
	Id0          int
	Id1          int
	AlarmMessage string // human-readable alarm description
	StartTime    int    // alarm start time (epoch milliseconds in the sample)
}
// Dingtalk posts a SkyWalking alarm payload to a DingTalk robot webhook.
// data is the raw JSON alarm list pushed by SkyWalking's alarm hook.
func Dingtalk(data []byte) error {
	// Sign the request: DingTalk robots configured with a secret require
	// sign = urlencode(base64(hmac-sha256(secret, "<millis>\n<secret>"))).
	secret := configs.V.GetString("secret")
	timeStampNow := time.Now().UnixNano() / 1000000
	signStr := fmt.Sprintf("%d\n%s", timeStampNow, secret)
	hash := hmac.New(sha256.New, []byte(secret))
	hash.Write([]byte(signStr))
	sign := url.QueryEscape(base64.StdEncoding.EncodeToString(hash.Sum(nil)))
	var m []message
	if err := json.Unmarshal(data, &m); err != nil {
		// Previously the error was only printed and an empty alert was sent
		// anyway; a malformed payload is now reported to the caller.
		return err
	}
	payload, summary := createContent(m)
	body := strings.NewReader(payload)
	token := configs.V.GetString("token")
	// Fixed the query string: it previously contained the mojibake
	// "×tamp=" (an HTML-entity-corrupted "&timestamp="), which broke
	// both the timestamp parameter and signature validation.
	resp, err := http.Post(
		fmt.Sprintf("https://oapi.dingtalk.com/robot/send?access_token=%s&timestamp=%v&sign=%s", token, timeStampNow, sign),
		"application/json", body)
	if err != nil {
		return err
	}
	// Close the body so the HTTP transport can reuse the connection.
	defer resp.Body.Close()
	log.Println(resp.StatusCode, summary)
	return nil
}
/*
状态: notify
等级: P1
告警: Skywalking
growing-segmentation-pid:6494@seg1 id: 44 time: 1568945304861
growing-segmentation-pid:6908@seg0 id: 43 time: 1568945304861
Item values:
0 Response time of service instance growing-segmentation-pid:6494@seg1 is more than 1000ms in 2 minutes of last 10 minutes
1 Response time of service instance growing-segmentation-pid:6908@seg0 is more than 1000ms in 2 minutes of last 10 minutes
故障修复:
*/
// createContent renders the alarm list as a DingTalk "text" message.
// It returns the JSON request body and the plain-text alarm summary.
// (Parameter renamed from "message", which shadowed the message type.)
func createContent(msgs []message) (string, string) {
	var b strings.Builder
	for _, v := range msgs {
		fmt.Fprintf(&b, "-----来自SkyWalking的告警-----\n【time】: %v\n【scope】: %v\n【name】: %v\n【message】: %v\n\n",
			v.StartTime, v.ScopeId, v.Name, v.AlarmMessage)
	}
	contents := b.String()
	// Build the body with encoding/json so the content is properly escaped.
	// The previous fmt.Sprintf template produced invalid JSON: raw newlines
	// inside the string literal, trailing commas, and isAtAll as "" rather
	// than a boolean.
	body := map[string]interface{}{
		"msgtype": "text",
		"text":    map[string]string{"content": contents},
		"at":      map[string]interface{}{"isAtAll": false},
	}
	data, err := json.Marshal(body)
	if err != nil {
		// Marshaling maps of plain strings cannot realistically fail; keep
		// the summary usable regardless.
		return "", contents
	}
	return string(data), contents
}
|
package setgame
// MakeSet reports whether three cards form a valid Set. Cards are encoded
// in base 3, one trit per attribute; a valid Set requires every attribute
// to be either identical or pairwise distinct across the three cards.
func MakeSet(a int, b int, c int) bool {
	if a >= 3 || b >= 3 || c >= 3 {
		// Check the lowest trit, then recurse on the remaining trits.
		return MakeSet(a%3, b%3, c%3) && MakeSet(a/3, b/3, c/3)
	}
	allEqual := a == b && b == c
	allDistinct := a != b && b != c && a != c
	return allEqual || allDistinct
}
// getMatch returns the unique third card that completes a Set with a and b
// (cards encoded base 3, one trit per attribute).
func getMatch(a int, b int) int {
	if a >= 3 || b >= 3 {
		// Solve the lowest trit, then the higher trits shifted up one place.
		return getMatch(a%3, b%3) + 3*getMatch(a/3, b/3)
	}
	// If a == b the match is that same value; otherwise it is the remaining
	// member of {0, 1, 2}. Both cases reduce to (6 - a - b) mod 3.
	return (6 - a - b) % 3
}
// HasSet reports whether any three of the given cards form a valid Set.
// For each pair it computes the unique completing card and scans the
// earlier cards for it, so each unordered triple is examined exactly once.
func HasSet(cards []int) bool {
	for i, ci := range cards {
		for j := 0; j < i; j++ {
			want := getMatch(ci, cards[j])
			for _, ck := range cards[:j] {
				if ck == want {
					return true
				}
			}
		}
	}
	return false
}
func PositionsMakeSet(cards []int, positions []int) bool {
if max(positions) >= len(cards) {
return false
}
return MakeSet(cards[positions[0]], cards[positions[1]], cards[positions[2]])
}
// RemoveAndNormalize moves the cards at the given table positions into the
// discard pile (gone), removes them from the table, then refills the table
// from the deck via Normalize. It returns the updated table, gone and deck
// slices, plus a bool that is true when no Set remains on the table even
// after refilling (i.e. the game is over).
// NOTE(review): appendByPosition/removeByPosition are defined elsewhere in
// this package and are assumed to copy/remove by index — confirm there.
func RemoveAndNormalize(table []int, gone []int, deck []int, positions []int) ([]int, []int, []int, bool) {
	gone = appendByPosition(gone, table, positions)
	table = removeByPosition(table, positions)
	return Normalize(table, gone, deck)
}
// Normalize deals cards from the deck until the table holds at least 12
// cards and contains a Set, or the deck runs out. It returns the updated
// table, gone and deck, plus true when the table still has no Set
// (game over).
func Normalize(table []int, gone []int, deck []int) ([]int, []int, []int, bool) {
	for len(deck) > 0 {
		if len(table) >= 12 && HasSet(table) {
			break
		}
		table, deck = showMoreFromDeck(table, deck)
	}
	return table, gone, deck, !HasSet(table)
}
// showMoreFromDeck moves the top three cards from the deck onto the table.
// If the deck holds fewer than three cards, whatever remains is moved.
// (The original sliced deck[len(deck)-3:] unconditionally and panicked on
// a deck of one or two cards.)
func showMoreFromDeck(table []int, deck []int) ([]int, []int) {
	n := 3
	if len(deck) < n {
		n = len(deck)
	}
	if n == 0 {
		return table, deck
	}
	cut := len(deck) - n
	return append(table, deck[cut:]...), deck[:cut]
}
|
package 数组
// hanota solves the Tower of Hanoi puzzle (LeetCode 面试题 08.06): move all
// plates from A to C using B as scratch, returning C. Slices hold plates
// bottom-first, so the top of a peg is the last element.
func hanota(A []int, B []int, C []int) []int {
	// Guard the empty tower: move(0, ...) would otherwise recurse forever.
	if len(A) == 0 {
		return C
	}
	move(len(A), &A, &B, &C)
	return C
}
// move transfers the top n plates of A to C, using B as the auxiliary peg,
// obeying the Hanoi rules at every step. Slices are passed by pointer so
// that the appends and truncations are visible to the caller.
func move(n int, A *[]int, B *[]int, C *[]int) {
	// No-op for n <= 0; the original recursed forever in that case.
	if n <= 0 {
		return
	}
	if n == 1 {
		// Move a single plate: pop the top of A and push it onto C.
		top := (*A)[len(*A)-1]
		*A = (*A)[:len(*A)-1]
		*C = append(*C, top)
		return
	}
	move(n-1, A, C, B) // park the n-1 smaller plates on B
	move(1, A, B, C)   // move the largest plate to C
	move(n-1, B, A, C) // bring the smaller plates onto it
}
/*
题目链接: https://leetcode-cn.com/problems/hanota-lcci/
总结
1. 这题我还不太理解,不太理解的点在参数n --- n是否可以省去呢?
2. 这题不传引用的话,会出现错误。 (不传引用会导致 append 后,函数内的参数修改了,调用者的传入的参数却没改)
*/
|
package pipeline
// EchoFunc produces the source channel of a pipeline from a slice of ints.
type EchoFunc func([]int) <-chan int

// PipeFunc is one pipeline stage: it consumes a channel and yields another.
type PipeFunc func(<-chan int) <-chan int

// pipeline feeds nums into echo, then threads the resulting channel through
// each stage in pipeFns in order, returning the final stage's output.
func pipeline(nums []int, echo EchoFunc, pipeFns ...PipeFunc) <-chan int {
	out := echo(nums)
	for _, stage := range pipeFns {
		out = stage(out)
	}
	return out
}
//var nums = []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
//for n := range pipeline(nums, gen, odd, sq, sum) {
//fmt.Println(n)
//}
|
package main
import (
"example.com/ben/primes"
"fmt"
)
// main searches for coefficients a and b of the quadratic n*n + a*n + b
// that produce the longest run of primes for consecutive n starting at 0,
// with a in [-1000, 1000] and b ranging over the primes below 1000
// (n=0 requires b itself to be prime).
func main() {
	// Renamed from "best" to avoid shadowing the best struct type.
	var record best
	for a := -1000; a <= 1000; a++ {
		for _, b := range primes.GetPrimes(1000) {
			for n := 0; ; n++ {
				quad := n*n + a*n + b
				if !primes.IsPrime(quad) {
					break
				}
				if n > record.n {
					record = best{a: a, b: b, n: n, quad: quad}
				}
			}
		}
	}
	fmt.Printf("Best a: %v, b: %v, n: %v, quad: %v\n", record.a, record.b, record.n, record.quad)
}
// best records the strongest quadratic found so far: coefficients a and b,
// the count n of consecutive prime-producing inputs, and the last prime
// value quad the quadratic produced.
type best struct {
	a, b, n, quad int
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.