text stringlengths 11 4.05M |
|---|
package pgeo
import (
"database/sql/driver"
)
// NullLseg allows line segment to be null
type NullLseg struct {
	Lseg                      // embedded segment value; meaningful only when Valid is true
	Valid bool `json:"valid"` // Valid reports whether Lseg holds a non-NULL value
}
// Value implements driver.Valuer, encoding the segment for the database.
// A SQL NULL is produced when Valid is false.
func (l NullLseg) Value() (driver.Value, error) {
	if l.Valid {
		return valueLseg(l.Lseg)
	}
	return nil, nil
}
// Scan implements sql.Scanner, decoding a driver value into the segment.
// A nil src marks the value as NULL and resets the segment to the zero lseg.
func (l *NullLseg) Scan(src interface{}) error {
	if src == nil {
		l.Valid = false
		l.Lseg = NewLseg(Point{}, Point{})
		return nil
	}
	// NOTE(review): Valid stays true even when scanLseg fails — this matches
	// the original behavior.
	l.Valid = true
	return scanLseg(&l.Lseg, src)
}
// Randomize implements sqlboiler's randomize interface for generating test
// data: either a NULL value or a random segment.
func (l *NullLseg) Randomize(nextInt func() int64, fieldType string, shouldBeNull bool) {
	l.Valid = !shouldBeNull
	if !shouldBeNull {
		l.Lseg = randLseg(nextInt)
	}
}
|
package main
import (
"encoding/json"
"fmt"
"log"
)
// Envelope carries only the discriminator field of an incoming message; it
// is decoded first to decide which concrete payload type to decode next.
type Envelope struct {
	Type string `json:"type"`
}

// Sound is the payload for messages with type "sound".
type Sound struct {
	Description string `json:"description"`
	Authority   string `json:"authority"`
}

// Cowbell is the payload for a prospective "cowbell" message type
// (not yet handled by main's type switch).
type Cowbell struct {
	More bool `json:"more"`
}
// main decodes a JSON message in two passes: first only the envelope to
// learn the message type, then the full payload for that concrete type.
func main() {
	input := `
{
	"type": "sound",
	"description": "dynamite",
	"authority": "the Bruce Dickinson"
}
`
	buf := []byte(input)

	// First pass: just the type discriminator.
	var env Envelope
	if err := json.Unmarshal(buf, &env); err != nil {
		log.Fatal(err)
	}

	switch env.Type {
	case "sound":
		// Second pass: decode the full payload for the known type.
		// (The original re-declared a shadowing `var env Envelope` and
		// decoded into an anonymous struct embedding both types, which was
		// redundant — the envelope was already decoded above.)
		var s Sound
		if err := json.Unmarshal(buf, &s); err != nil {
			log.Fatal(err)
		}
		fmt.Println(s.Description)
	default:
		log.Fatalf("unknown message type: %q", env.Type)
	}
}
|
package manager
import (
"github.com/ChowRobin/fantim/model/bo"
"github.com/ChowRobin/fantim/model/vo"
"github.com/gorilla/websocket"
)
// Single-node deployments keep the connection routing in a local in-memory map; a distributed deployment would use redis instead.
var (
	// UserConnRouter maps a user id to the group of that user's live
	// websocket connections.
	// NOTE(review): this plain map is accessed without a lock in the
	// functions below — confirm all access is single-goroutine or add
	// synchronization.
	UserConnRouter map[int64]*bo.LConnectionGroup
)

// init allocates the router map so registrations never write to a nil map.
func init() {
	UserConnRouter = make(map[int64]*bo.LConnectionGroup)
}
// RegisterUserLongConn registers a long-lived websocket connection for a user.
// RegisterUserLongConn registers a long-lived websocket connection under the
// given user and connection ids, creating the user's connection group on
// first use. It always returns nil.
func RegisterUserLongConn(userId, connId int64, conn *websocket.Conn) error {
	connGroup, ok := UserConnRouter[userId]
	if !ok || connGroup == nil {
		// First connection for this user (or a stale nil entry): start a
		// fresh group. The original code silently dropped the connection
		// when a nil group was stored under the key.
		connGroup = &bo.LConnectionGroup{}
		UserConnRouter[userId] = connGroup
	}
	connGroup.Register(connId, conn)
	return nil
}
// DeleteUserLongConn removes a connection from the user's connection group.
// DeleteUserLongConn drops the given connection id from the user's
// connection group, if the user has a non-nil group registered.
func DeleteUserLongConn(userId, connId int64) {
	if group, ok := UserConnRouter[userId]; ok && group != nil {
		delete(group.ConnMap, connId)
	}
}
// PushMessage pushes a message over the user's long-lived connections.
// PushMessage broadcasts msg over every live connection of the user.
// Users with no registered connection group are silently skipped (nil error).
func PushMessage(userId int64, msg *vo.PushMessage) error {
	if group, ok := UserConnRouter[userId]; ok {
		return group.BroadCast(msg)
	}
	return nil
}
|
package raft
import (
"errors"
zmq "github.com/pebbe/zmq4"
"github.com/syndtr/goleveldb/leveldb"
"net"
"sync"
"time"
)
// ErrRedirect signals that a request must be redirected to the leader.
// See Log.Append. Implements the error interface.
type ErrRedirect int

// MsgAckMap counts acknowledgements received per log sequence number while
// establishing consensus on an entry.
var MsgAckMap map[Lsn]int

// LogEntMap maintains the log-entry-to-client-conn mapping, used while
// sending the response back to the client.
var LogEntMap map[Lsn]net.Conn

// CommitCh is used by the shared log to publish committed Command entries
// for the kvstore to execute.
var CommitCh chan ConMsg

// keyval is the in-memory key/value store.
var keyval = make(map[string]valstruct)

// Locks used:
var mutex = &sync.RWMutex{}    // guards keyval
var MutexLog = &sync.RWMutex{} // guards LogEntMap

// PriorityQueue is a heap of *Item ordered by priority (see Item below).
type PriorityQueue []*Item
// Below constants are used to set message IDs (Envelope.MessageId),
// plus sentinel values and the three raft server states.
const (
	BROADCAST      = -1 // message addressed to every peer
	NOVOTE         = -1 // no vote cast this term
	UNKNOWN        = -1 // leader not yet known
	INITIALIZATION = 0

	APPENDENTRIESRPC         = 1
	CONFIRMCONSENSUS         = 2
	REQUESTVOTE              = 3
	VOTERESPONSE             = 4
	APPENDENTRIES            = 5
	APPENDENTRIESRESPONSE    = 6
	HEARTBEATRESPONSE        = 7
	HEARTBEAT                = 8
	APPENDENTRIESRPCRESPONSE = 9

	// Raft server states.
	FOLLOWER  = 1
	CANDIDATE = 2
	LEADER    = 3
)
var (
	// Below are various error descriptions returned by log and consensus
	// operations.
	errNoCommand               = errors.New("NO COMMAND")
	errorTimeout               = errors.New("TIMEOUT")
	errWrongIndex              = errors.New("BAD INDEX")
	errWrongTerm               = errors.New("BAD TERM")
	errTermIsSmall             = errors.New("TOO_SMALL_TERM")
	errorappendEntriesRejected = errors.New("APPENDENTRIES_REJECTED")
	errIndexIsSmall            = errors.New("TOO_SMALL_INDEX")
	errIndexIsBig              = errors.New("TOO_BIG_COMMIT_INDEX")
	errorDeposed               = errors.New("DEPOSED")
	errorOutOfSync             = errors.New("OUTOFSYNC")
	errChecksumInvalid         = errors.New("INVALID CHECKSUM")

	// voteLock guards voteMap, which records votes received per server id.
	voteLock = &sync.RWMutex{}
	voteMap  = make(map[int]bool)

	/*
		Both candidate and follower state raft servers wait for a random time
		between the min and max election timeouts; once the wait is over,
		the server starts an election all over again.
	*/
	MinElectTo int32 = 100
	MaxElectTo       = 3 * MinElectTo
)
// Log is the replicated log: entries are kept in memory and persisted via
// leveldb, with ApplyFunc invoked for entries as they are applied.
type Log struct {
	sync.RWMutex
	ApplyFunc   func(*LogEntryStruct) // callback applied to log entries
	db          *leveldb.DB           // persistent backing store
	entries     []*LogEntryStruct     // in-memory log entries
	commitIndex uint64                // index of the highest committed entry
	initialTerm uint64                // term the log started in
}

// debug toggles verbose tracing.
var debug = false

// Lsn is a log sequence number, unique for all time.
type Lsn uint64

// LeaderID denotes the id of the current leader; Quorum is the number of
// servers needed for a majority.
var LeaderID, Quorum int
// uint64Slice implements sort.Interface for []uint64 in DESCENDING order
// (note that Less uses > rather than <).
type uint64Slice []uint64

func (p uint64Slice) Len() int           { return len(p) }
func (p uint64Slice) Less(i, j int) bool { return p[i] > p[j] }
func (p uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
// Envelope is the wire message exchanged between raft peers.
type Envelope struct {
	Pid          int    // destination peer id (or BROADCAST)
	SenderId     int    // id of the sending server
	Leaderid     int    // sender's view of the current leader
	MessageId    int    // one of the message-ID constants above
	CommitIndex  uint64 // sender's commit index
	LastLogIndex uint64 // index of the sender's last log entry
	LastLogTerm  uint64 // term of the sender's last log entry
	//Message LogEntryStruct
	// Message carries the payload; its concrete type depends on MessageId.
	Message interface{}
}

// LogEntry is the read-only view of a single replicated log entry.
type LogEntry interface {
	Lsn() Lsn        // sequence number of the entry
	Data() []byte    // raw command bytes
	Term() uint64    // term in which the entry was created
	Committed() bool // whether the entry has been committed
}
// LogEntryStruct is the concrete log entry implementation.
type LogEntryStruct struct {
	Logsn     Lsn       // log sequence number
	TermIndex uint64    // term of the entry
	DataArray []byte    // serialized command
	Commit    chan bool // signalled once the entry is committed
}

// CommandTuple is used to envelop a command together with the channels used
// to report consensus success or failure back to the submitter.
type CommandTuple struct {
	Com         []byte     // serialized command
	ComResponse chan bool  // receives the consensus outcome
	Err         chan error // receives the failure, if any
}

// valstruct stores the value kept for each key in the kvstore.
type valstruct struct {
	version    int64  // version counter, used by cas
	expirytime int    // expiry — NOTE(review): units/zero-semantics defined by the kvstore code, not visible here
	timestamp  int64
	numbytes   int    // length of value
	value      []byte // the stored bytes
}

// Command is the parsed command sent by the client to the connhandler.
type Command struct {
	CmdType    int // 1-set, 2-cas, 3-get, 4-getm, 5-delete
	Key        string
	Expirytime int
	Len        int
	Value      []byte
	Version    int64
}

// ConMsg pairs a log entry with the client connection awaiting its response.
type ConMsg struct {
	Les LogEntryStruct
	Con net.Conn
}
// Heap (priority queue implementation) — functions adapted from the Go
// documentation examples.
var PQ = make(PriorityQueue, 0)

// Item is a single element of the priority queue.
type Item struct {
	value     string // The value of the item; arbitrary.
	priority  int64  // The priority of the item in the queue.
	timestamp int64  // To ensure unique element insert in heap (helps in deleting/updating elements)
	index     int    // The index of the item in the heap.
}

// ServerConfig describes one server of the cluster.
type ServerConfig struct {
	Id         string // server id
	HostName   string
	ClientPort string // port serving client requests
	LogPort    string // port used for raft log traffic
}

// clusterCount wraps the textual server count from the config file.
type clusterCount struct {
	Count string
}

// serverLogPath wraps the directory used for the persistent log.
type serverLogPath struct {
	Path string
}

// ClusterConfig is the full cluster configuration.
type ClusterConfig struct {
	Path    serverLogPath  // Directory for persistent log
	Servers []ServerConfig // All servers in this cluster
	Count   clusterCount
}
// nextIndex stores, per follower, the index of the next log entry to send
// to that follower.
type nextIndex struct {
	sync.RWMutex
	m map[uint64]uint64 // followerId: nextIndex
}

// appendEntries is the payload of an append-entries message.
type appendEntries struct {
	TermIndex uint64            // leader's term
	Entries   []*LogEntryStruct // entries to replicate
}

// appendEntriesResponse represents the response to an appendEntries RPC.
type appendEntriesResponse struct {
	Term    uint64 // responder's current term
	Success bool   // whether the entries were accepted
	reason  string // human-readable rejection reason
}

// Raft is a structure that defines a server and also maintains information
// about the cluster.
type Raft struct {
	Pid           int
	Peers         []int
	Path          string
	Term          uint64         // Current term ID for raft object.(Monotonic increase)
	VotedFor      int            // Server ID for which this raft has voted in leader election.
	VotedTerm     int
	LeaderId      int            // Current Leader ID
	CommitIndex   uint64         // Index of the highest entry commited till time.(Monotonic increase)
	MatchIndex    map[int]uint64 // Index of the highest log entry known to be replicated on server, per peer.
	NextIndex     map[int]uint64 // Index of the next log entry to send to that server, per peer (initialized to leader last log index + 1)
	PrevLogIndex  uint64
	PrevLogTerm   uint64
	ElectTicker   <-chan time.Time // fires when the election timeout elapses
	State         int              // FOLLOWER, CANDIDATE or LEADER
	LastApplied   uint64           // Index of the highest log entry applied to state machine.
	In            chan *Envelope   // inbound messages from peers
	Out           chan *Envelope   // outbound messages to peers
	Address       map[int]string   // peer id -> address
	ClientSockets map[int]*zmq.Socket
	LogSockets    map[int]*zmq.Socket
	LsnVar        uint64
	ClusterSize   int
	GotConsensus  chan bool
	SLog          *Log
	Inchan        chan *LogEntryStruct
	Outchan       chan interface{}
	Inprocess     bool
}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
)
// RuleInputs carries the raw inputs to validate against the rules engine.
type RuleInputs struct {
	firstName string
	lastName  string
	abn       string
}

// RuleResults returns the success fail of each rule, and an aggregated message string.
type RuleResults struct {
	validFirstName bool
	validLastName  bool
	abnStatus      bool
	message        string // space-separated concatenation of rule-violation messages
}

// DMResult holds a single decision-manager outcome.
type DMResult struct {
	result  bool
	message string
}

// errorList collects rule-violation messages (message -> "error") for one
// validateRules run; it is reset at the start of each run.
// NOTE(review): package-level mutable state — concurrent validateRules calls
// would race.
var errorList map[string]string
// #####################################################################
// validateRules runs each rule (ABN, first name, last name) whose input is
// present against the decision-manager rules engine, and aggregates any
// violation messages recorded in errorList.
//
// serviceVersion, when non-empty, selects a "_<version>-SNAPSHOT" variant of
// the name-rule service. The returned error is always nil; per-rule call
// errors are ignored (best effort), matching the original behavior.
func validateRules(serviceVersion string, ruleInputs RuleInputs) (RuleResults, error) {
	var results RuleResults

	reqBody := []byte(buildRuleRequest(ruleInputs))

	// Reset the shared violation list for this run.
	errorList = make(map[string]string)

	if ruleInputs.abn != "" {
		ruleOkay, _ := callDecisionManager(applicationConfig.ABNRuleServerURL, reqBody)
		results.abnStatus = ruleOkay
	}
	if ruleInputs.firstName != "" {
		if serviceVersion != "" {
			serviceVersion = "_" + serviceVersion + "-SNAPSHOT"
		}
		ruleOkay, _ := callDecisionManager(applicationConfig.NameRuleServerURL+serviceVersion, reqBody)
		results.validFirstName = ruleOkay
	}
	if ruleInputs.lastName != "" {
		ruleOkay, _ := callDecisionManager(applicationConfig.LNameRuleServerURL, reqBody)
		results.validLastName = ruleOkay
	}

	// Aggregate all recorded violation messages.
	// (Was `for k, _ := range` — the blank identifier was redundant.)
	for k := range errorList {
		results.message = results.message + k + " "
	}
	return results, nil
}
// callDecisionManager POSTs reqBody to the rules engine at URL and reports
// whether the rules passed. Violations are detected indirectly: parseMap
// appends to the global errorList, so the list growing during this call
// means the rule failed.
//
// NOTE(review): a transport-level failure still calls log.Fatal (process
// exit), preserved from the original; consider returning the error instead.
func callDecisionManager(URL string, reqBody []byte) (bool, error) {
	client := &http.Client{}
	req, err := http.NewRequest("POST", URL, bytes.NewBuffer(reqBody))
	if err != nil {
		// Originally unchecked: a malformed URL would have caused a nil
		// pointer dereference on req below.
		return false, err
	}
	req.SetBasicAuth(applicationConfig.Username, applicationConfig.Password)
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/xml")

	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close() // originally leaked, preventing connection reuse

	bodyText, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Originally ignored; a truncated body would have been parsed anyway.
		return false, err
	}

	var result map[string]interface{}
	if err = json.Unmarshal(bodyText, &result); err != nil {
		fmt.Fprintln(os.Stderr, "ERROR: Cannot unmarshall DM response.")
	}

	// If parseMap recorded no new violations, the rule passed.
	before := len(errorList)
	parseMap(result)
	return before == len(errorList), err
}
// parseMap walks an arbitrary decoded-JSON map and records every value found
// under a "cause" key into the global errorList. Nested maps and arrays are
// traversed recursively.
func parseMap(aMap map[string]interface{}) {
	for key, val := range aMap {
		switch concreteVal := val.(type) {
		case map[string]interface{}:
			// Use the already-typed switch value; the original re-asserted
			// val.(map[string]interface{}) redundantly.
			parseMap(concreteVal)
		case []interface{}:
			parseArray(concreteVal)
		default:
			// Leaf value: only "cause" entries are interesting — they carry
			// the rule-violation description.
			if key == "cause" {
				msg := fmt.Sprintf("%s", concreteVal)
				errorList[msg] = "error"
			}
		}
	}
}
// parseArray walks an arbitrary decoded-JSON array, recursing into nested
// maps and arrays so parseMap can record "cause" entries; scalar elements
// are ignored.
func parseArray(anArray []interface{}) {
	for _, val := range anArray {
		switch elem := val.(type) {
		case map[string]interface{}:
			// Use the typed switch value; the original re-asserted the type.
			parseMap(elem)
		case []interface{}:
			parseArray(elem)
		}
	}
}
// buildRuleRequest renders the decision-manager request body: a fixed JSON
// command template with the entity fields (name, lastName, abn) substituted
// into the inserted object, in that order.
//
// Field values are encoded with json.Marshal, so quotes and backslashes in
// the inputs are escaped correctly (the original concatenated raw strings,
// which produced invalid JSON for any input containing a double quote).
func buildRuleRequest(ruleInputs RuleInputs) string {
	const template = `{
	"lookup": "statelessSession",
	"commands": [
		{
			"set-global": {
				"identifier": "service",
				"object": {
					"com.redhat.demo.abnclient.Client": {}
				}
			}
		},
		{
			"insert": {
				"object": {
					"com.myspace.datavalidation.Entity": {
						*****
					}
				}
			}
		},
		{
			"fire-all-rules": ""
		},
		{
			"query": {
				"out-identifier": "error-results",
				"name": "get_validation_error"
			}
		}
	]
}`

	// Collect the `"field" : "value"` pairs for every non-empty input.
	var fields []string
	appendField := func(name, value string) {
		if value == "" {
			return
		}
		encoded, err := json.Marshal(value)
		if err != nil {
			// Marshalling a plain string cannot fail; skip defensively.
			return
		}
		fields = append(fields, fmt.Sprintf("%q : %s", name, encoded))
	}
	appendField("name", ruleInputs.firstName)
	appendField("lastName", ruleInputs.lastName)
	appendField("abn", ruleInputs.abn)

	// Substitute the placeholder in the template (the original called this
	// "the biggest hack ever" — kept, but now with safe escaping).
	return strings.Replace(template, "*****", strings.Join(fields, ", "), 1)
}
|
//Package cutout implements the circuit breaker design pattern(see: https://martinfowler.com/bliki/CircuitBreaker.html)
//for calling third party api services.
//
// Cutout comes with features like:
//
// 1. Multilevel fallback functions(in case even the fallback fails)
//
// 2. Custom BackOff function on the request level for generating backoff timeout logics
//
// 3. Event channel to capture events like State change or failure detection
//
//Here is a basic example:
// package main
//
// import (
// "bytes"
// "fmt"
// "log"
// "net/http"
// "time"
//
// "github.com/Anondo/cutout"
// )
//
// var (
// cb = &cutout.CircuitBreaker{
// FailThreshold: 100,
// HealthCheckPeriod: 15 * time.Second,
// }
// req = cutout.Request{
// URL: "http://localhost:9090",
// AllowedStatus: []int{http.StatusOK},
// Method: http.MethodPost,
// TimeOut: 2 * time.Second,
// RequestBody: bytes.NewBuffer([]byte(`{"name":"abcd"}`)),
// BackOff: func(t time.Duration) time.Duration {
// return time.Duration(int(t/time.Second)*5) * time.Second
// },
// }
//
// cache = `{"name":"Mr. Test","age":69,"cgpa":4}`
// )
//
// func thehandler(w http.ResponseWriter, r *http.Request) {
//
// resp, err := cb.Call(&req, func() (*cutout.Response, error) {
// return &cutout.Response{
// BodyString: cache,
// }, nil
// })
//
// if err != nil {
// fmt.Fprintf(w, err.Error())
// return
// }
//
// w.WriteHeader(http.StatusOK)
// fmt.Fprintf(w, resp.BodyString)
// }
//
// func main() {
//
// http.HandleFunc("/", thehandler)
//
// log.Println("Service A is running on http://localhost:8080 ...")
//
// if err := http.ListenAndServe(":8080", nil); err != nil {
// log.Fatal(err.Error())
// }
// }
//
// See https://github.com/Anondo/cutout/examples.
package cutout
|
package provider
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"github.com/mrparkers/terraform-provider-keycloak/keycloak"
)
// resourceKeycloakOpenidClientAuthorizationClientPolicy defines the Terraform
// resource schema and CRUD bindings for a Keycloak "client" authorization
// policy.
func resourceKeycloakOpenidClientAuthorizationClientPolicy() *schema.Resource {
	return &schema.Resource{
		Create: resourceKeycloakOpenidClientAuthorizationClientPolicyCreate,
		Read:   resourceKeycloakOpenidClientAuthorizationClientPolicyRead,
		Delete: resourceKeycloakOpenidClientAuthorizationClientPolicyDelete,
		Update: resourceKeycloakOpenidClientAuthorizationClientPolicyUpdate,
		// Import is delegated to the shared policy importer.
		Importer: &schema.ResourceImporter{
			State: genericResourcePolicyImport,
		},
		Schema: map[string]*schema.Schema{
			"resource_server_id": {
				Type:     schema.TypeString,
				Required: true,
			},
			"realm_id": {
				Type:     schema.TypeString,
				Required: true,
			},
			"name": {
				Type:     schema.TypeString,
				Required: true,
			},
			"decision_strategy": {
				Type:     schema.TypeString,
				Optional: true,
			},
			"logic": {
				Type:         schema.TypeString,
				Optional:     true,
				ValidateFunc: validation.StringInSlice(keycloakPolicyLogicTypes, false),
			},
			"description": {
				Type:     schema.TypeString,
				Optional: true,
			},
			// The set of client ids this policy applies to.
			"clients": {
				Type:     schema.TypeSet,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Required: true,
			},
		},
	}
}
// getOpenidClientAuthorizationClientAuthorizationClientPolicyResourceFromData
// builds the Keycloak API model for a client policy from Terraform state.
func getOpenidClientAuthorizationClientAuthorizationClientPolicyResourceFromData(data *schema.ResourceData) *keycloak.OpenidClientAuthorizationClientPolicy {
	var clients []string
	if raw, ok := data.GetOk("clients"); ok {
		for _, entry := range raw.(*schema.Set).List() {
			clients = append(clients, entry.(string))
		}
	}
	return &keycloak.OpenidClientAuthorizationClientPolicy{
		Id:               data.Id(),
		ResourceServerId: data.Get("resource_server_id").(string),
		RealmId:          data.Get("realm_id").(string),
		DecisionStrategy: data.Get("decision_strategy").(string),
		Logic:            data.Get("logic").(string),
		Name:             data.Get("name").(string),
		Type:             "client",
		Clients:          clients,
		Description:      data.Get("description").(string),
	}
}
// setOpenidClientAuthorizationClientAuthorizationClientPolicyResourceData
// copies the policy API model back into Terraform state.
// NOTE(review): data.Set errors are ignored here, consistent with the rest
// of this resource.
func setOpenidClientAuthorizationClientAuthorizationClientPolicyResourceData(data *schema.ResourceData, policy *keycloak.OpenidClientAuthorizationClientPolicy) {
	data.SetId(policy.Id)
	data.Set("resource_server_id", policy.ResourceServerId)
	data.Set("realm_id", policy.RealmId)
	data.Set("name", policy.Name)
	data.Set("decision_strategy", policy.DecisionStrategy)
	data.Set("logic", policy.Logic)
	data.Set("description", policy.Description)
	data.Set("clients", policy.Clients)
}
// resourceKeycloakOpenidClientAuthorizationClientPolicyCreate creates the
// policy in Keycloak, stores the result in state, then re-reads it.
func resourceKeycloakOpenidClientAuthorizationClientPolicyCreate(data *schema.ResourceData, meta interface{}) error {
	client := meta.(*keycloak.KeycloakClient)
	policy := getOpenidClientAuthorizationClientAuthorizationClientPolicyResourceFromData(data)
	if err := client.NewOpenidClientAuthorizationClientPolicy(policy); err != nil {
		return err
	}
	setOpenidClientAuthorizationClientAuthorizationClientPolicyResourceData(data, policy)
	return resourceKeycloakOpenidClientAuthorizationClientPolicyRead(data, meta)
}
// resourceKeycloakOpenidClientAuthorizationClientPolicyRead refreshes state
// from Keycloak, handling the policy having been deleted out-of-band.
func resourceKeycloakOpenidClientAuthorizationClientPolicyRead(data *schema.ResourceData, meta interface{}) error {
	client := meta.(*keycloak.KeycloakClient)
	policy, err := client.GetOpenidClientAuthorizationClientPolicy(
		data.Get("realm_id").(string),
		data.Get("resource_server_id").(string),
		data.Id(),
	)
	if err != nil {
		return handleNotFoundError(err, data)
	}
	setOpenidClientAuthorizationClientAuthorizationClientPolicyResourceData(data, policy)
	return nil
}
// resourceKeycloakOpenidClientAuthorizationClientPolicyUpdate pushes the
// current state to Keycloak and mirrors the result back into state.
func resourceKeycloakOpenidClientAuthorizationClientPolicyUpdate(data *schema.ResourceData, meta interface{}) error {
	client := meta.(*keycloak.KeycloakClient)
	policy := getOpenidClientAuthorizationClientAuthorizationClientPolicyResourceFromData(data)
	if err := client.UpdateOpenidClientAuthorizationClientPolicy(policy); err != nil {
		return err
	}
	setOpenidClientAuthorizationClientAuthorizationClientPolicyResourceData(data, policy)
	return nil
}
// resourceKeycloakOpenidClientAuthorizationClientPolicyDelete removes the
// policy from Keycloak.
func resourceKeycloakOpenidClientAuthorizationClientPolicyDelete(data *schema.ResourceData, meta interface{}) error {
	client := meta.(*keycloak.KeycloakClient)
	return client.DeleteOpenidClientAuthorizationClientPolicy(
		data.Get("realm_id").(string),
		data.Get("resource_server_id").(string),
		data.Id(),
	)
}
|
package resolvers
import (
"context"
"github.com/Keijun-KUMAGAI/graphql-server/gqlgen"
"github.com/Keijun-KUMAGAI/graphql-server/prisma-client"
)
// -------------------- Mutation --------------------
func (r *mutationResolver) TodoCreate(ctx context.Context, params gqlgen.TodoCreateInput) (*prisma.Todo, error) {
todo, err := r.Prisma.CreateTodo(prisma.TodoCreateInput{
Message: params.Message,
User: prisma.UserCreateOneWithoutTodosInput{
Connect: &prisma.UserWhereUniqueInput{
ID: ¶ms.UserID,
},
},
}).Exec(ctx)
return todo, err
}
func (r *mutationResolver) TodoUpdate(ctx context.Context, params gqlgen.TodoUpdateInput) (*prisma.Todo, error) {
todo, err := r.Prisma.UpdateTodo(prisma.TodoUpdateParams{
Where: prisma.TodoWhereUniqueInput{
ID: ¶ms.ID,
},
Data: prisma.TodoUpdateInput{
Message: ¶ms.Message,
Done: ¶ms.Done,
},
}).Exec(ctx)
return todo, err
}
func (r *mutationResolver) TodoDelete(ctx context.Context, params gqlgen.TodoDeleteInput) (*prisma.Todo, error) {
todo, err := r.Prisma.DeleteTodo(prisma.TodoWhereUniqueInput{
ID: ¶ms.ID,
}).Exec(ctx)
return todo, err
}
// -------------------- TodoResolver --------------------
// User resolves the owner of a todo.
func (r *todoResolver) User(ctx context.Context, obj *prisma.Todo) (*prisma.User, error) {
	user, err := r.Prisma.Todo(prisma.TodoWhereUniqueInput{ID: &obj.ID}).User().Exec(ctx)
	if err != nil {
		// Was `return nil, nil`, which silently swallowed the lookup
		// failure and surfaced it to clients as a null user.
		return nil, err
	}
	return user, nil
}
|
package config
import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"os"
	"path/filepath"
	"time"

	"github.com/schicho/mensa/canteen"
)
// File names used under the user's config and cache directories.
const FilenameConfig = "mensa_conf.json"
const FilenameCache = "mensa_data.csv"

// defaultConfig is used when no config file exists or it cannot be parsed:
// Uni Passau canteen, zero cache timestamp, student prices.
var defaultConfig = Config{canteen.Canteens2Abbrev["UNI_PASSAU_CANTEEN"], time.Time{}, PriceStudent_t}

// Absolute paths of the config and cache files, resolved in init().
var FilepathConfig string
var FilepathCache string

// PriceType selects which price category to use.
type PriceType string

const (
	PriceStudent_t  PriceType = "Student"
	PriceEmployee_t PriceType = "Employee"
	PriceGuest_t    PriceType = "Guest"
)

// Config describes the json layout of the saved config file.
type Config struct {
	University string    `json:"university"` // canteen abbreviation
	Cached     time.Time `json:"cached"`     // when the cache was last refreshed
	Price      PriceType `json:"price"`
}
// GetConfig returns a pointer to the loaded user data.
func GetConfig() *Config {
	cfg := new(Config)
	cfg.loadConfig()
	return cfg
}
// init the filepath to config and cache.
func init() {
configDir, err := os.UserConfigDir()
cacheDir, err := os.UserCacheDir()
if err != nil {
log.Println("Cannot access home directory.")
log.Fatal(err)
}
FilepathConfig = configDir + "/" + FilenameConfig
FilepathCache = cacheDir + "/" + FilenameCache
}
// loadConfig checks if there exists a previous configuration and loads it
// into c, or falls back to the default config and saves it to disk.
func (c *Config) loadConfig() {
	if Exists(FilepathConfig) {
		configFile, err := os.OpenFile(FilepathConfig, os.O_RDONLY, os.ModePerm)
		if err != nil {
			panic(err)
		}
		defer configFile.Close()
		buffer, err := io.ReadAll(configFile)
		if err != nil {
			panic(err)
		}
		err = json.Unmarshal(buffer, c)
		if err != nil {
			fmt.Fprintln(os.Stderr, "Malformed `mensa_conf.json` file. Default to Student of Uni Passau.")
			// BUG FIX: the original did `c = &defaultConfig`, which only
			// rebound the local receiver pointer — the caller's Config
			// stayed zero-valued. Copy the default into *c instead.
			*c = defaultConfig
			c.writeConfigFile()
		}
	} else {
		// Default to known values and create config file.
		fmt.Fprintln(os.Stderr, "No `mensa_conf.json` file. Creating new file. Default to Student of Uni Passau.")
		*c = defaultConfig // was `c = &defaultConfig` — same bug as above
		c.writeConfigFile()
	}
}
// UpdateConfigFile just updates the timestamp in the configuration file,
// if new data was cached.
func (c *Config) UpdateConfigFile() {
	c.Cached = time.Now()
	c.writeConfigFile()
}
// writeConfigFile marshals the config and writes it to disk. A failure to
// create or write the file is logged and otherwise ignored (best effort),
// matching the original's non-fatal handling.
func (c *Config) writeConfigFile() {
	buffer, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	// os.WriteFile replaces the manual OpenFile/Write/Close dance and
	// surfaces write errors, which were previously discarded silently.
	if err := os.WriteFile(FilepathConfig, buffer, os.ModePerm); err != nil {
		log.Println("Could not write configuration file:", err)
	}
}
// deleteConfigCache removes both the config and the cache file from disk,
// aborting the program if either removal fails.
func deleteConfigCache() {
	fmt.Fprintln(os.Stderr, "Deleting config and cache.")
	if err := os.Remove(FilepathConfig); err != nil {
		log.Fatalln("Could not clear config.")
	}
	if err := os.Remove(FilepathCache); err != nil {
		log.Fatalln("Could not clear cache.")
	}
}
// BuildNewConfig deletes the config and cache files from disk and creates a
// new default config file.
func (c *Config) BuildNewConfig() {
	deleteConfigCache()
	c.loadConfig()
}
// Exists checks if a file or directory exists.
func Exists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
|
package queries
import (
"fmt"
"github.com/graphql-go/graphql"
"go_graphql/blog/db"
"go_graphql/blog/types"
)
// GetUserQuery returns the queries available against user type: a list
// field whose resolver loads all users from the database.
func GetUserQuery() *graphql.Field {
	return &graphql.Field{
		Type: graphql.NewList(types.UserType),
		Resolve: func(params graphql.ResolveParams) (interface{}, error) {
			conn := db.Connect()
			defer conn.Close()

			rows, err := conn.Query("SELECT * FROM users")
			if err != nil {
				// Originally only printed and then proceeded to iterate nil
				// rows; return the error to the GraphQL layer instead.
				return nil, fmt.Errorf("querying users: %w", err)
			}
			defer rows.Close() // originally never closed (leaked)

			var users []types.User
			for rows.Next() {
				// Fresh struct per row instead of the original single
				// reused pointer.
				var user types.User
				if err := rows.Scan(&user.ID, &user.Firstname, &user.Lastname); err != nil {
					return nil, fmt.Errorf("scanning user row: %w", err)
				}
				users = append(users, user)
			}
			// Surface any iteration error (originally unchecked).
			if err := rows.Err(); err != nil {
				return nil, err
			}
			return users, nil
		},
	}
}
|
// Copyright 2019-present PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package raftstore
import (
"bytes"
"fmt"
"testing"
"github.com/ngaut/unistore/raftstore/raftlog"
"github.com/pingcap/badger"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
rfpb "github.com/pingcap/kvproto/pkg/raft_cmdpb"
"github.com/pingcap/tidb/store/mockstore/unistore/tikv/mvcc"
"github.com/stretchr/testify/assert"
)
// TestRaftWriteBatch_PrewriteAndCommit exercises the two-phase write path:
// it prewrites three keys (short, long and empty values) and verifies the
// resulting locks, then commits them and verifies the committed versions
// in the KV engine.
func TestRaftWriteBatch_PrewriteAndCommit(t *testing.T) {
	engines := newTestEngines(t)
	defer cleanUpTestEngineData(engines)
	apply := new(applier)
	applyCtx := newApplyContext("test", nil, engines, nil, NewDefaultConfig())
	// commitTS == 0 marks this batch as a prewrite batch.
	wb := &raftWriteBatch{
		startTS:  100,
		commitTS: 0,
	}
	// Testing PreWriter: cover a short value, a long (128-byte) value and
	// an empty value.
	longValue := [128]byte{101}
	values := [][]byte{
		[]byte("short value"),
		longValue[:],
		[]byte(""),
	}
	for i := 0; i < 3; i++ {
		primary := []byte(fmt.Sprintf("t%08d_r%08d", i, i))
		expectLock := mvcc.Lock{
			LockHdr: mvcc.LockHdr{
				StartTS:    100,
				TTL:        10,
				Op:         uint8(kvrpcpb.Op_Put),
				PrimaryLen: uint16(len(primary)),
			},
			Primary: primary,
			Value:   values[i],
		}
		wb.Prewrite(primary, &expectLock)
		// Run the batched requests through the applier, flush to the KV
		// engine, then reset for the next key.
		apply.execWriteCmd(applyCtx, raftlog.NewRequest(&rfpb.RaftCmdRequest{
			Header:   new(rfpb.RaftRequestHeader),
			Requests: wb.requests,
		}))
		err := applyCtx.wb.WriteToKV(engines.kv)
		assert.Nil(t, err)
		applyCtx.wb.Reset()
		wb.requests = nil
		// The prewrite must leave a decodable lock on the primary key.
		val := engines.kv.LockStore.Get(primary, nil)
		assert.NotNil(t, val)
		lock := mvcc.DecodeLock(val)
		assert.Equal(t, expectLock, lock)
	}
	// Testing Commit: a non-zero commitTS makes this a commit batch.
	wb = &raftWriteBatch{
		startTS:  100,
		commitTS: 200,
	}
	for i := 0; i < 3; i++ {
		primary := []byte(fmt.Sprintf("t%08d_r%08d", i, i))
		expectLock := &mvcc.Lock{
			LockHdr: mvcc.LockHdr{
				StartTS: 100,
				TTL:     10,
				Op:      mvcc.LockTypePut,
			},
			Value: values[i],
		}
		wb.Commit(primary, expectLock)
		apply.execWriteCmd(applyCtx, raftlog.NewRequest(&rfpb.RaftCmdRequest{
			Header:   new(rfpb.RaftRequestHeader),
			Requests: wb.requests,
		}))
		err := applyCtx.wb.WriteToKV(engines.kv)
		assert.Nil(t, err)
		applyCtx.wb.Reset()
		wb.requests = nil
		// The committed version must be visible in the main DB with the
		// expected start/commit timestamps and value.
		if err := engines.kv.DB.View(func(txn *badger.Txn) error {
			item, err := txn.Get(primary)
			assert.Nil(t, err)
			curVal, err := item.Value()
			assert.Nil(t, err)
			assert.NotNil(t, item)
			userMeta := mvcc.DBUserMeta(item.UserMeta())
			assert.Equal(t, userMeta.StartTS(), expectLock.StartTS)
			assert.Equal(t, userMeta.CommitTS(), wb.commitTS)
			assert.Equal(t, 0, bytes.Compare(curVal, expectLock.Value))
			return nil
		}); err != nil {
			t.Error(err)
		}
	}
}
// TestRaftWriteBatch_Rollback prewrites locks on two keys, then issues two
// rollback batches: one at a non-matching startTS (150) with the bool arg
// false, and one at the original startTS (100) with the bool arg true,
// after which the lock on the second key must be gone.
// NOTE(review): the exact semantics of Rollback's bool parameter are
// defined elsewhere; only the final lock-deletion effect is asserted here.
func TestRaftWriteBatch_Rollback(t *testing.T) {
	engines := newTestEngines(t)
	defer cleanUpTestEngineData(engines)
	apply := new(applier)
	applyCtx := newApplyContext("test", nil, engines, nil, NewDefaultConfig())
	// Prewrite batch (commitTS == 0) for two keys.
	wb := &raftWriteBatch{
		startTS:  100,
		commitTS: 0,
	}
	longValue := [128]byte{102}
	for i := 0; i < 2; i++ {
		primary := []byte(fmt.Sprintf("t%08d_r%08d", i, i))
		expectLock := mvcc.Lock{
			LockHdr: mvcc.LockHdr{
				StartTS:    100,
				TTL:        10,
				Op:         uint8(kvrpcpb.Op_Put),
				PrimaryLen: uint16(len(primary)),
			},
			Primary: primary,
			Value:   longValue[:],
		}
		wb.Prewrite(primary, &expectLock)
		apply.execWriteCmd(applyCtx, raftlog.NewRequest(&rfpb.RaftCmdRequest{
			Header:   new(rfpb.RaftRequestHeader),
			Requests: wb.requests,
		}))
		err := applyCtx.wb.WriteToKV(engines.kv)
		assert.Nil(t, err)
		applyCtx.wb.Reset()
		wb.requests = nil
	}
	// Testing RollBack at a startTS (150) that does not match the lock.
	wb = &raftWriteBatch{
		startTS:  150,
		commitTS: 200,
	}
	primary := []byte(fmt.Sprintf("t%08d_r%08d", 0, 0))
	wb.Rollback(primary, false)
	apply.execWriteCmd(applyCtx, raftlog.NewRequest(&rfpb.RaftCmdRequest{
		Header:   new(rfpb.RaftRequestHeader),
		Requests: wb.requests,
	}))
	err := applyCtx.wb.WriteToKV(engines.kv)
	assert.Nil(t, err)
	applyCtx.wb.Reset()
	// Rollback at the matching startTS (100) for the second key.
	wb = &raftWriteBatch{
		startTS:  100,
		commitTS: 200,
	}
	primary = []byte(fmt.Sprintf("t%08d_r%08d", 1, 1))
	wb.Rollback(primary, true)
	apply.execWriteCmd(applyCtx, raftlog.NewRequest(&rfpb.RaftCmdRequest{
		Header:   new(rfpb.RaftRequestHeader),
		Requests: wb.requests,
	}))
	err = applyCtx.wb.WriteToKV(engines.kv)
	assert.Nil(t, err)
	applyCtx.wb.Reset()
	// The lock should be deleted.
	val := engines.kv.LockStore.Get(primary, nil)
	assert.Nil(t, val)
}
|
// DO NOT EDIT. This file was generated by "github.com/frk/gosql".
package testdata
import (
"github.com/frk/gosql"
"github.com/frk/gosql/pgsql"
)
// Exec runs the generated INSERT, binding every array-typed column of
// "pgsql_test" from the corresponding field of q.data via the pgsql
// conversion helpers, and returns any driver error.
// (Generated code — see the DO NOT EDIT header; comments added for review
// only, the query itself is untouched.)
func (q *insertarrayquery) Exec(c gosql.Conn) error {
	const queryString = `INSERT INTO "pgsql_test" (
	"col_bitarr"
	, "col_boolarr"
	, "col_boxarr"
	, "col_bpchararr"
	, "col_byteaarr"
	, "col_chararr"
	, "col_cidrarr"
	, "col_datearr"
	, "col_daterangearr"
	, "col_float4arr"
	, "col_float8arr"
	, "col_inetarr"
	, "col_int2arr"
	, "col_int2vector"
	, "col_int2vectorarr"
	, "col_int4arr"
	, "col_int4rangearr"
	, "col_int8arr"
	, "col_int8rangearr"
	, "col_jsonarr"
	, "col_jsonbarr"
	, "col_linearr"
	, "col_lsegarr"
	, "col_macaddrarr"
	, "col_macaddr8arr"
	, "col_moneyarr"
	, "col_numericarr"
	, "col_numrangearr"
	, "col_patharr"
	, "col_pointarr"
	, "col_polygonarr"
	, "col_textarr"
	, "col_timearr"
	, "col_timestamparr"
	, "col_timestamptzarr"
	, "col_timetzarr"
	, "col_tsqueryarr"
	, "col_tsrangearr"
	, "col_tstzrangearr"
	, "col_tsvectorarr"
	, "col_uuidarr"
	, "col_varbitarr"
	, "col_varchararr"
	, "col_xmlarr"
	) VALUES (
	$1
	, $2
	, $3
	, $4
	, $5
	, $6
	, $7
	, $8
	, $9
	, $10
	, $11
	, $12
	, $13
	, $14
	, $15
	, $16
	, $17
	, $18
	, $19
	, $20
	, $21
	, $22
	, $23
	, $24
	, $25
	, $26
	, $27
	, $28
	, $29
	, $30
	, $31
	, $32
	, $33
	, $34
	, $35
	, $36
	, $37
	, $38
	, $39
	, $40
	, $41
	, $42
	, $43
	, $44
	)` // `
	// Arguments are listed in the same order as the column list above.
	_, err := c.Exec(queryString,
		pgsql.BitArrayFromUint8Slice(q.data.bitarr),
		pgsql.BoolArrayFromBoolSlice(q.data.boolarr),
		pgsql.BoxArrayFromFloat64Array2Array2Slice(q.data.boxarr),
		pgsql.BPCharArrayFromRuneSlice(q.data.bpchararr),
		pgsql.ByteaArrayFromByteSliceSlice(q.data.byteaarr),
		pgsql.BPCharArrayFromByteSlice(q.data.chararr),
		pgsql.CIDRArrayFromIPNetSlice(q.data.cidrarr),
		pgsql.DateArrayFromTimeSlice(q.data.datearr),
		pgsql.DateRangeArrayFromTimeArray2Slice(q.data.daterangearr),
		pgsql.Float4ArrayFromFloat32Slice(q.data.float4arr),
		pgsql.Float8ArrayFromFloat64Slice(q.data.float8arr),
		pgsql.InetArrayFromIPSlice(q.data.inetarr),
		pgsql.Int2ArrayFromInt16Slice(q.data.int2arr),
		pgsql.Int2VectorFromInt16Slice(q.data.int2vector),
		pgsql.Int2VectorArrayFromInt16SliceSlice(q.data.int2vectorarr),
		pgsql.Int4ArrayFromInt32Slice(q.data.int4arr),
		pgsql.Int4RangeArrayFromInt32Array2Slice(q.data.int4rangearr),
		pgsql.Int8ArrayFromInt64Slice(q.data.int8arr),
		pgsql.Int8RangeArrayFromInt64Array2Slice(q.data.int8rangearr),
		pgsql.JSONArrayFromByteSliceSlice(q.data.jsonarr),
		pgsql.JSONArrayFromByteSliceSlice(q.data.jsonbarr),
		pgsql.LineArrayFromFloat64Array3Slice(q.data.linearr),
		pgsql.LsegArrayFromFloat64Array2Array2Slice(q.data.lsegarr),
		pgsql.MACAddrArrayFromHardwareAddrSlice(q.data.macaddrarr),
		pgsql.MACAddr8ArrayFromHardwareAddrSlice(q.data.macaddr8arr),
		pgsql.MoneyArrayFromInt64Slice(q.data.moneyarr),
		pgsql.NumericArrayFromInt64Slice(q.data.numericarr),
		pgsql.NumRangeArrayFromFloat64Array2Slice(q.data.numrangearr),
		pgsql.PathArrayFromFloat64Array2SliceSlice(q.data.patharr),
		pgsql.PointArrayFromFloat64Array2Slice(q.data.pointarr),
		pgsql.PolygonArrayFromFloat64Array2SliceSlice(q.data.polygonarr),
		pgsql.TextArrayFromStringSlice(q.data.textarr),
		pgsql.TimeArrayFromTimeSlice(q.data.timearr),
		pgsql.TimestampArrayFromTimeSlice(q.data.timestamparr),
		pgsql.TimestamptzArrayFromTimeSlice(q.data.timestamptzarr),
		pgsql.TimetzArrayFromTimeSlice(q.data.timetzarr),
		pgsql.TSQueryArrayFromStringSlice(q.data.tsqueryarr),
		pgsql.TsRangeArrayFromTimeArray2Slice(q.data.tsrangearr),
		pgsql.TstzRangeArrayFromTimeArray2Slice(q.data.tstzrangearr),
		pgsql.TSVectorArrayFromStringSliceSlice(q.data.tsvectorarr),
		pgsql.UUIDArrayFromByteArray16Slice(q.data.uuidarr),
		pgsql.VarBitArrayFromBoolSliceSlice(q.data.varbitarr),
		pgsql.VarCharArrayFromStringSlice(q.data.varchararr),
		pgsql.XMLArrayFromByteSliceSlice(q.data.xmlarr),
	)
	return err
}
|
package valexa
import (
"os"
"testing"
// "fmt"
"bufio"
"bytes"
"net/http"
)
// init switches the working directory to the test fixture directory so the
// tests can read certificate/data files via relative paths.
// NOTE(review): the Chdir error is ignored; if ./test/data is missing the
// tests will fail later with confusing path errors.
func init() {
	os.Chdir("./test/data")
}
//NOTE
//The requestBody below is signed and must not be modified. It was generated on 2017-12-01T09:00:10Z.
//The accepted validity window for tests in this project is configured via the testValidTime variable.
//If the tests in this project start failing, testValidTime has most likely expired:
//1. You can increase the testValidTime value, without exceeding the size limit of the int type.
//2. Or you can generate a new requestBody yourself and replace this one.
var testValidTime int = 60*60*24*365*10 // unit: seconds; defaults to 10 years
var requestBody = []byte(`POST /echo/helloworld?appid=amzn1.ask.skill.594232d8-4095-499b-9ba3-11701107834d&type=1 HTTP/1.1
Content-Type: application/json; charset=utf-8
Accept: application/json
Accept-Charset: utf-8
Signature: L+sBVB8FrP0lvR0MelzmsNivlw6dFYb5p0FU865mSPszIAyHyJ02Eg0GKCATOV25KYC2VpLgVD33tdgSQM56RifFukPnh8jJRCJRP36GHchszW1sIeBsd3ey/MTO7DW4QnYwLtDxhuIaDIifWwSkgT7I2IiqcxhUZPwECYLtzG51HU7Azwj/ECb5gew8wR2NlPKlbdzIO6938pF8veU3JMVlRkFs7dZfLxglcSk+sCcf0qnzCasocMHrO/p70szCN9X2vRt9y3Jur377Xncxb0vz2t5N8yR5KGDctw/J2yZHqhgJtLvKolnxa8wW2CjaqSb6y4mA95VM1JMl5Abjdw==
SignatureCertChainUrl: https://s3.amazonaws.com/echo.api/echo-api-cert-5.pem
Content-Length: 1138
Host: alexatest.xxx.com.cn
Connection: Keep-Alive
User-Agent: HttpClient
{"session":
{"sessionId":"SessionId.b67984b5-01e4-4a2d-8cf1-c4692d304dde","application":{"applicationId":"amzn1.ask.skill.594232d8-4095-499b-9ba3-11701107834d"},"attributes":{},"user":{"userId":"amzn1.ask.account.AF3E6FFKSBVMJK6ZOGSEWKEDLD2EXJCZUFJX2ZNWO3R55COS5ZLILGRLM7WTJWKQYPRAOUZWFS2ZZP6ULJRALRA3CVIDCVZ7W5VUMZZMWREW3UWZRIF3XWJMXG5HV4LZZ5ZCDYXKM56BQUKOWVPYP4CWH3TP3SQABYCVATYLG56PNUHV2VON3RAY54LELDRHWBZ2JI6O6VN6LWY","accessToken":null},"new":false},
"request":
{"intent":{"name":"RecipeIntent","slots":{"Item":{"name":"Item","value":"map"}}},"requestId":"EdwRequestId.09c3feaa-a54d-4833-bca0-2f2e4dc1ae5e","type":"IntentRequest","locale":"en-US","timestamp":"2017-12-01T09:00:10Z"},"context":{"AudioPlayer":{"playerActivity":"IDLE"},"System":{"application":{"applicationId":"amzn1.ask.skill.594232d8-4095-499b-9ba3-11701107834d"},"user":{"userId":"amzn1.ask.account.AF3E6FFKSBVMJK6ZOGSEWKEDLD2EXJCZUFJX2ZNWO3R55COS5ZLILGRLM7WTJWKQYPRAOUZWFS2ZZP6ULJRALRA3CVIDCVZ7W5VUMZZMWREW3UWZRIF3XWJMXG5HV4LZZ5ZCDYXKM56BQUKOWVPYP4CWH3TP3SQABYCVATYLG56PNUHV2VON3RAY54LELDRHWBZ2JI6O6VN6LWY"},"device":{"supportedInterfaces":{}}}},"version":"1.0"}`)
// Test_CheckRequestBody_verifyBody would exercise signature verification of
// the request body. It is skipped because a fresh, valid Signature cannot be
// produced here and the SignatureCertChainUrl link has expired (Amazon
// rotates those links frequently). Skipping the test does not mean the
// verifyBody method is unusable.
func Test_CheckRequestBody_verifyBody(testingT *testing.T){
	// Skip explicitly instead of a bare return: a bare return makes the test
	// report as passed even though nothing was verified.
	testingT.Skip("requires a valid Signature and a live SignatureCertChainUrl; see the comment above")
	// The table below is kept for when a fresh signed request is available.
	var tests = []struct{
		crb checkRequestBody
		err bool
	}{
		{crb:checkRequestBody{R:func() (req *http.Request) {
			req, _ = http.ReadRequest(bufio.NewReader(bytes.NewReader(requestBody)))
			return req
		}()}},
		{crb:checkRequestBody{R:func() (req *http.Request) {
			req, _ = http.ReadRequest(bufio.NewReader(bytes.NewReader(requestBody)))
			req.Header.Set("Signature", "111")
			return req
		}()}, err:true},
		{crb:checkRequestBody{R:func() (req *http.Request) {
			req, _ = http.ReadRequest(bufio.NewReader(bytes.NewReader(requestBody)))
			req.Header.Set("SignatureCertChainUrl", "https://s3.amazonaws.com/echo.api/echo-api-cert-5.pem.xxxxxxxxx")
			return req
		}()}, err:true},
	}
	app := &EchoApplication{
		CertFolder: "./AmazonCertFile",
	}
	for _, test := range tests {
		_, err := test.crb.verifyBody(app)
		if err != nil && !test.err {
			testingT.Fatal(err)
		}
	}
}
func Test_CheckRequestBody_echoRequest(testingT *testing.T){
var tests = []struct{
crb checkRequestBody
timestamp int
err bool
}{
{crb:checkRequestBody{R:func() (req *http.Request) {
req, _ = http.ReadRequest(bufio.NewReader(bytes.NewReader(requestBody)))
return req
}()},timestamp: 150, err:true},
{crb:checkRequestBody{R:func() (req *http.Request) {
req, _ = http.ReadRequest(bufio.NewReader(bytes.NewReader(requestBody)))
return req
}()}, timestamp:testValidTime, err:false},
}
app := &EchoApplication{}
for _, test := range tests {
app.ValidReqTimestamp = test.timestamp
_, err := test.crb.echoRequest(app)
if err != nil && !test.err {
testingT.Fatal(err)
}
}
}
|
// Copyright 2014 Gyepi Sam. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package redux
// Dependent is the inverse of Prerequisite: a file that depends on the
// current target.
type Dependent struct {
	// Path is the dependent's path, resolved against a root dir by File.
	Path string
}
// File resolves the dependent's path against dir and returns the
// corresponding File. The error-branching in the original was redundant:
// NewFile's result can be returned directly.
func (d Dependent) File(dir string) (*File, error) {
	return NewFile(dir, d.Path)
}
// File resolves the prerequisite's path against dir and returns the
// corresponding File. As with Dependent.File, the result of NewFile can be
// returned directly without re-branching on the error.
func (p Prerequisite) File(dir string) (*File, error) {
	return NewFile(dir, p.Path)
}
// DependentFiles returns a File for each stored dependent record whose
// database key matches prefix. It fails on the first decode or resolution
// error.
func (f *File) DependentFiles(prefix string) ([]*File, error) {
	records, err := f.db.GetValues(prefix)
	if err != nil {
		return nil, err
	}
	out := make([]*File, len(records))
	for i, raw := range records {
		dep, err := decodeDependent(raw)
		if err != nil {
			return nil, err
		}
		file, err := dep.File(f.RootDir)
		if err != nil {
			return nil, err
		}
		out[i] = file
	}
	return out, nil
}
// AllDependents returns Files for every dependent recorded under the
// SATISFIES key space, regardless of event.
func (f *File) AllDependents() ([]*File, error) {
	return f.DependentFiles(f.makeKey(SATISFIES))
}
// EventDependents returns Files for the dependents recorded under the
// SATISFIES key space for the given event only.
func (f *File) EventDependents(event Event) ([]*File, error) {
	return f.DependentFiles(f.makeKey(SATISFIES, event))
}
// DeleteAllDependencies removes every dependency record stored under the
// SATISFIES key space, stopping at the first failure.
func (f *File) DeleteAllDependencies() (err error) {
	keys, err := f.db.GetKeys(f.makeKey(SATISFIES))
	if err != nil {
		return
	}
	for _, candidate := range keys {
		if err = f.Delete(candidate); err != nil {
			return
		}
	}
	return
}
// DeleteDependency removes the single dependency record identified by the
// (event, hash) pair.
func (f *File) DeleteDependency(event Event, hash Hash) error {
	return f.Delete(f.makeKey(SATISFIES, event, hash))
}
// PutDependency stores dep under the (event, hash) pair in the SATISFIES
// key space.
func (f *File) PutDependency(event Event, hash Hash, dep Dependent) error {
	return f.Put(f.makeKey(SATISFIES, event, hash), dep)
}
// NotifyDependents flags dependents as out of date because target has been created, modified, or deleted.
func (f *File) NotifyDependents(event Event) (err error) {
	deps, err := f.EventDependents(event)
	if err != nil {
		return err
	}
	for i := range deps {
		dep := deps[i]
		if err := dep.PutMustRebuild(); err != nil {
			return err
		}
		f.Debug("@Notify %s -> %s\n", event, dep.Path)
	}
	return nil
}
|
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package query
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"github.com/kubevela/pkg/util/slices"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/networking/v1"
networkv1beta1 "k8s.io/api/networking/v1beta1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
gatewayv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1"
monitorContext "github.com/kubevela/pkg/monitor/context"
wfContext "github.com/kubevela/workflow/pkg/context"
"github.com/kubevela/workflow/pkg/cue/model/value"
"github.com/kubevela/workflow/pkg/types"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
apis "github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/multicluster"
querytypes "github.com/oam-dev/kubevela/pkg/velaql/providers/query/types"
)
// CollectServiceEndpoints collects the service endpoints exposed by an
// application's resources. It works for common component types such as
// webservice or helm; cloud service components are not supported currently.
// The app name/namespace are read from the workflow value under "app", and
// the resulting endpoint list is written back via fillQueryResult.
func (h *provider) CollectServiceEndpoints(ctx monitorContext.Context, wfCtx wfContext.Context, v *value.Value, act types.Action) error {
	val, err := v.LookupValue("app")
	if err != nil {
		return err
	}
	opt := Option{}
	if err = val.UnmarshalTo(&opt); err != nil {
		return err
	}
	app := new(v1beta1.Application)
	err = findResource(ctx, h.cli, app, opt.Name, opt.Namespace, "")
	if err != nil {
		return fmt.Errorf("query app failure %w", err)
	}
	serviceEndpoints := make([]querytypes.ServiceEndpoint, 0)
	// clusterGatewayNodeIP caches one gateway node IP per cluster so that the
	// node list is fetched at most once per cluster across all resources.
	var clusterGatewayNodeIP = make(map[string]string)
	collector := NewAppCollector(h.cli, opt)
	resources, err := collector.ListApplicationResources(ctx, app)
	if err != nil {
		return err
	}
	for i, resource := range resources {
		cluster := resources[i].Cluster
		// Lazily resolve (and memoize) the gateway node IP for this
		// resource's cluster; only NodePort services actually invoke it.
		cachedSelectorNodeIP := func() string {
			if ip, exist := clusterGatewayNodeIP[cluster]; exist {
				return ip
			}
			ip := selectorNodeIP(ctx, cluster, h.cli)
			if ip != "" {
				clusterGatewayNodeIP[cluster] = ip
			}
			return ip
		}
		// Resources with a populated tree are walked recursively; otherwise
		// the endpoint is derived from the top-level resource alone.
		if resource.ResourceTree != nil {
			serviceEndpoints = append(serviceEndpoints, getEndpointFromNode(ctx, h.cli, resource.ResourceTree, resource.Component, cachedSelectorNodeIP)...)
		} else {
			serviceEndpoints = append(serviceEndpoints, getServiceEndpoints(ctx, h.cli, resource.GroupVersionKind(), resource.Name, resource.Namespace, resource.Cluster, resource.Component, cachedSelectorNodeIP)...)
		}
	}
	return fillQueryResult(v, serviceEndpoints, "list")
}
// getEndpointFromNode walks a resource tree depth-first and collects the
// service endpoints exposed by the node itself and all of its leaf nodes.
// A nil node yields no endpoints.
func getEndpointFromNode(ctx context.Context, cli client.Client, node *querytypes.ResourceTreeNode, component string, cachedSelectorNodeIP func() string) []querytypes.ServiceEndpoint {
	if node == nil {
		return nil
	}
	endpoints := getServiceEndpoints(ctx, cli, node.GroupVersionKind(), node.Name, node.Namespace, node.Cluster, component, cachedSelectorNodeIP)
	for _, leaf := range node.LeafNodes {
		endpoints = append(endpoints, getEndpointFromNode(ctx, cli, leaf, component, cachedSelectorNodeIP)...)
	}
	return endpoints
}
// getServiceEndpoints derives service endpoints from one Kubernetes object,
// dispatching on its kind: Ingress, Service, SeldonDeployment, or HTTPRoute.
// Unknown kinds yield no endpoints. Lookup failures are logged and return nil
// rather than propagating an error.
func getServiceEndpoints(ctx context.Context, cli client.Client, gvk schema.GroupVersionKind, name, namespace, cluster, component string, cachedSelectorNodeIP func() string) []querytypes.ServiceEndpoint {
	var serviceEndpoints []querytypes.ServiceEndpoint
	switch gvk.Kind {
	case "Ingress":
		// Both networking v1beta1 and v1 Ingresses are handled via the v1 type.
		if gvk.Group == networkv1beta1.GroupName && (gvk.Version == "v1beta1" || gvk.Version == "v1") {
			var ingress v1.Ingress
			ingress.SetGroupVersionKind(gvk)
			if err := findResource(ctx, cli, &ingress, name, namespace, cluster); err != nil {
				klog.Error(err, fmt.Sprintf("find v1 Ingress %s/%s from cluster %s failure", name, namespace, cluster))
				return nil
			}
			serviceEndpoints = append(serviceEndpoints, generatorFromIngress(ingress, cluster, component)...)
		} else {
			klog.Warning("not support ingress version", "version", gvk)
		}
	case "Service":
		var service corev1.Service
		service.SetGroupVersionKind(gvk)
		if err := findResource(ctx, cli, &service, name, namespace, cluster); err != nil {
			klog.Error(err, fmt.Sprintf("find v1 Service %s/%s from cluster %s failure", name, namespace, cluster))
			return nil
		}
		serviceEndpoints = append(serviceEndpoints, generatorFromService(service, cachedSelectorNodeIP, cluster, component, "")...)
	case "SeldonDeployment":
		// Seldon deployments are exposed through an Ambassador service whose
		// name/namespace may be overridden by annotations on the deployment.
		obj := new(unstructured.Unstructured)
		obj.SetGroupVersionKind(gvk)
		if err := findResource(ctx, cli, obj, name, namespace, cluster); err != nil {
			klog.Error(err, fmt.Sprintf("find v1 Seldon Deployment %s/%s from cluster %s failure", name, namespace, cluster))
			return nil
		}
		anno := obj.GetAnnotations()
		serviceName := "ambassador"
		serviceNS := apis.DefaultKubeVelaNS
		if anno != nil {
			if anno[annoAmbassadorServiceName] != "" {
				serviceName = anno[annoAmbassadorServiceName]
			}
			if anno[annoAmbassadorServiceNamespace] != "" {
				serviceNS = anno[annoAmbassadorServiceNamespace]
			}
		}
		var service corev1.Service
		if err := findResource(ctx, cli, &service, serviceName, serviceNS, cluster); err != nil {
			klog.Error(err, fmt.Sprintf("find v1 Service %s/%s from cluster %s failure", serviceName, serviceNS, cluster))
			return nil
		}
		// Seldon's conventional routing path is /seldon/<namespace>/<name>.
		serviceEndpoints = append(serviceEndpoints, generatorFromService(service, cachedSelectorNodeIP, cluster, component, fmt.Sprintf("/seldon/%s/%s", namespace, name))...)
	case "HTTPRoute":
		var route gatewayv1beta1.HTTPRoute
		route.SetGroupVersionKind(gvk)
		if err := findResource(ctx, cli, &route, name, namespace, cluster); err != nil {
			klog.Error(err, fmt.Sprintf("find HTTPRoute %s/%s from cluster %s failure", name, namespace, cluster))
			return nil
		}
		serviceEndpoints = append(serviceEndpoints, generatorFromHTTPRoute(ctx, cli, route, cluster, component)...)
	}
	return serviceEndpoints
}
// findResource fetches obj (identified by name/namespace) from the named
// cluster with a 10-second timeout.
// NOTE(review): a NotFound error is swallowed and nil is returned, leaving
// obj in its zero state — callers appear to rely on generating empty output
// from the zero object; confirm this is intentional before changing it.
func findResource(ctx context.Context, cli client.Client, obj client.Object, name, namespace, cluster string) error {
	obj.SetNamespace(namespace)
	obj.SetName(name)
	gctx, cancel := context.WithTimeout(ctx, time.Second*10)
	defer cancel()
	if err := cli.Get(multicluster.ContextWithClusterName(gctx, cluster),
		client.ObjectKeyFromObject(obj), obj); err != nil {
		if kerrors.IsNotFound(err) {
			return nil
		}
		return err
	}
	return nil
}
// generatorFromService converts a Service into endpoints according to its
// type:
//   - LoadBalancer: one endpoint per (port, LB ingress hostname/IP).
//   - NodePort: one endpoint per port, using the lazily resolved gateway
//     node IP of the cluster.
//   - ClusterIP/ExternalName: one in-cluster ("Inner") endpoint per port,
//     addressed as <name>.<namespace>.
// path is attached to every produced endpoint; selectorNodeIP is only
// invoked for NodePort services.
func generatorFromService(service corev1.Service, selectorNodeIP func() string, cluster, component, path string) []querytypes.ServiceEndpoint {
	var serviceEndpoints []querytypes.ServiceEndpoint
	var objRef = corev1.ObjectReference{
		Kind:            "Service",
		Namespace:       service.ObjectMeta.Namespace,
		Name:            service.ObjectMeta.Name,
		UID:             service.UID,
		APIVersion:      service.APIVersion,
		ResourceVersion: service.ResourceVersion,
	}
	// formatEndpoint stamps out one endpoint with the shared object
	// reference, cluster, and component.
	formatEndpoint := func(host, appProtocol string, portName string, portProtocol corev1.Protocol, portNum int32, inner bool) querytypes.ServiceEndpoint {
		return querytypes.ServiceEndpoint{
			Endpoint: querytypes.Endpoint{
				Protocol:    portProtocol,
				AppProtocol: &appProtocol,
				Host:        host,
				Port:        int(portNum),
				PortName:    portName,
				Path:        path,
				Inner:       inner,
			},
			Ref:       objRef,
			Cluster:   cluster,
			Component: component,
		}
	}
	switch service.Spec.Type {
	case corev1.ServiceTypeLoadBalancer:
		for _, port := range service.Spec.Ports {
			// App protocol is guessed from the well-known port number.
			appp := judgeAppProtocol(port.Port)
			for _, ingress := range service.Status.LoadBalancer.Ingress {
				if ingress.Hostname != "" {
					serviceEndpoints = append(serviceEndpoints, formatEndpoint(ingress.Hostname, appp, port.Name, port.Protocol, port.Port, false))
				}
				if ingress.IP != "" {
					serviceEndpoints = append(serviceEndpoints, formatEndpoint(ingress.IP, appp, port.Name, port.Protocol, port.Port, false))
				}
			}
		}
	case corev1.ServiceTypeNodePort:
		for _, port := range service.Spec.Ports {
			appp := judgeAppProtocol(port.Port)
			serviceEndpoints = append(serviceEndpoints, formatEndpoint(selectorNodeIP(), appp, port.Name, port.Protocol, port.NodePort, false))
		}
	case corev1.ServiceTypeClusterIP, corev1.ServiceTypeExternalName:
		// NOTE(review): ExternalName services are addressed by in-cluster DNS
		// (<name>.<namespace>) like ClusterIP services here — confirm that is
		// the intended behavior rather than using spec.externalName.
		for _, port := range service.Spec.Ports {
			appp := judgeAppProtocol(port.Port)
			serviceEndpoints = append(serviceEndpoints, formatEndpoint(fmt.Sprintf("%s.%s", service.Name, service.Namespace), appp, port.Name, port.Protocol, port.Port, true))
		}
	}
	return serviceEndpoints
}
// generatorFromIngress converts an Ingress into one endpoint per HTTP rule
// path. The app protocol (HTTP/HTTPS) is decided by the TLS section, the
// externally exposed port by ingress-controller annotations (falling back to
// 80/443), and an empty rule host falls back to the controller host
// annotation.
func generatorFromIngress(ingress v1.Ingress, cluster, component string) (serviceEndpoints []querytypes.ServiceEndpoint) {
	// HTTPS when the host appears in a TLS entry, or when a TLS entry lists
	// no hosts at all (which covers every host).
	getAppProtocol := func(host string) string {
		if len(ingress.Spec.TLS) > 0 {
			for _, tls := range ingress.Spec.TLS {
				if len(tls.Hosts) > 0 && slices.Contains(tls.Hosts, host) {
					return querytypes.HTTPS
				}
				if len(tls.Hosts) == 0 {
					return querytypes.HTTPS
				}
			}
		}
		return querytypes.HTTP
	}
	// It depends on the Ingress Controller
	getEndpointPort := func(appProtocol string) int {
		if appProtocol == querytypes.HTTPS {
			if port, err := strconv.Atoi(ingress.Annotations[apis.AnnoIngressControllerHTTPSPort]); port > 0 && err == nil {
				return port
			}
			return 443
		}
		if port, err := strconv.Atoi(ingress.Annotations[apis.AnnoIngressControllerHTTPPort]); port > 0 && err == nil {
			return port
		}
		return 80
	}
	// The host in rule maybe empty, means access the application by the Gateway Host(IP)
	getHost := func(host string) string {
		if host != "" {
			return host
		}
		return ingress.Annotations[apis.AnnoIngressControllerHost]
	}
	for _, rule := range ingress.Spec.Rules {
		var appProtocol = getAppProtocol(rule.Host)
		var appPort = getEndpointPort(appProtocol)
		if rule.HTTP != nil {
			for _, path := range rule.HTTP.Paths {
				serviceEndpoints = append(serviceEndpoints, querytypes.ServiceEndpoint{
					Endpoint: querytypes.Endpoint{
						Protocol:    corev1.ProtocolTCP,
						AppProtocol: &appProtocol,
						Host:        getHost(rule.Host),
						Path:        path.Path,
						Port:        appPort,
					},
					Ref: corev1.ObjectReference{
						Kind:            "Ingress",
						Namespace:       ingress.ObjectMeta.Namespace,
						Name:            ingress.ObjectMeta.Name,
						UID:             ingress.UID,
						APIVersion:      ingress.APIVersion,
						ResourceVersion: ingress.ResourceVersion,
					},
					Cluster:   cluster,
					Component: component,
				})
			}
		}
	}
	return serviceEndpoints
}
// getGatewayPortAndProtocol resolves the (protocol, port) pair exposed by the
// first Gateway parent whose listener can be determined: the listener named
// by SectionName if set, otherwise the Gateway's first listener. Returns
// (HTTP, 80) when no Gateway parent yields a listener. A failed Gateway
// lookup is only logged; resolution continues with the zero Gateway.
func getGatewayPortAndProtocol(ctx context.Context, cli client.Client, defaultNamespace, cluster string, parents []gatewayv1beta1.ParentReference) (string, int) {
	for _, parent := range parents {
		if parent.Kind != nil && *parent.Kind == "Gateway" {
			var gateway gatewayv1beta1.Gateway
			namespace := defaultNamespace
			if parent.Namespace != nil {
				namespace = string(*parent.Namespace)
			}
			if err := findResource(ctx, cli, &gateway, string(parent.Name), namespace, cluster); err != nil {
				klog.Errorf("query the Gateway %s/%s/%s failure %s", cluster, namespace, string(parent.Name), err.Error())
			}
			var listener *gatewayv1beta1.Listener
			if parent.SectionName != nil {
				for i, lis := range gateway.Spec.Listeners {
					if lis.Name == *parent.SectionName {
						listener = &gateway.Spec.Listeners[i]
						break
					}
				}
			} else if len(gateway.Spec.Listeners) > 0 {
				listener = &gateway.Spec.Listeners[0]
			}
			if listener != nil {
				var protocol = querytypes.HTTP
				if listener.Protocol == gatewayv1beta1.HTTPSProtocolType {
					protocol = querytypes.HTTPS
				}
				var port = int(listener.Port)
				// The gateway listener port may not be the externally exposed port.
				// For example, the traefik addon has a default port mapping configuration of 8443->443 8000->80
				// So users could set the `ports-mapping` annotation.
				if mapping := gateway.Annotations["ports-mapping"]; mapping != "" {
					// Annotation format: "<listenerPort>:<exposedPort>,..."
					for _, portItem := range strings.Split(mapping, ",") {
						if portMap := strings.Split(portItem, ":"); len(portMap) == 2 {
							if portMap[0] == fmt.Sprintf("%d", listener.Port) {
								newPort, err := strconv.Atoi(portMap[1])
								if err == nil {
									port = newPort
								}
							}
						}
					}
				}
				return protocol, port
			}
		}
	}
	return querytypes.HTTP, 80
}
// generatorFromHTTPRoute converts a Gateway API HTTPRoute into service
// endpoints, using the protocol and externally exposed port of the parent
// Gateway's listener. Only PathPrefix (or type-less) matches contribute a
// path, and each distinct path produces at most one endpoint.
// NOTE(review): the dedupe map is keyed by path only, so with multiple
// hostnames only the first hostname gets an endpoint per path — confirm
// whether dedupe should instead be per (host, path).
func generatorFromHTTPRoute(ctx context.Context, cli client.Client, route gatewayv1beta1.HTTPRoute, cluster, component string) []querytypes.ServiceEndpoint {
	// The parent Gateway (and therefore protocol/port) depends only on the
	// route itself, so resolve it once up front instead of once per
	// rule×host iteration — each call may perform a remote Gateway lookup.
	appProtocol, appPort := getGatewayPortAndProtocol(ctx, cli, route.Namespace, cluster, route.Spec.ParentRefs)
	existPath := make(map[string]bool)
	var serviceEndpoints []querytypes.ServiceEndpoint
	for _, rule := range route.Spec.Rules {
		for _, host := range route.Spec.Hostnames {
			for _, match := range rule.Matches {
				path := ""
				if match.Path != nil && (match.Path.Type == nil || string(*match.Path.Type) == string(gatewayv1beta1.PathMatchPathPrefix)) {
					path = *match.Path.Value
				}
				if existPath[path] {
					continue
				}
				existPath[path] = true
				serviceEndpoints = append(serviceEndpoints, querytypes.ServiceEndpoint{
					Endpoint: querytypes.Endpoint{
						Protocol:    corev1.ProtocolTCP,
						AppProtocol: &appProtocol,
						Host:        string(host),
						Path:        path,
						Port:        appPort,
					},
					Ref: corev1.ObjectReference{
						Kind:            route.Kind,
						Namespace:       route.ObjectMeta.Namespace,
						Name:            route.ObjectMeta.Name,
						UID:             route.UID,
						APIVersion:      route.APIVersion,
						ResourceVersion: route.ResourceVersion,
					},
					Cluster:   cluster,
					Component: component,
				})
			}
		}
	}
	return serviceEndpoints
}
// selectorNodeIP returns a gateway IP picked from the nodes of the named
// cluster, or "" when the nodes cannot be listed (10s timeout) or the
// cluster has no nodes.
func selectorNodeIP(ctx context.Context, clusterName string, client client.Client) string {
	ctx, cancel := context.WithTimeout(ctx, time.Second*10)
	defer cancel()
	nodeList := corev1.NodeList{}
	err := client.List(multicluster.ContextWithClusterName(ctx, clusterName), &nodeList)
	if err != nil || len(nodeList.Items) == 0 {
		return ""
	}
	return selectGatewayIP(nodeList.Items)
}
// judgeAppProtocol guesses the application protocol from a well-known port
// number (per RFC-6335 and the IANA service-name registry,
// http://www.iana.org/assignments/service-names). Unknown ports yield "".
func judgeAppProtocol(port int32) string {
	switch port {
	case 80, 8080:
		return querytypes.HTTP
	case 443:
		return querytypes.HTTPS
	case 3306:
		return querytypes.Mysql
	case 6379:
		return querytypes.Redis
	default:
		return ""
	}
}
// selectGatewayIP will choose one gateway IP from all nodes, it will pick up external IP first. If there isn't any, it will pick the first node's internal IP.
// Node preference: a node labeled as gateway wins outright; otherwise
// worker-labeled nodes are preferred; otherwise all nodes are candidates.
func selectGatewayIP(nodes []corev1.Node) string {
	var gatewayNode *corev1.Node
	var workerNodes []corev1.Node
	for i, node := range nodes {
		if _, exist := node.Labels[apis.LabelNodeRoleGateway]; exist {
			// First gateway-labeled node wins; stop scanning (workers found
			// after this point are irrelevant).
			gatewayNode = &nodes[i]
			break
		} else if _, exist := node.Labels[apis.LabelNodeRoleWorker]; exist {
			workerNodes = append(workerNodes, nodes[i])
		}
	}
	var candidates = nodes
	if gatewayNode != nil {
		candidates = []corev1.Node{*gatewayNode}
	} else if len(workerNodes) > 0 {
		candidates = workerNodes
	}
	if len(candidates) == 0 {
		return ""
	}
	var addressMaps = make([]map[corev1.NodeAddressType]string, 0)
	for _, node := range candidates {
		var addressMap = make(map[corev1.NodeAddressType]string)
		for _, address := range node.Status.Addresses {
			addressMap[address.Type] = address.Address
		}
		// first get external ip
		if ip, exist := addressMap[corev1.NodeExternalIP]; exist {
			return ip
		}
		addressMaps = append(addressMaps, addressMap)
	}
	// No external IP anywhere: fall back to the first candidate's internal IP
	// (may be "" if that node has none).
	return addressMaps[0][corev1.NodeInternalIP]
}
|
package main
import "fmt"
import "math"
// main prints the 10001st prime number (Project Euler problem 7).
func main() {
	fmt.Printf("%v\n", nthPrime(10001))
}

// nthPrime returns the n-th prime, 1-based (nthPrime(1) == 2), using trial
// division up to the square root of each candidate.
func nthPrime(n int) int {
	found := 0
	for i := 2; ; i++ {
		isPrime := true
		// Hoist the sqrt bound out of the divisor loop and stop at the first
		// divisor found — the original recomputed math.Sqrt every iteration
		// and kept testing divisors after compositeness was already known.
		limit := int(math.Sqrt(float64(i)))
		for d := 2; d <= limit; d++ {
			if i%d == 0 {
				isPrime = false
				break
			}
		}
		if isPrime {
			found++
			if found == n {
				return i
			}
		}
	}
}
|
//go:build integration
package test
import (
"bytes"
"context"
"flag"
"io"
"io/ioutil"
"testing"
"time"
krakendlambda "github.com/devopsfaith/krakend-lambda/v2"
"github.com/luraproject/lura/v2/config"
"github.com/luraproject/lura/v2/proxy"
)
// endpoint points at the localstack instance used by this integration test;
// override it with the -aws_endpoint flag.
var endpoint = flag.String("aws_endpoint", "http://192.168.99.100:4574", "url of the localstack's endpoint")
// TestLocalStack exercises the lambda backend factory against a localstack
// instance (see the -aws_endpoint flag), covering GET/POST invocations with
// the default function key, a custom key, and an explicit function name.
func TestLocalStack(t *testing.T) {
	// The wrapped backend factory must never be reached: every test case
	// carries the lambda namespace config, so bf handles all of them.
	explosiveBF := func(remote *config.Backend) proxy.Proxy {
		t.Error("this backend factory should not been called")
		return proxy.NoopProxy
	}
	bf := krakendlambda.BackendFactory(explosiveBF)
	for i, tc := range []struct {
		Name        string
		Method      string
		Key         string
		Function    string
		ExpectedMsg string
		Params      map[string]string
		Body        io.ReadCloser
	}{
		{
			Name:        "get_with_default_key",
			Method:      "GET",
			Params:      map[string]string{"function": "python37", "first_name": "fooo", "last_name": "bar"},
			ExpectedMsg: "Hello fooo bar!",
		},
		{
			Name:        "post_with_default_key",
			Method:      "POST",
			Params:      map[string]string{"function": "python37"},
			Body:        ioutil.NopCloser(bytes.NewBufferString(`{"first_name":"foobar","last_name":"some"}`)),
			ExpectedMsg: "Hello foobar some!",
		},
		{
			Name:        "get_with_custom_key",
			Method:      "GET",
			Params:      map[string]string{"function": "unknown", "lambda": "python37", "first_name": "fooo", "last_name": "bar"},
			Key:         "lambda",
			ExpectedMsg: "Hello fooo bar!",
		},
		{
			Name:        "post_with_custom_key",
			Method:      "POST",
			Params:      map[string]string{"function": "unknown", "lambda": "python37"},
			Body:        ioutil.NopCloser(bytes.NewBufferString(`{"first_name":"foobar","last_name":"some"}`)),
			Key:         "lambda",
			ExpectedMsg: "Hello foobar some!",
		},
		{
			Name:        "get_with_function_name",
			Method:      "GET",
			Params:      map[string]string{"function": "unknown", "first_name": "fooo", "last_name": "bar"},
			ExpectedMsg: "Hello fooo bar!",
			Function:    "python37",
		},
		{
			Name:        "post_with_function_name",
			Method:      "POST",
			Params:      map[string]string{"function": "unknown"},
			Body:        ioutil.NopCloser(bytes.NewBufferString(`{"first_name":"foobar","last_name":"some"}`)),
			ExpectedMsg: "Hello foobar some!",
			Function:    "python37",
		},
		{
			Name:        "get_with_function_name_and_key",
			Method:      "GET",
			Params:      map[string]string{"function": "unknown", "lambda": "unknown", "first_name": "fooo", "last_name": "bar"},
			Key:         "lambda",
			ExpectedMsg: "Hello fooo bar!",
			Function:    "python37",
		},
		{
			Name:        "post_with_function_name_and_key",
			Method:      "POST",
			Params:      map[string]string{"function": "unknown", "lambda": "unknown"},
			Body:        ioutil.NopCloser(bytes.NewBufferString(`{"first_name":"foobar","last_name":"some"}`)),
			Key:         "lambda",
			ExpectedMsg: "Hello foobar some!",
			Function:    "python37",
		},
	} {
		t.Run(tc.Name, func(t *testing.T) {
			r := &proxy.Request{
				Params: tc.Params,
				Body:   tc.Body,
			}
			// Build the extra config completely before wiring it into the
			// backend definition. The original mutated the shared map after
			// constructing remote and then redundantly re-assigned it to
			// remote.ExtraConfig (a no-op, since the map was already there).
			extra := map[string]interface{}{
				"region":   "us-east-1",
				"endpoint": *endpoint,
			}
			if tc.Key != "" {
				extra["function_param_name"] = tc.Key
			}
			if tc.Function != "" {
				extra["function_name"] = tc.Function
			}
			remote := &config.Backend{
				Method: tc.Method,
				ExtraConfig: config.ExtraConfig{
					krakendlambda.Namespace: extra,
				},
			}
			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
			defer cancel()
			resp, err := bf(remote)(ctx, r)
			if err != nil {
				t.Error(i, err)
				return
			}
			if !resp.IsComplete {
				t.Errorf("%d: incomplete response", i)
				return
			}
			if m, ok := resp.Data["message"]; !ok || m != tc.ExpectedMsg {
				t.Errorf("unexpected response: %v", resp.Data)
			}
		})
	}
}
|
package leveldb
import (
"bytes"
"sync"
"testing"
)
// testDB is the shared engine instance opened once for the whole test run.
var testDB *LevelDBEngine

// onlyOnce guards the one-time opening of testDB.
var onlyOnce sync.Once
// createDB opens the LevelDB engine backed by the given path at most once
// per test run and returns the shared instance. An open failure is printed
// and then panics, aborting the tests.
func createDB(name string) *LevelDBEngine {
	onlyOnce.Do(func() {
		db, err := Open(name)
		if err != nil {
			println(err.Error())
			panic(err)
		}
		testDB = db
	})
	return testDB
}
func TestSimple(t *testing.T) {
db := createDB("/tmp/testdb")
key := []byte("hi")
value := []byte("hello world")
if err := db.Put(key, value); err != nil {
t.Fatal(err)
}
if v, err := db.Get(key); err != nil {
t.Fatal(err)
} else if (!bytes.Equal(v, value)) {
t.Fatal("get value not equal")
}
if err := db.Delete(key); err != nil {
t.Fatal(err)
}
if v, err := db.Get(key); err != nil {
t.Fatal(err)
} else if v != nil {
t.Fatal("after delete, key should not exist any more")
}
} |
package msgHandler
import (
"bytes"
"github.com/HNB-ECO/HNB-Blockchain/HNB/consensus/algorand/types"
"github.com/HNB-ECO/HNB-Blockchain/HNB/util"
)
// BftGroup describes one BFT consensus group: its identifier, the VRF
// output used in forming it, and its member validator set.
type BftGroup struct {
	// BgID is the group's identifier.
	BgID uint64
	// VRF value and proof — presumably produced when the group was selected;
	// TODO(review) confirm against the producer of this struct.
	VRFValue []byte
	VRFProof []byte
	// Validators are the group members, matched by address in Exist.
	Validators []*types.Validator
}
// Exist reports whether the given address belongs to one of the group's
// validators. An empty address never matches.
func (bg BftGroup) Exist(digestAddr util.HexBytes) bool {
	if len(digestAddr) == 0 {
		return false
	}
	for i := range bg.Validators {
		if bytes.Equal(bg.Validators[i].Address, digestAddr) {
			return true
		}
	}
	return false
}
|
package sql
import (
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
"time"
)
// Statement represents a single command in MessageQL.
type Statement interface {
	Node
	// stmt is an unexported marker method restricting implementations to
	// this package.
	stmt()
	// RequiredPrivileges returns the privileges a user needs in order to
	// execute the statement.
	RequiredPrivileges() ExecutionPrivileges
}

// Statements represents a list of statements.
type Statements []Statement
// String returns a string representation of the statements, joined by ";\n".
func (a Statements) String() string {
	// Pre-size the slice: the number of parts is known up front.
	str := make([]string, 0, len(a))
	for _, stmt := range a {
		str = append(str, stmt.String())
	}
	return strings.Join(str, ";\n")
}
// Query represents a collection of ordered statements.
type Query struct {
	// Statements holds the parsed statements in execution order.
	Statements Statements
}

// String returns a string representation of the query.
func (q *Query) String() string { return q.Statements.String() }
// HasDefaultDatabase provides an interface to get the default database from a Statement.
type HasDefaultDatabase interface {
	Node
	stmt()
	// DefaultDatabase returns the database the statement applies to when
	// none is given explicitly.
	DefaultDatabase() string
}
// ExecutionPrivilege is a privilege required for a user to execute
// a statement on a database or resource.
type ExecutionPrivilege struct {
	// Admin privilege required.
	Admin bool
	// Name of the database.
	Name string
	// Database privilege required.
	Privilege Privilege
}

// ExecutionPrivileges is a list of privileges required to execute a statement.
type ExecutionPrivileges []ExecutionPrivilege
// The no-op methods below tag each concrete statement type as implementing
// the Statement interface (see the stmt marker method).
func (*AlterRetentionPolicyStatement) stmt() {}

// func (*CreateContinuousQueryStatement) stmt() {}
func (*CreateDatabaseStatement) stmt()         {}
func (*CreateRetentionPolicyStatement) stmt()  {}
func (*CreateUserStatement) stmt()             {}
func (*DeleteStatement) stmt()                 {}
func (*DropConversationStatement) stmt()       {}
func (*DropDatabaseStatement) stmt()           {}
func (*DropOrganizationStatement) stmt()       {}
func (*DropRetentionPolicyStatement) stmt()    {}
func (*DropUserStatement) stmt()               {}
func (*ShowConversationsStatement) stmt()      {}
func (*ShowDatabasesStatement) stmt()          {}
func (*ShowDiagnosticsStatement) stmt()        {}
func (*ShowDevicesForUserStatement) stmt()     {}
func (*ShowGrantsForUserStatement) stmt()      {}
func (*ShowOrganizationsStatement) stmt()      {}
func (*ShowOrganizationMembersStatement) stmt() {}
func (*ShowRetentionPoliciesStatement) stmt()  {}
func (*ShowServersStatement) stmt()            {}
func (*ShowStatsStatement) stmt()              {}
func (*ShowUsersStatement) stmt()              {}
func (*GrantStatement) stmt()                  {}
func (*GrantAdminStatement) stmt()             {}
func (*RevokeStatement) stmt()                 {}
func (*RevokeAdminStatement) stmt()            {}
func (*SelectStatement) stmt()                 {}
func (*SetPasswordUserStatement) stmt()        {}
// STATEMENTS

// CreateDatabaseStatement represents a command for creating a new database.
type CreateDatabaseStatement struct {
	// Name of the database to be created.
	Name string
}

// String returns a string representation of the create database statement.
func (s *CreateDatabaseStatement) String() string {
	return "CREATE DATABASE " + s.Name
}

// RequiredPrivileges returns the privilege required to execute a CreateDatabaseStatement.
func (s *CreateDatabaseStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// DropDatabaseStatement represents a command to drop a database.
type DropDatabaseStatement struct {
	// Name of the database to be dropped.
	Name string
}

// String returns a string representation of the drop database statement.
func (s *DropDatabaseStatement) String() string {
	return "DROP DATABASE " + s.Name
}

// RequiredPrivileges returns the privilege required to execute a DropDatabaseStatement.
func (s *DropDatabaseStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// CreateUserStatement represents a command for creating a new user.
type CreateUserStatement struct {
	// Name of the user to be created.
	Name string
	// User's password.
	Password string
	// User's admin privilege.
	Admin bool
}

// String returns a string representation of the create user statement.
// NOTE(review): the password is rendered in cleartext — if this string ever
// reaches logs, consider redacting it.
func (s *CreateUserStatement) String() string {
	out := "CREATE USER " + s.Name + " WITH PASSWORD " + s.Password
	if s.Admin {
		out += " WITH ALL PRIVILEGES"
	}
	return out
}

// RequiredPrivileges returns the privilege(s) required to execute a CreateUserStatement.
func (s *CreateUserStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// DropUserStatement represents a command for dropping a user.
type DropUserStatement struct {
	// Name of the user to drop.
	Name string
}

// String returns a string representation of the drop user statement.
func (s *DropUserStatement) String() string {
	return "DROP USER " + s.Name
}

// RequiredPrivileges returns the privilege(s) required to execute a DropUserStatement.
func (s *DropUserStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// Privilege is a type of action a user can be granted the right to use.
type Privilege int

const (
	// NoPrivileges means no privileges required / granted / revoked.
	NoPrivileges Privilege = iota
	// ReadPrivilege means read privilege required / granted / revoked.
	ReadPrivilege
	// WritePrivilege means write privilege required / granted / revoked.
	WritePrivilege
	// AllPrivileges means all privileges required / granted / revoked.
	AllPrivileges
)

// NewPrivilege returns an initialized *Privilege.
func NewPrivilege(p Privilege) *Privilege {
	priv := p
	return &priv
}

// String returns a string representation of a Privilege; unknown values
// render as the empty string.
func (p Privilege) String() string {
	switch p {
	case NoPrivileges:
		return "NO PRIVILEGES"
	case ReadPrivilege:
		return "READ"
	case WritePrivilege:
		return "WRITE"
	case AllPrivileges:
		return "ALL PRIVILEGES"
	default:
		return ""
	}
}
// GrantStatement represents a command for granting a privilege.
type GrantStatement struct {
	// The privilege to be granted.
	Privilege Privilege
	// Database to grant the privilege to.
	On string
	// Who to grant the privilege to.
	User string
}

// String returns a string representation of the grant statement.
func (s *GrantStatement) String() string {
	return "GRANT " + s.Privilege.String() + " ON " + s.On + " TO " + s.User
}

// RequiredPrivileges returns the privilege required to execute a GrantStatement.
func (s *GrantStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// GrantAdminStatement represents a command for granting admin privilege.
type GrantAdminStatement struct {
	// Who to grant the privilege to.
	User string
}

// String returns a string representation of the grant admin statement.
func (s *GrantAdminStatement) String() string {
	return "GRANT ALL PRIVILEGES TO " + s.User
}
// RequiredPrivileges returns the privilege required to execute a GrantAdminStatement.
// Only an administrator may grant admin rights to another user.
func (s *GrantAdminStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// SetPasswordUserStatement represents a command for changing user password.
type SetPasswordUserStatement struct {
	// Plain-text password to set.
	Password string

	// Name of the user whose password is being changed.
	Name string
}

// String returns a string representation of the set password statement.
// NOTE(review): the plain-text password appears verbatim in the output —
// avoid logging this string.
func (s *SetPasswordUserStatement) String() string {
	return "SET PASSWORD FOR " + s.Name + " = " + s.Password
}
// RequiredPrivileges returns the privilege required to execute a SetPasswordUserStatement.
// Changing another user's password requires full admin rights.
func (s *SetPasswordUserStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// RevokeStatement represents a command to revoke a privilege from a user.
type RevokeStatement struct {
	// The privilege to be revoked.
	Privilege Privilege

	// Database to revoke the privilege from.
	On string

	// Who to revoke privilege from.
	User string
}

// String returns a string representation of the revoke statement.
func (s *RevokeStatement) String() string {
	return fmt.Sprintf("REVOKE %s ON %s FROM %s", s.Privilege.String(), s.On, s.User)
}

// RequiredPrivileges returns the privilege required to execute a RevokeStatement.
// Revoking privileges is restricted to cluster administrators.
func (s *RevokeStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// RevokeAdminStatement represents a command to revoke admin privilege from a user.
type RevokeAdminStatement struct {
	// Who to revoke admin privilege from.
	User string
}

// String returns a string representation of the revoke admin statement.
func (s *RevokeAdminStatement) String() string {
	return "REVOKE ALL PRIVILEGES FROM " + s.User
}
// RequiredPrivileges returns the privilege required to execute a RevokeAdminStatement.
// Only an administrator may strip admin rights from a user.
func (s *RevokeAdminStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// CreateRetentionPolicyStatement represents a command to create a retention policy.
type CreateRetentionPolicyStatement struct {
	// Name of policy to create.
	Name string

	// Name of database this policy belongs to.
	Database string

	// Duration data written to this policy will be retained.
	Duration time.Duration

	// Replication factor for data written to this policy.
	Replication int

	// Should this policy be set as default for the database?
	Default bool
}

// String returns a string representation of the create retention policy.
func (s *CreateRetentionPolicyStatement) String() string {
	out := "CREATE RETENTION POLICY " + s.Name +
		" ON " + s.Database +
		" DURATION " + FormatDuration(s.Duration) +
		" REPLICATION " + strconv.Itoa(s.Replication)
	if s.Default {
		out += " DEFAULT"
	}
	return out
}

// RequiredPrivileges returns the privilege required to execute a CreateRetentionPolicyStatement.
func (s *CreateRetentionPolicyStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// AlterRetentionPolicyStatement represents a command to alter an existing retention policy.
// Nil pointer fields mean "leave that setting unchanged".
type AlterRetentionPolicyStatement struct {
	// Name of policy to alter.
	Name string

	// Name of the database this policy belongs to.
	Database string

	// Duration data written to this policy will be retained.
	Duration *time.Duration

	// Replication factor for data written to this policy.
	Replication *int

	// Should this policy be set as default for the database?
	Default bool
}

// String returns a string representation of the alter retention policy statement.
// Only the clauses whose fields are set are emitted.
func (s *AlterRetentionPolicyStatement) String() string {
	out := "ALTER RETENTION POLICY " + s.Name + " ON " + s.Database
	if s.Duration != nil {
		out += " DURATION " + FormatDuration(*s.Duration)
	}
	if s.Replication != nil {
		out += " REPLICATION " + strconv.Itoa(*s.Replication)
	}
	if s.Default {
		out += " DEFAULT"
	}
	return out
}

// RequiredPrivileges returns the privilege required to execute an AlterRetentionPolicyStatement.
func (s *AlterRetentionPolicyStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// DropRetentionPolicyStatement represents a command to drop a retention policy from a database.
type DropRetentionPolicyStatement struct {
	// Name of the policy to drop.
	Name string

	// Name of the database to drop the policy from.
	Database string
}

// String returns a string representation of the drop retention policy statement.
func (s *DropRetentionPolicyStatement) String() string {
	return "DROP RETENTION POLICY " + s.Name + " ON " + s.Database
}
// RequiredPrivileges returns the privilege required to execute a DropRetentionPolicyStatement.
// Unlike most DDL here, this is database-scoped: write privilege on s.Database, no admin flag.
func (s *DropRetentionPolicyStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: WritePrivilege}}
}
// SelectStatement represents a command for extracting data from the database.
// Zero values for Limit/Offset mean "no limit"/"start at the first row".
type SelectStatement struct {
	// Expressions returned from the selection.
	Fields Fields

	// Expressions used for grouping the selection.
	Dimensions Dimensions

	// Data sources that fields are extracted from.
	Sources Sources

	// An expression evaluated on data point.
	Condition Expr

	// Fields to sort results by
	SortFields SortFields

	// Maximum number of rows to be returned. Unlimited if zero.
	Limit int

	// Returns rows starting at an offset from the first row.
	Offset int

	// if it's a query for raw data values (i.e. not an aggregate)
	IsRawQuery bool
}
// Clone returns a deep copy of the statement.
// Expressions are cloned via CloneExpr; Limit/Offset/IsRawQuery are copied by value.
func (s *SelectStatement) Clone() *SelectStatement {
	other := &SelectStatement{
		Fields:     make(Fields, len(s.Fields)),
		Dimensions: make(Dimensions, len(s.Dimensions)),
		Sources:    cloneSources(s.Sources),
		SortFields: make(SortFields, len(s.SortFields)),
		Condition:  CloneExpr(s.Condition),
		Limit:      s.Limit,
		Offset:     s.Offset,
		IsRawQuery: s.IsRawQuery,
	}
	for i, f := range s.Fields {
		other.Fields[i] = &Field{Expr: CloneExpr(f.Expr), Alias: f.Alias}
	}
	for i, d := range s.Dimensions {
		other.Dimensions[i] = &Dimension{Expr: CloneExpr(d.Expr)}
	}
	for i, sf := range s.SortFields {
		other.SortFields[i] = &SortField{Name: sf.Name, Ascending: sf.Ascending}
	}
	return other
}
// String returns a string representation of the select statement.
// Optional clauses (FROM, WHERE, ORDER BY, LIMIT, OFFSET) are emitted only when set.
func (s *SelectStatement) String() string {
	out := "SELECT " + s.Fields.String()
	if len(s.Sources) > 0 {
		out += " FROM " + s.Sources.String()
	}
	if s.Condition != nil {
		out += " WHERE " + s.Condition.String()
	}
	if len(s.SortFields) > 0 {
		out += " ORDER BY " + s.SortFields.String()
	}
	if s.Limit > 0 {
		out += fmt.Sprintf(" LIMIT %d", s.Limit)
	}
	if s.Offset > 0 {
		out += " OFFSET " + strconv.Itoa(s.Offset)
	}
	return out
}
// RequiredPrivileges returns the privilege required to execute the SelectStatement.
// Reading data requires only read privilege; no admin flag, no database scope.
func (s *SelectStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}
}
// OnlyTimeDimensions returns true if the statement has a where clause with only time constraints
func (s *SelectStatement) OnlyTimeDimensions() bool {
	return s.walkForTime(s.Condition)
}

// walkForTime is called by the OnlyTimeDimensions method to walk the where clause to determine if
// the only things specified are based on time
func (s *SelectStatement) walkForTime(node Node) bool {
	switch expr := node.(type) {
	case *BinaryExpr:
		// AND/OR: every branch must itself be time-only.
		if expr.Op == AND || expr.Op == OR {
			return s.walkForTime(expr.LHS) && s.walkForTime(expr.RHS)
		}
		// Comparison: only the left-hand side is inspected for "time".
		ref, ok := expr.LHS.(*VarRef)
		return ok && strings.ToLower(ref.Val) == "time"
	case *ParenExpr:
		// walk down the tree
		return s.walkForTime(expr.Expr)
	}
	return false
}
// HasWildcard returns whether or not the select statement has at least 1 wildcard
// in either the field list or the GROUP BY dimensions.
func (s *SelectStatement) HasWildcard() bool {
	for _, field := range s.Fields {
		if _, isWild := field.Expr.(*Wildcard); isWild {
			return true
		}
	}
	for _, dim := range s.Dimensions {
		if _, isWild := dim.Expr.(*Wildcard); isWild {
			return true
		}
	}
	return false
}
// hasTimeDimensions returns whether or not the select statement has at least 1
// where condition with time as the condition
func (s *SelectStatement) hasTimeDimensions(node Node) bool {
	switch expr := node.(type) {
	case *BinaryExpr:
		// AND/OR: a time constraint on either branch is enough (note: OR here,
		// unlike walkForTime which requires both branches).
		if expr.Op == AND || expr.Op == OR {
			return s.hasTimeDimensions(expr.LHS) || s.hasTimeDimensions(expr.RHS)
		}
		ref, ok := expr.LHS.(*VarRef)
		return ok && strings.ToLower(ref.Val) == "time"
	case *ParenExpr:
		// walk down the tree
		return s.hasTimeDimensions(expr.Expr)
	}
	return false
}
// SetTimeRange sets the start and end time of the select statement to [start, end). i.e. start inclusive, end exclusive.
// This is used commonly for continuous queries so the start and end are in buckets.
func (s *SelectStatement) SetTimeRange(start, end time.Time) error {
	timeCond := fmt.Sprintf("time >= '%s' AND time < '%s'",
		start.UTC().Format(time.RFC3339Nano), end.UTC().Format(time.RFC3339Nano))
	// Preserve any non-time constraints from the existing WHERE clause.
	if s.Condition != nil {
		timeCond = fmt.Sprintf("%s AND %s", s.rewriteWithoutTimeDimensions(), timeCond)
	}
	expr, err := NewParser(strings.NewReader(timeCond)).ParseExpr()
	if err != nil {
		return err
	}
	// fold out any previously replaced time dimensions and set the condition
	s.Condition = Reduce(expr, nil)
	return nil
}
// rewriteWithoutTimeDimensions will remove any WHERE time... clauses from the select statement
// This is necessary when setting an explicit time range to override any that previously existed.
func (s *SelectStatement) rewriteWithoutTimeDimensions() string {
	rewritten := RewriteFunc(s.Condition, func(node Node) Node {
		switch expr := node.(type) {
		case *BinaryExpr:
			// Replace `time <op> ...` comparisons with a tautology.
			if expr.LHS.String() == "time" {
				return &BooleanLiteral{Val: true}
			}
			return expr
		case *Call:
			// Function calls are likewise neutralized.
			return &BooleanLiteral{Val: true}
		default:
			return node
		}
	})
	return rewritten.String()
}
// NamesInWhere returns the field and tag names (idents) referenced in the where clause
func (s *SelectStatement) NamesInWhere() []string {
	if s.Condition == nil {
		return nil
	}
	return walkNames(s.Condition)
}

// NamesInSelect returns the field and tag names (idents) in the select clause
func (s *SelectStatement) NamesInSelect() []string {
	var names []string
	for _, field := range s.Fields {
		names = append(names, walkNames(field.Expr)...)
	}
	return names
}

// FunctionCalls returns the Call objects from the query
func (s *SelectStatement) FunctionCalls() []*Call {
	var calls []*Call
	for _, field := range s.Fields {
		calls = append(calls, walkFunctionCalls(field.Expr)...)
	}
	return calls
}
// DeleteStatement represents a command for removing data from the database.
type DeleteStatement struct {
	// Data source that values are removed from.
	Source Source

	// An expression evaluated on data point.
	Condition Expr
}

// String returns a string representation of the delete statement.
func (s *DeleteStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("DELETE ")
	_, _ = buf.WriteString(s.Source.String())
	if s.Condition != nil {
		_, _ = buf.WriteString(" WHERE ")
		_, _ = buf.WriteString(s.Condition.String())
	}
	// BUG FIX: previously `return s.String()`, which recursed infinitely and
	// overflowed the stack. Return the accumulated buffer instead.
	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute a DeleteStatement.
func (s *DeleteStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}}
}
// ShowOrganizationsStatement represents a command for listing organizations.
type ShowOrganizationsStatement struct{}

// String returns a string representation of the show organizations statement.
// (Doc comment previously said "list continuous queries" — a copy-paste error.)
func (s *ShowOrganizationsStatement) String() string { return "SHOW ORGANIZATIONS" }
// RequiredPrivileges returns the privilege required to execute a ShowOrganizationsStatement.
// Admin is not set here (defaults to false): any reader may list organizations.
func (s *ShowOrganizationsStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Name: "", Privilege: ReadPrivilege}}
}
// DropOrganizationStatement represents a command for removing an organization.
// (Doc comment previously named DropContinuousQueryStatement — a copy-paste error.)
type DropOrganizationStatement struct {
	// Name of the organization to drop.
	Name string
	// Database the statement applies to — not rendered by String; confirm usage at call sites.
	Database string
}

// String returns a string representation of the statement.
func (s *DropOrganizationStatement) String() string {
	return fmt.Sprintf("DROP ORGANIZATION %s", s.Name)
}
// RequiredPrivileges returns the privilege(s) required to execute a DropOrganizationStatement
// Admin is not set here (defaults to false): write privilege alone suffices.
func (s *DropOrganizationStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Name: "", Privilege: WritePrivilege}}
}
// ShowConversationsStatement represents a command for listing conversations in the organization.
type ShowConversationsStatement struct {
	// Namespaces(s) the conversations are listed for.
	Sources Sources

	// An expression evaluated on a conversation name or tag.
	Condition Expr

	// Fields to sort results by
	SortFields SortFields

	// Maximum number of rows to be returned.
	// Unlimited if zero.
	Limit int

	// Returns rows starting at an offset from the first row.
	Offset int
}

// String returns a string representation of the show conversations statement.
// Optional clauses are emitted only when the corresponding field is set.
func (s *ShowConversationsStatement) String() string {
	out := "SHOW CONVERSATIONS"
	if s.Sources != nil {
		out += " FROM " + s.Sources.String()
	}
	if s.Condition != nil {
		out += " WHERE " + s.Condition.String()
	}
	if len(s.SortFields) > 0 {
		out += " ORDER BY " + s.SortFields.String()
	}
	if s.Limit > 0 {
		out += " LIMIT " + strconv.Itoa(s.Limit)
	}
	if s.Offset > 0 {
		out += " OFFSET " + strconv.Itoa(s.Offset)
	}
	return out
}

// RequiredPrivileges returns the privilege required to execute a ShowConversationsStatement.
func (s *ShowConversationsStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}
}
// DropConversationStatement represents a command for removing a conversation from the database.
type DropConversationStatement struct {
	// Name of the conversation to drop.
	Name string
}

// String returns a string representation of the drop conversation statement.
func (s *DropConversationStatement) String() string {
	return "DROP CONVERSATION " + s.Name
}
// RequiredPrivileges returns the privilege required to execute a DropConversationStatement.
// (Doc comment previously named DropSeriesStatement — a copy-paste error.)
// NOTE(review): receiver changed from value to pointer to match String and every
// other statement type in this file; *DropConversationStatement still satisfies
// the same interfaces. Confirm no caller relies on the value method set.
func (s *DropConversationStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}}
}
// ShowOrganizationMembersStatement represents a command for listing the members
// of an organization. (Doc comment previously said "listing user privileges" —
// copy-pasted from ShowGrantsForUserStatement.)
type ShowOrganizationMembersStatement struct {
	// Name of the organization whose members are listed (previous comment said "user").
	Name string
	Database string

	// Data source that fields are extracted from (optional)
	Sources Sources

	// Maximum number of rows to be returned.
	// Unlimited if zero.
	Limit int

	// Returns rows starting at an offset from the first row.
	Offset int
}

// String returns a string representation of the show members for organization.
// NOTE(review): Database, Sources, Limit and Offset are not rendered in the
// output — confirm whether that is intentional.
func (s *ShowOrganizationMembersStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW ORGANIZATION MEMBERS FOR ")
	_, _ = buf.WriteString(s.Name)
	return buf.String()
}
// RequiredPrivileges returns the privilege required to execute a ShowOrganizationMembersStatement
// Requires admin, but only read privilege (unlike the AllPrivileges admin statements).
func (s *ShowOrganizationMembersStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: ReadPrivilege}}
}
// ShowGrantsForUserStatement represents a command for listing user privileges.
type ShowGrantsForUserStatement struct {
	// Name of the user to display privileges.
	Name string
}

// String returns a string representation of the show grants for user.
func (s *ShowGrantsForUserStatement) String() string {
	return "SHOW GRANTS FOR " + s.Name
}
// RequiredPrivileges returns the privilege required to execute a ShowGrantsForUserStatement
// Inspecting another user's grants is an admin-only action.
func (s *ShowGrantsForUserStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// ShowDevicesForUserStatement represents a command for listing a user's devices.
// (Doc comment previously said "listing user privileges" — copy-pasted from
// ShowGrantsForUserStatement.)
type ShowDevicesForUserStatement struct {
	// Name of the user whose devices are listed (previous comment said "privileges").
	Name string
}

// String returns a string representation of the show devices for user.
func (s *ShowDevicesForUserStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW DEVICES FOR ")
	_, _ = buf.WriteString(s.Name)
	return buf.String()
}
// RequiredPrivileges returns the privilege required to execute a ShowDevicesForUserStatement
// Inspecting another user's devices is an admin-only action.
func (s *ShowDevicesForUserStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// ShowServersStatement represents a command for listing all servers.
type ShowServersStatement struct{}

// String returns a string representation of the show servers command.
func (s *ShowServersStatement) String() string {
	return "SHOW SERVERS"
}
// RequiredPrivileges returns the privilege required to execute a ShowServersStatement
// Cluster topology is admin-only information.
func (s *ShowServersStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// ShowDatabasesStatement represents a command for listing all databases in the cluster.
type ShowDatabasesStatement struct{}

// String returns a string representation of the list databases command.
func (s *ShowDatabasesStatement) String() string {
	return "SHOW DATABASES"
}
// RequiredPrivileges returns the privilege required to execute a ShowDatabasesStatement
// Listing every database in the cluster is admin-only.
func (s *ShowDatabasesStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// ShowRetentionPoliciesStatement represents a command for listing retention policies.
type ShowRetentionPoliciesStatement struct {
	// Name of the database to list policies for.
	Database string
}

// String returns a string representation of a ShowRetentionPoliciesStatement.
func (s *ShowRetentionPoliciesStatement) String() string {
	return "SHOW RETENTION POLICIES " + s.Database
}
// RequiredPrivileges returns the privilege(s) required to execute a ShowRetentionPoliciesStatement
// Any reader may list retention policies; no admin flag, no database scope.
func (s *ShowRetentionPoliciesStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}
}
// ShowStatsStatement represents a command for displaying stats for a given server.
// (Doc comment previously named ShowRetentionPoliciesStatement — a copy-paste error.)
type ShowStatsStatement struct {
	// Hostname or IP of the server for stats.
	Host string
}

// String returns a string representation of a ShowStatsStatement.
// NOTE(review): the output always carries a trailing space after "STATS", even
// when Host is empty — preserved as-is for compatibility.
func (s *ShowStatsStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW STATS ")
	if s.Host != "" {
		_, _ = buf.WriteString(s.Host)
	}
	return buf.String()
}
// RequiredPrivileges returns the privilege(s) required to execute a ShowStatsStatement
// NOTE(review): Admin is not set (defaults to false) yet Privilege is AllPrivileges —
// inconsistent with the other admin statements in this file; confirm intent.
func (s *ShowStatsStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Name: "", Privilege: AllPrivileges}}
}
// ShowDiagnosticsStatement represents a command for show node diagnostics.
type ShowDiagnosticsStatement struct{}

// String returns a string representation of the ShowDiagnosticsStatement.
func (s *ShowDiagnosticsStatement) String() string {
	return "SHOW DIAGNOSTICS"
}
// RequiredPrivileges returns the privilege required to execute a ShowDiagnosticsStatement
// Node diagnostics are admin-only information.
func (s *ShowDiagnosticsStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// ShowUsersStatement represents a command for listing users.
type ShowUsersStatement struct{}

// String returns a string representation of the ShowUsersStatement.
func (s *ShowUsersStatement) String() string { return "SHOW USERS" }
// RequiredPrivileges returns the privilege(s) required to execute a ShowUsersStatement
// Listing all users is an admin-only action.
func (s *ShowUsersStatement) RequiredPrivileges() ExecutionPrivileges {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}
}
// cloneSources returns a deep copy of the given sources via cloneSource.
func cloneSources(sources Sources) Sources {
	copied := make(Sources, len(sources))
	for i, src := range sources {
		copied[i] = cloneSource(src)
	}
	return copied
}
// cloneSource returns a deep copy of a single source; nil in, nil out.
func cloneSource(s Source) Source {
	if s == nil {
		return nil
	}
	switch src := s.(type) {
	case *Conversation:
		clone := &Conversation{Database: src.Database, RetentionPolicy: src.RetentionPolicy, Name: src.Name}
		// Deep-copy the regex by recompiling its source text.
		if src.Regex != nil {
			clone.Regex = &RegexLiteral{Val: regexp.MustCompile(src.Regex.Val.String())}
		}
		return clone
	default:
		// Every Source implementation must be handled above.
		panic("unreachable")
	}
}
// walkNames will walk the Expr and return the database fields
func walkNames(exp Expr) []string {
	switch e := exp.(type) {
	case *VarRef:
		return []string{e.Val}
	case *Call:
		// Only the first call argument can name a field, and only if it is a VarRef.
		if len(e.Args) == 0 {
			return nil
		}
		ref, ok := e.Args[0].(*VarRef)
		if !ok {
			return nil
		}
		return []string{ref.Val}
	case *BinaryExpr:
		var names []string
		names = append(names, walkNames(e.LHS)...)
		names = append(names, walkNames(e.RHS)...)
		return names
	case *ParenExpr:
		return walkNames(e.Expr)
	}
	return nil
}
// walkFunctionCalls walks the Field of a query for any function calls made
func walkFunctionCalls(exp Expr) []*Call {
	switch e := exp.(type) {
	case *VarRef:
		// A bare reference contains no calls.
		return nil
	case *Call:
		return []*Call{e}
	case *BinaryExpr:
		var calls []*Call
		calls = append(calls, walkFunctionCalls(e.LHS)...)
		calls = append(calls, walkFunctionCalls(e.RHS)...)
		return calls
	case *ParenExpr:
		return walkFunctionCalls(e.Expr)
	}
	return nil
}
|
package graphql_test
import (
"reflect"
"testing"
"github.com/graphql-go/graphql"
"github.com/graphql-go/graphql/gqlerrors"
"github.com/graphql-go/graphql/language/location"
"github.com/graphql-go/graphql/testutil"
)
// checkList builds a one-field schema around testType, executes the query
// `{ nest { test } }` with testData as the field value, and compares the
// outcome to expected.
func checkList(t *testing.T, testType graphql.Type, testData interface{}, expected *graphql.Result) {
	// TODO: uncomment t.Helper when support for go1.8 is dropped.
	//t.Helper()
	root := map[string]interface{}{"test": testData}
	queryType := graphql.NewObject(graphql.ObjectConfig{
		Name: "DataType",
		Fields: graphql.Fields{
			"test": &graphql.Field{Type: testType},
		},
	})
	// "nest" resolves back to the same object, so the list sits one level deep.
	queryType.AddFieldConfig("nest", &graphql.Field{
		Type: queryType,
		Resolve: func(p graphql.ResolveParams) (interface{}, error) {
			return root, nil
		},
	})
	schema, err := graphql.NewSchema(graphql.SchemaConfig{Query: queryType})
	if err != nil {
		t.Fatalf("Error in schema %v", err.Error())
	}
	result := testutil.TestExecute(t, graphql.ExecuteParams{
		Schema: schema,
		AST:    testutil.TestParse(t, `{ nest { test } }`),
		Root:   root,
	})
	if !testutil.EqualResults(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// Describe [T] Array<T>
func TestLists_ListOfNullableObjects_ContainsValues(t *testing.T) {
	listType := graphql.NewList(graphql.Int)
	checkList(t, listType, []interface{}{1, 2}, &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{"test": []interface{}{1, 2}},
		},
	})
}
// A nullable list of nullable ints keeps interior nulls intact.
func TestLists_ListOfNullableObjects_ContainsNull(t *testing.T) {
	listType := graphql.NewList(graphql.Int)
	checkList(t, listType, []interface{}{1, nil, 2}, &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{"test": []interface{}{1, nil, 2}},
		},
	})
}
// A nullable list may resolve to null as a whole.
func TestLists_ListOfNullableObjects_ReturnsNull(t *testing.T) {
	listType := graphql.NewList(graphql.Int)
	checkList(t, listType, nil, &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{"test": nil},
		},
	})
}
// Describe [T] Func()Array<T> // equivalent to Promise<Array<T>>
func TestLists_ListOfNullableFunc_ContainsValues(t *testing.T) {
	listType := graphql.NewList(graphql.Int)
	// The resolved value may itself be a thunk with signature `func() interface{}`.
	thunk := func() interface{} {
		return []interface{}{1, 2}
	}
	checkList(t, listType, thunk, &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{"test": []interface{}{1, 2}},
		},
	})
}
// A thunk-returned list keeps interior nulls intact.
func TestLists_ListOfNullableFunc_ContainsNull(t *testing.T) {
	listType := graphql.NewList(graphql.Int)
	// Thunk with the expected signature `func() interface{}`.
	thunk := func() interface{} {
		return []interface{}{1, nil, 2}
	}
	checkList(t, listType, thunk, &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{"test": []interface{}{1, nil, 2}},
		},
	})
}
// A thunk may yield null for the whole nullable list.
func TestLists_ListOfNullableFunc_ReturnsNull(t *testing.T) {
	listType := graphql.NewList(graphql.Int)
	// Thunk with the expected signature `func() interface{}`.
	thunk := func() interface{} {
		return nil
	}
	checkList(t, listType, thunk, &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{"test": nil},
		},
	})
}
// Describe [T] Array<Func()<T>> // equivalent to Array<Promise<T>>
func TestLists_ListOfNullableArrayOfFuncContainsValues(t *testing.T) {
	listType := graphql.NewList(graphql.Int)
	// Each element is a resolver thunk `func() (interface{}, error)`.
	items := []interface{}{
		func() (interface{}, error) { return 1, nil },
		func() (interface{}, error) { return 2, nil },
	}
	checkList(t, listType, items, &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{"test": []interface{}{1, 2}},
		},
	})
}
// Per-element thunks may resolve to nil inside a nullable list.
func TestLists_ListOfNullableArrayOfFuncContainsNulls(t *testing.T) {
	listType := graphql.NewList(graphql.Int)
	// Each element is a resolver thunk `func() (interface{}, error)`.
	items := []interface{}{
		func() (interface{}, error) { return 1, nil },
		func() (interface{}, error) { return nil, nil },
		func() (interface{}, error) { return 2, nil },
	}
	checkList(t, listType, items, &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{"test": []interface{}{1, nil, 2}},
		},
	})
}
// Describe [T]! Array<T>
func TestLists_NonNullListOfNullableObjectsContainsValues(t *testing.T) {
	listType := graphql.NewNonNull(graphql.NewList(graphql.Int))
	checkList(t, listType, []interface{}{1, 2}, &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{"test": []interface{}{1, 2}},
		},
	})
}
// A non-null list of nullable ints still permits interior nulls.
func TestLists_NonNullListOfNullableObjectsContainsNull(t *testing.T) {
	listType := graphql.NewNonNull(graphql.NewList(graphql.Int))
	checkList(t, listType, []interface{}{1, nil, 2}, &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{"test": []interface{}{1, nil, 2}},
		},
	})
}
// Resolving null for a non-null list nulls the parent and reports an error.
func TestLists_NonNullListOfNullableObjectsReturnsNull(t *testing.T) {
	listType := graphql.NewNonNull(graphql.NewList(graphql.Int))
	checkList(t, listType, nil, &graphql.Result{
		Data: map[string]interface{}{"nest": nil},
		Errors: []gqlerrors.FormattedError{
			{
				Message:   "Cannot return null for non-nullable field DataType.test.",
				Locations: []location.SourceLocation{{Line: 1, Column: 10}},
				Path:      []interface{}{"nest", "test"},
			},
		},
	})
}
// Describe [T]! Func()Array<T> // equivalent to Promise<Array<T>>
func TestLists_NonNullListOfNullableFunc_ContainsValues(t *testing.T) {
	listType := graphql.NewNonNull(graphql.NewList(graphql.Int))
	// Thunk with the expected signature `func() interface{}`.
	thunk := func() interface{} {
		return []interface{}{1, 2}
	}
	checkList(t, listType, thunk, &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{"test": []interface{}{1, 2}},
		},
	})
}
// A thunk-returned non-null list still permits interior nulls.
func TestLists_NonNullListOfNullableFunc_ContainsNull(t *testing.T) {
	listType := graphql.NewNonNull(graphql.NewList(graphql.Int))
	// Thunk with the expected signature `func() interface{}`.
	thunk := func() interface{} {
		return []interface{}{1, nil, 2}
	}
	checkList(t, listType, thunk, &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{"test": []interface{}{1, nil, 2}},
		},
	})
}
// A thunk yielding null for a non-null list nulls the parent and errors.
func TestLists_NonNullListOfNullableFunc_ReturnsNull(t *testing.T) {
	listType := graphql.NewNonNull(graphql.NewList(graphql.Int))
	// Thunk with the expected signature `func() interface{}`.
	thunk := func() interface{} {
		return nil
	}
	checkList(t, listType, thunk, &graphql.Result{
		Data: map[string]interface{}{"nest": nil},
		Errors: []gqlerrors.FormattedError{
			{
				Message:   "Cannot return null for non-nullable field DataType.test.",
				Locations: []location.SourceLocation{{Line: 1, Column: 10}},
				Path:      []interface{}{"nest", "test"},
			},
		},
	})
}
// Describe [T]! Array<Func()<T>> // equivalent to Array<Promise<T>>
func TestLists_NonNullListOfNullableArrayOfFunc_ContainsValues(t *testing.T) {
	listType := graphql.NewNonNull(graphql.NewList(graphql.Int))
	// Each element is a resolver thunk `func() (interface{}, error)`.
	items := []interface{}{
		func() (interface{}, error) { return 1, nil },
		func() (interface{}, error) { return 2, nil },
	}
	checkList(t, listType, items, &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{"test": []interface{}{1, 2}},
		},
	})
}
// Per-element thunks may resolve to nil; the non-null wrapper is on the list, not elements.
func TestLists_NonNullListOfNullableArrayOfFunc_ContainsNulls(t *testing.T) {
	listType := graphql.NewNonNull(graphql.NewList(graphql.Int))
	// Each element is a resolver thunk `func() (interface{}, error)`.
	items := []interface{}{
		func() (interface{}, error) { return 1, nil },
		func() (interface{}, error) { return nil, nil },
		func() (interface{}, error) { return 2, nil },
	}
	checkList(t, listType, items, &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{"test": []interface{}{1, nil, 2}},
		},
	})
}
// Describe [T!] Array<T>
func TestLists_NullableListOfNonNullObjects_ContainsValues(t *testing.T) {
ttype := graphql.NewList(graphql.NewNonNull(graphql.Int))
data := []interface{}{
1, 2,
}
expected := &graphql.Result{
Data: map[string]interface{}{
"nest": map[string]interface{}{
"test": []interface{}{
1, 2,
},
},
},
}
checkList(t, ttype, data, expected)
}
func TestLists_NullableListOfNonNullObjects_ContainsNull(t *testing.T) {
ttype := graphql.NewList(graphql.NewNonNull(graphql.Int))
data := []interface{}{
1, nil, 2,
}
expected := &graphql.Result{
Data: map[string]interface{}{
"nest": map[string]interface{}{
"test": nil,
},
},
Errors: []gqlerrors.FormattedError{
{
Message: "Cannot return null for non-nullable field DataType.test.",
Locations: []location.SourceLocation{
{
Line: 1,
Column: 10,
},
},
Path: []interface{}{
"nest",
"test",
1,
},
},
},
}
checkList(t, ttype, data, expected)
}
func TestLists_NullableListOfNonNullObjects_ReturnsNull(t *testing.T) {
ttype := graphql.NewList(graphql.NewNonNull(graphql.Int))
expected := &graphql.Result{
Data: map[string]interface{}{
"nest": map[string]interface{}{
"test": nil,
},
},
}
checkList(t, ttype, nil, expected)
}
// Describe [T!] Func()Array<T> // equivalent to Promise<Array<T>>
func TestLists_NullableListOfNonNullFunc_ContainsValues(t *testing.T) {
ttype := graphql.NewList(graphql.NewNonNull(graphql.Int))
// `data` is a function that return values
// Note that its uses the expected signature `func() interface{} {...}`
data := func() interface{} {
return []interface{}{
1, 2,
}
}
expected := &graphql.Result{
Data: map[string]interface{}{
"nest": map[string]interface{}{
"test": []interface{}{
1, 2,
},
},
},
}
checkList(t, ttype, data, expected)
}
func TestLists_NullableListOfNonNullFunc_ContainsNull(t *testing.T) {
ttype := graphql.NewList(graphql.NewNonNull(graphql.Int))
// `data` is a function that return values
// Note that its uses the expected signature `func() interface{} {...}`
data := func() interface{} {
return []interface{}{
1, nil, 2,
}
}
expected := &graphql.Result{
Data: map[string]interface{}{
"nest": map[string]interface{}{
"test": nil,
},
},
Errors: []gqlerrors.FormattedError{
{
Message: "Cannot return null for non-nullable field DataType.test.",
Locations: []location.SourceLocation{
{
Line: 1,
Column: 10,
},
},
Path: []interface{}{
"nest",
"test",
1,
},
},
},
}
checkList(t, ttype, data, expected)
}
func TestLists_NullableListOfNonNullFunc_ReturnsNull(t *testing.T) {
ttype := graphql.NewList(graphql.NewNonNull(graphql.Int))
// `data` is a function that return values
// Note that its uses the expected signature `func() interface{} {...}`
data := func() interface{} {
return nil
}
expected := &graphql.Result{
Data: map[string]interface{}{
"nest": map[string]interface{}{
"test": nil,
},
},
}
checkList(t, ttype, data, expected)
}
// Describe [T!] Array<Func()<T>> // equivalent to Array<Promise<T>>
func TestLists_NullableListOfNonNullArrayOfFunc_ContainsValues(t *testing.T) {
ttype := graphql.NewList(graphql.NewNonNull(graphql.Int))
// `data` is a slice of functions that return values
// Note that its uses the expected signature `func() interface{} {...}`
data := []interface{}{
func() (interface{}, error) {
return 1, nil
},
func() (interface{}, error) {
return 2, nil
},
}
expected := &graphql.Result{
Data: map[string]interface{}{
"nest": map[string]interface{}{
"test": []interface{}{
1, 2,
},
},
},
}
checkList(t, ttype, data, expected)
}
func TestLists_NullableListOfNonNullArrayOfFunc_ContainsNulls(t *testing.T) {
ttype := graphql.NewList(graphql.NewNonNull(graphql.Int))
// `data` is a slice of functions that return values
// Note that its uses the expected signature `func() (interface{}, error){...}`
data := []interface{}{
func() (interface{}, error) {
return 1, nil
},
func() (interface{}, error) {
return nil, nil
},
func() (interface{}, error) {
return 2, nil
},
}
expected := &graphql.Result{
/*
// TODO: Because thunks are called after the result map has been assembled,
// we are not able to traverse up the tree until we find a nullable type,
// so in this case the entire data is nil. Will need some significant code
// restructure to restore this.
Data: map[string]interface{}{
"nest": map[string]interface{}{
"test": nil,
},
},
*/
Data: nil,
Errors: []gqlerrors.FormattedError{
{
Message: "Cannot return null for non-nullable field DataType.test.",
Locations: []location.SourceLocation{
{
Line: 1,
Column: 10,
},
},
Path: []interface{}{
"nest",
"test",
1,
},
},
},
}
checkList(t, ttype, data, expected)
}
// Describe [T!]! Array<T>

// TestLists_NonNullListOfNonNullObjects_ContainsValues verifies a non-null
// list of non-null Int resolves normally when no element is nil.
func TestLists_NonNullListOfNonNullObjects_ContainsValues(t *testing.T) {
	ttype := graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphql.Int)))
	data := []interface{}{
		1, 2,
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{
				"test": []interface{}{
					1, 2,
				},
			},
		},
	}
	checkList(t, ttype, data, expected)
}

// TestLists_NonNullListOfNonNullObjects_ContainsNull verifies that a nil
// element in a [Int!]! list propagates the null past the list to "nest".
func TestLists_NonNullListOfNonNullObjects_ContainsNull(t *testing.T) {
	ttype := graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphql.Int)))
	data := []interface{}{
		1, nil, 2,
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"nest": nil,
		},
		Errors: []gqlerrors.FormattedError{
			{
				Message: "Cannot return null for non-nullable field DataType.test.",
				Locations: []location.SourceLocation{
					{
						Line:   1,
						Column: 10,
					},
				},
				Path: []interface{}{
					"nest",
					"test",
					1,
				},
			},
		},
	}
	checkList(t, ttype, data, expected)
}

// TestLists_NonNullListOfNonNullObjects_ReturnsNull verifies that resolving a
// [Int!]! list to nil is an error that nulls the parent object.
func TestLists_NonNullListOfNonNullObjects_ReturnsNull(t *testing.T) {
	ttype := graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphql.Int)))
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"nest": nil,
		},
		Errors: []gqlerrors.FormattedError{
			{
				Message: "Cannot return null for non-nullable field DataType.test.",
				Locations: []location.SourceLocation{
					{
						Line:   1,
						Column: 10,
					},
				},
				Path: []interface{}{
					"nest",
					"test",
				},
			},
		},
	}
	checkList(t, ttype, nil, expected)
}

// Describe [T!]! Func()Array<T> // equivalent to Promise<Array<T>>

// TestLists_NonNullListOfNonNullFunc_ContainsValues verifies a thunk-backed
// [Int!]! list resolves normally when all elements are non-nil.
func TestLists_NonNullListOfNonNullFunc_ContainsValues(t *testing.T) {
	ttype := graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphql.Int)))
	// `data` is a function that returns values
	// Note that it uses the expected signature `func() interface{} {...}`
	data := func() interface{} {
		return []interface{}{
			1, 2,
		}
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{
				"test": []interface{}{
					1, 2,
				},
			},
		},
	}
	checkList(t, ttype, data, expected)
}

// TestLists_NonNullListOfNonNullFunc_ContainsNull verifies that a nil element
// returned by the thunk nulls "nest" and reports the element index.
func TestLists_NonNullListOfNonNullFunc_ContainsNull(t *testing.T) {
	ttype := graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphql.Int)))
	// `data` is a function that returns values
	// Note that it uses the expected signature `func() interface{} {...}`
	data := func() interface{} {
		return []interface{}{
			1, nil, 2,
		}
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"nest": nil,
		},
		Errors: []gqlerrors.FormattedError{
			{
				Message: "Cannot return null for non-nullable field DataType.test.",
				Locations: []location.SourceLocation{
					{
						Line:   1,
						Column: 10,
					},
				},
				Path: []interface{}{
					"nest",
					"test",
					1,
				},
			},
		},
	}
	checkList(t, ttype, data, expected)
}

// TestLists_NonNullListOfNonNullFunc_ReturnsNull verifies that a thunk
// resolving a [Int!]! list to nil is an error that nulls "nest".
func TestLists_NonNullListOfNonNullFunc_ReturnsNull(t *testing.T) {
	ttype := graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphql.Int)))
	// `data` is a function that returns values
	// Note that it uses the expected signature `func() interface{} {...}`
	data := func() interface{} {
		return nil
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"nest": nil,
		},
		Errors: []gqlerrors.FormattedError{
			{
				Message: "Cannot return null for non-nullable field DataType.test.",
				Locations: []location.SourceLocation{
					{
						Line:   1,
						Column: 10,
					},
				},
				Path: []interface{}{
					"nest",
					"test",
				},
			},
		},
	}
	checkList(t, ttype, data, expected)
}

// Describe [T!]! Array<Func()<T>> // equivalent to Array<Promise<T>>

// TestLists_NonNullListOfNonNullArrayOfFunc_ContainsValues verifies a slice
// of element thunks resolves a [Int!]! list when all thunks return values.
func TestLists_NonNullListOfNonNullArrayOfFunc_ContainsValues(t *testing.T) {
	ttype := graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphql.Int)))
	// `data` is a slice of functions that return values
	// Note that it uses the expected signature `func() (interface{}, error) {...}`
	data := []interface{}{
		func() (interface{}, error) {
			return 1, nil
		},
		func() (interface{}, error) {
			return 2, nil
		},
	}
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{
				"test": []interface{}{
					1, 2,
				},
			},
		},
	}
	checkList(t, ttype, data, expected)
}

// TestLists_NonNullListOfNonNullArrayOfFunc_ContainsNulls verifies the
// current (degraded) behavior for a nil element thunk in a [Int!]! list:
// Data becomes nil entirely — see the TODO below.
func TestLists_NonNullListOfNonNullArrayOfFunc_ContainsNulls(t *testing.T) {
	ttype := graphql.NewNonNull(graphql.NewList(graphql.NewNonNull(graphql.Int)))
	// `data` is a slice of functions that return values
	// Note that it uses the expected signature `func() (interface{}, error) {...}`
	data := []interface{}{
		func() (interface{}, error) {
			return 1, nil
		},
		func() (interface{}, error) {
			return nil, nil
		},
		func() (interface{}, error) {
			return 2, nil
		},
	}
	expected := &graphql.Result{
		/*
			// TODO: Because thunks are called after the result map has been assembled,
			// we are not able to traverse up the tree until we find a nullable type,
			// so in this case the entire data is nil. Will need some significant code
			// restructure to restore this.
			Data: map[string]interface{}{
				"nest": nil,
			},
		*/
		Data: nil,
		Errors: []gqlerrors.FormattedError{
			{
				Message: "Cannot return null for non-nullable field DataType.test.",
				Locations: []location.SourceLocation{
					{
						Line:   1,
						Column: 10,
					},
				},
				Path: []interface{}{
					"nest",
					"test",
					1,
				},
			},
		},
	}
	checkList(t, ttype, data, expected)
}

// TestLists_UserErrorExpectIterableButDidNotGetOne verifies that resolving a
// list field to a non-iterable value (a plain string) reports a user error.
func TestLists_UserErrorExpectIterableButDidNotGetOne(t *testing.T) {
	ttype := graphql.NewList(graphql.Int)
	data := "Not an iterable"
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{
				"test": nil,
			},
		},
		Errors: []gqlerrors.FormattedError{
			{
				Message: "User Error: expected iterable, but did not find one for field DataType.test.",
				Locations: []location.SourceLocation{
					{
						Line:   1,
						Column: 10,
					},
				},
				Path: []interface{}{
					"nest",
					"test",
				},
			},
		},
	}
	checkList(t, ttype, data, expected)
}
// TestLists_ArrayOfNullableObjects_ContainsValues checks that a fixed-size Go
// array (as opposed to a slice) is accepted as the iterable backing a list.
func TestLists_ArrayOfNullableObjects_ContainsValues(t *testing.T) {
	listType := graphql.NewList(graphql.Int)
	values := [2]interface{}{1, 2}
	want := &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{
				"test": []interface{}{1, 2},
			},
		},
	}
	checkList(t, listType, values, want)
}
// TestLists_ValueMayBeNilPointer checks that a resolver returning a nil-valued
// []int yields an empty JSON list ([]interface{}{}) rather than null.
func TestLists_ValueMayBeNilPointer(t *testing.T) {
	var listTestSchema, _ = graphql.NewSchema(graphql.SchemaConfig{
		Query: graphql.NewObject(graphql.ObjectConfig{
			Name: "Query",
			Fields: graphql.Fields{
				"list": &graphql.Field{
					Type: graphql.NewList(graphql.Int),
					Resolve: func(_ graphql.ResolveParams) (interface{}, error) {
						// A typed nil slice, not an untyped nil.
						return []int(nil), nil
					},
				},
			},
		}),
	})
	query := "{ list }"
	expected := &graphql.Result{
		Data: map[string]interface{}{
			"list": []interface{}{},
		},
	}
	result := g(t, graphql.Params{
		Schema:        listTestSchema,
		RequestString: query,
	})
	if !reflect.DeepEqual(expected, result) {
		t.Fatalf("Unexpected result, Diff: %v", testutil.Diff(expected, result))
	}
}
// TestLists_NullableListOfInt_ReturnsNull checks that a typed nil pointer to
// a slice resolves a nullable list field to null.
func TestLists_NullableListOfInt_ReturnsNull(t *testing.T) {
	listType := graphql.NewList(graphql.Int)
	type intListPtr *[]int
	var src intListPtr
	want := &graphql.Result{
		Data: map[string]interface{}{
			"nest": map[string]interface{}{
				"test": nil,
			},
		},
	}
	checkList(t, listType, src, want)
}
|
package controllers
import (
"businessense/models"
u "businessense/utils"
"encoding/json"
"fmt"
"net/http"
"strconv"
"github.com/gorilla/mux"
)
// CreateProject HandlerFunc: decodes a Project from the request body and
// persists it, responding with the result of project.Create().
var CreateProject = func(w http.ResponseWriter, r *http.Request) {
	project := &models.Project{}
	// Decode the request body into the struct; reject malformed JSON.
	if err := json.NewDecoder(r.Body).Decode(project); err != nil {
		u.Respond(w, u.Message(false, "Invalid request"))
		return
	}
	u.Respond(w, project.Create())
}
// GetProjectByCompany HandlerFunc: returns all projects for the company
// identified by the numeric "id" path parameter.
var GetProjectByCompany = func(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	id, err := strconv.Atoi(params["id"])
	if err != nil {
		// Previously the error was discarded and id defaulted to 0,
		// silently querying a nonexistent company. Reject instead.
		u.Respond(w, u.Message(false, "Invalid request"))
		return
	}
	fmt.Println("id: ", id)
	data := models.GetProjectByCompany(id)
	resp := u.Message(true, "success")
	resp["data"] = data
	u.Respond(w, resp)
}
// GetProject HandlerFunc: returns the project identified by the numeric
// "id" path parameter.
var GetProject = func(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	id, err := strconv.Atoi(params["id"])
	if err != nil {
		// Previously the error was discarded and id defaulted to 0,
		// silently querying a nonexistent project. Reject instead.
		u.Respond(w, u.Message(false, "Invalid request"))
		return
	}
	fmt.Println("id: ", id)
	data := models.GetProject(id)
	resp := u.Message(true, "success")
	resp["data"] = data
	u.Respond(w, resp)
}
|
package mesh
import (
"fmt"
"net"
"strconv"
"github.com/asaskevich/govalidator"
mesh_proto "github.com/kumahq/kuma/api/mesh/v1alpha1"
"github.com/kumahq/kuma/pkg/core/validators"
)
// Validate checks the external service spec: its networking section, tag
// syntax, the mandatory kuma.io/service tag, and — when present — that the
// kuma.io/protocol tag holds a supported protocol.
func (es *ExternalServiceResource) Validate() error {
	var verr validators.ValidationError
	verr.Add(validateExternalServiceNetworking(es.Spec.GetNetworking()))
	verr.Add(validateTags(es.Spec.Tags))
	tags := es.Spec.Tags
	if _, ok := tags[mesh_proto.ServiceTag]; !ok {
		verr.AddViolationAt(validators.RootedAt("tags").Key(mesh_proto.ServiceTag), `tag has to exist`)
	}
	if protocol, ok := tags[mesh_proto.ProtocolTag]; ok && ParseProtocol(protocol) == ProtocolUnknown {
		verr.AddViolationAt(validators.RootedAt("tags").Key(mesh_proto.ProtocolTag), fmt.Sprintf("tag %q has an invalid value %q. %s", mesh_proto.ProtocolTag, protocol, AllowedValuesHint(SupportedProtocols.Strings()...)))
	}
	return verr.OrNil()
}
// validateExternalServiceNetworking validates the networking section:
// it must be present, carry a parseable address, and — if TLS sets a
// serverName — that name must be non-empty.
func validateExternalServiceNetworking(networking *mesh_proto.ExternalService_Networking) validators.ValidationError {
	var verr validators.ValidationError
	root := validators.RootedAt("networking")
	if networking == nil {
		verr.AddViolation("networking", "should have networking")
	} else {
		verr.Add(validateExternalServiceAddress(root, networking.Address))
	}
	// The generated Get* accessors are nil-safe, so this also holds when
	// networking itself is nil (serverName is then nil and skipped).
	serverName := networking.GetTls().GetServerName()
	if serverName != nil && serverName.GetValue() == "" {
		verr.AddViolationAt(root.Field("tls").Field("serverName"), "cannot be empty")
	}
	return verr
}
// validateExternalServiceAddress validates that address is a non-empty
// "host:port" string whose host is an IP or DNS name and whose port parses
// as an unsigned integer in the valid port range.
func validateExternalServiceAddress(path validators.PathBuilder, address string) validators.ValidationError {
	var err validators.ValidationError
	if address == "" {
		err.AddViolationAt(path.Field("address"), "address can't be empty")
		return err
	}
	host, port, e := net.SplitHostPort(address)
	if e != nil {
		// Without a parseable host:port, the host/port checks below would run
		// on empty strings and emit misleading follow-up violations; stop here.
		err.AddViolationAt(path.Field("address"), "unable to parse address")
		return err
	}
	if !govalidator.IsIP(host) && !govalidator.IsDNSName(host) {
		err.AddViolationAt(path.Field("address"), "address has to be a valid IP address or a domain name")
	}
	iport, e := strconv.ParseUint(port, 10, 32)
	if e != nil {
		// Do not also run ValidatePort on the zero value of an unparseable port.
		err.AddViolationAt(path.Field("address"), "unable to parse port in address")
		return err
	}
	err.Add(ValidatePort(path.Field("address"), uint32(iport)))
	return err
}
|
package practice
import (
"github.com/sko00o/leetcode-adventure/queue-stack/queue"
)
// Queue defines a queue for interface{} type.
// It embeds queue.SliceQueue, exposing its Data field and the
// EnQueue/DeQueue/Front/IsEmpty operations used by openLock below.
type Queue struct {
	queue.SliceQueue
}
// openLock performs a breadth-first search over 4-wheel lock states starting
// from "0000", skipping deadend states, and returns the minimum number of
// single-wheel turns needed to reach target, or -1 if it is unreachable.
func openLock(deadends []string, target string) int {
	if len(target) != 4 {
		return -1
	}
	const start = "0000"
	// Overlapping windows deltas[i:i+4] for i in 0..7 enumerate the eight
	// moves: turning each of the four wheels up or down by one.
	deltas := []int{1, 0, 0, 0, -1, 0, 0, 0, 1, 0, 0}
	seen := make(map[string]struct{})
	for _, dead := range deadends {
		if len(dead) == 4 {
			seen[dead] = struct{}{}
		}
	}
	if _, blocked := seen[start]; blocked {
		return -1
	}
	seen[start] = struct{}{}
	var q Queue
	q.EnQueue(start)
	for turns := 0; !q.IsEmpty(); turns++ {
		// Drain exactly one BFS level per outer iteration.
		for level := len(q.Data); level > 0; level-- {
			state := q.Front().(string)
			if state == target {
				return turns
			}
			for i := 0; i < 8; i++ {
				next := nextStats(state, deltas[i:i+4])
				if _, ok := seen[next]; ok {
					continue
				}
				seen[next] = struct{}{}
				q.EnQueue(next)
			}
			q.DeQueue()
		}
	}
	return -1
}
// nextStats applies a per-wheel rotation to a 4-digit lock state s.
// d must have length 4; d[i] is added to digit i modulo 10, with negative
// deltas wrapping around. It returns "" for malformed input.
func nextStats(s string, d []int) string {
	digits := []byte(s)
	if len(digits) != 4 || len(d) != 4 {
		return ""
	}
	for i, delta := range d {
		v := int(digits[i] - '0')
		v = (v + delta%10 + 10) % 10
		digits[i] = '0' + byte(v)
	}
	return string(digits)
}

// openLock1 is a queue-free variant of openLock: BFS over lock states using a
// plain slice as the frontier. It returns the minimum number of single-wheel
// turns from "0000" to target, or -1 if target is unreachable.
func openLock1(deadends []string, target string) int {
	const start = "0000"
	seen := make(map[string]bool)
	for _, dead := range deadends {
		seen[dead] = true
	}
	if seen[start] {
		return -1
	}
	seen[start] = true
	// Overlapping windows deltas[i:i+4] enumerate the eight single-wheel moves.
	deltas := []int{1, 0, 0, 0, -1, 0, 0, 0, 1, 0, 0}
	frontier := []string{start}
	for turns := 0; len(frontier) != 0; turns++ {
		nextFrontier := []string{}
		for _, state := range frontier {
			if state == target {
				return turns
			}
			for i := 0; i < 8; i++ {
				candidate := nextStats(state, deltas[i:i+4])
				if seen[candidate] {
					continue
				}
				seen[candidate] = true
				nextFrontier = append(nextFrontier, candidate)
			}
		}
		frontier = nextFrontier
	}
	return -1
}
|
package flow
// Message represents a single FBP protocol message
type Message struct {
	// Protocol is NoFlo protocol identifier:
	// "runtime", "component", "graph" or "network"
	Protocol string
	// Command is a command to be executed within the protocol
	Command string
	// Payload is JSON-encoded body of the message
	Payload interface{}
}

// runtimeInfo message contains response to runtime.getruntime request
type runtimeInfo struct {
	Type         string   `json:"type"`
	Version      string   `json:"version"`
	Capabilities []string `json:"capabilities"`
	Id           string   `json:"id"`
}

// clearGraph message is sent by client to create a new empty graph
type clearGraph struct {
	Id          string
	Name        string `json:",omitempty"` // ignored
	Library     string `json:",omitempty"` // ignored
	Main        bool   `json:",omitempty"`
	Icon        string `json:",omitempty"` // ignored
	Description string `json:",omitempty"` // ignored
}

// addNode message is sent by client to add a node to a graph
type addNode struct {
	Id        string
	Component string
	Graph     string
	Metadata  map[string]interface{} `json:",omitempty"` // ignored
}

// removeNode is a client message to remove a node from a graph
type removeNode struct {
	Id    string
	Graph string
}

// renameNode is a client message to rename a node in a graph
type renameNode struct {
	From  string
	To    string
	Graph string
}

// changeNode is a client message to change the metadata
// associated to a node in the graph
type changeNode struct { // ignored
	Id       string
	Graph    string
	Metadata map[string]interface{}
}

// addEdge is a client message to create a connection in a graph
type addEdge struct {
	Src struct {
		Node  string
		Port  string
		Index int `json:",omitempty"` // ignored
	}
	Tgt struct {
		Node  string
		Port  string
		Index int `json:",omitempty"` // ignored
	}
	Graph    string
	Metadata map[string]interface{} `json:",omitempty"` // ignored
}

// removeEdge is a client message to delete a connection from a graph
type removeEdge struct {
	Src struct {
		Node string
		Port string
	}
	Tgt struct {
		Node string
		Port string
	}
	Graph string
}

// changeEdge is a client message to change connection metadata
type changeEdge struct { // ignored
	Src struct {
		Node  string
		Port  string
		Index int `json:",omitempty"`
	}
	Tgt struct {
		Node  string
		Port  string
		Index int `json:",omitempty"`
	}
	Graph    string
	Metadata map[string]interface{}
}

// addInitial is a client message to add an IIP (initial information
// packet) to a graph
type addInitial struct {
	Src struct {
		Data interface{}
	}
	Tgt struct {
		Node  string
		Port  string
		Index int `json:",omitempty"` // ignored
	}
	Graph    string
	Metadata map[string]interface{} `json:",omitempty"` // ignored
}

// removeInitial is a client message to remove an IIP from a graph
type removeInitial struct {
	Tgt struct {
		Node  string
		Port  string
		Index int `json:",omitempty"` // ignored
	}
	Graph string
}

// addPort is a client message to add an exported inport/outport to the graph
type addPort struct {
	Public   string
	Node     string
	Port     string
	Graph    string
	Metadata map[string]interface{} `json:",omitempty"` // ignored
}

// removePort is a client message to remove an exported inport/outport from the graph
type removePort struct {
	Public string
	Graph  string
}

// renamePort is a client message to rename a port of a graph
type renamePort struct {
	From  string
	To    string
	Graph string
}

// portInfo represents a port to a runtime client
type portInfo struct {
	Id          string        `json:"id"`
	Type        string        `json:"type"`
	Description string        `json:"description"`
	Addressable bool          `json:"addressable"` // ignored
	Required    bool          `json:"required"`
	Values      []interface{} `json:"values"`  // ignored
	Default     interface{}   `json:"default"` // ignored
}

// componentInfo represents a component to a protocol client
type componentInfo struct {
	Name        string     `json:"name"`
	Description string     `json:"description"`
	Icon        string     `json:"icon"`
	Subgraph    bool       `json:"subgraph"`
	InPorts     []portInfo `json:"inPorts"`
	OutPorts    []portInfo `json:"outPorts"`
}
|
package TryMe
import (
"testing"
)
// BenchmarkFibGenerator measures the per-iteration cost of creating a
// FibGenerator channel for n=10 and receiving one value from it.
func BenchmarkFibGenerator(b *testing.B) {
	//b.ResetTimer()
	for i := 0; i < b.N; i++ {
		<-FibGenerator(10)
		/*
			go func(i int) {
				log.Printf("result %v for %v \n", <-FibGenerator(i), i)
			}(i)
		*/
	}
}
|
package main
import (
"log"
"time"
)
// main demonstrates time.Ticker: a goroutine logs ticks at 500ms, the period
// is reset to 100ms after two seconds, and the goroutine is stopped via done.
func main() {
	ticker := time.NewTicker(500 * time.Millisecond)
	// Release the ticker's resources when main returns (was previously leaked).
	defer ticker.Stop()
	done := make(chan int)
	go runTickerUntilDone(ticker, done)
	log.Printf("go routine created")
	time.Sleep(2 * time.Second)
	log.Printf("change to 100 ms")
	ticker.Reset(100 * time.Millisecond)
	time.Sleep(2 * time.Second)
	done <- 1
}
func runTickerUntilDone(ticker *time.Ticker, done chan int) {
for {
select {
case <-done:
log.Printf("done")
return
case t := <-ticker.C:
log.Printf("tick at t: %v, in seconds: %v", t, time.Now().Unix())
}
}
}
|
package main
import "fmt"
func main(){
numbers := []int{1,2,3,4,5}
sum := 0
for _,number := range numbers{
sum += number
}
fmt.Println("sum", sum)
for index := range numbers{
fmt.Println("index", index)
}
maps := map[int]string{4:"sushil",1:"sanjay",2:"bharati",3:"suuhas",5:"arati"}
for i,val := range maps{
fmt.Println("\n\n",i, val)
fmt.Printf("%d -> %s", i, val) //using printf function here
}
fmt.Println("\n\n")
name := "sushil"
for i, val := range name{
fmt.Println(i,val) //prints unicode code points
}
} |
package model
import (
"github.com/jinlicode/jinli-panel/global"
"github.com/jinlicode/jinli-panel/model/request"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
)
var db *gorm.DB
// InitDbConnt opens the sqlite config database and migrates all schema
// models. It panics on failure, since the panel cannot run without its
// database being both reachable and migrated.
func InitDbConnt() {
	//open a db connection
	var err error
	db, err = gorm.Open(sqlite.Open(global.BASEPATH+"config.db"), &gorm.Config{})
	if err != nil {
		panic("failed to connect database")
	}
	// Surface migration failures instead of silently ignoring the error.
	if err := db.AutoMigrate(
		&request.Database{},
		&request.Task{},
		&request.Site{},
		&request.Domain{},
		&request.User{},
		&Config{},
	); err != nil {
		panic("failed to migrate database: " + err.Error())
	}
}
|
package controllers
import (
"coludRenderDiscovery/discovery"
"coludRenderDiscovery/models"
"encoding/base64"
"errors"
"fmt"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
"github.com/golibs/uuid"
"github.com/gorilla/websocket"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"strings"
"time"
)
// UploadController handles render-task file uploads.
type UploadController struct {
	beego.Controller
	responseMsg ResponseMsg // response assembled across Post and its helpers
}

// ResponseMsg is the response structure returned for a file upload.
type ResponseMsg struct {
	Code int16        // status code
	Err  string       // error description
	Time time.Time    // processing time
	Data responseData // returned data
}

// responseData carries the details of a processed upload.
type responseData struct {
	Name     string
	TaskId   string
	SavePath string
	GroupNum int16
	Cores    int16
}
// wsConn dials the local websocket endpoint (/ws) on the configured HTTP
// port and returns the established connection.
func (c *UploadController) wsConn() (*websocket.Conn, error) {
	endpoint := url.URL{
		Scheme: "ws",
		Host:   "127.0.0.1:" + beego.AppConfig.String("default::httpport"),
		Path:   "/ws",
	}
	conn, _, err := websocket.DefaultDialer.Dial(endpoint.String(), nil)
	if err != nil {
		return nil, err
	}
	return conn, nil
}
// base64 encodes s using the project's custom base64 alphabet
// (discovery.BASE64_TABLE).
func (c *UploadController) base64(s string) string {
	return base64.NewEncoding(discovery.BASE64_TABLE).EncodeToString([]byte(s))
}
// Post handles an upload request: it saves the attached file, records the
// render task(s) in the database, and notifies the web client. The response
// is always serialized by the deferred ServeJSON, with Code left at 500
// unless every step succeeds.
func (c *UploadController) Post() {
	c.responseMsg.Data = responseData{}
	c.responseMsg.Code = 500 // pessimistic default; set to 200 only on full success
	c.responseMsg.Time = time.Now()
	c.responseMsg.Data.TaskId = uuid.Rand().Hex()
	c.responseMsg.Data.GroupNum, _ = c.GetInt16("groupNum")
	c.responseMsg.Data.Cores, _ = c.GetInt16("cores")
	c.responseMsg.Data.Name = c.GetString("name")
	if c.responseMsg.Data.GroupNum < 1 {
		c.responseMsg.Data.GroupNum = 1
	}
	// Always send the accumulated response, whatever branch returns below.
	defer func() {
		c.Data["json"] = c.responseMsg
		c.ServeJSON()
	}()
	savePath, err := c.upfile()
	if err != nil {
		c.responseMsg.Err = err.Error()
		return
	}
	c.responseMsg.Data.SavePath = savePath
	if err := c.idb(); err != nil {
		c.responseMsg.Err = err.Error()
		return
	}
	err = c.notice()
	if err != nil {
		c.responseMsg.Err = err.Error()
		return
	}
	c.responseMsg.Code = 200
}
// notice broadcasts to the web client that render data has changed.
// It always returns nil; the error return exists to match the caller's shape.
func (c *UploadController) notice() error {
	render := discovery.Render{}
	render.BroadcastRenderOnline()
	return nil
}
// upfile saves the uploaded "file" form field into the date-based storage
// directory, named after the task id with the original extension preserved,
// and returns the full save path.
func (c *UploadController) upfile() (string, error) {
	saveDir, err := c.mkDir()
	if err != nil {
		return "", err
	}
	f, h, err := c.GetFile("file")
	if err != nil {
		return "", err
	}
	// SaveToFile reopens the form file by name, so the handle from GetFile is
	// only needed for h; defer the close so it also runs if SaveToFile fails.
	defer f.Close()
	savePath := path.Join(saveDir, fmt.Sprintf("%s%s", c.responseMsg.Data.TaskId, path.Ext(h.Filename)))
	if err := c.SaveToFile("file", savePath); err != nil {
		return "", err
	}
	return savePath, nil
}
// mkDir creates (if needed) and returns the upload directory
// <executable dir>/<upfile::SaveDir>/YYYY/MM/DD.
func (c *UploadController) mkDir() (string, error) {
	// Resolve the running executable's directory; these errors were
	// previously discarded, which could silently yield a garbage base path.
	file, err := exec.LookPath(os.Args[0])
	if err != nil {
		return "", err
	}
	runPath, err := filepath.Abs(file)
	if err != nil {
		return "", err
	}
	runPath = filepath.Dir(runPath)
	// Normalize Windows separators so path.Join works on forward slashes.
	runPath = strings.Replace(runPath, `\`, "/", -1)
	now := time.Now()
	saveDir := path.Join(runPath,
		beego.AppConfig.String("upfile::SaveDir"),
		now.Format("2006"),
		now.Format("01"),
		now.Format("02"),
	)
	if err := os.MkdirAll(saveDir, 0755); err != nil {
		return "", err
	}
	return saveDir, nil
}
// idb writes the upload's render task(s) to the database.
// For GroupNum > 1 it first inserts a parent group row, then inserts one
// child row per group member, each pointing back at the parent's id via
// Order; for GroupNum == 1 a single standalone row keyed by the task id is
// inserted.
func (c *UploadController) idb() error {
	var id int64
	var err error
	var order = c.responseMsg.Data.TaskId
	var isGroupItem = false
	o := orm.NewOrm()
	if c.responseMsg.Data.GroupNum > 1 {
		// Parent row for the group; children reference its numeric id.
		if id, err = o.Insert(&models.RenderTask{
			Order:    order,
			GroupNum: c.responseMsg.Data.GroupNum,
			Name:     c.responseMsg.Data.Name,
		}); err != nil {
			return err
		}
		isGroupItem = true
		order = strconv.Itoa(int(id))
	}
	i := int16(1)
	renderTasks := make([]models.RenderTask, 0)
	for ; i <= c.responseMsg.Data.GroupNum; i++ {
		renderTasks = append(renderTasks, models.RenderTask{
			Order:    order,
			FilePath: c.responseMsg.Data.SavePath,
			// Each member carries its own base64-encoded xml<i> form field.
			Xml:         c.base64(c.GetString(fmt.Sprintf("xml%d", i))),
			Ip:          c.Ctx.Input.IP(),
			WorkStatus:  discovery.RENDER_WAIT,
			GroupNum:    1,
			IsGroupItem: isGroupItem,
			Cores:       c.responseMsg.Data.Cores,
		})
	}
	renderTasksLen := len(renderTasks)
	if renderTasksLen > 0 {
		if successNum, err := o.InsertMulti(renderTasksLen, renderTasks); err != nil {
			return err
		} else if int(successNum) != renderTasksLen {
			// Partial insert: report via project-defined error message.
			return errors.New(discovery.ERR_SUCCESS_NUM)
		}
	}
	return nil
}
|
// Copyright 2019-present PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mvcc
import (
"encoding/binary"
"encoding/hex"
"fmt"
"unsafe"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/util/codec"
)
// defaultEndian is the byte order used to encode/decode DBUserMeta fields.
var defaultEndian = binary.LittleEndian

// DBUserMeta is the user meta used in DB.
// It is a 16-byte slice: startTS in bytes 0-7, commitTS in bytes 8-15.
type DBUserMeta []byte
// DecodeLock decodes data to lock, the primary and value is copied, the secondaries are copied if async commit is enabled.
func DecodeLock(data []byte) (l Lock) {
	// Read the fixed-size header in place via an unsafe cast of the first
	// mvccLockHdrSize bytes.
	l.LockHdr = *(*LockHdr)(unsafe.Pointer(&data[0]))
	cursor := mvccLockHdrSize
	// Copy the variable-length tail so the returned Lock does not alias data.
	lockBuf := append([]byte{}, data[cursor:]...)
	l.Primary = lockBuf[:l.PrimaryLen]
	// From here on, cursor is an offset into lockBuf (not data).
	cursor = int(l.PrimaryLen)
	if l.LockHdr.SecondaryNum > 0 {
		l.Secondaries = make([][]byte, l.LockHdr.SecondaryNum)
		for i := uint32(0); i < l.LockHdr.SecondaryNum; i++ {
			// Each secondary is a 2-byte little-endian length followed by the key bytes.
			keyLen := binary.LittleEndian.Uint16(lockBuf[cursor:])
			cursor += 2
			l.Secondaries[i] = lockBuf[cursor : cursor+int(keyLen)]
			cursor += int(keyLen)
		}
	}
	// Whatever remains after primary and secondaries is the value.
	l.Value = lockBuf[cursor:]
	return
}
// LockHdr holds fixed size fields for mvcc Lock.
// Its in-memory layout is written/read directly via unsafe casts in
// MarshalBinary/DecodeLock, so field order and sizes are part of the
// on-disk format.
type LockHdr struct {
	StartTS        uint64
	ForUpdateTS    uint64
	MinCommitTS    uint64
	TTL            uint32
	Op             uint8
	HasOldVer      bool
	PrimaryLen     uint16
	UseAsyncCommit bool
	SecondaryNum   uint32
}

// mvccLockHdrSize is the byte size of the fixed header, including any
// struct padding, as laid out in memory.
const mvccLockHdrSize = int(unsafe.Sizeof(LockHdr{}))

// Lock is the structure for MVCC lock.
type Lock struct {
	LockHdr
	Primary     []byte
	Value       []byte
	Secondaries [][]byte
}
// MarshalBinary implements encoding.BinaryMarshaler interface.
// Layout: LockHdr (raw memory copy), primary key, then for each secondary a
// 2-byte little-endian length prefix plus the key, and finally the value —
// the mirror of DecodeLock.
func (l *Lock) MarshalBinary() []byte {
	lockLen := mvccLockHdrSize + len(l.Primary) + len(l.Value)
	length := lockLen
	if l.LockHdr.SecondaryNum > 0 {
		for _, secondaryKey := range l.Secondaries {
			length += 2
			length += len(secondaryKey)
		}
	}
	buf := make([]byte, length)
	// Write the fixed header by casting the buffer's head to *LockHdr.
	hdr := (*LockHdr)(unsafe.Pointer(&buf[0]))
	*hdr = l.LockHdr
	cursor := mvccLockHdrSize
	copy(buf[cursor:], l.Primary)
	cursor += len(l.Primary)
	if l.LockHdr.SecondaryNum > 0 {
		for _, secondaryKey := range l.Secondaries {
			binary.LittleEndian.PutUint16(buf[cursor:], uint16(len(secondaryKey)))
			cursor += 2
			copy(buf[cursor:], secondaryKey)
			cursor += len(secondaryKey)
		}
	}
	copy(buf[cursor:], l.Value)
	return buf
}
// ToLockInfo converts an mvcc Lock to kvrpcpb.LockInfo
func (l *Lock) ToLockInfo(key []byte) *kvrpcpb.LockInfo {
	return &kvrpcpb.LockInfo{
		PrimaryLock:     l.Primary,
		LockVersion:     l.StartTS,
		Key:             key,
		LockTtl:         uint64(l.TTL),
		LockType:        kvrpcpb.Op(l.Op),
		LockForUpdateTs: l.ForUpdateTS,
		UseAsyncCommit:  l.UseAsyncCommit,
		MinCommitTs:     l.MinCommitTS,
		Secondaries:     l.Secondaries,
	}
}

// String implements fmt.Stringer for Lock.
func (l *Lock) String() string {
	return fmt.Sprintf(
		"Lock { Type: %v, StartTS: %v, ForUpdateTS: %v, Primary: %v, UseAsyncCommit: %v }",
		kvrpcpb.Op(l.Op).String(),
		l.StartTS,
		l.ForUpdateTS,
		hex.EncodeToString(l.Primary),
		l.UseAsyncCommit,
	)
}

// UserMeta value for lock.
const (
	LockUserMetaNoneByte   = 0
	LockUserMetaDeleteByte = 2
)

// UserMeta byte slices for lock.
var (
	LockUserMetaNone   = []byte{LockUserMetaNoneByte}
	LockUserMetaDelete = []byte{LockUserMetaDeleteByte}
)
// DecodeKeyTS decodes the TS in a key.
// The TS occupies the final 8 bytes, encoded descending; a decode failure
// indicates a corrupted key and panics.
func DecodeKeyTS(buf []byte) uint64 {
	tsBin := buf[len(buf)-8:]
	_, ts, err := codec.DecodeUintDesc(tsBin)
	if err != nil {
		panic(err)
	}
	return ts
}
// NewDBUserMeta creates a new DBUserMeta encoding startTS in the first
// 8 bytes and commitTS in the last 8, little-endian.
func NewDBUserMeta(startTS, commitTS uint64) DBUserMeta {
	meta := make(DBUserMeta, 16)
	defaultEndian.PutUint64(meta[:8], startTS)
	defaultEndian.PutUint64(meta[8:], commitTS)
	return meta
}
// CommitTS reads the commitTS from the DBUserMeta.
func (m DBUserMeta) CommitTS() uint64 {
	return defaultEndian.Uint64(m[8:])
}

// StartTS reads the startTS from the DBUserMeta.
func (m DBUserMeta) StartTS() uint64 {
	return defaultEndian.Uint64(m[:8])
}

// EncodeExtraTxnStatusKey encodes a extra transaction status key.
// It is only used for Rollback and Op_Lock.
func EncodeExtraTxnStatusKey(key []byte, startTS uint64) []byte {
	b := append([]byte{}, key...)
	ret := codec.EncodeUintDesc(b, startTS)
	// Bump the first byte to shift the key into the "extra" keyspace,
	// keeping it distinct from the ordinary key; reversed by the decrement
	// in DecodeExtraTxnStatusKey.
	ret[0]++
	return ret
}

// DecodeExtraTxnStatusKey decodes a extra transaction status key.
func DecodeExtraTxnStatusKey(extraKey []byte) (key []byte) {
	// NOTE(review): the guard rejects len <= 9, i.e. original keys shorter
	// than 2 bytes (encoded = key + 8-byte TS) — presumably such keys never
	// occur; confirm against the encoder's callers.
	if len(extraKey) <= 9 {
		return nil
	}
	key = append([]byte{}, extraKey[:len(extraKey)-8]...)
	// Undo the first-byte increment applied by EncodeExtraTxnStatusKey.
	key[0]--
	return
}
|
package main
import "time"
// main starts the Thing's background loop and then blocks forever.
func main() {
	NewThing()
	select {} // empty select blocks the main goroutine indefinitely
}
// Thing owns a background goroutine that prints once per second.
type Thing struct{}

// NewThing constructs a Thing and starts its printing loop in a goroutine.
// NOTE(review): the loop has no stop mechanism and time.Tick's underlying
// ticker is never stopped — fine for a demo, a leak in long-lived code.
func NewThing() *Thing {
	t := &Thing{}
	go t.loop()
	return t
}

// loop prints "blep" every second, forever.
func (t *Thing) loop() {
	for range time.Tick(time.Second) {
		println("blep")
	}
}
|
package model
import "time"
// Event is a typed, timestamped record with an opaque metadata payload.
type Event struct {
	Metadata  []byte    `json:"metadata"` // opaque, caller-defined bytes
	Type      string    `json:"type"`
	Timestamp time.Time `json:"timestamp"`
}
|
package generator
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"strings"
"github.com/pkg/errors"
)
// generateMocks walks the immediate subdirectories of fullOutputDir (skipping
// "models") and generates a <name>.mock.go next to every <name>.go source.
func generateMocks(fullOutputDir string) error {
	directories, err := ioutil.ReadDir(fullOutputDir)
	if err != nil {
		return errors.Wrap(err, "failed to get output directories")
	}
	for _, d := range directories {
		if !d.IsDir() {
			continue
		}
		if d.Name() == "models" {
			continue
		}
		full := path.Join(fullOutputDir, d.Name())
		files, err := ioutil.ReadDir(full)
		if err != nil {
			return errors.Wrapf(err, "failed to read files in '%s'", d.Name())
		}
		for _, f := range files {
			// Skip previously generated mocks (suffix check, not substring).
			if strings.HasSuffix(f.Name(), ".mock.go") {
				continue
			}
			// TrimSuffix, not TrimRight: TrimRight treats ".go" as a
			// character set and also strips trailing 'g'/'o'/'.' from the
			// base name (e.g. "cargo.go" -> "car", breaking the paths below).
			name := strings.TrimSuffix(f.Name(), ".go")
			err = mockGen(path.Join(full, name+".go"), path.Join(full, name+".mock.go"), d.Name())
			if err != nil {
				return errors.Wrapf(err, "failed to gen mock for '%s'", name)
			}
		}
	}
	return nil
}
// mockGen shells out to the mockgen tool, generating a mock for source into
// destination under the given package name. Stderr output from mockgen is
// captured and included in the returned error.
func mockGen(source string, destination string, pkg string) error {
	args := []string{
		fmt.Sprintf("-source=%s", source),
		fmt.Sprintf("-destination=%s", destination),
		fmt.Sprintf("-package=%s", pkg),
	}
	var errBuf bytes.Buffer
	cmd := exec.Command("mockgen", args...)
	cmd.Env = os.Environ()
	cmd.Stderr = &errBuf
	if _, err := cmd.Output(); err != nil {
		return errors.Wrapf(err, "failed to run mockgen: %s", errBuf.String())
	}
	// cmd.Output already returns an error for a non-zero exit status, so
	// this is purely a defensive guard.
	if cmd.ProcessState.ExitCode() != 0 {
		return errors.Errorf("mockgen did not return 0")
	}
	return nil
}
|
package linkedlist
import "testing"
// TestAddTwoNumbers verifies the addition of two multi-digit numbers stored
// as linked lists against a precomputed expected list.
func TestAddTwoNumbers(t *testing.T) {
	first := newListNodes([]int{1, 2, 3, 4, 5, 6, 7}, false)
	second := newListNodes([]int{1, 2, 3, 4, 5, 6, 7, 9}, false)
	want := newListNodes([]int{2, 4, 6, 8, 0, 3, 5, 0, 1}, false)
	got := addTwoNumbers(first, second)
	if !equalTwoList(got, want) {
		t.Fail()
	}
}
|
//
// Copyright 2020 The AVFS authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package dummyidm
import "github.com/avfs/avfs"
// New create a new identity manager.
// The returned DummyIdm is a no-op implementation.
func New() *DummyIdm {
	return &DummyIdm{}
}
// Type returns the type of the fileSystem or Identity manager.
func (idm *DummyIdm) Type() string {
	return "DummyIdm"
}
// Features returns the set of features provided by the file system or identity manager.
// The dummy manager advertises no features (empty bit set).
func (idm *DummyIdm) Features() avfs.Feature {
	return 0
}
// HasFeature returns true if the file system or identity manager provides a given feature.
// Always false here, consistent with Features returning the empty set.
func (idm *DummyIdm) HasFeature(feature avfs.Feature) bool {
	return false
}
|
package controller
import (
"database/sql"
"encoding/json"
"net/http"
"sms-aiforesee-be/database"
"sms-aiforesee-be/models"
"time"
"github.com/google/uuid"
"golang.org/x/crypto/bcrypt"
)
// Register creates a new user from the JSON request body. The password is
// bcrypt-hashed and a fresh API key is generated before the row is inserted
// inside a transaction. Responds 400 on a malformed body and 500 on any
// internal failure or when the username is already taken.
func Register(w http.ResponseWriter, r *http.Request) {
	var us models.User
	if err := json.NewDecoder(r.Body).Decode(&us); err != nil {
		ResponseHandler(w, nil, err.Error(), http.StatusBadRequest)
		return
	}
	isExist, err := QueryUser(us.Username)
	if err != nil {
		ResponseHandler(w, nil, err.Error(), http.StatusInternalServerError)
		return
	}
	if isExist {
		// NOTE(review): 409 Conflict would describe this better; 500 is kept
		// to preserve the existing API contract.
		ResponseHandler(w, nil, "Username Already Exist", http.StatusInternalServerError)
		return
	}
	db, err := database.ConnectDB()
	if err != nil {
		ResponseHandler(w, nil, err.Error(), http.StatusInternalServerError)
		return
	}
	// Bug fix: the connection was never released, leaking one per request.
	defer db.Close()
	tx, err := db.Begin()
	if err != nil {
		// Bug fix: previously called tx.Rollback() here, but when Begin
		// fails tx may be nil, which would panic.
		ResponseHandler(w, nil, err.Error(), http.StatusInternalServerError)
		return
	}
	us.ID = uuid.New().String()
	us.Created = time.Now()
	token, err := GenerateKey()
	if err != nil {
		tx.Rollback()
		ResponseHandler(w, nil, err.Error(), http.StatusInternalServerError)
		return
	}
	us.ApiKey = token
	hashedPassword, err := bcrypt.GenerateFromPassword([]byte(us.Password), bcrypt.DefaultCost)
	if err != nil {
		// Bug fix: this error was silently ignored, which could have stored
		// an empty password hash.
		tx.Rollback()
		ResponseHandler(w, nil, err.Error(), http.StatusInternalServerError)
		return
	}
	us.Password = string(hashedPassword)
	stmt, err := tx.Prepare(`INSERT INTO
users(id,username,password,email,api_key,created)
VALUES ($1, $2, $3, $4,$5,$6)`)
	if err != nil {
		tx.Rollback()
		ResponseHandler(w, nil, err.Error(), http.StatusInternalServerError)
		return
	}
	defer stmt.Close()
	if _, err := stmt.Exec(us.ID, us.Username, us.Password, us.Email, us.ApiKey, us.Created); err != nil {
		tx.Rollback()
		ResponseHandler(w, nil, err.Error(), http.StatusInternalServerError)
		return
	}
	// Bug fix: the commit error was previously ignored.
	if err := tx.Commit(); err != nil {
		ResponseHandler(w, nil, err.Error(), http.StatusInternalServerError)
		return
	}
	ResponseHandler(w, us, "Success", http.StatusOK)
}
// Login authenticates the credentials in the JSON request body against the
// users table (matched by username or e-mail) and echoes the stored user row
// on success.
func Login(w http.ResponseWriter, r *http.Request) {
	var creds models.Credentials
	if err := json.NewDecoder(r.Body).Decode(&creds); err != nil {
		ResponseHandler(w, nil, err.Error(), http.StatusBadRequest)
		return
	}
	db, err := database.ConnectDB()
	if err != nil {
		ResponseHandler(w, nil, err.Error(), http.StatusInternalServerError)
		return
	}
	// Bug fix: the connection was never released, leaking one per request.
	defer db.Close()
	// QueryRow defers any lookup error to the Scan call below, so the old
	// stale err check that followed this line was dead code and is removed.
	result := db.QueryRow("select * from users where username=$1 OR email=$1", creds.Username)
	var us models.User
	err = result.Scan(&us.ID,
		&us.Username,
		&us.Password,
		&us.Email,
		&us.ApiKey,
		&us.Created,
	)
	if err != nil {
		// If an entry with the username does not exist, send an "Unauthorized"(401) status
		if err == sql.ErrNoRows {
			ResponseHandler(w, nil, "Username or email not found", http.StatusUnauthorized)
			return
		}
		// If the error is of any other type, send a 500 status
		ResponseHandler(w, nil, err.Error(), http.StatusInternalServerError)
		return
	}
	if err = bcrypt.CompareHashAndPassword([]byte(us.Password), []byte(creds.Password)); err != nil {
		// NOTE(review): 401 would be the conventional status for a bad
		// password; 500 kept to preserve the existing API behavior.
		ResponseHandler(w, nil, "Wrong Password", http.StatusInternalServerError)
		return
	}
	ResponseHandler(w, us, "Login Sucessful ", http.StatusOK)
}
// QueryUser reports whether a user with the given username exists.
func QueryUser(username string) (bool, error) {
	var users models.User
	db, err := database.ConnectDB()
	if err != nil {
		// Bug fix: the connection error was previously unchecked, and
		// db.Close() was deferred before this check — a failed connection
		// would have dereferenced a nil handle.
		return false, err
	}
	defer db.Close()
	err = db.QueryRow("SELECT ID FROM users WHERE username = $1", username).Scan(&users.ID)
	if err != nil {
		if err != sql.ErrNoRows {
			return false, err
		}
		// No row simply means the username is free.
		return false, nil
	}
	return true, nil
}
|
package model
// RainlabBlogPosts mirrors a row of the RainLab blog posts table.
type RainlabBlogPosts struct {
	Id          int    // primary key
	Title       string // post title
	ContentHtml string // rendered HTML body
}
|
package bean
import (
"log"
"sync"
"time"
"github.com/astaxie/beego/orm"
. "webapi/common"
)
const (
	// DefaultChannelID is the channel used when none is specified.
	DefaultChannelID = "default"
)
// Announcement is one announcement row with per-language illustrations.
type Announcement struct {
	ID                int64  `orm:"column(id);auto;pk"`      // primary key
	Channel           string `orm:"column(channel);size(8)"` // channel code
	Illustration      string `orm:"column(illu_all)"`        // default illustration (English)
	IllustrationZH_TW string `orm:"column(illu_zh_tw)"`      // Traditional Chinese illustration
	IllustrationZH_CN string `orm:"column(illu_zh_cn)"`      // Simplified Chinese illustration
	Link              string `orm:"column(link)"`            // external link
	Force             bool   `orm:"column(force)"`           // whether display is forced
}
// TableName maps Announcement onto the "announcement" table.
func (a *Announcement) TableName() string {
	return "announcement"
}

// GetIllustration returns the illustration for the requested language,
// falling back to the default (English) illustration when no localized
// variant is set or the language is unknown.
func (a *Announcement) GetIllustration(lang int32) string {
	if lang == ZH_CN && a.IllustrationZH_CN != "" {
		return a.IllustrationZH_CN
	}
	if lang == ZH_TW && a.IllustrationZH_TW != "" {
		return a.IllustrationZH_TW
	}
	return a.Illustration
}
// ---------------------------------------------------------------------------

// In-memory announcement cache, guarded by gCacheAnnouncementLock.
var gCacheAnnouncements []*Announcement
var gCacheAnnouncementLock sync.RWMutex

// PreloadAnnouncement performs one synchronous cache load and then keeps
// refreshing the cache every 30 seconds from a background goroutine.
func PreloadAnnouncement() {
	doAnnouncementLoad(true)
	go func() {
		ticker := time.NewTicker(30 * time.Second)
		for range ticker.C {
			doAnnouncementLoad(false)
		}
	}()
}
func doAnnouncementLoad(verbose bool) {
o := orm.NewOrm()
var arr []*Announcement
num, err := o.QueryTable(new(Announcement)).Limit(-1).All(&arr)
if err != nil {
log.Printf("更新公告缓存数据时出错: %v", err)
return
}
gCacheAnnouncementLock.Lock()
if num > 0 {
gCacheAnnouncements = make([]*Announcement, num)
for i := int64(0); i < num; i++ {
gCacheAnnouncements[i] = arr[i]
}
} else {
gCacheAnnouncements = nil
}
gCacheAnnouncementLock.Unlock()
if verbose {
log.Printf("预载入%d条公告记录", num)
}
}
// GetAnnouncements returns a snapshot copy of the cached announcements, or
// nil when the cache has not been populated.
func GetAnnouncements() []*Announcement {
	gCacheAnnouncementLock.RLock()
	defer gCacheAnnouncementLock.RUnlock()
	if gCacheAnnouncements == nil {
		return nil
	}
	snapshot := make([]*Announcement, len(gCacheAnnouncements))
	copy(snapshot, gCacheAnnouncements)
	return snapshot
}
|
package main
import (
"bufio"
"fmt"
"os"
"strconv"
)
// main reads three lines from stdin — meal cost, tip percent and tax
// percent — and prints the rounded total cost.
func main() {
	scanner := bufio.NewScanner(os.Stdin)
	var texts []string
	for scanner.Scan() {
		texts = append(texts, scanner.Text())
		if len(texts) >= 3 {
			break
		}
	}
	if len(texts) < 3 {
		// Bug fix: previously indexed texts[0..2] unconditionally, panicking
		// with index-out-of-range on short input.
		fmt.Fprintln(os.Stderr, "expected 3 input lines: meal cost, tip percent, tax percent")
		return
	}
	// Bug fix: parse errors were silently discarded before.
	values := make([]float64, 3)
	for i, s := range texts {
		v, err := strconv.ParseFloat(s, 64)
		if err != nil {
			fmt.Fprintf(os.Stderr, "invalid number %q: %v\n", s, err)
			return
		}
		values[i] = v
	}
	mealCost, tipPercent, taxPercent := values[0], values[1], values[2]
	tip := mealCost * (tipPercent / 100)
	tax := mealCost * (taxPercent / 100)
	totalCost := round(mealCost + tip + tax)
	fmt.Printf("The total meal cost is %d dollars.", totalCost)
}
// round rounds aFloat to the nearest integer, with halves rounded away
// from zero for both signs.
func round(aFloat float64) int {
	offset := 0.5
	if aFloat < 0 {
		offset = -0.5
	}
	return int(aFloat + offset)
}
|
/*
GoLang code created by Jirawat Harnsiriwatanakit https://github.com/kazekim
*/
package tbccert
import (
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"math/big"
"time"
)
// Subject attributes used for the default self-signed certificate.
const (
	x509CertificateCommonName   = "ThaiBankClientGo"
	x509CertificateOrganization = "Jirawat.Kim"
)

// TLSCertGenerator produces self-signed TLS certificates backed by a single
// RSA private key generated at construction time.
type TLSCertGenerator struct {
	key *rsa.PrivateKey
}

// NewTLSCertGenerator creates a generator holding a fresh 2048-bit RSA key.
func NewTLSCertGenerator() (*TLSCertGenerator, error) {
	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, fmt.Errorf("private key cannot be created: %v", err.Error())
	}
	return &TLSCertGenerator{key: privateKey}, nil
}

// GeneratePEMPrivateKey encodes the generator's RSA key as a PKCS#1 PEM
// block. The error result is always nil; it is kept for interface stability.
func (g *TLSCertGenerator) GeneratePEMPrivateKey() (*[]byte, error) {
	block := pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(g.key),
	}
	encoded := pem.EncodeToMemory(&block)
	return &encoded, nil
}

// GenerateDefaultPEMCertificate builds a PEM certificate from a default
// template: random serial number, five-year validity and the package-level
// subject constants.
func (g *TLSCertGenerator) GenerateDefaultPEMCertificate() (*[]byte, error) {
	serial, err := g.randomBigInt()
	if err != nil {
		return nil, err
	}
	now := time.Now()
	template := x509.Certificate{
		// you can add any attr that you need
		NotBefore:    now,
		NotAfter:     now.AddDate(5, 0, 0),
		SerialNumber: serial,
		Subject: pkix.Name{
			CommonName:   x509CertificateCommonName,
			Organization: []string{x509CertificateOrganization},
		},
		BasicConstraintsValid: true,
	}
	return g.GeneratePEMCertificate(template)
}

// randomBigInt returns a cryptographically strong random integer drawn
// uniformly from [0, 2^130-1).
func (g *TLSCertGenerator) randomBigInt() (*big.Int, error) {
	limit := new(big.Int)
	limit.Exp(big.NewInt(2), big.NewInt(130), nil)
	limit.Sub(limit, big.NewInt(1))
	return rand.Int(rand.Reader, limit)
}

// GeneratePEMCertificate self-signs the given template with the generator's
// key and returns the certificate as a PEM block.
func (g *TLSCertGenerator) GeneratePEMCertificate(x509Cert x509.Certificate) (*[]byte, error) {
	der, err := x509.CreateCertificate(rand.Reader, &x509Cert, &x509Cert, &g.key.PublicKey, g.key)
	if err != nil {
		return nil, fmt.Errorf("certificate cannot be created: %v", err.Error())
	}
	encoded := pem.EncodeToMemory(&pem.Block{
		Type:  "CERTIFICATE",
		Bytes: der,
	})
	return &encoded, nil
}

// GenerateX509KeyPair loads a PEM certificate/key pair into a tls.Certificate.
func (g *TLSCertGenerator) GenerateX509KeyPair(certPEM, keyPEM []byte) (*tls.Certificate, error) {
	pair, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		return nil, fmt.Errorf("cannot be loaded the certificate: %v", err.Error())
	}
	return &pair, nil
}

// GenerateDefaultX509KeyPair generates the default certificate and pairs it
// with the generator's private key as a ready-to-use tls.Certificate.
func (g *TLSCertGenerator) GenerateDefaultX509KeyPair() (*tls.Certificate, error) {
	keyPEM, err := g.GeneratePEMPrivateKey()
	if err != nil {
		return nil, err
	}
	certPEM, err := g.GenerateDefaultPEMCertificate()
	if err != nil {
		return nil, err
	}
	return g.GenerateX509KeyPair(*certPEM, *keyPEM)
}
} |
// Package server defines internal behaviour.
package server
import (
"encoding/json"
"net/http"
)
// Error explains what went wrong.
func Error(w http.ResponseWriter, code int, message string) {
JSON(w, code, map[string]string{"error": message})
}
// JSON marshals a JSON payload and writes it out to the response, with some headers.
func JSON(w http.ResponseWriter, code int, payload interface{}) {
response, _ := json.Marshal(payload)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(response)
}
// JSONWithCookie marshals a JSON payload, sets a cookie, and writes out to the response, with some headers.
func JSONWithCookie(w http.ResponseWriter, code int, payload interface{}, cookie http.Cookie) {
response, _ := json.Marshal(payload)
http.SetCookie(w, &cookie)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(response)
}
|
package cotacao
import (
"fmt"
"github.com/fabioxgn/go-bot"
. "github.com/smartystreets/goconvey/convey"
"net/http"
"net/http/httptest"
"testing"
)
const (
	// expectedJSON is a canned webservice reply covering every quote field
	// the bot parses (bovespa, dollar, euro and the update timestamp).
	expectedJSON = `{
"bovespa":{
"cotacao":"60800",
"variacao":"-1.68"
},
"dolar":{
"cotacao":"2.2430",
"variacao":"+0.36"
},
"euro":{
"cotacao":"2.9018",
"variacao":"-1.21"
},
"atualizacao":"04\/09\/14 -18:13"
}`
)
// TestCotacao drives the cotacao command against a mocked HTTP webservice.
// Each sub-test points the package-level `url` variable at its own test
// server, so the assertions depend on that global being honored by cotacao.
func TestCotacao(t *testing.T) {
	Convey("Ao executar o comando cotação", t, func() {
		cmd := &bot.Cmd{}
		Convey("Deve responder com a cotação do dólar e euro", func() {
			// Serve the canned quote JSON.
			ts := httptest.NewServer(
				http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
					fmt.Fprintln(w, expectedJSON)
				}))
			defer ts.Close()
			url = ts.URL
			c, err := cotacao(cmd)
			So(err, ShouldBeNil)
			So(c, ShouldEqual, "Dólar: 2.2430 (+0.36), Euro: 2.9018 (-1.21)")
		})
		Convey("Quando o webservice retornar algo inválido deve retornar erro", func() {
			// Serve a body that is not JSON so decoding must fail.
			ts := httptest.NewServer(
				http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
					fmt.Fprintln(w, "invalid")
				}))
			defer ts.Close()
			url = ts.URL
			_, err := cotacao(cmd)
			So(err, ShouldNotBeNil)
		})
	})
}
|
package core
import "github.com/zhenghaoz/gorse/base"
// ModelInterface is the interface for all models. Any model in this
// package should implement it.
type ModelInterface interface {
	// SetParams sets the model's hyper-parameters.
	SetParams(params base.Params)
	// GetParams returns the model's hyper-parameters.
	GetParams() base.Params
	// Predict the rating given by a user (userId) to a item (itemId).
	Predict(userId, itemId int) float64
	// Fit a model with a train set and parameters.
	Fit(trainSet DataSetInterface, options *base.RuntimeOptions)
}
|
// Copyright 2016 Tim O'Brien. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package jnigi
/*
#include <jni.h>
#include <Windows.h>
typedef jint (*type_JNI_GetDefaultJavaVMInitArgs)(void*);
type_JNI_GetDefaultJavaVMInitArgs var_JNI_GetDefaultJavaVMInitArgs;
jint dyn_JNI_GetDefaultJavaVMInitArgs(void *args) {
return var_JNI_GetDefaultJavaVMInitArgs(args);
}
typedef jint (*type_JNI_CreateJavaVM)(JavaVM**, void**, void*);
type_JNI_CreateJavaVM var_JNI_CreateJavaVM;
jint dyn_JNI_CreateJavaVM(JavaVM **pvm, void **penv, void *args) {
return var_JNI_CreateJavaVM(pvm, penv, args);
}
*/
import "C"
import (
"unsafe"
"errors"
)
// jni_GetDefaultJavaVMInitArgs forwards to the dynamically resolved
// JNI_GetDefaultJavaVMInitArgs entry point (see LoadJVMLib).
func jni_GetDefaultJavaVMInitArgs(args unsafe.Pointer) jint {
	return jint(C.dyn_JNI_GetDefaultJavaVMInitArgs((unsafe.Pointer)(args)))
}
// jni_CreateJavaVM forwards to the dynamically resolved JNI_CreateJavaVM
// entry point (see LoadJVMLib).
func jni_CreateJavaVM(pvm unsafe.Pointer, penv unsafe.Pointer, args unsafe.Pointer) jint {
	return jint(C.dyn_JNI_CreateJavaVM((**C.JavaVM)(pvm), (*unsafe.Pointer)(penv), (unsafe.Pointer)(args)))
}
// LoadJVMLib dynamically loads jvm.dll from jvmLibPath and resolves the two
// JNI entry points (JNI_GetDefaultJavaVMInitArgs, JNI_CreateJavaVM) into the
// package-level C function pointers used by the dyn_ wrappers above.
func LoadJVMLib(jvmLibPath string) error {
	cs := cString(jvmLibPath)
	defer free(cs)
	libHandle := C.LoadLibrary((*C.char)(cs))
	if libHandle == nil {
		// Typo fix: message previously read "dyanmically".
		return errors.New("could not dynamically load jvm.dll")
	}
	cs2 := cString("JNI_GetDefaultJavaVMInitArgs")
	defer free(cs2)
	ptr := C.GetProcAddress(libHandle, (*C.char)(cs2))
	if ptr == nil {
		return errors.New("could not find JNI_GetDefaultJavaVMInitArgs in jvm.dll")
	}
	C.var_JNI_GetDefaultJavaVMInitArgs = C.type_JNI_GetDefaultJavaVMInitArgs(ptr)
	cs3 := cString("JNI_CreateJavaVM")
	defer free(cs3)
	ptr = C.GetProcAddress(libHandle, (*C.char)(cs3))
	if ptr == nil {
		return errors.New("could not find JNI_CreateJavaVM in jvm.dll")
	}
	C.var_JNI_CreateJavaVM = C.type_JNI_CreateJavaVM(ptr)
	return nil
}
|
package gominin
import (
"testing"
)
// intList adapts []int to the Len-bearing collection interface expected by
// the search helpers under test.
type intList []int
// Len returns the number of elements in the list.
func (list intList) Len() int {
	return len(list)
}
// TestBinarySearchSingleValue checks lookup in a one-element list.
func TestBinarySearchSingleValue(t *testing.T) {
	const target = 0
	values := intList{target}
	got := BinarySearch(values, func(i int) int { return values[i] - target })
	if got != 0 {
		t.Error("Error. BinarySearch should be able to find an item.")
	}
}

// TestBinarySearchTwoValues checks that the first of two elements is found.
func TestBinarySearchTwoValues(t *testing.T) {
	const target = 0
	values := intList{0, 1}
	got := BinarySearch(values, func(i int) int { return values[i] - target })
	if got != 0 {
		t.Error("Error. BinarySearch should be able to find an item.")
	}
}

// TestBinarySearchThreeValues searches each of the first two elements in a
// three-element list.
func TestBinarySearchThreeValues(t *testing.T) {
	values := intList{0, 1, 2}
	for target := 0; target < 2; target++ {
		target := target
		got := BinarySearch(values, func(i int) int { return values[i] - target })
		if got != target {
			t.Error("Error. BinarySearch should be able to find an item.")
		}
	}
}

// TestBinarySearchFiveValues searches every element of a five-element list.
func TestBinarySearchFiveValues(t *testing.T) {
	values := intList{0, 1, 2, 3, 4}
	for target := 0; target < 5; target++ {
		target := target
		got := BinarySearch(values, func(i int) int { return values[i] - target })
		if got != target {
			t.Error("Error. BinarySearch should be able to find an item.")
		}
	}
}

// TestBinarySearchNotFound checks that a missing value yields -1.
func TestBinarySearchNotFound(t *testing.T) {
	values := intList{0, 1, 2, 3, 4}
	got := BinarySearch(values, func(i int) int { return values[i] - 6 })
	if got != -1 {
		t.Error("Error. BinarySearch should return not found(-1)")
	}
}

// TestLowerBound checks the insertion point of a value between two elements.
func TestLowerBound(t *testing.T) {
	const target = 1
	values := intList{0, 2, 4}
	got := LowerBound(values, func(i int) int { return values[i] - target })
	if got != 1 {
		t.Error("Error. Index for 2nd item should be return.")
	}
}

// TestLowerBoundExactSame checks the bound when the value matches exactly.
func TestLowerBoundExactSame(t *testing.T) {
	const target = 2
	values := intList{0, 2, 4}
	got := LowerBound(values, func(i int) int { return values[i] - target })
	if got != 1 {
		t.Error("Error. Index for 2nd item should be return.")
	}
}

// TestLowerBoundUnderMinimum checks a value below every element.
func TestLowerBoundUnderMinimum(t *testing.T) {
	const target = -1
	values := intList{0, 2, 4}
	got := LowerBound(values, func(i int) int { return values[i] - target })
	if got != 0 {
		t.Error("Error. Index for 1st item should be return.")
	}
}

// TestLowerBoundUnderMaximum checks a value above every element.
func TestLowerBoundUnderMaximum(t *testing.T) {
	const target = 5
	values := intList{0, 2, 4}
	got := LowerBound(values, func(i int) int { return values[i] - target })
	if got != 3 {
		t.Error("Error. Not found is the same as Len.")
	}
}
|
package main
import (
"fmt"
"strconv"
"strings"
"github.com/jnewmano/advent2020/input"
"github.com/jnewmano/advent2020/output"
)
// main runs part A of the puzzle and prints the answer.
func main() {
	sum := parta()
	fmt.Println(sum)
}
// parta loads the puzzle input, parses the bag rules (dumping them for
// debugging) and counts how many bags one shiny gold bag must contain.
func parta() interface{} {
	// input.SetRaw(raw2)
	// var things = input.Load()
	// var things = input.LoadSliceSliceString("")
	var things = input.LoadSliceString("")
	rules := parseBags(things)
	for _, v := range rules {
		fmt.Printf("%s - %#v\n", v.bag, v.contains)
	}
	// How many individual bags are required inside your single shiny gold bag?
	sum := howManyBagsInBag(rules, "shiny gold")
	return sum
}
// howManyBagsInBag recursively counts the total number of bags contained
// inside one bag of the given color (not counting the outer bag itself).
func howManyBagsInBag(rules []Rule, color string) int {
	fmt.Println("finding rule for", color)
	rule := findRuleForColor(rules, color)
	total := 0
	for _, inner := range rule.contains {
		// Each entry contributes its own bags plus everything nested inside.
		total += inner.count * (1 + howManyBagsInBag(rules, inner.color))
	}
	return total
}

// findRuleForColor returns the rule whose bag matches color, panicking when
// no such rule exists.
func findRuleForColor(rules []Rule, color string) Rule {
	for i := range rules {
		if rules[i].bag == color {
			return rules[i]
		}
	}
	panic("couldn't find rule for: " + color)
}

// canContain reports whether the rule allows directly holding a bag of the
// given color.
func canContain(rule Rule, color string) bool {
	for i := range rule.contains {
		if rule.contains[i].color == color {
			return true
		}
	}
	return false
}

// parseBags parses one rule per input line.
func parseBags(things []string) []Rule {
	rules := make([]Rule, 0, len(things))
	for _, line := range things {
		rules = append(rules, parseRule(line))
	}
	return rules
}

// Rule describes what one bag color may contain.
type Rule struct {
	bag      string
	contains []Contains
}

// Contains is a count of bags of a single color.
type Contains struct {
	count int
	color string
}

// parseRule parses a single line such as
// "light red bags contain 1 bright white bag, 2 muted yellow bags."
func parseRule(r string) Rule {
	fmt.Println(r)
	parts := strings.Split(r, " bags contain ")
	outer := strings.TrimSpace(parts[0])
	entries := strings.Split(strings.TrimSpace(parts[1]), ", ")
	contents := make([]Contains, 0)
	for _, entry := range entries {
		// Strip the trailing period, plural 's' and spaces:
		// "2 muted yellow bags." -> "2 muted yellow bag".
		entry = strings.Trim(entry, ". s")
		if entry == "no other bag" {
			continue
		}
		fields := strings.SplitN(entry, " ", 2)
		n, err := strconv.Atoi(fields[0])
		if err != nil {
			panic(err)
		}
		color := strings.TrimSpace(strings.TrimSuffix(fields[1], "bag"))
		contents = append(contents, Contains{count: n, color: color})
	}
	return Rule{bag: outer, contains: contents}
}
// Keep the output package import referenced even while unused in this part.
var _ = output.High(nil)
// raw is the worked example from the puzzle description (part A answer for
// "shiny gold" is 32 — presumably; confirm against the puzzle text).
var raw = `light red bags contain 1 bright white bag, 2 muted yellow bags.
dark orange bags contain 3 bright white bags, 4 muted yellow bags.
bright white bags contain 1 shiny gold bag.
muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
dark olive bags contain 3 faded blue bags, 4 dotted black bags.
vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
faded blue bags contain no other bags.
dotted black bags contain no other bags.`
// raw2 is the second worked example (a strictly nested chain of bags).
var raw2 = `shiny gold bags contain 2 dark red bags.
dark red bags contain 2 dark orange bags.
dark orange bags contain 2 dark yellow bags.
dark yellow bags contain 2 dark green bags.
dark green bags contain 2 dark blue bags.
dark blue bags contain 2 dark violet bags.
dark violet bags contain no other bags.`
|
package demo
import (
"fmt"
"math/rand"
)
// Job is one unit of work: compute the decimal digit sum of Number.
type Job struct {
	Id     int
	Number int
}

// Result pairs a job with its computed digit sum.
type Result struct {
	job *Job
	sum int
}

// cals computes the decimal digit sum of job.Number and sends the result on
// resultChan. Non-positive numbers yield a sum of 0.
func cals(job *Job, resultChan chan *Result) {
	digitSum := 0
	for n := job.Number; n > 0; n /= 10 {
		digitSum += n % 10
	}
	resultChan <- &Result{job: job, sum: digitSum}
}

// Worker consumes jobs until jobChan is closed.
func Worker(jobChan chan *Job, resultChan chan *Result) {
	for job := range jobChan {
		cals(job, resultChan)
	}
}

// StartWorker launches the requested number of worker goroutines.
func StartWorker(number int, jobChan chan *Job, resultChan chan *Result) {
	for i := 0; i < number; i++ {
		go Worker(jobChan, resultChan)
	}
}

// PrintResult prints each result as it arrives.
func PrintResult(resultChan chan *Result) {
	for result := range resultChan {
		fmt.Printf("id=%v, number=%v, result=%d \n", result.job.Id, result.job.Number, result.sum)
	}
}

// Test wires up 128 workers and feeds them random numbers forever.
func Test() {
	jobChan := make(chan *Job, 1000)
	resultChan := make(chan *Result, 1000)
	StartWorker(128, jobChan, resultChan)
	go PrintResult(resultChan)
	// Ids start at 2, matching the original increment-before-send behavior.
	for id := 2; ; id++ {
		jobChan <- &Job{Id: id, Number: rand.Int()}
	}
}
|
package qiwi
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
)
// TestCardRequest routes a card request to a mocked QIWI endpoint and
// verifies the pay URL from the canned reply is captured.
func TestCardRequest(t *testing.T) {
	// Expected reply from QIWI
	// HTTP/1.1 200 OK
	// Content-Type: application/json
	reply := `
{
"siteId": "test-01",
"billId": "gg",
"amount": {
"currency": "RUB",
"value": 42.24
},
"status": {
"value": "WAITING",
"changedDateTime": "2019-08-28T16:26:36.835+03:00"
},
"customFields": {},
"comment": "Spasibo",
"creationDateTime": "2019-08-28T16:26:36.835+03:00",
"expirationDateTime": "2019-09-13T14:30:00+03:00",
"payUrl": "https://oplata.qiwi.com/form/?invoice_uid=78d60ca9-7c99-481f-8e51-0100c9012087"
}`
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, reply)
	}))
	defer server.Close()
	// Route request to the mocked http server instead of QIWI.
	pay := New("billId", "SiteID", "TOKEN", server.URL)
	pay.CardRequest(context.TODO(), "pubKey", 100)
	const wantURL = "https://oplata.qiwi.com/form/?invoice_uid=78d60ca9-7c99-481f-8e51-0100c9012087"
	if pay.PayURL != wantURL {
		t.Error("PayURL not received")
	}
}

// TestLocationTime checks that a freshly requested bill does not expire in
// the past relative to Moscow time.
func TestLocationTime(t *testing.T) {
	pay := New("billID", "siteID", "token", "")
	pay.CardRequest(context.TODO(), "pubKey", 100)
	moscow, _ := time.LoadLocation("Europe/Moscow")
	if pay.Expiration.Before(time.Now().In(moscow)) {
		t.Error("Bad expiration time")
	}
}
|
package remotego
import (
"errors"
"github.com/dash-app/remote-go/aircon"
"github.com/dash-app/remote-go/aircon/daikin/daikin01"
"github.com/dash-app/remote-go/aircon/daikin/daikin02"
"github.com/dash-app/remote-go/aircon/daikin/daikin03"
"github.com/dash-app/remote-go/aircon/daikin/daikin04"
"github.com/dash-app/remote-go/aircon/fujitsu/fujitsu01"
"github.com/dash-app/remote-go/aircon/mitsubishi/mitsubishi02"
"github.com/dash-app/remote-go/aircon/panasonic/panasonic01"
"github.com/dash-app/remote-go/light"
"github.com/dash-app/remote-go/light/hitachi/ira03h"
"github.com/dash-app/remote-go/template"
)
// VendorSet - Remote Controller Identifier
type VendorSet struct {
	Vendor string `json:"vendor"` // manufacturer name, e.g. "daikin"
	Model  string `json:"model"`  // model identifier, e.g. "daikin01"
}
// Remote - Remote set (aircon, light etc...)
type Remote struct {
	aircon map[VendorSet]aircon.Remote // registered air-conditioner remotes
	light  map[VendorSet]light.Remote  // registered light remotes
}
// Init - Initialize remote controller
// Registers every supported vendor/model pair for both device kinds.
func Init() *Remote {
	return &Remote{
		aircon: map[VendorSet]aircon.Remote{
			{Vendor: "daikin", Model: "daikin01"}:         daikin01.New(),
			{Vendor: "daikin", Model: "daikin02"}:         daikin02.New(),
			{Vendor: "daikin", Model: "daikin03"}:         daikin03.New(),
			{Vendor: "daikin", Model: "daikin04"}:         daikin04.New(),
			{Vendor: "fujitsu", Model: "fujitsu01"}:       fujitsu01.New(),
			{Vendor: "mitsubishi", Model: "mitsubishi02"}: mitsubishi02.New(),
			{Vendor: "panasonic", Model: "panasonic01"}:   panasonic01.New(),
		},
		light: map[VendorSet]light.Remote{
			{Vendor: "hitachi", Model: "ir-a03h"}: ira03h.New(),
		},
	}
}
// GetTemplate returns the command template for a remote identified by kind
// ("AIRCON" or "LIGHT"), vendor and model. An unknown kind or an
// unregistered vendor/model pair yields an error.
func (r *Remote) GetTemplate(kind, vendor, model string) (*template.Template, error) {
	switch kind {
	case "AIRCON":
		e, err := r.GetAircon(vendor, model)
		if err != nil {
			return nil, err
		}
		return e.Template(), nil
	case "LIGHT":
		e, err := r.GetLight(vendor, model)
		if err != nil {
			return nil, err
		}
		return e.Template(), nil
	default:
		// Typo fix: message previously read "unsupport kind".
		return nil, errors.New("unsupported kind")
	}
}
// GetAircon looks up an air-conditioner remote by vendor and model.
func (r *Remote) GetAircon(vendor, model string) (aircon.Remote, error) {
	ac, ok := r.aircon[VendorSet{Vendor: vendor, Model: model}]
	if !ok {
		return nil, errors.New("not found")
	}
	return ac, nil
}

// GetLight looks up a light remote by vendor and model.
func (r *Remote) GetLight(vendor, model string) (light.Remote, error) {
	l, ok := r.light[VendorSet{Vendor: vendor, Model: model}]
	if !ok {
		return nil, errors.New("not found")
	}
	return l, nil
}
// AvailableAircons - Get vendor/models name
func (r *Remote) AvailableAircons() map[string][]string {
	keys := make([]VendorSet, 0, len(r.aircon))
	for k := range r.aircon {
		keys = append(keys, k)
	}
	return extractVendorSet(keys)
}

// AvailableLights - Get vendor/models name
func (r *Remote) AvailableLights() map[string][]string {
	keys := make([]VendorSet, 0, len(r.light))
	for k := range r.light {
		keys = append(keys, k)
	}
	return extractVendorSet(keys)
}
// extractVendorSet - Convert to map[string][]string format
// Groups model names by vendor.
func extractVendorSet(from []VendorSet) map[string][]string {
	result := make(map[string][]string)
	for _, k := range from {
		// append on a nil slice allocates a new one, so the previous
		// explicit nil-check branch was redundant.
		result[k.Vendor] = append(result[k.Vendor], k.Model)
	}
	return result
}
|
package glog
import (
"fmt"
"testing"
"github.com/onsi/gomega"
)
// TestNewLogLevel checks parsing of every known level name plus one
// unknown input.
func TestNewLogLevel(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	cases := []struct {
		input    string
		expected LogLevel
		ok       bool
	}{
		{"debug", Debug, true},
		{"info", Info, true},
		{"notice", Notice, true},
		{"warning", Warning, true},
		{"error", Error, true},
		{"critical", Critical, true},
		{"foobar", Debug, false},
	}
	for _, tc := range cases {
		level, err := NewLogLevel(tc.input)
		if tc.ok {
			g.Expect(err).To(gomega.BeNil())
		} else {
			g.Expect(err).NotTo(gomega.BeNil())
		}
		g.Expect(level).To(gomega.Equal(tc.expected))
	}
}

// TestLogLevelString checks the textual rendering of every level.
func TestLogLevelString(t *testing.T) {
	g := gomega.NewGomegaWithT(t)
	cases := []struct {
		input    LogLevel
		expected string
	}{
		{Debug, "debug"},
		{Info, "info"},
		{Notice, "notice"},
		{Warning, "warning"},
		{Error, "error"},
		{Critical, "critical"},
	}
	for _, tc := range cases {
		rendered := fmt.Sprint(tc.input)
		g.Expect(rendered).To(gomega.Equal(tc.expected))
	}
}
|
package db
import (
"sync"
"time"
_ "github.com/go-sql-driver/mysql"
"github.com/golang/glog"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mysql"
"sub_account_service/finance/models"
)
// DbClient is the shared database client, populated by InitDb.
var DbClient *Db
// AutoMigrate reports whether schema auto-migration has been enabled.
var AutoMigrate = false
// Db wraps a gorm connection together with its server address and a lock.
type Db struct {
	addr   string       // the addr of db server
	Lock   sync.RWMutex // lock
	Client *gorm.DB     // mysql client
}
// InitDb connects to the MySQL server at addr, stores the handle in the
// package-level DbClient, auto-migrates the tables and starts a background
// goroutine that pings the connection every 5 seconds.
// NOTE(review): each call spawns a new monitor goroutine; see timer1.
func InitDb(addr string) {
	glog.Infoln("starting db :", addr)
	mydb := &Db{addr: addr}
	db, err := gorm.Open("mysql", addr)
	if err != nil {
		glog.Errorln("db initing fail", err)
		return
	}
	// Verify the connection is actually usable before publishing it.
	if err = db.DB().Ping(); err != nil {
		glog.Errorln("db ping fail", err)
		return
	}
	glog.Infoln("connecting db success!")
	mydb.Client = db
	DbClient = mydb
	db.DB().SetMaxIdleConns(10)
	db.DB().SetMaxOpenConns(100)
	db.LogMode(false)
	AutoMigrate = true
	// Create the tables.
	createTable()
	go timer1(addr)
}
// CreateTable auto-migrates the given model on this Db's own connection.
func (d *Db) CreateTable(models interface{}) {
	// Bug fix: previously used the package-level DbClient instead of the
	// receiver, so calling this on any other Db instance silently targeted
	// the global connection.
	d.Client.AutoMigrate(models)
}
// timer1 pings the database every 5 seconds and triggers a reconnect via
// InitDb when the ping fails.
func timer1(addr string) {
	ticker := time.NewTicker(5 * time.Second)
	// Bug fix: the ticker was never stopped, leaking its resources.
	defer ticker.Stop()
	for range ticker.C {
		if err := DbClient.Client.DB().Ping(); err != nil {
			glog.Errorln("mysql connect fail,err:", err)
			// InitDb starts a fresh monitor goroutine on success; return so
			// monitors do not accumulate with every reconnect attempt.
			InitDb(addr)
			return
		}
	}
}
func createTable() {
DbClient.Client.AutoMigrate(&models.User{})
DbClient.Client.AutoMigrate(&models.ExpensesBill{})
DbClient.Client.AutoMigrate(&models.IncomeStatement{})
DbClient.Client.AutoMigrate(&models.UserBill{})
DbClient.Client.AutoMigrate(&models.UserAccount{})
DbClient.Client.AutoMigrate(&models.RefundTrade{})
DbClient.Client.AutoMigrate(&models.ScheduleAccount{})
}
|
package collection
import (
"runtime"
"github.com/tidwall/btree"
"github.com/tidwall/geojson"
"github.com/tidwall/geojson/geometry"
"github.com/tidwall/rtree"
"github.com/tidwall/tile38/internal/deadline"
"github.com/tidwall/tile38/internal/field"
"github.com/tidwall/tile38/internal/object"
)
// yieldStep forces the iterator to yield goroutine every 256 steps.
const yieldStep = 256
// Cursor allows for quickly paging through Scan, Within, Intersects, and Nearby
type Cursor interface {
	// Offset returns the number of objects to skip before iterating.
	Offset() uint64
	// Step advances the cursor by count objects.
	Step(count uint64)
}
// byID orders objects by their unique ID.
func byID(a, b *object.Object) bool {
	return a.ID() < b.ID()
}

// byValue orders objects by string value, breaking ties by unique ID.
func byValue(a, b *object.Object) bool {
	av, bv := a.String(), b.String()
	if av != bv {
		return av < bv
	}
	// the values match so we'll compare IDs, which are always unique.
	return byID(a, b)
}

// byExpires orders objects by expiration time, breaking ties by unique ID.
func byExpires(a, b *object.Object) bool {
	ae, be := a.Expires(), b.Expires()
	if ae != be {
		return ae < be
	}
	// the values match so we'll compare IDs, which are always unique.
	return byID(a, b)
}
// Collection represents a collection of geojson objects.
type Collection struct {
	objs     btree.Map[string, *object.Object]      // sorted by id
	spatial  rtree.RTreeGN[float32, *object.Object] // geospatially indexed
	values   *btree.BTreeG[*object.Object]          // sorted by value+id
	expires  *btree.BTreeG[*object.Object]          // sorted by ex+id
	weight   int // in-memory cost in bytes (see TotalWeight)
	points   int // total number of lat/lon points (see PointCount)
	objects  int // geometry count
	nobjects int // non-geometry count
}
// optsNoLock disables the btrees' internal locking.
// NOTE(review): presumably callers serialize access externally — confirm.
var optsNoLock = btree.Options{NoLocks: true}
// New creates an empty collection
func New() *Collection {
	col := &Collection{
		values:  btree.NewBTreeGOptions(byValue, optsNoLock),
		expires: btree.NewBTreeGOptions(byExpires, optsNoLock),
	}
	return col
}
// Count returns the number of objects in collection.
func (c *Collection) Count() int {
	return c.objects + c.nobjects
}
// StringCount returns the number of string values.
func (c *Collection) StringCount() int {
	return c.nobjects
}
// PointCount returns the number of points (lat/lon coordinates) in collection.
func (c *Collection) PointCount() int {
	return c.points
}
// TotalWeight calculates the in-memory cost of the collection in bytes.
func (c *Collection) TotalWeight() int {
	return c.weight
}
// Bounds returns the bounds of all the items in the collection.
// When the spatial index is empty the zero rectangle is returned.
func (c *Collection) Bounds() (minX, minY, maxX, maxY float64) {
	_, _, left := c.spatial.LeftMost()
	_, _, bottom := c.spatial.BottomMost()
	_, _, right := c.spatial.RightMost()
	_, _, top := c.spatial.TopMost()
	if left == nil {
		// Empty index: the other extremes are nil too.
		return
	}
	return left.Rect().Min.X, bottom.Rect().Min.Y,
		right.Rect().Max.X, top.Rect().Max.Y
}
// indexDelete removes an item from the spatial index; objects with empty
// geometry were never indexed and are skipped.
func (c *Collection) indexDelete(item *object.Object) {
	if !item.Geo().Empty() {
		c.spatial.Delete(rtreeItem(item))
	}
}
// indexInsert adds an item to the spatial index unless its geometry is empty.
func (c *Collection) indexInsert(item *object.Object) {
	if !item.Geo().Empty() {
		c.spatial.Insert(rtreeItem(item))
	}
}
// Multipliers that nudge a float32 conversion strictly toward or away from
// zero so the float32 bounds always contain the original float64 value.
const dRNDTOWARDS = (1.0 - 1.0/8388608.0) /* Round towards zero */
const dRNDAWAY = (1.0 + 1.0/8388608.0)    /* Round away from zero */

// rtreeValueDown converts d to float32, adjusting downward when the
// conversion rounded up, so the result is always <= d.
func rtreeValueDown(d float64) float32 {
	f := float32(d)
	if float64(f) <= d {
		return f
	}
	if d < 0 {
		return float32(d * dRNDAWAY)
	}
	return float32(d * dRNDTOWARDS)
}

// rtreeValueUp converts d to float32, adjusting upward when the conversion
// rounded down, so the result is always >= d.
func rtreeValueUp(d float64) float32 {
	f := float32(d)
	if float64(f) >= d {
		return f
	}
	if d < 0 {
		return float32(d * dRNDTOWARDS)
	}
	return float32(d * dRNDAWAY)
}
// rtreeItem builds the rtree entry (bounding box plus payload) for an object.
func rtreeItem(item *object.Object) (min, max [2]float32, data *object.Object) {
	min, max = rtreeRect(item.Rect())
	data = item
	return
}

// rtreeRect converts a float64 rect to a conservative float32 rect that
// fully contains the original.
func rtreeRect(rect geometry.Rect) (min, max [2]float32) {
	min[0] = rtreeValueDown(rect.Min.X)
	min[1] = rtreeValueDown(rect.Min.Y)
	max[0] = rtreeValueUp(rect.Max.X)
	max[1] = rtreeValueUp(rect.Max.Y)
	return
}
// Set adds or replaces an object in the collection and returns the fields
// array.
func (c *Collection) Set(obj *object.Object) (prev *object.Object) {
	prev, _ = c.objs.Set(obj.ID(), obj)
	c.setFill(prev, obj)
	return prev
}
// setFill removes prev's contributions to the secondary indexes and the
// counters (when prev is non-nil), then registers obj in their place.
func (c *Collection) setFill(prev, obj *object.Object) {
	if prev != nil {
		// Unregister the replaced object from whichever index held it.
		if prev.IsSpatial() {
			c.indexDelete(prev)
			c.objects--
		} else {
			c.values.Delete(prev)
			c.nobjects--
		}
		if prev.Expires() != 0 {
			c.expires.Delete(prev)
		}
		c.points -= prev.Geo().NumPoints()
		c.weight -= prev.Weight()
	}
	// Register the new object symmetrically.
	if obj.IsSpatial() {
		c.indexInsert(obj)
		c.objects++
	} else {
		c.values.Set(obj)
		c.nobjects++
	}
	if obj.Expires() != 0 {
		c.expires.Set(obj)
	}
	c.points += obj.Geo().NumPoints()
	c.weight += obj.Weight()
}
// Delete removes the object with the given id and returns it, or nil
// if no such object exists.
func (c *Collection) Delete(id string) (prev *object.Object) {
	prev, _ = c.objs.Delete(id)
	if prev == nil {
		return nil
	}
	if prev.IsSpatial() {
		// indexDelete already no-ops on empty geometries; the previous
		// extra Empty() check here duplicated that guard and was
		// inconsistent with setFill.
		c.indexDelete(prev)
		c.objects--
	} else {
		c.values.Delete(prev)
		c.nobjects--
	}
	if prev.Expires() != 0 {
		c.expires.Delete(prev)
	}
	c.points -= prev.Geo().NumPoints()
	c.weight -= prev.Weight()
	return prev
}
// Get returns the object with the given id, or nil if it does not
// exist. (There is no separate 'ok' return value.)
func (c *Collection) Get(id string) *object.Object {
	obj, _ := c.objs.Get(id)
	return obj
}
// Scan iterates through the collection in ID order (reverse order when
// desc is set), resuming past any cursor offset and honoring the
// deadline. It returns false if the iterator stopped early.
func (c *Collection) Scan(
	desc bool,
	cursor Cursor,
	deadline *deadline.Deadline,
	iterator func(obj *object.Object) bool,
) bool {
	var keepon = true
	var count uint64
	var offset uint64
	if cursor != nil {
		// Resume from the cursor position; record the skipped portion.
		offset = cursor.Offset()
		cursor.Step(offset)
	}
	iter := func(_ string, obj *object.Object) bool {
		count++
		if count <= offset {
			// Still skipping entries covered by the cursor offset.
			return true
		}
		nextStep(count, cursor, deadline)
		keepon = iterator(obj)
		return keepon
	}
	if desc {
		c.objs.Reverse(iter)
	} else {
		c.objs.Scan(iter)
	}
	return keepon
}
// ScanRange iterates through the collection over the half-open ID range
// [start, end) (or (end, start] descending), resuming past any cursor
// offset. It returns false if the iterator stopped early.
func (c *Collection) ScanRange(
	start, end string,
	desc bool,
	cursor Cursor,
	deadline *deadline.Deadline,
	iterator func(o *object.Object) bool,
) bool {
	var keepon = true
	var count uint64
	var offset uint64
	if cursor != nil {
		offset = cursor.Offset()
		cursor.Step(offset)
	}
	iter := func(_ string, o *object.Object) bool {
		count++
		if count <= offset {
			return true
		}
		nextStep(count, cursor, deadline)
		// Stop once the end boundary is reached (exclusive).
		if !desc {
			if o.ID() >= end {
				return false
			}
		} else {
			if o.ID() <= end {
				return false
			}
		}
		keepon = iterator(o)
		return keepon
	}
	if desc {
		c.objs.Descend(start, iter)
	} else {
		c.objs.Ascend(start, iter)
	}
	return keepon
}
// SearchValues iterates through the string-valued objects in value
// order (reverse order when desc is set), resuming past any cursor
// offset. It returns false if the iterator stopped early.
func (c *Collection) SearchValues(
	desc bool,
	cursor Cursor,
	deadline *deadline.Deadline,
	iterator func(o *object.Object) bool,
) bool {
	var keepon = true
	var count uint64
	var offset uint64
	if cursor != nil {
		offset = cursor.Offset()
		cursor.Step(offset)
	}
	iter := func(o *object.Object) bool {
		count++
		if count <= offset {
			return true
		}
		nextStep(count, cursor, deadline)
		keepon = iterator(o)
		return keepon
	}
	if desc {
		c.values.Reverse(iter)
	} else {
		c.values.Scan(iter)
	}
	return keepon
}
// SearchValuesRange iterates through the string-valued objects whose
// values fall strictly between start and end, resuming past any cursor
// offset. It returns false if the iterator stopped early.
func (c *Collection) SearchValuesRange(start, end string, desc bool,
	cursor Cursor,
	deadline *deadline.Deadline,
	iterator func(o *object.Object) bool,
) bool {
	var keepon = true
	var count uint64
	var offset uint64
	if cursor != nil {
		offset = cursor.Offset()
		cursor.Step(offset)
	}
	iter := func(o *object.Object) bool {
		count++
		if count <= offset {
			return true
		}
		nextStep(count, cursor, deadline)
		keepon = iterator(o)
		return keepon
	}
	// Sentinel objects used only as range boundaries in the value tree.
	pstart := object.New("", String(start), 0, field.List{})
	pend := object.New("", String(end), 0, field.List{})
	if desc {
		// descend range: keep going while item is still above pend.
		c.values.Descend(pstart, func(item *object.Object) bool {
			return bGT(c.values, item, pend) && iter(item)
		})
	} else {
		c.values.Ascend(pstart, func(item *object.Object) bool {
			return bLT(c.values, item, pend) && iter(item)
		})
	}
	return keepon
}

// bLT reports a < b using the tree's ordering.
func bLT(tr *btree.BTreeG[*object.Object], a, b *object.Object) bool { return tr.Less(a, b) }

// bGT reports a > b using the tree's ordering.
func bGT(tr *btree.BTreeG[*object.Object], a, b *object.Object) bool { return tr.Less(b, a) }
// ScanGreaterOrEqual iterates through the collection starting at the
// specified id (descending when desc is set), resuming past any cursor
// offset. It returns false if the iterator stopped early.
func (c *Collection) ScanGreaterOrEqual(id string, desc bool,
	cursor Cursor,
	deadline *deadline.Deadline,
	iterator func(o *object.Object) bool,
) bool {
	var keepon = true
	var count uint64
	var offset uint64
	if cursor != nil {
		offset = cursor.Offset()
		cursor.Step(offset)
	}
	iter := func(_ string, o *object.Object) bool {
		count++
		if count <= offset {
			return true
		}
		nextStep(count, cursor, deadline)
		keepon = iterator(o)
		return keepon
	}
	if desc {
		c.objs.Descend(id, iter)
	} else {
		c.objs.Ascend(id, iter)
	}
	return keepon
}
// geoSearch feeds every indexed object whose bounding box overlaps
// rect to iter. It returns false if iter halted the search early.
func (c *Collection) geoSearch(
	rect geometry.Rect,
	iter func(o *object.Object) bool,
) bool {
	keepon := true
	rmin, rmax := rtreeRect(rect)
	c.spatial.Search(rmin, rmax,
		func(_, _ [2]float32, o *object.Object) bool {
			keepon = iter(o)
			return keepon
		},
	)
	return keepon
}
// geoSparse runs a sparse quadrant search over obj's bounding box,
// reporting each matching object to iter at most once even when it
// spans multiple quadrants. It always returns true.
func (c *Collection) geoSparse(
	obj geojson.Object, sparse uint8,
	iter func(o *object.Object) (match, ok bool),
) bool {
	// Track objects already matched so each is reported only once.
	matches := make(map[string]bool)
	c.geoSparseInner(obj.Rect(), sparse, func(o *object.Object) (match, ok bool) {
		ok = true
		if !matches[o.ID()] {
			match, ok = iter(o)
			if match {
				matches[o.ID()] = true
			}
		}
		return match, ok
	})
	// The previous implementation declared an `alive` local that was
	// never written and returned it; the traversal result is discarded
	// and true is always returned, preserving the original behavior.
	return true
}
// geoSparseInner recursively splits rect into four quadrants `sparse`
// levels deep, then performs a plain geoSearch on each leaf rectangle.
// It returns false if iter signaled a hard stop (ok == false).
func (c *Collection) geoSparseInner(
	rect geometry.Rect, sparse uint8,
	iter func(o *object.Object) (match, ok bool),
) bool {
	if sparse > 0 {
		w := rect.Max.X - rect.Min.X
		h := rect.Max.Y - rect.Min.Y
		// Quadrant order: NW, NE, SW, SE.
		quads := [4]geometry.Rect{
			{
				Min: geometry.Point{X: rect.Min.X, Y: rect.Min.Y + h/2},
				Max: geometry.Point{X: rect.Min.X + w/2, Y: rect.Max.Y},
			},
			{
				Min: geometry.Point{X: rect.Min.X + w/2, Y: rect.Min.Y + h/2},
				Max: geometry.Point{X: rect.Max.X, Y: rect.Max.Y},
			},
			{
				Min: geometry.Point{X: rect.Min.X, Y: rect.Min.Y},
				Max: geometry.Point{X: rect.Min.X + w/2, Y: rect.Min.Y + h/2},
			},
			{
				Min: geometry.Point{X: rect.Min.X + w/2, Y: rect.Min.Y},
				Max: geometry.Point{X: rect.Max.X, Y: rect.Min.Y + h/2},
			},
		}
		for _, quad := range quads {
			if !c.geoSparseInner(quad, sparse-1, iter) {
				return false
			}
		}
		return true
	}
	alive := true
	c.geoSearch(rect, func(o *object.Object) bool {
		match, ok := iter(o)
		if !ok {
			// Hard stop requested by the caller.
			alive = false
			return false
		}
		// A match ends the search in this quadrant (sparse sampling:
		// one hit per leaf rectangle is enough).
		return !match
	})
	return alive
}
// Within returns all objects that are fully contained within an object
// or bounding box. Set obj to nil in order to use the bounding box.
// When sparse > 0 a quadrant-sampled search is used instead of a full
// scan. It returns false if the iterator stopped early.
func (c *Collection) Within(
	obj geojson.Object,
	sparse uint8,
	cursor Cursor,
	deadline *deadline.Deadline,
	iter func(o *object.Object) bool,
) bool {
	var count uint64
	var offset uint64
	if cursor != nil {
		offset = cursor.Offset()
		cursor.Step(offset)
	}
	if sparse > 0 {
		return c.geoSparse(obj, sparse, func(o *object.Object) (match, ok bool) {
			count++
			if count <= offset {
				// Skipping entries covered by the cursor offset.
				return false, true
			}
			nextStep(count, cursor, deadline)
			if match = o.Geo().Within(obj); match {
				ok = iter(o)
			}
			return match, ok
		})
	}
	return c.geoSearch(obj.Rect(), func(o *object.Object) bool {
		count++
		if count <= offset {
			return true
		}
		nextStep(count, cursor, deadline)
		if o.Geo().Within(obj) {
			return iter(o)
		}
		return true
	})
}
// Intersects returns all objects that intersect an object or bounding
// box. Set obj to nil in order to use the bounding box. When sparse > 0
// a quadrant-sampled search is used instead of a full scan. It returns
// false if the iterator stopped early.
func (c *Collection) Intersects(
	gobj geojson.Object,
	sparse uint8,
	cursor Cursor,
	deadline *deadline.Deadline,
	iter func(o *object.Object) bool,
) bool {
	var count uint64
	var offset uint64
	if cursor != nil {
		offset = cursor.Offset()
		cursor.Step(offset)
	}
	if sparse > 0 {
		return c.geoSparse(gobj, sparse, func(o *object.Object) (match, ok bool) {
			count++
			if count <= offset {
				// Skipping entries covered by the cursor offset.
				return false, true
			}
			nextStep(count, cursor, deadline)
			if match = o.Geo().Intersects(gobj); match {
				ok = iter(o)
			}
			return match, ok
		})
	}
	return c.geoSearch(gobj.Rect(), func(o *object.Object) bool {
		count++
		if count <= offset {
			return true
		}
		nextStep(count, cursor, deadline)
		if o.Geo().Intersects(gobj) {
			return iter(o)
		}
		return true
	},
	)
}
// Nearby iterates over indexed objects ordered by increasing geodetic
// distance from target's center, resuming past any cursor offset.
// It returns false if the iterator stopped early.
func (c *Collection) Nearby(
	target geojson.Object,
	cursor Cursor,
	deadline *deadline.Deadline,
	iter func(o *object.Object, dist float64) bool,
) bool {
	alive := true
	center := target.Center()
	var count uint64
	var offset uint64
	if cursor != nil {
		offset = cursor.Offset()
		cursor.Step(offset)
	}
	// Distance function anchored at the target's center point.
	distFn := geodeticDistAlgo([2]float64{center.X, center.Y})
	c.spatial.Nearby(
		func(min, max [2]float32, data *object.Object, item bool) float64 {
			return distFn(
				[2]float64{float64(min[0]), float64(min[1])},
				[2]float64{float64(max[0]), float64(max[1])},
				data, item,
			)
		},
		func(_, _ [2]float32, o *object.Object, dist float64) bool {
			count++
			if count <= offset {
				return true
			}
			nextStep(count, cursor, deadline)
			alive = iter(o, dist)
			return alive
		},
	)
	return alive
}
// nextStep performs the per-iteration bookkeeping shared by the scan
// loops: every yieldStep-th call it yields the processor and checks the
// deadline, and it advances the cursor by one.
func nextStep(step uint64, cursor Cursor, deadline *deadline.Deadline) {
	// The mask test assumes yieldStep is a power of two — confirm at
	// its declaration (not visible in this chunk).
	if step&(yieldStep-1) == (yieldStep - 1) {
		runtime.Gosched()
		// NOTE(review): deadline is not nil-checked here while cursor
		// is; this presumes Deadline.Check is safe on a nil receiver —
		// verify in the deadline package.
		deadline.Check()
	}
	if cursor != nil {
		cursor.Step(1)
	}
}
// ScanExpires iterates over the objects in the expiration index,
// calling iter for each until it returns false.
func (c *Collection) ScanExpires(iter func(o *object.Object) bool) {
	c.expires.Scan(iter)
}
|
package domain
import "errors"
// Recipes is a read-only registry mapping beverages to their recipes.
type Recipes struct {
	recipes map[Beverage]*Recipe
}

// CreateRecipes wraps the given beverage-to-recipe map in a Recipes
// registry. The map is stored as-is (not copied).
func CreateRecipes(recipes map[Beverage]*Recipe) *Recipes {
	return &Recipes{recipes: recipes}
}
// get looks up the recipe registered for beverage. It returns an error
// when no recipe (or a nil recipe) is registered under that key.
func (r *Recipes) get(beverage Beverage) (*Recipe, error) {
	if recipe := r.recipes[beverage]; recipe != nil {
		return recipe, nil
	}
	return nil, errors.New("Invalid_Beverage")
}
|
package gchalk
//go:generate stringer -type=ColorLevel

// ColorLevel represents the ANSI color level supported by the terminal.
// The values are ordered, so levels can be compared with < and >.
type ColorLevel int

const (
	// LevelNone represents a terminal that does not support color at all.
	LevelNone ColorLevel = 0
	// LevelBasic represents a terminal with basic 16 color support.
	LevelBasic ColorLevel = 1
	// LevelAnsi256 represents a terminal with 256 color support.
	LevelAnsi256 ColorLevel = 2
	// LevelAnsi16m represents a terminal with full true color support.
	LevelAnsi16m ColorLevel = 3
)
|
package binance
import (
"context"
"net/http"
)
// AssetDividendService fetches the asset dividend records
// (GET /sapi/v1/asset/assetDividend). The previous comment ("saving
// purchases") was a copy-paste from another service.
type AssetDividendService struct {
	c         *Client
	asset     *string
	startTime *int64
	endTime   *int64
	limit     *int
}
// Asset sets the asset parameter.
func (s *AssetDividendService) Asset(asset string) *AssetDividendService {
	s.asset = &asset
	return s
}

// Limit sets the limit parameter. When unset, Do defaults it to 20.
func (s *AssetDividendService) Limit(limit int) *AssetDividendService {
	s.limit = &limit
	return s
}

// StartTime sets the startTime parameter.
// If present, EndTime MUST be specified. The difference between EndTime - StartTime MUST be between 0-90 days.
func (s *AssetDividendService) StartTime(startTime int64) *AssetDividendService {
	s.startTime = &startTime
	return s
}

// EndTime sets the endTime parameter.
// If present, StartTime MUST be specified. The difference between EndTime - StartTime MUST be between 0-90 days.
func (s *AssetDividendService) EndTime(endTime int64) *AssetDividendService {
	s.endTime = &endTime
	return s
}
// Do sends the signed GET request and decodes the response.
// NOTE(review): `json` is not imported in this file's import block; it
// presumably resolves to a package-level JSON helper declared elsewhere
// in the package (a common go-binance convention) — verify.
func (s *AssetDividendService) Do(ctx context.Context) (*DividendResponseWrapper, error) {
	r := &request{
		method:   http.MethodGet,
		endpoint: "/sapi/v1/asset/assetDividend",
		secType:  secTypeSigned,
	}
	if s.asset != nil {
		r.setParam("asset", *s.asset)
	}
	if s.limit != nil {
		r.setParam("limit", *s.limit)
	} else {
		// Default page size when the caller did not set a limit.
		r.setParam("limit", 20)
	}
	if s.startTime != nil {
		r.setParam("startTime", *s.startTime)
	}
	if s.endTime != nil {
		r.setParam("endTime", *s.endTime)
	}
	data, err := s.c.callAPI(ctx, r)
	if err != nil {
		return nil, err
	}
	res := new(DividendResponseWrapper)
	err = json.Unmarshal(data, res)
	if err != nil {
		return nil, err
	}
	return res, nil
}
// DividendResponseWrapper represents the paged response envelope
// returned by AssetDividendService.Do.
// NOTE(review): Rows being a *[]DividendResponse (pointer to slice) is
// unidiomatic but part of the public API; left unchanged.
type DividendResponseWrapper struct {
	Rows  *[]DividendResponse `json:"rows"`
	Total int32               `json:"total"`
}

// DividendResponse represents a single dividend record.
type DividendResponse struct {
	ID     int64  `json:"id"`
	Amount string `json:"amount"`
	Asset  string `json:"asset"`
	Info   string `json:"enInfo"`
	Time   int64  `json:"divTime"`
	TranID int64  `json:"tranId"`
}
|
/*
hub.go
*/
package main
import (
"encoding/json"
"log"
"net/http"
"github.com/gorilla/websocket"
"github.com/tidwall/gjson"
)
// Hub tracks the set of connected clients and serializes connect and
// disconnect events through the register/unregister channels, which
// are consumed by the run loop. (Fixed gofmt spacing: `chan*Client`.)
type Hub struct {
	clients    []*Client
	register   chan *Client
	unregister chan *Client
}
// newHub constructs a Hub with no clients and unbuffered registration
// channels. (Fixed gofmt spacing: `&Hub {`.)
func newHub() *Hub {
	return &Hub{
		clients:    make([]*Client, 0),
		register:   make(chan *Client),
		unregister: make(chan *Client),
	}
}
// run dispatches connect and disconnect events for the hub. It loops
// forever and should be started in its own goroutine.
func (hub *Hub) run() {
	for {
		select {
		case client := <-hub.register:
			hub.onConnect(client)
		case client := <-hub.unregister:
			// BUG fix: this case previously read from hub.register
			// again, so disconnects were never processed; it also
			// called a misspelled method (onDisconect).
			hub.onDisconnect(client)
		}
	}
}
// HTTP Handler - upgrades request to websocket, if succeeded gets added
// to the list of clients
var upgrader = websocket.Upgrader{
	// Allow all origins.
	// NOTE(review): accepting every Origin disables cross-site
	// WebSocket hijacking protection — confirm this is intended.
	CheckOrigin: func(r *http.Request) bool { return true },
}
// handleWebSocket upgrades an HTTP request to a websocket connection,
// adds the new client to the hub's list, and hands it to the run loop.
// BUG fixes: the parameter type was misspelled http.Responsewriter, and
// the channel send used "<=" (a comparison) instead of "<-".
func (hub *Hub) handleWebSocket(w http.ResponseWriter, r *http.Request) {
	socket, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println(err)
		http.Error(w, "could not upgrade", http.StatusInternalServerError)
		return
	}
	client := newClient(hub, socket)
	// NOTE(review): this append runs on the HTTP handler goroutine
	// while run()/onDisconnect mutate hub.clients on another — a data
	// race worth serializing through the hub loop.
	hub.clients = append(hub.clients, client)
	hub.register <- client
	client.run()
}
// send marshals message as JSON and queues it on a single client's
// outbound channel. Marshal failures are logged and dropped (the
// original silently ignored the error and could send empty data).
func (hub *Hub) send(message interface{}, client *Client) {
	data, err := json.Marshal(message)
	if err != nil {
		log.Println("send: marshal:", err)
		return
	}
	client.outbound <- data
}
// broadcast marshals message as JSON and queues it on every client's
// outbound channel except the ignored one (typically the sender).
// Marshal failures are logged and dropped (the original silently
// ignored the error).
func (hub *Hub) broadcast(message interface{}, ignore *Client) {
	data, err := json.Marshal(message)
	if err != nil {
		log.Println("broadcast: marshal:", err)
		return
	}
	for _, c := range hub.clients {
		if c != ignore {
			c.outbound <- data
		}
	}
}
// onConnect - called from run when a client registers. Sends the new
// client its color and the current user list, then notifies everyone
// else that a user joined.
// NOTE(review): the `message` package used below does not appear in
// this file's import block — confirm it is imported.
func (hub *Hub) onConnect(client *Client) {
	log.Println("client connected: ", client.socket.RemoteAddr())
	// Build the list of all currently connected users.
	users := []message.User{}
	for _, c := range hub.clients {
		users = append(users, message.User{ID: c.id, Color: c.color})
	}
	// Notification
	hub.send(message.NewConnected(client.color, users), client)
	hub.broadcast(message.NewUserJoined(client.id, client.color), client)
}
// onDisconnect removes a disconnected client from the hub's client
// list and notifies the remaining clients that the user left.
// BUG fixes: `client.close` evaluated the method value without calling
// it, and an unknown client (index not found) made the slice-delete
// code panic on index -1.
func (hub *Hub) onDisconnect(client *Client) {
	log.Println("client disconnected: ", client.socket.RemoteAddr())
	client.close()
	// Locate the client in the slice.
	i := -1
	for j, c := range hub.clients {
		if c.id == client.id {
			i = j
			break
		}
	}
	if i < 0 {
		// Client already removed (or never registered); nothing to do.
		return
	}
	// Delete while releasing the trailing pointer for the GC.
	copy(hub.clients[i:], hub.clients[i+1:])
	hub.clients[len(hub.clients)-1] = nil
	hub.clients = hub.clients[:len(hub.clients)-1]
	// Notify everyone that the user left.
	hub.broadcast(message.NewUserLeft(client.id), nil)
}
// onMessage decodes an incoming client frame by its "kind" field and
// rebroadcasts it to the other clients, stamping the sender's id.
// BUG fix: the original referenced `msg` outside the branch that
// declared it (a compile error) and never broadcast Clear messages.
func (hub *Hub) onMessage(data []byte, client *Client) {
	kind := gjson.GetBytes(data, "kind").Int()
	switch kind {
	case message.KindStroke:
		var msg message.Stroke
		if json.Unmarshal(data, &msg) != nil {
			return
		}
		msg.UserID = client.id
		hub.broadcast(msg, client)
	case message.KindClear:
		var msg message.Clear
		if json.Unmarshal(data, &msg) != nil {
			return
		}
		msg.UserID = client.id
		hub.broadcast(msg, client)
	}
}
|
package main
import (
"math"
"math/rand"
)
// distanceBetweenPoints returns the Euclidean distance between two
// cities. math.Hypot replaces Sqrt(Pow(dx,2)+Pow(dy,2)): it is the
// idiomatic form and avoids intermediate overflow/underflow.
func distanceBetweenPoints(p1, p2 *City) float64 {
	return math.Hypot(p1.x-p2.x, p1.y-p2.y)
}
// calculateDistance returns the total length of the closed tour that
// visits the cities in slice order and returns to the first city.
// A tour of fewer than two cities has length 0 (the original indexed
// cities[len-1] and panicked on an empty slice).
func calculateDistance(cities []City) float64 {
	if len(cities) < 2 {
		return 0
	}
	total := 0.0
	for i := 0; i < len(cities)-1; i++ {
		total += distanceBetweenPoints(&cities[i], &cities[i+1])
	}
	// Close the loop back to the starting city.
	total += distanceBetweenPoints(&cities[len(cities)-1], &cities[0])
	return total
}
// createRandomSolution returns a Solution whose tour is a uniformly
// random permutation of cities, with its total distance precomputed.
func createRandomSolution(cities []City) Solution {
	order := make([]City, len(cities))
	// rand.Perm yields a random permutation of indexes into cities.
	for i, j := range rand.Perm(len(cities)) {
		order[i] = cities[j]
	}
	return Solution{order, calculateDistance(order)}
}
//func populationStats(population *Population) (float64, float64, float64) {
// best, worst, sum := population.solutions[0].distance, population.solutions[0].distance, 0.0
// for _, p := range population.solutions{
// if p.distance < best {
// best = p.distance
// }
// if p.distance > worst {
// worst = p.distance
// }
// sum += p.distance
// }
// average := sum / float64(len(population.solutions))
// return best, worst, average
//}
func averageDist(solutions []Solution) float64 {
sum := 0.0
for _, s := range solutions {
sum += s.Distance
}
return sum / float64(len(solutions))
} |
package camt
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00700201 is the XML document wrapper for the ISO 20022
// camt.007.002.01 (Request To Modify Payment) message.
type Document00700201 struct {
	XMLName xml.Name                `xml:"urn:iso:std:iso:20022:tech:xsd:camt.007.002.01 Document"`
	Message *RequestToModifyPayment `xml:"camt.007.002.01"`
}

// AddMessage allocates the document's message body and returns it so
// callers can populate it.
func (d *Document00700201) AddMessage() *RequestToModifyPayment {
	d.Message = new(RequestToModifyPayment)
	return d.Message
}
// Scope
// The Request To Modify Payment message is sent by a case creator/case assigner to a case assignee.
// This message is used to request the modification of characteristics of an original payment instruction.
// Usage
// The Request To Modify Payment message must be answered with a:
// - Resolution Of Investigation message with a positive final outcome when the case assignee can perform the requested modification
// - Resolution Of Investigation message with a negative final outcome when the case assignee may perform the requested modification but fails to do so (too late, irrevocable instruction, one requested element cannot be modified, ...)
// - Reject Case Assignment message when the case assignee is unable or not authorised to perform the requested modification
// - Notification Of Case Assignment message to indicate whether the case assignee will take on the case himself or reassign the case to a subsequent party in the payment processing chain.
// The Request To Modify Payment message covers one and only one original instruction at a time. If several original payment instructions need to be modified, then multiple Request To Modify Payment messages must be sent.
// The Request To Modify Payment message can be sent to request the modification of one or several elements of the original payment instruction. If many elements need to be modified, it is recommended to cancel the original payment instruction and initiate a new one.
// The Request To Modify Payment must be processed on an all or nothing basis. If one of the elements to be modified cannot be altered, the assignment must be rejected in full by means of a negative Resolution Of Investigation message. (See section on Resolution Of Investigation for more details.)
// The Request To Modify Payment message must never be sent to request the modification of the currency of the original payment instruction. If the currency is wrong, use Request To Cancel Payment message to cancel it and issue and a new payment instruction.
// The Request To Modify Payment message may be forwarded to subsequent case assignee(s).
// When a Request To Modify Payment message is used to decrease the amount of the original payment instruction, the modification will trigger a return of funds from the case assignee to the case creator. The assignee may indicate, within the Resolution Of Investigation message, the amount to be returned, the date it is or will be returned and the channel through which the return will be done.
// The Request To Modify Payment message must never be sent to request the increase of the amount of the original payment instruction. To increase the amount in a payment, the debtor can do one of the following:
// - Cancel the first payment using a Request To Cancel Payment message and make a new payment with a higher and correct amount.
// - Simply send a second payment with the supplementary amount.
// Depending on the requested modification(s) and the processing stage of the original payment instruction, the processing of a request to modify payment case may end with one of the following:
// - an Additional Payment Information message sent to the creditor of the original payment instruction
// - a Debit Authorisation Request message sent to the creditor of the original payment instruction
// - a Request To Cancel Payment message sent to a subsequent case assignee
// The Request To Modify Payment message can be sent to correct characteristics of an original payment instruction following receipt of an Unable To Apply message. In this scenario, the case identification will remain the same.
// The RequestToModifyPayment message has the following main characteristics:
// The case creator assigns a unique case identification. This information will be passed unchanged to all subsequent case assignee(s).
// Lowering the amount of an original payment instruction for which cover is provided by a separate instruction will systematically mean the modification of the whole transaction, including the cover. The case assignee performing the amount modification must initiate the return of funds in excess to the case creator.
// The modification of the agent's or agents' information on an original payment instruction for which cover is provided by a separate instruction will systematically mean the whole transaction is modified, i.e., the cover is executed through the agent(s) mentioned in the Request To Modify Payment message. The cover payment must not be modified separately.
// The modification of a payment instruction can be initiated by either the debtor or any subsequent agent in the payment processing chain.
// The case creator provides the information to be modified in line with agreements made with the case assignee. If the case assignee needs in turn to assign the case to a subsequent case assignee, the requested modification(s) must be in line with the agreement made with the next case assignee and a Notification Of Case Assignment message must be sent to the case assigner. Otherwise, the request to modify payment case must be rejected (by means of a negative Resolution Of Investigation message).
type RequestToModifyPayment struct {
	// Identifies the assignment.
	Assignment *iso20022.CaseAssignment `xml:"Assgnmt"`
	// Identifies the case.
	Case *iso20022.Case `xml:"Case"`
	// Identifies the Payment Transaction to modify.
	Underlying *iso20022.PaymentInstructionExtract `xml:"Undrlyg"`
	// The modification(s) requested on the original payment instruction.
	Modification *iso20022.RequestedModification `xml:"Mod"`
}
// AddAssignment allocates the Assignment component and returns it for
// population by the caller.
func (r *RequestToModifyPayment) AddAssignment() *iso20022.CaseAssignment {
	r.Assignment = new(iso20022.CaseAssignment)
	return r.Assignment
}

// AddCase allocates the Case component and returns it.
func (r *RequestToModifyPayment) AddCase() *iso20022.Case {
	r.Case = new(iso20022.Case)
	return r.Case
}

// AddUnderlying allocates the Underlying component and returns it.
func (r *RequestToModifyPayment) AddUnderlying() *iso20022.PaymentInstructionExtract {
	r.Underlying = new(iso20022.PaymentInstructionExtract)
	return r.Underlying
}

// AddModification allocates the Modification component and returns it.
func (r *RequestToModifyPayment) AddModification() *iso20022.RequestedModification {
	r.Modification = new(iso20022.RequestedModification)
	return r.Modification
}
|
package routes
import (
"fmt"
"github.com/gin-gonic/gin"
E "github.com/gowyu/yuw/exceptions"
M "github.com/gowyu/yuw/modules"
"html/template"
"strings"
)
type (
	// Routes is implemented by each route module: Tag names the module,
	// Put registers its handlers on the engine, and ToFunc exposes its
	// template helpers (may return nil).
	Routes interface {
		Tag() string
		Put(r *gin.Engine, toFunc map[string][]gin.HandlerFunc)
		ToFunc() template.FuncMap
	}
	// Rcfg is the ordered list of route modules.
	Rcfg []Routes
	// Rtpl holds template-related configuration values.
	Rtpl []interface{}
	// Rarr maps a module tag to its route-key -> handler-chain map.
	Rarr map[string]map[string][]gin.HandlerFunc
	// PoT bundles the three route registries passed around as RPoT.
	PoT struct {
		Rcfg *Rcfg
		Rtpl *Rtpl
		Rarr *Rarr
	}
)

// YuwSp separates the segments of a route key ("name->method->path").
const YuwSp string = "->"

// RPoT is the package-level route registry consumed by To and ToFunc.
var RPoT *PoT
// To wires the configured route modules into the gin engine and
// installs the no-route / no-method exception handlers.
func To(r *gin.Engine) {
	exc := E.NewExceptions()
	// Redirect unmatched routes and methods to the exception handlers.
	r.NoRoute(exc.NoRoute)
	r.NoMethod(exc.NoMethod)
	for _, to := range *RPoT.Rcfg {
		// Single lookup replaces the original's three map accesses;
		// `!ok` replaces the unidiomatic `ok == false`.
		handlers, ok := (*RPoT.Rarr)[to.Tag()]
		if !ok || len(handlers) == 0 {
			continue
		}
		to.Put(r, handlers)
	}
}
// ToFunc collects the template FuncMaps of every route module whose
// tag is contained in tpl and merges them into a single FuncMap.
func ToFunc(tpl ...interface{}) template.FuncMap {
	tplFunc := template.FuncMap{}
	util := M.NewUtils()
	for _, to := range *RPoT.Rcfg {
		// `!ok` replaces the unidiomatic `ok == false`.
		if ok, _ := util.StrContains(to.Tag(), tpl...); !ok {
			continue
		}
		// Call ToFunc once instead of twice (nil check + iteration).
		funcs := to.ToFunc()
		if funcs == nil {
			continue
		}
		for name, fn := range funcs {
			tplFunc[name] = fn
		}
	}
	return tplFunc
}
// ToLoggerWithFormatter returns a gin logging middleware that emits a
// multi-line report, but only for requests that failed (status != 200)
// or carried an error message; successful requests log nothing.
func ToLoggerWithFormatter() gin.HandlerFunc {
	return gin.LoggerWithFormatter(func(param gin.LogFormatterParams) (strLog string) {
		// Default message when gin recorded no error text.
		msg := E.TxTErr("yuw^m_logs_a")
		if param.ErrorMessage != "" {
			msg = param.ErrorMessage
		}
		if param.StatusCode != 200 || param.ErrorMessage != "" {
			strLog = fmt.Sprintf(`
---------------------------------------------------------------------------------------------------
%s » %s » %s
%s » %s » %s » %s
%s » %s
%s » %d
%s » %s
%s » %v
---------------------------------------------------------------------------------------------------
`,
				E.TxTErr("yuw^m_logs_b"),
				param.ClientIP,
				param.TimeStamp.Format("2006-01-02 15:04:05"),
				E.TxTErr("yuw^m_logs_c"),
				param.Method,
				param.Request.Proto,
				param.Path,
				E.TxTErr("yuw^m_logs_d"),
				param.Request.UserAgent(),
				E.TxTErr("yuw^m_logs_e"),
				param.StatusCode,
				E.TxTErr("yuw^m_logs_f"),
				param.Latency,
				E.TxTErr("yuw^m_logs_g"),
				msg,
			)
		}
		// Empty string: gin writes nothing for this request.
		return
	})
}
// Do registers each "name->method->path" route of toFunc on group g.
// Entries whose key does not split into exactly three parts, or whose
// method is not get/any/post, are skipped.
func Do(g *gin.RouterGroup, toFunc map[string][]gin.HandlerFunc) {
	for route, ctrl := range toFunc {
		parts := strings.Split(route, YuwSp)
		if len(parts) != 3 {
			continue
		}
		// Go switch cases do not fall through, so the original per-case
		// `continue` statements were redundant.
		switch strings.ToLower(parts[1]) {
		case "get":
			g.GET(parts[2], ctrl...)
		case "any":
			g.Any(parts[2], ctrl...)
		case "post":
			g.POST(parts[2], ctrl...)
		}
	}
}
|
/**
* (c) 2014, Caoimhe Chaos <caoimhechaos@protonmail.com>,
* Ancient Solutions. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Ancient Solutions nor the name of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package main
import (
"sync"
"code.google.com/p/goprotobuf/proto"
"github.com/caoimhechaos/blubberstore"
"github.com/ha/doozer"
)
// Service object for server-to-server manipulation of the blubber
// block directory. (Fixed typo: "Serivce".)
type BlubberS2SProto struct {
	// Doozer connection (if any).
	doozerConn *doozer.Conn

	// Pointers to the inner server state and its mutex.
	blockMap    *map[string]map[string]*blubberstore.ServerBlockStatus
	blockMapMtx *sync.RWMutex
	// Current directory revision; dereferenced by the RPC methods.
	blockMapVersion *uint64
}
// Create a new server2server object of the pointed-to block directory state.
// NOTE(review): blockMapVersion is left nil here; GetBlockDirectoryRevision
// and GetBlockDirectoryFull dereference it, so it must be assigned
// before any RPC is served — verify the caller does this.
func NewBlubberS2SProto(conn *doozer.Conn,
	blockmap *map[string]map[string]*blubberstore.ServerBlockStatus,
	blockmapmtx *sync.RWMutex) *BlubberS2SProto {
	return &BlubberS2SProto{
		doozerConn:  conn,
		blockMap:    blockmap,
		blockMapMtx: blockmapmtx,
	}
}
// Determine the current block directory version.
// NOTE(review): reads *b.blockMapVersion without holding blockMapMtx
// and panics if the pointer was never assigned (see the constructor).
func (b *BlubberS2SProto) GetBlockDirectoryRevision(
	empty *blubberstore.Empty, rev *blubberstore.BlockDirectoryRevision) error {
	rev.Revision = proto.Uint64(*b.blockMapVersion)
	return nil
}
// GetBlockDirectoryFull copies the full block directory, including the
// current revision, into rv. The read lock is held for the duration so
// the snapshot is consistent.
func (b *BlubberS2SProto) GetBlockDirectoryFull(
	empty *blubberstore.Empty, rv *blubberstore.BlockDirectoryRevision) error {
	// Make sure we're not being disturbed.
	b.blockMapMtx.RLock()
	defer b.blockMapMtx.RUnlock()

	rv.Revision = proto.Uint64(*b.blockMapVersion)
	for id, states := range *b.blockMap {
		status := new(blubberstore.BlockStatus)
		// []byte(id) copies the string data — equivalent to the
		// original make+copy pair, but idiomatic.
		status.BlockId = []byte(id)
		status.ReplicationFactor = proto.Uint32(uint32(len(states)))
		// Pre-size: we know exactly how many servers will be appended.
		status.Servers = make([]*blubberstore.ServerBlockStatus, 0, len(states))
		for _, sstat := range states {
			status.Servers = append(status.Servers, sstat)
		}
		rv.Status = append(rv.Status, status)
	}
	return nil
}
|
package user
import (
"fmt"
"octlink/mirage/src/modules/account"
"octlink/mirage/src/modules/session"
"octlink/mirage/src/utils/config"
"octlink/mirage/src/utils/merrors"
"octlink/mirage/src/utils/octlog"
"octlink/mirage/src/utils/octmysql"
"time"
)
// logger is the package-level log sink; it is nil until InitLog runs.
var logger *octlog.LogConfig

// InitLog initializes the package logger, writing to user.log at the
// given verbosity level. It must be called before any DB method that
// logs errors.
func InitLog(level int) {
	logger = octlog.InitLogConfig("user.log", level)
}
// User mirrors one row of the user table.
type User struct {
	Id    string `json:"id"`
	Name  string `json:"name"`
	State int    `json:"state"` // 1 for enable,0 for disabled
	Type  int    `json:"type"`  // 1 for terminal user
	Email string `json:"email"`
	PhoneNumber string `json:"phoneNumber"`
	// NOTE(review): the json tag "passord" is a typo for "password",
	// but fixing it would change the serialized wire format — confirm
	// no clients depend on it before correcting.
	Password   string `json:"passord"`
	LastLogin  int64  `json:"lastLogin"`
	CreateTime int64  `json:"createTime"`
	LastSync   int64  `json:"lastSync"`
	Desc       string `json:"desc"`
}
// Brief returns a minimal id/name view of the user for listings.
func (user *User) Brief() map[string]string {
	return map[string]string{
		"id":   user.Id,
		"name": user.Name,
	}
}

// GetUserCount returns the number of rows in the user table.
// NOTE(review): the Count error is silently discarded, so DB failures
// read as a count of zero.
func GetUserCount(db *octmysql.OctMysql) int {
	count, _ := db.Count(config.TB_USER, "")
	return count
}
// Add inserts the user into the user table. It returns 0 on success or
// merrors.ERR_DB_ERR on database failure.
// Security fix: the original interpolated user-supplied fields into the
// SQL with Sprintf (SQL injection); placeholders are used instead.
// (Assumes OctMysql.Exec forwards variadic args like QueryRow does —
// see FindUser — verify.)
func (user *User) Add(db *octmysql.OctMysql) int {
	sql := fmt.Sprintf("INSERT INTO %s (ID, U_Name, U_Type, "+
		"U_Email, U_PhoneNumber, U_Password, U_CreateTime, "+
		"U_Description) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
		config.TB_USER)
	_, err := db.Exec(sql, user.Id, user.Name, user.Type,
		user.Email, user.PhoneNumber, user.Password,
		user.CreateTime, user.Desc)
	if err != nil {
		logger.Errorf("insert db error %s", sql)
		return merrors.ERR_DB_ERR
	}
	return 0
}
// Delete removes the user's row by ID. It returns 0 on success or
// merrors.ERR_DB_ERR on database failure.
// Security fix: the ID is now bound as a placeholder instead of being
// Sprintf-interpolated into the SQL.
func (user *User) Delete(db *octmysql.OctMysql) int {
	sql := fmt.Sprintf("DELETE FROM %s WHERE ID=?", config.TB_USER)
	_, err := db.Exec(sql, user.Id)
	if err != nil {
		logger.Errorf("delete user %s error", user.Id)
		return merrors.ERR_DB_ERR
	}
	octlog.Debug(sql)
	return 0
}
// UpdateLogin stamps the user's last-login time with the current Unix
// time. Returns 0 on success or merrors.ERR_DB_ERR on failure.
// Security fix: values are bound as placeholders instead of being
// Sprintf-interpolated; the redundant int64 cast of Unix() is dropped.
func (user *User) UpdateLogin(db *octmysql.OctMysql) int {
	sql := fmt.Sprintf("UPDATE %s SET U_LastLogin=? WHERE ID=?;",
		config.TB_USER)
	_, err := db.Exec(sql, time.Now().Unix(), user.Id)
	if err != nil {
		logger.Errorf("update Login %s error %s",
			user.Name, sql)
		return merrors.ERR_DB_ERR
	}
	return 0
}
// UpdateSyncTime stamps the user's last-sync time with the current
// Unix time. Returns 0 on success or merrors.ERR_DB_ERR on failure.
// Security fix: values are bound as placeholders instead of being
// Sprintf-interpolated; the redundant int64 cast of Unix() is dropped.
func (user *User) UpdateSyncTime(db *octmysql.OctMysql) int {
	sql := fmt.Sprintf("UPDATE %s SET U_LastSync=? WHERE ID=?;",
		config.TB_USER)
	_, err := db.Exec(sql, time.Now().Unix(), user.Id)
	if err != nil {
		logger.Errorf("update last sync %s error %s",
			user.Name, sql)
		return merrors.ERR_DB_ERR
	}
	return 0
}
// Login authenticates the user by name and password. On success it
// records the login time and returns a new session; on failure it
// returns nil.
// BUG fix: the original scanned the row's ID into uid but then passed
// the (possibly empty) user.Id to NewSession; uid is now assigned.
// Security fix: name/password are bound as placeholders (QueryRow is
// demonstrably variadic elsewhere in this file) instead of being
// Sprintf-interpolated into the SQL.
func (user *User) Login(db *octmysql.OctMysql,
	password string) *session.Session {

	var uid string
	encPass := account.GetEncPassword(password)
	query := fmt.Sprintf(
		"SELECT ID FROM %s WHERE U_Name=? AND U_Password=?;",
		config.TB_USER)
	row := db.QueryRow(query, user.Name, encPass)
	err := row.Scan(&uid)
	if err != nil {
		logger.Errorf("Login user %s error %s", user.Name, err.Error())
		return nil
	}
	user.Id = uid
	user.UpdateLogin(db)
	return session.NewSession(db, user.Id, user.Name, account.USER_TYPE_USER)
}
func FindUserByName(db *octmysql.OctMysql, name string) *User {
row := db.QueryRow("SELECT ID,U_Name,U_State,U_Type,U_Email,U_PhoneNumber,"+
"U_Description,U_CreateTime,U_LastLogin,U_LastSync "+
"FROM tb_user WHERE U_Name = ? LIMIT 1", name)
user := new(User)
err := row.Scan(&user.Id, &user.Name, &user.State,
&user.Type, &user.Email, &user.PhoneNumber, &user.Desc,
&user.CreateTime, &user.LastLogin, &user.LastSync)
if err != nil {
logger.Errorf("Find account %s error %s", name, err.Error())
return nil
}
octlog.Debug("id %s, name :%s", user.Id, user.Name)
return user
}
// FindUser looks a user up by primary-key id, returning nil when the row
// is missing or the scan fails.
func FindUser(db *octmysql.OctMysql, id string) *User {
	u := &User{}
	row := db.QueryRow("SELECT ID,U_Name,U_State,U_Type,U_Email,U_PhoneNumber,"+
		"U_Description,U_CreateTime,U_LastLogin,U_LastSync "+
		"FROM tb_user WHERE ID = ? LIMIT 1", id)
	if err := row.Scan(&u.Id, &u.Name, &u.State,
		&u.Type, &u.Email, &u.PhoneNumber, &u.Desc,
		&u.CreateTime, &u.LastLogin, &u.LastSync); err != nil {
		logger.Errorf("Find account %s error %s", id, err.Error())
		return nil
	}
	octlog.Debug("id %s, name :%s", u.Id, u.Name)
	return u
}
|
package main
import (
"encoding/hex"
"fmt"
"io/ioutil"
"os"
shellcode "github.com/brimstone/go-shellcode"
)
// This program runs the shellcode from: https://www.exploit-db.com/exploits/40245/
//
// As the shellcode is 32 bit, this must also be compiled as a 32 bit go application
// via "set GOARCH=386"
// main loads shellcode from a file path or a hex-encoded argument and
// executes it.
func main() {
	if len(os.Args) != 2 {
		fmt.Printf("Must have shellcode of file\n")
		os.Exit(1)
	}
	// First, try to read the arg as a file
	sc, err := ioutil.ReadFile(os.Args[1])
	if os.IsNotExist(err) {
		// If that fails, try to interpret the arg as hex encoded
		sc, err = hex.DecodeString(os.Args[1])
	}
	// Bug fix: the original only checked the hex-decode error. A read
	// failure other than "not exist" (e.g. permission denied) fell
	// through and ran nil shellcode.
	if err != nil {
		fmt.Printf("Error decoding arg 1: %s\n", err)
		os.Exit(1)
	}
	shellcode.Run(sc)
}
|
package testing
import (
"fmt"
"github.com/gorilla/mux"
qupHttp "github.com/queueup-dev/qup-io/v2/http"
types "github.com/queueup-dev/qup-types"
"io/ioutil"
"log"
"net/http"
"sync"
"testing"
"time"
)
// Input kinds an HttpAssertion can target on an incoming request.
const (
	inputTypeRequestBody = "REQUEST_BODY"
	inputTypeRequestHeader = "REQUEST_HEADER"
)
// Logger is the minimal logging interface the mock API depends on.
type Logger interface {
	Print(...interface{})
}
// StdLogger adapts the standard library logger to the Logger interface.
type StdLogger int
// Print forwards its arguments to the standard library logger.
func (l StdLogger) Print(v ...interface{}) {
	log.Print(v...)
}
// HttpMockBuilder accumulates the mocks registered via When/RespondWith.
type HttpMockBuilder struct {
	mocks []*HttpMock
}
// When registers a mock for the given route and method and returns it so
// the caller can chain RespondWith.
func (m *HttpMockBuilder) When(uri string, method string) *HttpMock {
	mock := &HttpMock{
		routeUri:    uri,
		routeMethod: method,
	}
	m.mocks = append(m.mocks, mock)
	return mock
}
// HttpMock pairs a route (URI + method) with the canned response to serve.
// response stays nil until RespondWith is called.
type HttpMock struct {
	routeUri string
	routeMethod string
	response *HttpMockResponse
}
// RespondWith sets the canned response (body, headers, status code) the
// mock serves when its route is hit.
func (h *HttpMock) RespondWith(body types.PayloadWriter, headers qupHttp.Headers, statusCode int) {
	h.response = &HttpMockResponse{
		headers:    headers,
		body:       body,
		statusCode: statusCode,
	}
}
// HttpMockResponse is the canned reply a mock writes: headers, payload
// body, and HTTP status code.
type HttpMockResponse struct {
	headers qupHttp.Headers
	body types.PayloadWriter
	statusCode int
}
// HttpAssertBuilder accumulates request assertions and carries the test
// handle and logger used to report failures.
type HttpAssertBuilder struct {
	httpAssertions []*HttpAssertion
	t *testing.T
	log Logger
}
// That registers a new assertion for the given route and method and
// returns it so the caller can attach expectations.
func (builder *HttpAssertBuilder) That(uri string, method string) *HttpAssertion {
	a := &HttpAssertion{
		routeUri:    uri,
		routeMethod: method,
		assertion:   &AssertInstance{},
	}
	builder.httpAssertions = append(builder.httpAssertions, a)
	return a
}
// execute runs every registered assertion against input, logging and
// failing the test for each one that does not hold. It returns true only
// when all assertions passed.
func (builder *HttpAssertBuilder) execute(input interface{}) bool {
	ok := true
	for _, httpAssertion := range builder.httpAssertions {
		if !httpAssertion.assertion.Execute(input) {
			builder.log.Print("failed assertion")
			builder.t.Fail()
			// Bug fix: the original returned true unconditionally, so the
			// boolean result never reflected assertion failures.
			ok = false
		}
	}
	return ok
}
// HttpAssertion binds an assertion to a route and selects which part of
// the request (body or a named header) is fed to it.
type HttpAssertion struct {
	routeUri string
	routeMethod string
	inputValue string
	inputFilter string
	assertion *AssertInstance
}
// RequestBody makes the assertion target the raw request body.
func (h *HttpAssertion) RequestBody() *AssertInstance {
	h.inputValue = inputTypeRequestBody
	return h.assertion
}
// RequestHeader makes the assertion target the named request header.
func (h *HttpAssertion) RequestHeader(header string) *AssertInstance {
	h.inputValue = inputTypeRequestHeader
	h.inputFilter = header
	return h.assertion
}
// MockAPI serves registered mocks and runs registered assertions on the
// requests it receives; waitGroup lets callers wait for expected hits.
type MockAPI struct {
	router *mux.Router
	logger Logger
	waitGroup *sync.WaitGroup
	assertBuilder HttpAssertBuilder
	mockBuilder HttpMockBuilder
}
// Assert exposes the assertion builder for registering expectations.
func (api *MockAPI) Assert() *HttpAssertBuilder {
	return &api.assertBuilder
}
// Mock exposes the mock builder for registering canned responses.
func (api *MockAPI) Mock() *HttpMockBuilder {
	return &api.mockBuilder
}
// composeAssertion wraps an HttpAssertion in an HTTP handler that feeds
// the configured request part (body or a named header) to the assertion
// and fails the test on mismatch.
func (api *MockAPI) composeAssertion(assertion *HttpAssertion) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		if assertion.routeMethod != r.Method {
			// not for this method, just return
			return
		}
		switch assertion.inputValue {
		case inputTypeRequestBody:
			// Read error is deliberately ignored: an unreadable body
			// yields an empty string for the assertion to judge.
			read, _ := ioutil.ReadAll(r.Body)
			if !assertion.assertion.Execute(string(read)) {
				api.assertBuilder.t.Fail()
			}
		case inputTypeRequestHeader:
			// NOTE(review): direct map access needs the canonical header
			// key (e.g. "Content-Type"); confirm callers pass canonical
			// form — r.Header.Get would be more forgiving.
			header := r.Header[assertion.inputFilter]
			if header == nil {
				api.assertBuilder.t.Fail()
				api.logger.Print(fmt.Sprintf("Unable to find header %s in the request", assertion.inputFilter))
				return
			}
			// Only the first header value is asserted.
			if !assertion.assertion.Execute(header[0]) {
				api.assertBuilder.t.Fail()
			}
		}
	}
}
// composeMock wraps an HttpMock in an HTTP handler that writes the mock's
// canned headers, status code, and body for matching requests.
func (api *MockAPI) composeMock(mock *HttpMock) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		if mock.routeMethod != r.Method {
			// not for this method, just return
			return
		}
		// Bug fix: a mock registered via When() without RespondWith() left
		// mock.response nil and the writes below panicked.
		if mock.response == nil {
			w.WriteHeader(http.StatusNotImplemented)
			return
		}
		for key, val := range mock.response.headers {
			w.Header().Add(key, val)
		}
		w.WriteHeader(mock.response.statusCode)
		// Marshal error ignored: a failed marshal writes an empty body.
		response, _ := mock.response.body.Marshal()
		w.Write(response.([]byte))
	}
}
// compose chains a route's callbacks (assertions then mocks) into one
// handler and registers one expected hit on the wait group.
// NOTE(review): Add(1) runs once per route at composition time but Done()
// runs on every request — a route hit more than once drives the counter
// negative and panics; confirm each route is expected exactly once.
func (api *MockAPI) compose(callbacks []func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
	api.waitGroup.Add(1)
	return func(w http.ResponseWriter, r *http.Request) {
		for _, callback := range callbacks {
			callback(w, r)
		}
		api.waitGroup.Done()
	}
}
// Listen builds the router from the registered assertions and mocks
// (assertions run before mocks on the same route) and serves it on the
// given address, blocking until the server stops.
func (api *MockAPI) Listen(address string) error {
	r := mux.NewRouter()
	routes := map[string][]func(http.ResponseWriter, *http.Request){}
	for _, httpAssertion := range api.assertBuilder.httpAssertions {
		routes[httpAssertion.routeUri] = append(routes[httpAssertion.routeUri], api.composeAssertion(httpAssertion))
	}
	for _, httpMock := range api.mockBuilder.mocks {
		routes[httpMock.routeUri] = append(routes[httpMock.routeUri], api.composeMock(httpMock))
	}
	for route, callbacks := range routes {
		r.HandleFunc(route, api.compose(callbacks))
	}
	// Fix: the router is already installed as srv.Handler below. The old
	// extra http.Handle("/", r) on the global DefaultServeMux was never
	// used and made a second Listen call panic on duplicate registration.
	srv := &http.Server{
		Handler: r,
		Addr:    address,
		// Good practice: enforce timeouts for servers you create!
		WriteTimeout: 15 * time.Second,
		ReadTimeout:  15 * time.Second,
	}
	return srv.ListenAndServe()
}
// NewMockApi builds a MockAPI wired to the given test, logger, and wait
// group; register mocks/assertions on it, then call Listen.
func NewMockApi(t *testing.T, l Logger, wg *sync.WaitGroup) MockAPI {
	return MockAPI{
		logger: l,
		waitGroup: wg,
		assertBuilder: HttpAssertBuilder{
			log: l,
			t: t,
			httpAssertions: []*HttpAssertion{},
		},
		mockBuilder: HttpMockBuilder{
			mocks: []*HttpMock{},
		},
	}
}
|
package main
import (
"bytes"
"net"
"time"
"fmt"
"github.com/nkbai/goice/ice"
"github.com/nkbai/log"
)
// Transport configuration flavors accepted by setupIcePair.
const (
	typHost = 1
	typStun = 2
	typTurn = 3
)
// icecb collects ICE callbacks for one transport: received datagrams on
// data and the negotiation outcome on iceresult.
type icecb struct {
	data chan []byte
	iceresult chan error
	name string
}
// newicecb builds a callback sink for the named transport. Both channels
// are buffered (capacity 1) so a single event does not block the caller.
func newicecb(name string) *icecb {
	cb := &icecb{name: name}
	cb.data = make(chan []byte, 1)
	cb.iceresult = make(chan error, 1)
	return cb
}
// OnReceiveData is invoked by the ICE transport when a datagram arrives;
// it forwards the payload for the main loop to consume.
// NOTE(review): the channel has capacity 1 — a second packet before the
// consumer reads would block this callback; confirm the transport
// tolerates that in this demo.
func (c *icecb) OnReceiveData(data []byte, from net.Addr) {
	c.data <- data
}
// OnIceComplete reports the outcome of the ICE negotiation (nil on
// success) to the iceresult channel.
func (c *icecb) OnIceComplete(result error) {
	c.iceresult <- result
	log.Trace(fmt.Sprintf("%s negotiation complete", c.name))
}
// setupIcePair creates two ICE stream transports that share one transport
// configuration selected by typ (host-only, STUN, or TURN).
func setupIcePair(typ int) (s1, s2 *ice.StreamTransport, err error) {
	var cfg *ice.TransportConfig
	switch typ {
	case typHost:
		cfg = ice.NewTransportConfigHostonly()
	case typStun:
		cfg = ice.NewTransportConfigWithStun("182.254.155.208:3478")
	case typTurn:
		cfg = ice.NewTransportConfigWithTurn("182.254.155.208:3478", "bai", "bai")
	}
	if s1, err = ice.NewIceStreamTransport(cfg, "s1"); err != nil {
		return s1, nil, err
	}
	s2, err = ice.NewIceStreamTransport(cfg, "s2")
	log.Trace("-----------------------------------------")
	return s1, s2, err
}
func main() {
s1, s2, err := setupIcePair(typTurn)
if err != nil {
log.Crit(err.Error())
return
}
cb1 := newicecb("s1")
cb2 := newicecb("s2")
s1.SetCallBack(cb1)
s2.SetCallBack(cb2)
err = s1.InitIce(ice.SessionRoleControlling)
if err != nil {
log.Crit(err.Error())
return
}
err = s2.InitIce(ice.SessionRoleControlled)
if err != nil {
log.Crit(err.Error())
return
}
rsdp, err := s2.EncodeSession()
if err != nil {
log.Crit(err.Error())
return
}
err = s1.StartNegotiation(rsdp)
if err != nil {
log.Crit(err.Error())
return
}
lsdp, err := s1.EncodeSession()
if err != nil {
log.Crit(err.Error())
return
}
err = s2.StartNegotiation(lsdp)
if err != nil {
log.Crit(err.Error())
return
}
select {
case <-time.After(10 * time.Second):
log.Error("s1 negotiation timeout")
return
case err = <-cb1.iceresult:
if err != nil {
log.Error(fmt.Sprintf("s1 negotiation failed %s", err))
return
}
}
select {
case <-time.After(10 * time.Second):
log.Error("s2 negotiation timeout")
return
case err = <-cb2.iceresult:
if err != nil {
log.Error(fmt.Sprintf("s2 negotiation failed %s", err))
return
}
}
s1data := []byte("hello,s2")
s2data := []byte("hello,s1")
err = s1.SendData(s1data)
if err != nil {
log.Crit(err.Error())
return
}
err = s2.SendData(s2data)
if err != nil {
log.Crit(err.Error())
return
}
select {
case <-time.After(10 * time.Second):
log.Error("s2 recevied timeout")
return
case data := <-cb2.data:
if !bytes.Equal(data, s1data) {
log.Error(fmt.Sprintf("s2 recevied error ,got %s", string(data)))
return
}
}
select {
case <-time.After(10 * time.Second):
log.Error("s1 recevied timeout")
return
case data := <-cb1.data:
if !bytes.Equal(data, s2data) {
log.Error(fmt.Sprintf("s1 recevied error ,got %s", string(data)))
return
}
}
log.Info("ice complete...")
}
|
package main
import (
"fmt"
"github.com/Cloud-Foundations/Dominator/lib/log"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
"github.com/Cloud-Foundations/Dominator/proto/sub"
"github.com/Cloud-Foundations/Dominator/sub/client"
)
// deleteSubcommand deletes the paths named in args on the sub, using a
// client obtained from getSubClient.
func deleteSubcommand(args []string, logger log.DebugLogger) error {
	srpcClient := getSubClient(logger)
	defer srpcClient.Close()
	if err := deletePaths(srpcClient, args); err != nil {
		// %w keeps the underlying error inspectable via errors.Is/As
		// (the old %s flattened the chain).
		return fmt.Errorf("error deleting: %w", err)
	}
	return nil
}
// deletePaths issues one update request that deletes the listed paths and
// waits for the sub to apply it.
func deletePaths(srpcClient *srpc.Client, pathnames []string) error {
	request := sub.UpdateRequest{
		PathsToDelete: pathnames,
		Wait:          true,
	}
	var reply sub.UpdateResponse
	return client.CallUpdate(srpcClient, request, &reply)
}
|
package remark
import (
"time"
"github.com/go-jar/goerror"
"github.com/go-jar/gohttp/query"
"blog/entity"
"blog/errno"
)
// CreateAction handles remark creation: it validates the request params,
// inserts the remark, and responds with the new remark id plus the
// request trace id. Errors are reported through context.ApiData.Err.
func (rc *RemarkController) CreateAction(context *RemarkContext) {
	remarkEntity, e := rc.parseCreateActionParams(context)
	if e != nil {
		context.ApiData.Err = e
		return
	}
	// Persist the remark; a MySQL failure surfaces as a system error.
	ids, err := context.remarkSvc.Insert(remarkEntity)
	if err != nil {
		context.ApiData.Err = goerror.New(errno.ESysMysqlError, err.Error())
		return
	}
	context.ApiData.Data = map[string]interface{}{
		"RemarkId": ids[0],
		"RequestId": context.TraceId,
	}
}
// parseCreateActionParams validates the query values and builds a
// RemarkEntity stamped with the current time. ArticleId, Nickname and
// Content are required; InitRemarkId and NicknameReplied are optional
// (used when replying to another remark).
func (rc *RemarkController) parseCreateActionParams(context *RemarkContext) (*entity.RemarkEntity, *goerror.Error) {
	remarkEntity := &entity.RemarkEntity{
		CreatedTime: time.Now(),
		UpdatedTime: time.Now(),
	}
	qs := query.NewQuerySet()
	qs.Int64Var(&remarkEntity.ArticleId, "ArticleId", true, errno.ECommonInvalidArg, "invalid ArticleId", query.CheckInt64GreaterEqual0)
	qs.StringVar(&remarkEntity.Nickname, "Nickname", true, errno.ECommonInvalidArg, "invalid Nickname", query.CheckStringNotEmpty)
	qs.StringVar(&remarkEntity.Content, "Content", true, errno.ECommonInvalidArg, "invalid Content", query.CheckStringNotEmpty)
	qs.Int64Var(&remarkEntity.InitRemarkId, "InitRemarkId", false, errno.ECommonInvalidArg, "invalid InitRemarkId", query.CheckInt64GreaterEqual0)
	qs.StringVar(&remarkEntity.NicknameReplied, "NicknameReplied", false, errno.ECommonInvalidArg, "invalid NicknameReplied", query.CheckStringNotEmpty)
	if err := qs.Parse(context.QueryValues); err != nil {
		context.ErrorLog([]byte("RemarkController.parseCreateActionParams"), []byte(err.Error()))
		return nil, err
	}
	return remarkEntity, nil
}
|
// Copyright (C) 2021 Cisco Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vpplink
import (
"fmt"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/generated/bindings/pbl"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/types"
)
// AddPblClient creates or updates a port-based load-balancer (PBL) client
// in VPP and returns the id VPP assigned. The assigned id is also written
// back into pblClient.ID.
func (v *VppLink) AddPblClient(pblClient *types.PblClient) (id uint32, err error) {
	client := pbl.NewServiceClient(v.GetConnection())
	// Translate the port ranges into their VPP API representation.
	portRanges := make([]pbl.PblPortRange, 0, len(pblClient.PortRanges))
	for _, r := range pblClient.PortRanges {
		portRanges = append(portRanges, pbl.PblPortRange{
			Start: r.Start,
			End: r.End,
			Iproto: types.ToVppIPProto(r.Proto),
		})
	}
	response, err := client.PblClientUpdate(v.GetContext(), &pbl.PblClientUpdate{
		Client: pbl.PblClient{
			ID: pblClient.ID,
			TableID: pblClient.TableId,
			Addr: types.ToVppAddress(pblClient.Addr),
			Paths: pblClient.Path.ToFibPath(false),
			Flags: 0,
			PortRanges: portRanges,
		},
	})
	if err != nil {
		return 0, fmt.Errorf("failed to update Pbl Client: %w", err)
	}
	pblClient.ID = response.ID
	return response.ID, nil
}
// DelPblClient removes the PBL client identified by id from VPP.
func (v *VppLink) DelPblClient(id uint32) error {
	request := &pbl.PblClientDel{ID: id}
	if _, err := pbl.NewServiceClient(v.GetConnection()).PblClientDel(v.GetContext(), request); err != nil {
		return fmt.Errorf("failed to delete Pbl Client: %w", err)
	}
	return nil
}
|
package user_test
import (
"bytes"
"encoding/json"
"errors"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/golovers/gotest/user"
gomock "github.com/golang/mock/gomock"
)
// TestHandleRegister exercises the Register HTTP handler against a
// gomock-backed service, covering the success and internal-error paths.
func TestHandleRegister(t *testing.T) {
	srv := NewMockservice(gomock.NewController(t))
	handler := user.NewHandler(srv)
	user1 := &user.User{
		ID: "123",
		Name: "jack",
	}
	serverErr := errors.New("internal server error")
	// expect captures the status code and response body we require.
	type expect struct {
		code int
		body string
	}
	// NOTE(review): tearDown actually installs the mock expectation
	// before the call — the name suggests cleanup; consider renaming.
	testCases := []struct {
		name string
		tearDown func()
		input *user.User
		expect expect
	}{
		{
			name: "register success",
			tearDown: func() {
				srv.EXPECT().Register(gomock.Any(), user1).Times(1).Return(nil)
			},
			input: user1,
			expect: expect{
				code: http.StatusOK,
				body: `{"id":"123","name":"jack"}`,
			},
		},
		{
			name: "register failed",
			tearDown: func() {
				srv.EXPECT().Register(gomock.Any(), user1).Times(1).Return(serverErr)
			},
			input: user1,
			expect: expect{
				code: http.StatusInternalServerError,
				body: serverErr.Error(),
			},
		},
	}
	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			test.tearDown()
			w := httptest.NewRecorder()
			// Encode the input user as the JSON request body.
			var inputBody bytes.Buffer
			if err := json.NewEncoder(&inputBody).Encode(test.input); err != nil {
				t.Error(err)
			}
			r, err := http.NewRequest(http.MethodPost, "", &inputBody)
			if err != nil {
				t.Error(err)
			}
			handler.Register(w, r)
			if w.Code != test.expect.code {
				t.Errorf("got code=%d, wants code=%d", w.Code, test.expect.code)
			}
			// Trailing newline from the JSON encoder is trimmed before
			// comparing bodies.
			gotBody := strings.TrimSpace(w.Body.String())
			if gotBody != test.expect.body {
				t.Errorf("got body=%s, wants body=%s", gotBody, test.expect.body)
			}
		})
	}
}
|
package frontend
//go:generate esc -o static.go -pkg frontend -ignore=(.go|.swp) -modtime 0 .
|
/*
Copyright 2014 GoPivotal (UK) Limited.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package kernel encapsulates the operating system features required
to create a container.
*/
package kernel
// ResourceController provides containment for a specific type of resource.
type ResourceController interface {
	// Init prepares the controller for the container described by rCtx.
	Init(rCtx ResourceContext) error
}
// ResourceContext provides configuration for resource controllers.
type ResourceContext interface {
	// GetRootFS returns the path of the root file system. A root file system is an
	// arbitrary filesystem directory.
	GetRootFS() string
}
// resourceContext is the default ResourceContext implementation, holding
// only the root filesystem path.
type resourceContext struct {
	rootfs string
}
// GetRootFS returns the configured root filesystem path.
func (rCtx *resourceContext) GetRootFS() string {
	return rCtx.rootfs
}
// CreateResourceContext creates a ResourceContext with the given root file system.
func CreateResourceContext(rootfs string) *resourceContext {
	// Named field for clarity over the positional literal.
	return &resourceContext{rootfs: rootfs}
}
|
package main
import (
_ "hello_api/routers"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
"fmt"
_ "github.com/go-sql-driver/mysql"
"hello_api/kafka"
)
// init configures file logging, the log level (falling back to debug on a
// missing/invalid config value), and registers + syncs the default MySQL
// database from app.conf.
func init() {
	beego.SetLogger("file", `{"filename":"logs/test.log"}`)
	logLevel, err := beego.AppConfig.Int("logLevel")
	if nil != err {
		logLevel = beego.LevelDebug
	}
	beego.SetLevel(logLevel)
	dataSource := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&loc=Local",
		beego.AppConfig.String("DBUserName"), beego.AppConfig.String("DBPassword"),
		beego.AppConfig.String("DBAddr"), beego.AppConfig.String("DBName"))
	maxIdle := 30
	maxConn := 30
	// Bug fix: these errors were previously ignored; a bad DSN or a failed
	// schema sync would only surface as confusing runtime failures later.
	// Database setup is a hard prerequisite, so fail fast.
	if err := orm.RegisterDataBase("default", "mysql", dataSource, maxIdle, maxConn); err != nil {
		panic(err)
	}
	if err := orm.RunSyncdb("default", false, true); err != nil {
		panic(err)
	}
}
// main enables swagger static serving in dev mode, starts the kafka
// consumer, and runs the beego HTTP server.
func main() {
	if beego.BConfig.RunMode == "dev" {
		beego.BConfig.WebConfig.DirectoryIndex = true
		beego.BConfig.WebConfig.StaticDir["/swagger"] = "swagger"
	}
	//test()
	//models.AddApp("kevin","bbbbbbbbbb",1)
	//models.DeleteApp("kevin")
	//models.UpdateApp("kevin","2222222222")
	// Bug fix: kafka.Start() was placed after beego.Run(); beego.Run()
	// blocks until shutdown, so the kafka consumer never started.
	// NOTE(review): assumes kafka.Start returns (or spawns its own
	// goroutine) rather than blocking — verify against the kafka package.
	kafka.Start()
	beego.Run()
}
|
package main
import (
help "hello/Helper"
"hello/ProdService"
"github.com/gin-gonic/gin"
"github.com/micro/go-micro/registry"
"github.com/micro/go-micro/web"
"github.com/micro/go-plugins/registry/consul"
)
// main registers a gin-based product service with consul via go-micro
// and serves POST /v1/prods.
func main() {
	// Consul registry: address of the consul agent used for discovery.
	consulreg := consul.NewRegistry(
		registry.Addrs("192.168.238.131:8500"),
	)
	// gin router with the v1 API group.
	ginRouter := gin.Default()
	vlGroup := ginRouter.Group("/v1")
	{
		vlGroup.Handle("POST", "/prods", func(context *gin.Context) {
			var pr help.ProdsRequest
			// Fall back to a default page size when binding fails or the
			// requested size is not positive.
			if err := context.Bind(&pr); err != nil || pr.Size <= 0 {
				// Fix: keyed literal instead of the fragile positional
				// {2}, which breaks if fields are added or reordered.
				pr = help.ProdsRequest{Size: 2}
			}
			context.JSON(200,
				gin.H{
					"data": ProdService.NewProdList(pr.Size)})
		})
	}
	// Wrap the gin router in a go-micro web service registered in consul.
	server := web.NewService(
		web.Name("prodservice"),
		//web.Address(":8001"),
		web.Handler(ginRouter),
		web.Registry(consulreg),
	)
	// Bug fix: Init/Run errors were silently discarded.
	if err := server.Init(); err != nil {
		panic(err)
	}
	if err := server.Run(); err != nil {
		panic(err)
	}
}
//用docker启动一个服务【最简单的容器启动】
// 1.拉取镜像(这里使用单机)
// docker pull consul
// 2.启动一个服务端(为了演示我们只启动一个)
// sudo docker run -d --name=cs -p 8500:8500 consul agent -server -bootstrap -ui -client 0.0.0.0
// -d:后台执行
// -server:代表以服务端的方式启动
// -bootstrap:指定自己为leader,而不需要选举
// -ui:启动一个内置管理web界面
// -client:指定客户端可以访问的IP,设置为0.0.0.0则任意访问,否则默认本机可以访问
// (8500是后台UI端口,别忘了sudo iptable -l INPUT -p tcp --dport 8500 -j ACCEPT)
// 3.接下来就可以访问http://ip:8500/查看后台情况
|
package header
import (
"strconv"
"strings"
"time"
"github.com/kudrykv/latex-yearly-planner/app/components/calendar"
"github.com/kudrykv/latex-yearly-planner/app/components/hyper"
)
// Header models a page header with left- and right-aligned item lists.
type Header struct {
	Left Items
	Right Items
}
// Items is an ordered list of header cells.
type Items []Item
// Item is anything that can render itself as a LaTeX fragment.
type Item interface {
	Display() string
}
// ColSetup returns the LaTeX tabular column spec for the items:
// pipe-separated "l" columns with a leading pipe for the left side, or
// pipe-separated "r" columns terminated by "@{}" for the right side.
func (i Items) ColSetup(left bool) string {
	cols := make([]string, len(i))
	letter := "r"
	if left {
		letter = "l"
	}
	for idx := range cols {
		cols[idx] = letter
	}
	if left {
		return "|" + strings.Join(cols, "|")
	}
	return strings.Join(cols, "|") + "@{}"
}
// Row renders each item and joins the cells with the LaTeX column
// separator " & ".
func (i Items) Row() string {
	cells := make([]string, len(i))
	for idx, item := range i {
		cells[idx] = item.Display()
	}
	return strings.Join(cells, " & ")
}
// TextItem renders a piece of text, optionally bold, as a hyperlink or a
// hyperlink target. The reference is refText if set, else refPrefix+Name.
type TextItem struct {
	Name string
	bold bool
	ref bool
	refPrefix string
	refText string
}
// NewTextItem builds a plain (non-bold, linking) text item.
func NewTextItem(name string) TextItem {
	return TextItem{
		Name: name,
	}
}
// Display renders the item as LaTeX: the (optionally bold) name wrapped
// in a hyperlink. When ref is set the item is a link target; otherwise
// it links to the computed reference.
func (t TextItem) Display() string {
	var (
		out string
		ref string
	)
	if t.bold {
		out = "\\textbf{" + t.Name + "}"
	} else {
		out = t.Name
	}
	// refText wins over the refPrefix+Name fallback.
	if len(t.refText) > 0 {
		ref = t.refText
	} else {
		ref = t.refPrefix + t.Name
	}
	if t.ref {
		return hyper.Target(ref, out)
	}
	return hyper.Link(ref, out)
}
// Ref selects target (true) vs link (false) rendering; returns a copy.
func (t TextItem) Ref(ref bool) TextItem {
	t.ref = ref
	return t
}
// Bold toggles bold rendering; returns a copy.
func (t TextItem) Bold(f bool) TextItem {
	t.bold = f
	return t
}
// RefPrefix sets the prefix used when deriving the reference from Name.
func (t TextItem) RefPrefix(refPrefix string) TextItem {
	t.refPrefix = refPrefix
	return t
}
// RefText sets an explicit reference, overriding refPrefix+Name.
func (t TextItem) RefText(refText string) TextItem {
	t.refText = refText
	return t
}
// ItemsGroup renders several items joined by a delimiter as one cell.
type ItemsGroup struct {
	Items Items
	delim string
}
// NewItemsGroup groups the given items with the default "\quad{}" delimiter.
func NewItemsGroup(items ...Item) ItemsGroup {
	return ItemsGroup{
		Items: items,
		delim: "\\quad{}",
	}
}
// Display renders each item and joins them with the group delimiter.
func (i ItemsGroup) Display() string {
	list := make([]string, 0, len(i.Items))
	for _, item := range i.Items {
		list = append(list, item.Display())
	}
	return strings.Join(list, i.delim)
}
// Delim overrides the delimiter; returns a copy.
func (i ItemsGroup) Delim(delim string) ItemsGroup {
	i.delim = delim
	return i
}
// IntItem renders an integer as a hyperlink (or a link target when ref is
// set), using the decimal string as both reference and text.
type IntItem struct {
	Val int
	ref bool
}
// Display renders the value as a hyperlink target or link.
func (i IntItem) Display() string {
	var out string
	s := strconv.Itoa(i.Val)
	if i.ref {
		out = hyper.Target(s, s)
	} else {
		out = hyper.Link(s, s)
	}
	return out
}
// Ref marks the item as a link target; returns a copy.
func (i IntItem) Ref() IntItem {
	i.ref = true
	return i
}
// NewIntItem builds a linking integer item.
func NewIntItem(val int) IntItem {
	return IntItem{Val: val}
}
// MonthItem renders a month name, optionally shortened to its first three
// letters, as a hyperlink target or link. The full month name is always
// used as the reference.
type MonthItem struct {
	Val time.Month
	ref bool
	shorten bool
}
// Display renders the month; every English month name has at least three
// letters, so the [:3] slice is safe.
func (m MonthItem) Display() string {
	ref := m.Val.String()
	text := ref
	if m.shorten {
		text = text[:3]
	}
	if m.ref {
		return hyper.Target(ref, text)
	}
	return hyper.Link(ref, text)
}
// Ref marks the item as a link target; returns a copy.
func (m MonthItem) Ref() MonthItem {
	m.ref = true
	return m
}
// Shorten toggles three-letter abbreviation; returns a copy.
func (m MonthItem) Shorten(f bool) MonthItem {
	m.shorten = f
	return m
}
// NewMonthItem builds a linking, unabbreviated month item.
func NewMonthItem(m time.Month) MonthItem {
	return MonthItem{Val: m}
}
// TimeItem renders a day-time value using Layout for the visible text and
// refPrefix plus the RFC3339 form as the hyperlink reference.
type TimeItem struct {
	Val calendar.DayTime
	Layout string
	ref bool
	refPrefix string
}
// Display renders the time as a hyperlink target or link.
func (t TimeItem) Display() string {
	if t.ref {
		return hyper.Target(t.refPrefix+t.Val.Format(time.RFC3339), t.Val.Format(t.Layout))
	}
	return hyper.Link(t.refPrefix+t.Val.Format(time.RFC3339), t.Val.Format(t.Layout))
}
// SetLayout overrides the display layout; returns a copy.
func (t TimeItem) SetLayout(layout string) TimeItem {
	t.Layout = layout
	return t
}
// Ref marks the item as a link target; returns a copy.
func (t TimeItem) Ref() TimeItem {
	t.ref = true
	return t
}
// RefPrefix sets the reference prefix; returns a copy.
func (t TimeItem) RefPrefix(refPrefix string) TimeItem {
	t.refPrefix = refPrefix
	return t
}
// NewTimeItem builds a linking time item with the RFC3339 display layout.
func NewTimeItem(val calendar.DayTime) TimeItem {
	return TimeItem{
		Val: val,
		Layout: time.RFC3339,
	}
}
|
package ksqlparser
import "strings"
// caseWhenExpression models a KSQL CASE WHEN ... THEN ... [ELSE ...] END
// expression. Else may be nil when no ELSE branch is present.
type caseWhenExpression struct {
	When []*Condition
	Then Expression
	Else Expression
}
// String renders the expression as "CASE WHEN <conds...> THEN <expr>
// [ELSE <expr>] END" with single spaces between tokens.
func (b *caseWhenExpression) String() string {
	parts := make([]string, 0, len(b.When)+5)
	parts = append(parts, ReservedCaseWhen)
	for _, cond := range b.When {
		parts = append(parts, cond.String())
	}
	parts = append(parts, ReservedThen, b.Then.String())
	if b.Else != nil {
		parts = append(parts, ReservedElse, b.Else.String())
	}
	parts = append(parts, ReservedEnd)
	return strings.Join(parts, " ")
}
|
package main

// Importing required modules.
import (
	"os"

	"github.com/shomali11/slacker"
)

// handle replies "Hey!" to any matched command request.
// Bug fix: the original used '#' line comments, which are not legal Go
// syntax — the file could not compile. Converted to '//' comments.
func handle(request *slacker.Request, response slacker.ResponseWriter) {
	response.Reply("Hey!")
}

// main connects to Slack using API_TOKEN and serves the "hello" command.
func main() {
	bot := slacker.NewClient(os.Getenv("API_TOKEN"))
	bot.Command("hello", "Say hello", handle)
	err := bot.Listen()
	if err != nil {
		panic(err)
	}
}
|
package main
import (
"fmt"
"log"
"net/http"
"path"
"strings"
)
// server bundles the URL shortener's dependencies: a key-value store for
// short→original mappings, a hasher that derives short keys, and counters.
type server struct {
	kvs KVS
	hasher Hasher
	servStats *serverStats
}
// newServer creates a new server, with the specified hasher and key-value store.
// A fresh redirect-statistics counter is attached.
func newServer(kvs KVS, hasher Hasher) *server {
	s := &server{kvs: kvs, hasher: hasher}
	s.servStats = newServerStats()
	return s
}
// shorten is an HTTP handler that shortens the URL string specified as
// the 'url' query parameter, stores the mapping, and prints the short URL.
func (s *server) shorten(w http.ResponseWriter, r *http.Request) {
	// extract original URL.
	org := r.URL.Query().Get("url")
	if org == "" {
		w.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(w, "empty url field")
		return
	}
	// hash original url.
	short := s.hasher.Hash(org)
	log.Printf("/shorten org=%s short=%s", org, short)
	// store short url/original url pair in the key-value store.
	err := s.kvs.Store(short, org)
	if err != nil {
		log.Printf("/shorten error: %s", err)
		w.WriteHeader(http.StatusInternalServerError)
		// Fix: the message previously read "interval server error".
		fmt.Fprintf(w, "internal server error: %v", err)
		return
	}
	// print hostname and shortened url in the response.
	fullshort := path.Join(r.Host, short)
	fmt.Fprintf(w, "shortened URL: %s\n", fullshort)
}
// redirect is an HTTP handler that redirects a known short URL to its
// original URL, bumping the per-URL redirect counter.
func (s *server) redirect(w http.ResponseWriter, r *http.Request) {
	// The short key is the request URI without its leading slash
	// (TrimPrefix replaces the old Replace(..., "/", "", 1), which removed
	// the same first slash less clearly).
	short := strings.TrimPrefix(r.URL.RequestURI(), "/")
	// load original url from the key-value store.
	org, err := s.kvs.Load(short)
	if err != nil {
		log.Printf("/ error: loading short=%s", short)
		w.WriteHeader(http.StatusNotFound)
		fmt.Fprintf(w, "URL not found: %v", err)
		return
	}
	log.Printf("/ redirecting short=%s org=%s", short, org)
	// increment redirect counter for that url
	s.servStats.IncrRedirects(org)
	// Named constant replaces the magic 307.
	http.Redirect(w, r, org, http.StatusTemporaryRedirect)
}
// stats is an HTTP handler printing the current redirect statistics.
func (s *server) stats(w http.ResponseWriter, r *http.Request) {
	// print the redirect counter into the response.
	err := s.servStats.Show(w)
	if err != nil {
		log.Println("/stats: error showing stats:", err)
		w.WriteHeader(http.StatusInternalServerError)
		// Fix: the message previously read "interval server error".
		fmt.Fprintf(w, "internal server error: %v", err)
	}
}
|
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
package micropay
import (
"github.com/iotaledger/wasp/contracts"
"github.com/iotaledger/wasp/packages/coretypes/coreutil"
"github.com/iotaledger/wasp/packages/hashing"
"time"
)
// Contract name and human-readable description.
const (
	Name = "micropay"
	description = "Micro payment PoC smart contract"
)
// Interface describes the contract to the chain; its program hash is
// derived from the contract name.
var (
	Interface = &coreutil.ContractInterface{
		Name: Name,
		Description: description,
		ProgramHash: hashing.HashStrings(Name),
	}
)
// init registers the contract's entry points (warrant management,
// settlement, and the info view) and installs it as an example processor.
func init() {
	Interface.WithFunctions(initialize, []coreutil.ContractFunctionInterface{
		coreutil.Func(FuncPublicKey, publicKey),
		coreutil.Func(FuncAddWarrant, addWarrant),
		coreutil.Func(FuncRevokeWarrant, revokeWarrant),
		coreutil.Func(FuncCloseWarrant, closeWarrant),
		coreutil.Func(FuncSettle, settle),
		// NOTE(review): the view is registered under FuncGetChannelInfo
		// ("getWarrantInfo") but the handler is named getWarrantInfo —
		// the "ChannelInfo" constant name looks inconsistent; confirm.
		coreutil.ViewFunc(FuncGetChannelInfo, getWarrantInfo),
	})
	contracts.AddExampleProcessor(Interface)
}
// Entry-point names, call parameter keys, state variable keys, and
// protocol constants for the micropay contract.
const (
	// Minimum iotas required to open a warrant.
	MinimumWarrantIotas = 500
	// Entry-point names.
	FuncPublicKey = "publicKey"
	FuncAddWarrant = "addWarrant"
	FuncRevokeWarrant = "revokeWarrant"
	FuncCloseWarrant = "closeWarrant"
	FuncSettle = "settle"
	FuncGetChannelInfo = "getWarrantInfo"
	// Call parameter keys.
	ParamPublicKey = "pk"
	ParamPayerAddress = "pa"
	ParamServiceAddress = "sa"
	ParamPayments = "m"
	ParamWarrant = "wa"
	ParamRevoked = "re"
	ParamLastOrd = "lo"
	// State variable keys.
	StateVarPublicKeys = "k"
	StateVarLastOrdNum = "o"
	// Delay before a revoked warrant can be closed.
	WarrantRevokePeriod = 1 * time.Hour
)
|
/*
Copyright (c) 2019 VMware, Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package plugin
import (
"os"
"path/filepath"
"testing"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test_AvailablePlugins verifies plugin discovery for both the
// LISSIO_PLUGIN_PATH override and the XDG_CONFIG_HOME layout, using an
// in-memory filesystem.
// NOTE(review): the deferred Unsetenv inside the loop only runs when the
// whole test returns, so LISSIO_PLUGIN_PATH remains set during the
// XDG_CONFIG_HOME iteration — confirm that cross-talk is intended
// (t.Setenv would scope each variable to the test).
func Test_AvailablePlugins(t *testing.T) {
	tests := []struct {
		homePath string
		envVar string
	}{
		{
			homePath: filepath.Join("/home", "user"),
			envVar: "LISSIO_PLUGIN_PATH",
		},
		{
			homePath: filepath.Join("/home", "xdg_config_path"),
			envVar: "XDG_CONFIG_HOME",
		},
	}
	for _, test := range tests {
		defer os.Unsetenv(test.envVar)
		// Fresh in-memory fs and config per case.
		fs := afero.NewMemMapFs()
		c := &defaultConfig{
			fs: fs,
			homeFn: func() string {
				return test.homePath
			},
		}
		switch test.envVar {
		case "LISSIO_PLUGIN_PATH":
			customPath := "/example/test"
			envPaths := customPath + ":/another/one"
			os.Setenv(test.envVar, envPaths)
			configPath := filepath.Join(test.homePath, ".config", configDir, "plugins")
			err := fs.MkdirAll(configPath, 0700)
			require.NoError(t, err, "unable to create test home directory")
			if os.Getenv(test.envVar) != "" {
				for _, path := range filepath.SplitList(envPaths) {
					err := fs.MkdirAll(path, 0700)
					require.NoError(t, err, "unable to create directory from environment variable")
				}
			}
			// stagePlugin drops a file with the given mode; only
			// executable (0755) entries should be reported as plugins.
			stagePlugin := func(t *testing.T, path string, name string, mode os.FileMode) {
				p := filepath.Join(path, name)
				err = afero.WriteFile(fs, p, []byte("guts"), mode)
				require.NoError(t, err)
			}
			stagePlugin(t, configPath, "z-plugin", 0755)
			stagePlugin(t, configPath, "a-plugin", 0755)
			stagePlugin(t, configPath, "not-a-plugin", 0600)
			stagePlugin(t, customPath, "e-plugin", 0755)
			got, err := AvailablePlugins(c)
			require.NoError(t, err)
			// Env-var paths are listed before the config dir; entries
			// within a dir come back sorted.
			expected := []string{
				"/example/test/e-plugin",
				"/home/user/.config/lissio/plugins/a-plugin",
				"/home/user/.config/lissio/plugins/z-plugin",
			}
			assert.Equal(t, expected, got)
		case "XDG_CONFIG_HOME":
			xdgPath := "/home/xdg_config_path"
			os.Setenv(test.envVar, xdgPath)
			configPath := filepath.Join(test.homePath, configDir, "plugins")
			err := fs.MkdirAll(configPath, 0700)
			require.NoError(t, err, "unable to create test home directory")
			stagePlugin := func(t *testing.T, path string, name string, mode os.FileMode) {
				p := filepath.Join(path, name)
				err = afero.WriteFile(fs, p, []byte("guts"), mode)
				require.NoError(t, err)
			}
			stagePlugin(t, configPath, "a-plugin", 0755)
			got, err := AvailablePlugins(c)
			require.NoError(t, err)
			expected := []string{
				"/home/xdg_config_path/lissio/plugins/a-plugin",
			}
			assert.Equal(t, expected, got)
		}
	}
}
// Test_AvailablePlugins_no_plugin_dir verifies that a missing plugin
// directory yields no plugins and no error.
func Test_AvailablePlugins_no_plugin_dir(t *testing.T) {
	cfg := &defaultConfig{
		fs: afero.NewMemMapFs(),
		homeFn: func() string {
			return filepath.Join("/home", "user")
		},
	}
	plugins, err := AvailablePlugins(cfg)
	require.NoError(t, err)
	assert.Equal(t, []string(nil), plugins)
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validator
import (
"context"
"testing"
"github.com/pingcap/tidb/parser/auth"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/stretchr/testify/require"
)
// TestValidateDictionaryPassword checks that passwords containing a
// dictionary word of length >= 4 (from validate_password.dictionary) are
// rejected, including non-ASCII words.
func TestValidateDictionaryPassword(t *testing.T) {
	vars := variable.NewSessionVars(nil)
	mock := variable.NewMockGlobalAccessor4Tests()
	mock.SessionVars = vars
	vars.GlobalVarsAccessor = mock
	err := mock.SetGlobalSysVar(context.Background(), variable.ValidatePasswordDictionary, "abc;123;1234;5678;HIJK;中文测试;。,;!")
	require.NoError(t, err)
	testcases := []struct {
		pwd string
		result bool
	}{
		{"abcdefg", true},
		{"abcd123efg", true},
		{"abcd1234efg", false},
		{"abcd12345efg", false},
		{"abcd123efghij", true},
		{"abcd123efghijk", false},
		{"abcd123efghij中文测试", false},
		{"abcd123。,;!", false},
	}
	for _, testcase := range testcases {
		ok, err := ValidateDictionaryPassword(testcase.pwd, &vars.GlobalVarsAccessor)
		require.NoError(t, err)
		require.Equal(t, testcase.result, ok, testcase.pwd)
	}
}
// TestValidateUserNameInPassword checks that passwords containing the
// session user name (or auth user name), forward or reversed, are warned
// about when check_user_name is ON, and accepted when it is OFF. The
// comparison is case-sensitive.
func TestValidateUserNameInPassword(t *testing.T) {
	sessionVars := variable.NewSessionVars(nil)
	sessionVars.User = &auth.UserIdentity{Username: "user", AuthUsername: "authuser"}
	sessionVars.GlobalVarsAccessor = variable.NewMockGlobalAccessor4Tests()
	testcases := []struct {
		pwd string
		warn string
	}{
		{"", ""},
		{"user", "Password Contains User Name"},
		{"authuser", "Password Contains User Name"},
		{"resu000", "Password Contains Reversed User Name"},
		{"resuhtua", "Password Contains Reversed User Name"},
		{"User", ""},
		{"authUser", ""},
		{"Resu", ""},
		{"Resuhtua", ""},
	}
	// Enable check_user_name
	err := sessionVars.GlobalVarsAccessor.SetGlobalSysVar(context.Background(), variable.ValidatePasswordCheckUserName, "ON")
	require.NoError(t, err)
	for _, testcase := range testcases {
		warn, err := ValidateUserNameInPassword(testcase.pwd, sessionVars)
		require.NoError(t, err)
		require.Equal(t, testcase.warn, warn, testcase.pwd)
	}
	// Disable check_user_name
	err = sessionVars.GlobalVarsAccessor.SetGlobalSysVar(context.Background(), variable.ValidatePasswordCheckUserName, "OFF")
	require.NoError(t, err)
	for _, testcase := range testcases {
		warn, err := ValidateUserNameInPassword(testcase.pwd, sessionVars)
		require.NoError(t, err)
		require.Equal(t, "", warn, testcase.pwd)
	}
}
// TestValidatePasswordLowPolicy checks the LOW policy: only the minimum
// length (validate_password.length) is enforced, and warnings track the
// current variable value.
func TestValidatePasswordLowPolicy(t *testing.T) {
	sessionVars := variable.NewSessionVars(nil)
	sessionVars.GlobalVarsAccessor = variable.NewMockGlobalAccessor4Tests()
	sessionVars.GlobalVarsAccessor.(*variable.MockGlobalAccessor).SessionVars = sessionVars
	err := sessionVars.GlobalVarsAccessor.SetGlobalSysVar(context.Background(), variable.ValidatePasswordLength, "8")
	require.NoError(t, err)
	warn, err := ValidatePasswordLowPolicy("1234", &sessionVars.GlobalVarsAccessor)
	require.NoError(t, err)
	require.Equal(t, "Require Password Length: 8", warn)
	warn, err = ValidatePasswordLowPolicy("12345678", &sessionVars.GlobalVarsAccessor)
	require.NoError(t, err)
	require.Equal(t, "", warn)
	// Raising the minimum makes the previously-valid password warn again.
	err = sessionVars.GlobalVarsAccessor.SetGlobalSysVar(context.Background(), variable.ValidatePasswordLength, "12")
	require.NoError(t, err)
	warn, err = ValidatePasswordLowPolicy("12345678", &sessionVars.GlobalVarsAccessor)
	require.NoError(t, err)
	require.Equal(t, "Require Password Length: 12", warn)
}
// TestValidatePasswordMediumPolicy checks the MEDIUM policy: per-class
// character-count requirements (mixed case, special chars, digits), with
// each missing class reported in a dedicated warning.
func TestValidatePasswordMediumPolicy(t *testing.T) {
	sessionVars := variable.NewSessionVars(nil)
	sessionVars.GlobalVarsAccessor = variable.NewMockGlobalAccessor4Tests()
	sessionVars.GlobalVarsAccessor.(*variable.MockGlobalAccessor).SessionVars = sessionVars
	// Require 1 of each case, 2 special characters, 3 digits.
	err := sessionVars.GlobalVarsAccessor.SetGlobalSysVar(context.Background(), variable.ValidatePasswordMixedCaseCount, "1")
	require.NoError(t, err)
	err = sessionVars.GlobalVarsAccessor.SetGlobalSysVar(context.Background(), variable.ValidatePasswordSpecialCharCount, "2")
	require.NoError(t, err)
	err = sessionVars.GlobalVarsAccessor.SetGlobalSysVar(context.Background(), variable.ValidatePasswordNumberCount, "3")
	require.NoError(t, err)
	// Each password below is missing exactly one character class.
	warn, err := ValidatePasswordMediumPolicy("!@A123", &sessionVars.GlobalVarsAccessor)
	require.NoError(t, err)
	require.Equal(t, "Require Password Lowercase Count: 1", warn)
	warn, err = ValidatePasswordMediumPolicy("!@a123", &sessionVars.GlobalVarsAccessor)
	require.NoError(t, err)
	require.Equal(t, "Require Password Uppercase Count: 1", warn)
	warn, err = ValidatePasswordMediumPolicy("!@Aa12", &sessionVars.GlobalVarsAccessor)
	require.NoError(t, err)
	require.Equal(t, "Require Password Digit Count: 3", warn)
	warn, err = ValidatePasswordMediumPolicy("!Aa123", &sessionVars.GlobalVarsAccessor)
	require.NoError(t, err)
	require.Equal(t, "Require Password Non-alphanumeric Count: 2", warn)
	warn, err = ValidatePasswordMediumPolicy("!@Aa123", &sessionVars.GlobalVarsAccessor)
	require.NoError(t, err)
	require.Equal(t, "", warn)
}
// TestValidatePassword exercises the combined entry point across the three
// policy levels: LOW (length + user-name check), MEDIUM (adds character
// classes), STRONG (adds the dictionary check).
func TestValidatePassword(t *testing.T) {
	sessionVars := variable.NewSessionVars(nil)
	sessionVars.GlobalVarsAccessor = variable.NewMockGlobalAccessor4Tests()
	sessionVars.GlobalVarsAccessor.(*variable.MockGlobalAccessor).SessionVars = sessionVars
	sessionVars.User = &auth.UserIdentity{Username: "user", AuthUsername: "authuser"}
	err := sessionVars.GlobalVarsAccessor.SetGlobalSysVar(context.Background(), variable.ValidatePasswordPolicy, "LOW")
	require.NoError(t, err)
	err = ValidatePassword(sessionVars, "1234")
	require.Error(t, err) // too short
	err = ValidatePassword(sessionVars, "user1234")
	require.Error(t, err) // contains user name
	err = ValidatePassword(sessionVars, "authuser1234")
	require.Error(t, err) // contains auth user name
	err = ValidatePassword(sessionVars, "User1234")
	require.NoError(t, err)
	err = sessionVars.GlobalVarsAccessor.SetGlobalSysVar(context.Background(), variable.ValidatePasswordPolicy, "MEDIUM")
	require.NoError(t, err)
	err = ValidatePassword(sessionVars, "User1234")
	require.Error(t, err) // no special character
	err = ValidatePassword(sessionVars, "!User1234")
	require.NoError(t, err)
	err = ValidatePassword(sessionVars, "!User1234")
	require.NoError(t, err)
	err = sessionVars.GlobalVarsAccessor.SetGlobalSysVar(context.Background(), variable.ValidatePasswordPolicy, "STRONG")
	require.NoError(t, err)
	err = sessionVars.GlobalVarsAccessor.SetGlobalSysVar(context.Background(), variable.ValidatePasswordDictionary, "User")
	require.NoError(t, err)
	err = ValidatePassword(sessionVars, "!User1234")
	require.Error(t, err) // contains the dictionary word "User"
	err = ValidatePassword(sessionVars, "!ABcd1234")
	require.NoError(t, err)
}
|
package server
import (
"bufio"
"fmt"
"net"
"strings"
"testing"
"github.com/danhale-git/craft/internal/mock"
)
// TestServer_Command checks that Command writes the space-joined arguments,
// newline-terminated, to the container's attached stdin.
func TestServer_Command(t *testing.T) {
	mockClient := &mock.DockerContainerClient{}
	s := &Server{ContainerAPIClient: mockClient}
	// net.Pipe gives a synchronous in-memory connection: the goroutine's
	// write blocks until the main goroutine reads it below.
	conn, reader := net.Pipe()
	mockClient.Conn = conn
	mockClient.Reader = bufio.NewReader(reader)
	c := []string{"arg1", "arg2", "arg3"}
	go func() {
		err := s.Command(c)
		if err != nil {
			// Errorf (not Fatalf) is safe from a non-test goroutine.
			t.Errorf("error returned for valid input")
		}
	}()
	want := strings.Join(c, " ") + "\n"
	got, err := mockClient.Reader.ReadString('\n')
	if err != nil {
		t.Errorf("error reading command input from mock client: %s", err)
	}
	if got != want {
		t.Errorf("want: %s got: %s", want, got)
	}
}
// TestServer_LogReader checks that LogReader returns a usable, non-nil
// buffered reader over the container log stream.
func TestServer_LogReader(t *testing.T) {
	s := &Server{ContainerAPIClient: &mock.DockerContainerClient{}}
	r, err := s.LogReader(20)
	if err != nil {
		t.Errorf("error returned for valid input: %s", err)
	}
	if r == nil {
		t.Errorf("reader is nil")
	}
	// '!' is presumably a delimiter present in the mock's log output —
	// TODO confirm against mock.DockerContainerClient.
	_, err = r.ReadString('!')
	if err != nil {
		t.Errorf("error reading from valid reader: %s", err)
	}
}
// TestContainerID checks that containerID maps container names "mcN" to
// the mock's "mcN_ID" identifiers.
func TestContainerID(t *testing.T) {
	srv := &Server{ContainerAPIClient: &mock.DockerContainerClient{}}
	for n := 1; n <= 3; n++ {
		name := fmt.Sprintf("mc%d", n)
		want := name + "_ID"
		got, err := containerID(name, srv)
		if err != nil {
			t.Errorf("error returned for valid input: %s", err)
		}
		if got != want {
			t.Errorf("want: %s got: %s", want, got)
		}
	}
}
|
// Package memcache provides a client for the memcached cache server.
package memcache
import (
"bufio"
"bytes"
"errors"
"fmt"
"hash/crc32"
"net"
"strconv"
"sync"
"time"
)
const (
// DefaultTimeout is the default socket read/write timeout.
DefaultTimeout = 100 * time.Millisecond
// DefaultMaxIdleConns is the default maximum number of idle connections kept for any single address.
DefaultMaxIdleConns = 2
// arbitrary buffered channel size, for readability
buffered = 8
)
// ServerList is a simple ServerSelector. Its zero value is usable.
type ServerList struct {
mu sync.RWMutex
addrs []net.Addr
}
// ServerSelector is the interface that selects a memcache server as a function of the item's key.
// All ServerSelector implementations must be safe for concurrent use by multiple goroutines.
type ServerSelector interface {
// PickServer returns the server address that a given item should be shared onto.
PickServer(key string) (net.Addr, error)
Each(func(net.Addr) error) error
}
// conn is a connection to a server.
type conn struct {
nc net.Conn
rw *bufio.ReadWriter
addr net.Addr
c *Client
}
// Client is a memcache client. It is safe for unlocked use by multiple concurrent goroutines.
type Client struct {
// Timeout specifies the socket read/write timeout. If zero, DefaultTimeout is used.
Timeout time.Duration
// MaxIdleConns specifies the maximum number of idle connections that will be maintained per address. If less than one, DefaultMaxIdleConns will be used.
// Consider your expected traffic rates and latency carefully. This should be set to a number higher than your peak parallel requests.
MaxIdleConns int
Selector ServerSelector
Freeconn map[string][]*conn
lk sync.Mutex
}
// ConnectTimeoutError is the error type used when it takes
// too long to connect to the desired host. This level of
// detail can generally be ignored.
type ConnectTimeoutError struct {
Addr net.Addr
}
// staticAddr caches the Network() and String() values from any net.Addr.
type staticAddr struct {
ntw, str string
}
// keyBufPool returns []byte buffers for use by PickServer's call to crc32.ChecksumIEEE to avoid allocations.
// But doesn't avoid the copies, which at least are bounded in size and small
var keyBufPool = sync.Pool{
New: func() interface{} {
b := make([]byte, 256)
return &b
},
}
// PickServer is pick server
func (ss *ServerList) PickServer(key string) (net.Addr, error) {
ss.mu.RLock()
defer ss.mu.RUnlock()
if len(ss.addrs) == 0 {
return nil, ErrNoServers
}
if len(ss.addrs) == 1 {
return ss.addrs[0], nil
}
bufp := keyBufPool.Get().(*[]byte)
n := copy(*bufp, key)
cs := crc32.ChecksumIEEE((*bufp)[:n])
keyBufPool.Put(bufp)
return ss.addrs[cs%uint32(len(ss.addrs))], nil
}
// Each server is given equal weight. A server is given more weight if it's listed multiple times.
// Each iterates over each server calling the given function
// Each iterates over every configured server address, invoking f for each
// and stopping at the first error, which it returns.
func (ss *ServerList) Each(f func(net.Addr) error) error {
	ss.mu.RLock()
	defer ss.mu.RUnlock()
	for _, addr := range ss.addrs {
		if err := f(addr); err != nil {
			return err
		}
	}
	return nil
}
func (s *staticAddr) Network() string {
return s.ntw
}
func (s *staticAddr) String() string {
return s.str
}
// newStaticAddr snapshots a's Network() and String() values into an
// immutable net.Addr so later calls never hit the original implementation.
func newStaticAddr(a net.Addr) net.Addr {
	sa := &staticAddr{
		ntw: a.Network(),
		str: a.String(),
	}
	return sa
}
// SetServers changes a ServerList's set of servers at runtime and is safe for concurrent use by multiple goroutines.
// SetServers returns an error if any of the server names fail to resolve. No attempt is made to connect to the server.
// If any error is returned, no changes are made to the ServerList.
func (ss *ServerList) SetServers(servers ...string) error {
naddr := make([]net.Addr, len(servers))
for i, server := range servers {
tcpaddr, err := net.ResolveTCPAddr("tcp", server)
if err != nil {
return err
}
naddr[i] = newStaticAddr(tcpaddr)
}
ss.mu.Lock()
defer ss.mu.Unlock()
fmt.Println("ss.addrs", naddr)
ss.addrs = naddr
return nil
}
// NewFromSelector returns a new Client using the provided ServerSelector.
func NewFromSelector(ss ServerSelector) *Client {
return &Client{Selector: ss}
}
// New returns a memcache client using the provided server(s). If a server is listed multiple times, it gets a proportional amount of weight.
func New(server ...string) *Client {
ss := new(ServerList)
ss.SetServers(server...)
return NewFromSelector(ss)
}
// netTimeout returns the configured socket timeout, or DefaultTimeout when
// the client's Timeout field is left zero.
func (c *Client) netTimeout() time.Duration {
	if d := c.Timeout; d != 0 {
		return d
	}
	return DefaultTimeout
}
func (cn *conn) extendDeadline() {
cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout()))
}
// getFreeConn pops an idle connection for addr from the free pool, if one
// exists. ok reports whether a connection was found.
func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) {
	c.lk.Lock()
	defer c.lk.Unlock()
	if c.Freeconn == nil {
		return nil, false
	}
	key := addr.String()
	idle := c.Freeconn[key]
	if len(idle) == 0 {
		return nil, false
	}
	// Pop the most recently returned connection (LIFO keeps hot sockets warm).
	last := len(idle) - 1
	cn = idle[last]
	c.Freeconn[key] = idle[:last]
	return cn, true
}
// dial opens a connection to addr within the client's timeout.
// A dial timeout is reported as *ConnectTimeoutError so callers can treat
// it specially; all other errors are returned unchanged.
// (Removed an unused local `connError` type that was dead code.)
func (c *Client) dial(addr net.Addr) (net.Conn, error) {
	nc, err := net.DialTimeout(addr.Network(), addr.String(), c.netTimeout())
	if err == nil {
		return nc, nil
	}
	if ne, ok := err.(net.Error); ok && ne.Timeout() {
		return nil, &ConnectTimeoutError{addr}
	}
	return nil, err
}
func (c *Client) getConn(addr net.Addr) (*conn, error) {
cn, ok := c.getFreeConn(addr)
if ok {
cn.extendDeadline()
return cn, nil
}
nc, err := c.dial(addr)
if err != nil {
return nil, err
}
cn = &conn{
nc: nc,
addr: addr,
rw: bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)),
c: c,
}
cn.extendDeadline()
return cn, nil
}
// resumableError returns true if err is only a protocol-level cache error.
// This is used to determine whether or not a server connection should be re-used or not.
// If an error occurs, by default we don't reuse the connection, unless it was just a cache error.
func resumableError(err error) bool {
switch err {
case ErrCacheMiss, ErrCASConflict, ErrNotStored, ErrMalformedKey:
return true
}
return false
}
// maxIdleConns returns the configured per-address idle-connection cap,
// falling back to DefaultMaxIdleConns when MaxIdleConns is unset (< 1).
func (c *Client) maxIdleConns() int {
	if n := c.MaxIdleConns; n > 0 {
		return n
	}
	return DefaultMaxIdleConns
}
// putFreeConn returns cn to the idle pool for addr, closing it instead when
// the pool is already at maxIdleConns.
func (c *Client) putFreeConn(addr net.Addr, cn *conn) {
	c.lk.Lock()
	defer c.lk.Unlock()
	if c.Freeconn == nil {
		// Lazily create the pool on first release.
		c.Freeconn = make(map[string][]*conn)
	}
	key := addr.String()
	idle := c.Freeconn[key]
	if len(idle) >= c.maxIdleConns() {
		cn.nc.Close()
		return
	}
	c.Freeconn[key] = append(idle, cn)
}
// release returns this connection back to the client's free pool
func (cn *conn) release() {
cn.c.putFreeConn(cn.addr, cn)
}
// condRelease releases this connection if the error pointed to by err is nil (not an error) or is only a protocol level error (e.g. a cache miss).
// The purpose is to not recycle TCP connections that are bad.
func (cn *conn) condRelease(err *error) {
if *err == nil || resumableError(*err) {
cn.release()
} else {
cn.nc.Close()
}
}
// Replace writes the given item, but only if the server *does* already hold data for this key
func (c *Client) Replace(item *Item) error {
return c.onItem(item, (*Client).replace)
}
func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error {
return c.populateOne(rw, "replace", item)
}
// GetMulti is a batch version of Get.
// The returned map from keys to items may have fewer elements than the input slice, due to memcache cache misses. Each key must be at most 250 bytes in length.
// If no error is returned, the returned map will also be non-nil.
func (c *Client) GetMulti(keys []string) (map[string]*Item, error) {
var lk sync.Mutex
m := make(map[string]*Item)
addItemToMap := func(it *Item) {
lk.Lock()
defer lk.Unlock()
m[it.Key] = it
}
keyMap := make(map[net.Addr][]string)
for _, key := range keys {
if !legalKey(key) {
return nil, ErrMalformedKey
}
addr, err := c.Selector.PickServer(key)
if err != nil {
return nil, err
}
keyMap[addr] = append(keyMap[addr], key)
}
ch := make(chan error, buffered)
for addr, keys := range keyMap {
go func(addr net.Addr, keys []string) {
ch <- c.getFromAddr(addr, keys, addItemToMap)
}(addr, keys)
}
var err error
for range keyMap {
if ge := <-ch; ge != nil {
err = ge
}
}
return m, err
}
// Touch updates the expiry for the given key.
// The seconds parameter is either a Unix timestamp or, if seconds is less than 1 month, the number of seconds into the future at which time the item will expire.
// Zero means the item has no expiration time. ErrCacheMiss is returned if the key is not in the cache.
// The key must be at most 250 bytes in length.
func (c *Client) Touch(key string, seconds int32) (err error) {
return c.withKeyAddr(key, func(addr net.Addr) error {
return c.touchFromAddr(addr, []string{key}, seconds)
})
}
// touchFromAddr sends one "touch" command per key to the given server,
// updating each key's expiration. It returns ErrCacheMiss for the first
// missing key and stops on the first protocol error.
func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) error {
	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
		for _, key := range keys {
			if _, err := fmt.Fprintf(rw, "touch %s %d\r\n", key, expiration); err != nil {
				return err
			}
			// Flush per key: each command must reach the server before
			// its one-line reply is read.
			if err := rw.Flush(); err != nil {
				return err
			}
			line, err := rw.ReadSlice('\n')
			if err != nil {
				return err
			}
			switch {
			case bytes.Equal(line, resultTouched):
				break
			case bytes.Equal(line, resultNotFound):
				return ErrCacheMiss
			default:
				return fmt.Errorf("memcache: unexpected response line from touch: %q", string(line))
			}
		}
		return nil
	})
}
// Increment atomically increments key by delta.
// The return value is the new value after being incremented or an error.
// If the value didn't exist in memcached the error is ErrCacheMiss.
// The value in memcached must be an decimal number, or an error will be returned.
// On 64-bit overflow, the new value wraps around.
func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) {
return c.incrDecr("incr", key, delta)
}
// incrDecr issues an "incr" or "decr" command (verb) for key with the given
// delta and parses the server's reply: NOT_FOUND becomes ErrCacheMiss,
// CLIENT_ERROR is surfaced with its message, otherwise the line holds the
// new numeric value.
func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) {
	var val uint64
	err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
		line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta)
		if err != nil {
			return err
		}
		switch {
		case bytes.Equal(line, resultNotFound):
			return ErrCacheMiss
		case bytes.HasPrefix(line, resultClientErrorPrefix):
			// Strip the prefix and the trailing "\r\n".
			errMsg := line[len(resultClientErrorPrefix) : len(line)-2]
			return errors.New("memcache: client error: " + string(errMsg))
		}
		// Success: the reply line is the new value followed by "\r\n".
		val, err = strconv.ParseUint(string(line[:len(line)-2]), 10, 64)
		if err != nil {
			return err
		}
		return nil
	})
	return val, err
}
// Decrement atomically decrements key by delta.
// The return value is the new value after being decremented or an error.
// If the value didn't exist in memcached the error is ErrCacheMiss.
// The value in memcached must be an decimal number, or an error will be returned.
// On underflow, the new value is capped at zero and does not wrap around.
func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) {
return c.incrDecr("decr", key, delta)
}
// Delete deletes the item with the provided key.
// The error ErrCacheMiss is returned if the item didn't already exist in the cache.
func (c *Client) Delete(key string) error {
return c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
return writeExpectf(rw, resultDeleted, "delete %s\r\n", key)
})
}
// DeleteAll deletes all items in the cache.
func (c *Client) DeleteAll() error {
return c.withKeyRw("", func(rw *bufio.ReadWriter) error {
return writeExpectf(rw, resultDeleted, "flush_all\r\n")
})
}
func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error {
return c.withKeyAddr(key, func(addr net.Addr) error {
return c.withAddrRw(addr, fn)
})
}
// writeExpectf sends the formatted command and checks the one-line reply:
// OK or the caller-supplied expect line is success; the standard store
// failures map to their sentinel errors; anything else is reported verbatim.
func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...interface{}) error {
	line, err := writeReadLine(rw, format, args...)
	if err != nil {
		return err
	}
	if bytes.Equal(line, resultOK) || bytes.Equal(line, expect) {
		return nil
	}
	switch {
	case bytes.Equal(line, resultNotStored):
		return ErrNotStored
	case bytes.Equal(line, resultExists):
		return ErrCASConflict
	case bytes.Equal(line, resultNotFound):
		return ErrCacheMiss
	}
	return fmt.Errorf("memcache: unexpected response line: %q", string(line))
}
// FlushAll is flush all.
func (c *Client) FlushAll() error {
return c.Selector.Each(c.flushAllFromAddr)
}
// flushAllFromAddr send the flush_all command to the given addr
func (c *Client) flushAllFromAddr(addr net.Addr) error {
return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
if _, err := fmt.Fprintf(rw, "flush_all\r\n"); err != nil {
return err
}
if err := rw.Flush(); err != nil {
return err
}
line, err := rw.ReadSlice('\n')
if err != nil {
return err
}
switch {
case bytes.Equal(line, resultOk):
break
default:
return fmt.Errorf("memcache: unexpected response line from flush_all: %q", string(line))
}
return nil
})
}
func legalKey(key string) bool {
if len(key) > 250 {
return false
}
for i := 0; i < len(key); i++ {
if key[i] <= ' ' || key[i] == 0x7f {
return false
}
}
return true
}
// CompareAndSwap writes the given item that was previously returned by Get, if the value was neither modified or evicted between the
// Get and the CompareAndSwap calls.
// The item's Key should not change between calls but all other item fields may differ.
// ErrCASConflict is returned if the value was modified in between the calls.
// ErrNotStored is returned if the value was evicted in between the calls.
func (c *Client) CompareAndSwap(item *Item) error {
return c.onItem(item, (*Client).cas)
}
func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error {
return c.populateOne(rw, "cas", item)
}
func writeReadLine(rw *bufio.ReadWriter, format string, args ...interface{}) ([]byte, error) {
_, err := fmt.Fprintf(rw, format, args...)
if err != nil {
return nil, err
}
if err := rw.Flush(); err != nil {
return nil, err
}
line, err := rw.ReadSlice('\n')
return line, err
}
func (cte *ConnectTimeoutError) Error() string {
return "memcache: connect timeout to " + cte.Addr.String()
}
// Set func
// writes the given item unconditionally
func (c *Client) Set(item *Item) error {
return c.onItem(item, (*Client).set)
}
// Add func
// writes the given item, if no value already exists for its key
// ErrNotStored is returned if that condition is not met
func (c *Client) Add(item *Item) error {
return c.onItem(item, (*Client).add)
}
// Get func
// gets the item for the given key
// ErrCacheMiss is returned for a memcache cache miss
func (c *Client) Get(key string) (item *Item, err error) {
err = c.withKeyAddr(key, func(addr net.Addr) error {
return c.getFromAddr(addr, []string{key}, func(it *Item) { item = it })
})
if err == nil && item == nil {
err = ErrCacheMiss
}
return
}
|
// Package econtext provides Echo integration with golang.org/x/net/context.
package econtext
import (
"github.com/labstack/echo"
"golang.org/x/net/context"
)
type ctx struct {
c *echo.Context
context.Context
}
// Value returns the wrapped *echo.Context for ckey, then consults echo's
// per-request store for string keys, and finally falls back to the
// embedded context.Context.
func (c ctx) Value(key interface{}) interface{} {
	if key == ckey {
		return c.c
	}
	// Only string keys can live in echo's store. The original asserted
	// key.(string) unconditionally, which panicked for any non-string key
	// (e.g. the unexported struct keys other packages use with Context).
	if s, ok := key.(string); ok {
		if v := c.c.Get(s); v != nil {
			return v
		}
	}
	return c.Context.Value(key)
}
|
package timbler
import (
"log"
"os"
"path/filepath"
"testing"
"github.com/gin-gonic/gin"
)
// TestServer boots the gin server with the realtime websocket handler
// mounted and serves on :8080.
// NOTE(review): serve.Run blocks forever on success, so this test never
// returns; and log.Fatal calls os.Exit, bypassing the testing framework —
// prefer t.Fatal and running the server in a goroutine with a shutdown.
func TestServer(t *testing.T) {
	// start server
	serve := gin.Default()
	ws := &RealtimeWS{}
	// Resolve the directory of the test binary; presumably where static
	// assets live — TODO confirm InitWS's expectation.
	dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		log.Fatal(err)
	}
	ws.InitWS(serve, dir)
	serve.Run(":8080")
}
|
package main
import (
"fmt"
"go/ast"
"go/parser"
"go/token"
)
// main parses a boolean Go expression and dumps its AST to stdout.
func main() {
	const expr = `a == 1 && b == 2`
	fset := token.NewFileSet()
	parsed, err := parser.ParseExpr(expr)
	if err != nil {
		fmt.Println(err)
		return
	}
	ast.Print(fset, parsed)
}
|
package base
// BaseModel holds the common columns embedded in every gorm model.
type BaseModel struct {
	// Struct tag fixed: key:"value" pairs must be space-separated. The
	// original `gorm:"primary_key"json:"id"` is a malformed tag, so
	// encoding/json never saw the "id" name for this field.
	Id         uint      `gorm:"primary_key" json:"id"`
	CreateTime LocalTime `json:"createTime"`
	UpdateTime LocalTime `json:"updateTime"`
}
|
/*
获取黄金的行情,从新浪财经
黄金地址 : http://gu.sina.cn/m/?vt=1&cid=76613#/futures/foreign
纽约黄金 : <li data-symbol="hf_GC"
聚合数据的黄金数据接口API : http://web.juhe.cn:8080/finance/gold/shgold?key=您申请的APPKEY
每两分钟更新一次,这个接口可能也是爬取别的站的,但是调用比直接爬取新浪财经快,
作为备用方案
*/
package main
// goldMain is the entry point for fetching gold market quotes
// (translated from the original Chinese comment).
// NOTE(review): the empty for{} busy-spins a CPU core (and, with nothing
// else to schedule, the runtime reports a deadlock); add a ticker/sleep
// when the polling body is implemented.
func goldMain() {
	for {
	}
}
|
package deck
import (
"testing"
"math/rand"
)
// TestExampleCard checks Card.String() for each suit plus the Joker,
// which renders without a rank.
func TestExampleCard(t *testing.T) {
	AceHeart := Card{Rank: Ace, Suit: Heart}.String()
	if AceHeart != "Ace of Hearts" {
		t.Errorf("Failed")
	}
	TwoSpade := Card{Rank: Two, Suit: Spade}.String()
	if TwoSpade != "Two of Spades" {
		t.Errorf("Failed")
	}
	NineDiamond := Card{Rank: Nine, Suit: Diamond}.String()
	if NineDiamond != "Nine of Diamonds" {
		t.Errorf("Failed")
	}
	JackClub := Card{Rank: Jack, Suit: Club}.String()
	if JackClub != "Jack of Clubs" {
		t.Errorf("Failed")
	}
	Joker := Card{Suit: Joker}.String()
	if Joker != "Joker" {
		t.Errorf("Failed")
	}
}
// TestNew checks that a fresh deck contains the standard 52 cards.
func TestNew(t *testing.T) {
	if deck := New(); len(deck) != 52 {
		t.Errorf("Card deck wrong size")
	}
}
// TestDefaultSort checks that DefaultSort places the Ace of Spades first.
func TestDefaultSort(t *testing.T) {
	cards := New(DefaultSort)
	exp := Card{Rank: Ace, Suit: Spade}
	if cards[0] != exp {
		t.Error("Expected Ace of Spades as first card. Received:", cards[0])
	}
}
// TestJokers checks that New(Jokers(3)) adds exactly three jokers.
func TestJokers(t *testing.T) {
	deck := New(Jokers(3))
	jokers := 0
	for _, card := range deck {
		if card.Suit == Joker {
			jokers++
		}
	}
	if jokers != 3 {
		t.Error("Expected 3 Jokers, received:", jokers)
	}
}
// TestFilter checks that Filter removes every two and three from the deck.
func TestFilter(t *testing.T) {
	filter := func(card Card) bool {
		// Use the named rank constant consistently: the original compared
		// against the raw literal 2 here but Three below, relying on
		// Two == 2. Spelling it Two matches the assertion loop exactly.
		return card.Rank == Two || card.Rank == Three
	}
	cards := New(Filter(filter))
	for _, c := range cards {
		if c.Rank == Two || c.Rank == Three {
			t.Error("Expected all twos and threes to be filtered")
		}
	}
}
// TestDeck checks that Deck(3) triples the deck size.
func TestDeck(t *testing.T) {
	cards := New(Deck(3))
	// 13 ranks * 4 suits * 3 decks
	if len(cards) != 13*4*3 {
		t.Errorf("Expected %d cards, received %d cards.", 13*4*3, len(cards))
	}
}
// TestShuffle pins the package's shuffle source to a fixed seed so the
// permutation is deterministic, then spot-checks the first two cards.
func TestShuffle(t *testing.T) {
	// make shuffleRand deterministic
	// First call to shuffleRand.Perm(52) with seed 0 should be:
	// [40 35 ... ]
	shuffleRand = rand.New(rand.NewSource(0))
	orig := New()
	first := orig[40]
	second := orig[35]
	cards := New(Shuffle)
	if cards[0] != first {
		t.Errorf("Expected the first card to be %s, received %s", first, cards[0])
	}
	if cards[1] != second {
		t.Errorf("Expected the second card to be %s, received %s", second, cards[1])
	}
}
|
package template
import (
"path/filepath"
"scaffold/core/input"
)
// Scaffold bootstraps a project
func Scaffold(ans *input.UserAnswers, templatePaths map[string]string) error {
// TODO: platform independent implementation
// TODO: replace project name in files
// TODO: bootstrap.sh script
dst, err := filepath.Abs(ans.Location)
if err != nil {
return err
}
err = copyFolder(templatePaths[ans.Template], dst)
return err
}
|
//complete reference from: https://github.com/gorilla/websocket/tree/master/examples/chat
package main
import (
"flag"
"log"
"net/http"
)
var addr = flag.String("addr", ":12345", "http server port address")
func homeHandler(w http.ResponseWriter, r *http.Request) {
log.Println("home:", r.URL)
if r.URL.Path != "/" {
http.Error(w, "Not found", 404)
return
}
if r.Method != "GET" {
http.Error(w, "Method not allowed", 405)
return
}
http.ServeFile(w, r, "home.html")
}
// main starts the chat hub, registers the HTTP handlers, and serves on the
// port given by the -addr flag.
func main() {
	flag.Parse()
	hub := initServer()
	go hub.run()
	http.HandleFunc("/", homeHandler)
	http.HandleFunc("/ws", func(w http.ResponseWriter, r *http.Request) {
		handleClient(hub, w, r)
	})
	if err := http.ListenAndServe(*addr, nil); err != nil {
		log.Fatal("ListenAndServe", err)
	}
}
|
package main
import (
"sync"
"testing"
"time"
)
// 1 tx
func TestPattern1(t *testing.T) {
db := NewTestDB()
tx := NewTx(db)
if err := tx.Insert("key1", "value1"); err != nil {
t.Fatalf("failed to insert: %v\n", err)
}
if value, err := tx.Read("key1"); err != nil || value != "value1" {
t.Fatalf("failed to read: %v\n", err)
}
if err := tx.Insert("key2", "value2"); err != nil {
t.Fatalf("failed to insert: %v\n", err)
}
if err := tx.Update("key1", "new_value1"); err != nil {
t.Fatalf("failed to update: %v\n", err)
}
if err := tx.Insert("key3", "value3"); err != nil {
t.Fatalf("failed to insert: %v\n", err)
}
if err := tx.Delete("key2"); err != nil {
t.Fatalf("failed to delete: %v\n", err)
}
if err := tx.Commit(); err != nil {
t.Fatalf("failed to commit: %v", err)
}
tx.DestructTx()
tx = NewTx(db)
if err := tx.Insert("key4", "value4"); err != nil {
t.Fatalf("failed to insert: %v\n", err)
}
newTx := NewTx(db)
if _, err := newTx.Read("key4"); err == nil {
t.Fatal("data after commit exists")
}
}
// 2 tx concurrent
func TestPattern2(t *testing.T) {
db := NewTestDB()
v1 := &Version{
key: "key1",
value: "value1",
wTs: 0,
rTs: 0,
prev: nil,
deleted: false,
}
v2 := &Version{
key: "key2",
value: "value2",
wTs: 0,
rTs: 0,
prev: nil,
deleted: false,
}
db.index.Store("key1", &Record{
key: "key1",
last: v1,
mu: sync.Mutex{},
})
db.index.Store("key2", &Record{
key: "key2",
last: v2,
mu: sync.Mutex{},
})
tx1 := NewTx(db)
tx2 := NewTx(db)
// tx1: r1(1) i1(3) u1(2) /d1(1)/ c1(fail)
// tx2: /r2(1)/ r2(3) c2(success)
if value, err := tx1.Read("key1"); err != nil || value != "value1" {
t.Fatalf("failed to read: %v\n", err)
}
if value, err := tx2.Read("key1"); err != nil || value != "value1" {
t.Fatalf("failed to read: %v\n", err)
}
if err := tx1.Insert("key3", "value3"); err != nil {
t.Fatalf("failed to insert: %v\n", err)
}
if _, err := tx2.Read("key3"); err == nil {
t.Fatal("should be failed")
}
if err := tx1.Update("key2", "new_value2"); err != nil {
t.Fatalf("failed to update: %v\n", err)
}
if err := tx1.Delete("key1"); err != nil {
t.Errorf("failed to delete: %v", err)
}
if err := tx2.Commit(); err != nil {
t.Fatalf("failed to commit: %v", err)
}
if err := tx1.Commit(); err == nil {
t.Fatal("should be failed")
}
tx1.DestructTx()
tx2.DestructTx()
tx1 = NewTx(db)
// tx1: d1(1) c1(success)
if err := tx1.Delete("key1"); err != nil {
t.Errorf("failed to delete: %v", err)
}
if err := tx1.Commit(); err != nil {
t.Fatalf("failed to commit: %v", err)
}
tx1.DestructTx()
tx3 := NewTx(db)
if value, err := tx3.Read("key1"); err == nil {
t.Fatalf("should be failed: read %v", value)
}
if _, err := tx3.Read("key1"); err == nil {
t.Fatal("should be failed")
}
if value, err := tx3.Read("key2"); err != nil || value != "value2" {
t.Fatalf("failed to read: [ERROR] %v [VALUE], \n", err)
}
if value, err := tx3.Read("key2"); err != nil || value != "value2" {
t.Fatalf("failed to read: [ERROR] %v [VALUE], \n", err)
}
if value, err := tx3.Read("key3"); err == nil {
t.Fatalf("failed to read: %v returned\n", value)
}
if value, err := tx3.Read("key3"); err == nil {
t.Fatalf("failed to read: %v returned\n", value)
}
}
// 2 tx parallel
func TestPattern3(t *testing.T) {
db := NewTestDB()
v1 := &Version{
key: "key1",
value: "value1",
wTs: 0,
rTs: 0,
prev: nil,
deleted: false,
}
v2 := &Version{
key: "key2",
value: "value2",
wTs: 0,
rTs: 0,
prev: nil,
deleted: false,
}
v3 := &Version{
key: "key3",
value: "value3",
wTs: 0,
rTs: 0,
prev: nil,
deleted: false,
}
db.index.Store("key1", &Record{
key: "key1",
last: v1,
mu: sync.Mutex{},
})
db.index.Store("key2", &Record{
key: "key2",
last: v2,
mu: sync.Mutex{},
})
db.index.Store("key3", &Record{
key: "key3",
last: v3,
mu: sync.Mutex{},
})
tx1 := NewTx(db)
tx2 := NewTx(db)
wg := sync.WaitGroup{}
// tx1
wg.Add(1)
go func() {
defer wg.Done()
if value, err := tx1.Read("key1"); err != nil || value != "value1" {
t.Fatalf("failed to read: %v", err)
}
if err := tx1.Insert("key4", "value4"); err != nil {
t.Fatalf("failed to insert: %v", err)
}
time.Sleep(time.Second * 3)
if err := tx1.Commit(); err != nil {
t.Fatalf("failed to commit: %v", err)
}
tx1.DestructTx()
tx1 = NewTx(db)
}()
// tx2
wg.Add(1)
go func() {
defer wg.Done()
if value, err := tx2.Read("key1"); err != nil || value != "value1" {
t.Fatalf("failed to read: %v", err)
}
if err := tx2.Update("key2", "new_value2"); err != nil {
t.Fatalf("failed to update: %v", err)
}
if err := tx2.Delete("key3"); err != nil {
t.Fatalf("failed to delete: %v", err)
}
time.Sleep(time.Second * 3)
if err := tx2.Commit(); err != nil {
t.Fatalf("failed to commit: %v", err)
}
tx2.DestructTx()
tx2 = NewTx(db)
}()
wg.Wait()
if v, exist := db.index.Load("key1"); !exist || v.(*Record).last.value != "value1" {
t.Fatalf("wrong result: %v", v.(Record).last.value)
}
if v, exist := db.index.Load("key2"); !exist || v.(*Record).last.value != "new_value2" {
t.Fatalf("wrong result: %v", v.(Record).last.value)
}
if v, exist := db.index.Load("key3"); !exist || !(v.(*Record).last.value == "" && v.(*Record).last.deleted) {
t.Fatalf("wrong result: %v, should be deleted", v.(Record).last.value)
}
if v, exist := db.index.Load("key4"); !exist || v.(*Record).last.value != "value4" {
t.Fatalf("wrong result: %v", v.(Record).last.value)
}
// tx1: r(1) i(4) c(success)
// tx2: r(1) u(2) d(3) c(success)
// r1(1) r2(1) u2(2) d2(3) i1(4) c1 c2
// ========================================================================================
// tx1
if value, err := tx1.Read("key1"); err != nil || value != "value1" {
t.Fatalf("failed to read: %v", err)
}
wg.Add(1)
go func() {
defer wg.Done()
if err := tx1.Insert("key6", "value6"); err != nil {
t.Fatalf("failed to insert: %v", err)
}
time.Sleep(time.Second * 3)
if err := tx1.Commit(); err != nil {
t.Fatalf("failed to commit: %v", err)
}
tx1.DestructTx()
}()
// tx2
wg.Add(1)
go func() {
defer wg.Done()
if err := tx2.Update("key1", "new_value1"); err != nil {
t.Fatalf("failed to update: %v", err)
}
if err := tx2.Update("key2", "new_new_value2"); err != nil {
t.Fatalf("failed to update: %v", err)
}
if err := tx2.Delete("key4"); err != nil {
t.Fatalf("failed to delete: %v", err)
}
if err := tx2.Insert("key5", "value5"); err != nil {
t.Fatalf("failed to insert: %v", err)
}
time.Sleep(time.Second * 3)
tx2.Abort()
tx2.DestructTx()
}()
wg.Wait()
if v, exist := db.index.Load("key1"); !exist || v.(*Record).last.value != "value1" {
t.Fatalf("wrong result: %v", v.(*Record).last.value)
}
if v, exist := db.index.Load("key2"); !exist || v.(*Record).last.value != "new_value2" {
t.Fatalf("wrong result: %v", v.(*Record).last.value)
}
if v, exist := db.index.Load("key3"); !exist || !(v.(*Record).last.value == "" && v.(*Record).last.deleted) {
t.Fatalf("wrong result: %v, should be deleted", v.(*Record).last.value)
}
if v, exist := db.index.Load("key4"); !exist || v.(*Record).last.value != "value4" {
t.Fatalf("wrong result: %v", v.(*Record).last.value)
}
if v, exist := db.index.Load("key5"); exist {
t.Fatalf("wrong result: %v, should not be deleted", v.(*Record).last.value)
}
if v, exist := db.index.Load("key6"); !exist || v.(*Record).last.value != "value6" {
t.Fatalf("wrong result: %v", v.(*Record).last.value)
}
// tx1: r(1) i(6) c(success)
// tx2: u(1) u(2) d(4) i(5) a
// r1(1) u2(1) i1(6) u2(2) d2(4) c1 i2(5) c2
}
// TestLogicalDelete checks that Delete marks a version deleted (tombstone)
// rather than removing the record, and that a conflicting transaction's
// later insert-commit fails.
func TestLogicalDelete(t *testing.T) {
	db := NewTestDB()
	v1 := &Version{
		key:     "key1",
		value:   "value1",
		wTs:     0,
		rTs:     0,
		prev:    nil,
		deleted: false,
	}
	db.index.Store("key1", &Record{
		key:  "key1",
		last: v1,
		mu:   sync.Mutex{},
	})
	tx1 := NewTx(db)
	tx2 := NewTx(db)
	// Both transactions delete the same key; tx1 commits first.
	if err := tx1.Delete("key1"); err != nil {
		t.Fatalf("failed to delete: %v\n", err)
	}
	if err := tx2.Delete("key1"); err != nil {
		t.Fatalf("failed to delete: %v\n", err)
	}
	if err := tx1.Commit(); err != nil {
		t.Fatalf("failed to commit: %v", err)
	}
	tx1.DestructTx()
	tx1 = NewTx(db)
	// The record still exists in the index but its last version is a
	// tombstone, so reads must fail.
	if record, exist := db.index.Load("key1"); !exist || !record.(*Record).last.deleted {
		t.Fatal("logical delete failed")
	}
	if _, err := tx1.Read("key1"); err == nil {
		t.Fatal("should be failed")
	}
	// tx2's write-set conflicts with tx1's committed delete: its commit
	// must be rejected.
	if err := tx2.Insert("key1", "value1"); err != nil {
		t.Fatalf("failed to insert: %v\n", err)
	}
	if err := tx2.Commit(); err == nil {
		t.Fatal("should be failed")
	}
}
// TestTx_Read verifies a transaction's read lookup order: the read-set is
// consulted first, then the write-set, then the shared database index.
func TestTx_Read(t *testing.T) {
	db := NewTestDB()
	tx := NewTx(db)
	// record in read-set
	tx.readSet["test_read"] = &Version{
		key: "test_read",
		value: "ans",
		wTs: 0,
		rTs: 0,
		prev: nil,
		deleted: false,
	}
	if value, err := tx.Read("test_read"); err != nil || value != "ans" {
		t.Errorf("failed to read data in read-set: %v\n", err)
	}
	// record in write-set
	tx.writeSet["test_read"] = append(tx.writeSet["test_read"], &Operation{
		cmd: INSERT,
		version: &Version{
			key: "test_read",
			value: "ans",
			wTs: 0,
			rTs: 0,
			prev: nil,
			deleted: false,
		},
	})
	if value, err := tx.Read("test_read"); err != nil || value != "ans" {
		t.Errorf("failed to read data in write-set: %v\n", err)
	}
	// record in Index: clear both local sets so the read has to fall
	// through to the shared index.
	tx.readSet = make(ReadSet)
	tx.writeSet = make(WriteSet)
	v := &Version{
		key: "test_read",
		value: "ans",
		wTs: 0,
		rTs: 0,
		prev: nil,
		deleted: false,
	}
	tx.db.index.Store("test_read", &Record{
		key: "test_read",
		last: v,
		mu: sync.Mutex{},
	})
	if value, err := tx.Read("test_read"); err != nil || value != "ans" {
		t.Errorf("failed to read data in Index: %v\n", err)
	}
	tx.DestructTx()
}
// TestTx_Insert verifies that Insert buffers an operation carrying the
// supplied value in the transaction's write-set.
func TestTx_Insert(t *testing.T) {
	db := NewTestDB()
	tx := NewTx(db)
	if err := tx.Insert("test_insert", "ans"); err != nil {
		t.Errorf("failed to insert data: %v\n", err)
	}
	operations, exist := tx.writeSet["test_insert"]
	// Check existence (and non-emptiness) BEFORE indexing. The original
	// code read operations[0] first, which panics with an index-out-of-range
	// instead of reporting a test failure when the entry is missing.
	if !exist || len(operations) == 0 {
		t.Fatal("not exist")
	}
	op := operations[0]
	if op.version.value != "ans" {
		t.Error("failed to insert data (wrong value)")
	}
	tx.DestructTx()
}
// TestTx_Update verifies that Update appends a new operation (with the new
// value) after an already-pending INSERT for the same key.
func TestTx_Update(t *testing.T) {
	db := NewTestDB()
	tx := NewTx(db)
	// Seed the write-set with a pending INSERT for the key under test.
	pending := &Operation{
		cmd: INSERT,
		version: &Version{
			key: "test_update",
			value: "ans",
			wTs: 0,
			rTs: 0,
			prev: nil,
			deleted: false,
		},
	}
	tx.writeSet["test_update"] = append(tx.writeSet["test_update"], pending)
	if err := tx.Update("test_update", "new_ans"); err != nil {
		t.Errorf("failed to update data: %v\n", err)
	}
	// The update must be buffered as a second operation for the key.
	got := tx.writeSet["test_update"][1].version.value
	if got != "new_ans" {
		t.Errorf("failed to update (wrong value: %v)", got)
	}
	tx.DestructTx()
}
// TestTx_Delete verifies that Delete appends a DELETE operation after an
// already-pending INSERT for the same key in the write-set.
func TestTx_Delete(t *testing.T) {
	db := NewTestDB()
	tx := NewTx(db)
	// Seed the write-set with a pending INSERT so Delete has an entry to
	// logically remove.
	tx.writeSet["test_delete"] = append(tx.writeSet["test_delete"], &Operation{
		cmd: INSERT,
		version: &Version{
			// Fixed copy-paste error: the original seeded key was
			// "test_update", which did not match the write-set entry.
			key: "test_delete",
			value: "ans",
			wTs: 0,
			rTs: 0,
			prev: nil,
			deleted: false,
		},
	})
	if err := tx.Delete("test_delete"); err != nil {
		t.Errorf("failed to delete data: %v\n", err)
	}
	// The delete must be buffered as a second operation with cmd DELETE.
	if len(tx.writeSet) != 1 || tx.writeSet["test_delete"][1].cmd != DELETE {
		t.Error("failed to delete data")
	}
	tx.DestructTx()
}
// TestTx_Commit verifies that committed write-set operations (an UPDATE and
// a DELETE) are applied to the shared index and that the write-set is
// cleared afterwards.
func TestTx_Commit(t *testing.T) {
	db := NewTestDB()
	// Seed the index with two committed records.
	v1 := &Version{
		key: "test_commit1",
		value: "ans1",
		wTs: 0,
		rTs: 0,
		prev: nil,
		deleted: false,
	}
	v2 := &Version{
		key: "test_commit2",
		value: "ans2",
		wTs: 0,
		rTs: 0,
		prev: nil,
		deleted: false,
	}
	db.index.Store("test_commit1", &Record{
		key: "test_commit1",
		last: v1,
		mu: sync.Mutex{},
	})
	db.index.Store("test_commit2", &Record{
		key: "test_commit2",
		last: v2,
		mu: sync.Mutex{},
	})
	tx := NewTx(db)
	// Buffer an UPDATE of test_commit1 and a DELETE of test_commit2.
	tx.writeSet["test_commit1"] = append(tx.writeSet["test_commit1"], &Operation{
		cmd: UPDATE,
		version: &Version{
			key: "test_commit1",
			value: "new_ans",
			wTs: 0,
			rTs: 0,
			prev: nil,
			deleted: false,
		},
	})
	tx.writeSet["test_commit2"] = append(tx.writeSet["test_commit2"], &Operation{
		cmd: DELETE,
		version: &Version{
			key: "test_commit2",
			value: "",
			wTs: 0,
			rTs: 0,
			prev: nil,
			deleted: false,
		},
	})
	if err := tx.Commit(); err != nil {
		t.Fatalf("failed to commit: %v", err)
	}
	tx.DestructTx()
	tx = NewTx(db)
	// Fixed two defects in the original assertions:
	//  1. `exist && ...` silently passed when the record was missing; the
	//     record must exist after a successful commit, so check !exist.
	//  2. The error paths asserted record.(Record), but the index stores
	//     *Record values, so a failing assertion panicked the test instead
	//     of reporting a failure.
	record1, exist := tx.db.index.Load("test_commit1")
	if !exist {
		t.Error("test_commit1 is missing from the index after commit")
	} else if got := record1.(*Record).last.value; got != "new_ans" {
		t.Errorf("update log is not committed: %v", got)
	}
	record2, exist := tx.db.index.Load("test_commit2")
	if !exist {
		t.Error("test_commit2 is missing from the index after commit")
	} else if got := record2.(*Record).last.value; got != "" {
		t.Errorf("delete log is not committed: %v", got)
	}
	if len(tx.writeSet) != 0 {
		t.Error("write-set is not cleared")
	}
	tx.DestructTx()
}
|
package response
import (
"net/http"
"github.com/jinzhu/gorm"
"github.com/labstack/echo"
)
// ModelError for decorating responses: the JSON body returned to clients
// when an error occurs.
type ModelError struct {
	Message string `json:"message"` // human-readable error message
}
// APIResponse - returns a decorated json response.
//
// A gorm record-not-found error maps to 404, any other non-nil error to
// 400 (both wrapped in a ModelError body), and a nil error returns the
// model with 200.
func APIResponse(err error, c echo.Context, model interface{}) error {
	// Early returns keep the happy path unindented (idiomatic Go: no
	// `else` after a terminating branch).
	var e ModelError
	if gorm.IsRecordNotFoundError(err) {
		e.Message = err.Error()
		return c.JSON(http.StatusNotFound, e)
	}
	if err != nil {
		e.Message = err.Error()
		return c.JSON(http.StatusBadRequest, e)
	}
	return c.JSON(http.StatusOK, model)
}
|
package main
import "fmt"
//func 函数名(参数)(返回值){
// 函数体
//}
// f1 returns the sum of x and y (parameter types spelled out in full).
func f1(x int, y int) int {
	sum := x + y
	return sum
}
// intSum returns x + y; demonstrates the shorthand form where consecutive
// parameters share one type declaration.
func intSum(x, y int) int {
	return y + x
}
// intSum2 sums a variadic list of ints and returns the total.
func intSum2(x ...int) int {
	fmt.Println(x) // inside the function, x is a slice of the arguments
	total := 0
	for i := 0; i < len(x); i++ {
		total += x[i]
	}
	return total
}
// intSum3 adds a required first int plus any number of further ints;
// the variadic parameter must come last.
func intSum3(x int, y ...int) int {
	fmt.Println(x, y)
	total := x
	for i := range y {
		total += y[i]
	}
	return total
}
// calc returns both the sum and the difference of x and y.
func calc(x, y int) (int, int) {
	return x + y, x - y
}
// calc1 returns the sum and difference of x and y, demonstrating named
// result parameters (here returned explicitly for clarity).
func calc1(x, y int) (sum, sub int) {
	sum, sub = x+y, x-y
	return sum, sub
}
// Define the package-level (global) variable num.
var num int64 = 10
// testGlobalVar prints num, showing that functions can read package-level
// variables directly.
func testGlobalVar() {
	fmt.Printf("num=%d\n", num) // the global num is visible inside the function
}
// testNum declares a local num that shadows the package-level num,
// showing that the local variable takes precedence inside the function.
func testNum() {
	num := 100
	fmt.Printf("num=%d\n", num) // prints the local value (100), not the global
}
// fn computes n! (factorial). Any n <= 1 yields 1, matching the original
// recursive base case; this version iterates instead of recursing.
func fn(n int64) int64 {
	result := int64(1)
	for i := int64(2); i <= n; i++ {
		result *= i
	}
	return result
}
// main computes 3! via fn and prints it. Earlier experiments are kept as
// comments, as in the original.
func main() {
	//a := f1(1,2)
	//fmt.Println(a)
	//sum := intSum2(1,2,3,4)
	//s1,s2 := calc(15,10)
	//fmt.Println(s1,s2)
	//testNum()
	factorial := fn(3)
	fmt.Println(factorial)
}
|
package main
import (
_ "visitor/config"
_ "visitor/logger"
"fmt"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
"math/rand"
"time"
"visitor/app/server"
)
// main seeds the RNG, reads the configured port, and runs the HTTP server,
// logging any error that stops it.
func main() {
	// Seed the global random number generator with the current time.
	rand.Seed(time.Now().Unix())
	// Start the server on the configured port.
	port := viper.GetString("server.port")
	log.Info("Server listening on ", port)
	router := server.Init()
	if err := router.Run(":" + port); err != nil {
		fmt.Println(err)
		log.Info("Server Stop, Error: ", err)
	}
}
|
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// time: O(n), space: O(n)
// levelOrder runs a breadth-first traversal, collecting node values one
// level per inner slice.
func levelOrder(root *TreeNode) [][]int {
	var result [][]int
	if root == nil {
		return result
	}
	queue := []*TreeNode{root}
	for len(queue) > 0 {
		// Everything currently queued belongs to the same level.
		levelSize := len(queue)
		level := make([]int, 0, levelSize)
		for i := 0; i < levelSize; i++ {
			node := queue[0]
			queue = queue[1:]
			level = append(level, node.Val)
			if node.Left != nil {
				queue = append(queue, node.Left)
			}
			if node.Right != nil {
				queue = append(queue, node.Right)
			}
		}
		result = append(result, level)
	}
	return result
}
|
package api
import (
"context"
pb_api "github.com/inari111/layered-architecture-example-2020/rpc/api"
)
// taskService is a stub implementation of pb_api.TaskService.
type taskService struct {
}
// NewTaskService returns a new TaskService implementation.
func NewTaskService() pb_api.TaskService {
	return &taskService{}
}
// Create is not implemented yet; it returns (nil, nil).
// NOTE(review): a nil response with a nil error may surprise callers —
// confirm this stub is intentional before shipping.
func (t *taskService) Create(ctx context.Context, request *pb_api.TaskCreateRequest) (*pb_api.TaskCreateResponse, error) {
	return nil, nil
}
|
package types
import (
"bytes"
"context"
"fmt"
"time"
"github.com/golang/protobuf/jsonpb"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/batchcorp/plumber-schemas/build/go/protos/records"
"github.com/batchcorp/plumber/backends"
"github.com/batchcorp/plumber/relay"
"github.com/batchcorp/plumber/util"
)
// Relay holds the runtime state and persisted configuration of one relay.
// Fields tagged `json:"-"` are runtime-only and excluded from persisted
// JSON (see MarshalJSON/UnmarshalJSON below, which serialize Options only).
type Relay struct {
	Active bool `json:"-"` // set from Options.XActive on unmarshal
	Id string `json:"-"` // set from Options.XRelayId on unmarshal
	CancelCtx context.Context `json:"-"` // context used to stop the backend and workers
	CancelFunc context.CancelFunc `json:"-"` // cancels CancelCtx
	Backend backends.Backend `json:"-"` // backend the relay reads records from
	Options *opts.RelayOptions `json:"config"` // persisted relay configuration
	log *logrus.Entry
}
// StartRelay starts a configured relay, its workers, and the gRPC workers;
// StartRelay blocks for up to "delay", returning early with an error if the
// backend relay fails during that window, and nil otherwise.
func (r *Relay) StartRelay(delay time.Duration) error {
	r.log = logrus.WithField("pkg", "types/relay")
	// relayCh carries records from the backend to the gRPC relay workers;
	// localErrCh receives a startup error from the goroutine below.
	relayCh := make(chan interface{}, 1)
	localErrCh := make(chan *records.ErrorRecord, 1)
	// Needed to satisfy relay.Options{}, not used
	stubMainCtx, stubCancelFunc := context.WithCancel(context.Background())
	relayCfg := &relay.Config{
		Token: r.Options.CollectionToken,
		GRPCAddress: r.Options.XStreamdalGrpcAddress,
		NumWorkers: 5,
		Timeout: util.DurationSec(r.Options.XStreamdalGrpcTimeoutSeconds),
		RelayCh: relayCh,
		DisableTLS: r.Options.XStreamdalGrpcDisableTls,
		BatchSize: r.Options.BatchSize,
		Type: r.Backend.Name(),
		ServiceShutdownCtx: r.CancelCtx,
		MainShutdownCtx: stubMainCtx, // Needed to satisfy relay.Options{}, not used in server mode
		MainShutdownFunc: stubCancelFunc, // Needed to satisfy relay.Options{}, not used in server mode
	}
	grpcRelayer, err := relay.New(relayCfg)
	if err != nil {
		return errors.Wrap(err, "unable to create new relayer instance")
	}
	// Launch gRPC Workers
	if err := grpcRelayer.StartWorkers(r.CancelCtx); err != nil {
		return errors.Wrap(err, "unable to start gRPC relay workers")
	}
	// Run the backend relay loop in the background; on failure, report the
	// error via localErrCh and cancel the relay context to stop the workers.
	go func() {
		// TODO: Need to update relayer to use error channel
		if err := r.Backend.Relay(r.CancelCtx, r.Options, relayCh, nil); err != nil {
			util.WriteError(r.log, localErrCh, fmt.Errorf("error during relay (id: %s): %s", r.Id, err))
			r.CancelFunc()
		}
		r.log.Debugf("relay id '%s' exiting", r.Id)
	}()
	timeAfterCh := time.After(delay)
	// Will block for =< delay
	select {
	case <-timeAfterCh:
		r.log.Debugf("relay id '%s' success after %s wait", r.Id, delay.String())
		break
	case err := <-localErrCh:
		// NOTE(review): err is a *records.ErrorRecord; "err.Error" is
		// presumably the proto's Error field, not a call — confirm, since a
		// method value here would print as a function address under %s.
		return fmt.Errorf("relay startup failed for id '%s': %s", r.Id, err.Error)
	}
	return nil
}
// MarshalJSON marshals a tunnel to JSON
func (r *Relay) MarshalJSON() ([]byte, error) {
buf := bytes.NewBuffer([]byte(``))
m := jsonpb.Marshaler{}
if err := m.Marshal(buf, r.Options); err != nil {
return nil, errors.Wrap(err, "could not marshal opts.RelayOptions")
}
return buf.Bytes(), nil
}
// UnmarshalJSON unmarshals JSON into a tunnel struct
func (r *Relay) UnmarshalJSON(v []byte) error {
relay := &opts.RelayOptions{}
if err := jsonpb.Unmarshal(bytes.NewBuffer(v), relay); err != nil {
return errors.Wrap(err, "unable to unmarshal stored relay")
}
r.Options = relay
r.Id = relay.XRelayId
r.Active = relay.XActive
return nil
}
|
// Copyright 2021, Pulumi Corporation. All rights reserved.
// +build tools
// Place any runtime dependencies as imports in this file.
// Go modules will be forced to download and install them.
package tools
|
package golog_test
import (
"testing"
"time"
"github.com/bingoohuang/golog"
"github.com/sirupsen/logrus"
)
// TestSetupLogrus configures logrus via golog with aggressive rotation
// (per-second rotation, 5s max age, gzip after 3s) and emits logs for ~10
// seconds so the rotation behavior can be observed on disk.
// NOTE(review): this test sleeps 10 seconds — it is an inspection test,
// not a fast unit test.
func TestSetupLogrus(t *testing.T) {
	golog.SetupLogrus(nil, "level=debug,rotate=.yyyy-mm-dd-HH-mm-ss,maxAge=5s,gzipAge=3s", "")
	for i := 0; i < 10; i++ {
		logrus.Warnf("这是警告信息 %d", i)
		logrus.Infof("这是普通信息 %d", i)
		logrus.Debugf("这是调试信息 %d", i)
		time.Sleep(1 * time.Second)
	}
}
// TestSetupLogrusLayout is like TestSetupLogrus but supplies a custom log
// line layout (time, level, pid, goroutine id, trace, caller, fields, msg).
// NOTE(review): this test also sleeps 10 seconds — inspection test only.
func TestSetupLogrusLayout(t *testing.T) {
	layout := `%t{HH:mm:ss.SSS} %5l{length=5} %pid --- [GID=%gid] [%trace] %-20caller : %fields %msg%n`
	golog.SetupLogrus(nil, "level=debug,rotate=.yyyy-mm-dd-HH-mm", layout)
	for i := 0; i < 10; i++ {
		logrus.Warnf("这是警告信息 %d", i)
		logrus.Infof("这是普通信息 %d", i)
		logrus.Debugf("这是调试信息 %d", i)
		time.Sleep(1 * time.Second)
	}
}
|
// climbStairs returns the number of distinct ways to climb n stairs taking
// 1 or 2 steps at a time (the Fibonacci-like recurrence f(n)=f(n-1)+f(n-2)).
//
// Improvements over the original: n <= 0 now returns 0 instead of panicking
// on an empty-slice index, and the DP table is replaced by two rolling
// variables (O(1) space instead of O(n)).
func climbStairs(n int) int {
	// Guard: the original indexed steps[0] on a zero-length slice for n <= 0.
	if n <= 0 {
		return 0
	}
	if n <= 2 {
		return n // 1 way for one stair, 2 ways for two
	}
	prev, cur := 1, 2 // ways to reach stair 1 and stair 2
	for i := 3; i <= n; i++ {
		prev, cur = cur, prev+cur
	}
	return cur
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.