text
stringlengths 11
4.05M
|
|---|
// The MIT License (MIT)
// Copyright (c) 2014 Christopher Lillthors
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package Protocol
import (
"regexp"
)
// Wire-protocol message codes. These numeric values are part of the
// client/server protocol and must not be changed or reordered.
const (
	// Available command codes.
	Balancecode     = 1
	Withdrawcode    = 2
	Depositcode     = 3
	Menucode        = 4
	RequestMenucode = 5
	Debugcode       = 6
	NewMenucode     = 7
	// Available login codes
	LoginCode          = 10
	LoginResponseOK    = 11
	LoginResponseError = 12
	// Available response codes
	BalanceResponseCode       = 13
	WithdrawResponseCode      = 14
	DepositResponseCode       = 15
	BalanceResponseErrorCode  = 16
	WithdrawResponseErrorCode = 17
	DepositResponseErrorCode  = 18
	// Available logout codes
	Logoutcode = 19
)
// Input-validation patterns, compiled once at package init.
var (
	CardnumberTest = regexp.MustCompile(`^\d{4}$`) // exactly 4 digits
	PassnumberTest = regexp.MustCompile(`^\d{2}$`) // exactly 2 digits
	MoneyTest      = regexp.MustCompile(`^\d+$`)   // at least one digit
	// Exactly 2 digits. The previous pattern `^(\d+){2}$` actually matched
	// ANY run of two or more digits, contradicting its own comment.
	ScratchTest = regexp.MustCompile(`^\d{2}$`)
)
// Message is the fixed-size (7-byte) frame exchanged between client and
// server: a command code, a number field, and a numeric payload.
type Message struct {
	Code    uint8  // 1 byte: one of the *code constants in this package
	Number  uint16 // 2 bytes
	Payload uint32 // 4 bytes
	// Total is 7 bytes
}
type Language struct {
Title string `json:"title"`
Banner string `json:"banner"`
Interactions struct {
Cardnumber string `json:"input_cardnumber"`
Password string `json:"input_password"`
} `json:"interactions"`
InitialCommands struct {
Balance string `json:"balance"`
Widthdraw string `json:"withdraw"`
Deposit string `json:"deposit"`
Logout string `json:"logout"`
} `json:"initial_commands"`
Responses struct {
Balance string `json:"balance"`
Deposit string `json":deposit"`
Withdraw string `json:"withdraw"`
} `json:"responses"`
Errors struct {
Balance string `json:"balance"`
Deposit string `json:"deposit"`
Withdraw string `json:"withdraw"`
Login_error string `json"login_error"`
Invalid_command string `json:"invalid_command"`
Invalid_language string `json:"invalid_language"`
Server_disconnected string `json:"server_disconnected"`
Incorrect_Pin string `json:"incorrect_pin"`
} `json:"errors"`
}
// MenuData is the JSON document describing the menu: a version string, the
// list of available language codes, and the per-language text keyed by
// language code.
type MenuData struct {
	Version   string              `json:"version"`
	Languages []string            `json:"languages"`
	Text      map[string]Language `json:"text"`
}
// Menu is the fixed-size menu frame: a command code followed by a 9-byte
// opaque payload.
type Menu struct {
	Code    uint8   // 1 byte
	Payload [9]byte // 9 bytes
}
|
// Package tuplespace provides an implementations of a tuple space for Go.
//
// It provides both an in-process asynchronous tuplespace (use
// NewTupleSpace) and a RESTful server binary.
//
// Two storage backends are currently available: leveldb and in-memory.
//
// To use the in-process tuple space:
//
// import (
// "github.com/alecthomas/tuplespace"
// "github.com/alecthomas/tuplespace/store"
// )
//
// func main() {
// ts, _ := tuplespace.NewTupleSpace(store.NewMemoryStore())
// ts.Send(tuplespace.Tuple{"cmd", "uname -a"}, 0)
// tuple, _ := ts.Take(tuplespace.Tuple{"cmd", nil}, 0)
// println(tuple.String())
// }
//
// Using the client for the server is similarly simple:
//
// import (
// "github.com/alecthomas/tuplespace"
// "github.com/alecthomas/tuplespace/client"
// )
//
// func main() {
// ts, _ := client.NewTupleClient("http://127.0.0.1:2619/tuplespace/")
// ts.Send(tuplespace.Tuple{"cmd", "uname -a"}, 0)
// tuple, _ := ts.Take(tuplespace.Tuple{"cmd", nil}, 0)
// println(tuple.String())
// }
//
// Install and run the server:
//
// $ go get github.com/alecthomas/tuplespace/bin/tuplespaced
// $ tuplespaced --log-level=info
// [22:26:27 EST 2013/12/05] [INFO] Starting server on http://127.0.0.1:2619/tuplespace/
// [22:26:27 EST 2013/12/05] [INFO] Compacting database
//
// You can test the tuplespace service with a basic command-line client:
//
// $ go get github.com/alecthomas/tuplespace/bin/tuplespace
//
// Send a bunch of tuples:
//
// $ tuplespace --copies=1000 send '["cmd", "uname -a"]'
//
// Take all tuples:
//
// $ time tuplespace takeall '["cmd", null]' | wc
//
// Python bindings are also available:
//
// $ pip install tuplespace
// $ python
// >>> import tuplespace
// >>> ts = tuplespace.TupleSpace()
// >>> ts.send(('cmd', 'uname -a'))
// >>> ts.take(('cmd', None))
// ('cmd', 'uname -a')
//
package tuplespace
import (
"fmt"
log "github.com/alecthomas/log4go"
"strings"
"sync"
"sync/atomic"
"time"
)
// tupleVisitor visits a tuple and returns an int result.
// NOTE(review): no caller is visible in this chunk — confirm the meaning of
// the returned value before relying on it.
type tupleVisitor func(tuple Tuple) int

// tupleWaiter is a pending read/take operation waiting for matching tuples.
type tupleWaiter struct {
	match   *TupleMatcher     // pattern incoming tuples must match
	matches chan []Tuple      // delivers matched tuples (buffered, capacity 1)
	timeout time.Time         // deadline; zero means wait forever
	actions int               // ActionTake/ActionOne bit flags
	err     chan error        // delivers cancellation/timeout errors (buffered, capacity 1)
	cancel  chan *tupleWaiter // shared channel used to request cancellation
}

// tupleSend is a batch of tuples to insert, with a shared expiry deadline.
type tupleSend struct {
	tuples  []Tuple
	timeout time.Time // expiry deadline; zero means never expire
}
// String renders the waiter for logging: its requested actions
// (TAKE/READ and ONE/ALL), its matcher, and the time remaining before it
// expires (zero duration when it never expires).
func (t *tupleWaiter) String() string {
	var timeout time.Duration
	if !t.timeout.IsZero() {
		// time.Until is the idiomatic (and staticcheck-clean) form of
		// t.timeout.Sub(time.Now()).
		timeout = time.Until(t.timeout)
	}
	actions := []string{}
	if t.actions&ActionTake != 0 {
		actions = append(actions, "TAKE")
	} else {
		actions = append(actions, "READ")
	}
	if t.actions&ActionOne != 0 {
		actions = append(actions, "ONE")
	} else {
		actions = append(actions, "ALL")
	}
	return fmt.Sprintf("ReadOperationHandle{%s, %v, %v}", strings.Join(actions, "|"), t.match, timeout)
}
// Cancel asks the space to drop this waiter (cancelWaiter will deliver
// CancelledReader on the err channel), then closes the matches channel so
// readers blocked on Get observe the close.
// NOTE(review): closing matches here can race with a concurrent fulfiller
// sending on it (processNewEntries/processNewWaiter) — confirm callers
// never Cancel a waiter that may still be matched.
func (r *tupleWaiter) Cancel() {
	r.cancel <- r
	close(r.matches)
}
// Get returns the channel on which the waiter's matching tuples are
// delivered (closed when the waiter is cancelled).
func (t *tupleWaiter) Get() chan []Tuple {
	return t.matches
}

// Error returns the channel on which cancellation or timeout errors are
// delivered.
func (t *tupleWaiter) Error() chan error {
	return t.err
}
// newWaiter builds a tupleWaiter for the given matcher, action flags and
// timeout. A zero timeout means "wait forever"; otherwise the deadline is
// now+timeout. The matches and err channels are buffered (capacity 1) so a
// fulfiller never blocks on delivery.
func newWaiter(cancel chan *tupleWaiter, match *TupleMatcher, timeout time.Duration, actions int) *tupleWaiter {
	w := &tupleWaiter{
		match:   match,
		matches: make(chan []Tuple, 1),
		err:     make(chan error, 1),
		actions: actions,
		cancel:  cancel,
	}
	if timeout != 0 {
		w.timeout = time.Now().Add(timeout)
	}
	return w
}
// TupleSpaceImpl is an in-process tuple space. Tuples are persisted in a
// pluggable TupleStore; pending readers are tracked as waiters and matched
// against tuples as they arrive.
type TupleSpaceImpl struct {
	store        TupleStore                   // storage backend
	waiters      map[*tupleWaiter]interface{} // pending read/take operations (guarded by waitersLock)
	waitersLock  sync.RWMutex
	cancel       chan *tupleWaiter // waiters asking to be cancelled
	shutdown     chan bool         // signals the run() loop to stop
	id           uint64            // NOTE(review): unused in this chunk — confirm before removing
	stats        TupleSpaceStats
	statsUpdated *sync.Cond // broadcast each time stats are refreshed; Stats() waits on it
}
// NewRawTupleSpace creates a new tuple space backed by the given storage
// engine and starts its background maintenance goroutine.
func NewRawTupleSpace(store TupleStore) *TupleSpaceImpl {
	space := &TupleSpaceImpl{
		store:        store,
		waiters:      map[*tupleWaiter]interface{}{},
		cancel:       make(chan *tupleWaiter, 16),
		shutdown:     make(chan bool, 1),
		statsUpdated: sync.NewCond(new(sync.Mutex)),
	}
	go space.run()
	return space
}
// run is the space's background loop: it services waiter cancellations,
// purges expired waiters, refreshes stats, and periodically logs them.
// It exits — shutting down the store first — when Shutdown is called.
func (t *TupleSpaceImpl) run() {
	// time.NewTicker (stopped on return) instead of time.Tick: time.Tick
	// has no way to release its underlying ticker, so the previous version
	// leaked three tickers when the loop exited on shutdown.
	statTicker := time.NewTicker(time.Millisecond * 100)
	defer statTicker.Stop()
	reportStatsTicker := time.NewTicker(time.Second * 2)
	defer reportStatsTicker.Stop()
	purgeTicker := time.NewTicker(time.Millisecond * 250)
	defer purgeTicker.Stop()
	for {
		select {
		case waiter := <-t.cancel:
			t.cancelWaiter(waiter)
		case <-purgeTicker.C:
			// TODO: Implement timeouts using a heap rather than periodic checks.
			t.purge()
		case <-statTicker.C:
			t.updateStats()
		case <-reportStatsTicker.C:
			t.reportStats()
		case <-t.shutdown:
			t.store.Shutdown()
			return
		}
	}
}
// reportStats logs the current stats snapshot (invoked every 2s by run).
func (t *TupleSpaceImpl) reportStats() {
	log.Info("Stats: %s", &t.stats)
}
// cancelWaiter removes the waiter from the pending set and reports
// CancelledReader on its (buffered) error channel. Called from the run
// loop in response to a send on t.cancel.
func (t *TupleSpaceImpl) cancelWaiter(waiter *tupleWaiter) {
	log.Info("Cancelled waiter %s", waiter)
	t.waitersLock.Lock()
	defer t.waitersLock.Unlock()
	delete(t.waiters, waiter)
	waiter.err <- CancelledReader
}
// updateStats refreshes the waiter count and the store-derived stats, then
// broadcasts on statsUpdated to wake anyone blocked in Stats().
// Lock order: statsUpdated.L is taken before waitersLock.
func (t *TupleSpaceImpl) updateStats() {
	t.statsUpdated.L.Lock()
	defer t.statsUpdated.L.Unlock()
	t.waitersLock.RLock()
	defer t.waitersLock.RUnlock()
	t.stats.Waiters = int64(len(t.waiters))
	t.store.UpdateStats(&t.stats)
	t.statsUpdated.Broadcast()
}
// processNewEntries matches a batch of incoming tuples against the pending
// waiters, delivers matches, retires satisfied waiters, and persists
// whatever was not taken. A tuple taken by a TAKE waiter is nilled out of
// the batch so it is not stored; READ waiters do not consume tuples.
func (t *TupleSpaceImpl) processNewEntries(tuples []Tuple, timeout time.Time) error {
	t.waitersLock.Lock()
	defer t.waitersLock.Unlock()
	atomic.AddInt64(&t.stats.TuplesSeen, int64(len(tuples)))
	var takenCount int64
	var readCount int64
	// Tuples to deliver, accumulated per waiter.
	matches := map[*tupleWaiter][]Tuple{}
	for i, tuple := range tuples {
		taken := false
		for waiter := range t.waiters {
			take := waiter.actions&ActionTake != 0
			one := waiter.actions&ActionOne != 0
			if tuple != nil && waiter.match.Match(tuple) {
				// A ONE waiter that already has its match needs nothing more.
				// NOTE(review): this break also skips the REMAINING waiters
				// for this tuple — confirm `continue` was not intended here.
				if one && len(matches[waiter]) > 0 {
					break
				}
				if take {
					// If this tuple has already been taken, we can't take it again. However, reads can still succeed.
					if taken {
						continue
					}
					taken = true
					// tuples[i] = nil
					takenCount++
				} else {
					readCount++
				}
				matches[waiter] = append(matches[waiter], tuple)
			}
		}
		// Nil out taken tuples so the storage pass below skips them.
		if taken {
			tuples[i] = nil
		}
	}
	atomic.AddInt64(&t.stats.TuplesTaken, takenCount)
	atomic.AddInt64(&t.stats.TuplesRead, readCount)
	// Deliver matches and retire the satisfied waiters.
	for waiter, tuplesForWaiter := range matches {
		waiter.matches <- tuplesForWaiter
		delete(t.waiters, waiter)
	}
	// Remove entries taken by waiters.
	remaining := make([]Tuple, 0, len(tuples))
	for _, tuple := range tuples {
		if tuple != nil {
			remaining = append(remaining, tuple)
		}
	}
	// Finally, store the remaining tuples.
	_, err := t.store.Put(remaining, timeout)
	return err
}
// When a new waiter arrives, we check if it matches existing tuples. If so,
// we return those tuples. If not, the waiter is added to the list of waiters
// that will match against future tuples.
//
// TAKE waiters delete the tuples they match from the store; ONE waiters
// receive at most one tuple. NOTE(review): `deletes` may already contain
// entries returned by store.Match (e.g. expired ones), so the TuplesTaken
// accounting below can include non-taken deletions — confirm intent.
func (t *TupleSpaceImpl) processNewWaiter(waiter *tupleWaiter) {
	t.waitersLock.Lock()
	defer t.waitersLock.Unlock()
	atomic.AddInt64(&t.stats.WaitersSeen, 1)
	one := waiter.actions&ActionOne != 0
	take := waiter.actions&ActionTake != 0
	limit := 0 // 0 = no limit
	if one {
		limit = 1
	}
	stored, deletes, err := t.store.Match(waiter.match, limit)
	if err != nil {
		waiter.err <- err
		return
	}
	matches := make([]Tuple, 0, len(stored))
	taken := 0
	for _, entry := range stored {
		matches = append(matches, entry.Tuple)
		if take {
			taken++
			deletes = append(deletes, entry)
		}
		if one {
			break
		}
	}
	if len(deletes) > 0 {
		log.Fine("Deleting %d tuples. %d taken by %s, %d expired", len(deletes), taken, waiter, len(deletes)-taken)
		t.store.Delete(deletes)
	}
	if len(matches) > 0 {
		// Immediate satisfaction: deliver on the buffered channel and never
		// register the waiter.
		log.Fine("Waiter %s immediately returned %d matching tuples", waiter, len(matches))
		if len(deletes) != 0 {
			atomic.AddInt64(&t.stats.TuplesTaken, int64(len(deletes)))
		} else {
			atomic.AddInt64(&t.stats.TuplesRead, int64(len(matches)))
		}
		waiter.matches <- matches
	} else {
		log.Fine("Adding new waiter %s", waiter)
		t.waiters[waiter] = nil
	}
}
// purge drops every waiter whose deadline has passed, delivering
// ReaderTimeout on its (buffered) error channel. Waiters with a zero
// deadline never expire.
func (t *TupleSpaceImpl) purge() {
	t.waitersLock.Lock()
	defer t.waitersLock.Unlock()
	now := time.Now()
	expired := 0
	for w := range t.waiters {
		if w.timeout.IsZero() || !w.timeout.Before(now) {
			continue
		}
		delete(t.waiters, w)
		w.err <- ReaderTimeout
		expired++
	}
	if expired > 0 {
		log.Fine("Purged %d waiters", expired)
	}
}
// SendMany inserts a batch of tuples into the space. A zero timeout stores
// them without an expiry; otherwise they expire at now+timeout.
func (t *TupleSpaceImpl) SendMany(tuples []Tuple, timeout time.Duration) error {
	log.Debug("Send(%+v, %s)", tuples, timeout)
	expires := time.Time{}
	if timeout != 0 {
		expires = time.Now().Add(timeout)
	}
	return t.processNewEntries(tuples, expires)
}
// ReadOperation registers a read/take operation against the space and
// returns a handle through which the caller receives matches, receives
// errors, or cancels. Matching against already-stored tuples happens
// immediately.
func (t *TupleSpaceImpl) ReadOperation(match *TupleMatcher, timeout time.Duration, actions int) ReadOperationHandle {
	w := newWaiter(t.cancel, match, timeout, actions)
	log.Debug("ReadOperation(%s)", w)
	t.processNewWaiter(w)
	return w
}
// Shutdown signals the background run() loop to stop; the loop shuts down
// the underlying store before exiting. Always returns nil.
func (t *TupleSpaceImpl) Shutdown() error {
	t.shutdown <- true
	return nil
}
// Stats blocks until the next periodic stats refresh (updateStats
// broadcasts roughly every 100ms) and returns that fresh snapshot.
func (t *TupleSpaceImpl) Stats() TupleSpaceStats {
	t.statsUpdated.L.Lock()
	defer t.statsUpdated.L.Unlock()
	t.statsUpdated.Wait()
	return t.stats
}
|
package orm
import (
"fmt"
"strings"
)
// ResultSet holds query results grouped per table name, together with the
// schema used to interpret them.
type ResultSet struct {
	schema *Schema       // schema describing the tables present in Data
	Data   MappedEntries // entries keyed by table name
}
// getTableData resolves a table by name and returns it together with the
// entries captured for it. It errors when the schema does not know the
// table, or when no data was loaded for it.
func (r *ResultSet) getTableData(tableName string) (*Table, []Entry, error) {
	table, err := r.schema.GetTable(tableName)
	if err != nil {
		return nil, nil, err
	}
	if data, ok := r.Data[tableName]; ok {
		return table, data, nil
	}
	return nil, nil, fmt.Errorf("no data found for table named '%s'", tableName)
}
// match reports whether entry carries the given column and its value
// equals columnValue (compared with ==).
func (r *ResultSet) match(entry Entry, columnName string, columnValue interface{}) bool {
	if value, ok := entry[columnName]; ok {
		return value == columnValue
	}
	return false
}
// Strip copies only the requested columns out of entry. Columns absent
// from the entry come through as nil values, exactly as a map lookup
// yields its zero value.
func (r *ResultSet) Strip(entry Entry, columns []string) map[string]interface{} {
	out := Entry{}
	for _, name := range columns {
		out[name] = entry[name]
	}
	return out
}
// Get returns every entry of the named table whose columnName equals
// columnValue. An unknown table (or one with no data) is an error; no
// matches yields an empty, non-nil slice.
func (r *ResultSet) Get(
	tableName,
	columnName string,
	columnValue interface{},
) ([]map[string]interface{}, error) {
	_, data, err := r.getTableData(tableName)
	if err != nil {
		return nil, err
	}
	matched := []map[string]interface{}{}
	for _, candidate := range data {
		if r.match(candidate, columnName, columnValue) {
			matched = append(matched, candidate)
		}
	}
	return matched, nil
}
// GetPK returns the single entry of the named table whose primary-key
// column equals pk. It errors when the table is unknown, when no entry
// matches, or when more than one does.
func (r *ResultSet) GetPK(tableName string, pk interface{}) (Entry, error) {
	entries, err := r.Get(tableName, PKColumnName, pk)
	if err != nil {
		return nil, err
	}
	if len(entries) == 0 {
		return nil, fmt.Errorf("unable to find data for '%s.%s'='%v'", tableName, PKColumnName, pk)
	}
	if len(entries) > 1 {
		// Fixed duplicated word in the message ("for for").
		return nil, fmt.Errorf("too many results for '%s.%s'='%v'", tableName, PKColumnName, pk)
	}
	return entries[0], nil
}
// GetColumn collects the value of one column across all entries of a
// table, silently skipping entries that do not carry the column.
func (r *ResultSet) GetColumn(tableName, columnName string) (List, error) {
	_, data, err := r.getTableData(tableName)
	if err != nil {
		return nil, err
	}
	values := List{}
	for _, entry := range data {
		if v, ok := entry[columnName]; ok {
			values = append(values, v)
		}
	}
	return values, nil
}
// processRow receives a single row (from the results matrix) and splits it
// into per-table entries based on the "hint.column" naming convention
// carried by each column name.
func (r *ResultSet) processRow(columnIDs map[string]int, row List) (EntryMap, error) {
	hintedEntries := EntryMap{}
	for hintedColumn, id := range columnIDs {
		// Split only on the first dot: the hint precedes it and the column
		// name may itself contain dots. SplitN replaces the previous
		// Split-everything-then-rejoin round trip with identical results.
		parts := strings.SplitN(hintedColumn, ".", 2)
		if len(parts) < 2 {
			return nil, fmt.Errorf("unable to obtain hint and column name from '%s'", hintedColumn)
		}
		hint, column := parts[0], parts[1]
		if _, found := hintedEntries[hint]; !found {
			hintedEntries[hint] = Entry{}
		}
		hintedEntries[hint][column] = row[id]
	}
	return hintedEntries, nil
}
// buildMappedMatrix creates the MappedEntries data structure: each matrix
// row is split per table (via processRow) and each table's entries are
// de-duplicated by primary key.
func (r *ResultSet) buildMappedMatrix(columnIDs map[string]int, matrix []List) error {
	columnIDsLen := len(columnIDs)
	// Primary keys already seen, per table hint.
	pkCache := make(map[string][]interface{}, len(r.schema.Tables))
	for _, row := range matrix {
		rowLen := len(row)
		if columnIDsLen != rowLen {
			return fmt.Errorf("different amount of columns vs. row columns (%d/%d)",
				columnIDsLen, rowLen)
		}
		// extracting a map splitting the different tables in different maps organized by hint
		hintedEntries, err := r.processRow(columnIDs, row)
		if err != nil {
			return err
		}
		for _, table := range r.schema.Tables {
			entry, found := hintedEntries[table.Hint]
			if !found {
				continue
			}
			pk, found := entry[PKColumnName]
			if !found {
				continue
			}
			// in all cases, the primary-key should not repeat, therefore ignoring the
			// repeated one is a way to avoid duplicated results due to one-to-many relationships
			if InterfaceSliceContains(pkCache[table.Hint], pk) {
				continue
			}
			pkCache[table.Hint] = append(pkCache[table.Hint], pk)
			r.Data[table.Name] = append(r.Data[table.Name], entry)
		}
	}
	return nil
}
// NewResultSet instantiates a ResultSet for the given schema by mapping
// the raw result matrix into per-table entries.
func NewResultSet(schema *Schema, columnIDs map[string]int, matrix []List) (*ResultSet, error) {
	rs := &ResultSet{schema: schema, Data: MappedEntries{}}
	err := rs.buildMappedMatrix(columnIDs, matrix)
	if err != nil {
		return nil, err
	}
	return rs, nil
}
|
package operatorlister
import (
"fmt"
"sync"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
v1 "github.com/operator-framework/api/pkg/operators/v1"
listers "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/listers/operators/v1"
)
// UnionOperatorGroupLister delegates OperatorGroup listing to per-namespace
// listers registered at runtime.
type UnionOperatorGroupLister struct {
	csvListers map[string]listers.OperatorGroupLister // listers keyed by namespace (metav1.NamespaceAll for cluster-wide)
	csvLock    sync.RWMutex                           // guards csvListers
}
// List lists all OperatorGroups known to any registered namespace lister,
// de-duplicated by UID. Ordering of the result is unspecified (map
// iteration order).
func (uol *UnionOperatorGroupLister) List(selector labels.Selector) (ret []*v1.OperatorGroup, err error) {
	uol.csvLock.RLock()
	defer uol.csvLock.RUnlock()
	unique := map[types.UID]*v1.OperatorGroup{}
	for _, lister := range uol.csvListers {
		groups, listErr := lister.List(selector)
		if listErr != nil {
			return nil, listErr
		}
		for _, group := range groups {
			unique[group.GetUID()] = group
		}
	}
	for _, group := range unique {
		ret = append(ret, group)
	}
	return ret, nil
}
// OperatorGroups returns an object that can list and get OperatorGroups in
// the given namespace. It prefers a lister registered for that exact
// namespace, then one registered for all namespaces, and finally falls
// back to a null implementation that errors on use.
func (uol *UnionOperatorGroupLister) OperatorGroups(namespace string) listers.OperatorGroupNamespaceLister {
	uol.csvLock.RLock()
	defer uol.csvLock.RUnlock()
	for _, key := range []string{namespace, metav1.NamespaceAll} {
		if lister, ok := uol.csvListers[key]; ok {
			return lister.OperatorGroups(namespace)
		}
	}
	return &NullOperatorGroupNamespaceLister{}
}
// RegisterOperatorGroupLister registers (or replaces) the lister used for
// the given namespace, lazily initializing the lister map on first use.
func (uol *UnionOperatorGroupLister) RegisterOperatorGroupLister(namespace string, lister listers.OperatorGroupLister) {
	uol.csvLock.Lock()
	defer uol.csvLock.Unlock()
	if uol.csvListers == nil {
		uol.csvListers = map[string]listers.OperatorGroupLister{}
	}
	uol.csvListers[namespace] = lister
}
// RegisterOperatorGroupLister registers the OperatorGroup lister for a
// namespace on the underlying union lister.
func (l *operatorsV1Lister) RegisterOperatorGroupLister(namespace string, lister listers.OperatorGroupLister) {
	l.operatorGroupLister.RegisterOperatorGroupLister(namespace, lister)
}

// OperatorGroupLister returns the union OperatorGroup lister.
func (l *operatorsV1Lister) OperatorGroupLister() listers.OperatorGroupLister {
	return l.operatorGroupLister
}
// NullOperatorGroupNamespaceLister is an implementation of a null OperatorGroupNamespaceLister. It is
// used to prevent nil pointers when no OperatorGroupNamespaceLister has been registered for a given
// namespace. Its List and Get methods always return an error; any other
// method promoted from the embedded interface will panic if called.
type NullOperatorGroupNamespaceLister struct {
	listers.OperatorGroupNamespaceLister
}
// List returns nil and an error explaining that this is a NullOperatorGroupNamespaceLister.
func (n *NullOperatorGroupNamespaceLister) List(selector labels.Selector) (ret []*v1.OperatorGroup, err error) {
	return nil, fmt.Errorf("cannot list OperatorGroups with a NullOperatorGroupNamespaceLister")
}

// Get returns nil and an error explaining that this is a NullOperatorGroupNamespaceLister.
func (n *NullOperatorGroupNamespaceLister) Get(name string) (*v1.OperatorGroup, error) {
	return nil, fmt.Errorf("cannot get OperatorGroup with a NullOperatorGroupNamespaceLister")
}
|
package domain
// IdInt wraps a numeric identifier for JSON payloads of the form
// {"id": 123}.
type IdInt struct {
	Id int64 `json:"id"`
}

// IdString wraps a string identifier for JSON payloads of the form
// {"id": "abc"}.
type IdString struct {
	Id string `json:"id"`
}
|
package main
import (
"os"
"os/signal"
"syscall"
"time"
log "github.com/sirupsen/logrus"
"github.com/bpmericle/go-webservice/cmd/server"
"github.com/bpmericle/go-webservice/config"
)
// init configures the global logrus logger before main runs.
func init() {
	// Log as JSON instead of the default ASCII formatter.
	log.SetFormatter(&log.JSONFormatter{})
	// Output to stdout instead of the default stderr
	// Can be any io.Writer, see below for File example
	log.SetOutput(os.Stdout)
	// Log at the severity supplied by the application config (the previous
	// comment claimed a fixed "warning or above" level).
	log.SetLevel(config.LogLevel())
}
// main runs the web server on the configured port until it exits or an
// interrupt/termination signal arrives, logging uptime on shutdown.
func main() {
	startTime := time.Now()
	interrupts := make(chan os.Signal, 1)
	signal.Notify(interrupts, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		received := <-interrupts
		log.WithField("uptime", time.Since(startTime).String()).
			WithField("signal", received.String()).
			Error("interrupt signal detected")
		os.Exit(0)
	}()
	srv := server.Server{Port: config.Port()}
	log.Fatal(srv.ListenAndServe())
}
|
package main
import(
"fmt"
"time"
"math/rand"
)
// pinger simulates one tourist spending b simulated minutes online: it
// logs the start, sleeps b real seconds, then logs completion. The channel
// c is currently unused — the send at the end is commented out.
func pinger(a, b int, c chan string) { // a: tourist id; b: duration in seconds
	fmt.Println("Tourist", a, "is online")
	time.Sleep(time.Second * time.Duration(b))
	fmt.Println("Tourist ", a, "is done having spent", b, "mins online")
	//c <- "ping"
}
// printer is a placeholder consumer: the receive from c is commented out,
// so it just sleeps forever in one-second intervals to stay alive.
func printer(c chan string) {
	for {
		//msg := <- c
		//msg1 := <-d
		//fmt.Println("The info is : ",msg)
		time.Sleep(time.Second * 1)
	}
}
// main launches 8 "tourist" goroutines (each with a random 8-15 second
// online duration), then blocks on a line of stdin; any still-running
// goroutines are abandoned when main returns.
func main() {
	var a [25]int // random online durations, one per tourist
	var x [25]int // tourist ids
	// Channel intended to sync the goroutines; currently unused because the
	// sends/receives in pinger and printer are commented out.
	var c chan string = make(chan string)
	x = [25]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}
	fmt.Println(x)
	for k := 0; k <= 24; k++ {
		a[k] = rand.Intn(8) + 8 // random duration in [8, 15]
		//fmt.Println(a[k]) //random nos for random delays for each tourist
	}
	// Only the first 8 of the 25 tourists are actually started.
	for i := 0; i < 8; i++ {
		go pinger(x[i], a[i], c)
	}
	//go ponger(c)
	go printer(c)
	// Block until the user presses enter.
	var input string
	fmt.Scanln(&input)
}
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package backoff
import (
"context"
"math/rand"
"time"
"tailscale.com/types/logger"
)
// MAX_BACKOFF_MSEC caps a single backoff interval at 30 seconds.
const MAX_BACKOFF_MSEC = 30000

// Backoff tracks consecutive failures and sleeps n^2 * 10ms between
// retries (randomized to 0.5x-1.5x, capped at MAX_BACKOFF_MSEC).
type Backoff struct {
	n int // count of consecutive failures; reset to 0 on success
	// Name is the name of this backoff timer, for logging purposes.
	name string
	// logf is the function used for log messages when backing off.
	logf logger.Logf
	// NewTimer is the function that acts like time.NewTimer().
	// You can override this in unit tests.
	NewTimer func(d time.Duration) *time.Timer
	// LogLongerThan sets the minimum time of a single backoff interval
	// before we mention it in the log.
	LogLongerThan time.Duration
}
// NewBackoff returns a Backoff timer with the given name (used in log
// messages) that logs through logf. NewTimer defaults to time.NewTimer
// and may be overridden in tests.
func NewBackoff(name string, logf logger.Logf) Backoff {
	b := Backoff{name: name, logf: logf}
	b.NewTimer = time.NewTimer
	return b
}
// BackOff sleeps for the current backoff interval when err is non-nil and
// ctx is still live; otherwise (success, or a cancelled context) it resets
// the failure counter. The sleep aborts early if ctx becomes done.
func (b *Backoff) BackOff(ctx context.Context, err error) {
	if ctx.Err() == nil && err != nil {
		b.n++
		// n^2 backoff timer is a little smoother than the
		// common choice of 2^n.
		msec := b.n * b.n * 10
		if msec > MAX_BACKOFF_MSEC {
			msec = MAX_BACKOFF_MSEC
		}
		// Randomize the delay between 0.5-1.5 x msec, in order
		// to prevent accidental "thundering herd" problems.
		// (msec >= 10 here since b.n >= 1, so Intn never sees 0.)
		msec = rand.Intn(msec) + msec/2
		dur := time.Duration(msec) * time.Millisecond
		if dur >= b.LogLongerThan {
			b.logf("%s: backoff: %d msec\n", b.name, msec)
		}
		t := b.NewTimer(dur)
		select {
		case <-ctx.Done():
			t.Stop()
		case <-t.C:
		}
	} else {
		// not a regular error: success or context cancellation resets the
		// failure streak.
		b.n = 0
	}
}
|
package main
import (
"bufio"
"fmt"
"gopkg.in/mgo.v2"
"os"
"strconv"
"time"
//"time"
//"io"
"io/ioutil"
"log"
"net/http"
//"strconv"
"strings"
)
// Record_row is one NYC building-footprint record as parsed from the
// city's CSV export and stored in MongoDB (collection record_data).
// Field order matches the CSV column order consumed in main.
type Record_row struct {
	//Id string `json:"id" bson:"_id,omitempty"`
	Base_bbl   int64   `json:"base_bbl" bson:"base_bbl"`
	Bin        int64   `json:"bin" bson:"bin"`
	Cnstrct_yr int64   `json:"cnstrct_yr" bson:"cnstrct_yr"`
	Doitt_id   int64   `json:"doitt_id" bson:"doitt_id"`
	Feat_code  int64   `json:"feat_code" bson:"feat_code"`
	Geomsource string  `json:"geomsource" bson:"geomsource"`
	Groundelev int64   `json:"groundelev" bson:"groundelev"`
	Heightroof float64 `json:"heightroof" bson:"heightroof"`
	Lstmoddate string  `json:"lstmoddate" bson:"lstmoddate"`
	Lststatype string  `json:"lststatype" bson:"lststatype"`
	Mpluto_bbl int64   `json:"mpluto_bbl" bson:"mpluto_bbl"`
	Name       string  `json:"name" bson:"name"`
	Shape_area float64 `json:"shape_area" bson:"shape_area"`
	Shape_len  float64 `json:"shape_len" bson:"shape_len"`
	The_geom   string  `json:"the_geom" bson:"the_geom"`
}
// main downloads up to rowCount (os.Args[1]) NYC building-footprint rows
// as CSV and inserts them into the local MongoDB collection
// mydb.record_data, dropping any existing collection first.
//
// Fixes over the previous version: a failed Mongo dial or HTTP GET now
// aborts instead of dereferencing a nil session/response, short CSV rows
// are skipped instead of panicking, and the scanner error is checked.
func main() {
	rowCount := os.Args[1]
	fmt.Println("RowCount Entered: ", rowCount)
	maxWait := 10 * time.Second
	session, err := mgo.DialWithTimeout("localhost:27017", maxWait)
	if err != nil {
		// Fail fast: continuing here used to panic on the nil session.
		log.Fatalf("Unable to connect to local mongo instance: %v", err)
	}
	defer session.Close()
	session.SetMode(mgo.Monotonic, true)
	c := session.DB("mydb").C("record_data")
	c.DropCollection()
	fmt.Println("Got a collection object")
	fileUrl := "http://data.cityofnewyork.us/resource/mtik-6c5q.csv?$limit=" + rowCount
	resp, err := http.Get(fileUrl)
	if err != nil {
		// resp is nil on error; the old deferred resp.Body.Close() would
		// have panicked here.
		log.Fatalf("No Data Fetched: %v", err)
	}
	defer resp.Body.Close()
	fmt.Println("Data Fetched")
	if resp.StatusCode != http.StatusOK {
		log.Fatalf("Unexpected HTTP status: %s", resp.Status)
	}
	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("Unable to read response body: %v", err)
	}
	// clean strips quotes and backslashes from a raw CSV cell.
	// NOTE(review): naive comma splitting below still breaks on quoted
	// cells containing commas — consider encoding/csv if that occurs.
	clean := func(cell string) string {
		return strings.Replace(strings.Replace(cell, "\"", "", -1), "\\", "", -1)
	}
	scanner := bufio.NewScanner(strings.NewReader(string(bodyBytes)))
	for scanner.Scan() {
		s := strings.Split(scanner.Text(), ",")
		if len(s) < 15 {
			// Malformed/short row: skip instead of panicking on s[14].
			continue
		}
		if clean(s[0]) == "base_bbl" {
			// Header row.
			continue
		}
		field0, _ := strconv.ParseInt(clean(s[0]), 10, 64)
		field1, _ := strconv.ParseInt(clean(s[1]), 10, 64)
		field2, _ := strconv.ParseInt(clean(s[2]), 10, 64)
		field3, _ := strconv.ParseInt(clean(s[3]), 10, 64)
		field4, _ := strconv.ParseInt(clean(s[4]), 10, 64)
		field5 := clean(s[5])
		field6, _ := strconv.ParseInt(clean(s[6]), 10, 64)
		field7, _ := strconv.ParseFloat(clean(s[7]), 64)
		field8 := clean(s[8])
		field9 := clean(s[9])
		field10, _ := strconv.ParseInt(clean(s[10]), 10, 64)
		field11 := clean(s[11])
		field12, _ := strconv.ParseFloat(clean(s[12]), 64)
		field13, _ := strconv.ParseFloat(clean(s[13]), 64)
		field14 := clean(s[14])
		err = c.Insert(&Record_row{field0, field1, field2, field3, field4, field5, field6, field7, field8, field9, field10, field11, field12, field13, field14})
		if err != nil {
			panic(err)
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatalf("Error scanning response body: %v", err)
	}
	log.Printf("Data inserted into MongoDB (db: mydb, collection:record_data)")
}
|
package configs
import "github.com/spf13/viper"
// LoadConfig reads config.yaml via viper. By default it searches the
// "configs/" directory; an optional first argument overrides that
// directory. The error from viper.ReadInConfig is returned unchanged.
func LoadConfig(path ...string) error {
	dir := "configs/"
	if len(path) > 0 {
		dir = path[0]
	}
	viper.SetConfigName("config")
	viper.SetConfigType("yaml")
	viper.AddConfigPath(dir)
	return viper.ReadInConfig()
}
|
package event
// Event names published by the plugin core, plus the wildcard pattern that
// matches all of them.
const (
	// core events
	CorePluginsInited  = "core/plugins/inited"
	CorePluginsStarted = "core/plugins/started"
	CorePluginsStopped = "core/plugins/stopped"
	// patterns
	CorePlugins = "core/plugins/*"
)
|
package internal
import (
"chapter7/grpcjson/keyvalue"
"context"
"sync"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// KeyValue is an in-memory string key/value store guarded by a RWMutex.
// (Translated from the original Korean comment.)
type KeyValue struct {
	mutex sync.RWMutex // guards m
	m     map[string]string
}
// NewKeyValue returns a KeyValue store with an initialized (empty) map,
// ready for concurrent use.
func NewKeyValue() *KeyValue {
	kv := &KeyValue{}
	kv.m = map[string]string{}
	return kv
}
// Set stores the request's value under its key and echoes the stored value
// back in the response. (Translated from the original Korean comment.)
func (k *KeyValue) Set(ctx context.Context, r *keyvalue.SetKeyValueRequest) (*keyvalue.KeyValueResponse, error) {
	k.mutex.Lock()
	defer k.mutex.Unlock()
	k.m[r.GetKey()] = r.GetValue()
	return &keyvalue.KeyValueResponse{Value: r.GetValue()}, nil
}
// Get returns the value stored for the requested key, or a gRPC NotFound
// error when the key was never set. (Translated from the original Korean
// comment.)
func (k *KeyValue) Get(ctx context.Context, r *keyvalue.GetKeyValueRequest) (*keyvalue.KeyValueResponse, error) {
	k.mutex.RLock()
	defer k.mutex.RUnlock()
	val, ok := k.m[r.GetKey()]
	if !ok {
		// Message fixed: was "key no set".
		return nil, status.Errorf(codes.NotFound, "key not set")
	}
	return &keyvalue.KeyValueResponse{Value: val}, nil
}
|
package apiserver
import (
"context"
"errors"
"net"
"time"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/status"
"github.com/prometheus/client_golang/prometheus"
"github.com/andywow/golang-lessons/lesson-calendar/internal/calendar/repository"
"github.com/andywow/golang-lessons/lesson-calendar/pkg/eventapi"
)
// options collects the configurable dependencies of the server, filled in
// by the Option values passed to StartServer.
type options struct {
	logger  *zap.Logger
	storage *repository.EventRepository
}

// Option server options
type Option interface {
	apply(*options)
}

// loggerOption carries the logger supplied via WithLogger.
type loggerOption struct {
	Log *zap.Logger
}

// repositoryOption carries the storage backend supplied via WithRepository.
type repositoryOption struct {
	EventStorage *repository.EventRepository
}

// apiServerMetrics bundles the Prometheus collectors registered by
// StartServer: request counts, request latency and error counts.
type apiServerMetrics struct {
	requests *prometheus.CounterVec
	latency  *prometheus.HistogramVec
	errs     *prometheus.CounterVec
}

// APIServer api server struct for grpc client interface.
// Its logger, eventStorage and metrics fields are populated by StartServer
// before the server is registered with gRPC.
type APIServer struct {
	logger       *zap.SugaredLogger
	eventStorage repository.EventRepository
	metrics      *apiServerMetrics
}
// CreateEvent stores a new event after verifying its time slot is free.
// Request count and latency metrics are recorded under method "create";
// storage errors are translated to gRPC status codes.
func (s APIServer) CreateEvent(ctx context.Context, in *eventapi.Event) (*eventapi.Event, error) {
	s.logger.Info("create event processing")
	s.metrics.requests.WithLabelValues("create").Inc()
	startTime := time.Now()
	defer s.addMetricLatency(startTime, "create")
	if err := s.eventStorage.CheckIfTimeIsBusy(ctx, in); err != nil {
		return nil, s.getStatusFromError(err, "create")
	}
	if err := s.eventStorage.CreateEvent(ctx, in); err != nil {
		return nil, s.getStatusFromError(err, "create")
	}
	s.logger.Info("create event processed")
	return in, nil
}
// UpdateEvent updates an existing event. The busy-slot check is performed
// only when the update changes the start time. Metrics are recorded under
// method "update".
func (s APIServer) UpdateEvent(ctx context.Context, in *eventapi.Event) (*eventapi.Event, error) {
	s.logger.Infof("update event received for uuid: %s", in.Uuid)
	s.metrics.requests.WithLabelValues("update").Inc()
	startTime := time.Now()
	defer s.addMetricLatency(startTime, "update")
	if in.StartTime != nil {
		if err := s.eventStorage.CheckIfTimeIsBusy(ctx, in); err != nil {
			return nil, s.getStatusFromError(err, "update")
		}
	}
	if err := s.eventStorage.UpdateEvent(ctx, in); err != nil {
		return nil, s.getStatusFromError(err, "update")
	}
	s.logger.Infof("update event processed for uuid: %s", in.Uuid)
	return in, nil
}
// DeleteEvent deletes the event identified by the request UUID. Metrics
// are recorded under method "delete".
func (s APIServer) DeleteEvent(ctx context.Context, in *eventapi.EventDelete) (*eventapi.EventDeleteStatus, error) {
	s.logger.Infof("delete event received for uuid: %s", in.Uuid)
	s.metrics.requests.WithLabelValues("delete").Inc()
	startTime := time.Now()
	defer s.addMetricLatency(startTime, "delete")
	if err := s.eventStorage.DeleteEvent(ctx, in.Uuid); err != nil {
		return nil, s.getStatusFromError(err, "delete")
	}
	s.logger.Infof("delete event processed for uuid: %s", in.Uuid)
	return &eventapi.EventDeleteStatus{}, nil
}
// GetEventsForDate returns every event stored for the requested date.
// Metrics are recorded under method "get_for_date".
func (s APIServer) GetEventsForDate(ctx context.Context, in *eventapi.EventDate) (*eventapi.EventList, error) {
	s.logger.Info("get event list for date received")
	s.metrics.requests.WithLabelValues("get_for_date").Inc()
	started := time.Now()
	defer s.addMetricLatency(started, "get_for_date")
	events, err := s.eventStorage.GetEventsForDate(ctx, *in.GetDate())
	if err != nil {
		return nil, s.getStatusFromError(err, "get_for_date")
	}
	s.logger.Info("get event list for date processed")
	return &eventapi.EventList{Events: events}, nil
}
// GetEventsForWeek returns every event stored for the week containing the
// requested date. Metrics are recorded under method "get_for_week".
func (s APIServer) GetEventsForWeek(ctx context.Context, in *eventapi.EventDate) (*eventapi.EventList, error) {
	s.logger.Info("get event list for week received")
	s.metrics.requests.WithLabelValues("get_for_week").Inc()
	startTime := time.Now()
	defer s.addMetricLatency(startTime, "get_for_week")
	eventList, err := s.eventStorage.GetEventsForWeek(ctx, *in.GetDate())
	if err != nil {
		return nil, s.getStatusFromError(err, "get_for_week")
	}
	// Log message aligned with the date/month handlers (was "week date
	// processed").
	s.logger.Info("get event list for week processed")
	return &eventapi.EventList{Events: eventList}, err
}
// GetEventsForMonth returns every event stored for the month containing
// the requested date. Metrics are recorded under method "get_for_month".
func (s APIServer) GetEventsForMonth(ctx context.Context, in *eventapi.EventDate) (*eventapi.EventList, error) {
	s.logger.Info("get event list for month received")
	s.metrics.requests.WithLabelValues("get_for_month").Inc()
	startTime := time.Now()
	defer s.addMetricLatency(startTime, "get_for_month")
	eventList, err := s.eventStorage.GetEventsForMonth(ctx, *in.GetDate())
	if err != nil {
		return nil, s.getStatusFromError(err, "get_for_month")
	}
	s.logger.Info("get event list for month processed")
	return &eventapi.EventList{Events: eventList}, err
}
// StartServer configures the server from the supplied options, registers
// its Prometheus metrics, and serves the gRPC API on address until ctx is
// cancelled, at which point it performs a graceful stop. (The previous
// comment said "http server"; this serves gRPC.)
//
// Note: the receiver is a value, so the configured copy (logger, storage,
// metrics) is the one registered with gRPC; the caller's APIServer value
// is not mutated.
func (s APIServer) StartServer(ctx context.Context, address string, opts ...Option) error {
	options := options{
		logger: zap.NewNop(),
	}
	for _, o := range opts {
		o.apply(&options)
	}
	s.logger = options.logger.Sugar()
	s.eventStorage = *options.storage
	s.metrics = &apiServerMetrics{
		requests: prometheus.NewCounterVec(prometheus.CounterOpts{
			Namespace: "apiserver",
			Name:      "requests_total",
			Help:      "request total count",
		}, []string{"method"}),
		latency: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: "apiserver",
			Name:      "latency_ms",
			Help:      "request count",
		}, []string{"method"}),
		errs: prometheus.NewCounterVec(prometheus.CounterOpts{
			Namespace: "apiserver",
			Name:      "errors_total",
			Help:      "errors total count",
		}, []string{"method", "type"}),
	}
	prometheus.MustRegister(s.metrics.requests)
	prometheus.MustRegister(s.metrics.latency)
	prometheus.MustRegister(s.metrics.errs)
	listener, err := net.Listen("tcp", address)
	if err != nil {
		// Fatalf, not Fatal: SugaredLogger.Fatal does not interpolate the
		// format verb, so the error was never printed.
		s.logger.Fatalf("Failed to listen %v", err)
	}
	grpcServer := grpc.NewServer()
	reflection.Register(grpcServer)
	eventapi.RegisterApiServerServer(grpcServer, s)
	go func() {
		<-ctx.Done()
		grpcServer.GracefulStop()
	}()
	return grpcServer.Serve(listener)
}
// WithLogger returns an Option that installs the given zap logger on the
// server (StartServer defaults to zap.NewNop when this option is absent).
func WithLogger(log *zap.Logger) Option {
	return loggerOption{Log: log}
}
// WithRepository returns an Option that installs the given event
// repository as the server's storage backend.
func WithRepository(repository *repository.EventRepository) Option {
	return repositoryOption{EventStorage: repository}
}
// apply copies the wrapped logger into the options accumulator.
func (o loggerOption) apply(opts *options) {
	opts.logger = o.Log
}
// apply copies the wrapped repository into the options accumulator.
func (o repositoryOption) apply(opts *options) {
	opts.storage = o.EventStorage
}
// getStatusFromError translates a storage error into a gRPC status error
// and increments the error counter labelled with the calling method and a
// short error type. It returns nil when err is nil.
func (s APIServer) getStatusFromError(err error, method string) error {
	if err == nil {
		return nil
	}
	s.logger.Error(err)
	if errors.Is(err, repository.ErrDateBusy) {
		s.metrics.errs.WithLabelValues(method, "date_busy").Inc()
		return status.Error(codes.AlreadyExists, repository.ErrDateBusy.Error())
	}
	if errors.Is(err, repository.ErrInvalidData) {
		// Label fixed from "invalid_date": this branch matches
		// repository.ErrInvalidData, not a date error.
		s.metrics.errs.WithLabelValues(method, "invalid_data").Inc()
		return status.Error(codes.InvalidArgument, repository.ErrInvalidData.Error())
	}
	if errors.Is(err, repository.ErrStorageUnavailable) {
		s.metrics.errs.WithLabelValues(method, "storage_unavailable").Inc()
		return status.Error(codes.Unavailable, repository.ErrStorageUnavailable.Error())
	}
	s.metrics.errs.WithLabelValues(method, "internal").Inc()
	return status.Error(codes.Internal, "internal error")
}
// addMetricLatency records the time elapsed since startTime, in
// milliseconds, into the latency histogram for the given method.
func (s APIServer) addMetricLatency(startTime time.Time, method string) {
	elapsed := time.Since(startTime)
	s.metrics.latency.WithLabelValues(method).Observe(float64(elapsed.Milliseconds()))
}
|
/**
* Copyright (c) 2018 ZTE Corporation.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and the Apache License 2.0 which both accompany this distribution,
* and are available at http://www.eclipse.org/legal/epl-v10.html
* and http://www.apache.org/licenses/LICENSE-2.0
*
* Contributors:
* ZTE - initial Project
*/
package pilot
import (
"msb2pilot/models"
"testing"
)
// TestParseServiceToConfig checks that parseServiceToConfig renders the
// expected Istio VirtualService JSON for (a) an empty service list and
// (b) two services whose publish info is found in publishServices.
func TestParseServiceToConfig(t *testing.T) {
	cases := []struct {
		services        []*models.MsbService
		publishServices map[string]*models.PublishService
		want            string
	}{
		// No services: the "http" route list must render empty.
		{
			services:        []*models.MsbService{},
			publishServices: map[string]*models.PublishService{},
			want: `{
	"apiVersion": "networking.istio.io/v1alpha3",
	"kind": "VirtualService",
	"metadata": {"name": "default-apigateway"},
	"spec": {"hosts":["tService"],"http":[]}
}`,
		},
		// Two services: each produces a match/rewrite/route entry keyed by
		// name+version+namespace in publishServices.
		{
			services: []*models.MsbService{
				&models.MsbService{
					ConsulLabels: &models.ConsulLabels{
						NameSpace: &models.NameSpace{
							NameSpace: "service1namespace",
						},
						BaseInfo: &models.BaseInfo{
							Version: "service1v1",
							Url:     "service1url",
						},
					},
					ServiceName: "service1",
				},
				&models.MsbService{
					ConsulLabels: &models.ConsulLabels{
						NameSpace: &models.NameSpace{
							NameSpace: "service2namespace",
						},
						BaseInfo: &models.BaseInfo{
							Version: "service2v2",
							Url:     "service2url",
						},
					},
					ServiceName: "service2",
				},
			},
			publishServices: map[string]*models.PublishService{
				"service1service1v1service1namespace": &models.PublishService{
					ServiceName: "service1",
					Version:     "service1v1",
					NameSpace:   "service1namespace",
					PublishUrl:  "service1publishurl",
				},
				"service2service2v2service2namespace": &models.PublishService{
					ServiceName: "service2",
					Version:     "service2v2",
					NameSpace:   "service2namespace",
					PublishUrl:  "service2publihurl",
				},
			},
			want: `{
	"apiVersion": "networking.istio.io/v1alpha3",
	"kind": "VirtualService",
	"metadata": {"name": "default-apigateway"},
	"spec": {"hosts":["tService"],"http":[{
	"match":{"uri": {"prefix": "service1publishurl"}},
	"rewrite": {"uri": "service1url"},
	"route": [{"destination": {"host": "service1"}}]
},{
	"match":{"uri": {"prefix": "service2publihurl"}},
	"rewrite": {"uri": "service2url"},
	"route": [{"destination": {"host": "service2"}}]
}]}
}`,
		},
	}
	// Compare the rendered config byte-for-byte against the expectation.
	for _, cas := range cases {
		got := parseServiceToConfig("tService", cas.services, cas.publishServices)
		if got != cas.want {
			t.Errorf("parseServiceToConfig() => got %s, want %s", got, cas.want)
		}
	}
}
//func TestCreateHttpRoute(t *testing.T) {
// cases := []struct {
// sPath, tService, tPath, want string
// }{
// { // success demo
// sPath: "/",
// tService: "tService",
// tPath: "/",
// want: `{
//"match":{"uri": {"prefix": "/"}},
//"rewrite": {"uri": "/"},
//"route": [{"destination": {"host": "tService"}}]
//}`,
// },
// }
// for _, cas := range cases {
// got := createHttpRoute(cas.sPath, cas.tService, cas.tPath)
// if got != cas.want {
// t.Errorf("createHttpRoute(%s, %s, %s) => got %s, want %s", cas.sPath, cas.tService, cas.tPath, got, cas.want)
// }
// }
//}
|
package main
// https://www.hackerrank.com/challenges/common-child
import (
"bufio"
"fmt"
"os"
)
// main reads two strings from stdin and prints the length of their
// longest common subsequence (HackerRank "Common Child").
func main() {
	scan := bufio.NewScanner(os.Stdin)
	scan.Scan()
	a := scan.Text()
	scan.Scan()
	b := scan.Text()
	fmt.Println(lcs(a, b))
}

// lcs returns the length of the longest common subsequence of a and b
// using the classic (len(a)+1) x (len(b)+1) dynamic program.
//
// The original in-place version mishandled the first row/column: when
// a[i] == b[0] it reused mem[i-1][0] as the diagonal predecessor, so
// repeated matches in column 0 accumulated (lcs("aa", "a") reported 2).
// It also panicked on empty input (mem[len(a)-1] with len(a) == 0).
// Padding the table with a zero row/column fixes both.
func lcs(a, b string) int {
	dp := make([][]int, len(a)+1)
	for i := range dp {
		dp[i] = make([]int, len(b)+1)
	}
	for i := 1; i <= len(a); i++ {
		for j := 1; j <= len(b); j++ {
			switch {
			case a[i-1] == b[j-1]:
				dp[i][j] = dp[i-1][j-1] + 1
			case dp[i-1][j] > dp[i][j-1]:
				dp[i][j] = dp[i-1][j]
			default:
				dp[i][j] = dp[i][j-1]
			}
		}
	}
	return dp[len(a)][len(b)]
}
|
package main
import (
"context"
"fmt"
"log"
"concurrency"
"golang.org/x/sync/errgroup"
)
// main generates concurrency.Count requests and processes them
// concurrently with a bounded worker pool.
func main() {
	requests := concurrency.GenerateRequests(concurrency.Count)
	DoAsync(context.TODO(), requests)
}
func DoAsync(ctx context.Context, requests [][]byte) {
// https://stackoverflow.com/questions/49879322/can-i-concurrently-write-different-slice-elements
resp := make([]string, concurrency.Count)
g, _ := errgroup.WithContext(ctx) // use `errgroup.Group` literal if you don't need to cancel context on the first error
g.SetLimit(concurrency.TotalWorkers)
for i, request := range requests {
// https://github.com/golang/go/wiki/CommonMistakes/#using-goroutines-on-loop-iterator-variables
id := i
req := request
log.Printf("sending request #%d", id)
g.Go(func() (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("recovered panic: %s", r)
}
}()
resp[id], err = Work(ctx, id, req)
if err != nil {
return err
}
return nil
})
}
err := g.Wait() // blocking
if err != nil {
fmt.Println("worker error:", err)
}
getResults(resp)
}
// Work computes the md5 digest of req, bailing out early when ctx is
// already cancelled. The worker id is only used for logging.
func Work(ctx context.Context, id int, req []byte) (string, error) {
	defer log.Printf("worker #%d: stopping\n", id)
	if err := ctx.Err(); err != nil {
		return "", err
	}
	return concurrency.Md5sum(req), nil
}
// getResults reports the collected worker responses; a stand-in for real
// persistence (e.g. a database write).
func getResults(res []string) {
	log.Println("results:", res)
}
|
package commands
import (
"github.com/c0caina/inaWarp/global"
"github.com/df-mc/dragonfly/server/cmd"
)
// WarpList is the "list" subcommand: it prints every warp stored in the
// database together with its integer coordinates.
type WarpList struct {
	List list
}
// Run loads all warps from the database and prints each one with its
// coordinates truncated to integers; database errors are reported to the
// command output instead of being printed.
func (wl WarpList) Run(source cmd.Source, output *cmd.Output) {
	warps, err := global.WarpSqlite.SelectAll()
	if err != nil {
		output.Errorf("[inaWarp] %v", err)
		return
	}
	output.Print("-------- Warps --------")
	for _, warp := range warps {
		x, y, z := int(warp.XYZ[0]), int(warp.XYZ[1]), int(warp.XYZ[2])
		output.Printf("%v coordinates: %v %v %v", warp.Name, x, y, z)
	}
}
// list is the subcommand-name parameter type; SubName makes the command
// framework match the literal word "list".
type list string

// SubName returns the subcommand keyword this parameter matches.
func (s list) SubName() string {
	return "list"
}
|
package models
import (
"github.com/astaxie/beego/orm"
"strconv"
"strings"
"time"
"tokensky_bg_admin/conf"
)
// OtcEntrustOrderQueryParam carries the filter parameters for querying
// OTC entrust orders (used by OtcEntrustOrderPageList).
type OtcEntrustOrderQueryParam struct {
	BaseQueryParam
	Phone string `json:"phone"` // phone number, fuzzy (contains) match
	// Publish-time window, both as unix seconds; zero disables the bound.
	StartTime int64  `json:"startTime"` // range start
	EndTime   int64  `json:"endTime"`   // range end
	Status    string `json:"status"`    // order status filter ("" or "-1" = all)
}
// TableName returns the database table name used by the ORM for
// OtcEntrustOrder rows.
func (a *OtcEntrustOrder) TableName() string {
	return OtcEntrustOrderTBName()
}
// OtcEntrustOrder is an OTC entrust (maker) order record.
type OtcEntrustOrder struct {
	KeyId int `orm:"pk;column(key_id)"json:"keyId"form:"keyId"`
	// Entrust type: 1 = buy order, 2 = sell order.
	EntrustType int `orm:"column(entrust_type)"json:"entrustType"form:"entrustType"`
	// Coin (cryptocurrency) type.
	CoinType string `orm:"column(coin_type)"json:"coinType"form:"coinType"`
	// Unit price.
	UnitPrice float64 `orm:"column(unit_price)"json:"unitPrice"form:"unitPrice"`
	// Fiat money unit.
	MoneyType string `orm:"column(money_type)"json:"moneyType"form:"moneyType"`
	// Quantity.
	Quantity float64 `orm:"column(quantity)"json:"quantity"form:"quantity"`
	// Remaining quantity.
	QuantityLeft float64 `orm:"column(quantity_left)"json:"quantityLeft"form:"quantityLeft"`
	// Minimum trade amount.
	Min float64 `orm:"column(min)"json:"min"form:"min"`
	// Maximum trade amount.
	Max float64 `orm:"column(max)"json:"max"form:"max"`
	// Buyer (vendee) service charge.
	VendeeServiceCharge float64 `orm:"column(vendee_service_charge)"json:"vendeeServiceCharge"form:"vendeeServiceCharge"`
	// Seller (vendor) service charge.
	VendorServiceCharge float64 `orm:"column(vendor_service_charge)"json:"vendorServiceCharge"form:"vendorServiceCharge"`
	// Payment types as a comma-separated list: 1 = Alipay, 2 = WeChat, 3 = bank card.
	PayType string `orm:"column(pay_type)"json:"payType"form:"payType"`
	// PayType expanded into an indicator slice (not stored in the DB);
	// see OtcEntrustOrderPageList for the expansion.
	PayTypeList []int `orm:"-"json:"payTypeList"form:"payTypeList"`
	// Finish time.
	FinishTime time.Time `orm:"type(datetime);column(finish_time)"json:"finishTime"form:"finishTime"`
	// Publish time.
	PushTime time.Time `orm:"type(datetime);column(push_time)"json:"pushTime"form:"pushTime"`
	// Status: 1 = published, 2 = completed, 0 = cancelled, 3 = auto-cancelled by the system.
	Status int `orm:"column(status)"json:"status"form:"status"`
	// Creation time. NOTE(review): "CretaeTime" is a typo preserved for
	// column/JSON compatibility — do not rename without a migration.
	CretaeTime time.Time `orm:"type(datetime);column(cretae_time)"json:"cretaeTime"form:"cretaeTime"`
	// Update time (auto_now: set by the ORM on save).
	UpdateTime time.Time `orm:"auto_now;type(datetime);column(update_time)"json:"updateTime"form:"updateTime"`
	// Automatic cancellation time.
	AutoCancelTime time.Time `orm:"type(datetime);column(auto_cancel_time)"json:"autoCancelTime"form:"autoCancelTime"`
	// Phone number of the owning user, denormalized from User (not stored).
	Phone    string `orm:"-"json:"phone"form:"phone"`
	NickName string `orm:"-"json:"-"form:"nickName"`
	// Owning user (foreign key; many orders to one user).
	User *TokenskyUser `orm:"rel(fk)"json:"-"form:"-"`
}
// OtcEntrustOrderPageList returns one page of entrust orders matching
// params, together with the total number of matching rows. Phone,
// NickName and PayTypeList are filled in on each returned order.
func OtcEntrustOrderPageList(params *OtcEntrustOrderQueryParam) ([]*OtcEntrustOrder, int64) {
	o := orm.NewOrm()
	query := o.QueryTable(OtcEntrustOrderTBName())
	data := make([]*OtcEntrustOrder, 0)
	var total int64
	// Only key_id is a supported sort column, so it is also the default.
	// (The original switch on params.Sort had a single case assigning the
	// same value, and its Order switch ended in a self-assignment that
	// `go vet` flags; both collapsed here with identical behavior.)
	sortorder := "key_id"
	// Empty order and "desc" both sort descending; anything else ascending.
	if params.Order == "" || params.Order == "desc" {
		sortorder = "-" + sortorder
	}
	// Status filter ("" and "-1" mean "all statuses").
	if params.Status != "" && params.Status != "-1" {
		query = query.Filter("status__iexact", params.Status)
	}
	// Publish-time window (unix seconds).
	if params.StartTime > 0 {
		query = query.Filter("push_time__gte", time.Unix(params.StartTime, 0))
	}
	if params.EndTime > 0 {
		query = query.Filter("push_time__lte", time.Unix(params.EndTime, 0))
	}
	// Fuzzy match on the related user's phone number.
	if params.Phone != "" {
		query = query.Filter("User__Phone__icontains", params.Phone)
	}
	total, _ = query.Count()
	// Errors are deliberately ignored (best effort): on failure an empty
	// page and the counted total are returned.
	query.OrderBy(sortorder).Limit(params.Limit, (params.Offset-1)*params.Limit).RelatedSel().All(&data)
	for _, obj := range data {
		// Denormalize the owning user's contact info onto the order.
		if obj.User != nil {
			obj.Phone = obj.User.Phone
			obj.NickName = obj.User.NickName
		}
		// Expand the comma-separated pay_type column into a fixed-size
		// indicator slice: PayTypeList[i] == 1 means pay type i+1 is enabled.
		payList := strings.Split(obj.PayType, ",")
		obj.PayTypeList = make([]int, conf.PAY_TYPE_MAX_NUM)
		for _, str := range payList {
			if con, err := strconv.Atoi(str); err == nil {
				if con > 0 && con <= conf.PAY_TYPE_MAX_NUM {
					obj.PayTypeList[con-1] = 1
				}
			}
		}
	}
	return data, total
}
// OtcEntrustOrderOneByKid fetches a single entrust order by primary key.
// It returns nil when no matching row exists or the query fails.
func OtcEntrustOrderOneByKid(kid int) *OtcEntrustOrder {
	var m OtcEntrustOrder
	query := orm.NewOrm().QueryTable(OtcEntrustOrderTBName()).Filter("key_id__exact", kid)
	if err := query.One(&m); err != nil {
		return nil
	}
	return &m
}
// OtcEntrustOrderOneByKid2 is like OtcEntrustOrderOneByKid but runs on the
// caller-supplied Ormer (e.g. inside a transaction). It returns nil when
// no matching row exists or the query fails.
func OtcEntrustOrderOneByKid2(o orm.Ormer, kid int) *OtcEntrustOrder {
	var m OtcEntrustOrder
	if err := o.QueryTable(OtcEntrustOrderTBName()).Filter("key_id__exact", kid).One(&m); err != nil {
		return nil
	}
	return &m
}
|
package request
import (
"testing"
)
// Result is package-level so the compiler cannot eliminate the
// benchmarked call as dead code.
var Result Request

// BenchmarkGetRequest measures time and allocations per call of
// RequestService.GetRequest.
func BenchmarkGetRequest(b *testing.B) {
	b.ReportAllocs()
	srvc := NewRequestService()
	for i := 0; i < b.N; i++ {
		Result = srvc.GetRequest()
	}
}
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package policygen
// Schema is the policygen input schema. The payload below is an HCL
// document (attribute blocks and <<EOF heredocs) describing the allowed
// config properties; do not edit its byte content casually.
// TODO(https://github.com/golang/go/issues/35950): Move this to its own file.
var Schema = []byte(`
title = "Policy Generator Config Schema"

additionalProperties = false

required = ["template_dir"]

properties = {
  version = {
    description = <<EOF
      Optional constraint on the binary version required for this config.
      See [syntax](https://www.terraform.io/docs/configuration/version-constraints.html).
    EOF
    type        = "string"
  }
  template_dir = {
    description = <<EOF
      Absolute or relative path to the template directory. If relative, this path
      is relative to the directory where the config file lives.
    EOF
    type        = "string"
  }
  forseti_policies = {
    description          = "Key value pairs configure Forseti Policy Library constraints."
    type                 = "object"
    additionalProperties = false
    required             = ["targets"]
    properties = {
      targets = {
        description = <<EOF
          List of targets to apply the policies, e.g. organizations/**,
          organizations/123/folders/456.
        EOF
        type        = "array"
        items = {
          type = "string"
        }
      }
      allowed_policy_member_domains = {
        description = "The list of domains to allow users from, e.g. example.com"
        type        = "array"
        items = {
          type = "string"
        }
      }
    }
  }
}
`)
|
package dkim
import (
"context"
"crypto"
"errors"
"io"
"net/mail"
"path/filepath"
"runtime/trace"
"strings"
"time"
"github.com/emersion/go-message/textproto"
"github.com/emersion/go-msgauth/dkim"
"github.com/foxcpp/maddy/internal/address"
"github.com/foxcpp/maddy/internal/buffer"
"github.com/foxcpp/maddy/internal/config"
"github.com/foxcpp/maddy/internal/dns"
"github.com/foxcpp/maddy/internal/exterrors"
"github.com/foxcpp/maddy/internal/log"
"github.com/foxcpp/maddy/internal/module"
"github.com/foxcpp/maddy/internal/target"
"golang.org/x/net/idna"
"golang.org/x/text/unicode/norm"
)
// Day is one day expressed as a time.Duration; used for the default
// signature expiry (5*Day in Init).
const Day = 86400 * time.Second

var (
	// oversignDefault lists header fields that are signed once per
	// occurrence plus one extra time ("oversigned") so that a relay
	// cannot add another instance without breaking the signature.
	oversignDefault = []string{
		// Directly visible to the user.
		"Subject",
		"Sender",
		"To",
		"Cc",
		"From",
		"Date",
		// Affects body processing.
		"MIME-Version",
		"Content-Type",
		"Content-Transfer-Encoding",
		// Affects user interaction.
		"Reply-To",
		"In-Reply-To",
		"Message-Id",
		"References",
		// Provide additional security benefit for OpenPGP.
		"Autocrypt",
		"Openpgp",
	}
	// signDefault lists header fields that are signed but not oversigned.
	signDefault = []string{
		// Mailing list information. Not oversigned to prevent signature
		// breakage by aliasing MLMs.
		"List-Id",
		"List-Help",
		"List-Unsubscribe",
		"List-Post",
		"List-Owner",
		"List-Archive",
		// Not oversigned since it can be prepended by intermediate relays.
		"Resent-To",
		"Resent-Sender",
		"Resent-Message-Id",
		"Resent-Date",
		"Resent-From",
		"Resent-Cc",
	}
	// hashFuncs maps configurable hash names to crypto hashes; Init
	// panics if a config-accepted name is missing here.
	hashFuncs = map[string]crypto.Hash{
		"sha256": crypto.SHA256,
	}
)
// Modifier is the sign_dkim message modifier: it DKIM-signs message
// bodies in RewriteBody using the configured domain, selector and key.
type Modifier struct {
	instName string

	domain         string
	selector       string
	signer         crypto.Signer
	oversignHeader []string
	signHeader     []string
	headerCanon    dkim.Canonicalization
	bodyCanon      dkim.Canonicalization
	// sigExpiry is added to time.Now() as the signature expiration;
	// zero disables expiry.
	sigExpiry time.Duration
	hash      crypto.Hash
	// senderMatch holds the enabled require_sender_match methods as a set.
	senderMatch    map[string]struct{}
	multipleFromOk bool

	log log.Logger
}
// New constructs an unconfigured sign_dkim module instance. The optional
// inline arguments are the domain and selector; zero arguments defers
// both to the config block, anything else is rejected.
func New(_, instName string, _, inlineArgs []string) (module.Module, error) {
	m := &Modifier{
		instName: instName,
		log:      log.Logger{Name: "sign_dkim"},
	}
	switch len(inlineArgs) {
	case 0:
		// Domain and selector come from the config block instead.
	case 2:
		m.domain = inlineArgs[0]
		m.selector = inlineArgs[1]
	default:
		return nil, errors.New("sign_dkim: wrong amount of inline arguments")
	}
	return m, nil
}
// Name returns the module type name, "sign_dkim".
func (m *Modifier) Name() string {
	return "sign_dkim"
}
// InstanceName returns the configured name of this module instance.
func (m *Modifier) InstanceName() string {
	return m.instName
}
// Init reads the module configuration, validates the sender-match policy,
// and loads (or generates and announces) the DKIM signing key.
func (m *Modifier) Init(cfg *config.Map) error {
	var (
		hashName        string
		keyPathTemplate string
		newKeyAlgo      string
		senderMatch     []string
	)
	cfg.Bool("debug", true, false, &m.log.Debug)
	cfg.String("domain", false, false, m.domain, &m.domain)
	cfg.String("selector", false, false, m.selector, &m.selector)
	cfg.String("key_path", false, false, "dkim_keys/{domain}_{selector}.key", &keyPathTemplate)
	cfg.StringList("oversign_fields", false, false, oversignDefault, &m.oversignHeader)
	cfg.StringList("sign_fields", false, false, signDefault, &m.signHeader)
	cfg.Enum("header_canon", false, false,
		[]string{string(dkim.CanonicalizationRelaxed), string(dkim.CanonicalizationSimple)},
		dkim.CanonicalizationRelaxed, (*string)(&m.headerCanon))
	cfg.Enum("body_canon", false, false,
		[]string{string(dkim.CanonicalizationRelaxed), string(dkim.CanonicalizationSimple)},
		dkim.CanonicalizationRelaxed, (*string)(&m.bodyCanon))
	cfg.Duration("sig_expiry", false, false, 5*Day, &m.sigExpiry)
	cfg.Enum("hash", false, false,
		[]string{"sha256"}, "sha256", &hashName)
	cfg.Enum("newkey_algo", false, false,
		[]string{"rsa4096", "rsa2048", "ed25519"}, "rsa2048", &newKeyAlgo)
	cfg.EnumList("require_sender_match", false, false,
		[]string{"envelope", "auth_domain", "auth_user", "off"}, []string{"envelope", "auth"}, &senderMatch)
	cfg.Bool("allow_multiple_from", false, false, &m.multipleFromOk)
	if _, err := cfg.Process(); err != nil {
		return err
	}
	// Error prefixes fixed to "sign_dkim" (the module name, cf. Name());
	// the original said "sign_domain", which matches no module.
	if m.domain == "" {
		return errors.New("sign_dkim: domain is not specified")
	}
	if m.selector == "" {
		return errors.New("sign_dkim: selector is not specified")
	}
	m.senderMatch = make(map[string]struct{}, len(senderMatch))
	for _, method := range senderMatch {
		m.senderMatch[method] = struct{}{}
	}
	if _, off := m.senderMatch["off"]; off && len(senderMatch) != 1 {
		return errors.New("sign_dkim: require_sender_match: 'off' should not be combined with other methods")
	}
	// The Enum matcher above restricts hash to keys of hashFuncs, so a miss
	// here is a programming error, not a user error.
	m.hash = hashFuncs[hashName]
	if m.hash == 0 {
		panic("sign_dkim.Init: Hash function allowed by config matcher but not present in hashFuncs")
	}
	// Expand {domain}/{selector} placeholders in the key path template.
	keyValues := strings.NewReplacer("{domain}", m.domain, "{selector}", m.selector)
	keyPath := keyValues.Replace(keyPathTemplate)
	signer, newKey, err := m.loadOrGenerateKey(keyPath, newKeyAlgo)
	if err != nil {
		return err
	}
	if newKey {
		// Tell the operator where the freshly generated key material lives
		// and what DNS record to publish.
		dnsPath := keyPath + ".dns"
		if filepath.Ext(keyPath) == ".key" {
			dnsPath = keyPath[:len(keyPath)-4] + ".dns"
		}
		m.log.Printf("generated a new %s keypair, private key is in %s, TXT record with public key is in %s,\n"+
			"put its contents into TXT record for %s._domainkey.%s to make signing and verification work",
			newKeyAlgo, keyPath, dnsPath, m.selector, m.domain)
	}
	m.signer = signer
	return nil
}
// fieldsToSign builds the HeaderKeys list for dkim.SignOptions: each
// configured field is listed once per occurrence in h, and oversigned
// fields get one extra entry. Duplicates between the two config lists are
// dropped so they will not cause panic() in go-msgauth internals.
func (m *Modifier) fieldsToSign(h *textproto.Header) []string {
	seen := make(map[string]struct{})
	res := make([]string, 0, len(m.oversignHeader)+len(m.signHeader))
	add := func(key string, oversign bool) {
		lower := strings.ToLower(key)
		if _, ok := seen[lower]; ok {
			return
		}
		seen[lower] = struct{}{}
		// One entry per actual use of the field in the header.
		for field := h.FieldsByKey(key); field.Next(); {
			res = append(res, key)
		}
		// And one more to "oversign" it.
		if oversign {
			res = append(res, key)
		}
	}
	for _, key := range m.oversignHeader {
		add(key, true)
	}
	for _, key := range m.signHeader {
		add(key, false)
	}
	return res
}
// state is the per-message modifier state for sign_dkim, carrying the
// parent module, the message metadata and a delivery-scoped logger.
type state struct {
	m    *Modifier
	meta *module.MsgMetadata
	log  log.Logger
}
// ModStateForMsg creates the per-message state, attaching a logger scoped
// to this delivery. It never fails.
func (m *Modifier) ModStateForMsg(ctx context.Context, msgMeta *module.MsgMetadata) (module.ModifierState, error) {
	st := state{
		m:    m,
		meta: msgMeta,
		log:  target.DeliveryLogger(m.log, msgMeta),
	}
	return st, nil
}
// shouldSign decides whether the message identified by msgId may be
// signed and, if so, returns the AUID (agent/user identifier) to place in
// the signature. With sender matching off, the identifier is just
// "@"+domain; otherwise the From header must parse, match the key domain,
// and satisfy the configured envelope/auth checks.
func (m *Modifier) shouldSign(eai bool, msgId string, h *textproto.Header, mailFrom string, authName string) (string, bool) {
	if _, off := m.senderMatch["off"]; off {
		// No sender checks: sign with a domain-only identifier. For
		// non-EAI messages the domain must be representable in A-labels.
		if !eai {
			aDomain, err := idna.ToASCII(m.domain)
			if err != nil {
				// Log message fixed: the original repeated the word
				// "domain" ("key domain domain").
				m.log.Msg("not signing, cannot convert key domain into A-labels",
					"from_addr", m.domain, "msg_id", msgId)
				return "", false
			}
			return "@" + aDomain, true
		}
		return "@" + m.domain, true
	}

	fromVal := h.Get("From")
	if fromVal == "" {
		m.log.Msg("not signing, empty From", "msg_id", msgId)
		return "", false
	}
	fromAddrs, err := mail.ParseAddressList(fromVal)
	if err != nil {
		m.log.Msg("not signing, malformed From field", "err", err, "msg_id", msgId)
		return "", false
	}
	if len(fromAddrs) != 1 && !m.multipleFromOk {
		m.log.Msg("not signing, multiple addresses in From", "msg_id", msgId)
		return "", false
	}

	fromAddr := fromAddrs[0].Address
	fromUser, fromDomain, err := address.Split(fromAddr)
	if err != nil {
		m.log.Msg("not signing, malformed address in From",
			"err", err, "from_addr", fromAddr, "msg_id", msgId)
		return "", false
	}

	// The From domain must be the domain we hold the key for.
	if !dns.Equal(fromDomain, m.domain) {
		m.log.Msg("not signing, From domain is not key domain",
			"from_domain", fromDomain, "key_domain", m.domain, "msg_id", msgId)
		return "", false
	}

	if _, do := m.senderMatch["envelope"]; do && !address.Equal(fromAddr, mailFrom) {
		m.log.Msg("not signing, From address is not envelope address",
			"from_addr", fromAddr, "envelope", mailFrom, "msg_id", msgId)
		return "", false
	}

	if _, do := m.senderMatch["auth"]; do {
		// Compare either the local part or the full address (when the auth
		// identity contains "@") against the authenticated user, both
		// NFC-normalized.
		compareWith := norm.NFC.String(fromUser)
		authName := norm.NFC.String(authName)
		if strings.Contains(authName, "@") {
			compareWith, _ = address.ForLookup(fromAddr)
		}
		if !strings.EqualFold(compareWith, authName) {
			m.log.Msg("not signing, From address is not authenticated identity",
				"from_addr", fromAddr, "auth_id", authName, "msg_id", msgId)
			return "", false
		}
	}

	// Don't include non-ASCII in the identifier if message is non-EAI.
	if !eai {
		aDomain, err := idna.ToASCII(fromDomain)
		if err != nil {
			m.log.Msg("not signing, cannot convert From domain into A-labels",
				"from_addr", fromAddr, "msg_id", msgId)
			return "", false
		}
		if !address.IsASCII(fromUser) {
			// Local part is non-ASCII: fall back to a domain-only AUID.
			return "@" + aDomain, true
		}
		return fromUser + "@" + aDomain, true
	}

	return fromAddr, true
}
// RewriteSender returns the MAIL FROM address unchanged; DKIM signing
// does not modify the envelope sender.
func (s state) RewriteSender(ctx context.Context, mailFrom string) (string, error) {
	return mailFrom, nil
}
// RewriteRcpt returns the RCPT TO address unchanged; DKIM signing does
// not modify recipients.
func (s state) RewriteRcpt(ctx context.Context, rcptTo string) (string, error) {
	return rcptTo, nil
}
// RewriteBody DKIM-signs the message: it streams the header and body
// through a dkim.Signer and prepends the resulting DKIM-Signature field
// to h. Messages rejected by shouldSign are passed through unmodified
// (nil error, no signature).
func (s state) RewriteBody(ctx context.Context, h *textproto.Header, body buffer.Buffer) error {
	defer trace.StartRegion(ctx, "sign_dkim/RewriteBody").End()

	var authUser string
	if s.meta.Conn != nil {
		authUser = s.meta.Conn.AuthUser
	}

	id, ok := s.m.shouldSign(s.meta.SMTPOpts.UTF8, s.meta.ID, h, s.meta.OriginalFrom, authUser)
	if !ok {
		// Not an error: the message simply goes out unsigned.
		return nil
	}

	domain := s.m.domain
	selector := s.m.selector

	// If the message is non-EAI, we are not allowed to use domains in U-labels,
	// attempt to convert.
	if !s.meta.SMTPOpts.UTF8 {
		var err error
		domain, err = idna.ToASCII(s.m.domain)
		if err != nil {
			return exterrors.WithFields(err, map[string]interface{}{"modifier": "sign_dkim"})
		}

		selector, err = idna.ToASCII(s.m.selector)
		if err != nil {
			return exterrors.WithFields(err, map[string]interface{}{"modifier": "sign_dkim"})
		}
	}

	opts := dkim.SignOptions{
		Domain:                 domain,
		Selector:               selector,
		Identifier:             id,
		Signer:                 s.m.signer,
		Hash:                   s.m.hash,
		HeaderCanonicalization: s.m.headerCanon,
		BodyCanonicalization:   s.m.bodyCanon,
		HeaderKeys:             s.m.fieldsToSign(h),
	}
	if s.m.sigExpiry != 0 {
		opts.Expiration = time.Now().Add(s.m.sigExpiry)
	}
	signer, err := dkim.NewSigner(&opts)
	if err != nil {
		return exterrors.WithFields(err, map[string]interface{}{"modifier": "sign_dkim"})
	}
	// Feed header then body into the signer; on any failure the signer is
	// closed explicitly before returning (no defer, because the final
	// Close error below must be observed, not discarded).
	if err := textproto.WriteHeader(signer, *h); err != nil {
		signer.Close()
		return exterrors.WithFields(err, map[string]interface{}{"modifier": "sign_dkim"})
	}
	r, err := body.Open()
	if err != nil {
		signer.Close()
		return exterrors.WithFields(err, map[string]interface{}{"modifier": "sign_dkim"})
	}
	if _, err := io.Copy(signer, r); err != nil {
		signer.Close()
		return exterrors.WithFields(err, map[string]interface{}{"modifier": "sign_dkim"})
	}

	if err := signer.Close(); err != nil {
		return exterrors.WithFields(err, map[string]interface{}{"modifier": "sign_dkim"})
	}

	// Prepend the computed DKIM-Signature to the header.
	h.AddRaw([]byte(signer.Signature()))

	s.m.log.DebugMsg("signed", "identifier", id)

	return nil
}
// Close releases per-message state; sign_dkim holds no resources, so
// there is nothing to clean up.
func (s state) Close() error {
	return nil
}
// init registers the sign_dkim module constructor with the module system.
func init() {
	module.Register("sign_dkim", New)
}
|
package 二维数组
import (
"github.com/Lxy417165709/LeetCode-Golang/新刷题/matrix_util"
)
// findNumberIn2DArray reports whether target occurs in a matrix whose
// rows ascend left-to-right and columns ascend top-to-bottom. Starting at
// the top-right corner, each comparison eliminates a whole row or column.
func findNumberIn2DArray(matrix [][]int, target int) bool {
	height, width := matrix_util.GetHeightAndWidth(matrix)
	row, column := 0, width-1
	for row < height && column >= 0 {
		switch value := matrix[row][column]; {
		case value == target:
			return true
		case value > target:
			column-- // everything below in this column is even larger
		default:
			row++ // everything left in this row is even smaller
		}
	}
	return false
}
|
package main
import (
"fmt"
"log"
"net/http"
"path"
"html/template"
)
// main starts an HTTP server on port 8080 with Handler serving the root
// path. (Norwegian comments translated to English.)
func main() {
	fmt.Println("Starting application") // announce startup
	http.HandleFunc("/", Handler)       // register the root handler
	// ListenAndServe only returns on failure; the original discarded the
	// error, silently hiding problems such as an already-bound port.
	errorCheck(http.ListenAndServe(":8080", nil))
}
// errorCheck aborts the program via log.Fatal when err is non-nil and
// does nothing otherwise.
func errorCheck(err error) {
	if err == nil {
		return
	}
	log.Fatal(err)
}
// Handler parses template/index.html and renders it to the response.
// A parse failure is reported as HTTP 500; the original ignored the
// error, leaving tmpl nil and panicking on Execute.
func Handler(w http.ResponseWriter, r *http.Request) {
	// Named tmplPath to avoid shadowing the imported "path" package.
	tmplPath := path.Join("template", "index.html")
	tmpl, err := template.ParseFiles(tmplPath)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Execute may fail mid-write; at that point headers are already sent,
	// so the error can only be logged.
	if err := tmpl.Execute(w, ""); err != nil {
		log.Println(err)
	}
}
|
// Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package krusty_test
/*
import (
"testing"
kusttest_test "sigs.k8s.io/kustomize/api/testutils/kusttest"
)
var expected string = `
apiVersion: v1
data:
rcon-password: Q0hBTkdFTUUh
kind: Secret
metadata:
labels:
app: test-minecraft
chart: minecraft-1.2.0
heritage: Helm
release: test
name: test-minecraft
type: Opaque
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
annotations:
volume.alpha.kubernetes.io/storage-class: default
labels:
app: test-minecraft
chart: minecraft-1.2.0
heritage: Helm
release: test
name: test-minecraft-datadir
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
labels:
app: test-minecraft
chart: minecraft-1.2.0
heritage: Helm
release: test
name: test-minecraft
spec:
ports:
- name: minecraft
port: 25565
protocol: TCP
targetPort: minecraft
selector:
app: test-minecraft
type: LoadBalancer
`
func TestHelmChartInflationGenerator(t *testing.T) {
th := kusttest_test.MakeHarness(t)
th.WriteK("/app", `
helmChartInflationGenerator:
- chartName: minecraft
chartRepoUrl: https://kubernetes-charts.storage.googleapis.com
chartVersion: v1.2.0
releaseName: test
releaseNamespace: testNamespace
`)
m := th.Run("/app", th.MakeDefaultOptions())
th.AssertActualEqualsExpected(m, expected)
}
func TestHelmChartInflationGeneratorAsPlugin(t *testing.T) {
th := kusttest_test.MakeHarness(t)
th.WriteK("/app", `
generators:
- helm.yaml
`)
th.WriteF("/app/helm.yaml", `
apiVersion: builtin
kind: HelmChartInflationGenerator
metadata:
name: myMap
chartName: minecraft
chartRepoUrl: https://kubernetes-charts.storage.googleapis.com
chartVersion: v1.2.0
releaseName: test
releaseNamespace: testNamespace
`)
m := th.Run("/app", th.MakeDefaultOptions())
th.AssertActualEqualsExpected(m, expected)
}
*/
|
package key
import (
"io/ioutil"
"os"
"testing"
"time"
kyber "github.com/drand/kyber"
"github.com/drand/kyber/util/random"
"github.com/stretchr/testify/require"
)
// TestGroupSaveLoad checks that a Group survives a save/load round trip:
// node count, threshold, distributed public key and period must all be
// preserved.
func TestGroupSaveLoad(t *testing.T) {
	n := 3
	ids := make([]*Identity, n)
	dpub := make([]kyber.Point, n)
	// Build n identities with random public keys and reuse those keys as
	// the (fake) distributed public key coefficients below.
	for i := 0; i < n; i++ {
		ids[i] = &Identity{
			Key:  KeyGroup.Point().Mul(KeyGroup.Scalar().Pick(random.New()), nil),
			Addr: "--",
		}
		dpub[i] = ids[i].Key
	}
	group := LoadGroup(ids, &DistPublic{dpub}, DefaultThreshold(n))
	group.Period = time.Second * 4
	gtoml := group.TOML().(*GroupTOML)
	require.NotNil(t, gtoml.PublicKey)
	// faking distributed public key coefficients
	// NOTE(review): ioutil.TempFile is deprecated since Go 1.16;
	// os.CreateTemp is the drop-in replacement.
	groupFile, err := ioutil.TempFile("", "group.toml")
	require.NoError(t, err)
	groupPath := groupFile.Name()
	groupFile.Close()
	defer os.RemoveAll(groupPath)
	require.NoError(t, Save(groupPath, group, false))
	// Load into a fresh Group and compare the round-tripped fields.
	loaded := &Group{}
	require.NoError(t, Load(groupPath, loaded))
	require.Equal(t, len(loaded.Nodes), len(group.Nodes))
	require.Equal(t, loaded.Threshold, group.Threshold)
	require.True(t, loaded.PublicKey.Equal(group.PublicKey))
	require.Equal(t, loaded.Period, group.Period)
}
|
/*
Sometimes when writing brainfuck code, you feel the need to make it longer than needed to encourage debugging. You could do it by just plopping a >< in there, but what fun is that? You'll need something longer and less NOPey to confuse anybody reading your code.
Quick introduction to Brainfuck
Brainfuck is an esoteric programming language created in 1993 by Urban Müller, and notable for its extreme minimalism. (Wikipedia)
Brainfuck is a language based on eight commands: +-><,.[]. The code is run on something like a Turing machine: an infinite tape on which values can be changed. In this challenge, we'll focus on the first four:
+ increment the value at the pointer
- decrement the value at the pointer
> move the pointer right
< move the pointer left
Brainfuck NOPs
A brainfuck NOP is a sequence of brainfuck characters that, when executed from any state, leads to no change in the state. They consist of the four characters mentioned above.
The Challenge
The challenge is to write a program or function that, when executed, generates a random brainfuck NOP of the given length.
Input
You will receive as input a nonnegative even integer n. (NOPs are impossible for odd n.)
Output
You will output a random brainfuck NOP of the length n.
Rules
The definition of NOP: when the output of the program is inserted at any point in a brainfuck program, the behavior of said program must not change in any way. In other words, it must not change the state of the interpreter.
Note that for example +>-< is incorrect, since it changes the values of the two cells without changing them back. Please test your solution for these before posting.
Also note that +>-<->+< is a NOP that can't be reduced to nothing just by removing >< <> +- -+. Thus, you can't use an algorithm that just inserts these inside each other.
Every valid NOP of the length n must have a nonzero chance of appearing in the output. The distribution does not have to be uniform, though.
The brainfuck interpreter in question has a doubly infinite tape of arbitrary precision cells. That is, you can go infinitely to the both directions, and increment/decrement each cell indefinitely.
The program must finish within 1 minute for n = 100 on my machine, so no generating all possible NOPs and picking one.
If given invalid input (non-integer, negative, odd, etc.) you may do anything you'd like, including crash.
Scoring
This is code-golf, so the shortest answer in bytes wins.
Examples
Here are all valid outputs for n = 4:
++-- +-+- +--+ --++ -+-+ -++-
>><< ><>< ><<> <<>> <><> <>><
><+- ><-+ <>+- <>-+
>+-< >-+< <+-> <-+>
+><- -><+ +<>- -<>+
+->< -+>< +-<> -+<>
Here are a few possible outputs for n = 20:
+>>->+<->-<<<->>++<<
>+>-<+<->+-<>->+<-<+
+--+-++--++-+--+-++-
>>>>>>>>>+-<<<<<<<<<
*/
package main
import (
"bytes"
"fmt"
"math/rand"
"time"
)
// main seeds the PRNG and prints a random brainfuck NOP of length 100.
func main() {
	rand.Seed(time.Now().UnixNano())
	fmt.Println(gen(100))
}
// gen returns a random brainfuck NOP of length n, assembled from n/2
// randomly chosen "+-" or "><" pairs (each pair is itself a NOP, so any
// concatenation of them is too). Odd lengths admit no NOP, so gen
// returns the empty string for odd n.
func gen(n int) string {
	if n%2 != 0 {
		return ""
	}
	pairs := []string{"+-", "><"}
	var b bytes.Buffer
	for written := 0; written < n; written += 2 {
		b.WriteString(pairs[rand.Intn(len(pairs))])
	}
	return b.String()
}
|
/* Copyright (c) 2016 Jason Ish
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package api
import (
"encoding/json"
"github.com/jasonish/evebox/appcontext"
"github.com/jasonish/evebox/core"
"github.com/jasonish/evebox/server/auth"
"github.com/jasonish/evebox/server/router"
"github.com/jasonish/evebox/server/sessions"
"github.com/pkg/errors"
"net/http"
)
// ApiError is an error carrying the HTTP status code that should be
// reported to the client alongside the message.
type ApiError struct {
	Status  int    `json:"status"`
	Message string `json:"message"`
}
// Error implements the error interface; only the message is returned.
func (e ApiError) Error() string {
	return e.Message
}
// httpErrorResponse pairs an error with the HTTP status code to emit.
// The embedded error makes the struct itself satisfy the error interface.
type httpErrorResponse struct {
	error
	status int
}
// MarshalJSON renders the response as
// {"status": <status>, "error": {"message": <message>}}.
func (r *httpErrorResponse) MarshalJSON() ([]byte, error) {
	payload := map[string]interface{}{
		"status": r.status,
		"error": map[string]interface{}{
			"message": r.Error(),
		},
	}
	return json.Marshal(payload)
}
// httpNotFoundResponse builds a 404 response carrying the given message.
func httpNotFoundResponse(message string) *httpErrorResponse {
	return newHttpErrorResponse(http.StatusNotFound, errors.New(message))
}
// newHttpErrorResponse wraps err with an explicit HTTP status code.
func newHttpErrorResponse(statusCode int, err error) *httpErrorResponse {
	return &httpErrorResponse{
		error:  err,
		status: statusCode,
	}
}
// apiHandlerFunc is the signature of API handlers; any returned error is
// converted into a JSON error response by apiFuncWrapper.
type apiHandlerFunc func(w *ResponseWriter, r *http.Request) error
// apiFuncWrapper adapts an apiHandlerFunc to http.Handler, converting any
// error the handler returns into a JSON error response.
func apiFuncWrapper(handler apiHandlerFunc) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		err := handler(NewResponseWriter(w), r)
		if err == nil {
			return
		}
		w.Header().Set("content-type", "application/json")
		encoder := json.NewEncoder(w)

		// Map well-known error types onto an HTTP status for the generic case.
		status := http.StatusInternalServerError
		switch err.(type) {
		case *core.EventNotFoundError:
			status = http.StatusNotFound
		}

		switch err := err.(type) {
		case ApiError:
			w.WriteHeader(err.Status)
			encoder.Encode(err)
		case *httpErrorResponse:
			w.WriteHeader(err.status)
			encoder.Encode(err)
		default:
			// Bug fix: the header was previously hard-coded to 500 even when
			// the JSON body reported a different status (e.g. 404 for
			// *core.EventNotFoundError). Use the computed status for both.
			w.WriteHeader(status)
			encoder.Encode(&httpErrorResponse{
				error:  err,
				status: status,
			})
		}
	})
}
// apiRouter wraps the provided router with some helper functions for
// registering API handlers of type apiHandlerFunc.
type apiRouter struct {
	router *router.Router
}
// GET registers handler for GET requests on path.
func (r *apiRouter) GET(path string, handler apiHandlerFunc) {
	r.router.GET(path, apiFuncWrapper(handler))
}

// POST registers handler for POST requests on path.
func (r *apiRouter) POST(path string, handler apiHandlerFunc) {
	r.router.POST(path, apiFuncWrapper(handler))
}

// OPTIONS registers handler for OPTIONS requests on path.
func (r *apiRouter) OPTIONS(path string, handler apiHandlerFunc) {
	r.router.OPTIONS(path, apiFuncWrapper(handler))
}
// ApiContext bundles the application context, session store and
// authenticator shared by all API handlers.
type ApiContext struct {
	appContext    *appcontext.AppContext
	sessionStore  *sessions.SessionStore
	authenticator auth.Authenticator
}
// NewApiContext constructs an ApiContext from its dependencies.
func NewApiContext(appContext *appcontext.AppContext,
	sessionStore *sessions.SessionStore, authenticator auth.Authenticator) *ApiContext {
	return &ApiContext{
		appContext:    appContext,
		sessionStore:  sessionStore,
		authenticator: authenticator,
	}
}
// InitRoutes registers every API endpoint on the given router.
func (c *ApiContext) InitRoutes(router *router.Router) {
	r := apiRouter{router}
	// authentication
	r.POST("/login", c.LoginHandler)
	r.OPTIONS("/login", c.LoginOptions)
	r.GET("/logout", c.LogoutHandler)
	// alerts
	r.GET("/alerts", c.AlertsHandler)
	r.POST("/alert-group/archive", c.AlertGroupArchiveHandler)
	r.POST("/alert-group/star", c.EscalateAlertGroupHandler)
	r.POST("/alert-group/unstar", c.DeEscalateAlertGroupHandler)
	r.POST("/alert-group/comment", c.CommentOnAlertGroupHandler)
	// misc
	r.GET("/version", c.VersionHandler)
	r.POST("/submit", c.SubmitHandler)
	r.POST("/eve2pcap", c.Eve2PcapHandler)
	r.POST("/query", c.QueryHandler)
	r.GET("/config", c.ConfigHandler)
	// events
	r.POST("/event/{id}/archive", c.ArchiveEventHandler)
	r.POST("/event/{id}/escalate", c.EscalateEventHandler)
	r.POST("/event/{id}/de-escalate", c.DeEscalateEventHandler)
	r.POST("/event/{id}/comment", c.CommentOnEventHandler)
	r.GET("/event/{id}", c.GetEventByIdHandler)
	r.GET("/event-query", c.EventQueryHandler)
	// reports
	r.GET("/report/dns/requests/rrnames", c.ReportDnsRequestRrnames)
	r.POST("/report/dns/requests/rrnames", c.ReportDnsRequestRrnames)
	r.GET("/netflow", c.NetflowHandler)
	r.GET("/report/agg", c.ReportAggs)
	r.GET("/report/histogram", c.ReportHistogram)
	// flows
	r.POST("/find-flow", c.FindFlowHandler)
	r.GET("/flow/histogram", c.FlowHistogram)
}
|
package handlers
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"github.com/danielhood/quest.server.api/entities"
"github.com/danielhood/quest.server.api/repositories"
"github.com/danielhood/quest.server.api/services"
)
// Quest is the HTTP handler for quest resources; persistence is delegated
// to the wrapped QuestService.
type Quest struct {
	svc services.QuestService // backing service
}
// NewQuest creates a Quest handler backed by a QuestService over the
// given repository.
func NewQuest(ur repositories.QuestRepo) *Quest {
	return &Quest{services.NewQuestService(ur)}
}
// ServeHTTP routes quest requests: OPTIONS (CORS preflight), GET (read one
// quest by ?key=... or all), POST (create), PUT (update), DELETE (remove).
// Authorization is a header check on QUEST_AUTH_TYPE; the duplicated
// per-branch checks are now factored into the authorized helper, and the
// PUT branch's copy-pasted "POST" comment is corrected.
func (h *Quest) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	h.enableCors(&w)

	switch req.Method {
	case "OPTIONS":
		log.Print("/token:OPTIONS")
		if req.Header.Get("Access-Control-Request-Method") != "" {
			w.Header().Set("Allow", req.Header.Get("Access-Control-Request-Method"))
			w.Header().Set("Access-Control-Allow-Methods", req.Header.Get("Access-Control-Request-Method"))
		}
		w.Header().Set("Access-Control-Allow-Headers", "authorization,access-control-allow-origin,content-type")
	case "GET":
		// Quest GET requires device or user level access.
		if !h.authorized(w, req, "device", "user") {
			return
		}
		log.Print("/quest:GET")
		log.Print("GET params were:", req.URL.Query())

		questCode := req.URL.Query().Get("key")
		if len(questCode) == 0 {
			// No key: list everything. Marshal errors are ignored as before.
			quests, _ := h.svc.ReadAll()
			questsBytes, _ := json.Marshal(quests)
			w.Write(questsBytes)
		} else {
			quest, err := h.svc.Read(questCode)
			if err != nil {
				http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
				return
			}
			questBytes, _ := json.Marshal(quest)
			w.Write(questBytes)
		}
	case "POST":
		// Quest POST requires user level access.
		if !h.authorized(w, req, "user") {
			return
		}
		log.Print("/quest:POST")
		quest := h.parsePutRequest(w, req)
		if quest == nil {
			return
		}
		_ = h.svc.Create(quest)
		questBytes, _ := json.Marshal(quest)
		w.Write(questBytes)
	case "PUT":
		// Quest PUT requires user level access.
		if !h.authorized(w, req, "user") {
			return
		}
		log.Print("/quest:PUT")
		quest := h.parsePutRequest(w, req)
		if quest == nil {
			return
		}
		_ = h.svc.Update(quest)
		questBytes, _ := json.Marshal(quest)
		w.Write(questBytes)
	case "DELETE":
		// Quest DELETE requires user level access.
		if !h.authorized(w, req, "user") {
			return
		}
		log.Print("/quest:DELETE")
		quest := h.parsePutRequest(w, req)
		if quest == nil {
			w.Write(nil)
			return
		}
		_ = h.svc.Delete(quest)
		w.Write(nil)
	default:
		http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
	}
}

// authorized reports whether the request's QUEST_AUTH_TYPE header matches
// one of the allowed access levels; otherwise it writes a 401 response and
// returns false.
func (h *Quest) authorized(w http.ResponseWriter, req *http.Request, allowed ...string) bool {
	authType := req.Header.Get("QUEST_AUTH_TYPE")
	for _, a := range allowed {
		if authType == a {
			return true
		}
	}
	http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
	return false
}
// parsePutRequest decodes the request body into a Quest. On any failure it
// writes an error response and returns nil. Client-side problems (empty or
// malformed body, missing key) now report 400 Bad Request instead of the
// previous blanket 500; only a failed body read remains a server error.
func (h *Quest) parsePutRequest(w http.ResponseWriter, req *http.Request) *entities.Quest {
	requestBody, err := ioutil.ReadAll(req.Body)
	defer req.Body.Close()
	if err != nil {
		http.Error(w, "Unable to parse request body", http.StatusInternalServerError)
		return nil
	}
	if len(requestBody) == 0 {
		http.Error(w, "Empty Quest passed", http.StatusBadRequest)
		return nil
	}
	var quest entities.Quest
	if err = json.Unmarshal(requestBody, &quest); err != nil {
		http.Error(w, "Unable to parse Quest json", http.StatusBadRequest)
		return nil
	}
	// Key is mandatory; the services layer looks quests up by it.
	if len(quest.Key) == 0 {
		http.Error(w, "key not specified", http.StatusBadRequest)
		return nil
	}
	return &quest
}
// enableCors allows any origin on the response.
// NOTE(review): a plain http.ResponseWriter parameter would suffice — the
// pointer-to-interface indirection is unnecessary — but changing the
// signature would touch every caller.
func (h *Quest) enableCors(w *http.ResponseWriter) {
	(*w).Header().Set("Access-Control-Allow-Origin", "*")
}
|
package main
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"local/notorious/logging"
"local/notorious/opts"
"os"
"strings"
)
// main parses command-line options, runs Notorious over the process's
// standard streams, and terminates with a non-zero status (via log.Fatalf)
// on any error.
func main() {
	o, err := opts.Parse()
	log := logging.Error(os.Stderr)
	if err != nil {
		log.Fatalf("parsing command-line options: %v. Try notorious --help for more information on command-line flags.", err)
	}
	err = Notorious(o, os.Stdin, os.Stdout, os.Stderr)
	if err != nil {
		log.Fatalf("running notorious: %v", err)
	}
	os.Exit(0)
}
// Notorious reads all of stdin, marks every line that matches the options
// (plus the configured lines of context before and after each match), and
// writes the marked lines to stdout, optionally prefixed with their
// 0-based line number. Debug logging goes to stderr when verbose.
// When main() calls this, stdin/stdout/stderr are the usual process streams.
func Notorious(o opts.Opts, stdin io.Reader, stdout, stderr io.Writer) error {
	// Read everything up front. Simple, but memory-hungry on large
	// inputs; a sliding window sized by the context options would be
	// more frugal.
	raw, err := ioutil.ReadAll(stdin)
	if err != nil {
		return err
	}
	logger := logging.Debug(stderr, o.Verbose)

	lines := strings.Split(string(raw), "\n")
	selected := make([]bool, len(lines))
	for idx, text := range lines {
		if !o.Matches(text) {
			continue
		}
		logger.Printf("match: line %d: %s", idx, text)
		from := findMax(0, idx-o.Context.Before)
		to := findMin(len(lines), idx+o.Context.After+1)
		for j := from; j < to; j++ {
			selected[j] = true
		}
	}

	// Go does not buffer these streams by default; batch the many small
	// writes we are about to do.
	w := bufio.NewWriter(stdout)
	for idx, text := range lines {
		if !selected[idx] {
			continue
		}
		if o.LineNumbers {
			fmt.Fprintf(w, "%d\t%s\n", idx, text)
		} else {
			fmt.Fprintln(w, text)
		}
	}
	// Flush, or some buffered output may never reach the underlying writer.
	return w.Flush()
}
// findMax returns the larger of a and b.
func findMax(a, b int) int {
	if b >= a {
		return b
	}
	return a
}
// findMin returns the smaller of a and b.
func findMin(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
package orm
import (
"testing"
"time"
"github.com/iGoogle-ink/gotil/xlog"
"github.com/iGoogle-ink/gotil/xtime"
)
var (
dsn = "root:root@tcp(mysql:3306)/school?parseTime=true&loc=Local&charset=utf8mb4"
)
// Student maps the "student" table for both gorm and xorm.
type Student struct {
	Id   int    `gorm:"column:id;primaryKey" xorm:"'id' pk"` // primary key
	Name string `gorm:"column:name" xorm:"'name'"`
}
// TableName tells the ORM which table Student maps to.
func (m *Student) TableName() string {
	return "student"
}
// TestInitGormV2 exercises the gorm v2 wrapper: init the pool, create one
// Student, then query it back by name.
func TestInitGormV2(t *testing.T) {
	// Initialize Gorm.
	gc1 := &MySQLConfig{
		DSN:            dsn,
		MaxOpenConn:    10,
		MaxIdleConn:    10,
		MaxConnTimeout: xtime.Duration(10 * time.Second),
	}
	g := InitGormV2(gc1)
	u := &Student{
		Name: "jerry",
	}
	// create
	err := g.Create(u).Error
	if err != nil {
		xlog.Error(err)
		return
	}
	var uQs []*Student
	// query
	// Bug fix: the column is mapped as "name" (see the Student gorm tag);
	// the previous "uname" matched no column and broke the query.
	err = g.Table(u.TableName()).Where("name = ?", "jerry").Find(&uQs).Error
	if err != nil {
		xlog.Error(err)
		return
	}
	for _, v := range uQs {
		xlog.Debug(v)
	}
}
|
package operatorstatus
import (
"context"
"fmt"
"os"
"reflect"
"time"
configv1 "github.com/openshift/api/config/v1"
configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
olmv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient"
olmversion "github.com/operator-framework/operator-lifecycle-manager/pkg/version"
)
const (
clusterOperatorOLM = "operator-lifecycle-manager"
clusterOperatorCatalogSource = "operator-lifecycle-manager-catalog"
openshiftNamespace = "openshift-operator-lifecycle-manager"
clusterServiceVersionResource = "clusterserviceversions"
subscriptionResource = "subscriptions"
installPlanResource = "installplans"
)
// MonitorClusterStatus keeps the named ClusterOperator resource in sync
// with OLM's observed health. Every 5 seconds (until stopCh closes) it:
// waits for the ClusterOperator API to exist, samples sync results from
// syncCh, creates the ClusterOperator on first sight, and updates its
// conditions, operand versions and related objects to reflect whether
// syncs are succeeding.
func MonitorClusterStatus(name string, syncCh <-chan error, stopCh <-chan struct{}, opClient operatorclient.ClientInterface,
	configClient configv1client.ConfigV1Interface, crClient versioned.Interface, log *logrus.Logger) {
	var (
		syncs              int
		successfulSyncs    int
		hasClusterOperator bool
	)
	go wait.Until(func() {
		// slow poll until we see a cluster operator API, which could be never
		if !hasClusterOperator {
			opStatusGV := schema.GroupVersion{
				Group:   "config.openshift.io",
				Version: "v1",
			}
			err := discovery.ServerSupportsVersion(opClient.KubernetesInterface().Discovery(), opStatusGV)
			if err != nil {
				log.Infof("ClusterOperator api not present, skipping update (%v)", err)
				time.Sleep(time.Minute)
				return
			}
			hasClusterOperator = true
		}
		// Sample the sync channel and see whether we're successfully retiring syncs as a
		// proxy for "working" (we can't know when we hit level, but we can at least verify
		// we are seeing some syncs succeeding). Once we observe at least one successful
		// sync we can begin reporting available and level.
		select {
		case err, ok := <-syncCh:
			if !ok {
				// syncCh should only close if the Run() loop exits
				time.Sleep(5 * time.Second)
				log.Fatalf("Status sync channel closed but process did not exit in time")
			}
			syncs++
			if err == nil {
				successfulSyncs++
			}
			// grab any other sync events that have accumulated
			for len(syncCh) > 0 {
				if err := <-syncCh; err == nil {
					successfulSyncs++
				}
				syncs++
			}
			// if we haven't yet accumulated enough syncs, wait longer
			// TODO: replace these magic numbers with a better measure of syncs across all queueInformers
			if successfulSyncs < 5 || syncs < 10 {
				log.Printf("Waiting to observe more successful syncs")
				return
			}
		}
		// create the cluster operator in an initial state if it does not exist
		existing, err := configClient.ClusterOperators().Get(context.TODO(), name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			log.Info("Existing operator status not found, creating")
			created, createErr := configClient.ClusterOperators().Create(context.TODO(), &configv1.ClusterOperator{
				ObjectMeta: metav1.ObjectMeta{
					Name: name,
				},
				Status: configv1.ClusterOperatorStatus{
					Conditions: []configv1.ClusterOperatorStatusCondition{
						{
							Type:               configv1.OperatorProgressing,
							Status:             configv1.ConditionTrue,
							Message:            fmt.Sprintf("Installing %s", olmversion.OLMVersion),
							LastTransitionTime: metav1.Now(),
						},
						{
							Type:               configv1.OperatorDegraded,
							Status:             configv1.ConditionFalse,
							LastTransitionTime: metav1.Now(),
						},
						{
							Type:               configv1.OperatorAvailable,
							Status:             configv1.ConditionFalse,
							LastTransitionTime: metav1.Now(),
						},
						{
							Type:               configv1.OperatorUpgradeable,
							Status:             configv1.ConditionFalse,
							LastTransitionTime: metav1.Now(),
						},
					},
				},
			}, metav1.CreateOptions{})
			if createErr != nil {
				log.Errorf("Failed to create cluster operator: %v\n", createErr)
				return
			}
			created.Status.RelatedObjects, err = relatedObjects(name, opClient, crClient, log)
			if err != nil {
				log.Errorf("Failed to get related objects: %v", err)
			}
			existing = created
			err = nil
		}
		if err != nil {
			log.Errorf("Unable to retrieve cluster operator: %v", err)
			return
		}
		// update the status with the appropriate state
		previousStatus := existing.Status.DeepCopy()
		// extract the previously reported "operator" operand version, if any
		previousOperatorVersion := func(vs []configv1.OperandVersion) string {
			for _, v := range vs {
				if v.Name == "operator" {
					return v.Version
				}
			}
			return ""
		}(previousStatus.Versions)
		targetOperatorVersion := os.Getenv("RELEASE_VERSION")
		switch {
		case successfulSyncs > 0:
			setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
				Type:   configv1.OperatorDegraded,
				Status: configv1.ConditionFalse,
			})
			setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
				Type:    configv1.OperatorProgressing,
				Status:  configv1.ConditionFalse,
				Message: fmt.Sprintf("Deployed %s", olmversion.OLMVersion),
			})
			setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
				Type:   configv1.OperatorAvailable,
				Status: configv1.ConditionTrue,
			})
			setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
				Type:   configv1.OperatorUpgradeable,
				Status: configv1.ConditionTrue,
			})
			// we set the versions array when all the latest code is deployed and running - in this case,
			// the sync method is responsible for guaranteeing that happens before it returns nil
			if len(targetOperatorVersion) > 0 {
				existing.Status.Versions = []configv1.OperandVersion{
					{
						Name:    "operator",
						Version: targetOperatorVersion,
					},
					{
						Name:    "operator-lifecycle-manager",
						Version: olmversion.OLMVersion,
					},
				}
				if targetOperatorVersion != previousOperatorVersion {
					setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
						Type:    configv1.OperatorProgressing,
						Status:  configv1.ConditionTrue,
						Message: fmt.Sprintf("Deployed %s", olmversion.OLMVersion),
					})
				}
			} else {
				existing.Status.Versions = nil
			}
		default:
			setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
				Type:    configv1.OperatorDegraded,
				Status:  configv1.ConditionTrue,
				Message: "Waiting for updates to take effect",
			})
			setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
				Type:    configv1.OperatorProgressing,
				Status:  configv1.ConditionFalse,
				Message: fmt.Sprintf("Waiting to see update %s succeed", olmversion.OLMVersion),
			})
			setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
				Type:    configv1.OperatorUpgradeable,
				Status:  configv1.ConditionFalse,
				Message: "Waiting for updates to take effect",
			})
			// TODO: use % errors within a window to report available
		}
		// always update the related objects in case changes have occurred
		existing.Status.RelatedObjects, err = relatedObjects(name, opClient, crClient, log)
		if err != nil {
			log.Errorf("Failed to get related objects: %v", err)
		}
		if !reflect.DeepEqual(previousStatus.RelatedObjects, existing.Status.RelatedObjects) {
			diffString := diff.ObjectDiff(previousStatus.RelatedObjects, existing.Status.RelatedObjects)
			log.Debugf("Update required for related objects: %v", diffString)
		}
		// update the status
		if !reflect.DeepEqual(previousStatus, &existing.Status) {
			if _, err := configClient.ClusterOperators().UpdateStatus(context.TODO(), existing, metav1.UpdateOptions{}); err != nil {
				log.Errorf("Unable to update cluster operator status: %v", err)
			}
		}
		// if we've reported success, we can sleep longer, otherwise we want to keep watching for
		// successful
		if successfulSyncs > 0 {
			time.Sleep(25 * time.Second)
		}
	}, 5*time.Second, stopCh)
}
// setOperatorStatusCondition updates (or appends) newCondition in
// conditions, refreshing LastTransitionTime only when the status value
// actually changes.
func setOperatorStatusCondition(conditions *[]configv1.ClusterOperatorStatusCondition, newCondition configv1.ClusterOperatorStatusCondition) {
	if conditions == nil {
		// The original code allocated a fresh slice into the *local*
		// pointer here, so the update was built and then silently
		// discarded — nothing the caller could observe. An explicit
		// early return keeps the behavior and makes the intent clear.
		return
	}
	existingCondition := findOperatorStatusCondition(*conditions, newCondition.Type)
	if existingCondition == nil {
		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
		*conditions = append(*conditions, newCondition)
		return
	}
	if existingCondition.Status != newCondition.Status {
		existingCondition.Status = newCondition.Status
		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
	}
	// Reason and Message are always refreshed, even when Status is unchanged.
	existingCondition.Reason = newCondition.Reason
	existingCondition.Message = newCondition.Message
}
// findOperatorStatusCondition returns a pointer to the condition with the
// given type, or nil when no such condition exists.
func findOperatorStatusCondition(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition {
	for i := 0; i < len(conditions); i++ {
		c := &conditions[i]
		if c.Type == conditionType {
			return c
		}
	}
	return nil
}
// relatedObjects returns RelatedObjects in the ClusterOperator.Status.
// RelatedObjects are consumed by https://github.com/openshift/must-gather
func relatedObjects(name string, opClient operatorclient.ClientInterface, crClient versioned.Interface, log *logrus.Logger) ([]configv1.ObjectReference, error) {
	var objectReferences []configv1.ObjectReference
	log.Debugf("Adding related objects for %v", name)
	namespace := openshiftNamespace // hard-coded to constant
	switch name {
	case clusterOperatorOLM:
		// reference every non-copied CSV in the namespace
		csvList, err := crClient.OperatorsV1alpha1().ClusterServiceVersions(namespace).List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return nil, err
		}
		for _, csv := range csvList.Items {
			if csv.IsCopied() {
				continue
			}
			objectReferences = append(objectReferences, configv1.ObjectReference{
				Group:     olmv1alpha1.GroupName,
				Resource:  clusterServiceVersionResource,
				Namespace: csv.GetNamespace(),
				Name:      csv.GetName(),
			})
		}
	case clusterOperatorCatalogSource:
		// reference all subscriptions and install plans in the namespace
		subList, err := crClient.OperatorsV1alpha1().Subscriptions(namespace).List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return nil, err
		}
		installPlanList, err := crClient.OperatorsV1alpha1().InstallPlans(namespace).List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return nil, err
		}
		for _, sub := range subList.Items {
			objectReferences = append(objectReferences, configv1.ObjectReference{
				Group:     olmv1alpha1.GroupName,
				Resource:  subscriptionResource,
				Namespace: sub.GetNamespace(),
				Name:      sub.GetName(),
			})
		}
		for _, ip := range installPlanList.Items {
			objectReferences = append(objectReferences, configv1.ObjectReference{
				Group:     olmv1alpha1.GroupName,
				Resource:  installPlanResource,
				Namespace: ip.GetNamespace(),
				Name:      ip.GetName(),
			})
		}
	}
	// the OLM namespace itself is always a related object
	namespaces := configv1.ObjectReference{
		Group:    corev1.GroupName,
		Resource: "namespaces",
		Name:     namespace,
	}
	objectReferences = append(objectReferences, namespaces)
	return objectReferences, nil
}
|
package DAO
import (
"Work_5/object"
"github.com/jinzhu/gorm"
)
// CreateComment persists a comment record, creating the comments table
// first when it does not exist yet.
// NOTE(review): errors from AutoMigrate/Create are still silently dropped
// because ErrMessage's fields are not visible here — confirm how failures
// should be surfaced.
func CreateComment(comment *object.Comment, db *gorm.DB) object.ErrMessage {
	// Bug fix: comment is already a *object.Comment, so the previous
	// &comment handed gorm a **object.Comment, which it cannot map to a
	// model/table.
	if !db.HasTable(comment) {
		db.AutoMigrate(comment)
	}
	// Create the comment record.
	db.Create(comment)
	return object.ErrMessage{}
}
|
package monkey
import (
"../properties"
"github.com/hajimehoshi/ebiten"
"github.com/hajimehoshi/ebiten/ebitenutil"
_ "image/png"
"log"
)
// image is a package-level slot for the monkey sprite.
// NOTE(review): Init declares a local "image" with :=, so this variable is
// never assigned anywhere in view — confirm whether it can be removed.
var image *ebiten.Image
// Monkey is the main character of the game.
type Monkey struct {
	// X position of the upper left corner of the monkey
	X int
	// Y position of the monkey (vertical: Jump() derives it from
	// properties.Groundheight). NOTE(review): the original comment said
	// "left side", which contradicts how Jump() uses it.
	Y int
	// Width of the monkey
	Width int
	// Height of the monkey
	Height int
	// index into the precomputed Jump arc; -1 when not mid-jump
	IncJump int
	// if true the monkey is jumping
	IsJumping bool
	// direction: true = right, false = left
	Direction bool
	// X position where the current jump started
	JumpingStart int
	// Image to represent the monkey
	Image *ebiten.Image
}
// Jump contains the precalculated Y offsets of the quadratic jump arc,
// filled in by Init.
var Jump [50]int
// Init precomputes the jump arc, builds a Monkey in its initial state and
// loads its sprite from assets/monkey.png (fatal on failure).
func Init() *Monkey {
	n := len(Jump)
	for i := 0; i < n; i++ {
		// Sample the quadratic at n evenly spaced points over [0, 43].
		x := float64(i+1) / float64(n) * 43
		Jump[i] = int(-0.1082*x*x + 4.654*x)
	}

	m := new(Monkey)
	m.Reset()

	img, _, err := ebitenutil.NewImageFromFile("assets/monkey.png", ebiten.FilterDefault)
	if err != nil {
		log.Fatal(err)
	}
	m.Image = img
	return m
}
// Reset restores the monkey's position and state to their initial values.
func (m *Monkey) Reset() {
	m.X = properties.Borderwidth - 32 // flush against the border (width 32)
	m.Y = 57
	m.Width = 32
	m.Height = 43
	m.IncJump = -1 // -1 means "not mid-jump"
	m.IsJumping = false
	m.Direction = false
}
// InitJump starts a jump from the current X position in the given
// direction (true = right, false = left).
func (m *Monkey) InitJump(direction bool) {
	m.IsJumping = true
	m.JumpingStart = m.X
	m.Direction = direction
}
// Jump advances the monkey one frame along the precomputed arc. On the
// final frame it snaps to the landing spot 43 px from the start and
// clears the jumping state.
func (m *Monkey) Jump() {
	if m.IncJump == len(Jump)-1 {
		// Last frame: land exactly 43 px from the start.
		if m.Direction {
			m.X = m.JumpingStart + 43
		} else {
			m.X = m.JumpingStart - 43
		}
		m.IsJumping = false
		m.IncJump = -1
	} else {
		m.IncJump = m.IncJump + 1
		// Vertical position comes from the precomputed arc.
		m.Y = properties.Groundheight - m.Height - Jump[m.IncJump]
		// Horizontal progress is linear across the arc (43 px total).
		floatX := float64(m.IncJump) / float64(len(Jump)) * 43
		if m.Direction {
			m.X = int(floatX) + m.JumpingStart
		} else {
			m.X = m.JumpingStart - int(floatX)
		}
	}
}
|
package peermgr
import (
"encoding/json"
"fmt"
"io/ioutil"
"strings"
"testing"
"time"
"github.com/meshplus/bitxhub/internal/model/events"
"github.com/meshplus/bitxhub/internal/executor/contracts"
"github.com/Rican7/retry"
"github.com/Rican7/retry/strategy"
"github.com/meshplus/bitxhub/pkg/cert"
"github.com/golang/mock/gomock"
crypto2 "github.com/libp2p/go-libp2p-core/crypto"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/meshplus/bitxhub-kit/crypto"
"github.com/meshplus/bitxhub-kit/crypto/asym"
"github.com/meshplus/bitxhub-kit/crypto/asym/ecdsa"
"github.com/meshplus/bitxhub-kit/log"
"github.com/meshplus/bitxhub-model/pb"
"github.com/meshplus/bitxhub/internal/ledger/mock_ledger"
"github.com/meshplus/bitxhub/internal/repo"
ma "github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/require"
)
// genKeysAndConfig generates peerCnt node keys (libp2p), account keys
// (secp256k1) and localhost multiaddrs on consecutive ports from 5001.
func genKeysAndConfig(t *testing.T, peerCnt int) ([]crypto2.PrivKey, []crypto.PrivateKey, []string) {
	var nodeKeys []crypto2.PrivKey
	var privKeys []crypto.PrivateKey
	var peers []string

	port := 5001
	for i := 0; i < peerCnt; i++ {
		key, err := asym.GenerateKeyPair(crypto.ECDSA_P256)
		require.Nil(t, err)

		libp2pKey, err := convertToLibp2pPrivKey(key)
		require.Nil(t, err)
		nodeKeys = append(nodeKeys, libp2pKey)

		id, err := peer.IDFromPublicKey(libp2pKey.GetPublic())
		require.Nil(t, err)

		// Named addr (not "peer") to avoid shadowing the peer package.
		addr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d/p2p/%s", port, id)
		peers = append(peers, addr)
		port++

		privKey, err := asym.GenerateKeyPair(crypto.Secp256k1)
		require.Nil(t, err)
		privKeys = append(privKeys, privKey)
	}
	return nodeKeys, privKeys, peers
}
// convertToLibp2pPrivKey converts a bitxhub-kit private key into a libp2p
// private key; only *ecdsa.PrivateKey inputs are supported.
func convertToLibp2pPrivKey(privateKey crypto.PrivateKey) (crypto2.PrivKey, error) {
	ecdsaPrivKey, ok := privateKey.(*ecdsa.PrivateKey)
	if !ok {
		return nil, fmt.Errorf("convert to libp2p private key: not ecdsa private key")
	}
	libp2pPrivKey, _, err := crypto2.ECDSAKeyPairFromKey(ecdsaPrivKey.K)
	if err != nil {
		return nil, err
	}
	return libp2pPrivKey, nil
}
// otherPeers builds the id→AddrInfo map for every address except the
// node's own (the entry at index id). Parse errors are ignored, as the
// addresses come from genKeysAndConfig and are well-formed.
func otherPeers(id uint64, addrs []string) map[uint64]*peer.AddrInfo {
	out := make(map[uint64]*peer.AddrInfo, len(addrs))
	for i, raw := range addrs {
		if uint64(i) == id {
			continue
		}
		maddr, _ := ma.NewMultiaddr(raw)
		info, _ := peer.AddrInfoFromP2pAddr(maddr)
		out[uint64(i)] = info
	}
	return out
}
// NewSwarms starts peerCnt fully wired Swarm instances on localhost ports,
// backed by a shared mock ledger, and returns them already started.
func NewSwarms(t *testing.T, peerCnt int) []*Swarm {
	var swarms []*Swarm
	nodeKeys, privKeys, addrs := genKeysAndConfig(t, peerCnt)
	mockCtl := gomock.NewController(t)
	mockLedger := mock_ledger.NewMockLedger(mockCtl)
	// every GetBlock call returns a block with header number 1
	mockLedger.EXPECT().GetBlock(gomock.Any()).Return(&pb.Block{
		BlockHeader: &pb.BlockHeader{
			Number: 1,
		},
	}, nil).AnyTimes()
	aer := contracts.AssetExchangeRecord{
		Status: 0,
	}
	data, err := json.Marshal(aer)
	require.Nil(t, err)
	mockLedger.EXPECT().GetBlockSign(gomock.Any()).Return([]byte("sign"), nil).AnyTimes()
	mockLedger.EXPECT().GetState(gomock.Any(), gomock.Any()).Return(true, data).AnyTimes()
	agencyData, err := ioutil.ReadFile("testdata/agency.cert")
	require.Nil(t, err)
	nodeData, err := ioutil.ReadFile("testdata/node.cert")
	require.Nil(t, err)
	caData, err := ioutil.ReadFile("testdata/ca.cert")
	require.Nil(t, err)
	// NOTE(review): shadows the cert package for the rest of the function
	cert, err := cert.ParseCert(caData)
	require.Nil(t, err)
	for i := 0; i < peerCnt; i++ {
		// NOTE(review): likewise shadows the repo package inside the loop
		repo := &repo.Repo{
			Key: &repo.Key{},
			NetworkConfig: &repo.NetworkConfig{
				N:  uint64(peerCnt),
				ID: uint64(i),
			},
			Certs: &repo.Certs{
				NodeCertData:   nodeData,
				AgencyCertData: agencyData,
				CACert:         cert,
			},
		}
		var local string
		id, err := peer.IDFromPublicKey(nodeKeys[i].GetPublic())
		require.Nil(t, err)
		// strip the trailing /p2p/<id> suffix to get the listen address
		if strings.HasSuffix(addrs[i], id.String()) {
			idx := strings.LastIndex(addrs[i], "/p2p/")
			local = addrs[i][:idx]
		}
		repo.NetworkConfig.LocalAddr = local
		repo.Key.Libp2pPrivKey = nodeKeys[i]
		repo.Key.PrivKey = privKeys[i]
		repo.NetworkConfig.OtherNodes = otherPeers(uint64(i), addrs)
		swarm, err := New(repo, log.NewWithModule("p2p"), mockLedger)
		require.Nil(t, err)
		err = swarm.Start()
		require.Nil(t, err)
		swarms = append(swarms, swarm)
	}
	return swarms
}
// TestSwarm_Send spins up 4 swarms and exercises the synchronous Send path
// for block fetch, block-sign, asset-exchange-sign and IBTP-sign messages.
func TestSwarm_Send(t *testing.T) {
	peerCnt := 4
	swarms := NewSwarms(t, peerCnt)
	// give the swarms a moment to connect to each other
	time.Sleep(2 * time.Second)
	msg := &pb.Message{
		Type: pb.Message_GET_BLOCK,
		Data: []byte("1"),
	}
	var res *pb.Message
	var err error
	err = retry.Retry(func(attempt uint) error {
		res, err = swarms[0].Send(2, msg)
		if err != nil {
			swarms[0].logger.Errorf(err.Error())
			return err
		}
		return nil
	}, strategy.Wait(50*time.Millisecond))
	require.Nil(t, err)
	require.Equal(t, pb.Message_GET_BLOCK_ACK, res.Type)
	var block pb.Block
	err = block.Unmarshal(res.Data)
	require.Nil(t, err)
	require.Equal(t, uint64(1), block.BlockHeader.Number)
	fetchBlockSignMsg := &pb.Message{
		Type: pb.Message_FETCH_BLOCK_SIGN,
		Data: []byte("1"),
	}
	err = retry.Retry(func(attempt uint) error {
		res, err = swarms[0].Send(3, fetchBlockSignMsg)
		if err != nil {
			swarms[0].logger.Errorf(err.Error())
			return err
		}
		return nil
	}, strategy.Wait(50*time.Millisecond))
	require.Nil(t, err)
	require.Equal(t, pb.Message_FETCH_BLOCK_SIGN_ACK, res.Type)
	// NOTE(review): duplicates the require.Nil(t, err) two lines up
	require.Nil(t, err)
	require.NotNil(t, res.Data)
	fetchAESMsg := &pb.Message{
		Type: pb.Message_FETCH_ASSET_EXCHANEG_SIGN,
		Data: []byte("1"),
	}
	err = retry.Retry(func(attempt uint) error {
		res, err = swarms[2].Send(3, fetchAESMsg)
		if err != nil {
			// NOTE(review): logs via swarms[0] although swarms[2] sent
			swarms[0].logger.Errorf(err.Error())
			return err
		}
		return nil
	}, strategy.Wait(50*time.Millisecond))
	require.Nil(t, err)
	require.Equal(t, pb.Message_FETCH_ASSET_EXCHANGE_SIGN_ACK, res.Type)
	require.Nil(t, err)
	require.NotNil(t, res.Data)
	fetchIBTPSignMsg := &pb.Message{
		Type: pb.Message_FETCH_IBTP_SIGN,
		Data: []byte("1"),
	}
	err = retry.Retry(func(attempt uint) error {
		res, err = swarms[3].Send(1, fetchIBTPSignMsg)
		if err != nil {
			// NOTE(review): logs via swarms[0] although swarms[3] sent
			swarms[0].logger.Errorf(err.Error())
			return err
		}
		return nil
	}, strategy.Wait(50*time.Millisecond))
	require.Nil(t, err)
	require.Equal(t, pb.Message_FETCH_IBTP_SIGN_ACK, res.Type)
	require.Nil(t, err)
	require.NotNil(t, res.Data)
}
// TestSwarm_AsyncSend verifies that a consensus message sent asynchronously
// from swarm 0 arrives as an order-message event on swarm 2.
func TestSwarm_AsyncSend(t *testing.T) {
	peerCnt := 4
	swarms := NewSwarms(t, peerCnt)
	// give the swarms a moment to connect to each other
	time.Sleep(2 * time.Second)
	orderMsgCh := make(chan events.OrderMessageEvent)
	orderMsgSub := swarms[2].SubscribeOrderMessage(orderMsgCh)
	defer orderMsgSub.Unsubscribe()
	msg := &pb.Message{
		Type: pb.Message_CONSENSUS,
		Data: []byte("1"),
	}
	var err error
	err = retry.Retry(func(attempt uint) error {
		err = swarms[0].AsyncSend(2, msg)
		if err != nil {
			swarms[0].logger.Errorf(err.Error())
			return err
		}
		return nil
	}, strategy.Wait(50*time.Millisecond))
	require.Nil(t, err)
	require.NotNil(t, <-orderMsgCh)
}
|
package main
import (
"fmt"
"net"
"strconv"
"encoding/binary"
"runtime"
"time"
"flag"
"asocks"
"io"
)
// handleConnection serves one client connection. On handshake/relay error
// the connection is closed here; on the success path getRequest closes it.
func handleConnection(conn *net.TCPConn) {
	err := getRequest(conn)
	if err != nil {
		fmt.Println("err:", err)
		conn.Close()
	}
}
// getRequest performs the XOR-obfuscated, SOCKS5-like handshake on conn,
// dials the requested destination and relays traffic in both directions
// until done. On the success path it closes both connections itself and
// returns nil.
func getRequest(conn *net.TCPConn) (err error){
	var n int
	buf := make([]byte, 257)
	// The first byte holds the number of noise bytes that follow.
	bufOneByte := make([]byte, 1)
	if n, err = io.ReadFull(conn, bufOneByte); err != nil {
		return
	}
	encodeData(bufOneByte)
	noiseLength := int(bufOneByte[0])
	noiseBuf := make([]byte, noiseLength)
	// Skip over the noise.
	if n, err = io.ReadAtLeast(conn, noiseBuf, noiseLength); err != nil {
		return
	}
	if n, err = io.ReadAtLeast(conn, buf, 2); err != nil {
		return
	}
	encodeData(buf)
	addressType := buf[0]
	reqLen := 0;
	switch addressType {
	case 1:
		// ipv4
		reqLen = 1 + 4 + 2
	case 3:
		// domain
		reqLen = 1 + 1 + int(buf[1]) + 2
	case 4:
		// ipv6
		reqLen = 1 + 16 + 2
	default:
		// unnormal, close conn
		err = fmt.Errorf("error ATYP:%d\n", buf[0])
		return
	}
	// Short read: fetch (and decode) the remainder of the request.
	if n < reqLen {
		if _, err = io.ReadFull(conn, buf[n : reqLen]); err != nil {
			return
		}
		encodeData(buf[n:reqLen])
	}
	var host string;
	switch addressType {
	case 1:
		// ipv4
		host = net.IP(buf[1:5]).String()
	case 3:
		// domain
		dstAddr := buf[2 : 2 + int(buf[1])]
		host = string(dstAddr)
	case 4:
		// ipv6
		host = net.IP(buf[1:17]).String()
	}
	// The last two bytes of the request hold the port, big endian.
	port := binary.BigEndian.Uint16(buf[reqLen - 2 : reqLen])
	host = net.JoinHostPort(host, strconv.Itoa(int(port)))
	fmt.Println("dst:", host)
	var remote *net.TCPConn
	remoteAddr, _ := net.ResolveTCPAddr("tcp", host)
	if remote, err = net.DialTCP("tcp", nil, remoteAddr); err != nil {
		return
	}
	// Forward any surplus bytes to remote. Normally there are none, but
	// there will be if the client reached us via port forwarding.
	if n > reqLen {
		if _, err = remote.Write(buf[reqLen : n]); err != nil {
			return
		}
	}
	// Relay both directions; each pipe signals finish once when done.
	finish := make(chan bool, 2)
	go pipeThenClose(conn, remote, finish)
	pipeThenClose(remote, conn, finish)
	<- finish
	<- finish
	conn.Close()
	remote.Close()
	return nil
}
// pipeThenClose copies data from src to dst, XOR-transcoding each chunk via
// encodeData, until a read error/EOF or a write error occurs. On exit it
// half-closes both ends (read side of src, write side of dst) and signals
// completion on finish.
func pipeThenClose(src, dst *net.TCPConn, finish chan bool) {
	defer func() {
		src.CloseRead()
		dst.CloseWrite()
		finish <- true
	}()
	// Borrow a scratch buffer from the shared pool for the lifetime of the copy.
	buf := asocks.GetBuffer()
	defer asocks.GiveBuffer(buf)
	for {
		// Idle connections are dropped after 60 seconds without data.
		src.SetReadDeadline(time.Now().Add(60 * time.Second))
		n, readErr := src.Read(buf)
		if n > 0 {
			chunk := buf[:n]
			encodeData(chunk)
			if _, writeErr := dst.Write(chunk); writeErr != nil {
				return
			}
		}
		if readErr != nil {
			return
		}
	}
}
// encodeData obfuscates or deobfuscates data in place by XOR-ing every byte
// with 128. The transform is its own inverse, so the same call both encodes
// and decodes.
func encodeData(data []byte) {
	// Idiom fix: `for i := range` instead of `for i, _ := range`.
	for i := range data {
		data[i] ^= 128
	}
}
// main parses the listen address flag, sizes the scheduler to the machine,
// then accepts TCP connections forever, serving each on its own goroutine.
func main() {
	var localAddr string
	flag.StringVar(&localAddr, "l", "0.0.0.0:8388", "监听端口")
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())
	bindAddr, err := net.ResolveTCPAddr("tcp", localAddr)
	if err != nil {
		fmt.Printf("resolve %s failed. err:%s\n", localAddr, err)
		return
	}
	ln, err := net.ListenTCP("tcp", bindAddr)
	if err != nil {
		fmt.Println("listen error:", err)
		return
	}
	defer ln.Close()
	fmt.Println("listening ", ln.Addr())
	for {
		conn, acceptErr := ln.AcceptTCP()
		if acceptErr != nil {
			fmt.Println("accept error:", acceptErr)
			continue
		}
		go handleConnection(conn)
	}
}
|
// Code generated from /Users/xguzman/Projects/dice/formula/Dice.g4 by ANTLR 4.7.2. DO NOT EDIT.
package parser // Dice
import "github.com/antlr/antlr4/runtime/Go/antlr"
// A complete Visitor for a parse tree produced by DiceParser.
// DiceVisitor is a complete Visitor for a parse tree produced by DiceParser.
//
// NOTE(review): this file is ANTLR-generated (see header) — regenerate from
// Dice.g4 rather than editing by hand.
type DiceVisitor interface {
	antlr.ParseTreeVisitor

	// Visit a parse tree produced by DiceParser#formula.
	VisitFormula(ctx *FormulaContext) interface{}

	// Visit a parse tree produced by DiceParser#extensions.
	VisitExtensions(ctx *ExtensionsContext) interface{}

	// Visit a parse tree produced by DiceParser#count.
	VisitCount(ctx *CountContext) interface{}

	// Visit a parse tree produced by DiceParser#sides.
	VisitSides(ctx *SidesContext) interface{}

	// Visit a parse tree produced by DiceParser#modifier.
	VisitModifier(ctx *ModifierContext) interface{}

	// Visit a parse tree produced by DiceParser#parameter.
	VisitParameter(ctx *ParameterContext) interface{}

	// Visit a parse tree produced by DiceParser#parameters.
	VisitParameters(ctx *ParametersContext) interface{}

	// Visit a parse tree produced by DiceParser#funcname.
	VisitFuncname(ctx *FuncnameContext) interface{}

	// Visit a parse tree produced by DiceParser#funccall.
	VisitFunccall(ctx *FunccallContext) interface{}
}
|
// +build gofuzz
package api
import (
"bytes"
"net/http"
"net/http/httptest"
)
// init registers the API routes on http.DefaultServeMux once, so Fuzz can
// exercise them through the default mux.
func init() {
	Routes()
}
// Fuzz is executed by the go-fuzz tool. Input data modifications are provided and
// used to validate API call.
// Fuzz is executed by the go-fuzz tool. It replays data as the body of a
// POST to /process on the default mux and reports whether the API accepted
// it: 1 (interesting) for a 200 response, 0 otherwise.
func Fuzz(data []byte) int {
	req := httptest.NewRequest("POST", "/process", bytes.NewBuffer(data))
	rec := httptest.NewRecorder()
	http.DefaultServeMux.ServeHTTP(rec, req)
	// Non-200 responses mark the input as uninteresting for the fuzzer.
	if rec.Code != http.StatusOK {
		return 0
	}
	return 1
}
|
package main
import (
"math"
"github.com/fr3fou/beep/beep"
"github.com/gen2brain/raylib-go/raygui"
rl "github.com/gen2brain/raylib-go/raylib"
)
const (
	// topMargin is the y coordinate (in pixels) where the piano keyboard
	// starts; keys fill from here to the bottom edge of the window.
	topMargin = 671
)
// Key models a single piano key: its on-screen rectangle, the textures for
// the released and pressed states, the note it plays (embedded SingleNote)
// and an optional keyboard binding.
type Key struct {
	rl.Rectangle
	Texture rl.Texture2D        // texture drawn while the key is released
	PressedTexture rl.Texture2D // texture drawn while the key is held
	beep.SingleNote
	KeyboardKey int  // raylib key code bound to this key; -1 means unbound
	IsSemitone bool  // true for black (sharp/flat) keys
	IsActive bool    // true while the key is held via mouse or keyboard
}
// NewKey builds an unpositioned Key for the given note. The Rectangle is
// assigned later during layout, and IsActive starts out false.
func NewKey(note beep.SingleNote, isSemitone bool, texture rl.Texture2D, pressedTexture rl.Texture2D, keyboardKey int) Key {
	var k Key
	k.SingleNote = note
	k.IsSemitone = isSemitone
	k.Texture = texture
	k.PressedTexture = pressedTexture
	k.KeyboardKey = keyboardKey
	return k
}
// Draw renders the key stretched to fill its Rectangle, choosing the normal
// or pressed texture depending on IsActive.
func (k *Key) Draw() {
	if !k.IsActive {
		rl.DrawTexturePro(k.Texture, rl.NewRectangle(0, 0, float32(k.Texture.Width), float32(k.Texture.Height)), k.Rectangle, rl.NewVector2(0, 0), 0, rl.White)
	} else {
		// NOTE(review): the source rect uses k.Texture's dimensions even for
		// PressedTexture — presumably both textures share a size; confirm.
		rl.DrawTexturePro(k.PressedTexture, rl.NewRectangle(0, 0, float32(k.Texture.Width), float32(k.Texture.Height)), k.Rectangle, rl.NewVector2(0, 0), 0, rl.White)
	}
}
const (
	// sampleRate is the audio output rate in Hz.
	sampleRate = 48000
	// streamBuffer is the number of float32 samples pushed to the audio
	// stream per update.
	streamBuffer = 4096
)
// main runs the synthesizer: it opens the window and audio stream, builds
// four octaves of piano keys with mouse and keyboard bindings, then loops —
// synthesizing active notes into the audio stream, handling input, and
// rendering the keyboard, waveform selector, volume slider and scope.
func main() {
	width := int32(896)
	height := int32(896)

	rl.InitWindow(width, height, "sonus - a simple music synth")
	rl.InitAudioDevice()
	defer rl.CloseAudioDevice()
	stream := rl.InitAudioStream(sampleRate, 32, 1)
	defer rl.CloseAudioStream(stream)
	data := make([]float32, streamBuffer)
	rl.PlayAudioStream(stream)
	volume := float32(0.125)

	// _keys holds every key in note order; whiteKeys/blackKeys are the same
	// keys partitioned for layout and rendering.
	_keys := []Key{}
	whiteKeys := []Key{}
	blackKeys := []Key{}
	startOctave := 1
	lastOctave := 4
	octaveCount := lastOctave - startOctave + 1 // +1 because it's inclusive
	whiteWidth := int(width / (int32(octaveCount) * 7)) // 7 is white keys per octave
	blackWidth := int(0.75 * float64(whiteWidth))

	// Waveform generators selectable from the UI toggle group.
	generatorMap := map[string]beep.Generator{
		"Sin":      math.Sin,
		"Sawtooth": beep.Sawtooth(2 * math.Pi),
		"Square":   beep.Square(1),
		"Triangle": beep.Triangle(2 * math.Pi),
	}
	generators := []string{"Sin", "Sawtooth", "Square", "Triangle"}
	generatorIndex := 0
	adsr := beep.NewEasedADSR(func(t float64) float64 { return t * t }, beep.NewRatios(0.25, 0.25, 0.25, 0.25), 1.25, 0.25)

	whiteTexture := rl.LoadTexture("assets/white.png")
	blackTexture := rl.LoadTexture("assets/black.png")
	whitePressedTexture := rl.LoadTexture("assets/white_pressed.png")
	blackPressedTexture := rl.LoadTexture("assets/black_pressed.png")
	sinTexture := rl.LoadTexture("assets/sin.png")
	sawtoothTexture := rl.LoadTexture("assets/sawtooth.png")
	squareTexture := rl.LoadTexture("assets/square.png")
	triangleTexture := rl.LoadTexture("assets/triangle.png")
	raygui.LoadGuiStyle("assets/zahnrad.style")

	// Build the 12 chromatic keys for every octave.
	for i := startOctave; i <= lastOctave; i++ {
		_keys = append(_keys,
			NewKey(beep.C(i, beep.NoDuration, 0), false, whiteTexture, whitePressedTexture, -1),
			NewKey(beep.CS(i, beep.NoDuration, 0), true, blackTexture, blackPressedTexture, -1),
			NewKey(beep.D(i, beep.NoDuration, 0), false, whiteTexture, whitePressedTexture, -1),
			NewKey(beep.DS(i, beep.NoDuration, 0), true, blackTexture, blackPressedTexture, -1),
			NewKey(beep.E(i, beep.NoDuration, 0), false, whiteTexture, whitePressedTexture, -1),
			NewKey(beep.F(i, beep.NoDuration, 0), false, whiteTexture, whitePressedTexture, -1),
			NewKey(beep.FS(i, beep.NoDuration, 0), true, blackTexture, blackPressedTexture, -1),
			NewKey(beep.G(i, beep.NoDuration, 0), false, whiteTexture, whitePressedTexture, -1),
			NewKey(beep.GS(i, beep.NoDuration, 0), true, blackTexture, blackPressedTexture, -1),
			NewKey(beep.A(i, beep.NoDuration, 0), false, whiteTexture, whitePressedTexture, -1),
			NewKey(beep.AS(i, beep.NoDuration, 0), true, blackTexture, blackPressedTexture, -1),
			NewKey(beep.B(i, beep.NoDuration, 0), false, whiteTexture, whitePressedTexture, -1),
		)
	}

	// Physical keyboard layout mapped to two octaves (-1 means no binding).
	keyboardKeys := []int{
		// ---
		rl.KeyA,
		rl.KeyW,
		rl.KeyS,
		rl.KeyE,
		rl.KeyD,
		rl.KeyF,
		rl.KeyT,
		rl.KeyG,
		rl.KeyY,
		rl.KeyH,
		rl.KeyU,
		rl.KeyJ,
		// ---
		rl.KeyK,
		rl.KeyO,
		rl.KeyL,
		rl.KeyP,
		rl.KeySemicolon,
		rl.KeyApostrophe,
		rl.KeyRightBracket,
		rl.KeyBackSlash,
		-1,
		-1,
		-1,
		-1,
	}
	for i := 0; i < 12*2; i++ { // loop for 2 octaves
		// Bindings start at the second octave (offset 12).
		_keys[i+12].KeyboardKey = keyboardKeys[i]
	}

	for _, key := range _keys {
		if !key.IsSemitone {
			whiteKeys = append(whiteKeys, key)
		} else {
			blackKeys = append(blackKeys, key)
		}
	}

	// Lay out white keys edge to edge along the bottom of the window.
	for i := range whiteKeys {
		rect := rl.NewRectangle(
			float32(i*whiteWidth),
			float32(topMargin),
			float32(whiteWidth),
			float32(height-int32(topMargin)),
		)
		whiteKeys[i].Rectangle = rect
	}

	// Lay out black keys between the whites, skipping the E-F and B-C gaps.
	counter := 0
	gapCount := 0
	for i := range blackKeys {
		if counter == 2 || counter == 5 {
			gapCount++
		}
		if counter == 5 {
			counter = 0
		}

		rect := rl.NewRectangle(
			float32(whiteWidth-blackWidth/2+i*whiteWidth+gapCount*whiteWidth),
			float32(topMargin),
			float32(blackWidth),
			float32(height-int32(topMargin))*0.6,
		)
		blackKeys[i].Rectangle = rect
		counter++
	}

	rl.SetTargetFPS(60)
	iconScale := float32(0.5)
	for !rl.WindowShouldClose() {
		// Synthesize the next buffer: sum every active key's samples with the
		// selected generator, shaped by the ADSR envelope.
		if rl.IsAudioStreamProcessed(stream) {
			zeroSamples(data)
			for _, k := range blackKeys {
				if k.IsActive {
					samples := make([]float64, streamBuffer)
					for i := 0; i < streamBuffer; i++ {
						phi := (k.Frequency / sampleRate)
						t := 2.0 * math.Pi * float64(i) * (phi - math.Floor(phi))
						samples[i] += k.SampleAt(t, sampleRate, generatorMap[generators[generatorIndex]])
					}
					beep.ApplyADSR(samples, adsr)
					for i := range samples {
						data[i] += float32(samples[i])
					}
				}
			}
			for _, k := range whiteKeys {
				if k.IsActive {
					samples := make([]float64, streamBuffer)
					for i := 0; i < streamBuffer; i++ {
						phi := (k.Frequency / sampleRate)
						t := 2.0 * math.Pi * float64(i) * (phi - math.Floor(phi))
						samples[i] += k.SampleAt(t, sampleRate, generatorMap[generators[generatorIndex]])
					}
					beep.ApplyADSR(samples, adsr)
					for i := range samples {
						data[i] += float32(samples[i])
					}
				}
			}
			rl.UpdateAudioStream(stream, data, streamBuffer)
		}
		pos := rl.GetMousePosition()
		rl.BeginDrawing()
		rl.ClearBackground(rl.Black)

		// Handling presses — black keys sit on top, so they win hit testing.
		if rl.IsMouseButtonDown(rl.MouseLeftButton) {
			hasFound := false
			for i, key := range blackKeys {
				if rl.CheckCollisionPointRec(pos, key.Rectangle) {
					hasFound = true
					blackKeys[i].IsActive = true
					continue
				}
				blackKeys[i].IsActive = false
			}

			for i, key := range whiteKeys {
				if !hasFound && rl.CheckCollisionPointRec(pos, key.Rectangle) {
					whiteKeys[i].IsActive = true
					continue
				}
				whiteKeys[i].IsActive = false
			}
		} else {
			// Mouse released: deactivate everything and refresh note volume.
			for i := range blackKeys {
				blackKeys[i].IsActive = false
				blackKeys[i].SingleNote.Volume = float64(volume)
			}
			for i := range whiteKeys {
				whiteKeys[i].IsActive = false
				whiteKeys[i].SingleNote.Volume = float64(volume)
			}
		}

		// Keyboard input can activate keys regardless of mouse state.
		for i, key := range whiteKeys {
			if rl.IsKeyDown(int32(key.KeyboardKey)) {
				whiteKeys[i].IsActive = true
				continue
			}
		}

		for i, key := range blackKeys {
			if rl.IsKeyDown(int32(key.KeyboardKey)) {
				blackKeys[i].IsActive = true
				continue
			}
		}

		// Rendering white keys
		for i, key := range whiteKeys {
			key.Draw()
			rl.DrawRectangle(int32(i*whiteWidth), int32(topMargin), 1, height-int32(topMargin), rl.Gray)
		}

		// Rendering black keys
		for _, key := range blackKeys {
			key.Draw()
		}

		// Rendering settings
		generatorIndex = generatorInput(sinTexture, sawtoothTexture, squareTexture, triangleTexture, generatorIndex, generators, iconScale)
		// adsr = beep.NewIdentityADSR()
		volume = volumeInput(volume)

		// Rendering soundwave
		for i := 0; i < 4*100+4*3; i++ {
			rl.DrawPixelV(rl.NewVector2(float32(50+i), 50+50+3+float32(float32(sinTexture.Height)*iconScale)+50+100*data[i]), rl.Red)
		}

		// Rendering decorations
		rl.DrawLineEx(rl.NewVector2(0, float32(topMargin)), rl.NewVector2(float32(width), float32(topMargin)), 3, rl.Red)
		rl.DrawText("sonus", int32(width-rl.MeasureText("sonus", 50)-50), int32(50), 50, rl.White)

		rl.EndDrawing()
	}

	rl.CloseWindow()
}
// generatorInput draws the four waveform icons above the toggle group and
// returns the index of the generator currently selected by the user.
func generatorInput(sinTexture, sawtoothTexture, squareTexture, triangleTexture rl.Texture2D, generatorIndex int, generators []string, iconScale float32) int {
	// Each icon is centered over its 100px-wide toggle cell (plus 3px gaps).
	// NOTE(review): the first icon centers on sawtoothTexture.Width rather
	// than sinTexture.Width — presumably all icons share a width; confirm.
	rl.DrawTextureEx(sinTexture, rl.NewVector2(
		100*0+50-(iconScale*float32(sawtoothTexture.Width))/2+50,
		50+50+5,
	), 0, float32(iconScale), rl.Red)
	rl.DrawTextureEx(sawtoothTexture, rl.NewVector2(
		100*1+3+50-(iconScale*float32(sawtoothTexture.Width))/2+50,
		50+50+5,
	), 0, float32(iconScale), rl.Red)
	rl.DrawTextureEx(squareTexture, rl.NewVector2(
		100*2+3+50-(iconScale*float32(squareTexture.Width))/2+50,
		50+50+5,
	), 0, float32(iconScale), rl.Red)
	rl.DrawTextureEx(triangleTexture, rl.NewVector2(
		100*3+3+50-(iconScale*float32(triangleTexture.Width))/2+50,
		50+50+5,
	), 0, float32(iconScale), rl.Red)
	return raygui.ToggleGroup(rl.NewRectangle(50, 50, 100, 50), generators, generatorIndex)
}
// adsrInput is a placeholder for a future envelope editor; it currently
// returns the ratios unchanged.
func adsrInput(ratios beep.ADSRRatios) beep.ADSRRatios {
	return ratios
}
// volumeInput draws the volume slider above the keyboard and returns the
// selected volume, clamped by the slider to [0, 0.3].
func volumeInput(volume float32) float32 {
	return raygui.SliderBar(rl.NewRectangle(50, topMargin-75, 4*100+4*3, 25), volume, 0, 0.3)
}
// zeroSamples resets every sample in data to silence (0.0) in place.
func zeroSamples(data []float32) {
	for i := 0; i < len(data); i++ {
		data[i] = 0.0
	}
}
// float64toFloat32 returns a new slice with every value of in narrowed to
// float32; the input slice is left untouched.
func float64toFloat32(in []float64) []float32 {
	out := make([]float32, len(in))
	for i := range in {
		out[i] = float32(in[i])
	}
	return out
}
// float32toFloat64 returns a new slice with every value of in widened to
// float64; the input slice is left untouched.
func float32toFloat64(in []float32) []float64 {
	out := make([]float64, len(in))
	for i := range in {
		out[i] = float64(in[i])
	}
	return out
}
|
package migrate
// Index defines the postgres Index.
type Index struct {
	// Name is the index name.
	Name string
	// Type defines the index type (BTree when left at the zero value).
	Type IndexType
	// Columns are the columns specified for the index.
	Columns []*Column
}

// IndexType defines the postgres index type.
type IndexType int

const (
	// BTree is the Postgres B-Tree index type - the default (zero value).
	BTree IndexType = iota
	// Hash is the postgres Hash index type.
	Hash
	// GiST is the postgres GiST index type.
	GiST
	// GIN is the postgres GIN index type.
	GIN
)

// Tag constants are the lowercase string names Postgres uses for each
// IndexType, in the same order as the IndexType constants above.
const (
	// BTreeTag is the BTree index type tag.
	BTreeTag = "btree"
	// HashTag is the Hash index type tag.
	HashTag = "hash"
	// GiSTTag is the GiST index type tag.
	GiSTTag = "gist"
	// GINTag is the GIN index type tag.
	GINTag = "gin"
)
|
package main
import (
"fmt"
"log"
"math/rand"
"time"
)
// worker loops forever, hammering the shared TTLCache with random set/get
// pairs so that concurrent-access problems in the cache become visible.
func worker(c *TTLCache) {
	//start a never ending loop...
	for {
		//...that sets and gets random keys/values
		//from the shared TTLCache
		i := rand.Intn(20)
		k := fmt.Sprintf("%d", i)
		log.Printf("setting %s=%d", k, i)
		c.Set(k, i, time.Second*5)
		// NOTE(review): this type assertion panics if Get returns nil (entry
		// expired or lost to a race) — presumably the demo wants to surface
		// exactly that failure; confirm before "fixing".
		i2 := c.Get(k).(int)
		log.Printf("got %d", i2)
		// Sleep a pseudo-random 0-19ms to desynchronize the workers.
		time.Sleep(time.Millisecond * time.Duration(i))
	}
}
// main spins up 10 concurrent workers against one shared TTLCache and then
// sleeps, letting the workers run so cache misbehavior can be observed.
func main() {
	//seed the pseudo-random number generator
	rand.Seed(time.Now().UnixNano())
	//create a new shared TTLCache
	c := NewTTLCache()
	//start 10 workers on separate goroutines
	//each worker will be running concurrently
	//with the others
	for i := 0; i < 10; i++ {
		//the `go` keyword before a function call
		//will start that function on a new goroutine
		//and continue executing the rest of this code
		//concurrently
		go worker(c)
	}
	//since the workers are running on separate
	//goroutines from the main one, and since
	//a go program will exit after the main()
	//function ends, use time.Sleep() to wait
	//for a while before exiting so that we can
	//see the unprotected cache fail
	time.Sleep(time.Hour)
}
|
// 多个goroutine初始化顺序
package main
import "fmt"
// c signals completion of each printName goroutine back to main.
var c = make(chan bool)

// main starts two printName goroutines and waits for both to report in.
//
// Fix: the original looped on `<-c` forever; once both goroutines had sent,
// every goroutine was blocked and the runtime aborted with
// "all goroutines are asleep - deadlock!". Receiving exactly two signals
// keeps the printed output identical and lets the program exit cleanly.
func main() {
	go printName("jd", c)
	go printName("fish", c)
	for i := 0; i < 2; i++ {
		single := <-c
		fmt.Println(single)
	}
}

// printName prints its name and then signals completion on c.
func printName(name string, c chan bool) {
	fmt.Println(name)
	c <- true
}
|
package main
import (
"go-grpc/pb"
"go-grpc/services"
"log"
"net"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
// main starts a gRPC server on localhost:50051, registers the user service
// and server reflection, then blocks serving requests.
func main() {
	// Create the TCP listener the gRPC server will accept connections on.
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		// Fix: error message typo "Cold not connect" -> "Could not connect".
		log.Fatalf("Could not connect: %v", err)
	}
	grpcServer := grpc.NewServer()
	// Register the user service implementation.
	pb.RegisterUserServiceServer(grpcServer, services.NewUserService())
	// Reflection lets tools like grpcurl discover the service at runtime.
	reflection.Register(grpcServer)
	if err := grpcServer.Serve(lis); err != nil {
		log.Fatalf("error grpc server: %v", err)
	}
}
|
package s3
import (
"bytes"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestHelper exercises the S3 helper end to end: construction/validation,
// bucket, directory and file creation, and host/bucket queries, using
// stubbed HTTP backends in place of a real S3 endpoint.
//
// Fix: every httptest.NewServer is now closed with `defer server.Close()` —
// the original leaked all six test servers (and their listeners).
func TestHelper(t *testing.T) {
	config := Config{
		AccessKeyID:     "",
		Endpoint:        "",
		Region:          "",
		SecretAccessKey: "",
		BucketName:      "",
		SSL:             false,
	}
	Convey("New", t, func() {
		Convey("Config validation error", func() {
			s3, err := New(config)
			// config validation error
			So(err, ShouldNotBeNil)
			So(s3, ShouldBeNil)
		})
		Convey("NewWithRegion error", func() {
			// minio.NewWithRegion throws error if endpoint is an invalid host
			config.Endpoint = "invalid:host:xxx"
			config.AccessKeyID = "x"
			config.SecretAccessKey = "x"
			config.Region = "x"
			config.BucketName = "x"
			s3, err := New(config)
			So(err, ShouldNotBeNil)
			So(s3, ShouldBeNil)
		})
		Convey("Success", func() {
			config.Endpoint = "localhost"
			config.AccessKeyID = "x"
			config.SecretAccessKey = "x"
			config.Region = "x"
			config.BucketName = "x"
			s3, err := New(config)
			So(err, ShouldBeNil)
			So(s3, ShouldNotBeNil)
		})
	})
	Convey("CreateBucket", t, func() {
		config := Config{
			AccessKeyID:     "x",
			Endpoint:        "localhost",
			Region:          "x",
			SecretAccessKey: "x",
			BucketName:      "x",
			SSL:             false,
		}
		Convey("Invalid bucket name", func() {
			s3, err := New(config)
			So(err, ShouldBeNil)
			// invalid bucket name (too short), minio makebucket throws error
			err = s3.CreateBucket("x")
			So(err, ShouldNotBeNil)
		})
		Convey("Directory created", func() {
			server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				fmt.Fprintln(w, "Hello world!")
			}))
			defer server.Close()
			url := strings.TrimPrefix(server.URL, "http://")
			config := Config{
				AccessKeyID:     "x",
				Endpoint:        url,
				Region:          "x",
				SecretAccessKey: "x",
				BucketName:      "x",
				SSL:             false,
			}
			s3, err := New(config)
			So(err, ShouldBeNil)
			err = s3.CreateBucket("x43563")
			So(err, ShouldBeNil)
		})
		Convey("Disabled S3", func() {
			s3 := helper{
				Enabled: false,
			}
			err := s3.CreateBucket("x")
			So(err, ShouldNotBeNil)
		})
	})
	Convey("CreateDirectory", t, func() {
		Convey("Disabled S3", func() {
			s3 := helper{
				Enabled: false,
			}
			err := s3.CreateDirectory("x", "asd")
			So(err, ShouldNotBeNil)
		})
		Convey("PutObject", func() {
			Convey("Success", func() {
				server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
					fmt.Fprintln(w, "{}")
				}))
				defer server.Close()
				url := strings.TrimPrefix(server.URL, "http://")
				config := Config{
					AccessKeyID:     "x",
					Endpoint:        url,
					Region:          "x",
					SecretAccessKey: "x",
					BucketName:      "x",
					SSL:             false,
				}
				s3, err := New(config)
				So(err, ShouldBeNil)
				bucket := "x43563"
				err = s3.CreateDirectory(bucket, "1234678")
				So(err, ShouldBeNil)
			})
			Convey("Fail SetBucketPolicy", func() {
				// First request succeeds, every later one returns 400.
				i := 0
				server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
					if i > 0 {
						w.WriteHeader(400)
					}
					fmt.Fprintln(w, "{}")
					i++
				}))
				defer server.Close()
				url := strings.TrimPrefix(server.URL, "http://")
				config := Config{
					AccessKeyID:     "x",
					Endpoint:        url,
					Region:          "x",
					SecretAccessKey: "x",
					BucketName:      "x",
					SSL:             false,
				}
				s3, err := New(config)
				So(err, ShouldBeNil)
				bucket := "x43563"
				err = s3.CreateDirectory(bucket, "1234678")
				So(err, ShouldBeNil)
			})
			Convey("Error", func() {
				server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(400)
					fmt.Fprintln(w, "{\"error\": \"Invalid blabla\"}")
				}))
				defer server.Close()
				url := strings.TrimPrefix(server.URL, "http://")
				config := Config{
					AccessKeyID:     "x",
					Endpoint:        url,
					Region:          "x",
					SecretAccessKey: "x",
					BucketName:      "x",
					SSL:             false,
				}
				s3, err := New(config)
				So(err, ShouldBeNil)
				bucket := "x43563"
				err = s3.CreateDirectory(bucket, "1234678")
				So(err, ShouldNotBeNil)
			})
		})
	})
	Convey("CreateFile", t, func() {
		Convey("Disabled S3", func() {
			s3 := helper{
				Enabled: false,
			}
			bucket := "string"
			directory := "string"
			fileName := "string"
			content := bytes.NewReader([]byte("asdf"))
			length := int64(60)
			mime := "string"
			err := s3.CreateFile(bucket, directory, fileName, content, length, mime)
			So(err, ShouldNotBeNil)
		})
		Convey("Success", func() {
			server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				fmt.Fprintln(w, "{}")
			}))
			defer server.Close()
			url := strings.TrimPrefix(server.URL, "http://")
			config := Config{
				AccessKeyID:     "x",
				Endpoint:        url,
				Region:          "x",
				SecretAccessKey: "x",
				BucketName:      "x",
				SSL:             false,
			}
			s3, err := New(config)
			So(err, ShouldBeNil)
			bucket := "string"
			directory := "string"
			fileName := "string.png"
			content := bytes.NewReader([]byte("asdf"))
			length := content.Len()
			mime := "image/png"
			err = s3.CreateFile(bucket, directory, fileName, content, int64(length), mime)
			So(err, ShouldBeNil)
		})
		Convey("Fail PutObject", func() {
			server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(400)
			}))
			defer server.Close()
			url := strings.TrimPrefix(server.URL, "http://")
			config := Config{
				AccessKeyID:     "x",
				Endpoint:        url,
				Region:          "x",
				SecretAccessKey: "x",
				BucketName:      "x",
				SSL:             false,
			}
			s3, err := New(config)
			So(err, ShouldBeNil)
			bucket := "string"
			directory := "string"
			fileName := "string.png"
			content := bytes.NewReader([]byte("asdf"))
			length := content.Len()
			mime := "image/png"
			err = s3.CreateFile(bucket, directory, fileName, content, int64(length), mime)
			So(err, ShouldNotBeNil)
		})
	})
	Convey("GetS3Host", t, func() {
		endpoint := "localhost"
		config := Config{
			AccessKeyID:     "x",
			Endpoint:        endpoint,
			Region:          "x",
			SecretAccessKey: "x",
			BucketName:      "x",
			SSL:             false,
		}
		s3, err := New(config)
		So(err, ShouldBeNil)
		So(s3.GetS3Host(), ShouldEqual, endpoint)
	})
	Convey("BucketExists", t, func() {
		Convey("invalid response", func() {
			endpoint := "invalidhost-asdasd"
			config := Config{
				AccessKeyID:     "x",
				Endpoint:        endpoint,
				Region:          "x",
				SecretAccessKey: "x",
				BucketName:      "x",
				SSL:             false,
			}
			s3, err := New(config)
			So(err, ShouldBeNil)
			res, err := s3.BucketExists("somebucketname")
			So(res, ShouldBeFalse)
			So(err, ShouldNotBeNil)
		})
		Convey("Disabled S3", func() {
			s3 := helper{
				Enabled: false,
			}
			res, err := s3.BucketExists("x")
			So(err, ShouldNotBeNil)
			So(res, ShouldBeFalse)
		})
	})
}
|
package libkv
import (
"bytes"
"encoding/base64"
"encoding/json"
"net/http"
"net/url"
"path"
"regexp"
"strings"
"github.com/serverless/event-gateway/event"
"github.com/serverless/event-gateway/function"
"github.com/serverless/event-gateway/internal/pathtree"
istrings "github.com/serverless/event-gateway/internal/strings"
"github.com/serverless/event-gateway/metadata"
"github.com/serverless/event-gateway/subscription"
"github.com/serverless/libkv/store"
"go.uber.org/zap"
validator "gopkg.in/go-playground/validator.v9"
)
// CreateSubscription creates subscription.
// CreateSubscription creates subscription.
//
// It validates and normalizes the subscription, derives its deterministic ID,
// rejects duplicates and (for sync subscriptions) conflicting routes, checks
// that the referenced event type and function exist, then persists the record
// with an atomic put.
func (service Service) CreateSubscription(sub *subscription.Subscription) (*subscription.Subscription, error) {
	err := validateSubscription(sub)
	if err != nil {
		return nil, err
	}

	// The ID is derived from the subscription's identifying fields, so an
	// existing entry at that key means the same subscription already exists.
	sub.ID = newSubscriptionID(sub)
	_, err = service.SubscriptionStore.Get(subscriptionPath(sub.Space, sub.ID), &store.ReadOptions{Consistent: true})
	if err == nil {
		return nil, &subscription.ErrSubscriptionAlreadyExists{
			ID: sub.ID,
		}
	}

	// Sync subscriptions own an HTTP route, so their path must not clash
	// with an already-registered one.
	if sub.Type == subscription.TypeSync {
		err = service.checkForPathConflict(sub.Space, sub.Method, sub.Path, sub.EventType)
		if err != nil {
			return nil, err
		}
	}

	// Referential checks: the event type and function must already exist.
	_, err = service.GetEventType(sub.Space, sub.EventType)
	if err != nil {
		return nil, err
	}

	_, err = service.GetFunction(sub.Space, sub.FunctionID)
	if err != nil {
		return nil, err
	}

	buf, err := json.Marshal(sub)
	if err != nil {
		return nil, err
	}

	// AtomicPut guards against a concurrent create racing the Get above.
	_, _, err = service.SubscriptionStore.AtomicPut(subscriptionPath(sub.Space, sub.ID), buf, nil, nil)
	if err != nil {
		return nil, err
	}

	service.Log.Debug("Subscription created.", zap.Object("subscription", sub))

	return sub, nil
}
// UpdateSubscription updates subscription.
// UpdateSubscription updates subscription.
//
// Only mutable fields may change (validateSubscriptionUpdate rejects edits to
// Type, EventType, FunctionID, Path and Method); the referenced function must
// still exist, after which the record is overwritten in the store.
func (service Service) UpdateSubscription(id subscription.ID, newSub *subscription.Subscription) (*subscription.Subscription, error) {
	if err := validateSubscription(newSub); err != nil {
		return nil, err
	}

	oldSub, err := service.GetSubscription(newSub.Space, id)
	if err != nil {
		return nil, err
	}

	err = validateSubscriptionUpdate(newSub, oldSub)
	if err != nil {
		return nil, err
	}

	_, err = service.GetFunction(newSub.Space, newSub.FunctionID)
	if err != nil {
		return nil, err
	}

	// NOTE(review): a marshaling failure is reported as a validation error —
	// presumably intentional so callers see one error type; confirm.
	buf, err := json.Marshal(newSub)
	if err != nil {
		return nil, &subscription.ErrSubscriptionValidation{Message: err.Error()}
	}

	err = service.SubscriptionStore.Put(subscriptionPath(newSub.Space, newSub.ID), buf, nil)
	if err != nil {
		return nil, err
	}

	service.Log.Debug("Subscription updated.", zap.Object("subscription", newSub))

	return newSub, nil
}
// DeleteSubscription deletes subscription.
// DeleteSubscription deletes subscription.
func (service Service) DeleteSubscription(space string, id subscription.ID) error {
	sub, err := service.GetSubscription(space, id)
	if err != nil {
		return err
	}

	err = service.SubscriptionStore.Delete(subscriptionPath(space, id))
	if err != nil {
		// NOTE(review): every Delete failure is reported as "not found",
		// masking other store errors — confirm this is the intended contract.
		return &subscription.ErrSubscriptionNotFound{ID: sub.ID}
	}

	service.Log.Debug("Subscription deleted.", zap.Object("subscription", sub))

	return nil
}
// ListSubscriptions returns array of all Subscription.
// ListSubscriptions returns array of all Subscription.
//
// A missing space key is treated as an empty list; entries whose metadata
// does not match all supplied filters are skipped.
func (service Service) ListSubscriptions(space string, filters ...metadata.Filter) (subscription.Subscriptions, error) {
	subs := []*subscription.Subscription{}
	kvs, err := service.SubscriptionStore.List(spacePath(space), &store.ReadOptions{Consistent: true})
	if err != nil && err.Error() != errKeyNotFound {
		return nil, err
	}

	for _, kv := range kvs {
		s := &subscription.Subscription{}
		dec := json.NewDecoder(bytes.NewReader(kv.Value))
		err = dec.Decode(s)
		if err != nil {
			return nil, err
		}

		if !s.Metadata.Check(filters...) {
			continue
		}
		subs = append(subs, s)
	}

	return subscription.Subscriptions(subs), nil
}
// GetSubscription return single subscription.
// GetSubscription return single subscription.
//
// The store's key-not-found error is translated into
// ErrSubscriptionNotFound; any other store or decode error is passed
// through unchanged.
func (service Service) GetSubscription(space string, id subscription.ID) (*subscription.Subscription, error) {
	rawsub, err := service.SubscriptionStore.Get(subscriptionPath(space, id), &store.ReadOptions{Consistent: true})
	if err != nil {
		if err.Error() == errKeyNotFound {
			return nil, &subscription.ErrSubscriptionNotFound{ID: id}
		}
		return nil, err
	}

	sub := &subscription.Subscription{}
	dec := json.NewDecoder(bytes.NewReader(rawsub.Value))
	err = dec.Decode(sub)
	if err != nil {
		return nil, err
	}
	// Fix: return an explicit nil instead of the stale err variable
	// (always nil here, but fragile under future edits).
	return sub, nil
}
// checkForPathConflict rebuilds the route tree for the given space from all
// stored sync subscriptions with the same method and event type, then tries
// to add the candidate path; a tree insertion failure means a conflict.
func (service Service) checkForPathConflict(space, method, path string, eventType event.TypeName) error {
	tree := pathtree.NewNode()

	// NOTE(review): the List error is deliberately ignored — an empty result
	// yields an empty tree and no conflict; confirm that's the intent.
	kvs, _ := service.SubscriptionStore.List(spacePath(space), &store.ReadOptions{Consistent: true})
	for _, kv := range kvs {
		sub := &subscription.Subscription{}
		err := json.NewDecoder(bytes.NewReader(kv.Value)).Decode(sub)
		if err != nil {
			return err
		}

		if sub.Type == subscription.TypeSync && sub.Method == method && sub.EventType == eventType {
			// add existing paths to check
			tree.AddRoute(sub.Path, FunctionKey{Space: sub.Space, ID: function.ID("")})
		}
	}

	err := tree.AddRoute(path, FunctionKey{Space: space, ID: function.ID("")})
	if err != nil {
		return &subscription.ErrPathConfict{Message: err.Error()}
	}

	return nil
}
// validateSubscription normalizes a subscription in place (leading slash on
// the path, default space, uppercased method defaulting to POST) and then
// runs struct-tag validation, wrapping any failure in
// ErrSubscriptionValidation.
func validateSubscription(sub *subscription.Subscription) error {
	sub.Path = istrings.EnsurePrefix(sub.Path, "/")

	if sub.Space == "" {
		sub.Space = defaultSpace
	}

	if sub.Method == "" {
		sub.Method = http.MethodPost
	} else {
		sub.Method = strings.ToUpper(sub.Method)
	}

	validate := validator.New()
	validate.RegisterValidation("urlPath", urlPathValidator)
	validate.RegisterValidation("eventType", eventTypeValidator)
	validate.RegisterValidation("space", spaceValidator)
	err := validate.Struct(sub)
	if err != nil {
		return &subscription.ErrSubscriptionValidation{Message: err.Error()}
	}

	return nil
}
// toSegments splits a slash-separated route into its path segments,
// dropping the empty segment that precedes the leading "/".
func toSegments(route string) []string {
	return strings.Split(route, "/")[1:]
}

// isPathInConflict reports whether registering route `new` would clash with
// the already-registered route `existing`. Two routes only coexist when they
// are distinguished by identical static segments; wildcards and mismatched
// parameters conflict.
// nolint: gocyclo
func isPathInConflict(existing, new string) bool {
	existingSegments := toSegments(existing)
	newSegments := toSegments(new)

	for i := 0; i < len(newSegments); i++ {
		if i >= len(existingSegments) {
			// The existing route is shorter; nothing left to clash with.
			return false
		}

		oldSeg, newSeg := existingSegments[i], newSegments[i]
		oldIsParam := strings.HasPrefix(oldSeg, ":")
		oldIsWildcard := strings.HasPrefix(oldSeg, "*")
		newIsParam := strings.HasPrefix(newSeg, ":")
		newIsWildcard := strings.HasPrefix(newSeg, "*")

		switch {
		case !oldIsParam && !oldIsWildcard && !newIsParam && !newIsWildcard:
			// Both segments static: equal segments ending the existing route
			// mean the paths merely coincide, not conflict.
			if oldSeg == newSeg && len(existingSegments) == i+1 {
				return false
			}
		case oldIsWildcard:
			// An existing wildcard swallows everything below it.
			return true
		case oldIsParam && newIsParam && oldSeg != newSeg:
			// Same position, differently named parameters.
			return true
		case oldIsParam && !newIsParam:
			// A static segment under an existing parameter is shadowed.
			return true
		}
	}
	return true
}
// newSubscriptionID derives a deterministic ID by base64url-encoding the
// comma-joined identifying fields. Async IDs include the FunctionID; sync
// IDs omit it — presumably so a sync route (type/event/path/method) can be
// owned by at most one function; confirm against the router's expectations.
func newSubscriptionID(sub *subscription.Subscription) subscription.ID {
	var raw string
	if sub.Type == subscription.TypeAsync {
		raw = string(sub.Type) + "," + string(sub.EventType) + "," + string(sub.FunctionID) + "," + url.PathEscape(sub.Path) + "," + sub.Method
	} else {
		raw = string(sub.Type) + "," + string(sub.EventType) + "," + url.PathEscape(sub.Path) + "," + sub.Method
	}
	return subscription.ID(base64.RawURLEncoding.EncodeToString([]byte(raw)))
}
// subscriptionPath returns the store key for a subscription: the space
// prefix followed by the subscription ID.
func subscriptionPath(space string, id subscription.ID) string {
	return spacePath(space) + string(id)
}
// urlPathValidator validates if field contains URL path
func urlPathValidator(fl validator.FieldLevel) bool {
return path.IsAbs(fl.Field().String())
}
// eventTypeValidator validates if field contains event name
func eventTypeValidator(fl validator.FieldLevel) bool {
return regexp.MustCompile(`^[a-zA-Z0-9\.\-_]+$`).MatchString(fl.Field().String())
}
// validateSubscriptionUpdate ensures only mutable fields differ between the
// updated subscription and the stored one; a change to any immutable field
// yields an ErrInvalidSubscriptionUpdate naming that field.
func validateSubscriptionUpdate(newSub *subscription.Subscription, oldSub *subscription.Subscription) error {
	immutable := []struct {
		changed bool
		field   string
	}{
		{newSub.Type != oldSub.Type, "Type"},
		{newSub.EventType != oldSub.EventType, "EventType"},
		{newSub.FunctionID != oldSub.FunctionID, "FunctionID"},
		{newSub.Path != oldSub.Path, "Path"},
		{newSub.Method != oldSub.Method, "Method"},
	}
	for _, check := range immutable {
		if check.changed {
			return &subscription.ErrInvalidSubscriptionUpdate{Field: check.field}
		}
	}
	return nil
}
|
package Bootstrap
import (
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
"github.com/kylesliu/gin-demo/App/Repositories/Services"
"github.com/kylesliu/gin-demo/Bootstrap/config"
"github.com/kylesliu/gin-demo/Routes"
"net/http"
"strconv"
"time"
)
// GetApp wires up the Gin engine (CORS, logging, panic recovery), loads
// configuration and the MySQL connection, mounts the routes and returns a
// ready-to-run *http.Server bound to the configured address.
func GetApp() *http.Server {
	route := gin.New()
	//gin.SetMode(config.ServerSetting.RunMode)
	// NOTE(review): run mode is hard-coded to "debug" and the config-driven
	// line above is commented out — confirm before shipping to production.
	gin.SetMode("debug")
	route.Use(cors.Default())
	route.Use(gin.Logger())
	route.Use(gin.Recovery())

	// Load config and open the database before registering routes.
	config.Setup()
	Services.MySQLSetup()

	route = Routes.InitRouter(route)
	server := &http.Server{
		Addr:           config.ServerConfig.HttpAddr + ":" + strconv.Itoa(config.ServerConfig.HttpPort),
		Handler:        route,
		ReadTimeout:    10 * time.Second,
		WriteTimeout:   10 * time.Second,
		MaxHeaderBytes: 1 << 20,
	}
	return server
}
|
package mongodb
import (
"context"
"testing"
"github.com/go-ocf/kit/security/certManager"
"github.com/go-ocf/cloud/cloud2cloud-connector/store"
"github.com/kelseyhightower/envconfig"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// newStore builds a mongodb Store for tests, using TLS material produced by
// the DIAL-prefixed environment configuration; any setup failure fails t.
func newStore(ctx context.Context, t *testing.T, cfg Config) *Store {
	var cmconfig certManager.Config
	err := envconfig.Process("DIAL", &cmconfig)
	assert.NoError(t, err)
	dialCertManager, err := certManager.NewCertManager(cmconfig)
	require.NoError(t, err)
	// NOTE(review): the cert manager is closed when newStore returns while
	// the returned Store keeps using the TLS config derived from it —
	// presumably the config remains valid after Close; confirm.
	defer dialCertManager.Close()
	tlsConfig := dialCertManager.GetClientTLSConfig()

	s, err := NewStore(ctx, cfg, WithTLS(tlsConfig))
	require.NoError(t, err)
	return s
}
// TestStore_UpdateLinkedCloud seeds one linked cloud ("testID") and checks
// that updating it succeeds while updating an unknown ID fails.
func TestStore_UpdateLinkedCloud(t *testing.T) {
	type args struct {
		sub store.LinkedCloud
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "not found",
			args: args{
				sub: store.LinkedCloud{
					ID:           "testIDnotFound",
					Name:         "testName",
					ClientID:     "testClientID",
					ClientSecret: "testClientSecret",
					Scopes:       []string{"testScopes"},
					Endpoint: store.Endpoint{
						AuthUrl:  "testAuthUrl",
						TokenUrl: "testTokenUrl",
					},
				},
			},
			wantErr: true,
		},
		{
			name: "valid",
			args: args{
				sub: store.LinkedCloud{
					ID:           "testID",
					Name:         "testNameUpdated",
					ClientID:     "testClientID",
					ClientSecret: "testClientSecret",
					Scopes:       []string{"testScopes"},
					Audience:     "testAudience",
					Endpoint: store.Endpoint{
						AuthUrl:  "testAuthUrl",
						TokenUrl: "testTokenUrl",
					},
				},
			},
		},
	}

	require := require.New(t)
	var config Config
	err := envconfig.Process("", &config)
	require.NoError(err)
	ctx := context.Background()
	s := newStore(ctx, t, config)
	require.NoError(err)
	// Drop test data even if an assertion fails.
	defer s.Clear(ctx)
	assert := assert.New(t)
	// Seed the record the "valid" case updates.
	err = s.InsertLinkedCloud(ctx, store.LinkedCloud{
		ID:           "testID",
		Name:         "testName",
		ClientID:     "testClientID",
		ClientSecret: "testClientSecret",
		Scopes:       []string{"testScopes"},
		Endpoint: store.Endpoint{
			AuthUrl:  "testAuthUrl",
			TokenUrl: "testTokenUrl",
		},
	})
	require.NoError(err)

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := s.UpdateLinkedCloud(ctx, tt.args.sub)
			if tt.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
			}
		})
	}
}
// TestStore_RemoveLinkedCloud seeds one linked cloud ("testID") and checks
// that removing it succeeds while removing an unknown ID fails.
func TestStore_RemoveLinkedCloud(t *testing.T) {
	type args struct {
		LinkedCloudId string
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "not found",
			args: args{
				LinkedCloudId: "notFound",
			},
			wantErr: true,
		},
		{
			name: "valid",
			args: args{
				LinkedCloudId: "testID",
			},
		},
	}

	require := require.New(t)
	var config Config
	err := envconfig.Process("", &config)
	require.NoError(err)
	ctx := context.Background()
	s := newStore(ctx, t, config)
	// Drop test data even if an assertion fails.
	defer s.Clear(ctx)
	assert := assert.New(t)
	// Seed the record the "valid" case removes.
	err = s.InsertLinkedCloud(ctx, store.LinkedCloud{
		ID:           "testID",
		Name:         "testName",
		ClientID:     "testClientID",
		ClientSecret: "testClientSecret",
		Scopes:       []string{"testScopes"},
		Endpoint: store.Endpoint{
			AuthUrl:  "testAuthUrl",
			TokenUrl: "testTokenUrl",
		},
	})
	require.NoError(err)

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := s.RemoveLinkedCloud(ctx, tt.args.LinkedCloudId)
			if tt.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
			}
		})
	}
}
// testLinkedCloudHandler collects every linked cloud yielded by an iterator.
type testLinkedCloudHandler struct {
	lcs []store.LinkedCloud
}

// Handle drains iter, appending each decoded linked cloud to h.lcs, and
// returns the iterator's terminal error (nil on clean exhaustion).
func (h *testLinkedCloudHandler) Handle(ctx context.Context, iter store.LinkedCloudIter) (err error) {
	var cloud store.LinkedCloud
	for iter.Next(ctx, &cloud) {
		h.lcs = append(h.lcs, cloud)
	}
	return iter.Err()
}
// TestStore_LoadLinkedClouds verifies LoadLinkedClouds query semantics:
// an empty query returns everything, an ID query returns exactly that
// record, and an unknown ID returns nothing.
func TestStore_LoadLinkedClouds(t *testing.T) {
	lcs := []store.LinkedCloud{{
		ID:           "testID",
		Name:         "testName",
		ClientID:     "testClientID",
		ClientSecret: "testClientSecret",
		Scopes:       []string{"testScopes"},
		Audience:     "testAudience",
		Endpoint: store.Endpoint{
			AuthUrl:  "testAuthUrl",
			TokenUrl: "testTokenUrl",
		},
	},
		{
			ID:           "testID2",
			Name:         "testName",
			ClientID:     "testClientID",
			ClientSecret: "testClientSecret",
			Scopes:       []string{"testScopes"},
			Endpoint: store.Endpoint{
				AuthUrl:  "testAuthUrl",
				TokenUrl: "testTokenUrl",
			},
		},
	}
	type args struct {
		query store.Query
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
		want    []store.LinkedCloud
	}{
		{
			name: "all",
			args: args{
				query: store.Query{},
			},
			want: lcs,
		},
		{
			name: "id",
			args: args{
				query: store.Query{ID: lcs[0].ID},
			},
			want: []store.LinkedCloud{lcs[0]},
		},
		{
			name: "not found",
			args: args{
				query: store.Query{ID: "not found"},
			},
		},
	}
	require := require.New(t)
	var config Config
	err := envconfig.Process("", &config)
	require.NoError(err)
	ctx := context.Background()
	s := newStore(ctx, t, config)
	// Fixed: removed a stale require.NoError(err) here — err had already
	// been checked above and newStore fails the test itself on error.
	defer s.Clear(ctx)
	assert := assert.New(t)
	for _, l := range lcs {
		err = s.InsertLinkedCloud(ctx, l)
		require.NoError(err)
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var h testLinkedCloudHandler
			err := s.LoadLinkedClouds(ctx, tt.args.query, &h)
			if tt.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
				assert.Equal(tt.want, h.lcs)
			}
		})
	}
}
|
package main
import (
"fmt"
"log"
"net/http"
"strings"
)
// adminAuthMiddleware gates admin handlers behind a session cookie carrying
// an access token. Unknown users are created on first sight; unknown tokens
// are persisted. On authentication failure it redirects to the login flow
// and stops the middleware chain.
func (conn *Connection) adminAuthMiddleware(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
	log.Println("Admin Middleware")
	// 1. Get cookie. If there is no cookie, redirect to login.
	session, err := store.Get(r, CookieName)
	if err != nil {
		fmt.Printf("Error getting cookie %s\n", err.Error())
		login(w, r)
		// Fixed: was missing this return — execution continued and
		// dereferenced an invalid session below.
		return
	}
	if session.Values["accessToken"] == nil {
		fmt.Println("No token. Redirect to login")
		login(w, r)
		// Fixed: was missing this return — the type assertions below
		// would panic on a nil value.
		return
	}
	log.Println("We have an access token")
	// 2. Get access token and email from cookie.
	token := session.Values["accessToken"].(string)
	email := session.Values["email"].(string)
	// 3. Get user from Db by email; create the user on first login.
	user, err := conn.getUserFromDb(email)
	if err != nil {
		fmt.Println("No user with that address, so create user")
		if err := conn.createUser(email, token); err != nil {
			// Fixed: the original redirected AND called next on this
			// error path, writing two responses. Redirect and stop.
			// Temporary (not permanent) redirect: this is a transient
			// failure, not a cacheable permanent move.
			http.Redirect(w, r, "/", http.StatusTemporaryRedirect)
			return
		}
		// User was just created with this token; nothing left to verify.
		next(w, r)
		return
	}
	log.Println("Email is in Db")
	// 4. Email is OK. Check if token is in Db, otherwise add it.
	if !tokenIsValid(user, token) {
		if err := conn.createToken(email, token); err != nil {
			// Fixed: the original called next here and then fell through
			// to call next again at the bottom — the chain ran twice.
			next(w, r)
			return
		}
	}
	log.Println("Token is in Db")
	next(w, r)
}
// tokenIsValid reports whether token is among the user's stored tokens.
func tokenIsValid(user User, token string) bool {
	// Fixed: removed the debug fmt.Println of every stored token (leaked
	// credentials to stdout) and replaced strings.Compare with the
	// idiomatic == comparison.
	for _, t := range user.Tokens {
		if t == token {
			return true
		}
	}
	return false
}
func login(w http.ResponseWriter, r *http.Request) {
fmt.Println("Redirect to login")
http.Redirect(w, r, "/googlelogin", http.StatusPermanentRedirect)
}
|
/*
Package struct2elasticMapping enables developers to quickly build an
Elasticsearch mapping from Go types.
Currently the Mapping is broken down to basic types (int,
byte, string, etc.) and support for more complex types like
time.Time (date), IP-Addresses, etc. is missing.
Using the special Tag "elastic" for structure members it
is possible to determine the type, analyzer and indexer
for the field.
// Format: "Type,Analyzer,Index"
type TestStruct struct {
Field1 int64 `json:"title,omitempty" elastic:"date,standard,analyzed"`
Subject string `json:"subject" elastic:",whitespace,not_analyzed"`
Body string `json:"body" elastic:",german"`
}
Currently it enables developers to quickly build a working
Mapping that can then be refined by the developer during
testing.
package main
import (
"fmt"
nmap "github.com/lair-framework/go-nmap"
s2m "github.com/marpie/struct2elasticMapping"
)
// Format: "Type,Analyzer,Index"
type TestStruct struct {
Field1 int64 `json:"title,omitempty" elastic:"date,standard,analyzed"`
Subject string `json:"subject" elastic:",whitespace,not_analyzed"`
Body string `json:"body" elastic:",german"`
}
func main() {
name, mapping, err := s2m.Analyze(nmap.NmapRun{}, "json")
//name, mapping, err := s2m.Analyze(TestStruct{}, "json")
if err != nil {
panic(err.Error())
}
s, err := s2m.MappingAsJson(name, mapping)
if err != nil {
panic(err.Error())
}
fmt.Println(string(s))
}
*/
package struct2elasticMapping
|
package main
import "fmt"
// person is a small demo struct used to illustrate struct literals.
type person struct {
	name string // the person's name
	age int // age in years
}
// main demonstrates the different ways of constructing a struct value
// (positional, named, partial) and that struct values are mutable.
func main() {
	// Positional and named-field initialisation.
	fmt.Println(person{"satish", 29})
	fmt.Println(person{name: "kumar", age: 22})
	// Omitted fields take their zero values.
	fmt.Println(person{name: "Fred"})
	fmt.Println(person{age: 20})
	//fmt.Println(person(20)) //cannot use 20 (type int) as type string in field value
	sean := person{name: "Sean", age: 50}
	fmt.Println(sean.name)
	sean.age = 55 // Structs are mutable
	fmt.Println(sean.age)
}
|
package base
import (
"io"
"log"
"log/syslog"
"net/url"
)
// Bootstrap accumulates application identity (name, version) and logging
// configuration through a chainable builder API.
type Bootstrap struct {
	name string // application name, set once at construction
	version Version // version parts, filled in via the builder methods
	logging *LoggingAdapter // selected logging backend (std log, glog or syslog)
}
// NewBootstrap returns a builder for an application called name.
func NewBootstrap(name string) *Bootstrap {
	b := &Bootstrap{name: name}
	return b
}
// Version sets the major, minor and build numbers at once and returns the
// builder for chaining.
func (b *Bootstrap) Version(major, minor, build int) *Bootstrap {
	b.version.Major, b.version.Minor, b.version.Build = major, minor, build
	return b
}
// Major sets only the major version number and returns the builder.
func (b *Bootstrap) Major(major int) *Bootstrap {
	b.version.Major = major
	return b
}

// Minor sets only the minor version number and returns the builder.
func (b *Bootstrap) Minor(minor int) *Bootstrap {
	b.version.Minor = minor
	return b
}

// Build sets only the build number and returns the builder.
func (b *Bootstrap) Build(build int) *Bootstrap {
	b.version.Build = build
	return b
}

// Revision sets the revision string (e.g. a VCS hash) and returns the builder.
// NOTE(review): the Version field is spelled "Revison" — presumably a typo
// in the Version type's declaration; fixing it would touch that type and
// every other user, so it is only flagged here.
func (b *Bootstrap) Revision(rev string) *Bootstrap {
	b.version.Revison = rev
	return b
}
// LogPrefix selects the standard-library log backend and sets its prefix.
// NOTE(review): this mutates the process-global log package, not just this
// Bootstrap instance.
func (b *Bootstrap) LogPrefix(prefix string) *Bootstrap {
	b.logging = LogAdapter
	log.SetPrefix(prefix)
	return b
}

// LogOutput selects the standard-library log backend and redirects its
// output to w (also a process-global change).
func (b *Bootstrap) LogOutput(w io.Writer) *Bootstrap {
	b.logging = LogAdapter
	log.SetOutput(w)
	return b
}

// GLog selects the glog logging backend.
func (b *Bootstrap) GLog() *Bootstrap {
	b.logging = GLogAdapter
	return b
}
// Syslog selects a local syslog backend on facility LOG_LOCAL0.
// NOTE(review): panics if syslog is unavailable (e.g. on Windows or in
// minimal containers) — acceptable only because this runs at bootstrap,
// but consider returning the error instead.
func (b *Bootstrap) Syslog() *Bootstrap {
	w, err := syslog.New(syslog.LOG_LOCAL0, "")
	if err != nil {
		panic(err)
	}
	b.logging = newSyslogAdapter(w)
	return b
}
// SyslogDial selects a remote syslog backend reached via uri (scheme is the
// network, host is the address), on facility LOG_LOCAL0.
// NOTE(review): panics on connection failure, like Syslog above.
func (b *Bootstrap) SyslogDial(uri *url.URL) *Bootstrap {
	w, err := syslog.Dial(uri.Scheme, uri.Host, syslog.LOG_LOCAL0, "")
	if err != nil {
		panic(err)
	}
	b.logging = newSyslogAdapter(w)
	return b
}
|
package routes
import (
"go-kemas/controllers"
"github.com/gin-gonic/gin"
"gorm.io/gorm"
)
// SetupRoutes builds the gin engine, injects the DB handle into every
// request context under the "db" key, and registers the task CRUD routes.
func SetupRoutes(db *gorm.DB) *gin.Engine {
	r := gin.Default()
	// Make the database available to handlers via c.MustGet("db").
	r.Use(func(c *gin.Context) {
		c.Set("db", db)
	})
	r.GET("/tasks", controllers.FindTasks)
	r.POST("/tasks", controllers.CreateTask)
	r.GET("/tasks/:id", controllers.FindTask)
	r.PATCH("/tasks/:id", controllers.UpdateTask)
	// Fixed: the path was "tasks/:id" without the leading slash — gin's
	// router requires paths to begin with '/' and panics at registration.
	r.DELETE("/tasks/:id", controllers.DeleteTask)
	return r
}
|
package data
import (
"github.com/bububa/oppo-omni/core"
"github.com/bububa/oppo-omni/model/data"
)
// 小游戏-图表
// QQuickAppGame queries the quick-app game chart report
// (original note: "小游戏-图表" — mini-game chart).
func QQuickAppGame(clt *core.SDKClient, req *data.QQuickAppGameRequest) (*data.QQuickAppGameResult, error) {
	req.SetResourceName("data")
	req.SetResourceAction("Q/quickApp/game")
	var resp data.QQuickAppGameResponse
	if err := clt.Post(req, &resp); err != nil {
		return nil, err
	}
	return resp.Data, nil
}
|
package main
import (
"container/list"
"fmt"
)
// main exercises addTwoNumbers with progressively shorter first operands,
// down to the degenerate empty-list case.
func main() {
	x := list.New()
	x.PushBack(2)
	x.PushBack(4)
	x.PushBack(3)
	y := list.New()
	y.PushBack(5)
	y.PushBack(6)
	y.PushBack(4)
	show(x) // 2 -> 4 -> 3
	show(y) // 5 -> 6 -> 4
	sum := addTwoNumbers(x, y)
	show(sum) // 7 -> 0 -> 8
	fmt.Println()
	// Drop the most significant digit of x (its last node).
	x.Remove(x.Back())
	show(x) // 2 -> 4
	show(y) // 5 -> 6 -> 4
	sum = addTwoNumbers(x, y)
	show(sum) // 7 -> 0 -> 5
	fmt.Println()
	// Drop the least significant digit of x (its first node).
	x.Remove(x.Front())
	show(x) // 4
	show(y) // 5 -> 6 -> 4
	sum = addTwoNumbers(x, y)
	show(sum) // 9 -> 6 -> 4
	fmt.Println()
	// x is now empty: the sum is just y.
	x.Remove(x.Front())
	show(x) //
	show(y) // 5 -> 6 -> 4
	sum = addTwoNumbers(x, y)
	show(sum) // 5 -> 6 -> 4
	fmt.Println()
}
// Add Two Numbers - LeetCode.
// You are given two non-empty linked lists representing two non-negative integers.
// The digits are stored in reverse order and each of their nodes contain a
// single digit. Add the two numbers and return it as a linked list.
// ```
// 2 -> 4 -> 3
// +
// 5 -> 6 -> 4
// =
// 7 -> 0 -> 8
// (342 + 465 = 807)
// ```
func addTwoNumbers(list1, list2 *list.List) *list.List {
r := list.New()
a := list1.Front()
b := list2.Front()
carry := 0
for a != nil || b != nil {
av := 0
bv := 0
if a != nil {
av, _ = a.Value.(int)
a = a.Next()
}
if b != nil {
bv, _ = b.Value.(int)
b = b.Next()
}
digit := carry + av + bv
carry = digit / 10
r.PushBack(digit % 10)
}
if carry > 0 {
r.PushBack(carry)
}
return r
}
// show prints the list's values separated by " -> ", terminated by a newline.
func show(a *list.List) {
	sep := ""
	for e := a.Front(); e != nil; e = e.Next() {
		fmt.Print(sep, e.Value)
		sep = " -> "
	}
	fmt.Println()
}
|
package main
import (
"google_sheet_parser/App/models"
"google_sheet_parser/App/routers"
"log"
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
_ "github.com/jinzhu/gorm/dialects/mysql"
)
// main wires up logging, CORS, the Google Sheets client and the HTTP
// routes, then serves on :7070.
func main() {
	corsConfig := cors.DefaultConfig()
	corsConfig.AllowHeaders = append(corsConfig.AllowHeaders, "Authorization")
	corsConfig.AllowAllOrigins = true
	// Register database
	// log.Println("Registering database..")
	// models.OpenDB(os.Getenv("DBCONN"), false) // Pass false for not logging database queries
	// defer models.CloseDB(models.DB)
	log.SetOutput(gin.DefaultWriter)             // logs gin
	log.SetFlags(log.LstdFlags | log.Lshortfile) // logs with timestamp and file name and line number
	router := gin.Default()
	// Fixed: corsConfig was built but never attached to the router, so the
	// CORS settings above had no effect.
	router.Use(cors.New(corsConfig))
	router.Use(gin.Logger())
	router.Use(gin.Recovery())
	models.InitGoogleSheetClient()
	router = routers.InitRouters(router)
	//log.Println("AppName is: ", config.Get("server.appName").(string))
	// Run blocks until the server stops; surface startup errors (e.g. port
	// already in use) instead of silently discarding them.
	if err := router.Run(":7070"); err != nil {
		log.Fatal(err)
	}
}
|
/*
Two structures with the same fields will not be considered identical in Go
if their fields are not declared in exactly the same order.
*/
package main
import "fmt"
// s1 is a demo struct combining scalar fields with a slice field.
type s1 struct {
	name string // person's name
	age int // age in years
	profile string // job title / role
	qualifications []string // list of degrees or certificates
}
// main builds s1 values with named-field literals and prints them,
// including an array of s1 where only the first two slots are filled
// (the rest stay zero-valued).
func main() {
	//var p1 = s1{name: "Ankita", age: 22, profile: "Reactjs Developer", qualifications: ["MCA","BSC"]}
	ankita := s1{name: "Ankita", age: 22, profile: "Reactjs Developer", qualifications: []string{"Bsc", "MCA"}}
	fmt.Println(ankita.qualifications)
	fmt.Println(ankita)
	people := [9]s1{
		{name: "harsh", age: 20, profile: "machine learning", qualifications: []string{"btech", "mtech"}},
		{name: "somi", age: 20, profile: "teacher", qualifications: []string{"bsc", "msc"}},
	}
	fmt.Println(people[0].qualifications)
	fmt.Println(people[0])
	fmt.Println(people[1].qualifications)
	fmt.Println(people[1])
	fmt.Println(people)
}
|
package service
import (
"github.com/SungKing/blogsystem/models/dao"
"github.com/SungKing/blogsystem/models/entity"
)
// TagService exposes tag persistence operations, delegating to dao.TagDao.
type TagService struct{}

var tagDao = new(dao.TagDao)

// Insert stores a tag and returns the new row ID.
func (*TagService) Insert(t entity.Tag) (int64, error) {
	return tagDao.Insert(t)
}

// Query returns one page of tags.
func (*TagService) Query(pageIndex int32, pageSize int32) (tags []entity.Tag) {
	return tagDao.Query(pageIndex, pageSize)
}

// Find returns the tag with the given ID.
func (*TagService) Find(id int32) entity.Tag {
	return tagDao.Find(id)
}
|
package compute
import (
"net/http"
"os"
"reflect"
"testing"
"github.com/databrickslabs/databricks-terraform/common"
"github.com/databrickslabs/databricks-terraform/internal/qa"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestGetOrCreateRunningCluster_AzureAuth: with no existing cluster named
// "mount", GetOrCreateRunningCluster must pick the smallest listed node
// type (Standard_F4s), create the cluster, and wait for RUNNING.
// Fixture order matters: the mock server serves them in sequence.
func TestGetOrCreateRunningCluster_AzureAuth(t *testing.T) {
	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/list",
			// Empty list forces the create path.
			Response: map[string]interface{}{},
		},
		{
			Method:       "GET",
			ReuseRequest: true,
			Resource:     "/api/2.0/clusters/list-node-types",
			Response: NodeTypeList{
				[]NodeType{
					{
						NodeTypeID:     "Standard_F4s",
						InstanceTypeID: "Standard_F4s",
						MemoryMB:       8192,
						NumCores:       4,
						NodeInstanceType: &NodeInstanceType{
							LocalDisks:      1,
							InstanceTypeID:  "Standard_F4s",
							LocalDiskSizeGB: 16,
							LocalNVMeDisks:  0,
						},
					},
					{
						NodeTypeID:     "Standard_L80s_v2",
						InstanceTypeID: "Standard_L80s_v2",
						MemoryMB:       655360,
						NumCores:       80,
						NodeInstanceType: &NodeInstanceType{
							LocalDisks:      2,
							InstanceTypeID:  "Standard_L80s_v2",
							LocalDiskSizeGB: 160,
							LocalNVMeDisks:  1,
						},
					},
				},
			},
		},
		{
			Method:   "POST",
			Resource: "/api/2.0/clusters/create",
			// The smallest node type must have been chosen.
			ExpectedRequest: Cluster{
				AutoterminationMinutes: 10,
				ClusterName:            "mount",
				NodeTypeID:             "Standard_F4s",
				NumWorkers:             1,
				SparkVersion:           CommonRuntimeVersion(),
			},
			Response: ClusterID{
				ClusterID: "bcd",
			},
		},
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=bcd",
			Response: ClusterInfo{
				State: "RUNNING",
			},
		},
	})
	defer server.Close()
	require.NoError(t, err)
	client.AzureAuth.ResourceID = "/a/b/c"
	clusterInfo, err := NewClustersAPI(client).GetOrCreateRunningCluster("mount")
	require.NoError(t, err)
	assert.NotNil(t, clusterInfo)
}
// TestGetOrCreateRunningCluster_Existing_AzureAuth: when a cluster named
// "mount" already exists but is TERMINATED, the API must start it and wait
// for RUNNING instead of creating a new cluster.
func TestGetOrCreateRunningCluster_Existing_AzureAuth(t *testing.T) {
	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/list",
			Response: ClusterList{
				Clusters: []ClusterInfo{
					{
						ClusterID:              "abc",
						State:                  "TERMINATED",
						AutoterminationMinutes: 10,
						ClusterName:            "mount",
						NodeTypeID:             "Standard_F4s",
						NumWorkers:             1,
						SparkVersion:           CommonRuntimeVersion(),
					},
				},
			},
		},
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State: "TERMINATED",
			},
		},
		{
			Method:   "POST",
			Resource: "/api/2.0/clusters/start",
			ExpectedRequest: ClusterID{
				ClusterID: "abc",
			},
		},
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State: "RUNNING",
			},
		},
	})
	defer server.Close()
	require.NoError(t, err)
	client.AzureAuth.ResourceID = "/a/b/c"
	clusterInfo, err := NewClustersAPI(client).GetOrCreateRunningCluster("mount")
	require.NoError(t, err)
	assert.NotNil(t, clusterInfo)
}
// TestWaitForClusterStatus_RetryOnNotFound: a transient 404 from the get
// endpoint must be retried, not treated as a terminal failure.
func TestWaitForClusterStatus_RetryOnNotFound(t *testing.T) {
	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: common.APIErrorBody{
				Message: "Nope",
			},
			Status: 404,
		},
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State: "RUNNING",
			},
		},
	})
	defer server.Close()
	require.NoError(t, err)
	client.AzureAuth.ResourceID = "/a/b/c"
	clusterInfo, err := NewClustersAPI(client).waitForClusterStatus("abc", ClusterStateRunning)
	require.NoError(t, err)
	assert.NotNil(t, clusterInfo)
}
// TestWaitForClusterStatus_StopRetryingEarly: a non-retryable HTTP error
// (418) must abort the wait immediately and surface the API message.
func TestWaitForClusterStatus_StopRetryingEarly(t *testing.T) {
	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: common.APIErrorBody{
				Message: "I am a teapot",
			},
			Status: 418,
		},
	})
	defer server.Close()
	require.NoError(t, err)
	_, err = NewClustersAPI(client).waitForClusterStatus("abc", ClusterStateRunning)
	require.Error(t, err)
	require.Contains(t, err.Error(), "I am a teapot")
}
// TestWaitForClusterStatus_NotReachable: UNKNOWN is a state from which the
// target (RUNNING) cannot be reached, so the wait must fail with an error
// carrying the cluster's state message.
func TestWaitForClusterStatus_NotReachable(t *testing.T) {
	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State:        ClusterStateUnknown,
				StateMessage: "Something strange is going on",
			},
		},
	})
	defer server.Close()
	require.NoError(t, err)
	client.AzureAuth.ResourceID = "/a/b/c"
	_, err = NewClustersAPI(client).waitForClusterStatus("abc", ClusterStateRunning)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "abc is not able to transition from UNKNOWN to RUNNING: Something strange is going on.")
}
// TestWaitForClusterStatus_NormalRetry: PENDING is a transitional state, so
// the wait must poll again and succeed once the cluster reports RUNNING.
func TestWaitForClusterStatus_NormalRetry(t *testing.T) {
	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State: ClusterStatePending,
			},
		},
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State: ClusterStateRunning,
			},
		},
	})
	defer server.Close()
	require.NoError(t, err)
	clusterInfo, err := NewClustersAPI(client).waitForClusterStatus("abc", ClusterStateRunning)
	require.NoError(t, err)
	assert.Equal(t, ClusterStateRunning, string(clusterInfo.State))
}
// TestEditCluster_Pending: editing a cluster that is PENDING must first
// wait for it to reach RUNNING, then apply the edit and wait again.
func TestEditCluster_Pending(t *testing.T) {
	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State:     ClusterStatePending,
				ClusterID: "abc",
			},
		},
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State:     ClusterStateRunning,
				ClusterID: "abc",
			},
		},
		{
			Method:   "POST",
			Resource: "/api/2.0/clusters/edit",
			Response: Cluster{
				ClusterID:   "abc",
				ClusterName: "Morty",
			},
		},
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State: ClusterStateRunning,
			},
		},
	})
	defer server.Close()
	require.NoError(t, err)
	clusterInfo, err := NewClustersAPI(client).Edit(Cluster{
		ClusterID:   "abc",
		ClusterName: "Morty",
	})
	require.NoError(t, err)
	assert.Equal(t, ClusterStateRunning, string(clusterInfo.State))
}
// TestEditCluster_Terminating: editing a cluster that is TERMINATING must
// wait for full termination, apply the edit, and leave the cluster
// terminated (no implicit restart).
func TestEditCluster_Terminating(t *testing.T) {
	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State:     ClusterStateTerminating,
				ClusterID: "abc",
			},
		},
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State:     ClusterStateTerminated,
				ClusterID: "abc",
			},
		},
		{
			Method:   "POST",
			Resource: "/api/2.0/clusters/edit",
			Response: Cluster{
				ClusterID:   "abc",
				ClusterName: "Morty",
			},
		},
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State: ClusterStateRunning,
			},
		},
	})
	defer server.Close()
	require.NoError(t, err)
	clusterInfo, err := NewClustersAPI(client).Edit(Cluster{
		ClusterID:   "abc",
		ClusterName: "Morty",
	})
	require.NoError(t, err)
	assert.Equal(t, ClusterStateTerminated, string(clusterInfo.State))
}
// TestEditCluster_Error: a cluster stuck in ERROR cannot be edited; the
// call must fail and carry the cluster's state message.
func TestEditCluster_Error(t *testing.T) {
	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State:        ClusterStateError,
				ClusterID:    "abc",
				StateMessage: "I am a teapot",
			},
		},
	})
	defer server.Close()
	require.NoError(t, err)
	_, err = NewClustersAPI(client).Edit(Cluster{
		ClusterID:   "abc",
		ClusterName: "Morty",
	})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "I am a teapot")
}
// TestStartAndGetInfo_Pending: a PENDING cluster needs no start call —
// StartAndGetInfo just waits for it to reach RUNNING.
func TestStartAndGetInfo_Pending(t *testing.T) {
	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State:     ClusterStatePending,
				ClusterID: "abc",
			},
		},
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State:     ClusterStateRunning,
				ClusterID: "abc",
			},
		},
	})
	defer server.Close()
	require.NoError(t, err)
	clusterInfo, err := NewClustersAPI(client).StartAndGetInfo("abc")
	require.NoError(t, err)
	assert.Equal(t, ClusterStateRunning, string(clusterInfo.State))
}
// TestStartAndGetInfo_Terminating: a TERMINATING cluster must first settle
// into TERMINATED, then be started, then be waited on until RUNNING.
func TestStartAndGetInfo_Terminating(t *testing.T) {
	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State:     ClusterStateTerminating,
				ClusterID: "abc",
			},
		},
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State:     ClusterStateTerminated,
				ClusterID: "abc",
			},
		},
		{
			Method:   "POST",
			Resource: "/api/2.0/clusters/start",
			ExpectedRequest: ClusterID{
				ClusterID: "abc",
			},
		},
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State:     ClusterStateRunning,
				ClusterID: "abc",
			},
		},
	})
	defer server.Close()
	require.NoError(t, err)
	clusterInfo, err := NewClustersAPI(client).StartAndGetInfo("abc")
	require.NoError(t, err)
	assert.Equal(t, ClusterStateRunning, string(clusterInfo.State))
}
// TestStartAndGetInfo_Error: a cluster in ERROR state is restarted and the
// call succeeds once the restart brings it to RUNNING.
func TestStartAndGetInfo_Error(t *testing.T) {
	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State:        ClusterStateError,
				StateMessage: "I am a teapot",
			},
		},
		{
			Method:   "POST",
			Resource: "/api/2.0/clusters/start",
			ExpectedRequest: ClusterID{
				ClusterID: "abc",
			},
		},
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State:     ClusterStateRunning,
				ClusterID: "abc",
			},
		},
	})
	defer server.Close()
	require.NoError(t, err)
	clusterInfo, err := NewClustersAPI(client).StartAndGetInfo("abc")
	require.NoError(t, err)
	assert.Equal(t, ClusterStateRunning, string(clusterInfo.State))
}
// TestStartAndGetInfo_StartingError: if the restart itself fails with an
// API error, StartAndGetInfo must propagate that error.
func TestStartAndGetInfo_StartingError(t *testing.T) {
	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State:        ClusterStateError,
				StateMessage: "I am a teapot",
			},
		},
		{
			Method:   "POST",
			Resource: "/api/2.0/clusters/start",
			ExpectedRequest: ClusterID{
				ClusterID: "abc",
			},
			Response: common.APIErrorBody{
				Message: "I am a teapot!",
			},
			Status: 418,
		},
	})
	defer server.Close()
	require.NoError(t, err)
	_, err = NewClustersAPI(client).StartAndGetInfo("abc")
	require.Error(t, err)
	assert.Contains(t, err.Error(), "I am a teapot")
}
// TestPermanentDelete_Pinned: when permanent-delete is rejected because the
// cluster is pinned, PermanentDelete must unpin the cluster and retry the
// permanent delete once.
func TestPermanentDelete_Pinned(t *testing.T) {
	client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
		{
			Method:   "POST",
			Resource: "/api/2.0/clusters/delete",
			ExpectedRequest: ClusterID{
				ClusterID: "abc",
			},
		},
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Response: ClusterInfo{
				State: ClusterStateTerminated,
			},
		},
		{
			Method:   "POST",
			Resource: "/api/2.0/clusters/permanent-delete",
			ExpectedRequest: ClusterID{
				ClusterID: "abc",
			},
			// First attempt fails: the cluster is pinned.
			Response: common.APIErrorBody{
				Message: "unpin the cluster first",
			},
			Status: 400,
		},
		{
			Method:   "POST",
			Resource: "/api/2.0/clusters/unpin",
			ExpectedRequest: ClusterID{
				ClusterID: "abc",
			},
		},
		{
			Method:   "POST",
			Resource: "/api/2.0/clusters/permanent-delete",
			ExpectedRequest: ClusterID{
				ClusterID: "abc",
			},
		},
	})
	defer server.Close()
	require.NoError(t, err)
	err = NewClustersAPI(client).PermanentDelete("abc")
	require.NoError(t, err)
}
// TestClustersAPI_List: table test for List — a successful response is
// decoded into []ClusterInfo; a 400 surfaces as an error.
func TestClustersAPI_List(t *testing.T) {
	tests := []struct {
		name           string
		response       string
		responseStatus int
		wantURI        string
		want           interface{}
		wantErr        bool
	}{
		{
			name: "List test",
			response: `{
				"clusters":[
					{
						"cluster_name":"autoscaling-cluster",
						"spark_version":"5.3.x-scala2.11",
						"node_type_id":"i3.xlarge",
						"autoscale":{
							"min_workers":2,
							"max_workers":50
						}
					},
					{
						"cluster_name":"autoscaling-cluster2",
						"spark_version":"5.3.x-scala2.11",
						"node_type_id":"i3.xlarge",
						"autoscale":{
							"min_workers":2,
							"max_workers":50
						}
					}
				]
			}`,
			responseStatus: http.StatusOK,
			wantURI:        "/api/2.0/clusters/list",
			want: []ClusterInfo{
				{
					ClusterName:  "autoscaling-cluster",
					SparkVersion: "5.3.x-scala2.11",
					NodeTypeID:   "i3.xlarge",
					AutoScale: &AutoScale{
						MinWorkers: 2,
						MaxWorkers: 50,
					},
				},
				{
					ClusterName:  "autoscaling-cluster2",
					SparkVersion: "5.3.x-scala2.11",
					NodeTypeID:   "i3.xlarge",
					AutoScale: &AutoScale{
						MinWorkers: 2,
						MaxWorkers: 50,
					},
				},
			},
			wantErr: false,
		},
		{
			name:           "List failure test",
			response:       ``,
			responseStatus: http.StatusBadRequest,
			wantURI:        "/api/2.0/clusters/list",
			want:           []ClusterInfo{},
			wantErr:        true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			qa.AssertRequestWithMockServer(t, nil, http.MethodGet, tt.wantURI, nil, tt.response, tt.responseStatus, tt.want, tt.wantErr, func(client *common.DatabricksClient) (interface{}, error) {
				return NewClustersAPI(client).List()
			})
		})
	}
}
// TestClustersAPI_ListZones: table test for ListZones — a successful
// response decodes into ZonesInfo; a 400 surfaces as an error.
func TestClustersAPI_ListZones(t *testing.T) {
	tests := []struct {
		name           string
		response       string
		responseStatus int
		wantURI        string
		want           interface{}
		wantErr        bool
	}{
		{
			name: "ListZones test",
			response: `{
				"zones": [
					"us-west-2b",
					"us-west-2c",
					"us-west-2a"
				],
				"default_zone": "us-west-2b"
			}`,
			responseStatus: http.StatusOK,
			wantURI:        "/api/2.0/clusters/list-zones",
			want: ZonesInfo{
				Zones: []string{"us-west-2b",
					"us-west-2c",
					"us-west-2a"},
				DefaultZone: "us-west-2b",
			},
			wantErr: false,
		},
		{
			name:           "ListZones failure test",
			response:       ``,
			responseStatus: http.StatusBadRequest,
			wantURI:        "/api/2.0/clusters/list-zones",
			want:           ZonesInfo{},
			wantErr:        true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			qa.AssertRequestWithMockServer(t, nil, http.MethodGet, tt.wantURI, nil, tt.response, tt.responseStatus, tt.want, tt.wantErr, func(client *common.DatabricksClient) (interface{}, error) {
				return NewClustersAPI(client).ListZones()
			})
		})
	}
}
// TestClustersAPI_ListNodeTypes: table test for ListNodeTypes — a full JSON
// node-type payload decodes into []NodeType; a 400 surfaces as an error.
func TestClustersAPI_ListNodeTypes(t *testing.T) {
	tests := []struct {
		name           string
		response       string
		responseStatus int
		wantURI        string
		want           interface{}
		wantErr        bool
	}{
		{
			name: "ListNodeTypes test",
			response: `{
				"node_types": [
					{
						"node_type_id": "r3.xlarge",
						"memory_mb": 31232,
						"num_cores": 4.0,
						"description": "r3.xlarge (deprecated)",
						"instance_type_id": "r3.xlarge",
						"is_deprecated": false,
						"category": "Memory Optimized",
						"support_ebs_volumes": true,
						"support_cluster_tags": true,
						"num_gpus": 0,
						"node_instance_type": {
							"instance_type_id": "r3.xlarge",
							"local_disks": 1,
							"local_disk_size_gb": 80
						},
						"is_hidden": false,
						"support_port_forwarding": true,
						"display_order": 1,
						"is_io_cache_enabled": false
					},
					{
						"node_type_id": "r3.2xlarge",
						"memory_mb": 62464,
						"num_cores": 8.0,
						"description": "r3.2xlarge (deprecated)",
						"instance_type_id": "r3.2xlarge",
						"is_deprecated": false,
						"category": "Memory Optimized",
						"support_ebs_volumes": true,
						"support_cluster_tags": true,
						"num_gpus": 0,
						"node_instance_type": {
							"instance_type_id": "r3.2xlarge",
							"local_disks": 1,
							"local_disk_size_gb": 160
						},
						"is_hidden": false,
						"support_port_forwarding": true,
						"display_order": 1,
						"is_io_cache_enabled": false
					}
				]
			}`,
			responseStatus: http.StatusOK,
			wantURI:        "/api/2.0/clusters/list-node-types",
			want: []NodeType{
				{
					NodeTypeID:         "r3.xlarge",
					MemoryMB:           31232,
					NumCores:           4.0,
					Description:        "r3.xlarge (deprecated)",
					InstanceTypeID:     "r3.xlarge",
					IsDeprecated:       false,
					Category:           "Memory Optimized",
					SupportEBSVolumes:  true,
					SupportClusterTags: true,
					NumGPUs:            0,
					NodeInstanceType: &NodeInstanceType{
						InstanceTypeID:  "r3.xlarge",
						LocalDisks:      1,
						LocalDiskSizeGB: 80,
					},
					IsHidden:              false,
					SupportPortForwarding: true,
					DisplayOrder:          1,
					IsIOCacheEnabled:      false,
				},
				{
					NodeTypeID:         "r3.2xlarge",
					MemoryMB:           62464,
					NumCores:           8.0,
					Description:        "r3.2xlarge (deprecated)",
					InstanceTypeID:     "r3.2xlarge",
					IsDeprecated:       false,
					Category:           "Memory Optimized",
					SupportEBSVolumes:  true,
					SupportClusterTags: true,
					NumGPUs:            0,
					NodeInstanceType: &NodeInstanceType{
						InstanceTypeID:  "r3.2xlarge",
						LocalDisks:      1,
						LocalDiskSizeGB: 160,
					},
					IsHidden:              false,
					SupportPortForwarding: true,
					DisplayOrder:          1,
					IsIOCacheEnabled:      false,
				},
			},
			wantErr: false,
		},
		{
			name:           "ListNodeTypes failure test",
			response:       ``,
			responseStatus: http.StatusBadRequest,
			wantURI:        "/api/2.0/clusters/list-node-types",
			want:           []NodeType{},
			wantErr:        true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			qa.AssertRequestWithMockServer(t, nil, http.MethodGet, tt.wantURI, nil, tt.response, tt.responseStatus, tt.want, tt.wantErr, func(client *common.DatabricksClient) (interface{}, error) {
				return NewClustersAPI(client).ListNodeTypes()
			})
		})
	}
}
// TestClustersAPI_Restart: table test for Restart — a 200 with an empty
// body succeeds; a 400 surfaces as an error.
func TestClustersAPI_Restart(t *testing.T) {
	type args struct {
		ClusterID string `json:"cluster_id,omitempty"`
	}
	tests := []struct {
		name           string
		response       string
		responseStatus int
		args           args
		want           interface{}
		wantErr        bool
	}{
		{
			name:           "Restart test",
			response:       ``,
			responseStatus: http.StatusOK,
			args: args{
				ClusterID: "my-cluster-id",
			},
			want:    nil,
			wantErr: false,
		},
		{
			// Fixed typo in the test name: "faulure" -> "failure".
			name:           "Restart failure test",
			response:       "",
			responseStatus: http.StatusBadRequest,
			args: args{
				ClusterID: "my-cluster-id",
			},
			want:    nil,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var input args
			qa.AssertRequestWithMockServer(t, &tt.args, http.MethodPost, "/api/2.0/clusters/restart", &input, tt.response, tt.responseStatus, tt.want, tt.wantErr, func(client *common.DatabricksClient) (interface{}, error) {
				return nil, NewClustersAPI(client).Restart(tt.args.ClusterID)
			})
		})
	}
}
// TestClustersAPI_Pin: table test for Pin — a 200 with an empty body
// succeeds; a 400 surfaces as an error.
func TestClustersAPI_Pin(t *testing.T) {
	type args struct {
		ClusterID string `json:"cluster_id,omitempty"`
	}
	tests := []struct {
		name           string
		response       string
		responseStatus int
		args           args
		want           interface{}
		wantErr        bool
	}{
		{
			name:           "Pin test",
			response:       ``,
			responseStatus: http.StatusOK,
			args: args{
				ClusterID: "my-cluster-id",
			},
			want:    nil,
			wantErr: false,
		},
		{
			// Fixed typo in the test name: "faulure" -> "failure".
			name:           "Pin failure test",
			response:       "",
			responseStatus: http.StatusBadRequest,
			args: args{
				ClusterID: "my-cluster-id",
			},
			want:    nil,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var input args
			qa.AssertRequestWithMockServer(t, &tt.args, http.MethodPost, "/api/2.0/clusters/pin", &input, tt.response, tt.responseStatus, tt.want, tt.wantErr, func(client *common.DatabricksClient) (interface{}, error) {
				return nil, NewClustersAPI(client).Pin(tt.args.ClusterID)
			})
		})
	}
}
// TestClustersAPI_Unpin: table test for Unpin — a 200 with an empty body
// succeeds; a 400 surfaces as an error.
func TestClustersAPI_Unpin(t *testing.T) {
	type args struct {
		ClusterID string `json:"cluster_id,omitempty"`
	}
	tests := []struct {
		name           string
		response       string
		responseStatus int
		args           args
		want           interface{}
		wantErr        bool
	}{
		{
			name:           "Unpin test",
			response:       ``,
			responseStatus: http.StatusOK,
			args: args{
				ClusterID: "my-cluster-id",
			},
			want:    nil,
			wantErr: false,
		},
		{
			// Fixed typo in the test name: "faulure" -> "failure".
			name:           "Unpin failure test",
			response:       "",
			responseStatus: http.StatusBadRequest,
			args: args{
				ClusterID: "my-cluster-id",
			},
			want:    nil,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var input args
			qa.AssertRequestWithMockServer(t, &tt.args, http.MethodPost, "/api/2.0/clusters/unpin", &input, tt.response, tt.responseStatus, tt.want, tt.wantErr, func(client *common.DatabricksClient) (interface{}, error) {
				return nil, NewClustersAPI(client).Unpin(tt.args.ClusterID)
			})
		})
	}
}
// TestAccListClustersIntegration is an acceptance test against a real
// workspace (skipped unless CLOUD_ENV is set): it creates a cluster, pins
// it, verifies it is RUNNING, then terminates, unpins and permanently
// deletes it via the deferred cleanup.
func TestAccListClustersIntegration(t *testing.T) {
	cloudEnv := os.Getenv("CLOUD_ENV")
	if cloudEnv == "" {
		t.Skip("Acceptance tests skipped unless env 'CLOUD_ENV' is set")
	}
	client := common.CommonEnvironmentClient()
	randomName := qa.RandomName()
	cluster := Cluster{
		NumWorkers:             1,
		ClusterName:            "Terraform Integration Test " + randomName,
		SparkVersion:           CommonRuntimeVersion(),
		InstancePoolID:         CommonInstancePoolID(),
		IdempotencyToken:       "acc-list-" + randomName,
		AutoterminationMinutes: 15,
	}
	clusterReadInfo, err := NewClustersAPI(client).Create(cluster)
	assert.NoError(t, err, err)
	assert.True(t, clusterReadInfo.NumWorkers == cluster.NumWorkers)
	assert.True(t, clusterReadInfo.ClusterName == cluster.ClusterName)
	assert.True(t, reflect.DeepEqual(clusterReadInfo.SparkEnvVars, cluster.SparkEnvVars))
	assert.True(t, clusterReadInfo.SparkVersion == cluster.SparkVersion)
	assert.True(t, clusterReadInfo.AutoterminationMinutes == cluster.AutoterminationMinutes)
	assert.True(t, clusterReadInfo.State == ClusterStateRunning)
	// Cleanup runs even if the assertions below fail.
	defer func() {
		err = NewClustersAPI(client).Terminate(clusterReadInfo.ClusterID)
		assert.NoError(t, err, err)
		clusterReadInfo, err = NewClustersAPI(client).Get(clusterReadInfo.ClusterID)
		assert.NoError(t, err, err)
		assert.True(t, clusterReadInfo.State == ClusterStateTerminated)
		err = NewClustersAPI(client).Unpin(clusterReadInfo.ClusterID)
		assert.NoError(t, err, err)
		err = NewClustersAPI(client).PermanentDelete(clusterReadInfo.ClusterID)
		assert.NoError(t, err, err)
	}()
	err = NewClustersAPI(client).Pin(clusterReadInfo.ClusterID)
	assert.NoError(t, err, err)
	clusterReadInfo, err = NewClustersAPI(client).Get(clusterReadInfo.ClusterID)
	assert.NoError(t, err, err)
	assert.True(t, clusterReadInfo.State == ClusterStateRunning)
}
// TestClusters_SortNodeTypes_Deprecated verifies that a deprecated node type
// is never chosen while a non-deprecated alternative exists.
func TestClusters_SortNodeTypes_Deprecated(t *testing.T) {
	candidates := []NodeType{
		{NodeTypeID: "deprecated1", IsDeprecated: true},
		{NodeTypeID: "not deprecated", IsDeprecated: false},
		{NodeTypeID: "deprecated2", IsDeprecated: true},
	}
	picked := getSmallestNodeType(candidates)
	assert.Equal(t, "not deprecated", picked.NodeTypeID)
}
// TestClusters_SortNodeTypes_Memory verifies that the candidate with the
// least memory is selected.
func TestClusters_SortNodeTypes_Memory(t *testing.T) {
	candidates := []NodeType{
		{MemoryMB: 3, NodeTypeID: "3"},
		{MemoryMB: 1, NodeTypeID: "1"},
		{MemoryMB: 2, NodeTypeID: "2"},
		{MemoryMB: 2, NodeTypeID: "another 2"},
	}
	assert.Equal(t, "1", getSmallestNodeType(candidates).NodeTypeID)
}
// TestClusters_SortNodeTypes_CPU verifies that the candidate with the fewest
// cores is selected.
func TestClusters_SortNodeTypes_CPU(t *testing.T) {
	candidates := []NodeType{
		{NumCores: 3, NodeTypeID: "3"},
		{NumCores: 1, NodeTypeID: "1"},
		{NumCores: 2, NodeTypeID: "2"},
		{NumCores: 1, NodeTypeID: "another 1"},
	}
	assert.Equal(t, "1", getSmallestNodeType(candidates).NodeTypeID)
}
// TestClusters_SortNodeTypes_GPU verifies that the candidate with the fewest
// GPUs is selected.
func TestClusters_SortNodeTypes_GPU(t *testing.T) {
	candidates := []NodeType{
		{NumGPUs: 3, NodeTypeID: "3"},
		{NumGPUs: 1, NodeTypeID: "1"},
		{NumGPUs: 2, NodeTypeID: "2"},
		{NumGPUs: 1, NodeTypeID: "another 1"},
	}
	assert.Equal(t, "1", getSmallestNodeType(candidates).NodeTypeID)
}
// TestClusters_SortNodeTypes_CPU_Deprecated verifies that deprecation status
// outranks core count: the smallest non-deprecated type wins even when a
// deprecated type is equally small.
func TestClusters_SortNodeTypes_CPU_Deprecated(t *testing.T) {
	candidates := []NodeType{
		{NumCores: 3, IsDeprecated: false, NodeTypeID: "3 not deprecated"},
		{NumCores: 1, IsDeprecated: true, NodeTypeID: "1 deprecated"},
		{NumCores: 2, IsDeprecated: false, NodeTypeID: "2 not deprecated"},
		{NumCores: 1, IsDeprecated: false, NodeTypeID: "1 not deprecated"},
		{NumCores: 2, IsDeprecated: true, NodeTypeID: "2 deprecated"},
	}
	assert.Equal(t, "1 not deprecated", getSmallestNodeType(candidates).NodeTypeID)
}
// TestClusters_SortNodeTypes_LocalDisks verifies that the candidate with the
// fewest local disks is selected.
func TestClusters_SortNodeTypes_LocalDisks(t *testing.T) {
	candidates := []NodeType{
		{NodeInstanceType: &NodeInstanceType{LocalDisks: 3}, NodeTypeID: "3"},
		{NodeInstanceType: &NodeInstanceType{LocalDisks: 1}, NodeTypeID: "1"},
		{NodeInstanceType: &NodeInstanceType{LocalDisks: 2}, NodeTypeID: "2"},
		{NodeInstanceType: &NodeInstanceType{LocalDisks: 3}, NodeTypeID: "another 3"},
	}
	assert.Equal(t, "1", getSmallestNodeType(candidates).NodeTypeID)
}
// TestAwsAccSmallestNodeType checks that the smallest AWS node type with
// local storage resolves to m5d.large. Requires CLOUD_ENV to be set.
func TestAwsAccSmallestNodeType(t *testing.T) {
	if os.Getenv("CLOUD_ENV") == "" {
		t.Skip("Acceptance tests skipped unless env 'CLOUD_ENV' is set")
	}
	client := common.CommonEnvironmentClient()
	assert.Equal(t, "m5d.large", NewClustersAPI(client).GetSmallestNodeTypeWithStorage())
}
|
package main
import (
"encoding/json"
"github.com/ethereum/go-ethereum/common"
"log"
"math/big"
"sub_account_service/blockchain_server/arguments"
"sub_account_service/blockchain_server/config"
"sub_account_service/blockchain_server/contracts"
myeth "sub_account_service/blockchain_server/lib/eth"
"time"
)
var (
	// Test accounts: each 0x-prefixed address is paired with the keystore
	// JSON blob ("describe*") that unlocks it.
	newAddress3 = "0x56a58d378fd5647de22bf10007ab2f49e47d83b7"
	describe3   = `{"address":"56a58d378fd5647de22bf10007ab2f49e47d83b7","crypto":{"cipher":"aes-128-ctr","ciphertext":"01b960ea11abb9baa2d9f5e4f8cec0eaaa2a6165f6a3c63b1fb3a767e86ad729","cipherparams":{"iv":"20f30ca3334be401de850a6fd912aa4f"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"2e219cc58e1468cc95863f62ce9d1bf5bbdec467f13ed4ecd2bc996f11c56822"},"mac":"6faf19ee181fbfd076a446854e2348020408b34e120824edc69085f2306bfa26"},"id":"44e95601-9851-422e-ae00-c0a481436fe6","version":3}`
	// addmanager_samrt_addr holds the sub-account contract address; it is
	// overwritten by SubDeploy after a fresh deployment.
	addmanager_samrt_addr = "0x6536dbd4df44fb7b85654866d91a5d9755b0046d"
	//addmanager_samrt_addr = "0xcd146c990330b536a274c4b97fcfd297e02d3a0b"
	newAddress = "0x38e7005d85117f7fc3b3508bb8adcf68ebb3cef9"
	describe   = `{"address":"38e7005d85117f7fc3b3508bb8adcf68ebb3cef9","crypto":{"cipher":"aes-128-ctr","ciphertext":"e99faebbc6a4fa1149e9f1bc23db42b9d640af9367357a14e7840d98feb52166","cipherparams":{"iv":"c41d79cda794cabc8b6ec07b7762c393"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"5c9062cb8b675475474537a044552dec7dcf5c3bb929490bc644612f274c88d9"},"mac":"d84469064184122378bea0719fa3673da25d4ee2eb7050ad3fd1c96b7e89a127"},"id":"287e6657-6539-4ed5-8d99-a8b8f24708d0","version":3}`
	newAddress2 = "0x642b69852c7ac97fbc9e1db06cabde7be313ed76"
	describe2   = `{"address":"642b69852c7ac97fbc9e1db06cabde7be313ed76","crypto":{"cipher":"aes-128-ctr","ciphertext":"355399c0be2cae5b5a2fab174f6021fbf427d763d6f91b1e5465920546edfa88","cipherparams":{"iv":"d9723017654f7f5b7056682d2559f5ed"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"1a7035be42a0eadf5136c1b6795514545c3808329f56ec022434b50065226f98"},"mac":"2d3782038b0607845fc2d168c17f32d17643fc268d3b0bbe1b009fd78414948d"},"id":"6debd6e5-3064-4938-ba41-89d0b8f53c64","version":3}`
)
// InitConf loads the TOML configuration from its hard-coded path and prepares
// the nonce bookkeeping used when submitting transactions.
func InitConf() {
	config.ParseToml("/root/work/launch_pf/src/sub_account_service/blockchain_server/conf/config.toml") // initialize configuration
	myeth.NewNonceMap()
	log.Println("init conf")
}
// SubDeploy deploys a fresh sub-account contract with the hard-coded apple1
// test key and stores the resulting contract address in the package-level
// addmanager_samrt_addr so the other helpers can use it.
func SubDeploy() {
	sargs := arguments.DeployArguments{"apple1", "sub_account1", newAddress3, "apple test,start1d 11:00"}
	key1 := myeth.ParseKeyStore(describe3, "dev3_apple1")
	key_string, _ := json.Marshal(key1)
	subaccount_smart_add, subaccount_smart_name, _ := contracts.Deploy(string(key_string), sargs)
	log.Println("subaccount_smart_add:", subaccount_smart_add, "subaccount_smart_name:", subaccount_smart_name)
	addmanager_samrt_addr = subaccount_smart_add
	//bindAccountInfos()
	//code := "test:a8f9037a3cdb6d42fa4c0eae1e727e62"
	//QueryTestSub(code)
	//time.Sleep(5 * time.Second)
	//queryBalance()
}
// bindAccountInfos registers the same test account against the deployed
// sub-account contract 20 times, pausing one second between calls.
func bindAccountInfos() {
	signingKey := myeth.ParseKeyStore(describe3, "dev3_apple1")
	keyJSON, _ := json.Marshal(signingKey)
	for i := 0; i < 20; i++ {
		account := arguments.AccountArguments{newAddress3, "testApple", "", "", "", ""}
		contracts.BindAccountInfos(string(keyJSON), addmanager_samrt_addr, account)
		time.Sleep(1 * time.Second)
	}
}
// QueryTestSub checks whether the given sub-account code is registered, then
// configures a 3-way distribution ratio for roles a/b/c and issues a
// scheduling run against a whitelist of the three test addresses.
// NOTE(review): the ratios 3333/50000/6667 do not sum to an obvious base
// (e.g. 10000) — confirm the contract's expected denominator.
func QueryTestSub(code string) {
	//key1 := eth.ParseKeyStore(describe3, "dev3_apple1")
	key1 := myeth.ParseKeyStore(describe, "apple1")
	key_string, _ := json.Marshal(key1)
	isok, _ := contracts.CheckSubCodeIsOk(addmanager_samrt_addr, code)
	log.Println("isok ***************************** :", isok)
	//if isok == false {
	ratio := []*big.Int{big.NewInt(3333), big.NewInt(50000), big.NewInt(6667)}
	subWay := []*big.Int{big.NewInt(0), big.NewInt(1), big.NewInt(0)}
	quotaWay := []*big.Int{big.NewInt(0), big.NewInt(1), big.NewInt(0)}
	resetTime := []*big.Int{big.NewInt(0), big.NewInt(2), big.NewInt(0)}
	roles := [][32]byte{[32]byte{'a'}, [32]byte{'b'}, [32]byte{'c'}}
	pargs := arguments.DistributionArguments{addmanager_samrt_addr, code, roles, ratio, subWay, quotaWay, resetTime}
	contracts.SetDistributionRatio(string(key_string), pargs)
	// Wait for the transaction to be mined before scheduling.
	time.Sleep(60 * time.Second)
	//}
	//sub_arr, _ := contracts.GetbindSubCode(addmanager_samrt_addr, newAddress)
	//for sdx := 0; sdx < len(sub_arr); sdx++ {
	//	contracts.GetIssueSubCxtLen(addmanager_samrt_addr, sub_arr[sdx])
	//
	//	contracts.GetDistributionRatioByCode(addmanager_samrt_addr, sub_arr[sdx])
	//}
	//contracts.GetDistributionRatioByCode(addmanager_samrt_addr, code)
	white := []string{newAddress, newAddress2, newAddress3}
	contracts.IssueScheduling(string(key_string), addmanager_samrt_addr, code, roles, white)
	//if len(sub_arr) > 0 {
	//	lshuih := "testaph0xxq120"
	//	queryBalance(code, lshuih)
	//}
	// Order content attestation
	//subaccount.SetOrdersContent(describe, "apple1", subaccount_smart_add, "0x23888b05804ed02066d07da852ee974b04dc", "sub account frist test")
	//subaccount.SetOrdersContent(describe, "apple1", subaccount_smart_add, "0x23888b05804ed02066d07da852ee974b04dd", "sub account frist test2222222222222")
}
// queryFun fetches the scheduling context stored for the given sub-account code.
func queryFun(code string) {
	contracts.GetSchedulingCxt(addmanager_samrt_addr, code)
}
// queryBalance settles an amount of 20000 under the settlement id lshuih,
// waits for mining, updates each sub-address ledger entry, then dumps the
// resulting ledger and quota data.
func queryBalance(code string, lshuih string) {
	key1 := myeth.ParseKeyStore(describe3, "dev3_apple1")
	key_string, _ := json.Marshal(key1)
	contracts.SettleAccounts(string(key_string), addmanager_samrt_addr, code, big.NewInt(20000), lshuih)
	// Wait for the settlement transaction to be mined.
	time.Sleep(60 * time.Second)
	sub_arr, _ := contracts.GetgetLedgerSubAddrs(addmanager_samrt_addr, lshuih)
	for sdx := 0; sdx < len(sub_arr); sdx++ {
		contracts.GetOneLedgerCxt(addmanager_samrt_addr, lshuih, common.HexToAddress(sub_arr[sdx]))
		transferDetails := "update test by 1 apple"
		contracts.UpdateCalulateLedger(string(key_string), addmanager_samrt_addr, lshuih, transferDetails, common.HexToAddress(sub_arr[sdx]))
	}
	//
	// Wait again so the ledger updates are mined before re-reading.
	time.Sleep(60 * time.Second)
	for sdx := 0; sdx < len(sub_arr); sdx++ {
		contracts.GetOneLedgerCxt(addmanager_samrt_addr, lshuih, common.HexToAddress(sub_arr[sdx]))
	}
	queryLedger(code, lshuih)
	contracts.GetSubCodeQuotaData(addmanager_samrt_addr, code, newAddress)
	//AppliceTestAB()
}
// queryLedger dumps the distribution ratio for code plus the per-address
// ledger and quota data recorded under the settlement id lshuih.
func queryLedger(code string, lshuih string) {
	contracts.GetDistributionRatioByCode(addmanager_samrt_addr, code)
	sub_arr, _ := contracts.GetgetLedgerSubAddrs(addmanager_samrt_addr, lshuih)
	for sdx := 0; sdx < len(sub_arr); sdx++ {
		contracts.GetOneLedgerCxt(addmanager_samrt_addr, lshuih, common.HexToAddress(sub_arr[sdx]))
		contracts.GetSubCodeQuotaData(addmanager_samrt_addr, code, sub_arr[sdx])
	}
}
// AppliceTestAB fetches the contract balance for each of the three test
// addresses.
func AppliceTestAB() {
	//code := "mjn:7c4eedbe4cce25f3f8f9052772d9e3aa"
	//contracts.GetSubCodeQuotaData(addmanager_samrt_addr, code, newAddress)
	contracts.GetSubCodeBalance(addmanager_samrt_addr, newAddress)
	contracts.GetSubCodeBalance(addmanager_samrt_addr, newAddress2)
	contracts.GetSubCodeBalance(addmanager_samrt_addr, newAddress3)
}
// GetAllLedges fetches quota data for two hard-coded addresses under a fixed
// test sub-account code.
func GetAllLedges() {
	code := "AC:a8f9037a3cdb6d42fa4c0eae1e727e62"
	contracts.GetSubCodeQuotaData(addmanager_samrt_addr, code, "0xe30983c8a9a7e6f899011dae309cbcb1d20e181a")
	contracts.GetSubCodeQuotaData(addmanager_samrt_addr, code, "0x76daf8cb871a6c3f4616475e99e00725773c1b83")
	//codeList,_ := contracts.GetAllSubCodes(addmanager_samrt_addr)
	//
	//for cdx:=0;cdx<len(codeList);cdx++{
	//
	//}
}
// main loads the configuration and deploys a fresh sub-account contract.
// The commented-out calls below form the manual test menu for this tool.
func main() {
	InitConf()
	//GetAllLedges()
	//code := "AC:a8f9037a3cdb6d42fa4c0eae1e727e62" // txsd012 3333 3333 3334
	SubDeploy()
	//AppliceTestAB()
	//QueryTestSub(code)
	//queryFun(code)
	//bindAccountInfos()
	//lshuih := "testaph0xxq120"
	//queryBalance(code,lshuih)
	//queryLedger(code,lshuih)
	//lsuh2 := "2018063021001004000597467936"
	//queryLedger(code,lsuh2)
	//AppliceTestAB()
}
|
package main
import (
"fmt"
"time"
)
// grade maps a numeric score to a letter grade. The score is truncated to an
// integer first: 9-10 => "A", 7-8 => "B", 4-6 => "C", anything else => "D".
func grade(n float64) string {
	switch int(n) {
	case 9, 10:
		return "A"
	case 7, 8:
		return "B"
	case 4, 5, 6:
		return "C"
	}
	return "D"
}
// main prints a sample grade, a time-of-day greeting based on the current
// hour, and the result of theType(2.1).
// NOTE(review): theType is not defined in this file — confirm it exists
// elsewhere in the package, otherwise this will not compile.
func main() {
	fmt.Println(grade(3))
	t := time.Now()
	switch {
	case t.Hour() < 12:
		fmt.Println("Morning")
	case t.Hour() < 18:
		fmt.Println("Afternoon")
	default:
		fmt.Println("Night")
	}
	fmt.Println(theType(2.1))
}
|
// Copyright 2015-2018 trivago N.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package consumer
import (
"fmt"
"io"
"io/ioutil"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"gollum/core"
"github.com/sirupsen/logrus"
"github.com/trivago/tgo"
"github.com/trivago/tgo/tio"
)
const (
	// fileBufferGrowSize is the step, in bytes, by which the buffered reader
	// grows when a message does not fit.
	fileBufferGrowSize = 1024
	// Accepted values for the DefaultOffset parameter.
	fileOffsetStart = "oldest"
	fileOffsetEnd   = "newest"
)

const (
	// Accepted values for the ObserveMode parameter.
	observeModePoll  = "poll"
	observeModeWatch = "watch"
)

const (
	// Named booleans passed to observeFile: whether to give up when the
	// observed file does not exist.
	stopIfNotExist  = true
	retryIfNotExist = false
)
// File consumer plugin
//
// The File consumer reads messages from a file, looking for a customizable
// delimiter sequence that marks the end of a message. If the file is part of
// e.g. a log rotation, the consumer can be set to read from a symbolic link
// pointing to the current file and (optionally) be told to reopen the file
// by sending a SIGHUP. A symlink to a file will automatically be reopened
// if the underlying file is changed.
//
// Metadata
//
// *NOTE: The metadata will only be set if the parameter `SetMetadata` is active.*
//
// - file: The file name of the consumed file (set)
//
// - dir: The directory of the consumed file (set)
//
// Parameters
//
// - File: This value is a mandatory setting and contains the name of the
// file to read. This field supports glob patterns.
// If the file pointed to is a symlink, changes to the symlink will be
// detected. The file will be watched for changes, so active logfiles can
// be scraped, too.
//
// - OffsetFilePath: This value defines a path where the individual, current
// file offsets are stored. The filename will be the name and extension of the
// source file plus the extension ".offset". If the consumer is restarted,
// these offset files are used to continue reading from the previous position.
// To disable this setting, set it to "".
// By default this parameter is set to "".
//
// - Delimiter: This value defines the delimiter sequence to expect at the
// end of each message in the file.
// By default this parameter is set to "\n".
//
// - ObserveMode: This value select how the source file is observed. Available
// values are `poll` and `watch`. NOTE: The watch implementation uses
// the [fsnotify/fsnotify](https://github.com/fsnotify/fsnotify) package.
// If your source file is rotated (moved or removed), please verify that
// your file system and distribution support the `RENAME` and `REMOVE` events;
// the consumer's stability depends on them.
// By default this parameter is set to `poll`.
//
// - DefaultOffset: This value defines the default offset from which to start
// reading within the file. Valid values are "oldest" and "newest". If OffsetFile
// is defined and the file exists, the DefaultOffset parameter is ignored.
// By default this parameter is set to "newest".
//
// - PollingDelayMs: This value defines the duration in milliseconds the consumer
// waits between checking the source file for new content after hitting the
// end of file (EOF). NOTE: This settings only takes effect if the consumer is
// running in `poll` mode!
// By default this parameter is set to "100".
//
// - RetryDelaySec: This value defines the duration in seconds the consumer waits
// between retries, e.g. after not being able to open a file.
// By default this parameter is set to "3".
//
// - DirScanIntervalSec: Only applies when using globs. This setting will define the
// interval in seconds in which the glob will be re-evaluated and new files can be
// scraped. By default this parameter is set to "10".
//
// - SetMetadata: When this value is set to "true", the fields mentioned in the metadata
// section will be added to each message. Adding metadata will have a
// performance impact on systems with high throughput.
// By default this parameter is set to "false".
//
// - BlackList: A regular expression matching file paths to NOT read. When both
// BlackList and WhiteList are defined, the WhiteList takes precedence.
// This setting is only used when glob expressions (*, ?) are present in the
// filename. The path checked is the one before symlink evaluation.
// By default this parameter is set to "".
//
// - WhiteList: A regular expression matching file paths to read. When both
// BlackList and WhiteList are defined, the WhiteList takes precedence.
// This setting is only used when glob expressions (*, ?) are present in the
// filename. The path checked is the one before symlink evaluation.
// By default this parameter is set to "".
//
// Examples
//
// This example will read all the `.log` files `/var/log/` into one stream and
// create a message for each new entry. If the file starts with `sys` it is ignored
//
// FileIn:
// Type: consumer.File
// File: /var/log/*.log
// BlackList '^sys.*'
// DefaultOffset: newest
// OffsetFilePath: ""
// Delimiter: "\n"
// ObserveMode: poll
// PollingDelay: 100
//
// File implements the file consumer plugin; see the package documentation
// above for the full parameter reference.
type File struct {
	core.SimpleConsumer `gollumdoc:"embed_type"`
	// NOTE(review): the tag key is "Files" while the documentation above
	// names the setting "File" — confirm which key the config reader expects.
	fileName         string        `config:"Files" default:"/var/log/*.log"`
	offsetFilePath   string        `config:"OffsetFilePath"`
	pollingDelay     time.Duration `config:"PollingDelayMs" default:"100" metric:"ms"`
	retryDelay       time.Duration `config:"RetryDelaySec" default:"3" metric:"s"`
	dirScanInterval  time.Duration `config:"DirScanIntervalSec" default:"10" metric:"s"`
	delimiter        string        `config:"Delimiter" default:"\n"`
	observeMode      string        `config:"ObserveMode" default:"poll"`
	hasToSetMetadata bool          `config:"SetMetadata" default:"false"`
	defaultOffset    string        `config:"DefaultOffset" default:"newest"`
	blackListString  string        `config:"BlackList"`
	whiteListString  string        `config:"WhiteList"`
	// observedFiles maps file path -> observation state for watched files.
	observedFiles *sync.Map
	// done is closed on shutdown to stop all observer goroutines.
	done chan struct{}
	// isBlackListed reports whether a glob-matched path must be skipped.
	isBlackListed func(string) bool
}
// init registers the File consumer with gollum's global type registry so it
// can be instantiated from configuration by name.
func init() {
	core.TypeRegistry.Register(File{})
}
// Configure initializes this consumer with values from a plugin config.
func (cons *File) Configure(conf core.PluginConfigReader) {
	cons.done = make(chan struct{})
	cons.observedFiles = new(sync.Map)

	// TODO: support manual roll again
	//cons.SetRollCallback(cons.onRoll)

	cons.SetStopCallback(func() {
		close(cons.done)
	})

	// Restore the default observe mode for invalid config settings.
	switch cons.observeMode {
	case observeModePoll, observeModeWatch:
		// valid as configured
	default:
		cons.Logger.Warningf("Unknown observe mode '%s'. Using poll", cons.observeMode)
		cons.observeMode = observeModePoll
	}

	cons.configureBlacklist(conf)
}
// configureBlacklist compiles the optional BlackList/WhiteList expressions
// and installs the matching predicate. A path is skipped when it matches the
// blacklist and does not match the whitelist; the whitelist takes precedence.
func (cons *File) configureBlacklist(conf core.PluginConfigReader) {
	var (
		err       error
		blackList *regexp.Regexp
		whiteList *regexp.Regexp
	)

	if cons.blackListString != "" {
		blackList, err = regexp.Compile(cons.blackListString)
		conf.Errors.Push(err)
	}
	if cons.whiteListString != "" {
		whiteList, err = regexp.Compile(cons.whiteListString)
		conf.Errors.Push(err)
	}

	// Install the cheapest predicate that honors the configured lists.
	if blackList == nil && whiteList == nil {
		cons.isBlackListed = func(string) bool { return false }
		return
	}
	if blackList == nil {
		cons.isBlackListed = func(path string) bool { return !whiteList.MatchString(path) }
		return
	}
	if whiteList == nil {
		cons.isBlackListed = func(path string) bool { return blackList.MatchString(path) }
		return
	}
	cons.isBlackListed = func(path string) bool {
		return blackList.MatchString(path) && !whiteList.MatchString(path)
	}
}
// newObservedFile builds the per-file observation state for the given file
// path, restoring the read offset from the configured offset file when one
// exists and falling back to the configured DefaultOffset otherwise.
func (cons *File) newObservedFile(name string, stopIfNotExist bool) *observableFile {
	logger := cons.Logger.WithFields(logrus.Fields{
		"File": name,
	})

	offsetFileName := ""
	defaultOffset := strings.ToLower(cons.defaultOffset)
	cursor := fileCursor{whence: io.SeekStart}

	switch {
	case cons.offsetFilePath != "":
		// Derive the offset file from the observed file's own name. The
		// previous code used cons.fileName (the raw config value) here, so
		// with glob patterns every matched file shared one offset file whose
		// name still contained the glob characters.
		offsetFileName = fmt.Sprintf("%s/%s.offset", cons.offsetFilePath, filepath.Base(name))
		if offsetFileData, err := ioutil.ReadFile(offsetFileName); err != nil {
			logger.WithError(err).Errorf("Failed to open offset file %s", offsetFileName)
		} else {
			if offset, err := strconv.ParseInt(string(offsetFileData), 10, 64); err != nil {
				logger.WithError(err).Errorf("Error reading offset number from %s", offsetFileName)
			} else {
				cursor.offset = offset
			}
		}

	case defaultOffset == fileOffsetEnd:
		cursor.whence = io.SeekEnd

	case defaultOffset == fileOffsetStart:
		// Start of file: the zero-value cursor already points there.

	default:
		// Unknown DefaultOffset values behave like "oldest".
	}

	logger.Info("Starting file scraper")

	return &observableFile{
		fileName:       name,
		offsetFileName: offsetFileName,
		cursor:         cursor,
		stopIfNotExist: stopIfNotExist,
		retryDelay:     cons.retryDelay,
		pollDelay:      cons.pollingDelay,
		buffer:         tio.NewBufferedReader(fileBufferGrowSize, tio.BufferedReaderFlagDelimiter, 0, cons.delimiter),
		log:            logger,
	}
}
// observeFile tails a single file until the consumer shuts down, emitting one
// message per delimited chunk. It registers itself in cons.observedFiles for
// the lifetime of the observation and removes the entry on exit.
func (cons *File) observeFile(name string, stopIfNotExist bool) {
	defer cons.WorkerDone()

	file := cons.newObservedFile(name, stopIfNotExist)
	defer file.close()

	cons.observedFiles.Store(name, file)
	defer cons.observedFiles.Delete(name)

	enqueue := cons.Enqueue
	if cons.hasToSetMetadata {
		dirName, fileName := filepath.Split(name)
		enqueue = func(data []byte) {
			metaData := core.NewMetadata()
			metaData.Set("file", fileName)
			metaData.Set("dir", dirName)
			cons.EnqueueWithMetadata(data, metaData)
		}
	}

	if cons.offsetFilePath != "" {
		// Wrap the currently selected enqueue function instead of replacing
		// it: the previous code called cons.Enqueue directly here, silently
		// dropping metadata when SetMetadata and OffsetFilePath were both
		// configured.
		baseEnqueue := enqueue
		enqueue = func(data []byte) {
			baseEnqueue(data)
			file.storeOffset()
		}
	}

	switch cons.observeMode {
	case observeModeWatch:
		file.observeFSNotify(enqueue, cons.done)
	default:
		file.observePoll(enqueue, cons.done)
	}
}
// observeFiles observes the single configured file directly (no glob
// characters), or, for glob patterns, re-evaluates the glob every
// dirScanInterval and spawns one observer goroutine per new,
// non-blacklisted match.
func (cons *File) observeFiles() {
	defer cons.WorkerDone()
	// Fast path: a plain filename is observed on this goroutine.
	if !strings.ContainsAny(cons.fileName, "*?") {
		cons.observeFile(cons.fileName, retryIfNotExist) // blocking
		return
	}
	// Glob needs to be re-evaluated to find new files.
	for {
		fileNames, err := filepath.Glob(cons.fileName)
		if err != nil {
			cons.Logger.Warningf("Failed to evaluate glob '%s'", cons.fileName)
			return
		}
		cons.Logger.Debugf("Evaluating glob returned %d files to scrape", len(fileNames))
		for i := range fileNames {
			if cons.isBlackListed(fileNames[i]) {
				continue
			}
			// Files already being observed keep their existing goroutine.
			if _, ok := cons.observedFiles.Load(fileNames[i]); !ok {
				cons.AddWorker()
				go cons.observeFile(fileNames[i], stopIfNotExist)
			}
		}
		// Sleep until the next scan, or exit on shutdown.
		select {
		case <-time.After(cons.dirScanInterval):
		case <-cons.done:
			return
		}
	}
}
// Consume opens the given file(s) for reading in a recoverable worker
// goroutine and blocks in the consumer control loop until shutdown.
func (cons *File) Consume(workers *sync.WaitGroup) {
	go tgo.WithRecoverShutdown(func() {
		cons.AddMainWorker(workers)
		cons.observeFiles()
	})
	cons.ControlLoop()
}
|
package Problem0202
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
// tcs is the testcase slice: input n and the expected isHappy result.
var tcs = []struct {
	n   int
	ans bool
}{
	{
		2,
		false,
	},
	{
		77,
		false,
	},
	{
		19,
		true,
	},
	// more test cases can be added here
}
// Test_isHappy checks isHappy against every case in tcs, logging each input.
func Test_isHappy(t *testing.T) {
	ast := assert.New(t)
	for _, tc := range tcs {
		fmt.Printf("~~%v~~\n", tc)
		ast.Equal(tc.ans, isHappy(tc.n), "输入:%v", tc)
	}
}
// Benchmark_isHappy measures isHappy across the whole test-case set.
func Benchmark_isHappy(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for _, tc := range tcs {
			isHappy(tc.n)
		}
	}
}
// Benchmark_mult measures squaring the last digit with an inline
// multiplication, for comparison with Benchmark_square.
func Benchmark_mult(b *testing.B) {
	n := 12345
	for i := 0; i < b.N; i++ {
		_ = (n % 10) * (n % 10)
	}
}
// Benchmark_square measures squaring the last digit through the square
// helper, for comparison with Benchmark_mult.
func Benchmark_square(b *testing.B) {
	n := 12345
	for i := 0; i < b.N; i++ {
		_ = square(n % 10)
	}
}
// square returns a multiplied by itself.
func square(a int) int {
	result := a * a
	return result
}
|
package models
import (
//"time"
"github.com/astaxie/beego/orm"
_ "github.com/go-sql-driver/mysql"
)
// Album is the ORM model for a photo album record; it is stored in the
// "album" table (see Table).
type Album struct {
	Id       int
	Title    string
	Picture  string
	Keywords string
	Summary  string
	// Created looks like a unix timestamp (int64) — confirm against writers.
	Created int64
	Viewnum int
	Status  int
}
// Table returns the database table name backing the Album model.
// (Receiver renamed from the non-idiomatic "this" to the conventional
// short form.)
func (a *Album) Table() string {
	return "album"
}
// init registers the Album model with beego's ORM so it can be queried.
func init() {
	orm.RegisterModel(new(Album))
}
|
package model
import (
"time"
"github.com/guregu/null"
)
// RushNodeModel maps one row of the rush-node tree table; each node links to
// its parent by id, with roots carrying a null parent.
type RushNodeModel struct {
	Id       uint64 `json:"id" xorm:"id int"`
	NodeName string `json:"nodeName" xorm:"nodeName varchar(256)"`
	// ParentNodeId is null when this node is the root.
	ParentNodeId   null.Int `json:"parentNodeId" xorm:"parentNodeId int"`
	ParentNodeName string   `json:"parentNodeName" xorm:"parentNodeName varchar(256)"`
	// Url is the node's interface endpoint, e.g. https://1.1.1.1:8080
	Url string `json:"url" xorm:"url varchar(256)"`
	// IsSelfUrl is "true"/null: used by a vc to identify itself; rp rows do
	// not need it.
	IsSelfUrl string `json:"isSelfUrl" xorm:"isSelfUrl varchar(8)"`
	// State is a JSON document such as {"state":"valid"}; valid/invalid.
	State string `json:"state" xorm:"state json"`
	Note  string `json:"note" xorm:"note varchar(256)"`
	// UpdateTime is the row's last update time.
	UpdateTime time.Time `json:"updateTime" xorm:"updateTime datetime"`
}
|
package currency
import (
"context"
)
//go:generate mockgen -destination ./mock/mock_service.go -package mocsvc amis/pkg/currency/service CurrencyService

// CurrencyService fetches exchange-rate information for a coin.
type CurrencyService interface {
	// GetCurrency returns the quote for coin. The meaning of start is not
	// evident from this file — presumably a unix timestamp; confirm with the
	// implementation.
	GetCurrency(ctx context.Context, coin string, start int64) (*Currency, error)
}
// Currency is a single exchange-rate quote aggregated from Sources.
type Currency struct {
	Sources []string `json:"sources"`
	TWD     int      `json:"twd"`
	USD     float64  `json:"usd"`
	Time    string   `json:"time"`
}
|
package ionic_test
import (
"fmt"
"github.com/ion-channel/ionic"
"github.com/ion-channel/ionic/pagination"
)
// ExampleIonClient_GetVulnerabilities demonstrates listing all known
// vulnerabilities for a product ("jdk") without pinning a version.
func ExampleIonClient_GetVulnerabilities() {
	client, err := ionic.New("https://api.test.ionchannel.io")
	if err != nil {
		panic(fmt.Sprintf("Panic creating Ion Client: %v", err.Error()))
	}
	vulns, err := client.GetVulnerabilities("jdk", "", "atoken", pagination.AllItems)
	if err != nil {
		fmt.Println(err.Error())
	}
	fmt.Printf("Vulnerabilities: %v\n", vulns)
}
// ExampleIonClient_GetVulnerabilities_version demonstrates listing the
// vulnerabilities for a specific product version ("jdk" 1.7.0).
func ExampleIonClient_GetVulnerabilities_version() {
	client, err := ionic.New("https://api.test.ionchannel.io")
	if err != nil {
		panic(fmt.Sprintf("Panic creating Ion Client: %v", err.Error()))
	}
	vulns, err := client.GetVulnerabilities("jdk", "1.7.0", "atoken", pagination.AllItems)
	if err != nil {
		fmt.Println(err.Error())
	}
	fmt.Printf("Vulnerabilities: %v\n", vulns)
}
// ExampleIonClient_GetVulnerability demonstrates fetching a single
// vulnerability by id.
// NOTE(review): the id reads "CVD-2014-0030" — confirm it is not meant to be
// "CVE-2014-0030".
func ExampleIonClient_GetVulnerability() {
	client, err := ionic.New("https://api.test.ionchannel.io")
	if err != nil {
		panic(fmt.Sprintf("Panic creating Ion Client: %v", err.Error()))
	}
	vuln, err := client.GetVulnerability("CVD-2014-0030", "atoken")
	if err != nil {
		fmt.Println(err.Error())
	}
	fmt.Printf("Vulnerability: %v\n", vuln)
}
|
package mgr
import (
"errors"
"fmt"
"os"
"os/user"
"path/filepath"
"strconv"
"strings"
"syscall"
)
// FS is file system tree.
type FS struct {
	Name     string `json:"name"`     // directory name (relative to parent; blank for the root node)
	Mode     uint   `json:"mode"`     // permission bits; 0 means the default 0755
	Owner    string `json:"owner"`    // optional owner user name for chown
	Group    string `json:"group"`    // optional group name for chown
	Children []FS   `json:"children"` // subdirectories
}

// mkfs creates a real file system representation of fs inside of root,
// hence fs.Name is replaced with the root value. first must be true for the
// top-level call, whose node name has to be blank.
func mkfs(root string, fs FS, first bool) error {
	if first {
		if fs.Name != "" {
			return errors.New("name must be blank for root node")
		}
		// TODO: avoid modifying fs in case we use pointer here
		fs.Name = root
	}

	// Check that the directory to create is within the root. A plain
	// HasPrefix(fs.Name, root) also admitted sibling paths such as
	// root+"x"; require either the root itself or a path below it.
	fs.Name = filepath.Clean(fs.Name)
	if fs.Name != root && !strings.HasPrefix(fs.Name, root+string(filepath.Separator)) {
		return fmt.Errorf("%s is outside of %q root", fs.Name, root)
	}

	mode := os.FileMode(0755)
	if fs.Mode != 0 {
		mode = os.FileMode(fs.Mode)
	}
	if err := os.MkdirAll(fs.Name, mode); err != nil && !os.IsExist(err) {
		return err
	}
	// MkdirAll applies the umask, so explicitly chmod when a mode was given.
	// We can skip chmod here when we know that MkdirAll succeeds.
	if fs.Mode != 0 {
		if err := os.Chmod(fs.Name, os.FileMode(fs.Mode)); err != nil {
			return err
		}
	}

	if fs.Owner != "" || fs.Group != "" {
		stat, err := os.Stat(fs.Name)
		if err != nil {
			return err
		}
		// Default to the directory's current owner/group; override whichever
		// side was requested.
		sys := stat.Sys().(*syscall.Stat_t)
		uid := int(sys.Uid)
		gid := int(sys.Gid)
		if fs.Owner != "" {
			u, err := user.Lookup(fs.Owner)
			if err != nil {
				return err
			}
			uid, err = strconv.Atoi(u.Uid)
			if err != nil {
				return err
			}
		}
		if fs.Group != "" {
			g, err := user.LookupGroup(fs.Group)
			if err != nil {
				return err
			}
			gid, err = strconv.Atoi(g.Gid)
			if err != nil {
				return err
			}
		}
		if err = os.Lchown(fs.Name, uid, gid); err != nil {
			return err
		}
	}

	// Recursively create children; their names are relative to this node.
	for _, ch := range fs.Children {
		if ch.Name == "" {
			return errors.New("node name is blank")
		}
		// TODO: avoid modifying ch in case we use pointer here
		ch.Name = filepath.Join(fs.Name, ch.Name)
		if err := mkfs(root, ch, false); err != nil {
			return err
		}
	}
	return nil
}
|
//Package reply provides implementation of a reply mangos node.
package rep
import (
"github.com/go-mangos/mangos"
"github.com/go-mangos/mangos/protocol/rep"
"github.com/go-mangos/mangos/transport/ipc"
"github.com/go-mangos/mangos/transport/tcp"
)
// Node is a reply-side mangos endpoint bound to a single listening URL.
type Node struct {
	url  string
	sock mangos.Socket
}

// ResponseHandler processes one raw request message received by a Node.
type ResponseHandler func(*Node, []byte)
// Listen starts a Node reply server on the specified url and spawns the
// requested number of worker goroutines, each dispatching incoming requests
// to handler. (Receiver renamed from the non-idiomatic "self".)
func (n *Node) Listen(url string, workers int, handler ResponseHandler) error {
	n.url = url

	sock, err := rep.NewSocket()
	if err != nil {
		return err
	}
	n.sock = sock

	// Accept both ipc:// and tcp:// addresses.
	n.sock.AddTransport(ipc.NewTransport())
	n.sock.AddTransport(tcp.NewTransport())

	if err = n.sock.Listen(url); err != nil {
		return err
	}
	for id := 0; id < workers; id++ {
		go n.processData(handler)
	}
	return nil
}
// Reply sends payload back to the requesting node.
// (Simplified from var/if/return boilerplate; receiver renamed from the
// non-idiomatic "self".)
func (n *Node) Reply(payload []byte) error {
	return n.sock.Send(payload)
}
// processData receives request messages forever and dispatches each one to
// handler in its own goroutine. Receive errors are skipped.
// NOTE(review): a permanently failing socket (e.g. after close) makes this
// loop spin on Recv errors — confirm the intended shutdown semantics.
func (n *Node) processData(handler ResponseHandler) {
	for {
		msg, err := n.sock.Recv()
		if err != nil {
			continue
		}
		go handler(n, msg)
	}
}
|
package service
import "math/rand"
const (
	redisAddress = "127.0.0.1:6379"
	//stringLength is the length of a generated random string
	stringLength = 20
	// errorsList is the list used to record errors
	errorsList = "errors"
	// messagesList is the list used to record messages
	messagesList = "messages"
	// publisher stores the identity of the current message generator
	publisher = "currentPublisher"
	// publishTimeMS is the message generation interval in milliseconds
	publishTimeMS = 500
	// noNewMessages matches the redigo error text for an empty read
	noNewMessages = "redigo: nil returned"
)
// randomString returns a pseudo-random string of n alphanumeric characters.
func randomString(n int) string {
	const alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
	buf := make([]rune, n)
	for i := range buf {
		buf[i] = rune(alphabet[rand.Intn(len(alphabet))])
	}
	return string(buf)
}
|
package n2s
import (
"bufio"
"log"
"time"
"io"
"github.com/DavidHuie/n2s/nginx"
"github.com/DavidHuie/n2s/statsd"
)
// N2S translates NGINX log lines to a summary format accepted by
// statsd. N2S polls the source file with a specified pollDuration.
type N2S struct {
	dest         io.Writer     // receives the aggregated statsd payload
	pollDuration time.Duration // delay between successive polls of source
	source       io.Reader     // NGINX access log stream being read
	stopChan     chan struct{} // signals the Start loop to return
}
// New creates a new instance of N2S.
func New(pollDuration time.Duration, source io.Reader, dest io.Writer) *N2S {
	n2s := &N2S{
		source:       source,
		dest:         dest,
		pollDuration: pollDuration,
		stopChan:     make(chan struct{}),
	}
	return n2s
}
// processLines reads the source until EOF or a scanner error, folds each
// parsable NGINX line into a statsd summary via logBuilder, and copies the
// built summary to dest. Unparsable lines are logged and skipped.
func (n *N2S) processLines() error {
	var lineCount int
	logBuilder := statsd.NewLogBuilder()
	scanner := bufio.NewScanner(n.source)
	for scanner.Scan() {
		line, err := nginx.ParseLogLine(scanner.Text())
		if err != nil {
			log.Printf("error parsing nginx line: %s", err)
			continue
		}
		if err := logBuilder.AddRequest(line.Path, line.Status); err != nil {
			log.Printf("error adding request to statsd log: %s", err)
			continue
		}
		lineCount++
	}
	// Scanner errors (other than plain EOF) abort the batch.
	if err := scanner.Err(); err != nil {
		return err
	}
	linesReader, err := logBuilder.Build()
	if err != nil {
		return err
	}
	if _, err = io.Copy(n.dest, linesReader); err != nil {
		return err
	}
	if lineCount > 0 {
		log.Printf("processed %d lines", lineCount)
	}
	// err is nil at this point; returned as-is.
	return err
}
// Start runs the processing loop: it processes the source once immediately,
// then again every pollDuration, until Stop is called.
func (n *N2S) Start() {
	log.Println("starting n2s")

	// Do this once before the ticker starts.
	if err := n.processLines(); err != nil {
		log.Printf("error processing log lines: %s", err)
	}

	ticker := time.NewTicker(n.pollDuration)
	// Release the ticker when the loop exits; the original never stopped it.
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			if err := n.processLines(); err != nil {
				log.Printf("error processing log lines: %s", err)
			}
		case <-n.stopChan:
			return
		}
	}
}
// Stop stops the log processing process. The send blocks until the Start
// loop has received the stop signal.
func (n *N2S) Stop() {
	n.stopChan <- struct{}{}
	log.Println("stopping n2s")
}
|
package utils
import (
"fmt"
"time"
)
// CheckUntil regularly check a predicate until it's true or time out is reached.
func CheckUntil(interval time.Duration, timeout time.Duration, predicate func() (bool, error)) error {
timeoutCh := time.After(timeout)
for {
select {
case <-time.After(interval):
predTrue, err := predicate()
if predTrue {
return nil
}
if err != nil {
return err
}
case <-timeoutCh:
return fmt.Errorf("timeout of %ds reached", int64(timeout/time.Second))
}
}
}
|
package preference
import (
"fmt"
"github.com/blang/semver"
)
// GetClientPreferences returns the preference entry for the given client ID,
// version and environment. The version is reduced to "major.minor" before
// the lookup; an error naming the offending key is returned when any level
// of the lookup is missing. (The triple repeated map lookups were collapsed
// into single lookups per level.)
func (f ClientPreferences) GetClientPreferences(clientID, clientVersion, clientEnvironment string) (*ClientPreference, error) {
	version, err := semver.ParseTolerant(clientVersion)
	if err != nil {
		return nil, err
	}

	// Only the first two version components take part in the lookup.
	clientVersion = fmt.Sprintf("%d.%d", version.Major, version.Minor)

	versions, ok := f.mapClientConfig[clientID]
	if !ok {
		return nil, fmt.Errorf("clientID: %s ,clientVersion : %s , clientEnvironment : %s , clientID is invalid", clientID, clientVersion, clientEnvironment)
	}
	environments, ok := versions[clientVersion]
	if !ok {
		return nil, fmt.Errorf("clientID: %s ,clientVersion : %s , clientEnvironment : %s , clientVersion is invalid", clientID, clientVersion, clientEnvironment)
	}
	clientPreference, ok := environments[clientEnvironment]
	if !ok {
		return nil, fmt.Errorf("clientID: %s ,clientVersion : %s , clientEnvironment : %s , clientEnvironment is invalid", clientID, clientVersion, clientEnvironment)
	}
	return &clientPreference, nil
}
|
package main
import (
"fmt"
"github.com/iwindfree/go-study/eval/lib"
)
// main evaluates a fixed arithmetic expression with the eval library and
// prints a greeting followed by the result.
func main() {
	fmt.Println("hello")
	a := eval.Eval("5 + 3 + 2")
	fmt.Println(a)
}
|
package util;
import (
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/hex"
);
// SHA1Hex returns the hex-encoded SHA-1 digest of val.
// NOTE: SHA-1 is cryptographically broken; avoid it for new
// security-sensitive hashing.
func SHA1Hex(val string) string {
	// sha1.Sum replaces the New/Write/Sum sequence, dropping the ignored
	// Write error and matching the SHA-2 helpers in this file. Stray
	// semicolons removed (gofmt).
	data := sha1.Sum([]byte(val))
	return hex.EncodeToString(data[:])
}
// SHA256Hex returns the hex-encoded SHA-256 digest of val.
// (Stray statement-terminating semicolons removed; gofmt-clean.)
func SHA256Hex(val string) string {
	data := sha256.Sum256([]byte(val))
	return hex.EncodeToString(data[:])
}
// SHA512Hex returns the hex-encoded SHA-512 digest of val.
// (Stray statement-terminating semicolons removed; gofmt-clean.)
func SHA512Hex(val string) string {
	data := sha512.Sum512([]byte(val))
	return hex.EncodeToString(data[:])
}
// Passhash derives the password hash clients are expected to send:
// SHA-256 over "username.password.username", hex-encoded. The username acts
// as a deterministic (and therefore weak) salt on both sides.
// (Stray statement-terminating semicolons removed; gofmt-clean.)
func Passhash(username string, password string) string {
	saltedData := username + "." + password + "." + username
	return SHA256Hex(saltedData)
}
|
package c33_diffie_hellman
import (
"testing"
)
// TestGenerate checks that two independently generated DH systems produce
// distinct public keys and still agree on the derived session key.
func TestGenerate(t *testing.T) {
	dh1 := NewDHSystem()
	dh2 := NewDHSystem()
	if dh1.Pub == dh2.Pub {
		t.Errorf("Public keys are equal")
	}
	// Dropped the redundant trailing \n — t.Errorf already terminates lines.
	if dh1.SessionKeySHA256(dh2.Pub) != dh2.SessionKeySHA256(dh1.Pub) {
		t.Errorf("Invalid session keys")
	}
}
|
package main
import (
"fmt"
"math"
)
// main prints the integer square roots of a few sample inputs, one per line.
func main() {
	for _, n := range []int{4, 8, 10} {
		fmt.Println(mySqrt(n))
	}
}
/**
x 的平方根
实现 `int sqrt(int x)` 函数。
计算并返回 x 的平方根,其中 x 是非负整数。
由于返回类型是整数,结果只保留整数的部分,小数部分将被舍去。
示例 1:
```
输入: 4
输出: 2
```
示例 2:
```
输入: 8
输出: 2
说明: 8 的平方根是 2.82842...,
由于返回类型是整数,小数部分将被舍去。
```
*/
// mySqrt returns the integer square root of x (the floor of sqrt(x)) using
// Newton's method: r' = (r + x/r) / 2, starting from r = x.
//
// Non-positive inputs return 0. The guard is required for correctness, not
// just convenience: for negative x the iteration has no real fixed point and
// oscillates forever, so the original looped indefinitely.
func mySqrt(x int) int {
	if x <= 0 {
		return 0
	}
	C, x0 := float64(x), float64(x)
	for {
		xi := 0.5 * (x0 + C/x0)
		// Starting above the root, the sequence decreases monotonically
		// toward sqrt(C); stop once successive iterates agree to 1e-7.
		if math.Abs(x0-xi) < 1e-7 {
			break
		}
		x0 = xi
	}
	// Truncation yields the floor because x0 converges from above.
	return int(x0)
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bindinfo_test
import (
"fmt"
"strings"
"testing"
"time"
"github.com/pingcap/tidb/bindinfo"
"github.com/pingcap/tidb/bindinfo/internal"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/parser/auth"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/testkit"
utilparser "github.com/pingcap/tidb/util/parser"
"github.com/pingcap/tidb/util/stmtsummary"
"github.com/stretchr/testify/require"
"go.opencensus.io/stats/view"
)
// TestDMLCapturePlanBaseline exercises automatic plan-baseline capture for
// DML. The same workload (delete/update/insert-select/replace-select plus
// value-list insert/replace, each run twice) is executed first without an
// authenticated user (nothing is captured) and then as root (exactly four
// bindings appear; value-list INSERT/REPLACE are not captured).
func TestDMLCapturePlanBaseline(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec(" SET GLOBAL tidb_capture_plan_baselines = on")
	defer func() {
		tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = off")
	}()
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, b int, c int, key idx_b(b), key idx_c(c))")
	tk.MustExec("create table t1 like t")
	dom.BindHandle().CaptureBaselines()
	tk.MustQuery("show global bindings").Check(testkit.Rows())
	// Each statement runs twice — capture appears to require repeated
	// execution before a statement qualifies; TODO confirm the threshold.
	tk.MustExec("delete from t where b = 1 and c > 1")
	tk.MustExec("delete from t where b = 1 and c > 1")
	tk.MustExec("update t set a = 1 where b = 1 and c > 1")
	tk.MustExec("update t set a = 1 where b = 1 and c > 1")
	tk.MustExec("insert into t1 select * from t where t.b = 1 and t.c > 1")
	tk.MustExec("insert into t1 select * from t where t.b = 1 and t.c > 1")
	tk.MustExec("replace into t1 select * from t where t.b = 1 and t.c > 1")
	tk.MustExec("replace into t1 select * from t where t.b = 1 and t.c > 1")
	tk.MustExec("insert into t1 values(1,1,1)")
	tk.MustExec("insert into t1 values(1,1,1)")
	tk.MustExec("replace into t1 values(1,1,1)")
	tk.MustExec("replace into t1 values(1,1,1)")
	tk.MustExec("admin capture bindings")
	rows := tk.MustQuery("show global bindings").Rows()
	// No session user was authenticated, so nothing was captured.
	require.Len(t, rows, 0)
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	// Re-run the identical workload as root; these executions are capturable.
	tk.MustExec("delete from t where b = 1 and c > 1")
	tk.MustExec("delete from t where b = 1 and c > 1")
	tk.MustExec("update t set a = 1 where b = 1 and c > 1")
	tk.MustExec("update t set a = 1 where b = 1 and c > 1")
	tk.MustExec("insert into t1 select * from t where t.b = 1 and t.c > 1")
	tk.MustExec("insert into t1 select * from t where t.b = 1 and t.c > 1")
	tk.MustExec("replace into t1 select * from t where t.b = 1 and t.c > 1")
	tk.MustExec("replace into t1 select * from t where t.b = 1 and t.c > 1")
	tk.MustExec("insert into t1 values(1,1,1)")
	tk.MustExec("insert into t1 values(1,1,1)")
	tk.MustExec("replace into t1 values(1,1,1)")
	tk.MustExec("replace into t1 values(1,1,1)")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Sort().Rows()
	// Four bindings: only the DML forms that contain a query block are
	// captured; plain value-list INSERT/REPLACE produce none.
	require.Len(t, rows, 4)
	require.Equal(t, "delete from `test` . `t` where `b` = ? and `c` > ?", rows[0][0])
	require.Equal(t, "DELETE /*+ use_index(@`del_1` `test`.`t` `idx_b`), no_order_index(@`del_1` `test`.`t` `idx_b`)*/ FROM `test`.`t` WHERE `b` = 1 AND `c` > 1", rows[0][1])
	require.Equal(t, "insert into `test` . `t1` select * from `test` . `t` where `t` . `b` = ? and `t` . `c` > ?", rows[1][0])
	require.Equal(t, "INSERT INTO `test`.`t1` SELECT /*+ use_index(@`sel_1` `test`.`t` `idx_b`), no_order_index(@`sel_1` `test`.`t` `idx_b`)*/ * FROM `test`.`t` WHERE `t`.`b` = 1 AND `t`.`c` > 1", rows[1][1])
	require.Equal(t, "replace into `test` . `t1` select * from `test` . `t` where `t` . `b` = ? and `t` . `c` > ?", rows[2][0])
	require.Equal(t, "REPLACE INTO `test`.`t1` SELECT /*+ use_index(@`sel_1` `test`.`t` `idx_b`), no_order_index(@`sel_1` `test`.`t` `idx_b`)*/ * FROM `test`.`t` WHERE `t`.`b` = 1 AND `t`.`c` > 1", rows[2][1])
	require.Equal(t, "update `test` . `t` set `a` = ? where `b` = ? and `c` > ?", rows[3][0])
	require.Equal(t, "UPDATE /*+ use_index(@`upd_1` `test`.`t` `idx_b`), no_order_index(@`upd_1` `test`.`t` `idx_b`)*/ `test`.`t` SET `a`=1 WHERE `b` = 1 AND `c` > 1", rows[3][1])
}
// TestCapturePlanBaseline checks the basic SELECT capture path: repeated
// queries are only captured into a global binding once the session has an
// authenticated user.
func TestCapturePlanBaseline(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = on")
	defer func() {
		tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = off")
	}()
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int)")
	dom.BindHandle().CaptureBaselines()
	tk.MustQuery("show global bindings").Check(testkit.Rows())
	// Unauthenticated executions: nothing is captured.
	tk.MustExec("select count(*) from t where a > 10")
	tk.MustExec("select count(*) from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows := tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 0)
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	// As root, two executions of the same query yield one captured binding.
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `test` . `t` where `a` > ?", rows[0][0])
	require.Equal(t, "SELECT /*+ use_index(@`sel_1` `test`.`t` )*/ * FROM `test`.`t` WHERE `a` > 10", rows[0][1])
}
// TestCapturePlanBaseline4DisabledStatus verifies the lifecycle of a captured
// binding's status: it starts Enabled (and is used by the optimizer), can be
// set to Disabled (no longer used, and re-capture does not flip it back),
// and can finally be dropped.
func TestCapturePlanBaseline4DisabledStatus(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = on")
	defer func() {
		tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = off")
	}()
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, index idx_a(a))")
	dom.BindHandle().CaptureBaselines()
	tk.MustQuery("show global bindings").Check(testkit.Rows())
	// Unauthenticated: not captured.
	tk.MustExec("select /*+ USE_INDEX(t, idx_a) */ * from t where a > 10")
	tk.MustExec("select /*+ USE_INDEX(t, idx_a) */ * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows := tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 0)
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	// Freshly captured binding is Enabled with source "capture".
	require.Equal(t, bindinfo.Enabled, rows[0][3])
	require.Equal(t, bindinfo.Capture, rows[0][8])
	// Enabled binding is picked up by the optimizer.
	tk.MustExec("select * from t where a > 10")
	tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("1"))
	// Disabling stops the optimizer from using it.
	tk.MustExec("set binding disabled for select * from t where a > 10")
	tk.MustExec("select * from t where a > 10")
	tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("0"))
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, bindinfo.Disabled, rows[0][3])
	// Re-running and re-capturing does not re-enable a Disabled binding.
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, bindinfo.Disabled, rows[0][3])
	tk.MustExec("select * from t where a > 10")
	tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("0"))
	// Dropping removes the binding entirely.
	tk.MustExec("drop global binding for select * from t where a > 10")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 0)
	internal.UtilCleanBindingEnv(tk, dom)
}
// TestCaptureDBCaseSensitivity ensures a manually created binding in an
// upper-case database name ("SPM") blocks capture of the equivalent query,
// i.e. the duplicate check ignores DB-name case.
func TestCaptureDBCaseSensitivity(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("drop database if exists SPM")
	tk.MustExec("create database SPM")
	tk.MustExec("use SPM")
	tk.MustExec("create table t(a int, b int, key(b))")
	tk.MustExec("create global binding for select * from t using select /*+ use_index(t) */ * from t")
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec("select /*+ use_index(t,b) */ * from t")
	tk.MustExec("select /*+ use_index(t,b) */ * from t")
	tk.MustExec("admin capture bindings")
	// The capture should ignore the case sensitivity for DB name when checking if any binding exists,
	// so there would be no new binding captured.
	rows := tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	// The surviving binding is the manual one, not a captured duplicate.
	require.Equal(t, "SELECT /*+ use_index(`t` )*/ * FROM `SPM`.`t`", rows[0][1])
	require.Equal(t, "manual", rows[0][8])
}
// TestCaptureBaselinesDefaultDB checks that a captured binding whose query
// fully qualifies every table gets an empty default-DB column, and that the
// binding still applies when the session later switches into that DB.
func TestCaptureBaselinesDefaultDB(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = on")
	defer func() {
		tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = off")
	}()
	tk.MustExec("use test")
	tk.MustExec("drop database if exists spm")
	tk.MustExec("create database spm")
	tk.MustExec("create table spm.t(a int, index idx_a(a))")
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec("select * from spm.t ignore index(idx_a) where a > 10")
	tk.MustExec("select * from spm.t ignore index(idx_a) where a > 10")
	tk.MustExec("admin capture bindings")
	rows := tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	// Default DB should be "" when all columns have explicit database name.
	require.Equal(t, "", rows[0][2])
	require.Equal(t, bindinfo.Enabled, rows[0][3])
	tk.MustExec("use spm")
	tk.MustExec("select * from spm.t where a > 10")
	// Should use TableScan because of the "ignore index" binding.
	require.Len(t, tk.Session().GetSessionVars().StmtCtx.IndexNames, 0)
}
// TestCapturePreparedStmt verifies that prepared-statement executions are
// captured as parameterized bindings (hints from the prepared SQL are kept),
// and that the binding survives flush/evolve unchanged.
func TestCapturePreparedStmt(t *testing.T) {
	originalVal := config.CheckTableBeforeDrop
	config.CheckTableBeforeDrop = true
	defer func() {
		config.CheckTableBeforeDrop = originalVal
	}()
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, b int, c int, key idx_b(b), key idx_c(c))")
	// Without a binding the optimizer prefers idx_b for this predicate.
	require.True(t, tk.MustUseIndex("select * from t where b = 1 and c > 1", "idx_b(b)"))
	tk.MustExec("prepare stmt from 'select /*+ use_index(t,idx_c) */ * from t where b = ? and c > ?'")
	tk.MustExec("set @p = 1")
	tk.MustExec("execute stmt using @p, @p")
	tk.MustExec("execute stmt using @p, @p")
	tk.MustQuery("show global bindings").Check(testkit.Rows())
	tk.MustExec("admin capture bindings")
	rows := tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	// The captured binding keeps the idx_c hint and the ? placeholders.
	require.Equal(t, "select * from `test` . `t` where `b` = ? and `c` > ?", rows[0][0])
	require.Equal(t, "SELECT /*+ use_index(@`sel_1` `test`.`t` `idx_c`), no_order_index(@`sel_1` `test`.`t` `idx_c`)*/ * FROM `test`.`t` WHERE `b` = ? AND `c` > ?", rows[0][1])
	// The binding overrides even a conflicting per-query hint.
	require.True(t, tk.MustUseIndex("select /*+ use_index(t,idx_b) */ * from t where b = 1 and c > 1", "idx_c(c)"))
	tk.MustExec("admin flush bindings")
	tk.MustExec("admin evolve bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `test` . `t` where `b` = ? and `c` > ?", rows[0][0])
	require.Equal(t, "SELECT /*+ use_index(@`sel_1` `test`.`t` `idx_c`), no_order_index(@`sel_1` `test`.`t` `idx_c`)*/ * FROM `test`.`t` WHERE `b` = ? AND `c` > ?", rows[0][1])
}
// TestCapturePlanBaselineIgnoreTiFlash ensures that even when the chosen plan
// is a TiFlash (MPP) plan, baseline capture records a TiKV-style binding
// rather than pinning the TiFlash plan.
func TestCapturePlanBaselineIgnoreTiFlash(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, b int, key(a), key(b))")
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec("select * from t")
	tk.MustExec("select * from t")
	// Create virtual tiflash replica info.
	domSession := domain.GetDomain(tk.Session())
	is := domSession.InfoSchema()
	db, exists := is.SchemaByName(model.NewCIStr("test"))
	require.True(t, exists)
	for _, tblInfo := range db.Tables {
		if tblInfo.Name.L == "t" {
			tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{
				Count:     1,
				Available: true,
			}
		}
	}
	// Here the plan is the TiFlash plan.
	rows := tk.MustQuery("explain select * from t").Rows()
	require.Equal(t, "mpp[tiflash]", fmt.Sprintf("%v", rows[len(rows)-1][2]))
	tk.MustQuery("show global bindings").Check(testkit.Rows())
	tk.MustExec("admin capture bindings")
	// Don't have the TiFlash plan even we have TiFlash replica.
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `test` . `t`", rows[0][0])
	require.Equal(t, "SELECT /*+ use_index(@`sel_1` `test`.`t` )*/ * FROM `test`.`t`", rows[0][1])
}
// TestBindingSource checks that the Source field of a binding reflects how it
// was created: Manual (CREATE BINDING), Evolve (plan-baseline evolution), or
// Capture (automatic baseline capture).
func TestBindingSource(t *testing.T) {
	originalVal := config.CheckTableBeforeDrop
	config.CheckTableBeforeDrop = true
	defer func() {
		config.CheckTableBeforeDrop = originalVal
	}()
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, index idx_a(a))")
	// Test Source for SQL created sql
	tk.MustExec("create global binding for select * from t where a > 10 using select * from t ignore index(idx_a) where a > 10")
	bindHandle := dom.BindHandle()
	sql, hash := internal.UtilNormalizeWithDefaultDB(t, "select * from t where a > ?")
	bindData := bindHandle.GetBindRecord(hash, sql, "test")
	require.NotNil(t, bindData)
	require.Equal(t, "select * from `test` . `t` where `a` > ?", bindData.OriginalSQL)
	require.Len(t, bindData.Bindings, 1)
	bind := bindData.Bindings[0]
	require.Equal(t, bindinfo.Manual, bind.Source)
	// Test Source for evolved sql
	tk.MustExec("set @@tidb_evolve_plan_baselines=1")
	tk.MustQuery("select * from t where a > 10")
	bindHandle.SaveEvolveTasksToStore()
	sql, hash = internal.UtilNormalizeWithDefaultDB(t, "select * from t where a > ?")
	bindData = bindHandle.GetBindRecord(hash, sql, "test")
	require.NotNil(t, bindData)
	require.Equal(t, "select * from `test` . `t` where `a` > ?", bindData.OriginalSQL)
	// A second binding was appended by evolution alongside the manual one.
	require.Len(t, bindData.Bindings, 2)
	bind = bindData.Bindings[1]
	require.Equal(t, bindinfo.Evolve, bind.Source)
	tk.MustExec("set @@tidb_evolve_plan_baselines=0")
	// Test Source for captured sqls
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = on")
	defer func() {
		tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = off")
	}()
	tk.MustExec("use test")
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec("select * from t ignore index(idx_a) where a < 10")
	tk.MustExec("select * from t ignore index(idx_a) where a < 10")
	tk.MustExec("admin capture bindings")
	bindHandle.CaptureBaselines()
	sql, hash = internal.UtilNormalizeWithDefaultDB(t, "select * from t where a < ?")
	bindData = bindHandle.GetBindRecord(hash, sql, "test")
	require.NotNil(t, bindData)
	require.Equal(t, "select * from `test` . `t` where `a` < ?", bindData.OriginalSQL)
	require.Len(t, bindData.Bindings, 1)
	bind = bindData.Bindings[0]
	require.Equal(t, bindinfo.Capture, bind.Source)
}
// TestCapturedBindingCharset runs the workload under an 'ascii' connection
// charset and checks the captured binding's recorded charset/collation.
// The binding stores utf8mb4/utf8mb4_bin — presumably the normalized storage
// charset rather than the session's; TODO confirm against bindinfo docs.
func TestCapturedBindingCharset(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec("use test")
	tk.MustExec("create table t(name varchar(25), index idx(name))")
	tk.MustExec("set character_set_connection = 'ascii'")
	tk.MustExec("update t set name = 'hello' where name <= 'abc'")
	tk.MustExec("update t set name = 'hello' where name <= 'abc'")
	tk.MustExec("set character_set_connection = 'utf8mb4'")
	tk.MustExec("admin capture bindings")
	rows := tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "update `test` . `t` set `name` = ? where `name` <= ?", rows[0][0])
	require.Equal(t, "UPDATE /*+ use_index(@`upd_1` `test`.`t` `idx`), no_order_index(@`upd_1` `test`.`t` `idx`)*/ `test`.`t` SET `name`='hello' WHERE `name` <= 'abc'", rows[0][1])
	// Columns 6/7 are the binding's charset and collation.
	require.Equal(t, "utf8mb4", rows[0][6])
	require.Equal(t, "utf8mb4_bin", rows[0][7])
}
// TestConcurrentCapture simulates a binding created concurrently on another
// tidb-server (inserted directly into mysql.bind_info) and checks that a
// subsequent capture of the same statement supersedes it: the old row is
// marked deleted and a new captured row becomes enabled.
func TestConcurrentCapture(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	// Simulate an existing binding generated by concurrent CREATE BINDING, which has not been synchronized to current tidb-server yet.
	// Actually, it is more common to be generated by concurrent baseline capture, I use Manual just for simpler test verification.
	tk.MustExec("insert into mysql.bind_info values('select * from `test` . `t`', 'select * from `test` . `t`', '', 'enabled', '2000-01-01 09:00:00', '2000-01-01 09:00:00', '', '','" +
		bindinfo.Manual + "', '', '')")
	tk.MustQuery("select original_sql, source from mysql.bind_info where source != 'builtin'").Check(testkit.Rows(
		"select * from `test` . `t` manual",
	))
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, b int)")
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec("select * from t")
	tk.MustExec("select * from t")
	tk.MustExec("admin capture bindings")
	// Capture logically replaces the stale manual row with a captured one.
	tk.MustQuery("select original_sql, source, status from mysql.bind_info where source != 'builtin'").Check(testkit.Rows(
		"select * from `test` . `t` manual deleted",
		"select * from `test` . `t` capture enabled",
	))
}
// TestUpdateSubqueryCapture captures an UPDATE containing OR-ed IN subqueries
// and checks that the binding includes hints for every query block (upd_1,
// sel_1, sel_2) and that replaying the statement produces no warnings.
func TestUpdateSubqueryCapture(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t1, t2")
	tk.MustExec("create table t1(a int, b int, c int, key idx_b(b))")
	tk.MustExec("create table t2(a int, b int)")
	stmtsummary.StmtSummaryByDigestMap.Clear()
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec("update t1 set b = 1 where b = 2 and (a in (select a from t2 where b = 1) or c in (select a from t2 where b = 1))")
	tk.MustExec("update t1 set b = 1 where b = 2 and (a in (select a from t2 where b = 1) or c in (select a from t2 where b = 1))")
	tk.MustExec("admin capture bindings")
	rows := tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	bindSQL := "UPDATE /*+ hash_join(@`upd_1` `test`.`t1`), use_index(@`upd_1` `test`.`t1` `idx_b`), no_order_index(@`upd_1` `test`.`t1` `idx_b`), use_index(@`sel_1` `test`.`t2` ), use_index(@`sel_2` `test`.`t2` )*/ `test`.`t1` SET `b`=1 WHERE `b` = 2 AND (`a` IN (SELECT `a` FROM `test`.`t2` WHERE `b` = 1) OR `c` IN (SELECT `a` FROM `test`.`t2` WHERE `b` = 1))"
	originSQL := "UPDATE `test`.`t1` SET `b`=1 WHERE `b` = 2 AND (`a` IN (SELECT `a` FROM `test`.`t2` WHERE `b` = 1) OR `c` IN (SELECT `a` FROM `test`.`t2` WHERE `b` = 1))"
	require.Equal(t, bindSQL, rows[0][1])
	// Applying the binding to the original statement must not raise warnings.
	tk.MustExec(originSQL)
	require.Len(t, tk.Session().GetSessionVars().StmtCtx.GetWarnings(), 0)
}
// TestIssue20417 is a regression test covering three binding paths on a table
// with a VARBINARY primary key: manual CREATE BINDING, automatic baseline
// capture, and baseline evolution (pending verify -> enabled/rejected).
func TestIssue20417(t *testing.T) {
	originalVal := config.CheckTableBeforeDrop
	config.CheckTableBeforeDrop = true
	defer func() {
		config.CheckTableBeforeDrop = originalVal
	}()
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec(`CREATE TABLE t (
pk VARBINARY(36) NOT NULL PRIMARY KEY,
b BIGINT NOT NULL,
c BIGINT NOT NULL,
pad VARBINARY(2048),
INDEX idxb(b),
INDEX idxc(c)
)`)
	// Test for create binding
	internal.UtilCleanBindingEnv(tk, dom)
	tk.MustExec("create global binding for select * from t using select /*+ use_index(t, idxb) */ * from t")
	rows := tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `test` . `t`", rows[0][0])
	require.Equal(t, "SELECT /*+ use_index(`t` `idxb`)*/ * FROM `test`.`t`", rows[0][1])
	require.True(t, tk.MustUseIndex("select * from t", "idxb(b)"))
	require.True(t, tk.MustUseIndex("select * from test.t", "idxb(b)"))
	// The binding's idxb hint wins over a conflicting idxc hint in the query.
	tk.MustExec("create global binding for select * from t WHERE b=2 AND c=3924541 using select /*+ use_index(@sel_1 test.t idxb) */ * from t WHERE b=2 AND c=3924541")
	require.True(t, tk.MustUseIndex("SELECT /*+ use_index(@`sel_1` `test`.`t` `idxc`)*/ * FROM `test`.`t` WHERE `b`=2 AND `c`=3924541", "idxb(b)"))
	require.True(t, tk.MustUseIndex("SELECT /*+ use_index(@`sel_1` `test`.`t` `idxc`)*/ * FROM `t` WHERE `b`=2 AND `c`=3924541", "idxb(b)"))
	// Test for capture baseline
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = on")
	dom.BindHandle().CaptureBaselines()
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec("select * from t where b=2 and c=213124")
	tk.MustExec("select * from t where b=2 and c=213124")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `test` . `t` where `b` = ? and `c` = ?", rows[0][0])
	require.Equal(t, "SELECT /*+ use_index(@`sel_1` `test`.`t` `idxb`), no_order_index(@`sel_1` `test`.`t` `idxb`)*/ * FROM `test`.`t` WHERE `b` = 2 AND `c` = 213124", rows[0][1])
	tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = off")
	// Test for evolve baseline
	internal.UtilCleanBindingEnv(tk, dom)
	tk.MustExec("set @@tidb_evolve_plan_baselines=1")
	tk.MustExec("create global binding for select * from t WHERE c=3924541 using select /*+ use_index(@sel_1 test.t idxb) */ * from t WHERE c=3924541")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `test` . `t` where `c` = ?", rows[0][0])
	require.Equal(t, "SELECT /*+ use_index(@`sel_1` `test`.`t` `idxb`)*/ * FROM `test`.`t` WHERE `c` = 3924541", rows[0][1])
	// The existing binding forces idxb despite the query's idxc hint.
	tk.MustExec("select /*+ use_index(t idxc)*/ * from t where c=3924541")
	require.Equal(t, "t:idxb", tk.Session().GetSessionVars().StmtCtx.IndexNames[0])
	tk.MustExec("admin flush bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 2)
	require.Equal(t, "select * from `test` . `t` where `c` = ?", rows[0][0])
	require.Equal(t, "SELECT /*+ use_index(@`sel_1` `test`.`t` `idxc`), no_order_index(@`sel_1` `test`.`t` `idxc`)*/ * FROM `test`.`t` WHERE `c` = 3924541", rows[0][1])
	// The evolved candidate starts in "pending verify" status.
	require.Equal(t, "pending verify", rows[0][3])
	tk.MustExec("admin evolve bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 2)
	require.Equal(t, "select * from `test` . `t` where `c` = ?", rows[0][0])
	require.Equal(t, "SELECT /*+ use_index(@`sel_1` `test`.`t` `idxc`), no_order_index(@`sel_1` `test`.`t` `idxc`)*/ * FROM `test`.`t` WHERE `c` = 3924541", rows[0][1])
	// Evolution either accepts (Enabled) or rejects the candidate.
	status := rows[0][3].(string)
	require.True(t, status == bindinfo.Enabled || status == bindinfo.Rejected)
	tk.MustExec("set @@tidb_evolve_plan_baselines=0")
}
// TestCaptureWithZeroSlowLogThreshold checks that baseline capture still works
// while tidb_slow_log_threshold is 0 (every statement logged as slow).
func TestCaptureWithZeroSlowLogThreshold(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int)")
	stmtsummary.StmtSummaryByDigestMap.Clear()
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	// Run the workload with the threshold at 0, then restore the default.
	tk.MustExec("set tidb_slow_log_threshold = 0")
	tk.MustExec("select * from t")
	tk.MustExec("select * from t")
	tk.MustExec("set tidb_slow_log_threshold = 300")
	tk.MustExec("admin capture bindings")
	rows := tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `test` . `t`", rows[0][0])
}
// TestIssue25505 is a regression test for capturing statements that use CTEs
// (plain, nested, and recursive). Each workload statement runs three times;
// the captured bindings are then compared against the expected normalized
// SQL -> bound SQL pairs in spmMap.
func TestIssue25505(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	defer func() {
		tk.MustExec("set tidb_slow_log_threshold = 300")
	}()
	tk.MustExec("set tidb_slow_log_threshold = 0")
	tk.MustExec("create table t (a int(11) default null,b int(11) default null,key b (b),key ba (b))")
	tk.MustExec("create table t1 (a int(11) default null,b int(11) default null,key idx_ab (a,b),key idx_a (a),key idx_b (b))")
	tk.MustExec("create table t2 (a int(11) default null,b int(11) default null,key idx_ab (a,b),key idx_a (a),key idx_b (b))")
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	// Expected captures: normalized statement -> binding SQL with hints.
	spmMap := map[string]string{}
	spmMap["with recursive `cte` ( `a` ) as ( select ? union select `a` + ? from `test` . `t1` where `a` < ? ) select * from `cte`"] =
		"WITH RECURSIVE `cte` (`a`) AS (SELECT 2 UNION SELECT `a` + 1 FROM `test`.`t1` WHERE `a` < 5) SELECT /*+ hash_agg(@`sel_1`), use_index(@`sel_3` `test`.`t1` `idx_ab`), no_order_index(@`sel_3` `test`.`t1` `idx_ab`)*/ * FROM `cte`"
	spmMap["with recursive `cte1` ( `a` , `b` ) as ( select * from `test` . `t` where `b` = ? union select `a` + ? , `b` + ? from `cte1` where `a` < ? ) select * from `test` . `t`"] =
		"WITH RECURSIVE `cte1` (`a`, `b`) AS (SELECT * FROM `test`.`t` WHERE `b` = 1 UNION SELECT `a` + 1,`b` + 1 FROM `cte1` WHERE `a` < 2) SELECT /*+ use_index(@`sel_1` `test`.`t` )*/ * FROM `test`.`t`"
	spmMap["with `cte1` as ( select * from `test` . `t` ) , `cte2` as ( select ? ) select * from `test` . `t`"] =
		"WITH `cte1` AS (SELECT * FROM `test`.`t`), `cte2` AS (SELECT 4) SELECT /*+ use_index(@`sel_1` `test`.`t` )*/ * FROM `test`.`t`"
	spmMap["with `cte` as ( select * from `test` . `t` where `b` = ? ) select * from `test` . `t`"] =
		"WITH `cte` AS (SELECT * FROM `test`.`t` WHERE `b` = 6) SELECT /*+ use_index(@`sel_1` `test`.`t` )*/ * FROM `test`.`t`"
	spmMap["with recursive `cte` ( `a` ) as ( select ? union select `a` + ? from `test` . `t1` where `a` > ? ) select * from `cte`"] =
		"WITH RECURSIVE `cte` (`a`) AS (SELECT 2 UNION SELECT `a` + 1 FROM `test`.`t1` WHERE `a` > 5) SELECT /*+ hash_agg(@`sel_1`), use_index(@`sel_3` `test`.`t1` `idx_b`), no_order_index(@`sel_3` `test`.`t1` `idx_b`)*/ * FROM `cte`"
	spmMap["with `cte` as ( with `cte1` as ( select * from `test` . `t2` where `a` > ? and `b` > ? ) select * from `cte1` ) select * from `cte` join `test` . `t1` on `t1` . `a` = `cte` . `a`"] =
		"WITH `cte` AS (WITH `cte1` AS (SELECT * FROM `test`.`t2` WHERE `a` > 1 AND `b` > 1) SELECT * FROM `cte1`) SELECT /*+ use_index(@`sel_3` `test`.`t2` `idx_ab`), order_index(@`sel_3` `test`.`t2` `idx_ab`), use_index(@`sel_1` `test`.`t1` `idx_ab`), order_index(@`sel_1` `test`.`t1` `idx_ab`)*/ * FROM `cte` JOIN `test`.`t1` ON `t1`.`a` = `cte`.`a`"
	spmMap["with `cte` as ( with `cte1` as ( select * from `test` . `t2` where `a` = ? and `b` = ? ) select * from `cte1` ) select * from `cte` join `test` . `t1` on `t1` . `a` = `cte` . `a`"] =
		"WITH `cte` AS (WITH `cte1` AS (SELECT * FROM `test`.`t2` WHERE `a` = 1 AND `b` = 1) SELECT * FROM `cte1`) SELECT /*+ use_index(@`sel_3` `test`.`t2` `idx_a`), no_order_index(@`sel_3` `test`.`t2` `idx_a`), use_index(@`sel_1` `test`.`t1` `idx_a`), no_order_index(@`sel_1` `test`.`t1` `idx_a`)*/ * FROM `cte` JOIN `test`.`t1` ON `t1`.`a` = `cte`.`a`"
	// Workload: each distinct CTE statement executes three times.
	tk.MustExec("with cte as (with cte1 as (select /*+use_index(t2 idx_a)*/ * from t2 where a = 1 and b = 1) select * from cte1) select /*+use_index(t1 idx_a)*/ * from cte join t1 on t1.a=cte.a;")
	tk.MustExec("with cte as (with cte1 as (select /*+use_index(t2 idx_a)*/ * from t2 where a = 1 and b = 1) select * from cte1) select /*+use_index(t1 idx_a)*/ * from cte join t1 on t1.a=cte.a;")
	tk.MustExec("with cte as (with cte1 as (select /*+use_index(t2 idx_a)*/ * from t2 where a = 1 and b = 1) select * from cte1) select /*+use_index(t1 idx_a)*/ * from cte join t1 on t1.a=cte.a;")
	tk.MustExec("with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;")
	tk.MustExec("with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;")
	tk.MustExec("with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;")
	tk.MustExec("WITH RECURSIVE cte(a) AS (SELECT 2 UNION SELECT a+1 FROM t1 use index(idx_ab) WHERE a < 5) SELECT * FROM cte;")
	tk.MustExec("WITH RECURSIVE cte(a) AS (SELECT 2 UNION SELECT a+1 FROM t1 use index(idx_ab) WHERE a < 5) SELECT * FROM cte;")
	tk.MustExec("WITH RECURSIVE cte(a) AS (SELECT 2 UNION SELECT a+1 FROM t1 use index(idx_ab) WHERE a < 5) SELECT * FROM cte;")
	tk.MustExec("WITH RECURSIVE cte(a) AS (SELECT 2 UNION SELECT /*+use_index(t1 idx_b)*/ a+1 FROM t1 WHERE a > 5) SELECT * FROM cte;")
	tk.MustExec("WITH RECURSIVE cte(a) AS (SELECT 2 UNION SELECT /*+use_index(t1 idx_b)*/ a+1 FROM t1 WHERE a > 5) SELECT * FROM cte;")
	tk.MustExec("WITH RECURSIVE cte(a) AS (SELECT 2 UNION SELECT /*+use_index(t1 idx_b)*/ a+1 FROM t1 WHERE a > 5) SELECT * FROM cte;")
	tk.MustExec("with cte as (select * from t where b=6) select * from t")
	tk.MustExec("with cte as (select * from t where b=6) select * from t")
	tk.MustExec("with cte as (select * from t where b=6) select * from t")
	tk.MustExec("with cte1 as (select * from t), cte2 as (select 4) select * from t")
	tk.MustExec("with cte1 as (select * from t), cte2 as (select 5) select * from t")
	tk.MustExec("with cte1 as (select * from t), cte2 as (select 6) select * from t")
	tk.MustExec("with recursive cte1(a,b) as (select * from t where b = 1 union select a+1,b+1 from cte1 where a < 2) select * from t")
	tk.MustExec("with recursive cte1(a,b) as (select * from t where b = 1 union select a+1,b+1 from cte1 where a < 2) select * from t")
	tk.MustExec("with recursive cte1(a,b) as (select * from t where b = 1 union select a+1,b+1 from cte1 where a < 2) select * from t")
	tk.MustExec("admin capture bindings")
	rows := tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 7)
	// Every captured binding must match its expected SQL in spmMap.
	for _, row := range rows {
		str := fmt.Sprintf("%s", row[0])
		require.Equal(t, spmMap[str], row[1])
	}
}
// TestCaptureUserFilter verifies that a 'user' row in
// mysql.capture_plan_baselines_blacklist blocks plan-baseline capture only
// for statements issued by that user, and that a 'table' filter still
// applies to statements from other users.
func TestCaptureUserFilter(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = on")
	defer func() {
		tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = off")
	}()
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int)")
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	// A statement must run at least twice before it becomes eligible for capture.
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows := tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `test` . `t` where `a` > ?", rows[0][0])
	// test user filter
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('user', 'root')")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 0) // cannot capture the stmt
	// change another user
	tk.MustExec(`create user usr1`)
	tk.MustExec(`grant all on *.* to usr1 with grant option`)
	tk2 := testkit.NewTestKit(t, store)
	tk2.MustExec("use test")
	require.NoError(t, tk2.Session().Auth(&auth.UserIdentity{Username: "usr1", Hostname: "%"}, nil, nil, nil))
	tk2.MustExec("select * from t where a > 10")
	tk2.MustExec("select * from t where a > 10")
	tk2.MustExec("admin capture bindings")
	rows = tk2.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1) // can capture the stmt
	// use user-filter with other types of filter together
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('user', 'root')")
	tk.MustExec("insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('table', 'test.t')")
	tk2.MustExec("select * from t where a > 10")
	tk2.MustExec("select * from t where a > 10")
	tk2.MustExec("admin capture bindings")
	rows = tk2.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 0) // filtered by the table filter
}
// TestCaptureTableFilterValid checks bindinfo.ParseCaptureTableFilter:
// which filter strings are accepted as valid, and — for the accepted ones —
// whether they match the expected db.table names.
func TestCaptureTableFilterValid(t *testing.T) {
	defer view.Stop()
	type tableMatch struct {
		name string // "db.table" to probe
		want bool   // expected MatchTable result
	}
	cases := []struct {
		pattern string
		ok      bool
		matches []tableMatch
	}{
		{"*.*", true, []tableMatch{{"db.t", true}}},
		{"***.***", true, []tableMatch{{"db.t", true}}},
		{"d*.*", true, []tableMatch{{"db.t", true}}},
		{"*.t", true, []tableMatch{{"db.t", true}}},
		{"?.t*", true, []tableMatch{{"d.t", true}, {"d.tb", true}, {"db.t", false}}},
		{"db.t[1-3]", true, []tableMatch{{"db.t1", true}, {"db.t2", true}, {"db.t4", false}}},
		{"!db.table", false, nil},
		{"@db.table", false, nil},
		{"table", false, nil},
		{"", false, nil},
		{"\t ", false, nil},
	}
	for _, c := range cases {
		filter, ok := bindinfo.ParseCaptureTableFilter(c.pattern)
		require.Equal(t, c.ok, ok)
		if !ok {
			continue
		}
		for _, m := range c.matches {
			parts := strings.Split(m.name, ".")
			require.Equal(t, m.want, filter.MatchTable(parts[0], parts[1]))
		}
	}
}
// TestCaptureWildcardFilter exercises wildcard 'table' capture filters
// (patterns like db1*.t1*) across a grid of databases and tables, checking
// after each filter that exactly the non-filtered statements get bindings.
func TestCaptureWildcardFilter(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = on")
	defer func() {
		tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = off")
	}()
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	// 3x3 grid of databases and tables to probe the wildcard patterns.
	dbs := []string{"db11", "db12", "db2"}
	tbls := []string{"t11", "t12", "t2"}
	for _, db := range dbs {
		tk.MustExec(fmt.Sprintf(`drop database if exists %v`, db))
		tk.MustExec(fmt.Sprintf(`create database %v`, db))
		tk.MustExec(fmt.Sprintf(`use %v`, db))
		for _, tbl := range tbls {
			tk.MustExec(fmt.Sprintf(`create table %v(a int)`, tbl))
		}
	}
	// Each query must run twice to become capture-eligible.
	mustExecTwice := func() {
		for _, db := range dbs {
			for _, tbl := range tbls {
				tk.MustExec(fmt.Sprintf(`select * from %v.%v where a>10`, db, tbl))
				tk.MustExec(fmt.Sprintf(`select * from %v.%v where a>10`, db, tbl))
			}
		}
	}
	// checkBindings asserts that the captured bindings are exactly the
	// normalized queries for the given "db.tbl" list — no extras, no misses.
	checkBindings := func(dbTbls ...string) {
		m := make(map[string]bool) // map[query]existed
		for _, dbTbl := range dbTbls {
			tmp := strings.Split(dbTbl, ".")
			q := fmt.Sprintf("select * from `%v` . `%v` where `a` > ?", tmp[0], tmp[1])
			m[q] = false
		}
		tk.MustExec("admin capture bindings")
		var rows [][]interface{}
		// Binding persistence is asynchronous; poll until the expected count shows up.
		require.Eventually(t, func() bool {
			rows = tk.MustQuery("show global bindings").Sort().Rows()
			return len(rows) == len(dbTbls)
		}, time.Second*2, time.Millisecond*100)
		for _, r := range rows {
			q := r[0].(string)
			if _, exist := m[q]; !exist { // encounter an unexpected binding
				t.Fatalf("unexpected binding %v", q)
			}
			m[q] = true
		}
		for q, exist := range m {
			if !exist { // a expected binding is not existed
				t.Fatalf("missed binding %v", q)
			}
		}
	}
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec(`insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('table', 'db11.t1*')`)
	mustExecTwice()
	checkBindings("db11.t2", "db12.t11", "db12.t12", "db12.t2", "db2.t11", "db2.t12", "db2.t2") // db11.t11 and db11.t12 are filtered
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("delete from mysql.capture_plan_baselines_blacklist")
	tk.MustExec(`insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('table', 'db1*.t11')`)
	mustExecTwice()
	checkBindings("db11.t12", "db11.t2", "db12.t12", "db12.t2", "db2.t11", "db2.t12", "db2.t2") // db11.t11 and db12.t11 are filtered
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("delete from mysql.capture_plan_baselines_blacklist")
	tk.MustExec(`insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('table', 'db1*.t1*')`)
	mustExecTwice()
	checkBindings("db11.t2", "db12.t2", "db2.t11", "db2.t12", "db2.t2") // db11.t11 / db12.t11 / db11.t12 / db12.t12 are filtered
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("delete from mysql.capture_plan_baselines_blacklist")
	tk.MustExec(`insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('table', 'db1*.*')`)
	mustExecTwice()
	checkBindings("db2.t11", "db2.t12", "db2.t2") // db11.* / db12.* are filtered
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("delete from mysql.capture_plan_baselines_blacklist")
	tk.MustExec(`insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('table', '*.t1*')`)
	mustExecTwice()
	checkBindings("db11.t2", "db12.t2", "db2.t2") // *.t11 and *.t12 are filtered
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("delete from mysql.capture_plan_baselines_blacklist")
	tk.MustExec(`insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('table', 'db*.t*')`)
	mustExecTwice()
	checkBindings() // all are filtered
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("delete from mysql.capture_plan_baselines_blacklist")
	mustExecTwice()
	checkBindings("db11.t11", "db11.t12", "db11.t2", "db12.t11", "db12.t12", "db12.t2", "db2.t11", "db2.t12", "db2.t2") // no filter, all can be captured
}
// TestCaptureFilter covers the capture blacklist end to end: exact table
// filters, invalid table filters (ignored), database-wide filters,
// frequency filters (valid and invalid), unknown filter types (ignored),
// and case-insensitivity of filter_type/filter_value.
func TestCaptureFilter(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = on")
	defer func() {
		tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = off")
	}()
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int)")
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	// Baseline: with no filter, a twice-executed query is captured.
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows := tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `test` . `t` where `a` > ?", rows[0][0])
	// Valid table filter.
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('table', 'test.t')")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 0)
	// Queries on other tables are still capturable while test.t is filtered.
	tk.MustExec("select * from mysql.capture_plan_baselines_blacklist")
	tk.MustExec("select * from mysql.capture_plan_baselines_blacklist")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `mysql` . `capture_plan_baselines_blacklist`", rows[0][0])
	// Removing the filter lets the previously blocked statement be captured.
	tk.MustExec("delete from mysql.capture_plan_baselines_blacklist")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Sort().Rows()
	require.Len(t, rows, 2)
	require.Equal(t, "select * from `mysql` . `capture_plan_baselines_blacklist`", rows[0][0])
	require.Equal(t, "select * from `test` . `t` where `a` > ?", rows[1][0])
	// Invalid table filter.
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('table', 't')")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `test` . `t` where `a` > ?", rows[0][0])
	// Valid database filter.
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('table', 'mysql.*')")
	tk.MustExec("select * from mysql.capture_plan_baselines_blacklist")
	tk.MustExec("select * from mysql.capture_plan_baselines_blacklist")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 0)
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `test` . `t` where `a` > ?", rows[0][0])
	tk.MustExec("delete from mysql.capture_plan_baselines_blacklist")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Sort().Rows()
	require.Len(t, rows, 2)
	require.Equal(t, "select * from `mysql` . `capture_plan_baselines_blacklist`", rows[0][0])
	require.Equal(t, "select * from `test` . `t` where `a` > ?", rows[1][0])
	// Valid frequency filter.
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('frequency', '2')")
	// Two executions do not meet the frequency threshold...
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 0)
	// ...a third execution does.
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `test` . `t` where `a` > ?", rows[0][0])
	tk.MustExec("delete from mysql.capture_plan_baselines_blacklist")
	// Invalid frequency filter.
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('frequency', '0')")
	// The default two-execution requirement still applies.
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 0)
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `test` . `t` where `a` > ?", rows[0][0])
	tk.MustExec("delete from mysql.capture_plan_baselines_blacklist")
	// Invalid filter type.
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('unknown', 'xx')")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `test` . `t` where `a` > ?", rows[0][0])
	tk.MustExec("delete from mysql.capture_plan_baselines_blacklist")
	// Case sensitivity.
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	// Mixed-case filter_type/filter_value must behave like lowercase.
	tk.MustExec("insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('tABle', 'tESt.T')")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("select * from t where a > 10")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 0)
	tk.MustExec("delete from mysql.capture_plan_baselines_blacklist")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Sort().Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `test` . `t` where `a` > ?", rows[0][0])
	internal.UtilCleanBindingEnv(tk, dom)
	stmtsummary.StmtSummaryByDigestMap.Clear()
	tk.MustExec("insert into mysql.capture_plan_baselines_blacklist(filter_type, filter_value) values('table', 'mySQl.*')")
	tk.MustExec("select * from mysql.capture_plan_baselines_blacklist")
	tk.MustExec("select * from mysql.capture_plan_baselines_blacklist")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Rows()
	require.Len(t, rows, 0)
	tk.MustExec("delete from mysql.capture_plan_baselines_blacklist")
	tk.MustExec("admin capture bindings")
	rows = tk.MustQuery("show global bindings").Sort().Rows()
	require.Len(t, rows, 1)
	require.Equal(t, "select * from `mysql` . `capture_plan_baselines_blacklist`", rows[0][0])
}
// TestCaptureHints verifies that optimizer hints (agg, join, index,
// push-down, index-merge, runtime, storage and others) survive plan-baseline
// capture: each captured binding must contain the expected normalized hint,
// and the stored SQL digest must match the digest of the query normalized
// with the default database.
func TestCaptureHints(t *testing.T) {
	store, dom := testkit.CreateMockStoreAndDomain(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = on")
	defer func() {
		tk.MustExec("SET GLOBAL tidb_capture_plan_baselines = off")
	}()
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(pk int primary key, a int, b int, key(a), key(b))")
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	// query: the statement to capture; hint: substring expected in the binding.
	captureCases := []struct {
		query string
		hint  string
	}{
		// agg hints
		{"select /*+ hash_agg() */ count(1) from t", "hash_agg"},
		{"select /*+ stream_agg() */ count(1) from t", "stream_agg"},
		// join hints
		{"select /*+ merge_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", "merge_join"},
		{"select /*+ tidb_smj(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", "merge_join"},
		{"select /*+ hash_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", "hash_join"},
		{"select /*+ tidb_hj(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", "hash_join"},
		{"select /*+ inl_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", "inl_join"},
		{"select /*+ tidb_inlj(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", "inl_join"},
		{"select /*+ inl_hash_join(t1, t2) */ * from t t1, t t2 where t1.a=t2.a", "inl_hash_join"},
		// index hints
		{"select * from t use index(primary)", "use_index(@`sel_1` `test`.`t` )"},
		{"select /*+ use_index(primary) */ * from t", "use_index(@`sel_1` `test`.`t` )"},
		{"select * from t use index(a)", "use_index(@`sel_1` `test`.`t` `a`)"},
		{"select /*+ use_index(a) */ * from t use index(a)", "use_index(@`sel_1` `test`.`t` `a`)"},
		{"select * from t use index(b)", "use_index(@`sel_1` `test`.`t` `b`)"},
		{"select /*+ use_index(b) */ * from t use index(b)", "use_index(@`sel_1` `test`.`t` `b`)"},
		{"select /*+ use_index_merge(t, a, b) */ a, b from t where a=1 or b=1", "use_index_merge(@`sel_1` `t` `a`, `b`)"},
		{"select /*+ ignore_index(t, a) */ * from t where a=1", "ignore_index(`t` `a`)"},
		// push-down hints
		{"select /*+ limit_to_cop() */ * from t limit 10", "limit_to_cop(@`sel_1`)"},
		{"select /*+ agg_to_cop() */ a, count(*) from t group by a", "agg_to_cop(@`sel_1`)"},
		// index-merge hints
		{"select /*+ no_index_merge() */ a, b from t where a>1 or b>1", "no_index_merge()"},
		{"select /*+ use_index_merge(t, a, b) */ a, b from t where a>1 or b>1", "use_index_merge(@`sel_1` `t` `a`, `b`)"},
		// runtime hints
		{"select /*+ memory_quota(1024 MB) */ * from t", "memory_quota(1024 mb)"},
		{"select /*+ max_execution_time(1000) */ * from t", "max_execution_time(1000)"},
		{"select /*+ tidb_kv_read_timeout(1000) */ * from t", "tidb_kv_read_timeout(1000)"},
		// storage hints
		{"select /*+ read_from_storage(tikv[t]) */ * from t", "read_from_storage(tikv[`t`])"},
		// others
		{"select /*+ use_toja(true) */ t1.a, t1.b from t t1 where t1.a in (select t2.a from t t2)", "use_toja(true)"},
	}
	for _, capCase := range captureCases {
		// Start from a clean binding environment so each case captures exactly one binding.
		stmtsummary.StmtSummaryByDigestMap.Clear()
		internal.UtilCleanBindingEnv(tk, dom)
		tk.MustExec(capCase.query)
		tk.MustExec(capCase.query)
		tk.MustExec("admin capture bindings")
		res := tk.MustQuery(`show global bindings`).Rows()
		require.Equal(t, len(res), 1) // this query is captured, and
		require.True(t, strings.Contains(res[0][1].(string), capCase.hint), fmt.Sprintf("%v:%v", capCase.query, res[0][1])) // the binding contains the expected hint
		// test sql digest
		parser4binding := parser.New()
		originNode, err := parser4binding.ParseOneStmt(capCase.query, "utf8mb4", "utf8mb4_general_ci")
		require.NoError(t, err)
		_, sqlDigestWithDB := parser.NormalizeDigest(utilparser.RestoreWithDefaultDB(originNode, "test", capCase.query))
		require.Equal(t, res[0][9], sqlDigestWithDB.String())
	}
}
|
package oauth2_test
import (
"context"
"encoding/json"
"github.com/golang/mock/gomock"
"github.com/tsingsun/go-oauth2"
mocks "github.com/tsingsun/go-oauth2/mocks"
"testing"
"time"
)
// TestRefreshTokenGrant_RespondToAccessTokenRequest drives the refresh-token
// grant end to end against mocked repositories: it builds an encrypted
// refresh-token payload, submits it through the grant, and checks that the
// response carries a new access token and a new refresh token.
func TestRefreshTokenGrant_RespondToAccessTokenRequest(t *testing.T) {
	ctx := context.Background()
	ctrl := gomock.NewController(t)

	// Client repository resolves the client identified as "foo".
	client := &Client{}
	client.SetIdentifier("foo")
	clientRepo := mocks.NewMockClientRepositoryInterface(ctrl)
	clientRepo.EXPECT().GetClientEntity(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(client)

	// Scope repository resolves the "foo" scope.
	scope := &Scope{}
	scope.SetIdentifier("foo")
	scopeRepo := mocks.NewMockScopeRepositoryInterface(ctrl)
	scopeRepo.EXPECT().GetScopeEntityByIdentifier(gomock.Any(), gomock.Any()).Return(scope)

	// Access-token repository: issue a new token, revoke the old one, persist.
	accessRepo := mocks.NewMockAccessTokenRepositoryInterface(ctrl)
	accessRepo.EXPECT().GetNewToken(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&AccessToken{})
	accessRepo.EXPECT().RevokeAccessToken(gomock.Any(), gomock.Any()).Return()
	accessRepo.EXPECT().PersistNewAccessToken(gomock.Any(), gomock.Any()).Return(true)

	// Refresh-token repository: the presented token is not revoked; a new one
	// is issued and persisted, and the old one is revoked.
	refreshRepo := mocks.NewMockRefreshTokenRepositoryInterface(ctrl)
	refreshRepo.EXPECT().GetNewRefreshToken(gomock.Any()).Return(&RefreshToken{})
	refreshRepo.EXPECT().IsRefreshTokenRevoked(gomock.Any(), gomock.Any()).Return(false)
	refreshRepo.EXPECT().RevokeRefreshToken(gomock.Any(), gomock.Any()).Return()
	refreshRepo.EXPECT().PersistNewRefreshToken(gomock.Any(), gomock.Any()).Return(true)

	grant := oauth2.RefreshTokenGrant{
		RefreshTokenRepository: refreshRepo,
	}
	grant.SetClientRepository(clientRepo)
	grant.SetScopeRepository(scopeRepo)
	grant.SetAccessTokenRepository(accessRepo)
	grant.SetEncryptionKey([]byte(ENCRYPTION_KEY))

	// Build the "old" refresh token the client presents for renewal.
	payload := oauth2.RefreshTokenPayload{
		ClientId:       "foo",
		RefreshTokenId: "abcdefg",
		AccessTokenId:  "gfedcba",
		Scopes:         "foo",
		UserID:         "123",
		ExpiresTime:    time.Now().Add(10 * time.Minute),
	}
	raw, _ := json.Marshal(payload)
	oldRefreshToken, _ := grant.Encrypt(raw)

	req := &oauth2.RequestWapper{
		ClientId:     "foo",
		ClientSecret: "bar",
		RefreshToken: oldRefreshToken,
		Scope:        "foo",
	}
	req.SetContext(ctx)

	bearer := &oauth2.BearerTokenResponse{}
	if err := grant.RespondToAccessTokenRequest(req, bearer); err != nil {
		t.Fatal(err)
	}
	if _, ok := bearer.AccessToken.(oauth2.AccessTokenEntityInterface); !ok {
		t.Error("access token error")
	}
	if _, ok := bearer.RefreshToken.(oauth2.RefreshTokenEntityInterface); !ok {
		t.Error("refresh token error")
	}
}
|
package main
import (
"encoding/base64"
"fmt"
"mysql_byroad/model"
"mysql_byroad/mysql_schema"
"mysql_byroad/nsq"
"strconv"
log "github.com/Sirupsen/logrus"
)
// Context carries the dispatcher this handler belongs to, giving access to
// its configuration (monitor host/port, etc.).
type Context struct {
	dispatcher *Dispatcher
}

// Enqueuer is the minimal queue interface the handler needs: push an event
// under a queue name. Satisfied by the NSQ manager created in
// NewKafkaEventHandler.
type Enqueuer interface {
	Enqueue(name string, evt interface{})
}

// KafkaEventHandler turns binlog row events read from Kafka into notify
// events and pushes them onto per-task queues.
type KafkaEventHandler struct {
	queue            Enqueuer                 // destination queue (NSQ)
	taskManager      *TaskManager             // resolves tasks and their subscribed fields
	BinlogStatistics *model.BinlogStatistics  // per-table/event counters
	columnManager    *schema.ColumnManager    // column metadata used to translate raw values
	ctx              *Context
}
// NewKafkaEventHandler wires up a KafkaEventHandler: it connects to NSQ via
// the given config, starts the producer maintenance loop, initializes the
// column-metadata manager, and returns the ready-to-use handler. A non-nil
// error is returned only when the NSQ manager cannot be created.
func NewKafkaEventHandler(nsqConfig NSQConf, taskManager *TaskManager, ctx *Context) (*KafkaEventHandler, error) {
	manager, err := nsqm.NewNSQManager(nsqConfig.LookupdHttpAddrs, nsqConfig.NsqdAddrs, nil)
	if err != nil {
		log.Error(err.Error())
		return nil, err
	}
	manager.InitProducers()
	manager.ProducerUpdateLoop()
	handler := &KafkaEventHandler{
		ctx:         ctx,
		queue:       manager,
		taskManager: taskManager,
		BinlogStatistics: &model.BinlogStatistics{
			Statistics: make([]*model.BinlogStatistic, 0, 100),
		},
	}
	// Requires handler.ctx to be set: reads the monitor RPC address from it.
	handler.initColumnManager()
	return handler, nil
}
// initColumnManager fetches the monitored MySQL instance configurations from
// the monitor's RPC endpoint and builds the column-metadata manager used by
// translateColumnValue.
func (keh *KafkaEventHandler) initColumnManager() {
	rpcClientSchema := fmt.Sprintf("%s:%d", keh.ctx.dispatcher.Config.MonitorConf.Host, keh.ctx.dispatcher.Config.MonitorConf.RpcPort)
	rpcClient := NewRPCClient(rpcClientSchema)
	dbconfigs, err := rpcClient.GetDBInstanceConfig(rpcClientSchema)
	if err != nil {
		// Keep going with an empty config list (previous behavior): the
		// manager will simply know about no columns.
		log.Error("get db instance name error: ", err.Error())
	}
	configs := make([]*schema.MysqlConfig, 0, len(dbconfigs))
	for _, config := range dbconfigs {
		myconf := schema.MysqlConfig{
			Name:     config.Name,
			Host:     config.Host,
			Port:     config.Port,
			Username: config.Username,
			Password: config.Password,
			Exclude:  config.Exclude,
			Interval: config.Interval.Duration,
		}
		configs = append(configs, &myconf)
	}
	columnManager, err := schema.NewColumnManager(configs)
	if err != nil {
		// BUG FIX: previously execution fell through after the error and
		// called BuildColumnMap on a (presumably nil) manager, which would
		// panic. Bail out and leave keh.columnManager unset instead.
		log.Errorf("new column manager error: %s", err.Error())
		return
	}
	columnManager.BuildColumnMap()
	columnManager.LookupLoop()
	keh.columnManager = columnManager
}
// HandleKafkaEvent dispatches a row event to the handler matching its type;
// events of any other type are silently dropped.
func (keh *KafkaEventHandler) HandleKafkaEvent(evt *Entity, taskName string) {
	eventType := evt.EventType
	if eventType == "INSERT" {
		keh.HandleInsertEvent(evt, taskName)
	} else if eventType == "DELETE" {
		keh.HandleDeleteEvent(evt, taskName)
	} else if eventType == "UPDATE" {
		keh.HandleUpdateEvent(evt, taskName)
	}
}
// HandleInsertEvent forwards an INSERT row event to genNotifyEvent.
func (keh *KafkaEventHandler) HandleInsertEvent(evt *Entity, taskName string) {
	keh.genNotifyEvent(evt, taskName)
}

// HandleDeleteEvent forwards a DELETE row event to genNotifyEvent.
func (keh *KafkaEventHandler) HandleDeleteEvent(evt *Entity, taskName string) {
	keh.genNotifyEvent(evt, taskName)
}

// HandleUpdateEvent forwards an UPDATE row event to genNotifyEvent.
func (keh *KafkaEventHandler) HandleUpdateEvent(evt *Entity, taskName string) {
	keh.genNotifyEvent(evt, taskName)
}
// UpdateColumn pairs the before- and after-images of a single column within
// one row event. For single-sided events (INSERT/DELETE) one side is an
// empty Column.
type UpdateColumn struct {
	Name         string
	BeforeColumn *Column
	AfterColumn  *Column
}
// genNotifyEvent records statistics for the row event, converts its columns
// into UpdateColumn pairs (translating raw values via the column manager),
// and hands the result to Enqueue for the given task.
func (keh *KafkaEventHandler) genNotifyEvent(evt *Entity, taskName string) {
	keh.BinlogStatistics.IncStatistic(evt.Database, evt.Table, evt.EventType)
	log.Debugf("gen notify event: %+v", evt)
	updateColumns := make([]*UpdateColumn, 0, 10)
	switch toTitle(evt.EventType) {
	case model.INSERT_EVENT:
		updateColumns = keh.appendSingleSideColumns(updateColumns, evt, evt.AfterColumns, taskName)
	case model.DELETE_EVENT:
		// NOTE: mirrors the INSERT path — the row's values are placed in
		// AfterColumn and BeforeColumn is left empty, matching what the
		// downstream enqueue reads (it uses AfterColumn.Value).
		updateColumns = keh.appendSingleSideColumns(updateColumns, evt, evt.BeforeColumns, taskName)
	case model.UPDATE_EVENT:
		// UPDATE carries both images; pair them index by index.
		for i := 0; i < len(evt.BeforeColumns); i++ {
			beforeColumn := evt.BeforeColumns[i]
			afterColumn := evt.AfterColumns[i]
			keh.translateColumnValue(evt.Database, evt.Table, beforeColumn)
			keh.translateColumnValue(evt.Database, evt.Table, afterColumn)
			log.Debugf("%s %s %s %v", evt.Database, evt.Table, beforeColumn.Name, taskName)
			updateColumns = append(updateColumns, &UpdateColumn{
				Name:         beforeColumn.Name,
				BeforeColumn: beforeColumn,
				AfterColumn:  afterColumn,
			})
		}
	}
	keh.Enqueue(evt.Database, evt.Table, evt.EventType, updateColumns, taskName)
}

// appendSingleSideColumns translates and wraps the columns of a single-sided
// (INSERT or DELETE) row event, appending one UpdateColumn per column to dst.
// This factors out the previously duplicated INSERT/DELETE loops.
func (keh *KafkaEventHandler) appendSingleSideColumns(dst []*UpdateColumn, evt *Entity, columns []*Column, taskName string) []*UpdateColumn {
	for i := 0; i < len(columns); i++ {
		column := columns[i]
		keh.translateColumnValue(evt.Database, evt.Table, column)
		log.Debugf("%s %s %s %v", evt.Database, evt.Table, column.Name, taskName)
		dst = append(dst, &UpdateColumn{
			Name:         column.Name,
			BeforeColumn: new(Column),
			AfterColumn:  column,
		})
	}
	return dst
}
// Enqueue is the exported entry point for pushing one row event onto the
// queue of the named task; it delegates directly to enqueue.
func (keh *KafkaEventHandler) Enqueue(database, table, event string, updateColumns []*UpdateColumn, taskName string) {
	keh.enqueue(database, table, event, updateColumns, taskName)
}
// enqueue converts a row event into a model.NotifyEvent for the task named
// taskName and pushes it onto the task's queue. A column is included with
// its values (Fields) when the task field has Send == 1, otherwise only its
// name is recorded (Keys). UPDATE events are dropped entirely unless at
// least one relevant column actually changed; events with no relevant
// columns at all are dropped too.
func (keh *KafkaEventHandler) enqueue(database, table, event string, fields []*UpdateColumn, taskName string) {
	event = toTitle(event)
	// BUG FIX: taskName is a string — the verb was %d, which rendered as
	// "%!d(string=...)" in the debug log.
	log.Debugf("enqueue: %s.%s %s %s", database, table, event, taskName)
	ntyevt := new(model.NotifyEvent)
	ntyevt.Keys = make([]string, 0)
	ntyevt.Fields = make([]*model.ColumnValue, 0, 10)
	task := keh.taskManager.GetTask(taskName)
	if task == nil {
		return
	}
	updateChanged := false
	for _, f := range fields {
		tf := keh.taskManager.GetTaskField(task, database, table, f.Name)
		if tf == nil {
			// Column not subscribed by this task.
			continue
		}
		if event != model.UPDATE_EVENT {
			if tf.Send == 1 {
				newValue := model.ColumnValue{
					ColunmName: f.Name,
					Value:      f.AfterColumn.Value,
					OldValue:   f.BeforeColumn.Value,
				}
				ntyevt.Fields = append(ntyevt.Fields, &newValue)
			} else {
				ntyevt.Keys = append(ntyevt.Keys, f.Name)
			}
		} else {
			// If the field's values must be pushed, push them whether or not
			// the value changed; otherwise include the field only when it
			// actually changed.
			if tf.Send == 1 {
				newValue := model.ColumnValue{
					ColunmName: f.Name,
					Value:      f.AfterColumn.Value,
					OldValue:   f.BeforeColumn.Value,
				}
				ntyevt.Fields = append(ntyevt.Fields, &newValue)
				if f.AfterColumn.Updated {
					updateChanged = true
				}
			} else if f.AfterColumn.Updated {
				ntyevt.Keys = append(ntyevt.Keys, f.Name)
				updateChanged = true
			}
		}
	}
	if len(ntyevt.Fields) == 0 && len(ntyevt.Keys) == 0 {
		return
	} else if event == model.UPDATE_EVENT && !updateChanged {
		return
	}
	ntyevt.Schema = database
	ntyevt.Table = table
	ntyevt.Event = event
	ntyevt.TaskID = task.ID
	name := genTaskQueueName(task)
	keh.queue.Enqueue(name, ntyevt)
}
// translateColumnValue normalizes a raw binlog column value in place so it is
// compatible with the data produced by the binlog receiver: enum columns are
// mapped from their numeric index to the enum's string value, and text/blob
// columns are base64-encoded. Columns with no metadata are left untouched.
func (keh *KafkaEventHandler) translateColumnValue(schema, table string, column *Column) {
	// Robustness: metadata may be unavailable if initColumnManager failed;
	// leave the raw value untouched rather than dereference a nil manager.
	if keh.columnManager == nil {
		return
	}
	myColumn := keh.columnManager.GetColumnByName(schema, table, column.Name)
	if myColumn == nil {
		return
	}
	if myColumn.IsEnum() {
		index, err := strconv.Atoi(column.Value)
		if err != nil {
			// Not a numeric index — keep the raw value.
			return
		}
		column.Value = myColumn.GetEnumValue(index)
	} else if myColumn.IsText() || myColumn.IsBlob() {
		column.Value = base64.StdEncoding.EncodeToString([]byte(column.Value))
	}
}
|
// Write a program which prompts the user to enter a string. The program searches through
// the entered string for the characters ‘i’, ‘a’, and ‘n’. The program should print “Found!”
// if the entered string starts with the character ‘i’, ends with the character ‘n’, and contains
// the character ‘a’. The program should print “Not Found!” otherwise. The program should not be
// case-sensitive, so it does not matter if the characters are upper-case or lower-case.
// Examples:
// The program should print “Found!” for the following example entered strings, “ian”, “Ian”, “iuiygaygn”, “I d skd a efju N”.
// The program should print “Not Found!” for the following strings, “ihhhhhn”, “ina”, “xian”.
package main
import (
"bufio"
"fmt"
"os"
"strings"
)
// main reads one line from stdin and prints "Found!" when the text —
// compared case-insensitively — starts with 'i', ends with 'n', and contains
// an 'a'; otherwise it prints "Not Found!".
func main() {
	fmt.Print("Enter text: ")
	reader := bufio.NewScanner(os.Stdin)
	reader.Scan()
	input := strings.ToLower(reader.Text())
	result := "Not Found!"
	if strings.HasPrefix(input, "i") && strings.HasSuffix(input, "n") && strings.Contains(input, "a") {
		result = "Found!"
	}
	fmt.Println(result)
}
|
package api
// er is the generic error payload returned by API endpoints.
type er struct {
	Message string `json:"message"`
}
|
package users
import (
"dapan/dbx"
"dapan/model"
"log"
"net/http"
"github.com/gin-gonic/gin"
)
// GetUserList is a gin handler that responds with every user's username and
// role name, obtained by left-joining user_infos with user_roles.
func GetUserList(c *gin.Context) {
	db := dbx.DB
	// Row shape of the join result.
	type User struct {
		Username string
		Name     string
	}
	var res []User
	var u []model.UserInfo // kept for the commented-out model-based query below
	db.Table("user_infos").Select("user_infos.username, user_roles.name").Joins("left join user_roles on user_roles.id = user_infos.role_id").Scan(&res)
	// db.Table("user_info").Select("user_info.id as id, user_info.role_id as role_id").Joins("left join user_role on user_role.id = user_info.role_id").Find(&u)
	log.Print(u)
	log.Print(res)
	// BUG FIX: the handler previously returned `u`, which is never populated,
	// so the response data was always empty. Return the scanned rows instead.
	c.JSON(http.StatusOK, gin.H{
		"data": res,
	})
}
|
package _struct
import ("time"
"github.com/marni/goigc"
)
// Track is the JSON representation of a stored IGC track's metadata.
type Track struct {
	HeaderDate  time.Time `json:"Header date"`
	Pilot       string    `json:"Pilot"`
	Glider      string    `json:"Glider"`
	GliderId    string    `json:"Glider id"`
	TrackLength float64   `json:"Track length"`
}

// TrackDB is an in-memory track store keyed by track ID; Init must be called
// before use.
type TrackDB struct {
	tracks map[string]Track
}

// ID wraps a track identifier for JSON requests/responses.
type ID struct {
	ID string `json:"id"`
}

// URL wraps a track URL for JSON requests/responses.
type URL struct {
	URL string `json:"url"`
}

// IDs records the stored track IDs in insertion order.
var IDs []string

// Db is the process-wide track store.
var Db TrackDB

// LastUsed — presumably a counter/timestamp maintained elsewhere in the
// package; not referenced in this chunk. TODO confirm its role.
var LastUsed int
// Init prepares the store's internal map; it must run before Add or Get.
func (db *TrackDB) Init() {
	db.tracks = map[string]Track{}
}
// Add stores track t under the identifier i.ID and records that ID's
// insertion order in the package-level IDs slice.
func (db *TrackDB) Add(t Track, i ID) {
	IDs = append(IDs, i.ID)
	db.tracks[i.ID] = t
}
// Get looks up the track stored under keyID. The boolean reports whether the
// ID was present (it is the map "comma ok" flag — previously it was
// misleadingly named err).
func (db *TrackDB) Get(keyID string) (Track, bool) {
	track, ok := db.tracks[keyID]
	return track, ok
}
// CalculatedDistance returns the total length of a track: the sum of the
// point-to-point distances along its recorded GPS fixes.
func CalculatedDistance(track igc.Track) float64 {
	total := 0.0
	for i := 1; i < len(track.Points); i++ {
		total += track.Points[i-1].Distance(track.Points[i])
	}
	return total
}
|
package gorequests
import (
"net/http"
"net/url"
"sync"
)
type CookieJar struct {
sync.Mutex
cookies map[string][]*http.Cookie
}
func (cj *CookieJar) SetCookies(u *url.URL, cookies []*http.Cookie) {
cj.Lock()
defer cj.Unlock()
if cj.cookies == nil {
cj.Reset()
}
cj.cookies[u.Host] = cookies
}
func (cj *CookieJar) Cookies(u *url.URL) []*http.Cookie {
return cj.cookies[u.Host]
}
func (cj *CookieJar) Reset() {
cj.cookies = map[string][]*http.Cookie{}
}
|
//~3 7 0 0 48 0 3 7
package main
// main demonstrates Go's integer arithmetic and bitwise operators on the
// pair (3, 4); println writes to stderr. Expected output:
// 3 7 0 0 48 0 3 7
// (3%4=3, 3|4=7, 3&4=0 twice, 3<<4=48, 3>>4=0, 3&^4=3, 3^4=7)
func main() {
	a, b := 3, 4
	println(a%b, a|b, a&b, a&b, a<<b, a>>b, a&^b, a^b)
}
|
package main
import "fmt"
// main demonstrates Go slice semantics:
//   - a sub-slice shares the original backing array until an append grows past cap;
//   - copy performs a shallow, element-wise copy;
//   - copying a slice of slices copies only the headers, so inner arrays stay shared.
func main() {
	s := []int{1, 2, 3} // len = 3, cap = 3
	// [0:2)
	s1 := s[0:2] // [1,2]
	fmt.Println(s1)
	// s1 aliases s's backing array, so this write is visible through s.
	s1[0] = 100
	fmt.Println("s的内容是", s, "s1的内容是", s1)
	fmt.Printf("s1的len = %d, cap = %d, value = %v\n", len(s1), cap(s1), s1)
	// len(s1)==2 < cap(s1)==3: this append writes into the shared array,
	// overwriting s[2].
	s1 = append(s1, 200)
	fmt.Println("append之后,s的内容是", s, "s1的内容是", s1)
	fmt.Printf("append之后,s1的len = %d, cap = %d, value = %v\n", len(s1), cap(s1), s1)
	// This append exceeds the capacity: s1 moves to a fresh backing array
	// and no longer aliases s.
	s1 = append(s1, 300)
	fmt.Println("再次append之后,s的内容是", s, "s1的内容是", s1)
	fmt.Printf("再次append之后,s1的len = %d, cap = %d, value = %v\n", len(s1), cap(s1), s1)
	s2 := make([]int, 3) // s2 = [0,0,0]
	// Copy the values of s into s2, element by element.
	copy(s2, s)
	// s2 owns its backing array, so this write does not affect s.
	s2[0] = 0
	fmt.Printf("拷贝并调整s2成员之后,s的值为%v,s2的值为%v\n", s, s2)
	// Try copying a two-dimensional slice.
	s3 := make([][]int, 3)
	s3[0] = make([]int, 2)
	s3[1] = make([]int, 2)
	s3[2] = make([]int, 2)
	s4 := make([][]int, 3)
	// copy duplicates only the outer slice headers; s3[i] and s4[i] still
	// point at the same inner arrays, so the write below shows up in both.
	copy(s4, s3)
	s3[0][0] = 1000
	fmt.Printf("拷贝并调整s3成员之后,s3的值为%v,s4的值为%v\n", s3, s4)
}
|
package log
import (
"context"
"fmt"
"os"
"github.com/zdao-pro/sky_blue/pkg/env"
)
var (
	// h is the package-wide handler chain that all log functions write
	// through; it is (re)built by Init.
	h Handle
)
// Config controls the package logger: which levels print, which fields are
// filtered, and which output handlers are attached.
type Config struct {
	_debugPrintFlag bool
	_infoPrintFlag  bool
	_warnPrintFlag  bool
	_errorPrintFlag bool
	_fetalPrintFlag bool
	// Filter tell log handler which field are sensitive message, use * instead.
	Filter []string
	// file
	Dir string
	// stdout
	Stdout bool
	Nlog   bool
	Source bool
}
// logConfig holds the active configuration; all level checks read it.
var logConfig Config

// init installs a default configuration so the package is usable before any
// explicit Init call.
func init() {
	Init(nil)
}
// Init configures the package logger. A nil conf selects defaults (all
// levels on, stdout output, source info). Handlers are rebuilt: a UDP
// handler is added when UDP_LOG_ADDR/UDP_LOG_PORT are set, and a stdout
// handler when Stdout is enabled.
//
// NOTE(review): the per-level flags are unconditionally forced on below, so
// flag values in a caller-supplied conf are effectively ignored; only debug
// is switched off again for online environments. Confirm this is intended.
func Init(conf *Config) {
	if nil == conf {
		logConfig = Config{
			_infoPrintFlag:  true,
			_warnPrintFlag:  true,
			_errorPrintFlag: true,
			_fetalPrintFlag: true,
			_debugPrintFlag: true,
			Stdout:          true,
			Source:          true,
		}
	} else {
		logConfig = *conf
	}
	logConfig._infoPrintFlag = true
	logConfig._debugPrintFlag = true
	logConfig._errorPrintFlag = true
	logConfig._fetalPrintFlag = true
	logConfig._warnPrintFlag = true
	// Suppress debug output in production.
	if env.IsOnline() {
		logConfig._debugPrintFlag = false
	}
	// if !env.IsDev() {
	// 	logConfig.Stdout = true
	// }
	var hs []Handle
	//udp log handle
	udpServerAddr := os.Getenv("UDP_LOG_ADDR")
	udpServerPort := os.Getenv("UDP_LOG_PORT")
	if "" != udpServerAddr && "" != udpServerPort {
		nlogH := newNlogHnadle(udpServerAddr, udpServerPort)
		hs = append(hs, nlogH)
	}
	if logConfig.Stdout {
		hs = append(hs, newStdoutHandle())
	}
	h = newHandles(hs...)
}
//Debug writes a formatted message at debug level.
func Debug(format string, args ...interface{}) {
	if !logConfig._debugPrintFlag {
		return
	}
	h.Log(context.Background(), _debugLevel, KVString(_log, fmt.Sprintf(format, args...)))
}
//Info writes a formatted message at info level.
func Info(format string, args ...interface{}) {
	if !logConfig._infoPrintFlag {
		return
	}
	h.Log(context.Background(), _infoLevel, KVString(_log, fmt.Sprintf(format, args...)))
}
//Warn writes a formatted message at warning level.
func Warn(format string, args ...interface{}) {
	if !logConfig._warnPrintFlag {
		return
	}
	h.Log(context.Background(), _warnLevel, KVString(_log, fmt.Sprintf(format, args...)))
}
//Error writes a formatted message at error level.
func Error(format string, args ...interface{}) {
	if !logConfig._errorPrintFlag {
		return
	}
	h.Log(context.Background(), _errorLevel, KVString(_log, fmt.Sprintf(format, args...)))
}
//Fetal writes a formatted message at fatal ("fetal") level.
func Fetal(format string, args ...interface{}) {
	if !logConfig._fetalPrintFlag {
		return
	}
	h.Log(context.Background(), _fetalLevel, KVString(_log, fmt.Sprintf(format, args...)))
}
//Access writes a formatted access-log message. It is gated by the fetal
//flag because Config has no dedicated access flag.
func Access(format string, args ...interface{}) {
	if !logConfig._fetalPrintFlag {
		return
	}
	h.Log(context.Background(), _accessLevel, KVString(_log, fmt.Sprintf(format, args...)))
}
//Debugc is the context-aware variant of Debug.
func Debugc(ctx context.Context, format string, args ...interface{}) {
	if !logConfig._debugPrintFlag {
		return
	}
	h.Log(ctx, _debugLevel, KVString(_log, fmt.Sprintf(format, args...)))
}
//Infoc is the context-aware variant of Info.
func Infoc(ctx context.Context, format string, args ...interface{}) {
	if !logConfig._infoPrintFlag {
		return
	}
	h.Log(ctx, _infoLevel, KVString(_log, fmt.Sprintf(format, args...)))
}
//Warnc is the context-aware variant of Warn.
func Warnc(ctx context.Context, format string, args ...interface{}) {
	if !logConfig._warnPrintFlag {
		return
	}
	h.Log(ctx, _warnLevel, KVString(_log, fmt.Sprintf(format, args...)))
}
//Errorc is the context-aware variant of Error.
func Errorc(ctx context.Context, format string, args ...interface{}) {
	if !logConfig._errorPrintFlag {
		return
	}
	h.Log(ctx, _errorLevel, KVString(_log, fmt.Sprintf(format, args...)))
}
//Fetalc is the context-aware variant of Fetal.
func Fetalc(ctx context.Context, format string, args ...interface{}) {
	if !logConfig._fetalPrintFlag {
		return
	}
	h.Log(ctx, _fetalLevel, KVString(_log, fmt.Sprintf(format, args...)))
}
//Accessc is the context-aware variant of Access; like Access it is gated
//by the fetal flag because Config has no dedicated access flag.
func Accessc(ctx context.Context, format string, args ...interface{}) {
	if !logConfig._fetalPrintFlag {
		return
	}
	h.Log(ctx, _accessLevel, KVString(_log, fmt.Sprintf(format, args...)))
}
// Debugv logs structured key-value pairs at the debug log level.
func Debugv(ctx context.Context, args ...D) {
	if !logConfig._debugPrintFlag {
		return
	}
	h.Log(ctx, _debugLevel, args...)
}
// Infov logs structured key-value pairs at the info log level.
func Infov(ctx context.Context, args ...D) {
	if !logConfig._infoPrintFlag {
		return
	}
	h.Log(ctx, _infoLevel, args...)
}
// Warnv logs structured key-value pairs at the warning log level.
func Warnv(ctx context.Context, args ...D) {
	if !logConfig._warnPrintFlag {
		return
	}
	h.Log(ctx, _warnLevel, args...)
}
// Errorv logs structured key-value pairs at the error log level.
func Errorv(ctx context.Context, args ...D) {
	if !logConfig._errorPrintFlag {
		return
	}
	h.Log(ctx, _errorLevel, args...)
}
// Fatalv logs structured key-value pairs at the fatal ("fetal") log level.
func Fatalv(ctx context.Context, args ...D) {
	if !logConfig._fetalPrintFlag {
		return
	}
	h.Log(ctx, _fetalLevel, args...)
}
|
package model
import (
"Seaman/utils"
"time"
)
// TplAreaT is the xorm model for the tpl_area_t table; the struct tags
// carry the column DDL (types, indexes and column comments). Rows form a
// tree via ParentId and are scoped to an application/tenant.
type TplAreaT struct {
	Id int64 `xorm:"pk autoincr BIGINT(20)"`
	Code string `xorm:"not null comment('编号') VARCHAR(100)"`
	Desp string `xorm:"not null comment('描述') VARCHAR(100)"`
	Type string `xorm:"comment('类型') VARCHAR(50)"`
	ParentId int64 `xorm:"not null comment('父节点ID') index BIGINT(20)"`
	Revision int64 `xorm:"not null comment('版本号') BIGINT(20)"`
	CreateUserId string `xorm:"not null comment('创建用户ID') VARCHAR(36)"`
	CreateDate time.Time `xorm:"comment('创建时间') DATETIME"`
	LastUpdateUserId string `xorm:"not null comment('最后更新用户ID') VARCHAR(36)"`
	LastUpdateDate time.Time `xorm:"comment('最后修改时间') DATETIME"`
	AppName string `xorm:"not null comment('应用名') VARCHAR(32)"`
	TenantId string `xorm:"comment('多租户ID') VARCHAR(32)"`
	AppScope string `xorm:"comment('系统群名') VARCHAR(32)"`
}
// tplAreaTToRespDesc converts a TplAreaT database row into the
// map[string]interface{} shape (snake_case keys) expected by JSON request
// responses, formatting the two timestamps via utils.FormatDatetime.
func (tplAreaT *TplAreaT) tplAreaTToRespDesc() interface{} {
	resp := make(map[string]interface{}, 13)
	resp["id"] = tplAreaT.Id
	resp["code"] = tplAreaT.Code
	resp["desp"] = tplAreaT.Desp
	resp["type"] = tplAreaT.Type
	resp["parent_id"] = tplAreaT.ParentId
	resp["revision"] = tplAreaT.Revision
	resp["tenant_id"] = tplAreaT.TenantId
	resp["app_name"] = tplAreaT.AppName
	resp["app_scope"] = tplAreaT.AppScope
	resp["create_date"] = utils.FormatDatetime(tplAreaT.CreateDate)
	resp["last_update_date"] = utils.FormatDatetime(tplAreaT.LastUpdateDate)
	resp["create_user_id"] = tplAreaT.CreateUserId
	resp["last_update_user_id"] = tplAreaT.LastUpdateUserId
	return resp
}
|
package ui
import (
"github.com/askovpen/goated/pkg/msgapi"
"github.com/askovpen/gocui"
)
var (
	// App is the application's gocui GUI instance.
	App *gocui.Gui
	// AreaPosition is the current cursor position in the area list.
	AreaPosition uint16
	// ActiveWindow is the name of the window that currently has focus.
	ActiveWindow string
	parentWindow string // presumably the window to restore on close — TODO confirm
	curAreaID int // index of the currently open message area
	curMsgNum uint32 // number of the currently displayed message
	showKludges bool // whether to display message kludge lines
	// StatusLine is the text shown in the status bar.
	StatusLine string
	// StatusTime is the time text shown in the status bar.
	StatusTime string
	newMsg *msgapi.Message // message currently being composed
	newMsgType int // one of the newMsgType* constants below
	newMsgAreaID int // target area for the message being composed
)
// newMsgType* classify how message composition was initiated.
const (
	newMsgTypeAnswer = 1
	newMsgTypeAnswerNewArea = 2
	newMsgTypeForward = 4
)
// Quit terminates the gocui main loop by returning gocui.ErrQuit. The g and
// v parameters are unused; the signature is required by gocui keybindings.
func Quit(g *gocui.Gui, v *gocui.View) error {
	return gocui.ErrQuit
}
|
/**
 * Establishes the connection to the database.
 */
package config
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
"log"
"os"
)
// ConfigDB holds the MySQL connection settings, populated from MYSQL_*
// environment variables by ConnectDB.
type ConfigDB struct {
	User string
	Password string
	Host string
	Port string
	Dbname string
}
// ConnectDB opens a gorm MySQL connection using the MYSQL_* environment
// variables and returns the initialized *gorm.DB.
//
// Fix: the previous version panicked on failure, which made its `return
// nil, err` unreachable and prevented callers from handling the error via
// the declared error return. It now logs and returns the error instead.
func ConnectDB() (*gorm.DB, error) {
	config := ConfigDB{
		User:     os.Getenv("MYSQL_USER"),
		Password: os.Getenv("MYSQL_PASSWORD"),
		Host:     os.Getenv("MYSQL_CONTAINER_NAME"),
		Port:     os.Getenv("MYSQL_PORT"),
		Dbname:   os.Getenv("MYSQL_DATABASE"),
	}
	// user:password@tcp(host:port)/dbname?parseTime=true
	dsn := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?parseTime=true", config.User, config.Password, config.Host, config.Port, config.Dbname)
	db, err := gorm.Open("mysql", dsn)
	if err != nil {
		log.Println(err)
		return nil, err
	}
	return db, nil
}
|
package backends
import (
"encoding/json"
"io/ioutil"
"log"
"github.com/schachmat/wego/iface"
)
// jsnConfig is a weather backend that reads previously saved data from a
// local JSON file instead of querying a network service.
type jsnConfig struct {
}

// Setup is a no-op: the json backend needs no configuration.
func (c *jsnConfig) Setup() {
}
// Fetch will try to open the file specified in the location string argument and
// read it as json content to fill the data. The numdays argument will only work
// to further limit the amount of days in the output. It obviously cannot
// produce more data than is available in the file.
//
// NOTE(review): a read or parse failure terminates the whole process via
// log.Fatal; consider surfacing an error if the iface contract allows it.
func (c *jsnConfig) Fetch(loc string, numdays int) (ret iface.Data) {
	b, err := ioutil.ReadFile(loc)
	if err != nil {
		log.Fatal(err)
	}
	err = json.Unmarshal(b, &ret)
	if err != nil {
		log.Fatal(err)
	}
	// Trim the forecast to the requested number of days. Fix: guard against
	// negative numdays, which previously caused a slice-bounds panic on
	// ret.Forecast[:numdays].
	if numdays >= 0 && len(ret.Forecast) > numdays {
		ret.Forecast = ret.Forecast[:numdays]
	}
	return
}
// init registers the file-based backend under the "json" key so it can be
// selected like any other weather backend.
func init() {
	iface.AllBackends["json"] = &jsnConfig{}
}
|
package skyhook
import (
"reflect"
"testing"
)
// TestConversion checks that Go values (strings and a Go callback) are
// converted into the script environment and that script outputs convert
// back: the script appends " world" plus the callback's "!" to the input.
func TestConversion(t *testing.T) {
	data := []byte(`output = input + " world" + bang()`)
	// Stub the file reader so no script file needs to exist on disk.
	read := func(string) ([]byte, error) { return data, nil }
	bang := func() string { return "!" }
	s := New([]string{"bar"})
	s.readFile = read
	actual, err := s.Run("foo.sky", map[string]interface{}{
		"input": "hello",
		"bang": bang,
	})
	if err != nil {
		t.Fatal(err)
	}
	// The callback itself is not expected back in the result map.
	expected := map[string]interface{}{
		"input": "hello",
		"output": "hello world!",
	}
	if len(actual) != len(expected) {
		t.Errorf("expected %d items, but got %d", len(expected), len(actual))
	}
	for k, v := range expected {
		act, ok := actual[k]
		if !ok {
			t.Errorf("actual missing key %q", k)
			continue
		}
		if !reflect.DeepEqual(act, v) {
			t.Errorf("actual value for key %q expected to be %v but was %v", k, v, act)
		}
	}
}
// TestDirOrder verifies that when the same script exists in several
// directories, the earlier directory in the list wins (testdata/foo.sky
// shadows testdata/later/foo.sky).
func TestDirOrder(t *testing.T) {
	s := New([]string{"testdata", "testdata/later"})
	v, err := s.Run("foo.sky", map[string]interface{}{"input": "hello"})
	if err != nil {
		t.Fatal(err)
	}
	expected, actual := "hello world", v["output"]
	if actual != expected {
		t.Fatalf("expected %q but got %q", expected, actual)
	}
}
// TestAllDirs verifies that a script found only in a later directory
// (testdata/later/bar.sky) is still located and executed.
func TestAllDirs(t *testing.T) {
	s := New([]string{"testdata", "testdata/later"})
	v, err := s.Run("bar.sky", map[string]interface{}{"input": "hello"})
	if err != nil {
		t.Fatal(err)
	}
	expected, actual := "hello from bar.sky", v["output"]
	if actual != expected {
		t.Fatalf("expected %q but got %q", expected, actual)
	}
}
// TestFunc checks that a function defined inside the script itself can be
// called and its result used in the script's output.
func TestFunc(t *testing.T) {
	data := []byte(`
def foo():
	return " world!"
output = input + foo()
`)
	// Stub the file reader so no script file needs to exist on disk.
	read := func(string) ([]byte, error) { return data, nil }
	s := New([]string{"bar"})
	s.readFile = read
	actual, err := s.Run("foo.sky", map[string]interface{}{"input": "hello"})
	if err != nil {
		t.Fatal(err)
	}
	expected := map[string]interface{}{
		"input": "hello",
		"output": "hello world!",
	}
	if len(actual) != len(expected) {
		t.Errorf("expected %d items, but got %d", len(expected), len(actual))
	}
	for k, v := range expected {
		act, ok := actual[k]
		if !ok {
			t.Errorf("actual missing key %q", k)
			continue
		}
		if !reflect.DeepEqual(act, v) {
			t.Errorf("actual value for key %q expected to be %v but was %v", k, v, act)
		}
	}
}
// TestRerun exercises the script cache: re-running with new inputs works,
// editing the script source is invisible until the cache entry is dropped
// with Forget, and Reset clears every cached script.
func TestRerun(t *testing.T) {
	data := []byte(`output = input + " world!"`)
	read := func(string) ([]byte, error) { return data, nil }
	s := New([]string{"bar"})
	s.readFile = read
	actual, err := s.Run("foo.sky", map[string]interface{}{
		"input": "hello",
	})
	if err != nil {
		t.Fatal(err)
	}
	if actual["output"] != "hello world!" {
		t.Fatalf(`expected "hello world!" but got %q`, actual["output"])
	}
	// change inputs but not script
	actual, err = s.Run("foo.sky", map[string]interface{}{
		"input": "goodbye",
	})
	if err != nil {
		t.Fatal(err)
	}
	if actual["output"] != "goodbye world!" {
		t.Fatalf(`expected "goodbye world!" but got %q`, actual["output"])
	}
	// change script, shouldn't change output since we cached it
	data = []byte(`output = "hi!"`)
	actual, err = s.Run("foo.sky", map[string]interface{}{
		"input": "goodbye",
	})
	if err != nil {
		t.Fatal(err)
	}
	if actual["output"] != "goodbye world!" {
		t.Fatalf(`expected "goodbye world!" but got %q`, actual["output"])
	}
	// remove script, should change output
	s.Forget("foo.sky")
	actual, err = s.Run("foo.sky", map[string]interface{}{
		"input": "goodbye",
	})
	if err != nil {
		t.Fatal(err)
	}
	if actual["output"] != "hi!" {
		t.Fatalf(`expected "hi!" but got %q`, actual["output"])
	}
	// reset all, should change output
	s.Reset()
	data = []byte(`output = "bye!"`)
	actual, err = s.Run("foo.sky", map[string]interface{}{
		"input": "goodbye",
	})
	if err != nil {
		t.Fatal(err)
	}
	if actual["output"] != "bye!" {
		t.Fatalf(`expected "bye!" but got %q`, actual["output"])
	}
}
// TestEval checks the one-shot Eval entry point: it runs inline script
// source (no file lookup) against the supplied globals.
func TestEval(t *testing.T) {
	v, err := Eval(`output = hi()`, map[string]interface{}{
		"hi": func() string { return "hi!" },
	})
	if err != nil {
		t.Fatal(err)
	}
	if v["output"] != "hi!" {
		t.Fatalf(`expected "hi!" but got %q`, v["output"])
	}
}
|
package token
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"strconv"
"strings"
)
// Token is an HMAC-SHA256-authenticated credential. raw is the signed
// comma-separated payload (random prefix, client, user, scopes, device,
// key timestamp); hmac is the signature over raw; scopes is the granted
// scope set used by CheckScopes.
type Token struct {
	raw string
	clientId string
	userName string
	scopes map[string]bool
	deviceId string
	ts int64
	hmac []byte
}
// Error is a JSON-serializable error value carrying a numeric code and a
// human-readable message.
type Error struct {
	ErrCode int16 `json:"err_code"`
	ErrMsg string `json:"err_msg"`
}

// Error renders the value as its JSON encoding, or an empty string if
// marshalling fails.
func (e Error) Error() string {
	data, marshalErr := json.Marshal(e)
	if marshalErr != nil {
		return ""
	}
	return string(data)
}
// NewToken mints a token for the given client, user, scopes and device
// using the current signing key. The raw payload is a comma-separated
// string with a random prefix and the key timestamp as suffix, and is
// authenticated with HMAC-SHA256.
func NewToken(clientId string, userName string, scopes []string, deviceId string) (*Token, error) {
	ts, key := GetKey()
	raw := fmt.Sprintf("%s,%s,%s,%s,%s,%d",
		getRandString(10), clientId, userName, strings.Join(scopes, " "), deviceId, ts)
	mac := hmac.New(sha256.New, []byte(key))
	mac.Write([]byte(raw))
	tk := &Token{
		raw:      raw,
		clientId: clientId,
		userName: userName,
		scopes:   map[string]bool{},
		deviceId: deviceId,
		ts:       ts,
		hmac:     mac.Sum(nil),
	}
	for _, scope := range scopes {
		tk.scopes[scope] = true
	}
	return tk, nil
}
// NewStaticToken reconstructs a Token from already-parsed components
// (typically produced by ParseToken). The supplied hmac is stored as-is,
// not recomputed or verified here.
func NewStaticToken(randValue string, clientId string, userName string, scopeStr string, deviceId string, ts int64, hmac []byte) (*Token, error) {
	tk := &Token{
		raw:      fmt.Sprintf("%s,%s,%s,%s,%s,%d", randValue, clientId, userName, scopeStr, deviceId, ts),
		clientId: clientId,
		userName: userName,
		scopes:   map[string]bool{},
		deviceId: deviceId,
		ts:       ts,
		hmac:     hmac,
	}
	for _, scope := range strings.Split(scopeStr, " ") {
		tk.scopes[scope] = true
	}
	return tk, nil
}
// ParseToken validates a "payload|hex-hmac" token string: it re-derives
// the signing key from the timestamp embedded in the payload, recomputes
// the HMAC-SHA256 over the payload and compares it in constant time with
// the supplied signature before rebuilding the Token.
func ParseToken(token string) (*Token, error) {
	parts := strings.Split(token, "|")
	if len(parts) != 2 {
		return nil, NewError(ERROR_TOKEN_INVALIDE)
	}
	raw, hexMac := parts[0], parts[1]
	fields := strings.Split(raw, ",")
	if len(fields) != 6 {
		return nil, NewError(ERROR_TOKEN_INVALIDE)
	}
	ts, err := strconv.ParseInt(fields[5], 10, 64)
	if err != nil {
		return nil, NewError(ERROR_TOKEN_INVALIDE)
	}
	key, ok := GetKeyByTs(ts)
	if !ok {
		// The signing key for this timestamp has rotated out.
		return nil, NewError(ERROR_TOKEN_EXPIRED)
	}
	mac := hmac.New(sha256.New, []byte(key))
	mac.Write([]byte(raw))
	want, err := hex.DecodeString(hexMac)
	if err != nil || !hmac.Equal(mac.Sum(nil), want) {
		return nil, NewError(ERROR_TOKEN_INVALIDE)
	}
	return NewStaticToken(fields[0], fields[1], fields[2], fields[3], fields[4], ts, want)
}
// String serializes the token as "raw|hex(hmac)", the wire format that
// ParseToken understands.
func (self *Token) String() string {
	return self.raw + "|" + hex.EncodeToString(self.hmac)
}

// CheckScopes reports whether every requested scope was granted to this token.
func (self *Token) CheckScopes(scopes []string) bool {
	for _, scope := range scopes {
		if _, granted := self.scopes[scope]; !granted {
			return false
		}
	}
	return true
}
|
/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package blobserver
import (
"sync"
"time"
"camlistore.org/pkg/blob"
)
// BlobHub is a notification hub for blob arrivals on one storage
// implementation: storage code announces received blobs, and consumers
// subscribe either to all blobs or to one specific blob ref.
type BlobHub interface {
	// For new blobs to notify
	NotifyBlobReceived(blob blob.Ref)
	// RegisterListener subscribes ch to every future blob notification;
	// UnregisterListener removes that subscription.
	RegisterListener(ch chan<- blob.Ref)
	UnregisterListener(ch chan<- blob.Ref)
	// RegisterBlobListener subscribes ch to notifications for one specific
	// blob ref; UnregisterBlobListener removes that subscription.
	RegisterBlobListener(blob blob.Ref, ch chan<- blob.Ref)
	UnregisterBlobListener(blob blob.Ref, ch chan<- blob.Ref)
}
var (
	hubmu sync.Mutex // guards stohub
	stohub = map[interface{}]BlobHub{} // Storage -> hub
)
// GetHub returns the BlobHub associated with the given storage
// implementation, lazily creating and caching an in-memory hub on first use.
func GetHub(storage interface{}) BlobHub {
	hubmu.Lock()
	defer hubmu.Unlock()
	if hub, ok := stohub[storage]; ok {
		return hub
	}
	hub := new(memHub)
	stohub[storage] = hub
	return hub
}
// canLongPoll is set to false on App Engine. (multi-process environment...)
var canLongPoll = true
// WaitForBlob waits until deadline for blobs to arrive. If blobs is empty, any
// blobs are waited on. Otherwise, those specific blobs are waited on.
// When WaitForBlob returns, nothing may have happened.
func WaitForBlob(storage interface{}, deadline time.Time, blobs []blob.Ref) {
	hub := GetHub(storage)
	// Buffered so the notifier goroutine can deliver even after we return.
	ch := make(chan blob.Ref, 1)
	if len(blobs) == 0 {
		hub.RegisterListener(ch)
		defer hub.UnregisterListener(ch)
	}
	for _, br := range blobs {
		hub.RegisterBlobListener(br, ch)
		defer hub.UnregisterBlobListener(br, ch)
	}
	// tc stays nil when long-polling is allowed; receiving from a nil
	// channel blocks forever, so that select case is effectively disabled.
	var tc <-chan time.Time
	if !canLongPoll {
		tc = time.After(2 * time.Second)
	}
	// Wake on: a blob notification, the short-poll cap, or the deadline.
	select {
	case <-ch:
	case <-tc:
	case <-time.After(deadline.Sub(time.Now())):
	}
}
// memHub is the in-memory, single-process BlobHub implementation. Both
// listener maps are lazily allocated and guarded by l; blobListeners is
// keyed by the blob ref's string form.
type memHub struct {
	l sync.Mutex
	listeners map[chan<- blob.Ref]bool
	blobListeners map[string]map[chan<- blob.Ref]bool
}
// NotifyBlobReceived wakes every registered listener: all global listeners
// plus any listeners subscribed to this specific blob ref. Delivery happens
// on a separate goroutine so slow listeners cannot block the notifier.
func (h *memHub) NotifyBlobReceived(br blob.Ref) {
	h.l.Lock()
	defer h.l.Unlock()
	var targets []chan<- blob.Ref
	for ch := range h.listeners {
		targets = append(targets, ch)
	}
	// Reading from a nil blobListeners map is safe and yields no entries.
	if set, ok := h.blobListeners[br.String()]; ok {
		for ch := range set {
			targets = append(targets, ch)
		}
	}
	if len(targets) == 0 {
		return
	}
	go func() {
		for _, ch := range targets {
			ch <- br
		}
	}()
}
// RegisterListener subscribes ch to notifications for every received blob,
// lazily allocating the listener set on first use.
func (h *memHub) RegisterListener(ch chan<- blob.Ref) {
	h.l.Lock()
	defer h.l.Unlock()
	if h.listeners == nil {
		h.listeners = map[chan<- blob.Ref]bool{}
	}
	h.listeners[ch] = true
}

// UnregisterListener removes a channel added via RegisterListener; calling
// it before any registration is a programmer error and panics.
func (h *memHub) UnregisterListener(ch chan<- blob.Ref) {
	h.l.Lock()
	defer h.l.Unlock()
	if h.listeners == nil {
		panic("blobhub: UnregisterListener called without RegisterListener")
	}
	delete(h.listeners, ch)
}
// RegisterBlobListener subscribes ch to notifications for one specific blob
// ref, lazily allocating the outer map and the per-blob set as needed.
func (h *memHub) RegisterBlobListener(br blob.Ref, ch chan<- blob.Ref) {
	h.l.Lock()
	defer h.l.Unlock()
	if h.blobListeners == nil {
		h.blobListeners = map[string]map[chan<- blob.Ref]bool{}
	}
	key := br.String()
	set, ok := h.blobListeners[key]
	if !ok {
		set = map[chan<- blob.Ref]bool{}
		h.blobListeners[key] = set
	}
	set[ch] = true
}

// UnregisterBlobListener removes a per-blob subscription added via
// RegisterBlobListener, pruning the per-blob set once it becomes empty.
// Unbalanced calls are programmer errors and panic.
func (h *memHub) UnregisterBlobListener(br blob.Ref, ch chan<- blob.Ref) {
	h.l.Lock()
	defer h.l.Unlock()
	if h.blobListeners == nil {
		panic("blobhub: UnregisterBlobListener called without RegisterBlobListener")
	}
	key := br.String()
	set, ok := h.blobListeners[key]
	if !ok {
		panic("blobhub: UnregisterBlobListener called without RegisterBlobListener for " + key)
	}
	delete(set, ch)
	if len(set) == 0 {
		delete(h.blobListeners, key)
	}
}
// memHubPartitionMap lazily owns a partition's BlobHub; hubLock guards the
// one-time creation in GetBlobHub.
type memHubPartitionMap struct {
	hubLock sync.Mutex
	hub BlobHub
}
// GetBlobHub returns the partition's hub, creating an in-memory one on
// first call.
func (spm *memHubPartitionMap) GetBlobHub() BlobHub {
	spm.hubLock.Lock()
	defer spm.hubLock.Unlock()
	if spm.hub == nil {
		// TODO: in the future, allow for different blob hub
		// implementations rather than the
		// everything-in-memory-on-a-single-machine memHub.
		spm.hub = new(memHub)
	}
	return spm.hub
}
|
package 模拟
// findDiagonalOrder returns the elements of matrix in zig-zag diagonal
// order (LeetCode 498): diagonals alternate between an up-right pass and a
// down-left pass. Each loop round emits one such pair of diagonals.
func findDiagonalOrder(matrix [][]int) []int {
	if len(matrix) == 0 {
		return []int{}
	}
	rows, cols := len(matrix), len(matrix[0])
	result := make([]int, 0, rows*cols)
	diag := 0 // x+y of the diagonal currently being emitted
	for round := (rows + cols) / 2; round > 0; round-- {
		// Up-right pass: start at the lowest valid row on this diagonal.
		for x := min(diag, rows-1); x >= 0 && diag-x < cols; x-- {
			result = append(result, matrix[x][diag-x])
		}
		diag++
		// Down-left pass: start at the rightmost valid column.
		for y := min(diag, cols-1); y >= 0 && diag-y < rows; y-- {
			result = append(result, matrix[diag-y][y])
		}
		diag++
	}
	return result
}

// min returns the smaller of a and b.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
/*
Summary
1. This is a straightforward simulation problem. The approach: treat one upward plus one downward diagonal as a single round, appending elements to the result set while traversing.
2. The code is not yet as clean as it could be; I will revise it next.
*/
|
package analyzer
import (
"fmt"
"go/ast"
"go/types"
"path/filepath"
"reflect"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
)
// InterfaceMustBePtr is the analyzer entry point; it depends on the shared
// inspect pass for AST traversal.
var InterfaceMustBePtr = &analysis.Analyzer{
	Name: "interfacemustbeptr",
	Doc: "Checks that calls that take an interface{} are passed a pointer",
	Run: run,
	Requires: []*analysis.Analyzer{inspect.Analyzer},
}
// interfaceMustBePtrTargets maps a package-level function to the argument
// positions (0-based) that must receive pointers, e.g. json.Unmarshal's
// second argument.
var interfaceMustBePtrTargets = map[call][]int{
	{pkg: "encoding/json", fxn: "Unmarshal"}: {1},
}
// call identifies a function by import path and name.
type call struct {
	pkg, fxn string
}
// run implements the interfacemustbeptr analyzer. It walks every node with
// the inspector, tracking the current type scope and the current file's
// imports, matches selector calls (pkg.Fxn) against the configured targets,
// and inspects whether the flagged argument positions hold pointer values.
// The function is heavily instrumented with debug prints and does not yet
// report diagnostics through pass.Report — it appears to be work in
// progress.
func run(pass *analysis.Pass) (interface{}, error) {
	inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
	// imports maps local import name -> import path for the current file.
	var imports map[string]string
	var currentScope *types.Scope
	// NOTE(review): testing the printed form's first byte is a crude pointer
	// check; asserting to *types.Pointer would be more robust.
	isPointer := func(t types.Type) bool {
		return t.String()[0] == '*'
	}
	// scope2Node reverse-maps a *types.Scope to its AST node by linear
	// search over the pass's scope table.
	scope2Node := func(target *types.Scope) ast.Node {
		for node, scope := range pass.TypesInfo.Scopes {
			if scope == target {
				return node
			}
		}
		return nil
	}
	inspect := func(node ast.Node, push bool, stack []ast.Node) bool {
		if node == nil {
			return true
		}
		// Only handle nodes on the way down the tree (push == true).
		if push == false {
			return true
		}
		fmt.Println("=", node, push, reflect.TypeOf(node))
		scope, hasScope := pass.TypesInfo.Scopes[node]
		if hasScope {
			currentScope = scope
		}
		// New file: reset the per-file import table.
		if file, ok := node.(*ast.File); ok {
			fmt.Println("🎸", file.Name, push)
			imports = make(map[string]string)
			return true
		}
		if funcDecl, ok := node.(*ast.FuncDecl); ok {
			fmt.Println("___", funcDecl.Name.Name, "____")
			return true
		}
		// Record each import under its local name (alias or path base).
		if importSpec, ok := node.(*ast.ImportSpec); ok {
			value := importSpec.Path.Value
			value = value[1 : len(value)-1] // strip quotes
			var name string
			if importSpec.Name != nil {
				name = importSpec.Name.Name
			} else {
				name = filepath.Base(value)
			}
			imports[name] = value
			return true
		}
		callExpr, ok := node.(*ast.CallExpr)
		if !ok {
			return true
		}
		fmt.Println("call__", callExpr, callExpr.Fun, reflect.TypeOf(callExpr.Fun))
		// Only qualified calls (pkg.Fxn) are interesting; plain identifier
		// calls and other callee shapes are skipped.
		selector, ok := callExpr.Fun.(*ast.SelectorExpr)
		if ok {
			goto OK
		} else if ident, ok := callExpr.Fun.(*ast.Ident); ok {
			fmt.Println("Ident call", ident)
			return true
		} else {
			return true
		}
	OK:
		fmt.Println("selector name", selector.Sel.Name)
		x := selector.X.(*ast.Ident)
		// TODO handle methods x.Obj :)
		// fmt.Println("x", x, reflect.TypeOf(x))
		// fmt.Println("x.Name", x.Name, reflect.TypeOf(x.Name))
		pkg, ok := imports[x.Name]
		if !ok {
			err := fmt.Errorf("couldn't resolve identifier %s to import", x.Name)
			panic(err)
		}
		c := call{pkg: pkg, fxn: selector.Sel.Name}
		argNumbers, ok := interfaceMustBePtrTargets[c]
		if ok {
			// fmt.Printf("matched! %s.%s\n", c.pkg, c.fxn)
		} else {
			return true
		}
		fmt.Println("(")
		// Examine each argument position configured for this target call.
		for _, argNumber := range argNumbers {
			arg := callExpr.Args[argNumber]
			fmt.Println("arg", argNumber, arg, reflect.TypeOf(arg))
			t := pass.TypesInfo.TypeOf(arg)
			isPtr := isPointer(t)
			fmt.Println("arg isPtr?", isPtr)
			if ident, ok := arg.(*ast.Ident); ok {
				o := pass.TypesInfo.ObjectOf(ident)
				fmt.Println("ident is object of", o)
				fmt.Println("	", o.Parent())
				fmt.Println("decl", ident.Obj.Decl, reflect.TypeOf(ident.Obj.Decl))
				// If the identifier was declared as a function parameter,
				// try to find which parameter it is in the declaring func.
				if decl, ok := ident.Obj.Decl.(*ast.Field); ok {
					fmt.Println("pos", decl.Pos())
					// pass.TypesInfo.ObjectOf(decl)
					from := scope2Node(o.Parent())
					if from != nil {
						fmt.Println("from ", (from), reflect.TypeOf(from))
						// from.(*ast.FuncType)
						if fxn, ok := from.(*ast.FuncType); ok {
							for _, item := range fxn.Params.List {
								fmt.Println("item", item, reflect.TypeOf(item))
								for i, name := range item.Names {
									fmt.Println("name", name, reflect.TypeOf(name))
									if name.Name == decl.Names[0].Name {
										fmt.Println("name is taint", i, name)
									}
								}
							}
							fmt.Println("params", fxn.Params.NumFields())
						}
					}
				}
				fmt.Println("ident name", ident.Name)
				fmt.Println("ident obj", ident.Obj)
				fmt.Println("ident obj kind", ident.Obj.Kind, reflect.TypeOf(ident.Obj.Kind))
				fmt.Println("ident obj decl", ident.Obj.Decl, reflect.TypeOf(ident.Obj.Decl))
				if currentScope == nil {
					panic("nil scope")
				}
				_, obj := currentScope.LookupParent(ident.Name, ident.NamePos)
				if obj == nil {
					fmt.Println("LookupParent is nil") // error
				} else {
					isPtr := isPointer(obj.Type())
					// NOTE(review): this Types[nil] lookup yields the zero
					// TypeAndValue and its result is discarded — looks like
					// leftover debugging; confirm and remove.
					pass.TypesInfo.Types[nil].IsValue()
					fmt.Println("obj isPtr?", isPtr, obj.Type())
				}
			} else if callExpr, ok := arg.(*ast.CallExpr); ok {
				fmt.Println("callExpr", callExpr)
			}
			fmt.Println(",")
		}
		fmt.Println(")")
		return true
	}
	inspector.WithStack(nil, inspect)
	return nil, nil
}
|
package mysql
// NewMysql is an empty placeholder; it currently performs no work.
// TODO(review): implement the MySQL initialization or remove this stub.
func NewMysql() {
}
|
package factories
import (
sdk "github.com/identityOrg/oidcsdk"
"net/url"
"time"
)
type (
	// DefaultTokenRequestContext is a plain-struct implementation of the
	// SDK's token-request context: it carries the parsed OAuth2 token
	// request (grant type, client credentials, codes/tokens, scopes),
	// the resolution results (Client, Profile), the tokens issued while
	// processing, and any error recorded along the way. The accessor
	// methods below expose it to the SDK.
	DefaultTokenRequestContext struct {
		RequestID string
		PreviousRequestID string
		RequestedAt time.Time
		State string
		RedirectURI string
		GrantType string
		ClientId string
		ClientSecret string
		Username string
		Password string
		AuthorizationCode string
		RefreshToken string
		RequestedScopes sdk.Arguments
		RequestedAudience sdk.Arguments
		Claims map[string]interface{}
		Client sdk.IClient
		Profile sdk.RequestProfile
		IssuedTokens sdk.Tokens
		Error sdk.IError
		Form *url.Values
	}
)
// GetError returns the error recorded on this request, if any.
func (d *DefaultTokenRequestContext) GetError() sdk.IError {
	return d.Error
}
// SetError records an error on this request.
func (d *DefaultTokenRequestContext) SetError(err sdk.IError) {
	d.Error = err
}
// SetPreviousRequestID links this request to an earlier one.
func (d *DefaultTokenRequestContext) SetPreviousRequestID(id string) {
	d.PreviousRequestID = id
}
// GetPreviousRequestID returns the linked earlier request's ID.
func (d *DefaultTokenRequestContext) GetPreviousRequestID() (id string) {
	return d.PreviousRequestID
}
// GetIssuedTokens returns all tokens issued so far for this request.
func (d *DefaultTokenRequestContext) GetIssuedTokens() sdk.Tokens {
	return d.IssuedTokens
}
// IssueAccessToken records a newly issued access token.
func (d *DefaultTokenRequestContext) IssueAccessToken(token string, signature string, expiry time.Time) {
	d.IssuedTokens.AccessToken = token
	d.IssuedTokens.AccessTokenSignature = signature
	d.IssuedTokens.AccessTokenExpiry = expiry
}
// IssueAuthorizationCode records a newly issued authorization code.
func (d *DefaultTokenRequestContext) IssueAuthorizationCode(code string, signature string, expiry time.Time) {
	d.IssuedTokens.AuthorizationCode = code
	d.IssuedTokens.AuthorizationCodeSignature = signature
	d.IssuedTokens.AuthorizationCodeExpiry = expiry
}
// IssueRefreshToken records a newly issued refresh token.
func (d *DefaultTokenRequestContext) IssueRefreshToken(token string, signature string, expiry time.Time) {
	d.IssuedTokens.RefreshToken = token
	d.IssuedTokens.RefreshTokenSignature = signature
	d.IssuedTokens.RefreshTokenExpiry = expiry
}
// IssueIDToken records a newly issued OpenID Connect ID token.
func (d *DefaultTokenRequestContext) IssueIDToken(token string) {
	d.IssuedTokens.IDToken = token
}
// GetUsername returns the resource-owner username (password grant).
func (d *DefaultTokenRequestContext) GetUsername() string {
	return d.Username
}
// GetPassword returns the resource-owner password (password grant).
func (d *DefaultTokenRequestContext) GetPassword() string {
	return d.Password
}
// GetRequestID returns this request's unique ID.
func (d *DefaultTokenRequestContext) GetRequestID() string {
	return d.RequestID
}
// GetClaims returns the custom claims attached to this request.
func (d *DefaultTokenRequestContext) GetClaims() map[string]interface{} {
	return d.Claims
}
// GetClient returns the resolved OAuth2 client.
func (d *DefaultTokenRequestContext) GetClient() sdk.IClient {
	return d.Client
}
// SetClient stores the resolved OAuth2 client.
func (d *DefaultTokenRequestContext) SetClient(client sdk.IClient) {
	d.Client = client
}
// GetProfile returns the request profile.
func (d *DefaultTokenRequestContext) GetProfile() sdk.RequestProfile {
	return d.Profile
}
// SetProfile stores the request profile.
func (d *DefaultTokenRequestContext) SetProfile(profile sdk.RequestProfile) {
	d.Profile = profile
}
// GetForm returns the raw submitted form values.
func (d *DefaultTokenRequestContext) GetForm() *url.Values {
	return d.Form
}
// GetRequestedAt returns when the request was received.
func (d *DefaultTokenRequestContext) GetRequestedAt() time.Time {
	return d.RequestedAt
}
// GetState returns the OAuth2 state parameter.
func (d *DefaultTokenRequestContext) GetState() string {
	return d.State
}
// GetRedirectURI returns the redirect URI supplied with the request.
func (d *DefaultTokenRequestContext) GetRedirectURI() string {
	return d.RedirectURI
}
// GetGrantType returns the OAuth2 grant type of this request.
func (d *DefaultTokenRequestContext) GetGrantType() string {
	return d.GrantType
}
// GetClientID returns the requesting client's ID.
func (d *DefaultTokenRequestContext) GetClientID() string {
	return d.ClientId
}
// GetClientSecret returns the requesting client's secret.
func (d *DefaultTokenRequestContext) GetClientSecret() string {
	return d.ClientSecret
}
// GetAuthorizationCode returns the submitted authorization code.
func (d *DefaultTokenRequestContext) GetAuthorizationCode() string {
	return d.AuthorizationCode
}
// GetRefreshToken returns the submitted refresh token.
func (d *DefaultTokenRequestContext) GetRefreshToken() string {
	return d.RefreshToken
}
// GetRequestedScopes returns the scopes requested by the client.
func (d *DefaultTokenRequestContext) GetRequestedScopes() sdk.Arguments {
	return d.RequestedScopes
}
// GetRequestedAudience returns the audiences requested by the client.
func (d *DefaultTokenRequestContext) GetRequestedAudience() sdk.Arguments {
	return d.RequestedAudience
}
|
package phpGo
import (
// "reflect"
)
// InterfaceToInt is an unimplemented stub. Judging by its name it is meant
// to convert an arbitrary interface{} value to an int (mirroring PHP's
// loose casts), but no conversion is performed and nothing is returned yet.
// TODO(review): implement or remove; confirm intended semantics with callers.
func InterfaceToInt(val interface{}) {
}
|
package main
import (
"fmt"
"log"
"net"
"os"
"runtime"
"time"
"github.com/SommerEngineering/Sync/Sync"
"github.com/howeyc/gopass"
"golang.org/x/crypto/ssh"
)
// main drives the Sync CLI: it validates the local/remote directory flags,
// prompts for a password if none was given, builds the SSH client
// configuration (password, password-callback and keyboard-interactive
// auth), connects to the server and runs the synchronisation. Fatal
// configuration problems exit with distinct non-zero codes (1-6).
func main() {
	// Show the current version:
	log.Println(`Sync v1.3.2`)
	// Allow Go to use all CPUs:
	runtime.GOMAXPROCS(runtime.NumCPU())
	// Read the configuration from the command-line args:
	readFlags()
	// Check if the directories are provided:
	if localDir == `` || remoteDir == `` {
		log.Println(`Please provide the local and remote directory.`)
		os.Exit(1)
		return
	}
	// Should I use the current working dir?
	if localDir == `.` {
		if currentWD, currentWDError := os.Getwd(); currentWDError != nil {
			log.Println("Cannot use the current working directory as local directory: " + currentWDError.Error())
			os.Exit(2)
			return
		} else {
			log.Println("I use the current working directory as local directory: " + currentWD)
			localDir = currentWD
		}
	}
	// Remove trailing separators from both directories
	localDir = correctPath(localDir)
	remoteDir = correctPath(remoteDir)
	// Check if local dir exist
	if dirInfo, dirError := os.Stat(localDir); dirError != nil {
		log.Println("There is an error with the local directory: " + dirError.Error())
		os.Exit(3)
		return
	} else {
		if !dirInfo.IsDir() {
			log.Println("There is an error with the local directory: You provided a file instead!")
			os.Exit(4)
			return
		}
	}
	// Check if the password was provided:
	// NOTE(review): `for true` is written idiomatically as `for`; the loop
	// only ever runs once because both branches prompt-and-fill or break.
	for true {
		if password == `` {
			// Promt for the password:
			fmt.Print(`Please provide the password for the connection: `)
			if pass, errPass := gopass.GetPasswd(); errPass != nil {
				log.Println(`There was an error reading the password securely: ` + errPass.Error())
				os.Exit(5)
				return
			} else {
				password = string(pass)
			}
		} else {
			break
		}
	}
	// Give some information about the state
	if supervised {
		log.Println("I use the supervised mode.")
	} else {
		log.Println("I do not use the supervised mode.")
	}
	if pushOnly {
		log.Println("I use the push only mode i.e. backup mode. Any remote change will be ignored.")
	} else {
		log.Println("I use the full mode and consider also remote changes.")
	}
	// Create the SSH configuration:
	Sync.SetPassword4Callback(password)
	config := &ssh.ClientConfig{
		User: username,
		Auth: []ssh.AuthMethod{
			ssh.Password(password),
			ssh.PasswordCallback(Sync.PasswordCallback),
			ssh.KeyboardInteractive(Sync.KeyboardInteractiveChallenge),
		},
		HostKeyCallback: showHostKey(),
	}
	// Connect to the SSH server:
	// NOTE(review): the local variable `ssh` shadows the imported ssh
	// package from here on; a different name would be clearer.
	ssh := Sync.ConnectSSH(config, serverAddrString)
	if ssh == nil {
		log.Println(`It was not possible to connect to the SSH server.`)
		os.Exit(6)
		return
	}
	defer ssh.Close()
	Sync.Synchronise(ssh, supervised, pushOnly, localDir, remoteDir)
	log.Println("Synchronising done.")
}
// showHostKey builds a HostKeyCallback that never rejects a host: it logs
// the server's hostname, address and SHA-256 fingerprint, then pauses for
// 16 seconds to give the user a window to abort manually if the key is
// wrong.
func showHostKey() ssh.HostKeyCallback {
	callback := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
		log.Printf("Your server's hostname is %s (%s) and its public key is %s. If this is wrong, please abort the program now! Wait 16 seconds for your check.", hostname, remote.String(), ssh.FingerprintSHA256(key))
		time.Sleep(16 * time.Second)
		return nil
	}
	return callback
}
|
package test
import (
"fwb/core"
"log"
"runtime"
"sgs"
"strings"
"testing"
"time"
)
// TestPlay2PvP boots the game server, then runs three rounds in which two
// mock clients connect and play against each other; after each round it
// checks that both distinct clients reported game over on _ch. A watchdog
// goroutine fails the test after 600s.
// NOTE(review): t.Fail from the watchdog goroutine may fire after the test
// function returns, and the single-case select could be a plain receive.
func TestPlay2PvP(t *testing.T) {
	loadConf()
	mockClient1 := mockClient{
		name: _1P_NAME,
		clientID: _1P_ID,
		t: t,
	}
	log.Print("client 1 ", mockClient1.name)
	mockClient2 := mockClient{
		name: _2P_NAME,
		clientID: _2P_ID,
		t: t,
	}
	log.Print("client 2 ", mockClient2.name)
	gameOverMap := make(map[int]bool)
	go bootServer(t)
	// Give the server time to come up before connecting clients.
	<-time.After(time.Duration(3) * time.Second)
	go func() {
		<-time.After(time.Duration(600) * time.Second)
		log.Print("test timeout")
		t.Fail()
	}()
	for i := 0; i < 3; i++ {
		go mockClient1.connect()
		go mockClient2.connect()
		select {
		case c := <-_ch:
			gameOverMap[c] = true
			c = <-_ch
			gameOverMap[c] = true
			// Both (distinct) clients must have finished the game.
			if len(gameOverMap) != 2 {
				log.Print("incorrect client state")
				t.Fail()
			}
		}
	}
}
// bootServer starts the game server using this source file's directory as
// the configuration path, failing the test if startup returns an error.
func bootServer(t *testing.T) {
	// Derive this file's directory from the caller's file path.
	_, file, _, _ := runtime.Caller(0)
	c := strings.Split(file, "/")
	path := file[:len(file)-len(c[len(c)-1])-1]
	_tl.Inf("Configuration Path: " + path)
	e := sgs.Run(core.FwAppBuildFunc, path)
	if e != nil {
		t.Fail()
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.