text stringlengths 11 4.05M |
|---|
package db
import (
"log"
"testing"
)
// Connection settings for the local test database.
// NOTE(review): credentials are hard-coded; acceptable for a local-only
// test file, but they must not point at any shared environment.
const (
	Host     string = "127.0.0.1:3306"
	User     string = "root"
	Password string = "huang03"
	Db       string = "test"
)
// TestFind exercises a single-row lookup assembled from the table/alias/
// order/field/condition setters; output is logged for manual inspection.
func TestFind(t *testing.T) {
	client := NewMySQL(Host, User, Password, Db)
	client.Table("user")
	client.Alias("T1")
	client.Order("id desc")
	client.Field([]string{"id,name,age"})
	client.AddCon("id", "in", []string{"1", "2"})
	client.AddCon("name", "not in", []string{"hello", "02"})
	ret, err := client.Find()
	log.Println("Test FIND", ret, err, client.LastSql())
}
// TestFindAll exercises a multi-row query with an OR-joined condition;
// output is logged for manual inspection.
func TestFindAll(t *testing.T) {
	client := NewMySQL(Host, User, Password, Db)
	client.Table("user")
	client.Alias("T1")
	client.Order("id desc")
	client.Field([]string{"T1.id,T1.name,T1.age"})
	client.AddCon("id", "in", []string{"1", "2"})
	client.AddCon("name", "eq", "hello", "OR")
	ret, err := client.FindAll()
	log.Println("Test FIND ALL ", ret, err, client.LastSql())
}
// TestFinAll_Join exercises FindAll with an explicit LEFT JOIN clause and
// aliased columns from the joined table.
// NOTE(review): the name is missing a 'd' (should be TestFindAll_Join);
// renaming is safe for `go test` discovery but is left to the owner.
func TestFinAll_Join(t *testing.T) {
	mysql := NewMySQL(Host, User, Password, Db)
	mysql.Table("user")
	mysql.Alias("T1")
	mysql.Order("id desc")
	mysql.Field([]string{"T1.id", "T1.name", "T1.age", "P.age AS page"})
	mysql.Join("LEFT JOIN profile AS P ON T1.id=P.user_id")
	ret, err := mysql.FindAll()
	log.Println("Test FIND ALL ", ret, err, mysql.LastSql())
}
// TestInsert inserts one row into `user`; output is logged for manual
// inspection.
func TestInsert(t *testing.T) {
	client := NewMySQL(Host, User, Password, Db)
	fields := map[string]string{
		"name": "samTest32",
		"age":  "32",
	}
	ret, err := client.Insert("user", fields)
	log.Println("Test INSERT", ret, err, client.LastSql())
}
// TestUpdate updates the row with id=1; output is logged for manual
// inspection.
func TestUpdate(t *testing.T) {
	client := NewMySQL(Host, User, Password, Db)
	fields := map[string]string{
		"name": "sam888",
		"age":  "30",
	}
	client.AddCon("id", "eq", 1)
	ret, err := client.Update("user", fields)
	log.Println(ret, err, client.LastSql())
}
// TestDelete removes the rows whose id is in {20, 21}; output is logged
// for manual inspection.
func TestDelete(t *testing.T) {
	client := NewMySQL(Host, User, Password, Db)
	cond := client.FOP("id", "in", []int{20, 21})
	ret, err := client.Delete("user", cond)
	log.Println(ret, err, client.LastSql())
}
// TestRawSql runs a raw parameterized SELECT; output is logged for manual
// inspection.
func TestRawSql(t *testing.T) {
	client := NewMySQL(Host, User, Password, Db)
	ret, err := client.QueryRawSql("SELECT * FROM user where id=?", 1)
	log.Println(ret, err, client.LastSql())
}
// TestFindAllBetween exercises a BETWEEN condition over the id column;
// output is logged for manual inspection.
func TestFindAllBetween(t *testing.T) {
	client := NewMySQL(Host, User, Password, Db)
	client.Table("user")
	client.Alias("T1")
	client.Order("id desc")
	client.Field([]string{"T1.id,T1.name,T1.age"})
	client.AddCon("id", "between", []int{1, 4})
	ret, err := client.FindAll()
	log.Println("Test FIND Between ", ret, err, client.LastSql())
}
// TestFindAllLike exercises a LIKE condition on the name column.
// (The original comment said "between" — copied from the previous test.)
func TestFindAllLike(t *testing.T) {
	mysql := NewMySQL(Host, User, Password, Db)
	mysql.Table("user")
	mysql.Alias("T1")
	mysql.Order("id desc")
	mysql.Field([]string{"T1.id,T1.name,T1.age"})
	mysql.AddCon("name", "like", "%hell%")
	ret, err := mysql.FindAll()
	log.Println("Test FIND LIKE ", ret, err, mysql.LastSql())
}
|
package handlers
import (
"log"
"net/http"
"os"
"github.com/IsaiasMorochi/twitter-clone-backend/middleware"
"github.com/IsaiasMorochi/twitter-clone-backend/routers"
"github.com/gorilla/mux"
"github.com/rs/cors"
)
// Manejadores registers all HTTP routes, resolves the listening port from
// the PORT environment variable (defaulting to 8080), wraps the router in
// a permissive CORS handler and blocks serving requests.
func Manejadores() {
	r := mux.NewRouter()
	r.HandleFunc("/register", middleware.CheckCnx(routers.PostUser)).Methods("POST")
	r.HandleFunc("/login", middleware.CheckCnx(routers.Login)).Methods("POST")
	r.HandleFunc("/view-profile", middleware.CheckCnx(middleware.Validate(routers.ViewProfile))).Methods("GET")
	r.HandleFunc("/update-profile", middleware.CheckCnx(middleware.Validate(routers.PutUser))).Methods("PUT")
	r.HandleFunc("/tweet", middleware.CheckCnx(middleware.Validate(routers.PostTweet))).Methods("POST")
	r.HandleFunc("/tweet", middleware.CheckCnx(middleware.Validate(routers.GetTweet))).Methods("GET")
	r.HandleFunc("/tweet", middleware.CheckCnx(middleware.Validate(routers.DeleteTweet))).Methods("DELETE")
	port := os.Getenv("PORT")
	if port == "" {
		port = "8080"
	}
	handler := cors.AllowAll().Handler(r)
	log.Fatal(http.ListenAndServe(":"+port, handler))
}
|
package types
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"github.com/reed/crypto"
"strconv"
"testing"
)
// TestTx_GenerateID checks that a transaction's ID is the SHA-256 of its
// input IDs followed by its output IDs (in declaration order), and that an
// empty transaction is rejected.
func TestTx_GenerateID(t *testing.T) {
	// Three fixed input IDs and three fixed output IDs.
	id1, _ := hex.DecodeString("416c7cfb8a0836d51517b6ae32e0dee579a554f41c10448da847f0888b76881e")
	id2, _ := hex.DecodeString("1a946d5a05761732ae162a886a767fdefa3ce3ee66f9cc0352481e0ae751db9c")
	id3, _ := hex.DecodeString("f3e023bebab7e9b6f83a94ac4064f0f4e2ea5f67af77f6857a6c0fe9121d359f")
	inp1 := &TxInput{
		ID:        BytesToHash(id1),
		ScriptSig: []byte("miner"),
	}
	inp2 := &TxInput{
		ID: BytesToHash(id2),
	}
	inp3 := &TxInput{
		ID: BytesToHash(id3),
	}
	var inps []TxInput
	inps = append(inps, *inp1, *inp2, *inp3)
	oid1, _ := hex.DecodeString("4116ac20904e2b01daf6ececf6f0afb960b760bdfce179f3afc3f6c5eea36c82")
	oid2, _ := hex.DecodeString("570c9dcf6705794eee3d50025d4f838ef15b87992962247252f23d8a106d9758")
	oid3, _ := hex.DecodeString("4c66effcf3d375e28bce54d5ae405d47c721fdd7d135d0ae14e487b7bf58f324")
	onp1 := &TxOutput{
		ID: BytesToHash(oid1),
	}
	onp2 := &TxOutput{
		ID: BytesToHash(oid2),
	}
	onp3 := &TxOutput{
		ID: BytesToHash(oid3),
	}
	var onps []TxOutput
	onps = append(onps, *onp1, *onp2, *onp3)
	// A transaction with no inputs/outputs must fail to generate an ID.
	emptyTX := &Tx{
	}
	if _, err := emptyTX.GenerateID(); err == nil {
		t.Error("empty inputs would not pass")
	}
	tx := &Tx{
		TxInput:  inps,
		TxOutput: onps,
	}
	txId, err := tx.GenerateID()
	if err != nil {
		t.Error(err)
	}
	// Expected preimage: input IDs then output IDs, concatenated in order.
	b := bytes.Join([][]byte{
		id1, id2, id3, oid1, oid2, oid3,
	}, []byte{})
	if !bytes.Equal((*txId).Bytes(), crypto.Sha256(b)) {
		t.Error("GenerateID error")
	}
	var txs []Tx
	txs = append(txs, *tx)
	block := &Block{
		Transactions: &txs,
	}
	// Mutates the first input's ScriptSig in place via the block pointer.
	incrementExtraNonce(19, block)
	marshal, _ := json.Marshal(tx.TxInput[0])
	fmt.Printf("%s", marshal)
}
// incrementExtraNonce appends the decimal form of extraNonce to the
// ScriptSig of the first input of the block's first transaction, storing
// the result in a freshly allocated slice.
func incrementExtraNonce(extraNonce uint64, cblock *Block) {
	txs := *cblock.Transactions
	old := txs[0].TxInput[0].ScriptSig
	suffix := []byte(strconv.FormatUint(extraNonce, 10))
	msg := make([]byte, 0, len(old)+len(suffix))
	msg = append(msg, old...)
	msg = append(msg, suffix...)
	txs[0].TxInput[0].ScriptSig = msg
}
// mockJustIDTxInput builds a TxInput fixture from package-level test vars
// (spoutId, spsrcId, scriptPk, inpId, scriptSig — defined elsewhere in
// this package, not visible in this chunk).
// NOTE(review): `SoureId` (missing 'c') is the field name as declared on
// the Spend type; fixing the typo would require changing that type.
func mockJustIDTxInput() TxInput {
	spend := Spend{
		SpendOutputId: BytesToHash(spoutId),
		SoureId:       BytesToHash(spsrcId),
		SourcePos:     0,
		Amount:        10,
		ScriptPk:      scriptPk,
	}
	return TxInput{
		ID:        BytesToHash(inpId),
		Spend:     spend,
		ScriptSig: scriptSig,
	}
}
|
package clear
import (
"os"
)
// workDir is the directory tree removed by Clean.
const workDir = "/etc/kubernetes"

// Clean deletes workDir and everything beneath it.
// It now returns the os.RemoveAll error instead of silently discarding it
// (the caller previously had no way to know the cleanup failed). Existing
// `Clean()` call statements still compile unchanged.
func Clean() error {
	return os.RemoveAll(workDir)
}
|
package cfmysql
import "net"
//go:generate counterfeiter . NetWrapper

// NetWrapper abstracts dialing and closing network connections so they
// can be faked in tests (see the counterfeiter directive above).
type NetWrapper interface {
	Dial(network, address string) (net.Conn, error)
	Close(conn net.Conn) error
}

// NewNetWrapper returns the production implementation backed by the net
// package.
func NewNetWrapper() NetWrapper {
	return new(netWrapper)
}

type netWrapper struct{}

// Dial delegates to net.Dial.
// Receivers renamed from `self` to `w`: Go convention forbids this/self
// style receiver names (golint/staticcheck ST1006).
func (w *netWrapper) Dial(network, address string) (net.Conn, error) {
	return net.Dial(network, address)
}

// Close closes the given connection.
func (w *netWrapper) Close(conn net.Conn) error {
	return conn.Close()
}
|
package flats
import (
"fmt"
"github.com/dgraph-io/experiments/flats/fuids"
flatbuffers "github.com/google/flatbuffers/go"
)
// ToAndFrom is an intentionally empty placeholder.
// NOTE(review): presumably kept as a benchmark baseline or stub — confirm
// with the benchmark harness before removing.
func ToAndFrom() {
}
// ToAndFromProto round-trips uids through UidList Marshal/Unmarshal and
// verifies the decoded list matches the input element-by-element.
// On success it returns (nil, encoded byte count).
func ToAndFromProto(uids []uint64) (error, int) {
	var in UidList
	in.Uid = append([]uint64(nil), uids...)
	data, err := in.Marshal()
	if err != nil {
		return err, 0
	}
	var out UidList
	if err := out.Unmarshal(data); err != nil {
		return err, 0
	}
	if len(out.Uid) != len(in.Uid) {
		return fmt.Errorf("Length doesn't match"), 0
	}
	for i, want := range uids {
		if out.Uid[i] != want {
			return fmt.Errorf("ID doesn't match at index: %v", i), 0
		}
	}
	return nil, len(data)
}
// ToAndFromProtoAlt is the UidListAlt variant of ToAndFromProto: it
// round-trips uids through Marshal/Unmarshal and verifies the result.
// On success it returns (nil, encoded byte count).
func ToAndFromProtoAlt(uids []uint64) (error, int) {
	var in UidListAlt
	in.Uid = append([]uint64(nil), uids...)
	data, err := in.Marshal()
	if err != nil {
		return err, 0
	}
	var out UidListAlt
	if err := out.Unmarshal(data); err != nil {
		return err, 0
	}
	if len(out.Uid) != len(in.Uid) {
		return fmt.Errorf("Length doesn't match"), 0
	}
	for i, want := range uids {
		if out.Uid[i] != want {
			return fmt.Errorf("ID doesn't match at index: %v", i), 0
		}
	}
	return nil, len(data)
}
// ToAndFromFlat round-trips uids through a flatbuffers UidList and
// verifies the decoded vector matches the input.
// On success it returns (nil, encoded byte count).
func ToAndFromFlat(uids []uint64) (error, int) {
	b := flatbuffers.NewBuilder(0)
	fuids.UidListStartUidsVector(b, len(uids))
	// Flatbuffers vectors are built back-to-front, hence the reverse loop.
	for i := len(uids) - 1; i >= 0; i-- {
		b.PrependUint64(uids[i])
	}
	ve := b.EndVector(len(uids))
	fuids.UidListStart(b)
	fuids.UidListAddUids(b, ve)
	ue := fuids.UidListEnd(b)
	b.Finish(ue)
	data := b.FinishedBytes()
	// Reads directly from the serialized buffer — no unmarshal copy.
	nl := fuids.GetRootAsUidList(data, 0)
	if nl.UidsLength() != len(uids) {
		return fmt.Errorf("Length doesn't match"), 0
	}
	for i := 0; i < len(uids); i++ {
		if nl.Uids(i) != uids[i] {
			return fmt.Errorf("ID doesn't match at index: %v Expected: %v. Got: %v",
				i, uids[i], nl.Uids(i)), 0
		}
	}
	return nil, len(data)
}
|
package padding
import (
"strings"
"unicode/utf8"
)
// ZipCode wraps a raw zip-code string for CSV (un)marshalling.
type ZipCode struct {
	string
}

// MarshalCSV returns the stored zip code unchanged.
func (zip *ZipCode) MarshalCSV() (string, error) {
	return zip.string, nil
}

// UnmarshalCSV stores csv, left-padding it with '0' runes up to five
// characters when it is shorter than that; longer values are kept as-is.
func (zip *ZipCode) UnmarshalCSV(csv string) error {
	const targetLen = 5
	missing := targetLen - utf8.RuneCountInString(csv)
	if missing > 0 {
		zip.string = strings.Repeat("0", missing) + csv
		return nil
	}
	zip.string = csv
	return nil
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package operator
import (
"regexp"
"strings"
"sync"
"testing"
"github.com/stretchr/testify/require"
)
// TestPipelineAsyncMultiOperators wires split -> lowercase -> trim ->
// count -> collect into an async pipeline and checks that the most common
// word in the sentence ("hit", 3 occurrences) is collected.
func TestPipelineAsyncMultiOperators(t *testing.T) {
	words := `Bob hiT a ball, the hIt BALL flew far after it was hit.`
	var mostCommonWord string
	splitter := makeSplitter(words)
	lower := makeLower()
	trimmer := makeTrimmer()
	counter := makeCounter()
	collector := makeCollector(&mostCommonWord)
	// Compose links each operator's output channel to the next's input.
	Compose[string](splitter, lower)
	Compose[string](lower, trimmer)
	Compose[string](trimmer, counter)
	Compose[strCnt](counter, collector)
	pipeline := NewAsyncPipeline(splitter, lower, trimmer, counter, collector)
	require.Equal(t, pipeline.String(), "AsyncPipeline[simpleSource -> simpleOperator(AsyncOp[string, string]) -> simpleOperator(AsyncOp[string, string]) -> simpleOperator(AsyncOp[string, operator.strCnt]) -> simpleSink]")
	err := pipeline.Execute()
	require.NoError(t, err)
	// Close waits for the pipeline to drain before the final assertion.
	err = pipeline.Close()
	require.NoError(t, err)
	require.Equal(t, mostCommonWord, "hit")
}
// strCnt pairs a word with its running occurrence count as it flows
// through the counting operator.
type strCnt struct {
	str string
	cnt int
}
// makeSplitter builds a source that emits the space-separated words of s
// one at a time and yields "" once exhausted (the pipeline's end marker).
func makeSplitter(s string) *simpleSource[string] {
	words := strings.Split(s, " ")
	next := 0
	return newSimpleSource(func() string {
		if next >= len(words) {
			return ""
		}
		w := words[next]
		next++
		return w
	})
}
// makeLower returns a 3-worker operator that lowercases each word.
func makeLower() *simpleOperator[string, string] {
	return newSimpleOperator(strings.ToLower, 3)
}
// makeTrimmer returns a 3-worker operator that strips every
// non-alphanumeric character from each word.
func makeTrimmer() *simpleOperator[string, string] {
	nonAlpha := regexp.MustCompile(`[^a-zA-Z0-9]+`)
	trim := func(s string) string {
		return nonAlpha.ReplaceAllString(s, "")
	}
	return newSimpleOperator(trim, 3)
}
// makeCounter returns a 3-worker operator that maps each word to a strCnt
// carrying its occurrence count so far; the shared map is mutex-guarded
// because the operator runs concurrently.
func makeCounter() *simpleOperator[string, strCnt] {
	var (
		mu     sync.Mutex
		counts = make(map[string]int)
	)
	return newSimpleOperator(func(s string) strCnt {
		mu.Lock()
		defer mu.Unlock()
		counts[s]++
		return strCnt{s, counts[s]}
	}, 3)
}
// makeCollector returns a sink that tracks the highest count seen and
// writes the corresponding word into *v; mutex-guarded because sinks may
// be invoked concurrently.
func makeCollector(v *string) *simpleSink[strCnt] {
	var (
		mu   sync.Mutex
		best int
	)
	return newSimpleSink(func(sc strCnt) {
		mu.Lock()
		defer mu.Unlock()
		if sc.cnt > best {
			best = sc.cnt
			*v = sc.str
		}
	})
}
|
package http
import (
"../dao"
"../socket"
"../types"
"../utility"
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"github.com/gorilla/sessions"
_ "github.com/mattn/go-sqlite3"
"html/template"
"log"
"math/rand"
"net/http"
"time"
)
/*
There are a lot of work remaining on this
There is next to no security - This is due to just a testing/prototyping phase. Security would need to be implemented before production.
Previously view endpoints were hit and Go templates were executed. Now it is all done client side with AngularJS and partials.
These endpoints need to be audited and cleaned up.
Currently all the client side JS resides in separate controllers etc. The Go binary needs to concat them all and minify.
Also the particular map API vendor needs to be added conditionally not all of them. Nginx config needs to make sure JS requests go through backend.
CSS needs to be minimized too and audited to see what can be removed.
*/
// HttpRouter registers the websocket endpoint plus all GET (view) and
// POST (action) routes, then serves HTTP on *BindIP until failure.
// NOTE(review): mux match order follows registration order, so the "/"
// catch-alls must stay last.
func HttpRouter(BindIP *string) {
	Router := mux.NewRouter()
	viewRouter := Router.Methods("GET").Subrouter()
	actionRouter := Router.Methods("POST").Subrouter()
	//Handle web socket traffic specially
	Router.HandleFunc("/ws", socketInit)
	//View Routes — handlers are stored as interface{} in the views map,
	//hence the func(...) type assertions (panics if a key is missing).
	viewRouter.HandleFunc("/system/settings", views["ViewSettings"].(func(http.ResponseWriter, *http.Request)))
	viewRouter.HandleFunc("/system/login", views["ViewLogin"].(func(http.ResponseWriter, *http.Request)))
	viewRouter.HandleFunc("/system/license", views["ViewLicense"].(func(http.ResponseWriter, *http.Request)))
	viewRouter.HandleFunc("/system/support", views["ViewSupport"].(func(http.ResponseWriter, *http.Request)))
	viewRouter.HandleFunc("/system/map", views["ViewMap"].(func(http.ResponseWriter, *http.Request)))
	viewRouter.HandleFunc("/system/report", views["ViewReport"].(func(http.ResponseWriter, *http.Request)))
	viewRouter.HandleFunc("/", views["ViewInvalid"].(func(http.ResponseWriter, *http.Request)))
	//Action Routes
	actionRouter.HandleFunc("/system/login", actions["ActionLogin"].(func(http.ResponseWriter, *http.Request)))
	actionRouter.HandleFunc("/system/logout", actions["ActionLogout"].(func(http.ResponseWriter, *http.Request)))
	actionRouter.HandleFunc("/system/settings", actions["ActionSettings"].(func(http.ResponseWriter, *http.Request)))
	actionRouter.HandleFunc("/system/settings/password", actions["ActionSettingsPassword"].(func(http.ResponseWriter, *http.Request)))
	actionRouter.HandleFunc("/system/historicalroute", actions["ActionHistorialRoute"].(func(http.ResponseWriter, *http.Request)))
	actionRouter.HandleFunc("/", actions["ActionInvalid"].(func(http.ResponseWriter, *http.Request)))
	//Use the router
	http.Handle("/", Router)
	fmt.Printf("\nListening for HTTP on %s", *BindIP)
	err := http.ListenAndServe(*BindIP, nil)
	if err != nil {
		fmt.Printf("\nFailed to listen for http on %s", *BindIP)
		log.Fatal("\nError: ", err)
	}
}
// socketInit upgrades /ws requests to a websocket, passing the shared
// cookie store so the socket layer can read session data.
func socketInit(w http.ResponseWriter, r *http.Request) {
	socket.WebSocketInit(w, r, store)
}
//Session information
//get random bytes
// SECURITY NOTE(review): the cookie-signing key is a hard-coded constant;
// as the original comment says, it must come from a CSPRNG (and rotate)
// before production.
var store = sessions.NewCookieStore([]byte("emtec789")) //this needs to be randomized something from /dev/random
// actions maps POST route names to their handlers; HttpRouter looks these
// up by key and type-asserts them back to http handler funcs.
var actions = map[string]interface{}{
	// Catch-all for unrouted POSTs: always a 403.
	"ActionInvalid": func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "Invalid Action", 403)
	},
"ActionLogout": func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "application/json")
session, _ := store.Get(r, "data")
var user types.User = session.Values["User"].(types.User)
//Update DB
dao.LogOutUser(user.ID)
var hash = utility.GetSocketHash(r, user.FirstName, user.LastName)
socket.WebSocketClose(hash)
//clear cookie
session.Values["User"] = ""
session.Values["Company"] = ""
session.Values["Settings"] = ""
if err := session.Save(r, w); err != nil {
fmt.Printf("Can't save session data (%s)\n", err.Error())
}
fmt.Fprint(w, types.JSONResponse{"success": true, "message": "Log out ok"})
},
"ActionLogin": func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "application/json")
name := r.FormValue("name")
password := r.FormValue("password")
user, company, settings, errors := dao.LoginUser(name, password)
//TODO IV + salt hash the password and compare against exiting hashed password - This is just testing code so plaintext is OK for now.
if len(errors) == 0 {
session, _ := store.Get(r, "data")
//TODO if this user is currently logged in then log them out
//TODO log out old users who have been logged in more than 24 hours
session.Values["User"] = user
session.Values["Company"] = company
session.Values["Settings"] = settings
session.Options = &sessions.Options{
Path: "/",
MaxAge: 86400, //1 day
}
if err := session.Save(r, w); err != nil {
fmt.Printf("Can't save session data (%s)\n", err.Error())
}
fmt.Fprint(w, types.JSONResponse{"success": true, "message": "Login OK", "user": user, "company": company, "settings": settings})
} else {
fmt.Fprint(w, types.JSONResponse{"success": false, "message": "Login Failed", "errors": Errors})
}
},
"ActionSettingsPassword": func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "application/json")
session, _ := store.Get(r, "data")
var user types.User = session.Values["User"].(types.User)
var settings types.Settings = session.Values["Settings"].(types.Settings)
var f map[string]interface{}
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&f)
if err != nil {
log.Fatal(err)
}
password := dao.GetPassword(user.ID)
//TODO hash and check against existing hashed password
if password == f["passwordold"] {
//If only Allow admins to reset password is NOT set then update the users password
if settings.SecurityAdminPasswordReset == 0 {
dao.SetPassword(user.ID, f["password"].(string))
} else {
if user.Accesslevel == 10 {
dao.SetPassword(user.ID, f["password"].(string))
}
}
user.Password = f["password"].(string)
session.Values["User"] = user
if err := session.Save(r, w); err != nil {
fmt.Printf("Can't save session data (%s)\n", err.Error())
}
fmt.Fprint(w, types.JSONResponse{"success": "Password Updated"})
} else {
fmt.Fprint(w, types.JSONResponse{"error": "Old Password incorrect"})
}
},
"ActionSettings": func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "application/json")
session, _ := store.Get(r, "data")
var user types.User = session.Values["User"].(types.User)
var settings types.Settings = session.Values["Settings"].(types.Settings)
var f map[string]interface{}
decoder := json.NewDecoder(r.Body)
err := decoder.Decode(&settings)
if err != nil {
log.Fatal(err)
}
dao.SetSettings(&user, &f)
//Update the cookie too
settings.MapAPI = f["MapAPI"].(string)
//TODO see if I can improve this verbose crappy code
if f["Interpolate"].(bool) {
settings.Interpolate = 1
} else {
settings.Interpolate = 0
}
if f["SnaptoRoad"].(bool) {
settings.SnaptoRoad = 1
} else {
settings.SnaptoRoad = 0
}
settings.CameraPanTrigger = int(f["CameraPanTrigger"].(float64))
settings.MinZoom = int(f["MinZoom"].(float64))
settings.MaxZoom = int(f["MaxZoom"].(float64))
settings.ClubBoundaryKM = int(f["ClubBoundaryKM"].(float64))
if f["RadioCommunication"].(bool) {
settings.RadioCommunication = 1
} else {
settings.RadioCommunication = 0
}
if f["DataCommunication"].(bool) {
settings.DataCommunication = 1
} else {
settings.DataCommunication = 0
}
if f["SecurityRemoteAdmin"].(bool) {
settings.SecurityRemoteAdmin = 1
} else {
settings.SecurityRemoteAdmin = 0
}
if f["SecurityConsoleAccess"].(bool) {
settings.SecurityConsoleAccess = 1
} else {
settings.SecurityConsoleAccess = 0
}
if f["SecurityAdminPasswordReset"].(bool) {
settings.SecurityAdminPasswordReset = 1
} else {
settings.SecurityAdminPasswordReset = 0
}
if f["MobileSmartPhoneAccess"].(bool) {
settings.MobileSmartPhoneAccess = 1
} else {
settings.MobileSmartPhoneAccess = 0
}
if f["MobileShowBusLocation"].(bool) {
settings.MobileShowBusLocation = 1
} else {
settings.MobileShowBusLocation = 0
}
session.Values["Settings"] = settings
if err := session.Save(r, w); err != nil {
fmt.Printf("Can't save session data (%s)\n", err.Error())
}
fmt.Fprint(w, types.JSONResponse{"success": true})
},
"ActionHistorialRoute": func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "application/json")
//map with string key and slice of string slices
dateFrom := r.FormValue("dateFrom")
dateTo := r.FormValue("dateTo")
Route := dao.GetHistoricalRoute(dateFrom, dateTo)
//This was very problematic. Where the packets were being recorded every second this would repond back with way too much data in the body and client side rendering whould choke.
//There needs to be better heuristics to produce an optimized dataset that is will render quickly.
fmt.Fprint(w, types.JSONResponse{"success": true, "data": Route})
},
}
//Note - Template caching needs to be implemented http://golang.org/doc/articles/wiki/ There is an inefficiency in this code: renderTemplate calls ParseFiles every time a page is rendered.
// views maps GET route names to their handlers; HttpRouter looks these up
// by key and type-asserts them back to http handler funcs.
var views = map[string]interface{}{
	// Catch-all for unrouted GETs: always a 403.
	"ViewInvalid": func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "Invalid view", 403)
	},
"ViewLogin": func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "application/json")
session, _ := store.Get(r, "data")
if session == nil {
http.Error(w, "Unauthorized", 401)
} else {
var user types.User
var company types.Company
var settings types.Settings
user = session.Values["User"].(types.User)
company = session.Values["Company"].(types.Company)
settings = session.Values["Settings"].(types.Settings)
fmt.Fprint(w, types.JSONResponse{"success": true, "message": "Login OK", "user": user, "company": company, "settings": settings})
}
},
"ViewSupport": func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "text/html")
session, err := store.Get(r, "session")
if session == nil {
fmt.Printf("Session is nil \n")
}
if err != nil {
fmt.Printf("Error loading session information %s", err.Error())
}
t, err := template.ParseFiles("templates/support.html")
if err != nil {
log.Fatal("Failed to read the template file for support. Fix it")
}
t.Execute(w, session.Values)
},
"ViewReport": func(w http.ResponseWriter, r *http.Request) {
//TODO redirect to root if not logged in - USE middleware for this later
w.Header().Add("Content-Type", "application/json")
//session, _ := store.Get(r, "session")
var random *rand.Rand = rand.New(rand.NewSource(time.Now().UnixNano())) //new random with unix time nano seconds as seed
var percentAvailable int = random.Intn(75)
availability := [...]int{percentAvailable, 100 - percentAvailable}
kmPerDay := dao.GetKMReport()
fmt.Fprint(w, types.JSONResponse{"Availability": availability, "KMPerDay": kmPerDay})
},
"ViewMap": func(w http.ResponseWriter, r *http.Request) {
//TODO redirect to root if not logged in - USE middleware for this later
w.Header().Add("Content-Type", "text/html")
session, _ := store.Get(r, "session")
fmt.Printf("Session is %s", types.JSONResponse{"session": session})
var err error
t := template.New("Map")
t, err = template.ParseFiles("templates/map.html")
if err != nil {
log.Fatal("Failed to read the template file for map. Fix it")
}
t.Execute(w, session.Values)
},
"ViewLicense": func(w http.ResponseWriter, r *http.Request) {
//TODO redirect to root if not logged in - USE middleware for this later
w.Header().Add("Content-Type", "text/html")
session, _ := store.Get(r, "session")
var err error
t := template.New("License")
t, err = template.ParseFiles("templates/license.html")
if err != nil {
log.Fatal("Failed to read the template file for license. Fix it")
}
t.Execute(w, session.Values)
},
"ViewSettings": func(w http.ResponseWriter, r *http.Request) {
//TODO redirect to root if not logged in - USE middleware for this later
w.Header().Add("Content-Type", "text/html")
session, _ := store.Get(r, "session")
var user types.User = session.Values["User"].(types.User)
mapAPI, interpolate, snaptoroad := dao.GetSettings(&user)
session.Values["Settings"] = map[string]interface{}{
"MapAPI": mapAPI,
"Interpolate": interpolate,
"SnaptoRoad": snaptoroad,
}
session.Save(r, w)
var err error
t := template.New("Settings")
t, err = template.ParseFiles("templates/settings.html")
if err != nil {
fmt.Printf(err.Error())
log.Fatal("\nFailed to parse the template file for settings. Fix it")
}
t.Execute(w, session.Values)
},
}
|
package main
import (
//"compress/gzip" // uncomment this line to use the gzip package
"flag"
"fmt"
//"golang.org/x/net/html" // uncomment this line to use the html package
"log"
"net"
"net/http"
"os"
"sync"
"time"
)
// Global constants
const (
	SERVERROR      = 500            // HTTP 500 Internal Server Error
	BADREQ         = 400            // HTTP 400 Bad Request
	MAX_OBJ_SIZE   = 500 * 1024     // default cap on a single cached object (500 KiB)
	MAX_CACHE_SIZE = 10 * 1024 * 1024 // default cap on total cache size (10 MiB)
)

// Command line parameters (populated by initFlags).
var (
	listeningPort  uint
	dnsPrefetching bool
	caching        bool
	cacheTimeout   uint
	maxCacheSize   uint
	maxObjSize     uint
	linkPrefetching bool
	maxConcurrency uint
	outputFile     string
)

// Channel to synchronize number of prefetch threads
// NOTE(review): declared but never initialized in this file — must be
// `make(chan bool, maxConcurrency)`d before use or sends will block forever.
var semConc chan bool

// Stat variables
var (
	clientRequests  int // HTTP requests
	cacheHits       int // Cache Hits
	cacheMisses     int // Cache misses
	cacheEvictions  int // Cache evictions
	trafficSent     int // Bytes sent to clients
	volumeFromCache int // Bytes sent from the cache
	downloadVolume  int // Bytes downloaded from servers
)

// RW lock for the stat variables.
// You need to lock the stat variables when updating them.
var statLock sync.RWMutex
// saveStatistics appends a snapshot of the proxy counters to outputFile
// every 10 seconds, forever. Derived columns: cacheEfficiency is the
// fraction of sent bytes served from cache; trafficWastage is bytes
// downloaded but never sent. Intended to run as a goroutine.
func saveStatistics() {
	f, err := os.Create(outputFile)
	if err != nil {
		log.Fatal("Error creating output file", outputFile)
	}
	start := time.Now()
	// Column header, written once. (Stray trailing semicolons removed —
	// the original was not gofmt-clean.)
	str := "#Time(s)\tclientRequests\tcacheHits\tcacheMisses\tcacheEvictions" +
		"\ttrafficSent\tvolumeFromCache\tdownloadVolume\ttrafficWastage\tcacheEfficiency"
	f.WriteString(str)
	for {
		var cacheEfficiency float64
		var trafficWastage int
		// time.Since is the idiomatic form of time.Now().Sub(start).
		currentTime := time.Since(start)
		statLock.RLock()
		if trafficSent > 0 {
			cacheEfficiency = float64(volumeFromCache) / float64(trafficSent)
		} else {
			cacheEfficiency = 0.0
		}
		if downloadVolume > trafficSent {
			trafficWastage = downloadVolume - trafficSent
		} else {
			trafficWastage = 0
		}
		str := fmt.Sprintf("\n%d\t\t%d\t\t%d\t\t%d\t\t%d\t\t%d\t\t%d\t\t%d\t\t%d\t\t%f",
			int(currentTime.Seconds()), clientRequests,
			cacheHits, cacheMisses, cacheEvictions,
			trafficSent, volumeFromCache, downloadVolume,
			trafficWastage, cacheEfficiency)
		statLock.RUnlock()
		f.WriteString(str)
		f.Sync()
		time.Sleep(time.Second * 10)
	}
}
// doLinkPrefetching is an unimplemented stub: it is meant to fetch objects
// referenced by a downloaded page ahead of time (parameters TBD).
func doLinkPrefetching(/* Declare the parameters you need. */) {
}

// doDnsPrefetching is an unimplemented stub: it is meant to resolve
// hostnames found in a page ahead of time (parameters TBD).
func doDnsPrefetching(/* Declare the parameters you need. */ ) {
}
// handleRequest serves one client connection; the connection is closed on
// return. The actual proxying is still a TODO.
func handleRequest(w net.Conn) {
	defer w.Close()
	// NOTE(review): resp is the zero value until the TODO is implemented,
	// so StatusCode is 0 and the prefetch branches below never run.
	var resp http.Response
	// TODO: Handle http request
	if resp.StatusCode == 200 {
		if linkPrefetching {
			go doLinkPrefetching(/* Pass the parameters you need. */)
		} else if dnsPrefetching {
			go doDnsPrefetching(/* Pass the parameters you need. */)
		}
	}
}
// initFlags defines and parses all command-line options into the
// package-level parameter variables. Must be called exactly once (flag
// redefinition panics) and before any parameter is read.
func initFlags() {
	flag.UintVar(&listeningPort, "port", 8080, "Proxy listening port")
	flag.BoolVar(&dnsPrefetching, "dns", false, "Enable DNS prefetching")
	flag.BoolVar(&caching, "cache", false, "Enable object caching")
	flag.UintVar(&cacheTimeout, "timeout", 120, "Cache timeout in seconds")
	flag.UintVar(&maxCacheSize, "max_cache", MAX_CACHE_SIZE, "Maximum cache size")
	flag.UintVar(&maxObjSize, "max_obj", MAX_OBJ_SIZE, "Maximum object size")
	flag.BoolVar(&linkPrefetching, "link", false, "Enable link prefetching")
	flag.UintVar(&maxConcurrency, "max_conc", 10, "Number of threads for link prefetching")
	flag.StringVar(&outputFile, "file", "proxy.log", "Output file name")
	flag.Parse()
}
// main parses flags, starts the stats writer, then (once the TODO accept
// loop is implemented) serves connections forever.
func main() {
	initFlags()
	go saveStatistics()
	// TODO: Other initializations
	// NOTE(review): until the accept loop is written, this empty for{}
	// busy-spins a CPU core — it should block on a listener's Accept.
	for {
		// TODO: Main loop for accepting connections.
		//go handleRequest(conn)
	}
}
|
package factory
//import (
// "errors"
// "fmt"
//
// "github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp"
// "github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp/pkcs11"
// "github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp/sw"
//)
//
//const (
// PKCS11BasedFactoryName = "PKCS11"
//)
//
//type PKCS11Factory struct{}
//
//func (f *PKCS11Factory) Name() string {
// return PKCS11BasedFactoryName
//}
//
//func (f *PKCS11Factory) Get(config *FactoryOpts) (bccsp.BCCSP, error) {
// if config == nil || config.Pkcs11Opts == nil {
// return nil, errors.New("Invalid config. It must not be nil.")
// }
//
// p11Opts := config.Pkcs11Opts
//
// //TODO: PKCS11 does not need a keystore, but we have not migrated all of PKCS11 BCCSP to PKCS11 yet
// var ks bccsp.KeyStore
// if p11Opts.Ephemeral == true {
// ks = sw.NewDummyKeyStore()
// } else if p11Opts.FileKeystore != nil {
// fks, err := sw.NewFileBasedKeyStore(nil, p11Opts.FileKeystore.KeyStorePath, false)
// if err != nil {
// return nil, fmt.Errorf("Failed to initialize software key store: %s", err)
// }
// ks = fks
// } else {
// ks = sw.NewDummyKeyStore()
// }
// return pkcs11.New(*p11Opts, ks)
//}
|
package mssql
import _ "github.com/denisenkom/go-mssqldb"
|
package post
import (
"database/sql"
"fmt"
"log"
// driver import
_ "github.com/go-sql-driver/mysql"
)
var DB *sql.DB
var TotalPosts int
// connect to db and get all posts at start of the server
// init connects to the database and seeds the TotalPosts counter.
// NOTE(review): DB work in init() runs on import and Fatals on failure;
// consider an explicit setup function.
func init() {
	ConnectDB()
	// FIX: the post count was computed by fetching every row and
	// iterating rows.Next(); COUNT(*) gets the same number in one round
	// trip without transferring the table.
	queryString := "select count(*) from post"
	if err := DB.QueryRow(queryString).Scan(&TotalPosts); err != nil {
		log.Println("Executing function DB.QueryRow while executing function init() at data.go...")
		log.Fatalln(err)
	}
}
// ConnectDB opens the global DB handle and verifies connectivity.
// FIX: sql.Open only validates the DSN and never touches the network, so
// a Ping is added to fail fast on an unreachable/misconfigured database.
func ConnectDB() {
	var err error
	DB, err = sql.Open("mysql", "username:password@tcp(dbaddress:port)/dbname")
	if err != nil {
		log.Println("Executing function sql.Open() while executing ConnectDB() at data.go...")
		log.Fatalln(err)
	}
	if err = DB.Ping(); err != nil {
		log.Println("Executing function DB.Ping() while executing ConnectDB() at data.go...")
		log.Fatalln(err)
	}
}
// insert stores the post's title/summary/body and bumps TotalPosts.
// FIX: log.Fatalln terminated the whole process and made the following
// `return` unreachable, even though the signature already returns err;
// errors are now logged and returned to the caller. The Exec result is
// also renamed res — it is a sql.Result, not rows.
func (p *Post) insert() (err error) {
	queryString := "insert into post (title, summary, body) values (?, ?, ?)"
	res, err := DB.Exec(queryString, p.Title, p.Summary, p.Body)
	if err != nil {
		log.Println("Executing function DB.Exec() while executing insert() in data.go...")
		return
	}
	n, err := res.RowsAffected()
	if err != nil {
		log.Println("Executing function rows.RowsAffected() while executing insert() in data.go...")
		return
	}
	TotalPosts++
	fmt.Printf("%d rows affected\n", n)
	return
}
// rowToPost scans a single-row result into p.
// FIX: log.Fatalln killed the server on any scan error — including the
// routine sql.ErrNoRows from a missing post — despite err already being
// returned; the error is now logged and returned instead.
func (p *Post) rowToPost(row *sql.Row) (err error) {
	err = row.Scan(&p.Id, &p.Title, &p.PublishedDate, &p.Summary, &p.Body)
	if err != nil {
		log.Println("Executing function row.Scan() while executing rowToPost() in data.go...")
	}
	return
}
// rowsToPost scans the current row of an iterating result set into p.
// FIX: log.Fatalln killed the server on any scan error despite err
// already being returned; the error is now logged and returned instead.
func (p *Post) rowsToPost(rows *sql.Rows) (err error) {
	err = rows.Scan(&p.Id, &p.Title, &p.PublishedDate, &p.Summary, &p.Body)
	if err != nil {
		log.Println("Executing function rows.Scan() while executing rowsToPost() in data.go...")
	}
	return
}
// GetFivePosts returns page `offset` (five posts per page) in descending
// id order.
// FIX: log.Fatalln killed the server on query/scan errors despite err
// being returned; errors now propagate. Also checks rows.Err() after the
// loop — iteration can end early on a connection error.
func GetFivePosts(offset int) (posts []*Post, err error) {
	queryString := "select * from post order by id desc limit ?, 5"
	rows, err := DB.Query(queryString, offset*5)
	if err != nil {
		log.Println("Executing function DB.Query() while executing GetFivePosts() in data.go...")
		return
	}
	defer rows.Close()
	for rows.Next() {
		result := Post{}
		if err = result.rowsToPost(rows); err != nil {
			log.Println("Executing function result.rowsToPost() while executing GetFivePosts() in data.go...")
			return
		}
		posts = append(posts, &result)
	}
	err = rows.Err()
	return
}
// GetPostByTitle fetches the post with the exact given title.
// NOTE(review): log.Fatalln here kills the whole server whenever the
// title is not found (sql.ErrNoRows). A proper fix needs an error return,
// which would change the signature for all callers — flagging instead.
func GetPostByTitle(title string) (post Post) {
	queryString := "select * from post where title = ?"
	row := DB.QueryRow(queryString, title)
	post = Post{}
	err := post.rowToPost(row)
	if err != nil {
		log.Println("Executing function post.rowToPost() while executing GetPostByTitle() in data.go...")
		log.Fatalln(err)
	}
	return
}
// GetPostById fetches the post with the given id.
// FIX: log.Fatalln killed the server when the id was absent
// (sql.ErrNoRows) even though the signature returns err; the error is now
// logged and returned to the caller.
func GetPostById(id int) (post Post, err error) {
	queryString := "select * from post where id = ?"
	row := DB.QueryRow(queryString, id)
	post = Post{}
	err = post.rowToPost(row)
	if err != nil {
		log.Println("Executing function post.rowToPost() while executing GetPostById() in data.go...")
	}
	return
}
|
package kademlia
import (
"testing"
)
// TestChannels exercises ChannelList lookup semantics: a Find with the wrong
// id must fail, a deleted entry must no longer be found, and an entry that
// was added among several others must be found.
func TestChannels(t *testing.T) {
	channels := NewChannelList()
	contacts := []Contact{
		NewContact(NewKademliaID("FFFFFFFF00000000000000000000000000000000"), "127.0.0.1:8000"),
		NewContact(NewKademliaID("FFFFFFFF10000000000000000000000000000000"), "127.0.0.1:8000"),
		NewContact(NewKademliaID("FFFFFFFF20000000000000000000000000000000"), "127.0.0.1:8000"),
	}
	// Same contact but a different id (6 vs 5) must not match.
	channels.Add(&contacts[0], 5, make(chan Header))
	_, err := channels.Find(&contacts[0], 6)
	if err == nil {
		t.Error("Channel found")
	}
	// After Delete, the (contact, 5) entry must be gone.
	channels.Delete(&contacts[0], 5)
	_, err = channels.Find(&contacts[0], 5)
	if err == nil {
		t.Error("Channel found")
	}
	// Re-add several entries; an existing (contact, id) pair must be found.
	channels.Add(&contacts[0], 5, make(chan Header))
	channels.Add(&contacts[2], 8, make(chan Header))
	channels.Add(&contacts[1], 9, make(chan Header))
	_, err = channels.Find(&contacts[2], 8)
	if err != nil {
		t.Error("Channel not found")
	}
}
|
package service
import (
"context"
"fmt"
"sync"
"time"
"github.com/geoirb/rss-aggregator/pkg/models"
)
// storage persists news rows and serves queries over them.
type storage interface {
	AddNews(newsField ...string) (err error)
	GetNews(ctx context.Context, title *string) (news []models.News, err error)
}

// source fetches the raw feed payload for a URL.
// NOTE(review): the method name contains a Cyrillic "а" (GetDatа); renaming
// would break every implementation, so it is documented rather than changed.
type source interface {
	GetDatа(url string) (data []byte, err error)
}

// rss parses a raw feed payload into the models.Rss structure.
type rss interface {
	Parse(data []byte) (rss models.Rss, err error)
}

// filter selects the news items matching the given format that fall within
// the polling interval.
type filter interface {
	News(src []models.News, format string, interval time.Duration) (dst []models.News)
}

// converter flattens news items into string rows for storage.AddNews.
type converter interface {
	News(src []models.News) (dst [][]string)
}
// Service is the public API of the RSS aggregator: lifecycle control plus
// feed tracking and news retrieval. The @gtg annotations below drive
// HTTP-transport code generation and must not be edited casually.
// NOTE(review): "Shoutdown" is a typo for "Shutdown", but renaming it here
// would break the generated transport and every implementation.
// @gtg http-server http-errors
type Service interface {
	Start()
	Shoutdown()
	// @gtg http-server-method POST
	// @gtg http-server-uri-path /tracking
	// @gtg http-server-json-tag url url
	// @gtg http-server-json-tag format format
	// @gtg http-server-content-type application/json
	// @gtg http-server-response-status http.StatusCreated
	// @gtg http-server-response-content-type application/json
	StartTracking(ctx context.Context, url string, format string) (err error)
	// @gtg http-server-method DELETE
	// @gtg http-server-uri-path /tracking
	// @gtg http-server-json-tag url url
	// @gtg http-server-content-type application/json
	// @gtg http-server-response-status http.StatusOK
	// @gtg http-server-response-content-type application/json
	StopTracking(ctx context.Context, url string) (err error)
	// @gtg http-server-method GET
	// @gtg http-server-uri-path /news
	// @gtg http-server-query title={title}
	// @gtg http-server-response-status http.StatusOK
	// @gtg http-server-response-json-tag news news
	// @gtg http-server-response-content-type application/json
	GetNews(ctx context.Context, title *string) (news []models.News, err error)
}
// service is the default Service implementation. One tracking goroutine runs
// per tracked URL; cache maps url -> context.CancelFunc for that goroutine,
// and newsChan carries converted news rows to the consumer started by Start.
type service struct {
	source    source
	rss       rss
	filter    filter
	converter converter
	storage   storage
	cache     sync.Map
	interval  time.Duration
	newsChan  chan [][]string
}
// Start launches the consumer goroutine that drains newsChan and persists
// each converted row via storage.AddNews. It returns immediately; the
// goroutine exits when Shoutdown closes newsChan.
// NOTE(review): AddNews errors are silently discarded here.
func (s *service) Start() {
	go func() {
		for news := range s.newsChan {
			for _, n := range news {
				s.storage.AddNews(n...)
			}
		}
	}()
}
// Shoutdown (sic — name fixed by the Service interface) cancels every
// tracking goroutine registered in the cache, then closes newsChan, which
// terminates the consumer started by Start.
func (s *service) Shoutdown() {
	s.cache.Range(func(k, v interface{}) bool {
		v.(context.CancelFunc)()
		return true
	})
	close(s.newsChan)
}
// StartTracking begins periodic tracking of url, emitting news in the given
// format. It fails when url is already being tracked.
func (s *service) StartTracking(ctx context.Context, url, format string) (err error) {
	if _, isExist := s.cache.Load(url); isExist {
		err = fmt.Errorf("%s is exist", url)
		return
	}
	trackingCtx, trackingCnl := context.WithCancel(context.Background())
	// Register the cancel func BEFORE launching the goroutine: the original
	// stored it afterwards, so a concurrent Shoutdown/StopTracking could miss
	// the entry and leak the tracking goroutine.
	s.cache.Store(url, trackingCnl)
	go s.tracking(trackingCtx, url, format)
	return
}
// StopTracking cancels the background tracking goroutine registered for url
// and removes it from the cache. It fails when url is not being tracked.
func (s *service) StopTracking(ctx context.Context, url string) (err error) {
	cancel, isExist := s.cache.Load(url)
	if !isExist {
		err = fmt.Errorf("%s is not exist", url)
		return
	}
	cancel.(context.CancelFunc)()
	s.cache.Delete(url)
	return
}
// GetNews returns stored news, optionally filtered by title (nil = no filter);
// it delegates directly to the storage layer.
func (s *service) GetNews(ctx context.Context, title *string) (news []models.News, err error) {
	news, err = s.storage.GetNews(ctx, title)
	return
}
// tracking polls url every s.interval until ctx is cancelled, fetching once
// immediately on start so callers do not wait a full interval for the first
// batch of news.
func (s *service) tracking(ctx context.Context, url, format string) {
	t := time.NewTicker(s.interval)
	s.getNews(url, format)
	for {
		select {
		case <-t.C:
			s.getNews(url, format)
		case <-ctx.Done():
			t.Stop()
			return
		}
	}
}
// getNews fetches url, parses it as RSS, filters the items by format and
// interval, and pushes the converted rows onto newsChan. Fetch and parse
// failures are silently skipped (best-effort polling).
func (s *service) getNews(url, format string) {
	if data, err := s.source.GetDatа(url); err == nil {
		// BUG FIX: the original tested `err != nil` here, so news was only
		// processed when parsing FAILED and rss was the zero value.
		if rss, err := s.rss.Parse(data); err == nil {
			news := s.filter.News(rss.News, format, s.interval)
			s.newsChan <- s.converter.News(news)
		}
	}
}
// NewService assembles a Service from its collaborators. newsChan is given a
// buffer of one batch so a single poll can complete without blocking on the
// consumer. Call Start to begin consuming and Shoutdown to stop.
func NewService(
	source source,
	rss rss,
	filter filter,
	converter converter,
	storage storage,
	interval time.Duration,
) Service {
	return &service{
		source:    source,
		rss:       rss,
		filter:    filter,
		converter: converter,
		storage:   storage,
		interval:  interval,
		newsChan:  make(chan [][]string, 1),
	}
}
|
package core
import (
"fmt"
"github.com/comdeng/HapGo/hapgo/logger"
"reflect"
"strings"
"sync"
)
// Controller is the base embedded by every user controller; it carries the
// resolved controller/method names and the request/response pair.
type Controller struct {
	Name   string
	Method string
	Req    *HttpRequest
	Res    *HttpResponse
}

// controllerInfo is the registry entry for one controller type: its
// reflect.Type, registered name, and the set of dispatchable method names.
type controllerInfo struct {
	Typ     reflect.Type
	Name    string
	Methods map[string]bool
}
// Write sends str to the response body.
func (ctrl *Controller) Write(str string) {
	fmt.Fprint(ctrl.Res.Writer, str)
}
// controllers is the global name -> controllerInfo registry populated by
// RegisterController; filterMethods holds the base Controller's own method
// names, which must never be dispatched; once guards its lazy init.
var controllers map[string]*controllerInfo = make(map[string]*controllerInfo)
var filterMethods map[string]bool
var once sync.Once
// RegisterController records the controller value c under ctlName, indexing
// every exported method except those inherited from the embedded base
// Controller (computed once via initFilterMethods).
func RegisterController(ctlName string, c interface{}) {
	once.Do(func() {
		initFilterMethods(new(Controller))
	})
	typ := reflect.TypeOf(c)
	num := typ.NumMethod()
	methods := make(map[string]bool)
	for i := 0; i < num; i++ {
		name := typ.Method(i).Name
		// Skip methods promoted from core.Controller (Write etc.).
		if _, ok := filterMethods[name]; !ok {
			methods[name] = true
		}
	}
	controllers[ctlName] = &controllerInfo{
		typ,
		ctlName,
		methods,
	}
}
// initFilterMethods snapshots the method set of the base *Controller so
// RegisterController can exclude those promoted methods from routing.
func initFilterMethods(ctrl *Controller) {
	baseType := reflect.TypeOf(ctrl)
	count := baseType.NumMethod()
	filterMethods = make(map[string]bool, count)
	for idx := 0; idx < count; idx++ {
		filterMethods[baseType.Method(idx).Name] = true
	}
}
// NewController instantiates the registered controller ctlName and verifies
// methodName is dispatchable, panicking with "hapgo.u_notfound" otherwise
// (callers are expected to recover and translate this into a 404).
// The returned value is a reflect.Value of a fresh controller instance whose
// first field has been set to the populated base Controller.
func NewController(ctlName string, methodName string, req *HttpRequest, res *HttpResponse) (ctrl reflect.Value) {
	ctlName = strings.ToLower(ctlName)
	var ci *controllerInfo
	var ok bool
	if ci, ok = controllers[ctlName]; !ok {
		logger.Warn("controllerNotFound controllerName=" + ctlName)
		panic("hapgo.u_notfound")
	}
	// Normalize to the exported-method spelling used at registration time.
	methodName = strings.Title(methodName)
	if _, ok = ci.Methods[methodName]; !ok {
		logger.Warn("methodNotFound methodName=" + methodName)
		panic("hapgo.u_notfound")
	}
	ctrl = reflect.New(ci.Typ.Elem())
	coreCtrl := &Controller{ctlName, methodName, req, res}
	// core.Controller must be the first field of every user controller.
	ctrl.Elem().Field(0).Set(reflect.ValueOf(coreCtrl))
	return ctrl
}
// CallMethod invokes methodName (normalized with strings.Title) on the
// controller value with the given arguments, panicking with
// "hapgo.u_notfound" when the method does not exist.
func CallMethod(ctrl reflect.Value, methodName string, args ...interface{}) {
	methodName = strings.Title(methodName)
	method := ctrl.MethodByName(methodName)
	// BUG FIX: IsValid must be checked first — IsNil panics on the zero
	// Value that MethodByName returns for a missing method, so the original
	// order crashed with a reflect ValueError instead of the intended panic.
	if !method.IsValid() || method.IsNil() {
		logger.Warn("hapgo.methodNotFound methodName=" + methodName)
		panic("hapgo.u_notfound")
	}
	params := make([]reflect.Value, len(args))
	for k, v := range args {
		params[k] = reflect.ValueOf(v)
	}
	method.Call(params)
}
|
package main
import (
"container/list"
"math"
)
// treePair couples a tree node with its zero-based depth for the BFS below.
type treePair struct {
	node  *TreeNode
	level int
}

// isEvenOddTree reports whether the binary tree satisfies the even-odd
// property: nodes on even levels must hold strictly increasing odd values
// (left to right) and nodes on odd levels strictly decreasing even values.
func isEvenOddTree(root *TreeNode) bool {
	if root == nil {
		return true
	}
	queue := list.New()
	queue.PushBack(&treePair{node: root, level: 0})
	for queue.Len() > 0 {
		count := queue.Len()
		// Running bounds for the current level.
		lowBound := math.MinInt32
		highBound := math.MaxInt32
		for j := 0; j < count; j++ {
			front := queue.Front()
			queue.Remove(front)
			pair := front.Value.(*treePair)
			val := pair.node.Val
			if pair.level%2 == 0 {
				// Even level: odd values, strictly increasing.
				if val%2 == 0 || val <= lowBound {
					return false
				}
				lowBound = val
			} else {
				// Odd level: even values, strictly decreasing.
				if val%2 == 1 || val >= highBound {
					return false
				}
				highBound = val
			}
			if left := pair.node.Left; left != nil {
				queue.PushBack(&treePair{node: left, level: pair.level + 1})
			}
			if right := pair.node.Right; right != nil {
				queue.PushBack(&treePair{node: right, level: pair.level + 1})
			}
		}
	}
	return true
}
|
package set
import (
"github.com/loginradius/lr-cli/cmd/set/accountPassword"
"github.com/loginradius/lr-cli/cmd/set/domain"
"github.com/loginradius/lr-cli/cmd/set/email"
"github.com/loginradius/lr-cli/cmd/set/site"
"github.com/loginradius/lr-cli/cmd/set/theme"
"github.com/spf13/cobra"
)
// NewsetCmd builds the "set" base command and attaches every set subcommand
// (site, theme, domain, email, account password).
func NewsetCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "set",
		Short: "set command",
		Long:  `This commmand acts as a base command for set subcommands`,
	}
	cmd.AddCommand(site.NewSiteCmd())
	cmd.AddCommand(theme.NewThemeCmd())
	cmd.AddCommand(domain.NewdomainCmd())
	cmd.AddCommand(email.NewemailCmd())
	cmd.AddCommand(accountPassword.NewaccountPasswordCmd())
	return cmd
}
|
package main
import (
"flag"
"fmt"
"colorize"
"id"
"log"
"net/http"
"encoding/json"
"tasks"
"os"
"path/filepath"
"strings"
"strconv"
)
// RequestHanler (sic — referenced by name in main) accepts POST requests
// whose JSON body describes a task, creates the task, and runs it.
// Every response carries an X-REQUEST-ID header with a fresh guid.
func RequestHanler(w http.ResponseWriter, r *http.Request) {
	guid, _ := id.Next()
	// BUG FIX: fmt.Sprintln appended a trailing newline to the header value,
	// which is invalid in an HTTP header; use fmt.Sprint instead.
	w.Header().Set("X-REQUEST-ID", fmt.Sprint(guid))
	// BUG FIX: the original only rejected GET, so PUT/DELETE/etc. fell
	// through and returned an empty 200; reject everything but POST.
	if r.Method != "POST" {
		http.Error(w, "", http.StatusMethodNotAllowed)
		return
	}
	var body map[string]interface{}
	dec := json.NewDecoder(r.Body)
	defer r.Body.Close()
	if err := dec.Decode(&body); err != nil {
		http.Error(w, "Error to parse the post body. Pls be json format first, then contact with maigoxin@gmail.com", http.StatusBadRequest)
		colorize.Err(err.Error())
		return
	}
	task, err := tasks.NewTask(guid, body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	tasks.Run(task)
}
// init replays previously persisted tasks: it walks ./data/, decodes each
// task file (named by its numeric guid) as JSON, rebuilds the task from its
// `Raw` payload, and re-runs it. The "data/statistic" file is skipped.
// NOTE(review): decode/reload failures are silently ignored (best-effort
// recovery); an init() with this much I/O is fragile — consider an explicit
// loadTasks() called from main.
func init() {
	filepath.Walk("./data/", func(path string, f os.FileInfo, err error)(error) {
		if f == nil {
			return err
		}
		if f.IsDir() {
			return nil
		}
		if path == "data/statistic" {
			return nil
		}
		file, err := os.OpenFile(path, os.O_RDONLY, 0666)
		if err != nil {
			return err
		}
		defer file.Close()
		dec := json.NewDecoder(file)
		var data map[string]interface{}
		if err = dec.Decode(&data); err != nil {
			return err
		}
		// The file name is the task's guid, e.g. data/1234567890.
		splits := strings.Split(path, "/")
		guid := splits[len(splits) - 1]
		int64Id, _ := strconv.ParseInt(guid, 10, 64)
		if task, err := tasks.NewTask(int64Id, data[`Raw`].(map[string]interface{})); err == nil {
			tasks.Run(task)
		}
		return nil
	})
}
// main parses flags and serves the task API on 127.0.0.1:<port>.
func main() {
	var port = flag.Int("port", 10200, `port, default is 10200`)
	// BUG FIX: the usage text claimed "default is false" while the actual
	// default is true; the text now matches the real default.
	var isDebug = flag.Bool("debug", true, `debug, default is true`)
	flag.Parse()
	colorize.IsDebug = *isDebug
	colorize.Info(`going to run :%d`, *port)
	http.HandleFunc("/", RequestHanler)
	log.Fatal(http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", *port), nil))
}
|
package externallogger
// UpdateChannel is a receive-only stream of incoming updates.
type UpdateChannel <-chan *update
// Clear drains every update currently buffered in the channel without
// blocking for new ones.
func (up UpdateChannel) Clear() {
	for len(up) > 0 {
		<-up
	}
}
// UpdateConfig holds the polling parameters for fetching updates.
type UpdateConfig struct {
	Offset  int // identifier of the first update to fetch
	Limit   int // maximum updates per request; 0 means the server default
	Timeout int // long-polling timeout in seconds
}
// GenerateUpdateConfig builds an UpdateConfig starting at offset with the
// default limit (0) and a 30-second timeout.
func GenerateUpdateConfig(offset int) *UpdateConfig {
	cfg := UpdateConfig{Timeout: 30}
	cfg.Offset = offset
	return &cfg
}
// response is the JSON envelope returned by the updates endpoint.
type response struct {
	Ok     bool      `json:"ok"`
	Result []*update `json:"result"`
}
// update is a single incoming update with its monotonically increasing id.
type update struct {
	UpdateId int     `json:"update_id"`
	Message  message `json:"message"`
}
// message carries the text payload plus sender/chat metadata.
type message struct {
	MessageId int    `json:"message_id"`
	From      *user  `json:"from"`
	Chat      *chat  `json:"chat"`
	Date      int64  `json:"date"`
	Text      string `json:"text"`
}
// user describes the sender of a message.
type user struct {
	Id        int    `json:"id"`
	IsBot     bool   `json:"is_bot"`
	FirstName string `json:"first_name"`
	Username  string `json:"username"`
	Language  string `json:"language_code"`
}
// chat describes the conversation a message belongs to.
type chat struct {
	Id        int    `json:"id"`
	FirstName string `json:"first_name"`
	Username  string `json:"username"`
	Type      string `json:"type"`
}
|
package routers
import (
"github.com/majid-cj/go-docker-mongo/apps"
"github.com/majid-cj/go-docker-mongo/domain/entity"
"github.com/majid-cj/go-docker-mongo/util"
"github.com/kataras/iris/v12"
)
// VerifyCodeRouter exposes the verification-code application service over
// HTTP; each handler decodes the request body and translates errors into
// JSON error responses.
type VerifyCodeRouter struct {
	vca apps.VerifyCodeAppInterface
}
// NewVerifyCodeRouter wires a router to the given application service.
func NewVerifyCodeRouter(vca apps.VerifyCodeAppInterface) *VerifyCodeRouter {
	return &VerifyCodeRouter{
		vca: vca,
	}
}
// NewVerifyCode creates and stores a verification code built from the
// request body, responding 201 with the created code.
func (vcr *VerifyCodeRouter) NewVerifyCode(c iris.Context) {
	var payload entity.VerificationCode
	if err := c.ReadJSON(&payload); err != nil {
		util.ResponseT(util.GetError("error_parsing_data"), iris.StatusBadRequest, c)
		return
	}
	payload.PrepareVerificationCode(payload.Member, payload.CodeType)
	code, err := vcr.vca.CreateVerificationCode(&payload)
	if err != nil {
		util.ResponseT(err, iris.StatusBadRequest, c)
		return
	}
	util.Response(code, iris.StatusCreated, c)
}
// VerificationCodeFromEmail creates a verification code delivered by email;
// the code itself is not echoed back (201 with empty body on success).
func (vcr *VerifyCodeRouter) VerificationCodeFromEmail(c iris.Context) {
	var payload entity.VerificationCode
	if err := c.ReadJSON(&payload); err != nil {
		util.ResponseT(util.GetError("error_parsing_data"), iris.StatusBadRequest, c)
		return
	}
	if _, err := vcr.vca.CreateVerificationCodeFromEmail(&payload); err != nil {
		util.ResponseT(err, iris.StatusBadRequest, c)
		return
	}
	util.Response(nil, iris.StatusCreated, c)
}
// ResetPasswordVerifyCode resets the member's password using the supplied
// verification code, responding 200 with an empty body on success.
func (vcr *VerifyCodeRouter) ResetPasswordVerifyCode(c iris.Context) {
	var payload entity.VerificationCode
	if err := c.ReadJSON(&payload); err != nil {
		util.ResponseT(util.GetError("error_parsing_data"), iris.StatusBadRequest, c)
		return
	}
	if err := vcr.vca.ResetPassword(&payload); err != nil {
		util.ResponseT(err, iris.StatusBadRequest, c)
		return
	}
	util.Response(nil, iris.StatusOK, c)
}
// CheckVerifyCode validates a submitted verification code, responding 200
// with an empty body when the code checks out. Note: malformed JSON here
// yields 422 (unlike the 400 used by the sibling handlers).
func (vcr *VerifyCodeRouter) CheckVerifyCode(c iris.Context) {
	var payload entity.VerificationCode
	if err := c.ReadJSON(&payload); err != nil {
		util.ResponseT(util.GetError("error_parsing_data"), iris.StatusUnprocessableEntity, c)
		return
	}
	if err := vcr.vca.CheckVerificationCode(&payload); err != nil {
		util.ResponseT(err, iris.StatusBadRequest, c)
		return
	}
	util.Response(nil, iris.StatusOK, c)
}
// RenewVerifyCode issues a replacement verification code for the member in
// the request body, responding 201 with the new code.
func (vcr *VerifyCodeRouter) RenewVerifyCode(c iris.Context) {
	var payload entity.VerificationCode
	if err := c.ReadJSON(&payload); err != nil {
		util.ResponseT(util.GetError("error_parsing_data"), iris.StatusBadRequest, c)
		return
	}
	code, err := vcr.vca.RenewVerificationCode(&payload)
	if err != nil {
		util.ResponseT(err, iris.StatusBadRequest, c)
		return
	}
	util.Response(code, iris.StatusCreated, c)
}
|
package frac
// Fraction type.
// See https://en.wikipedia.org/wiki/Fraction_%28mathematics%29.
// No invariant guards against Den == 0 or enforces reduced form; callers
// are responsible for providing sensible values.
type Frac struct {
	Num int // numerator
	Den int // denominator
}
|
package main
import (
"github.com/Microkubes/jwt-issuer/app"
"github.com/keitaroinc/goa"
)
// JWTController implements the jwt resource.
type JWTController struct {
	*goa.Controller
}
// NewJWTController creates a jwt controller.
func NewJWTController(service *goa.Service) *JWTController {
	return &JWTController{Controller: service.NewController("JWTController")}
}
// Signin runs the signin action.
// NOTE(review): this is still the generated skeleton — it returns nil
// without issuing a token; the signin logic has not been implemented yet.
func (c *JWTController) Signin(ctx *app.SigninJWTContext) error {
	// JWTController_Signin: start_implement
	// Put your logic here
	// JWTController_Signin: end_implement
	return nil
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/hyperledger/fabric/core/chaincode/shim"
pb "github.com/hyperledger/fabric/protos/peer"
)
// getChaincodeID recovers the identity of the chaincode that invoked us by
// unwrapping the signed proposal: proposal -> payload -> invocation spec,
// and returns it as "<channel><delimiter><chaincode name>".
func getChaincodeID(stub shim.ChaincodeStubInterface) (string, error) {
	sp, err := stub.GetSignedProposal()
	if err != nil {
		return "", err
	}
	proposal := &pb.Proposal{}
	if err := proto.Unmarshal(sp.ProposalBytes, proposal); err != nil {
		return "", err
	}
	payload := &pb.ChaincodeProposalPayload{}
	if err := proto.Unmarshal(proposal.Payload, payload); err != nil {
		return "", err
	}
	spec := &pb.ChaincodeInvocationSpec{}
	if err := proto.Unmarshal(payload.Input, spec); err != nil {
		return "", err
	}
	return getKey(stub.GetChannelID(), spec.ChaincodeSpec.ChaincodeId.Name), nil
}
// getKey composes the registry key "<channel><delimiter><chaincodeName>".
func getKey(channel, chaincodeName string) string {
	return fmt.Sprintf("%s%s%s", channel, delimiter, chaincodeName)
}
// onlyBroker reports whether the current invocation originates from the
// broker contract on the configured channel; failures to resolve the invoker
// are treated as "not the broker".
func onlyBroker(stub shim.ChaincodeStubInterface) bool {
	brokerCCID := channelID + delimiter + brokerContractName
	invoker, err := getChaincodeID(stub)
	if err != nil {
		fmt.Printf("get Invoker failed: %s", err.Error())
		return false
	}
	return brokerCCID == invoker
}
// putMap persists the given meta map into the ledger under metaName as JSON.
// A nil map is a no-op.
func (transaction *Transaction) putMap(stub shim.ChaincodeStubInterface, metaName string, meta map[string]uint64) error {
	if meta == nil {
		return nil
	}
	raw, marshalErr := json.Marshal(meta)
	if marshalErr != nil {
		return marshalErr
	}
	return stub.PutState(metaName, raw)
}
// getMap loads the JSON map stored under metaName. A missing key (nil state)
// yields an empty, ready-to-use map rather than an error — the other
// get*Meta helpers should follow this same pattern.
func (transaction *Transaction) getMap(stub shim.ChaincodeStubInterface, metaName string) (map[string]uint64, error) {
	metaBytes, err := stub.GetState(metaName)
	if err != nil {
		return nil, err
	}
	meta := make(map[string]uint64)
	if metaBytes == nil {
		return meta, nil
	}
	if err := json.Unmarshal(metaBytes, &meta); err != nil {
		return nil, err
	}
	return meta, nil
}
// setAppchainsMeta persists the appchain registry under appChainsMeta as JSON.
func (transaction *Transaction) setAppchainsMeta(stub shim.ChaincodeStubInterface, appchains map[string]Appchain) error {
	appchainsBytes, err := json.Marshal(appchains)
	if err != nil {
		return err
	}
	return stub.PutState(appChainsMeta, appchainsBytes)
}
// getAppchainsMeta loads the appchain registry stored under appChainsMeta.
// Like getMap, a missing key yields an empty map instead of an error.
func (transaction *Transaction) getAppchainsMeta(stub shim.ChaincodeStubInterface) (map[string]Appchain, error) {
	appchainsBytes, err := stub.GetState(appChainsMeta)
	if err != nil {
		return nil, err
	}
	appchains := make(map[string]Appchain)
	// BUG FIX: the original unmarshalled unconditionally, so a never-written
	// key (nil bytes) returned "unexpected end of JSON input" instead of an
	// empty registry — inconsistent with getMap.
	if appchainsBytes == nil {
		return appchains, nil
	}
	if err := json.Unmarshal(appchainsBytes, &appchains); err != nil {
		return nil, err
	}
	return appchains, nil
}
// setRemoteWhiteListMeta persists the remote whitelist (service -> allowed
// callers) under remoteWhiteListMeta as JSON.
func (transaction *Transaction) setRemoteWhiteListMeta(stub shim.ChaincodeStubInterface, remoteWhiteList map[string][]string) error {
	remoteWhiteListBytes, err := json.Marshal(remoteWhiteList)
	if err != nil {
		return err
	}
	return stub.PutState(remoteWhiteListMeta, remoteWhiteListBytes)
}
// getRemoteWhiteListMeta loads the remote whitelist stored under
// remoteWhiteListMeta. Like getMap, a missing key yields an empty map.
func (transaction *Transaction) getRemoteWhiteListMeta(stub shim.ChaincodeStubInterface) (map[string][]string, error) {
	remoteWhiteListBytes, err := stub.GetState(remoteWhiteListMeta)
	if err != nil {
		return nil, err
	}
	remoteWhiteList := make(map[string][]string)
	// BUG FIX: guard against a never-written key — json.Unmarshal(nil, …)
	// fails with "unexpected end of JSON input", unlike getMap's behavior.
	if remoteWhiteListBytes == nil {
		return remoteWhiteList, nil
	}
	if err := json.Unmarshal(remoteWhiteListBytes, &remoteWhiteList); err != nil {
		return nil, err
	}
	return remoteWhiteList, nil
}
// setStartTimeStampMeta persists the per-chain start timestamps under
// startTimestampMeta as JSON.
func (transaction *Transaction) setStartTimeStampMeta(stub shim.ChaincodeStubInterface, startTimestamp map[string]timestamp.Timestamp) error {
	startTimestampBytes, err := json.Marshal(startTimestamp)
	if err != nil {
		return err
	}
	return stub.PutState(startTimestampMeta, startTimestampBytes)
}
// getStartTimeStampMeta loads the per-chain start timestamps stored under
// startTimestampMeta. Like getMap, a missing key yields an empty map.
func (transaction *Transaction) getStartTimeStampMeta(stub shim.ChaincodeStubInterface) (map[string]timestamp.Timestamp, error) {
	startTimestampBytes, err := stub.GetState(startTimestampMeta)
	if err != nil {
		return nil, err
	}
	startTimestamp := make(map[string]timestamp.Timestamp)
	// BUG FIX: guard against a never-written key — json.Unmarshal(nil, …)
	// fails with "unexpected end of JSON input", unlike getMap's behavior.
	if startTimestampBytes == nil {
		return startTimestamp, nil
	}
	if err := json.Unmarshal(startTimestampBytes, &startTimestamp); err != nil {
		return nil, err
	}
	return startTimestamp, nil
}
// genRemoteFullServiceID builds a full service id of the form
// "<colon><chainID><colon><serviceID>".
// NOTE(review): the leading colon yields ":<chain>:<service>", suggesting an
// empty bitxhub/relay segment in a three-part id — confirm against callers.
func (transaction *Transaction) genRemoteFullServiceID(chainID string, serviceID string) string {
	return colon + chainID + colon + serviceID
}
// genIBTPid builds an IBTP identifier of the form "<from><hyphen><to><hyphen><id>".
func (transaction *Transaction) genIBTPid(from string, to string, id string) string {
	return fmt.Sprintf("%s%s%s%s%s", from, hyphen, to, hyphen, id)
}
// response is the JSON envelope wrapped inside every chaincode reply.
type response struct {
	OK      bool   `json:"ok"`      // false for error replies
	Message string `json:"message"` // human-readable error text
	Data    []byte `json:"data"`    // payload, if any
}
// errorResponse wraps msg in the JSON response envelope and returns it as a
// shim.Error reply.
// NOTE(review): the Marshal of this fixed struct cannot realistically fail;
// the panic is a programmer-bug guard, not an expected runtime path.
func errorResponse(msg string) pb.Response {
	res := &response{
		OK:      false,
		Message: msg,
	}
	data, err := json.Marshal(res)
	if err != nil {
		panic(err)
	}
	return shim.Error(string(data))
}
|
package service
import (
"net/http"
"strings"
"github.com/GXK666/eosTransfer/service/general"
"github.com/GXK666/eosTransfer/transfer"
"net"
"context"
"time"
"github.com/gogo/gateway"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/kazegusuri/grpc-panic-handler"
"github.com/spf13/viper"
"google.golang.org/grpc"
)
// Serve starts the combined gRPC + JSON-gateway server on the address from
// config key "rpc.addr". HTTP/2 requests with a gRPC content type are routed
// to the gRPC server; everything else goes through the grpc-gateway mux,
// which proxies JSON calls back into the same process over a loopback dial.
// This function blocks until the listener fails.
func Serve() {
	setupLog()
	panichandler.InstallPanicHandler(LogPanicHandler)
	// Protect GRPC from aborting by panic
	opts := []grpc.ServerOption{
		grpc.UnaryInterceptor(panichandler.UnaryPanicHandler),
		grpc.StreamInterceptor(panichandler.StreamPanicHandler),
	}
	grpcServer := grpc.NewServer(opts...)
	general.RegisterServiceServer(grpcServer, transfer.Server)
	mux := http.NewServeMux()
	addr := viper.GetString("rpc.addr")
	endpoint := addr
	parts := strings.Split(addr, ":")
	// The gateway cannot dial the wildcard address; rewrite it to loopback.
	if parts[0] == "0.0.0.0" {
		endpoint = "127.0.0.1:" + parts[1]
	}
	dialOpts := []grpc.DialOption{grpc.WithInsecure()}
	jsonpb := &gateway.JSONPb{
		EmitDefaults: true,
		Indent:       "  ",
		OrigName:     true,
	}
	gwmux := runtime.NewServeMux(
		runtime.WithMarshalerOption(runtime.MIMEWildcard, jsonpb),
		// This is necessary to get error details properly
		// marshalled in unary requests.
		runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler),
	)
	go func() {
		time.Sleep(time.Second) // Avoid immediate connection failure
		err := general.RegisterServiceHandlerFromEndpoint(context.Background(), gwmux, endpoint, dialOpts)
		if err != nil {
			panic(err)
		}
	}()
	mux.Handle("/", gwmux)
	// Demultiplex gRPC (HTTP/2 + grpc content type) from plain HTTP traffic.
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r)
		} else {
			mux.ServeHTTP(w, r)
		}
	})
	srv := &http.Server{
		Addr:    addr,
		Handler: handler,
	}
	conn, err := net.Listen("tcp", addr)
	if err != nil {
		panic(err)
	}
	err = srv.Serve(conn)
	if err != nil {
		panic(err)
	}
	return
}
|
package utils
import (
"crypto/sha1"
"encoding/hex"
)
// Sha1 returns the lowercase hex-encoded SHA-1 digest of str.
func Sha1(str string) string {
	digest := sha1.Sum([]byte(str))
	return hex.EncodeToString(digest[:])
}
|
package saml
import (
"time"
)
// tearUp freezes the package's time and id sources for deterministic tests:
// Now always returns the moment tearUp was called, and NewID returns the
// fixed value "id-MOCKID".
// NOTE(review): nothing restores the original Now/NewID afterwards — tests
// relying on real values must not run after this in the same process.
func tearUp() {
	fakeNow := time.Now()
	Now = func() time.Time {
		return fakeNow
	}
	NewID = func() string {
		return "id-MOCKID"
	}
}
|
package main
import (
"fmt"
"github.com/gocc/io"
)
// main reads test.txt, prints its size and full contents, then echoes each
// byte on its own line.
func main() {
	var in io.Input
	in.ReadSourceFile("test.txt")
	fmt.Printf("%d\n", in.Size)
	fmt.Printf("%s\n", string(in.Buf))
	for idx := int64(0); idx < in.Size; idx++ {
		fmt.Printf("%s\n", string(in.Buf[idx]))
	}
}
|
package domain
// Movie is a piece of content identified by id with display metadata.
type Movie struct {
	ID       int
	Title    string
	Language string
}

// ValidTitle reports whether the movie carries a non-empty title.
func (movie Movie) ValidTitle() bool {
	return len(movie.Title) > 0
}

// ValidLanguage reports whether the movie carries a non-empty language.
func (movie Movie) ValidLanguage() bool {
	return len(movie.Language) > 0
}

// IContent is satisfied by content types that can validate their metadata.
type IContent interface {
	ValidTitle() bool
	ValidLanguage() bool
}
|
package cmd
import (
"fmt"
"os"
"github.com/hspazio/mint/configurations"
"github.com/spf13/cobra"
)
// configCmd implements "mint config": with no arguments it prints all
// configurations; with exactly two it sets key = value; anything else
// prints usage hints. Reading requires a prior "mint init".
var configCmd = &cobra.Command{
	Use:   "config",
	Short: "Read/write configurations",
	Long:  "Read/write configurations",
	Run: func(cmd *cobra.Command, args []string) {
		switch len(args) {
		case 0:
			// Read path: dump every stored configuration value.
			conf, err := configurations.GetAll()
			if err != nil {
				fmt.Println("no configurations found, run 'mint init' command")
				os.Exit(1)
			}
			fmt.Println(conf)
		case 2:
			// Write path: args[0] is the key, args[1] the value.
			if err := configurations.Set(args[0], args[1]); err != nil {
				exit("%v", err)
			}
		default:
			fmt.Println("mint config - prints all configurations")
			fmt.Println("mint config dir /path/to/dir - sets a conf value")
		}
	},
}
|
package merkle
import (
"crypto/sha256"
"crypto/sha512"
"fmt"
"hash"
"io"
"io/ioutil"
"os"
"testing"
)
// TestMerkleHashWriterLargeChunk streams a temp file of five 512 KiB runs
// (bytes 0..4) through a merkle hash whose block size equals one run, then
// checks that each leaf node's checksum matches the known SHA-1 of its run.
func TestMerkleHashWriterLargeChunk(t *testing.T) {
	// make a large enough test file of increments, corresponding to our blockSize
	bs := 512 * 1024
	fh, err := ioutil.TempFile("", "merkleChunks.")
	if err != nil {
		t.Fatal(err)
	}
	defer fh.Close()
	defer os.Remove(fh.Name())
	for i := 0; i < 5; i++ {
		b := []byte{byte(i)}
		var b2 []byte
		for j := 0; j < bs; j++ {
			b2 = append(b2, b...)
		}
		fh.Write(b2)
	}
	if err := fh.Sync(); err != nil {
		t.Fatal(err)
	}
	// Rewind so io.Copy below reads the data just written.
	if _, err := fh.Seek(0, 0); err != nil {
		t.Fatal(err)
	}
	// SHA-1 of each homogeneous 512 KiB run.
	expectedSums := []string{
		"6a521e1d2a632c26e53b83d2cc4b0edecfc1e68c", // 0's
		"316c136d75ffdeb6ac5f1262c45dd8c6ec50fd85", // 1's
		"a56e9c245b9c50d61a91c6c4299813b5e6313722", // 2's
		"58bed752c036310cc48d9dd0d25c4ee9ad0d7ff1", // 3's
		"bf382d8394213b897424803c27f3e2ec2223e5fd", // 4's
	}
	h := NewHash(DefaultHashMaker, bs)
	if _, err = io.Copy(h, fh); err != nil {
		t.Fatal(err)
	}
	// Finalize so the last partial block (if any) becomes a node.
	h.Sum(nil)
	for i, node := range h.Nodes() {
		c, err := node.Checksum()
		if err != nil {
			t.Fatal(err)
		}
		if cs := fmt.Sprintf("%x", c); cs != expectedSums[i] {
			t.Errorf("expected sum %q; got %q", expectedSums[i], cs)
		}
	}
}
// TestMerkleHashWriter exercises the streaming merkle writer with a 10-byte
// block size: node counts before/after finalization, Sum() stability across
// repeated calls, Reset() semantics, and continued writes after a Sum().
func TestMerkleHashWriter(t *testing.T) {
	msg := []byte("the quick brown fox jumps over the lazy dog")
	expectedSum := "48940c1c72636648ad40aa59c162f2208e835b38"
	h := NewHash(DefaultHashMaker, 10)
	i, err := h.Write(msg)
	if err != nil {
		t.Fatal(err)
	}
	if i != len(msg) {
		t.Fatalf("expected to write %d, only wrote %d", len(msg), i)
	}
	// We're left with a partial lastBlock
	expectedNum := 4
	if len(h.Nodes()) != expectedNum {
		t.Errorf("expected %d nodes, got %d", expectedNum, len(h.Nodes()))
	}
	// Next test Sum()
	gotSum := fmt.Sprintf("%x", h.Sum(nil))
	if expectedSum != gotSum {
		t.Errorf("expected initial checksum %q; got %q", expectedSum, gotSum)
	}
	// count blocks again, we should get 5 nodes now
	expectedNum = 5
	if len(h.Nodes()) != expectedNum {
		t.Errorf("expected %d nodes, got %d", expectedNum, len(h.Nodes()))
	}
	// Test Sum() again, ensure same sum
	gotSum = fmt.Sprintf("%x", h.Sum(nil))
	if expectedSum != gotSum {
		t.Errorf("expected checksum %q; got %q", expectedSum, gotSum)
	}
	// test that Reset() nulls us out
	h.Reset()
	gotSum = fmt.Sprintf("%x", h.Sum(nil))
	if expectedSum == gotSum {
		t.Errorf("expected reset checksum to not equal %q; got %q", expectedSum, gotSum)
	}
	// write our msg again and get the same sum
	i, err = h.Write(msg)
	if err != nil {
		t.Fatal(err)
	}
	if i != len(msg) {
		t.Fatalf("expected to write %d, only wrote %d", len(msg), i)
	}
	// Test Sum(), ensure same sum
	gotSum = fmt.Sprintf("%x", h.Sum(nil))
	if expectedSum != gotSum {
		t.Errorf("expected checksum %q; got %q", expectedSum, gotSum)
	}
	// Write more. This should pop the last node, and use the lastBlock.
	i, err = h.Write(msg)
	if err != nil {
		t.Fatal(err)
	}
	if i != len(msg) {
		t.Fatalf("expected to write %d, only wrote %d", len(msg), i)
	}
	expectedNum = 9
	if len(h.Nodes()) != expectedNum {
		t.Errorf("expected %d nodes, got %d", expectedNum, len(h.Nodes()))
	}
	gotSum = fmt.Sprintf("%x", h.Sum(nil))
	if expectedSum == gotSum {
		t.Errorf("expected reset checksum to not equal %q; got %q", expectedSum, gotSum)
	}
	if len(h.Nodes()) != expectedNum {
		t.Errorf("expected %d nodes, got %d", expectedNum, len(h.Nodes()))
	}
}
// Shared fixtures for the benchmarks below: merkle hashers with an 8 KiB
// block size over the default (SHA-1), SHA-256 and SHA-512 leaf hashes, and
// a reusable zero-filled input buffer.
var benchDefault = NewHash(DefaultHashMaker, 8192)
var benchSha256 = NewHash(func() hash.Hash { return sha256.New() }, 8192)
var benchSha512 = NewHash(func() hash.Hash { return sha512.New() }, 8192)
var buf = make([]byte, 8192)
// benchmarkSize measures bench hashing size bytes per iteration, reusing a
// preallocated sum buffer so allocation stays out of the measurement.
func benchmarkSize(bench hash.Hash, b *testing.B, size int) {
	b.SetBytes(int64(size))
	scratch := make([]byte, bench.Size())
	for n := 0; n < b.N; n++ {
		bench.Reset()
		bench.Write(buf[:size])
		bench.Sum(scratch[:0])
	}
}
// Benchmarks for the default (SHA-1) merkle hash at 8 B, 1 KiB and 8 KiB
// writes per iteration.
func BenchmarkHash8Bytes(b *testing.B) {
	benchmarkSize(benchDefault, b, 8)
}
func BenchmarkHash1K(b *testing.B) {
	benchmarkSize(benchDefault, b, 1024)
}
func BenchmarkHash8K(b *testing.B) {
	benchmarkSize(benchDefault, b, 8192)
}
// Same sizes with SHA-256 leaf hashing.
func BenchmarkSha256Hash8Bytes(b *testing.B) {
	benchmarkSize(benchSha256, b, 8)
}
func BenchmarkSha256Hash1K(b *testing.B) {
	benchmarkSize(benchSha256, b, 1024)
}
func BenchmarkSha256Hash8K(b *testing.B) {
	benchmarkSize(benchSha256, b, 8192)
}
// Same sizes with SHA-512 leaf hashing.
func BenchmarkSha512Hash8Bytes(b *testing.B) {
	benchmarkSize(benchSha512, b, 8)
}
func BenchmarkSha512Hash1K(b *testing.B) {
	benchmarkSize(benchSha512, b, 1024)
}
func BenchmarkSha512Hash8K(b *testing.B) {
	benchmarkSize(benchSha512, b, 8192)
}
|
package main
import (
"flag"
"fmt"
"io"
"os"
"strings"
)
const (
	// Production Values
	API_URL = "http://api.dnsmadeeasy.com/V1.2"
	// Development Values
	//API_URL = "http://api.sandbox.dnsmadeeasy.com/V1.2"
)

// Package-level configuration, resolved in main() from flags and DNSME_*
// environment variables.
var (
	api_url           string // API base URL; DNSME_API_URL or API_URL fallback
	api_key           string // DNSME_API_KEY (required)
	secret_key        string // DNSME_SECRET_KEY (required)
	outputType        string // -o flag: std, json or csv
	debug             bool   // -d flag
	requestsRemaining int    // presumably updated from API rate-limit headers — confirm in request code
)
// commands is the dispatch table consulted by main() and help(); each entry
// contributes its own usage/help text and flag set.
var commands = []*Command{
	listDomains,
	infoDomain,
	addNewDomain,
	delDomain,
	listSecondaries,
	infoSecondary,
	addNewSecondary,
	delSecondary,
	records,
	record,
	addRecord,
	updateRecord,
	deleteRecord,
	importData,
	exportData,
	/*
		addRecord,
		search, */
}
// A Command is one subcommand of the dnsme tool, modeled on the Go tool's
// own command structure (go build, go fix, ...).
type Command struct {
	// Run runs the command.
	// The args are the arguments after the command name.
	Run func(cmd *Command, args []string) error

	// UsageLine is the one-line usage message.
	// The first word in the line is taken to be the command name.
	UsageLine string

	// Short is the short description shown in the 'dnsme help' output.
	Short string

	// Long is the long message shown in the 'dnsme help <this-command>' output.
	Long string

	// Flag is a set of flags specific to this command.
	Flag flag.FlagSet

	// CustomFlags indicates that the command will do its own
	// flag parsing.
	CustomFlags func(cmd *flag.FlagSet)
}

// Name returns the command's name: the first word of the usage line.
func (c *Command) Name() string {
	return strings.SplitN(c.UsageLine, " ", 2)[0]
}

// Usage prints the command's usage text to stderr and exits with status 2.
func (c *Command) Usage() {
	fmt.Fprintf(os.Stderr, "usage: %s\n\n%s\n", c.UsageLine, strings.TrimSpace(c.Long))
	os.Exit(2)
}

// Runnable reports whether the command can be executed directly (as opposed
// to being documentation-only).
func (c *Command) Runnable() bool {
	return c.Run != nil
}
// main resolves configuration from the environment, then dispatches the
// first CLI argument to its Command, letting each command parse its own
// flags (global -o/-d flags are injected into every flag set).
func main() {
	flag.Usage = usage
	flag.Parse()
	args := flag.Args()
	if len(args) < 1 {
		usage()
		return
	}
	if args[0] == "help" {
		help(args[1:])
		return
	}
	// API endpoint may be overridden for the sandbox; keys are mandatory.
	api_url = os.Getenv("DNSME_API_URL")
	if api_url == "" {
		api_url = API_URL
	}
	api_key = os.Getenv("DNSME_API_KEY")
	if api_key == "" {
		fmt.Fprint(os.Stderr, "DNSME_API_KEY environment variable is not set\n")
		os.Exit(1)
	}
	secret_key = os.Getenv("DNSME_SECRET_KEY")
	if secret_key == "" {
		fmt.Fprint(os.Stderr, "DNSME_SECRET_KEY environment variable is not set\n")
		os.Exit(1)
	}
	for _, cmd := range commands {
		if cmd.Name() == args[0] && cmd.Run != nil {
			addGlobalFlags(&cmd.Flag)
			cmd.Flag.Usage = func() { cmd.Usage() }
			if cmd.CustomFlags != nil {
				cmd.CustomFlags(&cmd.Flag)
			}
			// Parse everything after the command name with the command's
			// own flag set; remaining args go to the command itself.
			// args = args[1:]
			// } else {
			cmd.Flag.Parse(args[1:])
			args = cmd.Flag.Args()
			// }
			err := cmd.Run(cmd, args)
			if err != nil {
				fmt.Fprintf(os.Stderr, "%s\n", err)
				os.Exit(1)
			}
			return
		}
	}
	fmt.Fprintf(os.Stderr, "Unknown command %#q\n\n", args[0])
	usage()
}
// addGlobalFlags registers the flags shared by every command (-o output
// format, -d debug) on the given flag set.
func addGlobalFlags(fs *flag.FlagSet) {
	fs.StringVar(&outputType, "o", "std", "Output type (std, json, csv)")
	fs.BoolVar(&debug, "d", false, "Debug output")
}
// printUsage renders the top-level usage template over the command table.
func printUsage(w io.Writer) {
	tmpl(w, usageTemplate, commands)
}

// usage prints top-level usage to stderr and exits with status 2.
func usage() {
	printUsage(os.Stderr)
	os.Exit(2)
}
// help implements the 'help' command: with no argument it prints the general
// usage to stdout; with one argument it prints that command's help page.
func help(args []string) {
	if len(args) == 0 {
		printUsage(os.Stdout)
		// not exit 2: succeeded at 'dnsme help'.
		return
	}
	if len(args) != 1 {
		fmt.Fprintf(os.Stderr, "usage: dnsme help command\n\nToo many arguments given.\n")
		os.Exit(2) // failed at 'dnsme help'
	}
	arg := args[0]
	for _, cmd := range commands {
		if cmd.Name() == arg {
			tmpl(os.Stdout, helpTemplate, cmd)
			// not exit 2: succeeded at 'dnsme help cmd'.
			return
		}
	}
	fmt.Fprintf(os.Stderr, "Unknown help topic %#q. Run 'dnsme help'.\n", arg)
	os.Exit(2) // failed at 'dnsme help cmd'
}
/*
func printOutput(r *http.Response) {
io.Copy(os.Stdout, r.Body)
fmt.Println()
}
*/
|
package main
import (
"fmt"
"log"
"net/http"
"strings"
)
func sayhelloName(w http.ResponseWriter, r *http.Request) {
r.ParseForm() // разбор аргументов, необходимо вызвать самостоятельно
fmt.Println(r.Form) // печать данных формы на стороне сервера
fmt.Println("path", r.URL.Path)
fmt.Println("scheme", r.URL.Scheme)
fmt.Println(r.Form["url_long"])
for k, v:= range r.Form {
fmt.Println("key:", k)
fmt.Println("val:", strings.Join(v, ""))
}
fmt.Println(r.Form["lol"])
for l:= range r.Form {/*Ищем нужные нам значения в строке адреса типа
http://localhost:9090/?url_long=111&url_long=222&lol=517&lol=125 */
fmt.Println("lol:", l)
fmt.Println("val:", strings.Join(l, ""))
}
fmt.Fprintf(w, "Hello, Oleg!") // отправка данных на клиент
}
// main registers the handler on "/" and serves on :9090, blocking until the
// listener fails.
func main() {
	http.HandleFunc("/", sayhelloName) // install the handler
	err := http.ListenAndServe(":9090", nil) // set the port to listen on
	if err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
//http://localhost:9090/ Shows "Hello, Oleg!" in browser
/*http://localhost:9090/?url_long=111&url_long=222 send information
Console:
map[url_long:[111 222] lol:[517 125]]
path /
scheme
[111 222]
key: url_long
val: 111222
key: lol
val: 517125
map[]
path /favicon.ico
scheme
[]
*/ |
package main
import (
"testing"
)
// TestLongestCommonPrefix checks longestCommonPrefix across disjoint,
// partially overlapping, identical, nested, empty and single-rune inputs.
func TestLongestCommonPrefix(t *testing.T) {
	cases := []struct {
		strs []string
		want string
	}{
		{[]string{"hello world", "good good study,day day up"}, ""},
		{[]string{"abstract", "abandon"}, "ab"},
		{[]string{"abcdefg", "abcdefg"}, "abcdefg"},
		{[]string{"abcdef", "abcdefg"}, "abcdef"},
		{[]string{"", ""}, ""},
		{[]string{"a", "a"}, "a"},
	}
	for _, tc := range cases {
		if longestCommonPrefix(tc.strs) != tc.want {
			t.Errorf("Test longestCommonPrefix fail.")
		}
	}
}
|
// Provide command-line arguments in the form of 'YYYY-mmm-DD' where:
// - YYYY is a four digit year
// - mmm is a three letter month abbreviation (Jan, Feb, Mar, et al)
// - DD is a two digit date
package main
import (
"fmt"
"os"
"time"
)
// main prints the number of whole days between two dates supplied as the
// first two command-line arguments in "YYYY-Mon-DD" form (e.g. 2021-Jan-02).
func main() {
	// BUG FIX: the original indexed os.Args without checking its length
	// (panic on missing arguments) and discarded both time.Parse errors,
	// silently reporting a bogus day count for malformed input.
	if len(os.Args) < 3 {
		fmt.Fprintln(os.Stderr, "usage: elapsed <from> <to> (format YYYY-Mon-DD)")
		os.Exit(1)
	}
	from, to := os.Args[1], os.Args[2]
	// Go's reference time spelled in the expected layout.
	const shortForm = "2006-Jan-02"
	fromTime, err := time.Parse(shortForm, from)
	if err != nil {
		fmt.Fprintln(os.Stderr, "invalid 'from' date:", err)
		os.Exit(1)
	}
	toTime, err := time.Parse(shortForm, to)
	if err != nil {
		fmt.Fprintln(os.Stderr, "invalid 'to' date:", err)
		os.Exit(1)
	}
	dur := toTime.Sub(fromTime)
	fmt.Println("elapsed days:", int(dur/(time.Hour*24)))
}
|
package main
import "fmt"
// main prints two greetings using names held in constants.
func main() {
	const (
		firstName string = "veto"
		lastName         = "firmandianta"
	)
	fmt.Print("halo ", firstName, "!\n")
	fmt.Print("nice to meet you ", lastName, "!\n")
}
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chunk
import (
"encoding/binary"
"math"
"unsafe"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/hack"
)
// MutRow represents a mutable Row.
// The underlying columns only contain one row and are not exposed to the user.
type MutRow Row

// ToRow converts the MutRow to Row, so it can be used to read data.
func (mr MutRow) ToRow() Row {
	return Row(mr)
}

// Len returns the number of columns.
func (mr MutRow) Len() int {
	return len(mr.c.columns)
}

// Clone deep clones a MutRow: the underlying chunk is copy-constructed so
// mutations on the clone do not affect the original.
func (mr MutRow) Clone() MutRow {
	newChk := mr.c
	if mr.c != nil {
		newChk = mr.c.CopyConstruct()
	}
	return MutRow{
		c:   newChk,
		idx: mr.idx,
	}
}
// MutRowFromValues creates a MutRow from an interface slice, building one
// single-row column per value.
func MutRowFromValues(vals ...interface{}) MutRow {
	cols := make([]*Column, len(vals))
	for i, val := range vals {
		cols[i] = makeMutRowColumn(val)
	}
	return MutRow{c: &Chunk{columns: cols}}
}
// MutRowFromDatums creates a MutRow from a datum slice, building one
// single-row column per datum.
func MutRowFromDatums(datums []types.Datum) MutRow {
	cols := make([]*Column, 0, len(datums))
	for i := range datums {
		cols = append(cols, makeMutRowColumn(datums[i].GetValue()))
	}
	return MutRow{c: &Chunk{columns: cols}, idx: 0}
}
// MutRowFromTypes creates a MutRow from a FieldType slice, each Column is
// initialized to the zero value for its type.
func MutRowFromTypes(types []*types.FieldType) MutRow {
	// NOTE(review): the parameter name shadows the imported `types` package
	// inside this body; only zeroValForType is needed here, so it is harmless.
	c := &Chunk{columns: make([]*Column, 0, len(types))}
	for _, tp := range types {
		col := makeMutRowColumn(zeroValForType(tp))
		c.columns = append(c.columns, col)
	}
	return MutRow{c: c, idx: 0}
}
// zeroValForType returns the Go value used to back a zero/empty cell for the
// given MySQL field type. Types not listed yield nil, which makeMutRowColumn
// stores as NULL.
func zeroValForType(tp *types.FieldType) interface{} {
	switch tp.GetType() {
	case mysql.TypeFloat:
		return float32(0)
	case mysql.TypeDouble:
		return float64(0)
	case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeYear:
		// Integer family: the unsigned flag decides the Go representation.
		if mysql.HasUnsignedFlag(tp.GetFlag()) {
			return uint64(0)
		}
		return int64(0)
	case mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar:
		return ""
	case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
		return []byte{}
	case mysql.TypeDuration:
		return types.ZeroDuration
	case mysql.TypeNewDecimal:
		return types.NewDecFromInt(0)
	case mysql.TypeDate:
		return types.ZeroDate
	case mysql.TypeDatetime:
		return types.ZeroDatetime
	case mysql.TypeTimestamp:
		return types.ZeroTimestamp
	case mysql.TypeBit:
		return types.BinaryLiteral{}
	case mysql.TypeSet:
		return types.Set{}
	case mysql.TypeEnum:
		return types.Enum{}
	case mysql.TypeJSON:
		return types.CreateBinaryJSON(nil)
	default:
		return nil
	}
}
// makeMutRowColumn builds a single-row Column holding the given value.
// nil produces a NULL cell; unsupported types return a nil Column.
func makeMutRowColumn(in interface{}) *Column {
	switch x := in.(type) {
	case nil:
		// NULL cell: empty var-len payload with the null bit cleared.
		col := makeMutRowBytesColumn(nil)
		col.nullBitmap[0] = 0
		return col
	case int:
		return makeMutRowUint64Column(uint64(x))
	case int64:
		return makeMutRowUint64Column(uint64(x))
	case uint64:
		return makeMutRowUint64Column(x)
	case float64:
		// Stored as the raw IEEE-754 bit pattern in 8 bytes.
		return makeMutRowUint64Column(math.Float64bits(x))
	case float32:
		col := newMutRowFixedLenColumn(4)
		*(*uint32)(unsafe.Pointer(&col.data[0])) = math.Float32bits(x)
		return col
	case string:
		// hack.Slice reinterprets the string's bytes without copying.
		return makeMutRowBytesColumn(hack.Slice(x))
	case []byte:
		return makeMutRowBytesColumn(x)
	case types.BinaryLiteral:
		return makeMutRowBytesColumn(x)
	case *types.MyDecimal:
		col := newMutRowFixedLenColumn(types.MyDecimalStructSize)
		*(*types.MyDecimal)(unsafe.Pointer(&col.data[0])) = *x
		return col
	case types.Time:
		col := newMutRowFixedLenColumn(sizeTime)
		*(*types.Time)(unsafe.Pointer(&col.data[0])) = x
		return col
	case types.BinaryJSON:
		// Layout: one type-code byte followed by the raw JSON value bytes.
		col := newMutRowVarLenColumn(len(x.Value) + 1)
		col.data[0] = x.TypeCode
		copy(col.data[1:], x.Value)
		return col
	case types.Duration:
		col := newMutRowFixedLenColumn(8)
		*(*int64)(unsafe.Pointer(&col.data[0])) = int64(x.Duration)
		return col
	case types.Enum:
		// Layout: 8 native-endian value bytes followed by the name.
		col := newMutRowVarLenColumn(len(x.Name) + 8)
		copy(col.data, (*[8]byte)(unsafe.Pointer(&x.Value))[:])
		copy(col.data[8:], x.Name)
		return col
	case types.Set:
		// Same layout as Enum: 8 value bytes then the name.
		col := newMutRowVarLenColumn(len(x.Name) + 8)
		copy(col.data, (*[8]byte)(unsafe.Pointer(&x.Value))[:])
		copy(col.data[8:], x.Name)
		return col
	default:
		return nil
	}
}
// newMutRowFixedLenColumn creates a single-row fixed-length Column whose
// element occupies elemSize bytes; the cell starts out non-NULL.
func newMutRowFixedLenColumn(elemSize int) *Column {
	buf := make([]byte, elemSize)
	col := &Column{
		length:     1,
		elemBuf:    buf,
		data:       buf,
		nullBitmap: make([]byte, 1),
	}
	col.nullBitmap[0] = 1 // mark the single cell non-NULL
	return col
}

// newMutRowVarLenColumn creates a single-row variable-length Column with room
// for valSize data bytes. One extra byte is allocated and reused as the null
// bitmap, so data and bitmap share a single allocation.
func newMutRowVarLenColumn(valSize int) *Column {
	buf := make([]byte, valSize+1)
	col := &Column{
		length:     1,
		offsets:    []int64{0, int64(valSize)},
		data:       buf[:valSize],
		nullBitmap: buf[valSize:],
	}
	col.nullBitmap[0] = 1 // mark the single cell non-NULL
	return col
}

// makeMutRowUint64Column creates a single-row Column holding one uint64
// written directly into the 8-byte element buffer.
func makeMutRowUint64Column(val uint64) *Column {
	col := newMutRowFixedLenColumn(8)
	*(*uint64)(unsafe.Pointer(&col.data[0])) = val
	return col
}

// makeMutRowBytesColumn creates a single-row variable-length Column holding a
// copy of bin.
func makeMutRowBytesColumn(bin []byte) *Column {
	col := newMutRowVarLenColumn(len(bin))
	copy(col.data, bin)
	return col
}

// cleanColOfMutRow resets a MutRow column before it is overwritten: all
// offsets go back to zero and the single cell is marked NULL.
func cleanColOfMutRow(col *Column) {
	for i := range col.offsets {
		col.offsets[i] = 0
	}
	col.nullBitmap[0] = 0
}
// SetRow sets the MutRow with Row: every column of row is copied into the
// corresponding single-row column of the MutRow.
func (mr MutRow) SetRow(row Row) {
	for colIdx, rCol := range row.c.columns {
		mrCol := mr.c.columns[colIdx]
		cleanColOfMutRow(mrCol)
		if rCol.IsNull(row.idx) {
			// cleanColOfMutRow already marked the cell NULL.
			continue
		}
		elemLen := len(rCol.elemBuf)
		if elemLen > 0 {
			// Fixed-length column: copy the element at row.idx.
			copy(mrCol.data, rCol.data[row.idx*elemLen:(row.idx+1)*elemLen])
		} else {
			// Variable-length column: copy the byte range given by offsets.
			setMutRowBytes(mrCol, rCol.data[rCol.offsets[row.idx]:rCol.offsets[row.idx+1]])
		}
		mrCol.nullBitmap[0] = 1
	}
}
// SetValues sets the MutRow with values, one per column in order.
func (mr MutRow) SetValues(vals ...interface{}) {
	for i, v := range vals {
		mr.SetValue(i, v)
	}
}

// SetValue sets the column at colIdx to val; nil stores NULL. The dynamic
// type of val must match the column's representation (see makeMutRowColumn);
// unrecognized types leave the cell's data untouched but mark it non-NULL.
func (mr MutRow) SetValue(colIdx int, val interface{}) {
	col := mr.c.columns[colIdx]
	cleanColOfMutRow(col)
	if val == nil {
		// cleanColOfMutRow already marked the cell NULL.
		return
	}
	switch x := val.(type) {
	case int:
		binary.LittleEndian.PutUint64(col.data, uint64(x))
	case int64:
		binary.LittleEndian.PutUint64(col.data, uint64(x))
	case uint64:
		binary.LittleEndian.PutUint64(col.data, x)
	case float64:
		binary.LittleEndian.PutUint64(col.data, math.Float64bits(x))
	case float32:
		binary.LittleEndian.PutUint32(col.data, math.Float32bits(x))
	case string:
		setMutRowBytes(col, hack.Slice(x))
	case []byte:
		setMutRowBytes(col, x)
	case types.BinaryLiteral:
		setMutRowBytes(col, x)
	case types.Duration:
		*(*int64)(unsafe.Pointer(&col.data[0])) = int64(x.Duration)
	case *types.MyDecimal:
		*(*types.MyDecimal)(unsafe.Pointer(&col.data[0])) = *x
	case types.Time:
		*(*types.Time)(unsafe.Pointer(&col.data[0])) = x
	case types.Enum:
		setMutRowNameValue(col, x.Name, x.Value)
	case types.Set:
		setMutRowNameValue(col, x.Name, x.Value)
	case types.BinaryJSON:
		setMutRowJSON(col, x)
	}
	col.nullBitmap[0] = 1
}
// SetDatums sets the MutRow with a datum slice, one per column in order.
func (mr MutRow) SetDatums(datums ...types.Datum) {
	for i, d := range datums {
		mr.SetDatum(i, d)
	}
}

// SetDatum sets the column at colIdx from a datum; a NULL datum stores NULL.
func (mr MutRow) SetDatum(colIdx int, d types.Datum) {
	col := mr.c.columns[colIdx]
	cleanColOfMutRow(col)
	if d.IsNull() {
		// cleanColOfMutRow already marked the cell NULL.
		return
	}
	switch d.Kind() {
	case types.KindInt64, types.KindUint64, types.KindFloat64:
		// These three kinds share an 8-byte in-memory representation.
		binary.LittleEndian.PutUint64(mr.c.columns[colIdx].data, d.GetUint64())
	case types.KindFloat32:
		binary.LittleEndian.PutUint32(mr.c.columns[colIdx].data, math.Float32bits(d.GetFloat32()))
	case types.KindString, types.KindBytes, types.KindBinaryLiteral:
		setMutRowBytes(col, d.GetBytes())
	case types.KindMysqlTime:
		*(*types.Time)(unsafe.Pointer(&col.data[0])) = d.GetMysqlTime()
	case types.KindMysqlDuration:
		*(*int64)(unsafe.Pointer(&col.data[0])) = int64(d.GetMysqlDuration().Duration)
	case types.KindMysqlDecimal:
		*(*types.MyDecimal)(unsafe.Pointer(&col.data[0])) = *d.GetMysqlDecimal()
	case types.KindMysqlJSON:
		setMutRowJSON(col, d.GetMysqlJSON())
	case types.KindMysqlEnum:
		e := d.GetMysqlEnum()
		setMutRowNameValue(col, e.Name, e.Value)
	case types.KindMysqlSet:
		s := d.GetMysqlSet()
		setMutRowNameValue(col, s.Name, s.Value)
	default:
		// Unknown kind: rebuild the whole column from the raw value.
		mr.c.columns[colIdx] = makeMutRowColumn(d.GetValue())
	}
	col.nullBitmap[0] = 1
}
// setMutRowBytes writes bin into a single-row var-len column, growing the
// backing buffer when needed. A grown buffer carries one extra byte that
// becomes the null bitmap, keeping data and bitmap in a single allocation.
func setMutRowBytes(col *Column, bin []byte) {
	if len(col.data) >= len(bin) {
		col.data = col.data[:len(bin)]
	} else {
		buf := make([]byte, len(bin)+1)
		col.data = buf[:len(bin)]
		col.nullBitmap = buf[len(bin):]
	}
	copy(col.data, bin)
	col.offsets[1] = int64(len(bin))
}

// setMutRowNameValue writes an enum/set cell: 8 little-endian value bytes
// followed by the name. The buffer is grown the same way as setMutRowBytes.
func setMutRowNameValue(col *Column, name string, val uint64) {
	dataLen := len(name) + 8
	if len(col.data) >= dataLen {
		col.data = col.data[:dataLen]
	} else {
		buf := make([]byte, dataLen+1)
		col.data = buf[:dataLen]
		col.nullBitmap = buf[dataLen:]
	}
	binary.LittleEndian.PutUint64(col.data, val)
	copy(col.data[8:], name)
	col.offsets[1] = int64(dataLen)
}

// setMutRowJSON writes a JSON cell: one type-code byte followed by the raw
// value bytes.
func setMutRowJSON(col *Column, j types.BinaryJSON) {
	dataLen := len(j.Value) + 1
	if len(col.data) >= dataLen {
		col.data = col.data[:dataLen]
	} else {
		// In MutRow, there always exists 1 datum in every Column,
		// so we allocate one more byte for the null bitmap.
		buf := make([]byte, dataLen+1)
		col.data = buf[:dataLen]
		col.nullBitmap = buf[dataLen:]
	}
	col.data[0] = j.TypeCode
	copy(col.data[1:], j.Value)
	col.offsets[1] = int64(dataLen)
}
// ShallowCopyPartialRow shallow copies the data of `row` into the MutRow
// starting at column colIdx: destination columns alias the source row's
// backing arrays instead of copying bytes, so the source must outlive the
// MutRow's use of this data.
func (mr MutRow) ShallowCopyPartialRow(colIdx int, row Row) {
	for i, srcCol := range row.c.columns {
		dstCol := mr.c.columns[colIdx+i]
		if !srcCol.IsNull(row.idx) {
			// MutRow only contains one row, so we can directly set the whole byte.
			dstCol.nullBitmap[0] = 1
		} else {
			dstCol.nullBitmap[0] = 0
		}
		if srcCol.isFixed() {
			// Fixed-length: point at the element's byte range.
			elemLen := len(srcCol.elemBuf)
			offset := row.idx * elemLen
			dstCol.data = srcCol.data[offset : offset+elemLen]
		} else {
			// Variable-length: point at the offset-delimited range and keep
			// the destination's offsets consistent with the aliased data.
			start, end := srcCol.offsets[row.idx], srcCol.offsets[row.idx+1]
			dstCol.data = srcCol.data[start:end]
			dstCol.offsets[1] = int64(len(dstCol.data))
		}
	}
}
|
package _19_Remove_Nth_Node_From_End_of_List
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// ListNode is a node of a singly-linked list.
type ListNode struct {
	Val  int
	Next *ListNode
}

// removeNthFromEnd removes the n-th node from the end of the list and returns
// the (possibly new) head. Two-pointer technique: fast is advanced n nodes
// ahead; then fast and slow move together until fast reaches the last node,
// leaving slow just before the node to delete.
//
// Robustness fix: the original dereferenced a nil pointer when n exceeded the
// list length (or the list was empty); those inputs now return the list
// unchanged. Valid inputs behave exactly as before.
func removeNthFromEnd(head *ListNode, n int) *ListNode {
	// Dummy node in front of head simplifies removing the head itself.
	pre := &ListNode{Next: head}
	ret, fast, slow := pre, pre, pre
	for i := 0; i < n; i++ {
		if fast == nil {
			// n exceeds the list length; nothing sensible to remove.
			return head
		}
		fast = fast.Next
	}
	if fast == nil {
		// n == length + 1: target would be before the dummy; leave as is.
		return head
	}
	for fast.Next != nil {
		fast = fast.Next
		slow = slow.Next
	}
	slow.Next = slow.Next.Next
	return ret.Next
}
|
package hpke
import (
"crypto/rand"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/require"
)
var (
	// fixedPSK is the 32-byte pre-shared key used by all PSK-mode tests.
	fixedPSK = []byte{0x02, 0x47, 0xFD, 0x33, 0xB9, 0x13, 0x76, 0x0F,
		0xA1, 0xFA, 0x51, 0xE1, 0x89, 0x2D, 0x9F, 0x30,
		0x7F, 0xBE, 0x65, 0xEB, 0x17, 0x1E, 0x81, 0x32,
		0xC2, 0xAF, 0x18, 0x55, 0x5A, 0x73, 0x8B, 0x82} // 32 bytes
	// fixedPSKID identifies fixedPSK to the key schedule.
	fixedPSKID = []byte("Ennyn Durin aran Moria")
	// original is the plaintext sealed/opened in round-trip tests.
	original = []byte("Beauty is truth, truth beauty")
	// aad is the associated data bound to each encryption.
	aad = []byte("that is all // Ye know on earth, and all ye need to know")
	// info is the application-supplied key-schedule context.
	info = []byte("Ode on a Grecian Urn")
	// rtts is the number of seal/open round trips per direct test.
	rtts = 10
	// exportContext and exportLength parameterize the exporter checks.
	exportContext = []byte("test export")
	exportLength  = 32
)

const (
	// Environment variables naming the output/input test-vector files.
	outputTestVectorEnvironmentKey = "HPKE_TEST_VECTORS_OUT"
	inputTestVectorEnvironmentKey  = "HPKE_TEST_VECTORS_IN"
	// Number of encryptions and export length recorded per generated vector.
	testVectorEncryptionCount = 257
	testVectorExportLength    = 32
)
///////
// Infallible Serialize / Deserialize

// fatalOnError fails the test if err is non-nil; when t is nil (e.g. when
// called during JSON unmarshaling outside a test) it panics instead.
func fatalOnError(t *testing.T, err error, msg string) {
	realMsg := fmt.Sprintf("%s: %v", msg, err)
	if t != nil {
		require.Nil(t, err, realMsg)
	} else if err != nil {
		panic(realMsg)
	}
}

// mustUnhex decodes a hex string, failing the test on malformed input.
func mustUnhex(t *testing.T, h string) []byte {
	out, err := hex.DecodeString(h)
	fatalOnError(t, err, "Unhex failed")
	return out
}

// mustHex encodes bytes as a lowercase hex string.
func mustHex(d []byte) string {
	return hex.EncodeToString(d)
}
// mustDeserializePriv parses a hex-encoded KEM private key. The
// deserialization error is only enforced when required is true, because some
// modes legitimately omit the sender key (empty string input).
func mustDeserializePriv(t *testing.T, suite CipherSuite, h string, required bool) KEMPrivateKey {
	skm := mustUnhex(t, h)
	sk, err := suite.KEM.DeserializePrivateKey(skm)
	if required {
		fatalOnError(t, err, "DeserializePrivate failed")
	}
	return sk
}

// mustSerializePriv hex-encodes a KEM private key.
func mustSerializePriv(suite CipherSuite, priv KEMPrivateKey) string {
	return mustHex(suite.KEM.SerializePrivateKey(priv))
}

// mustDeserializePub parses a hex-encoded KEM public key; the error is only
// enforced when required is true (see mustDeserializePriv).
func mustDeserializePub(t *testing.T, suite CipherSuite, h string, required bool) KEMPublicKey {
	pkm := mustUnhex(t, h)
	pk, err := suite.KEM.DeserializePublicKey(pkm)
	if required {
		fatalOnError(t, err, "DeserializePublicKey failed")
	}
	return pk
}

// mustSerializePub hex-encodes a KEM public key.
func mustSerializePub(suite CipherSuite, pub KEMPublicKey) string {
	return mustHex(suite.KEM.SerializePublicKey(pub))
}
// mustGenerateKeyPair derives a fresh key pair for the suite's KEM from
// random ikm, failing the test (or panicking) on any error. It also returns
// the ikm so generated vectors can record it.
func mustGenerateKeyPair(t *testing.T, suite CipherSuite) (KEMPrivateKey, KEMPublicKey, []byte) {
	ikm := make([]byte, suite.KEM.PrivateKeySize())
	// BUG FIX: the original ignored the error from rand.Reader.Read.
	// crypto/rand.Read fills the whole buffer or reports an error.
	_, err := rand.Read(ikm)
	fatalOnError(t, err, "Error reading random ikm")
	sk, pk, err := suite.KEM.DeriveKeyPair(ikm)
	fatalOnError(t, err, "Error generating DH key pair")
	return sk, pk, ikm
}
///////
// Assertions

// assert fails the test with a ciphersuite-identifying label when test is false.
func assert(t *testing.T, suite CipherSuite, msg string, test bool) {
	require.True(t, test, "[%04x, %04x, %04x] %s", suite.KEM.ID(), suite.KDF.ID(), suite.AEAD.ID(), msg)
}

// assertNotError fails the test when err is non-nil.
func assertNotError(t *testing.T, suite CipherSuite, msg string, err error) {
	require.Nil(t, err, "%s: %v", msg, err)
}

// assertBytesEqual fails the test when lhs and rhs differ.
func assertBytesEqual(t *testing.T, suite CipherSuite, msg string, lhs, rhs []byte) {
	// require.Equal treats a nil slice as different from a zero-length slice, so
	// we handle this as a special case.
	if len(lhs) == 0 && len(rhs) == 0 {
		return
	}
	require.Equal(t, lhs, rhs, "%s: [%x] != [%x]", msg, lhs, rhs)
}
// assertCipherContextEqual checks that two cipher contexts (typically one
// freshly set up and one deserialized) are equivalent: all serialized fields
// match, the suite algorithms match, and the internal AEAD objects produce
// identical output for the same input.
func assertCipherContextEqual(t *testing.T, suite CipherSuite, msg string, lhs, rhs context) {
	// Verify the serialized fields match.
	assert(t, suite, fmt.Sprintf("%s: %s", msg, "role"), lhs.Role == rhs.Role)
	assert(t, suite, fmt.Sprintf("%s: %s", msg, "KEM id"), lhs.KEMID == rhs.KEMID)
	assert(t, suite, fmt.Sprintf("%s: %s", msg, "KDF id"), lhs.KDFID == rhs.KDFID)
	assert(t, suite, fmt.Sprintf("%s: %s", msg, "AEAD id"), lhs.AEADID == rhs.AEADID)
	assertBytesEqual(t, suite, fmt.Sprintf("%s: %s", msg, "exporter secret"), lhs.ExporterSecret, rhs.ExporterSecret)
	assertBytesEqual(t, suite, fmt.Sprintf("%s: %s", msg, "key"), lhs.Key, rhs.Key)
	assertBytesEqual(t, suite, fmt.Sprintf("%s: %s", msg, "base_nonce"), lhs.BaseNonce, rhs.BaseNonce)
	assert(t, suite, fmt.Sprintf("%s: %s", msg, "sequence number"), lhs.Seq == rhs.Seq)
	// Verify that the internal representation of the cipher suite matches.
	assert(t, suite, fmt.Sprintf("%s: %s", msg, "KEM scheme representation"), lhs.suite.KEM.ID() == rhs.suite.KEM.ID())
	assert(t, suite, fmt.Sprintf("%s: %s", msg, "KDF scheme representation"), lhs.suite.KDF.ID() == rhs.suite.KDF.ID())
	assert(t, suite, fmt.Sprintf("%s: %s", msg, "AEAD scheme representation"), lhs.suite.AEAD.ID() == rhs.suite.AEAD.ID())
	if lhs.AEADID == AEAD_EXPORT_ONLY {
		// Export-only suites carry no AEAD to compare.
		return
	}
	// Verify that the internal AEAD object uses the same algorithm and is keyed
	// with the same key.
	// BUG FIX: AEAD.Seal appends to its dst argument and returns the result;
	// the original discarded the return values, so both slices stayed nil and
	// the comparison below passed vacuously.
	got := lhs.aead.Seal(nil, lhs.BaseNonce, nil, nil)
	want := rhs.aead.Seal(nil, rhs.BaseNonce, nil, nil)
	assertBytesEqual(t, suite, fmt.Sprintf("%s: %s", msg, "internal AEAD representation"), got, want)
}
///////
// Symmetric encryption test vector structure

// encryptionTestVector records one seal/open operation: the plaintext, the
// associated data, the nonce used, and the resulting ciphertext.
type encryptionTestVector struct {
	plaintext  []byte
	aad        []byte
	nonce      []byte
	ciphertext []byte
}

// MarshalJSON encodes the vector as a JSON object with hex-encoded fields.
func (etv encryptionTestVector) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[string]string{
		"pt":    mustHex(etv.plaintext),
		"aad":   mustHex(etv.aad),
		"nonce": mustHex(etv.nonce),
		"ct":    mustHex(etv.ciphertext),
	})
}

// UnmarshalJSON decodes the hex-encoded fields; malformed hex panics via
// mustUnhex(nil, ...) because no *testing.T is available here.
func (etv *encryptionTestVector) UnmarshalJSON(data []byte) error {
	raw := map[string]string{}
	err := json.Unmarshal(data, &raw)
	if err != nil {
		return err
	}
	etv.plaintext = mustUnhex(nil, raw["pt"])
	etv.aad = mustUnhex(nil, raw["aad"])
	etv.nonce = mustUnhex(nil, raw["nonce"])
	etv.ciphertext = mustUnhex(nil, raw["ct"])
	return nil
}
///////
// Exporter test vector structures

// rawExporterTestVector is the JSON wire form of one exporter check.
type rawExporterTestVector struct {
	ExportContext string `json:"exporter_context"`
	ExportLength  int    `json:"L"`
	ExportValue   string `json:"exported_value"`
}

// exporterTestVector records one secret-export operation: the caller-supplied
// context, the requested length, and the expected exported value.
type exporterTestVector struct {
	exportContext []byte
	exportLength  int
	exportValue   []byte
}

// MarshalJSON encodes the vector with hex-encoded byte fields.
func (etv exporterTestVector) MarshalJSON() ([]byte, error) {
	return json.Marshal(rawExporterTestVector{
		ExportContext: mustHex(etv.exportContext),
		ExportLength:  etv.exportLength,
		ExportValue:   mustHex(etv.exportValue),
	})
}

// UnmarshalJSON decodes the hex-encoded byte fields; malformed hex panics
// via mustUnhex(nil, ...).
func (etv *exporterTestVector) UnmarshalJSON(data []byte) error {
	raw := rawExporterTestVector{}
	err := json.Unmarshal(data, &raw)
	if err != nil {
		return err
	}
	etv.exportContext = mustUnhex(nil, raw.ExportContext)
	etv.exportLength = raw.ExportLength
	etv.exportValue = mustUnhex(nil, raw.ExportValue)
	return nil
}
///////
// HPKE test vector structures

// rawTestVector is the JSON wire form of one complete HPKE test vector.
// All byte-valued fields are hex-encoded strings; optional fields (sender
// key material, PSK) are omitted for modes that do not use them.
type rawTestVector struct {
	// Parameters
	Mode   Mode   `json:"mode"`
	KEMID  KEMID  `json:"kem_id"`
	KDFID  KDFID  `json:"kdf_id"`
	AEADID AEADID `json:"aead_id"`
	Info   string `json:"info"`

	// Private keys
	IKMR  string `json:"ikmR"`
	IKMS  string `json:"ikmS,omitempty"`
	IKME  string `json:"ikmE"`
	SKR   string `json:"skRm"`
	SKS   string `json:"skSm,omitempty"`
	SKE   string `json:"skEm"`
	PSK   string `json:"psk,omitempty"`
	PSKID string `json:"psk_id,omitempty"`

	// Public keys
	PKR string `json:"pkRm"`
	PKS string `json:"pkSm,omitempty"`
	PKE string `json:"pkEm"`

	// Key schedule inputs and computations
	Enc                string                 `json:"enc"`
	SharedSecret       string                 `json:"shared_secret"`
	KeyScheduleContext string                 `json:"key_schedule_context"`
	Secret             string                 `json:"secret"`
	Key                string                 `json:"key"`
	BaseNonce          string                 `json:"base_nonce"`
	ExporterSecret     string                 `json:"exporter_secret"`
	Encryptions        []encryptionTestVector `json:"encryptions"`
	Exports            []exporterTestVector   `json:"exports"`
}
// testVector is the decoded, in-memory form of one HPKE test vector, holding
// parsed key objects and raw byte values; t and suite are attached after
// decoding so assertions and (de)serialization can run against it.
type testVector struct {
	t     *testing.T
	suite CipherSuite

	// Parameters
	mode    Mode
	kem_id  KEMID
	kdf_id  KDFID
	aead_id AEADID
	info    []byte

	// Private keys (and the ikm they were derived from)
	skR    KEMPrivateKey
	skS    KEMPrivateKey
	skE    KEMPrivateKey
	ikmR   []byte
	ikmS   []byte
	ikmE   []byte
	psk    []byte
	psk_id []byte

	// Public keys
	pkR KEMPublicKey
	pkS KEMPublicKey
	pkE KEMPublicKey

	// Key schedule inputs and computations
	enc                []byte
	sharedSecret       []byte
	keyScheduleContext []byte
	secret             []byte
	key                []byte
	baseNonce          []byte
	exporterSecret     []byte
	encryptions        []encryptionTestVector
	exports            []exporterTestVector
}
// MarshalJSON converts the in-memory vector to its rawTestVector wire form,
// hex-encoding byte fields and serializing key objects via the suite's KEM.
func (tv testVector) MarshalJSON() ([]byte, error) {
	return json.Marshal(rawTestVector{
		Mode:               tv.mode,
		KEMID:              tv.kem_id,
		KDFID:              tv.kdf_id,
		AEADID:             tv.aead_id,
		Info:               mustHex(tv.info),
		IKMR:               mustHex(tv.ikmR),
		IKMS:               mustHex(tv.ikmS),
		IKME:               mustHex(tv.ikmE),
		SKR:                mustSerializePriv(tv.suite, tv.skR),
		SKS:                mustSerializePriv(tv.suite, tv.skS),
		SKE:                mustSerializePriv(tv.suite, tv.skE),
		PSK:                mustHex(tv.psk),
		PSKID:              mustHex(tv.psk_id),
		PKR:                mustSerializePub(tv.suite, tv.pkR),
		PKS:                mustSerializePub(tv.suite, tv.pkS),
		PKE:                mustSerializePub(tv.suite, tv.pkE),
		Enc:                mustHex(tv.enc),
		SharedSecret:       mustHex(tv.sharedSecret),
		KeyScheduleContext: mustHex(tv.keyScheduleContext),
		Secret:             mustHex(tv.secret),
		Key:                mustHex(tv.key),
		BaseNonce:          mustHex(tv.baseNonce),
		ExporterSecret:     mustHex(tv.exporterSecret),
		Encryptions:        tv.encryptions,
		Exports:            tv.exports,
	})
}
// UnmarshalJSON decodes the rawTestVector wire form, assembles the cipher
// suite, and parses all key material. Sender keys are only required for the
// Auth mode variants; other modes may carry empty strings for them.
func (tv *testVector) UnmarshalJSON(data []byte) error {
	raw := rawTestVector{}
	err := json.Unmarshal(data, &raw)
	if err != nil {
		return err
	}

	tv.mode = raw.Mode
	tv.kem_id = raw.KEMID
	tv.kdf_id = raw.KDFID
	tv.aead_id = raw.AEADID
	tv.info = mustUnhex(tv.t, raw.Info)

	tv.suite, err = AssembleCipherSuite(raw.KEMID, raw.KDFID, raw.AEADID)
	if err != nil {
		return err
	}

	// Only Auth-style modes must carry a sender key pair.
	modeRequiresSenderKey := (tv.mode == modeAuth || tv.mode == modeAuthPSK)
	tv.skR = mustDeserializePriv(tv.t, tv.suite, raw.SKR, true)
	tv.skS = mustDeserializePriv(tv.t, tv.suite, raw.SKS, modeRequiresSenderKey)
	tv.skE = mustDeserializePriv(tv.t, tv.suite, raw.SKE, true)

	tv.pkR = mustDeserializePub(tv.t, tv.suite, raw.PKR, true)
	tv.pkS = mustDeserializePub(tv.t, tv.suite, raw.PKS, modeRequiresSenderKey)
	tv.pkE = mustDeserializePub(tv.t, tv.suite, raw.PKE, true)

	tv.psk = mustUnhex(tv.t, raw.PSK)
	tv.psk_id = mustUnhex(tv.t, raw.PSKID)

	tv.ikmR = mustUnhex(tv.t, raw.IKMR)
	tv.ikmS = mustUnhex(tv.t, raw.IKMS)
	tv.ikmE = mustUnhex(tv.t, raw.IKME)

	tv.enc = mustUnhex(tv.t, raw.Enc)
	tv.sharedSecret = mustUnhex(tv.t, raw.SharedSecret)
	tv.keyScheduleContext = mustUnhex(tv.t, raw.KeyScheduleContext)
	tv.secret = mustUnhex(tv.t, raw.Secret)
	tv.key = mustUnhex(tv.t, raw.Key)
	tv.baseNonce = mustUnhex(tv.t, raw.BaseNonce)
	tv.exporterSecret = mustUnhex(tv.t, raw.ExporterSecret)
	tv.encryptions = raw.Encryptions
	tv.exports = raw.Exports
	return nil
}
// testVectorArray wraps a slice of test vectors so the *testing.T can be
// propagated into each vector during JSON decoding.
type testVectorArray struct {
	t       *testing.T
	vectors []testVector
}

// MarshalJSON encodes just the vector slice.
func (tva testVectorArray) MarshalJSON() ([]byte, error) {
	return json.Marshal(tva.vectors)
}

// UnmarshalJSON decodes the vector slice and attaches tva.t to every vector
// so later assertions can report through the right test.
func (tva *testVectorArray) UnmarshalJSON(data []byte) error {
	err := json.Unmarshal(data, &tva.vectors)
	if err != nil {
		return err
	}

	for i := range tva.vectors {
		tva.vectors[i].t = tva.t
	}
	return nil
}
///////
// Generalize setup functions so that we can iterate over them easily

// setupMode bundles one HPKE mode with: OK, reporting whether a suite
// supports the mode; I, the sender (initiator) setup; and R, the receiver
// setup. Parameters a mode does not need (skS, psk, psk_id) are ignored by
// its functions.
type setupMode struct {
	Mode Mode
	OK   func(suite CipherSuite) bool
	I    func(suite CipherSuite, pkR KEMPublicKey, info []byte, skS KEMPrivateKey, psk, psk_id []byte) ([]byte, *SenderContext, error)
	R    func(suite CipherSuite, skR KEMPrivateKey, enc, info []byte, pkS KEMPublicKey, psk, psk_id []byte) (*ReceiverContext, error)
}
// setupModes maps each HPKE mode to its setup adapters. The Auth variants
// are only available when the suite's KEM implements AuthKEMScheme.
var setupModes = map[Mode]setupMode{
	modeBase: {
		Mode: modeBase,
		OK:   func(suite CipherSuite) bool { return true },
		I: func(suite CipherSuite, pkR KEMPublicKey, info []byte, skS KEMPrivateKey, psk, psk_id []byte) ([]byte, *SenderContext, error) {
			return SetupBaseS(suite, rand.Reader, pkR, info)
		},
		R: func(suite CipherSuite, skR KEMPrivateKey, enc, info []byte, pkS KEMPublicKey, psk, psk_id []byte) (*ReceiverContext, error) {
			return SetupBaseR(suite, skR, enc, info)
		},
	},
	modePSK: {
		Mode: modePSK,
		OK:   func(suite CipherSuite) bool { return true },
		I: func(suite CipherSuite, pkR KEMPublicKey, info []byte, skS KEMPrivateKey, psk, psk_id []byte) ([]byte, *SenderContext, error) {
			return SetupPSKS(suite, rand.Reader, pkR, psk, psk_id, info)
		},
		R: func(suite CipherSuite, skR KEMPrivateKey, enc, info []byte, pkS KEMPublicKey, psk, psk_id []byte) (*ReceiverContext, error) {
			return SetupPSKR(suite, skR, enc, psk, psk_id, info)
		},
	},
	modeAuth: {
		Mode: modeAuth,
		OK: func(suite CipherSuite) bool {
			// Auth modes need a KEM capable of authenticated encapsulation.
			_, ok := suite.KEM.(AuthKEMScheme)
			return ok
		},
		I: func(suite CipherSuite, pkR KEMPublicKey, info []byte, skS KEMPrivateKey, psk, psk_id []byte) ([]byte, *SenderContext, error) {
			return SetupAuthS(suite, rand.Reader, pkR, skS, info)
		},
		R: func(suite CipherSuite, skR KEMPrivateKey, enc, info []byte, pkS KEMPublicKey, psk, psk_id []byte) (*ReceiverContext, error) {
			return SetupAuthR(suite, skR, pkS, enc, info)
		},
	},
	modeAuthPSK: {
		Mode: modeAuthPSK,
		OK: func(suite CipherSuite) bool {
			// Auth modes need a KEM capable of authenticated encapsulation.
			_, ok := suite.KEM.(AuthKEMScheme)
			return ok
		},
		I: func(suite CipherSuite, pkR KEMPublicKey, info []byte, skS KEMPrivateKey, psk, psk_id []byte) ([]byte, *SenderContext, error) {
			return SetupAuthPSKS(suite, rand.Reader, pkR, skS, psk, psk_id, info)
		},
		R: func(suite CipherSuite, skR KEMPrivateKey, enc, info []byte, pkS KEMPublicKey, psk, psk_id []byte) (*ReceiverContext, error) {
			return SetupAuthPSKR(suite, skR, pkS, enc, psk, psk_id, info)
		},
	},
}
///////
// Direct tests

// roundTripTest identifies one ciphersuite/mode combination to exercise
// end to end.
type roundTripTest struct {
	kem_id  KEMID
	kdf_id  KDFID
	aead_id AEADID
	setup   setupMode
}
// Test runs a full round trip for the combination: setup on both sides,
// repeated seal/open, exporter agreement, and context (de)serialization.
// Suites that do not support the mode are skipped silently.
func (rtt roundTripTest) Test(t *testing.T) {
	suite, err := AssembleCipherSuite(rtt.kem_id, rtt.kdf_id, rtt.aead_id)
	require.Nil(t, err, "[%04x, %04x, %04x] Error looking up ciphersuite: %v", rtt.kem_id, rtt.kdf_id, rtt.aead_id, err)

	if !rtt.setup.OK(suite) {
		return
	}

	skS, pkS, _ := mustGenerateKeyPair(t, suite)
	skR, pkR, _ := mustGenerateKeyPair(t, suite)

	enc, ctxS, err := rtt.setup.I(suite, pkR, info, skS, fixedPSK, fixedPSKID)
	assertNotError(t, suite, "Error in SetupI", err)

	ctxR, err := rtt.setup.R(suite, skR, enc, info, pkS, fixedPSK, fixedPSKID)
	assertNotError(t, suite, "Error in SetupR", err)

	// Verify encryption functionality, if applicable
	if rtt.aead_id != AEAD_EXPORT_ONLY {
		for range make([]struct{}, rtts) {
			encrypted := ctxS.Seal(aad, original)
			decrypted, err := ctxR.Open(aad, encrypted)
			assertNotError(t, suite, "Error in Open", err)
			assertBytesEqual(t, suite, "Incorrect decryption", decrypted, original)
		}
	}

	// Verify exporter functionality
	exportedI := ctxS.Export(exportContext, exportLength)
	exportedR := ctxR.Export(exportContext, exportLength)
	assertBytesEqual(t, suite, "Incorrect exported secret", exportedI, exportedR)

	// Verify encryption context serialization functionality
	opaqueI, err := ctxS.Marshal()
	assertNotError(t, suite, "Error serializing encrypt context", err)

	unmarshaledI, err := UnmarshalSenderContext(opaqueI)
	assertNotError(t, suite, "Error deserializing encrypt context", err)
	assertCipherContextEqual(t, suite, "Encrypt context serialization mismatch", ctxS.context, unmarshaledI.context)

	// Verify decryption context serialization functionality
	opaqueR, err := ctxR.Marshal()
	assertNotError(t, suite, "Error serializing decrypt context", err)

	unmarshaledR, err := UnmarshalReceiverContext(opaqueR)
	assertNotError(t, suite, "Error deserializing decrypt context", err)
	assertCipherContextEqual(t, suite, "Decrypt context serialization mismatch", ctxR.context, unmarshaledR.context)

	// Verify exporter functionality for a deserialized context
	assertBytesEqual(t, suite, "Export after serialization fails for sender", exportedI, unmarshaledI.Export(exportContext, exportLength))
	assertBytesEqual(t, suite, "Export after serialization fails for receiver", exportedR, unmarshaledR.Export(exportContext, exportLength))
}
// TestModes runs a round-trip subtest for every registered KEM/KDF/AEAD
// combination in every HPKE mode, labeling each subtest with its ids.
// IDIOM FIX: the blank value identifiers in the first three range clauses
// were redundant (`for k, _ := range` -> `for k := range`, per gofmt -s).
func TestModes(t *testing.T) {
	for kem_id := range kems {
		for kdf_id := range kdfs {
			for aead_id := range aeads {
				for mode, setup := range setupModes {
					label := fmt.Sprintf("kem=%04x/kdf=%04x/aead=%04x/mode=%02x", kem_id, kdf_id, aead_id, mode)
					rtt := roundTripTest{kem_id, kdf_id, aead_id, setup}
					t.Run(label, rtt.Test)
				}
			}
		}
	}
}
///////
// Generation and processing of test vectors

// verifyEncryptions replays each recorded encryption through enc/dec and
// checks both the ciphertext and the round-tripped plaintext. The contexts
// are stateful (nonce sequence), so vectors must be replayed in order.
func verifyEncryptions(tv testVector, enc *SenderContext, dec *ReceiverContext) {
	for _, data := range tv.encryptions {
		encrypted := enc.Seal(data.aad, data.plaintext)
		decrypted, err := dec.Open(data.aad, encrypted)
		assertNotError(tv.t, tv.suite, "Error in Open", err)
		assertBytesEqual(tv.t, tv.suite, "Incorrect encryption", encrypted, data.ciphertext)
		assertBytesEqual(tv.t, tv.suite, "Incorrect decryption", decrypted, data.plaintext)
	}
}

// verifyParameters checks every recorded key-schedule intermediate against
// the freshly computed context.
func verifyParameters(tv testVector, ctx context) {
	assertBytesEqual(tv.t, tv.suite, "Incorrect parameter 'shared_secret'", tv.sharedSecret, ctx.setupParams.sharedSecret)
	assertBytesEqual(tv.t, tv.suite, "Incorrect parameter 'enc'", tv.enc, ctx.setupParams.enc)
	assertBytesEqual(tv.t, tv.suite, "Incorrect parameter 'key_schedule_context'", tv.keyScheduleContext, ctx.contextParams.keyScheduleContext)
	assertBytesEqual(tv.t, tv.suite, "Incorrect parameter 'secret'", tv.secret, ctx.contextParams.secret)
	assertBytesEqual(tv.t, tv.suite, "Incorrect parameter 'key'", tv.key, ctx.Key)
	assertBytesEqual(tv.t, tv.suite, "Incorrect parameter 'base_nonce'", tv.baseNonce, ctx.BaseNonce)
	assertBytesEqual(tv.t, tv.suite, "Incorrect parameter 'exporter_secret'", tv.exporterSecret, ctx.ExporterSecret)
}
// verifyPublicKeysEqual compares two public keys via their serialized forms.
func verifyPublicKeysEqual(tv testVector, pkX, pkY KEMPublicKey) {
	pkXm := mustSerializePub(tv.suite, pkX)
	pkYm := mustSerializePub(tv.suite, pkY)
	assertBytesEqual(tv.t, tv.suite, "Incorrect public key", []byte(pkXm), []byte(pkYm))
}

// verifyPrivateKeysEqual compares two private keys via their serialized forms.
func verifyPrivateKeysEqual(tv testVector, skX, skY KEMPrivateKey) {
	skXm := mustSerializePriv(tv.suite, skX)
	skYm := mustSerializePriv(tv.suite, skY)
	assertBytesEqual(tv.t, tv.suite, "Incorrect private key", []byte(skXm), []byte(skYm))
}
// verifyTestVector replays one vector end to end: re-derives all key pairs
// from their recorded ikm, pins the ephemeral key so setup is deterministic,
// runs sender and receiver setup, and checks all recorded parameters and
// encryptions.
func verifyTestVector(tv testVector) {
	setup := setupModes[tv.mode]

	skR, pkR, err := tv.suite.KEM.DeriveKeyPair(tv.ikmR)
	assertNotError(tv.t, tv.suite, "Error in DeriveKeyPair", err)
	verifyPublicKeysEqual(tv, tv.pkR, pkR)
	verifyPrivateKeysEqual(tv, tv.skR, skR)

	skE, pkE, err := tv.suite.KEM.DeriveKeyPair(tv.ikmE)
	assertNotError(tv.t, tv.suite, "Error in DeriveKeyPair", err)
	verifyPublicKeysEqual(tv, tv.pkE, pkE)
	verifyPrivateKeysEqual(tv, tv.skE, skE)
	// Fix the ephemeral key so the sender setup reproduces the vector exactly.
	tv.suite.KEM.setEphemeralKeyPair(skE)

	var pkS KEMPublicKey
	var skS KEMPrivateKey
	if setup.Mode == modeAuth || setup.Mode == modeAuthPSK {
		// Only Auth-style modes carry a sender key pair.
		skS, pkS, err = tv.suite.KEM.DeriveKeyPair(tv.ikmS)
		assertNotError(tv.t, tv.suite, "Error in DeriveKeyPair", err)
		verifyPublicKeysEqual(tv, tv.pkS, pkS)
		verifyPrivateKeysEqual(tv, tv.skS, skS)
	}

	enc, ctxS, err := setup.I(tv.suite, pkR, tv.info, skS, tv.psk, tv.psk_id)
	assertNotError(tv.t, tv.suite, "Error in SetupI", err)
	assertBytesEqual(tv.t, tv.suite, "Encapsulated key mismatch", enc, tv.enc)

	ctxR, err := setup.R(tv.suite, skR, tv.enc, tv.info, pkS, tv.psk, tv.psk_id)
	assertNotError(tv.t, tv.suite, "Error in SetupR", err)

	verifyParameters(tv, ctxS.context)
	verifyParameters(tv, ctxR.context)
	verifyEncryptions(tv, ctxS, ctxR)
}
// vectorTest adapts a test vector into a subtest function.
func vectorTest(vector testVector) func(t *testing.T) {
	return func(t *testing.T) {
		verifyTestVector(vector)
	}
}

// verifyTestVectors decodes a JSON array of vectors and verifies each one,
// either inline or as a labeled subtest when subtest is true.
func verifyTestVectors(t *testing.T, vectorString []byte, subtest bool) {
	vectors := testVectorArray{t: t}
	err := json.Unmarshal(vectorString, &vectors)
	require.Nil(t, err, "Error decoding test vector string: %v", err)

	for _, tv := range vectors.vectors {
		test := vectorTest(tv)
		if !subtest {
			test(t)
		} else {
			label := fmt.Sprintf("kem=%04x/kdf=%04x/aead=%04x/mode=%02x", tv.kem_id, tv.kdf_id, tv.aead_id, tv.mode)
			t.Run(label, test)
		}
	}
}
// generateEncryptions produces testVectorEncryptionCount seal/open records
// with distinct AADs, verifying each round trip as it goes. The count is 257
// — presumably chosen to push the nonce counter past a single byte; TODO
// confirm that intent against the spec's test-vector requirements.
func generateEncryptions(t *testing.T, suite CipherSuite, ctxS *SenderContext, ctxR *ReceiverContext) ([]encryptionTestVector, error) {
	vectors := make([]encryptionTestVector, testVectorEncryptionCount)
	for i := 0; i < len(vectors); i++ {
		aad := []byte(fmt.Sprintf("Count-%d", i))
		encrypted := ctxS.Seal(aad, original)
		decrypted, err := ctxR.Open(aad, encrypted)
		assertNotError(t, suite, "Decryption failure", err)
		assertBytesEqual(t, suite, "Incorrect decryption", original, decrypted)

		vectors[i] = encryptionTestVector{
			plaintext:  original,
			aad:        aad,
			nonce:      ctxS.nonces[i],
			ciphertext: encrypted,
		}
	}

	return vectors, nil
}
// generateExports records exporter outputs for a few representative contexts
// (empty, a single zero byte, and a text label), first checking that sender
// and receiver agree on each exported secret.
func generateExports(t *testing.T, suite CipherSuite, ctxS *SenderContext, ctxR *ReceiverContext) ([]exporterTestVector, error) {
	exportContexts := [][]byte{
		[]byte(""),
		[]byte{0x00},
		[]byte("TestContext"),
	}
	vectors := make([]exporterTestVector, len(exportContexts))
	for i := 0; i < len(vectors); i++ {
		exportI := ctxS.Export(exportContexts[i], testVectorExportLength)
		exportR := ctxR.Export(exportContexts[i], testVectorExportLength)
		assertBytesEqual(t, suite, "Incorrect export", exportI, exportR)
		vectors[i] = exporterTestVector{
			exportContext: exportContexts[i],
			exportLength:  testVectorExportLength,
			exportValue:   exportI,
		}
	}

	return vectors, nil
}
// generateTestVector builds one complete test vector for the given mode and
// ciphersuite: it generates key pairs (sender keys only for Auth modes, PSK
// only for PSK modes), pins the ephemeral key, runs both setups, and records
// encryptions and exports.
func generateTestVector(t *testing.T, setup setupMode, kem_id KEMID, kdf_id KDFID, aead_id AEADID) testVector {
	suite, err := AssembleCipherSuite(kem_id, kdf_id, aead_id)
	require.Nil(t, err, "[%x, %x, %x] Error looking up ciphersuite: %s", kem_id, kdf_id, aead_id, err)

	skR, pkR, ikmR := mustGenerateKeyPair(t, suite)
	skE, pkE, ikmE := mustGenerateKeyPair(t, suite)

	// The sender key share is only required for Auth mode variants.
	var pkS KEMPublicKey
	var skS KEMPrivateKey
	var ikmS []byte
	if setup.Mode == modeAuth || setup.Mode == modeAuthPSK {
		skS, pkS, ikmS = mustGenerateKeyPair(t, suite)
	}

	// A PSK is only required for PSK mode variants.
	var psk []byte
	var psk_id []byte
	if setup.Mode == modePSK || setup.Mode == modeAuthPSK {
		psk = fixedPSK
		psk_id = fixedPSKID
	}

	// Pin the ephemeral key so the recorded vector is reproducible.
	suite.KEM.setEphemeralKeyPair(skE)

	// NOTE(review): the two messages below say "SetupPSKS"/"SetupPSKR" even
	// though setup.I/setup.R may be any mode's setup function.
	enc, ctxS, err := setup.I(suite, pkR, info, skS, psk, psk_id)
	assertNotError(t, suite, "Error in SetupPSKS", err)

	ctxR, err := setup.R(suite, skR, enc, info, pkS, psk, psk_id)
	assertNotError(t, suite, "Error in SetupPSKR", err)

	encryptionVectors := []encryptionTestVector{}
	if aead_id != AEAD_EXPORT_ONLY {
		encryptionVectors, err = generateEncryptions(t, suite, ctxS, ctxR)
		assertNotError(t, suite, "Error in generateEncryptions", err)
	}

	exportVectors, err := generateExports(t, suite, ctxS, ctxR)
	assertNotError(t, suite, "Error in generateExports", err)

	vector := testVector{
		t:                  t,
		suite:              suite,
		mode:               setup.Mode,
		kem_id:             kem_id,
		kdf_id:             kdf_id,
		aead_id:            aead_id,
		info:               info,
		skR:                skR,
		pkR:                pkR,
		skS:                skS,
		pkS:                pkS,
		skE:                skE,
		pkE:                pkE,
		ikmR:               ikmR,
		ikmS:               ikmS,
		ikmE:               ikmE,
		psk:                psk,
		psk_id:             psk_id,
		enc:                ctxS.setupParams.enc,
		sharedSecret:       ctxS.setupParams.sharedSecret,
		keyScheduleContext: ctxS.contextParams.keyScheduleContext,
		secret:             ctxS.contextParams.secret,
		key:                ctxS.Key,
		baseNonce:          ctxS.BaseNonce,
		exporterSecret:     ctxS.ExporterSecret,
		encryptions:        encryptionVectors,
		exports:            exportVectors,
	}

	return vector
}
// TestVectorGenerate produces test vectors for every supported
// KEM/KDF/AEAD/mode combination, verifies they round-trip through the
// verifier, and optionally writes them to the file named by the output
// environment variable.
func TestVectorGenerate(t *testing.T) {
	// We only generate test vectors for select ciphersuites
	supportedKEMs := []KEMID{DHKEM_X25519, DHKEM_X448, DHKEM_P256, DHKEM_P521}
	supportedKDFs := []KDFID{KDF_HKDF_SHA256, KDF_HKDF_SHA512}
	supportedAEADs := []AEADID{AEAD_AESGCM128, AEAD_AESGCM256, AEAD_CHACHA20POLY1305, AEAD_EXPORT_ONLY}
	vectors := make([]testVector, 0)
	for _, kem_id := range supportedKEMs {
		for _, kdf_id := range supportedKDFs {
			for _, aead_id := range supportedAEADs {
				for _, setup := range setupModes {
					vectors = append(vectors, generateTestVector(t, setup, kem_id, kdf_id, aead_id))
				}
			}
		}
	}
	// Encode the test vectors
	encoded, err := json.Marshal(vectors)
	require.Nil(t, err, "Error producing test vectors: %v", err)
	// Verify that we process them correctly
	verifyTestVectors(t, encoded, false)
	// Write them to a file if requested
	var outputFile string
	if outputFile = os.Getenv(outputTestVectorEnvironmentKey); len(outputFile) > 0 {
		// NOTE(review): ioutil.WriteFile is deprecated since Go 1.16;
		// os.WriteFile is a drop-in replacement if the toolchain allows.
		err = ioutil.WriteFile(outputFile, encoded, 0644)
		require.Nil(t, err, "Error writing test vectors: %v", err)
	}
}
// TestVectorVerify re-checks previously generated test vectors read from the
// file named by the input environment variable; it is skipped when no file
// is configured.
func TestVectorVerify(t *testing.T) {
	inputFile := os.Getenv(inputTestVectorEnvironmentKey)
	if inputFile == "" {
		t.Skip("Test vectors were not provided")
	}
	encoded, err := ioutil.ReadFile(inputFile)
	require.Nil(t, err, "Failed reading test vectors: %v", err)
	verifyTestVectors(t, encoded, true)
}
|
package schema
import (
"errors"
"fmt"
"log"
)
/* 全局接口错误码定义 99开头的为系统级错误 */
// 错误码:10***
var (
	// Generic error codes. Messages are "code:template" where the template
	// part may contain fmt verbs filled in by Panic.
	SUCCESS         = errors.New("00000:ok")
	FAIL            = errors.New("99999:%s")
	MONGO_ERROR     = errors.New("99100:mongo error -> %s")
	TCC_VALUE_ERROR = errors.New("99201:无效tcc值 %s")
	// Business error codes go below.
)

// Panic formats userDefinedErr's message (treated as a fmt template) with
// args, logs the result, and panics with a new error carrying that message.
// A nil userDefinedErr is reported as FAIL with the message "Panic is nil".
func Panic(userDefinedErr error, args ...interface{}) {
	if userDefinedErr == nil {
		// Substitute directly instead of recursing into Panic(FAIL, ...);
		// same observable behavior, no self-call.
		userDefinedErr = FAIL
		args = []interface{}{"Panic is nil"}
	}
	errMsg := fmt.Sprintf(userDefinedErr.Error(), args...)
	log.Printf("Panic Exception: %s", errMsg)
	panic(errors.New(errMsg))
}
|
package futures
import (
"testing"
"github.com/stretchr/testify/suite"
)
// longShortRatioServiceTestSuite wires the long/short ratio service tests
// into the shared mock-client fixture provided by baseTestSuite.
type longShortRatioServiceTestSuite struct {
	baseTestSuite
}
// TestLongShortRatioService runs the long/short ratio suite via testify.
func TestLongShortRatioService(t *testing.T) {
	suite.Run(t, new(longShortRatioServiceTestSuite))
}
// TestOpenInterestStatistics verifies that NewLongShortRatioService sends
// the expected query parameters and parses the mocked response into two
// LongShortRatio entries.
// NOTE(review): the method name looks copy-pasted from the open-interest
// tests — it actually exercises the long/short ratio endpoint.
func (s *longShortRatioServiceTestSuite) TestOpenInterestStatistics() {
	// Canned API response containing two consecutive data points.
	data := []byte(`[
{
"symbol":"BTCUSDT",
"longShortRatio":"1.8105",
"longAccount": "0.6442",
"shortAccount":"0.3558",
"timestamp":1583139600000
},
{
"symbol":"BTCUSDT",
"longShortRatio":"0.5576",
"longAccount": "0.3580",
"shortAccount":"0.6420",
"timestamp":1583139900000
}
]`)
	s.mockDo(data, nil)
	defer s.assertDo()
	symbol := "BTCUSDT"
	period := "15m"
	limit := 10
	startTime := int64(1583139600000)
	endTime := int64(1583139900000)
	// Every builder option must surface as a request parameter.
	s.assertReq(func(r *request) {
		e := newRequest().setParams(params{
			"symbol":    symbol,
			"period":    period,
			"limit":     limit,
			"startTime": startTime,
			"endTime":   endTime,
		})
		s.assertRequestEqual(e, r)
	})
	longShortRatios, err := s.client.NewLongShortRatioService().Symbol(symbol).
		Period(period).Limit(limit).StartTime(startTime).
		EndTime(endTime).Do(newContext())
	s.r().NoError(err)
	s.Len(longShortRatios, 2)
	longShortRatio1 := &LongShortRatio{
		Symbol:         "BTCUSDT",
		LongShortRatio: "1.8105",
		ShortAccount:   "0.3558",
		LongAccount:    "0.6442",
		Timestamp:      1583139600000,
	}
	longShortRatio2 := &LongShortRatio{
		Symbol:         "BTCUSDT",
		LongShortRatio: "0.5576",
		ShortAccount:   "0.6420",
		LongAccount:    "0.3580",
		Timestamp:      1583139900000,
	}
	s.assertLongShortRatioEqual(longShortRatio1, longShortRatios[0])
	s.assertLongShortRatioEqual(longShortRatio2, longShortRatios[1])
}
// assertLongShortRatioEqual asserts field-by-field equality of the actual
// LongShortRatio against the expected one.
func (s *longShortRatioServiceTestSuite) assertLongShortRatioEqual(e, a *LongShortRatio) {
	assert := s.r()
	assert.Equal(e.Symbol, a.Symbol, "Symbol")
	assert.Equal(e.Timestamp, a.Timestamp, "Timestamp")
	assert.Equal(e.LongShortRatio, a.LongShortRatio, "LongShortRatio")
	assert.Equal(e.LongAccount, a.LongAccount, "LongAccount")
	assert.Equal(e.ShortAccount, a.ShortAccount, "ShortAccount")
}
|
package service_test
import (
"log"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/danikarik/handler/pkg/service"
)
// TestUrlHandlerRequest posts JSON arrays of URLs to the service and checks
// the returned status code for an OK payload, an empty payload, and one
// exceeding the service's URL limit.
// NOTE(review): log.Fatalf calls os.Exit and aborts the entire test binary;
// t.Fatalf/t.Errorf is the correct way to fail a single subtest. Left as-is
// here because removing the log calls would orphan the "log" import.
// NOTE(review): the "AboveLimit" body ends with a trailing comma and is not
// valid JSON — the 400 may come from the decoder rather than the limit
// check; confirm which behavior is intended.
func TestUrlHandlerRequest(t *testing.T) {
	ts := httptest.NewServer(service.New())
	defer ts.Close()
	testCases := []struct {
		Name       string
		Body       string
		StatusCode int
	}{
		{
			Name:       "OK",
			Body:       `["https://kaspi.kz"]`,
			StatusCode: http.StatusOK,
		},
		{
			Name:       "Empty",
			Body:       `[]`,
			StatusCode: http.StatusOK,
		},
		{
			Name: "AboveLimit",
			Body: `[
"https://google.com", "https://apple.com",
"https://google.com", "https://apple.com",
"https://google.com", "https://apple.com",
"https://google.com", "https://apple.com",
"https://google.com", "https://apple.com",
"https://google.com", "https://apple.com",
"https://google.com", "https://apple.com",
"https://google.com", "https://apple.com",
"https://google.com", "https://apple.com",
"https://google.com", "https://apple.com",
"https://google.com", "https://apple.com",
"https://google.com", "https://apple.com",
]`,
			StatusCode: http.StatusBadRequest,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			resp, err := ts.Client().Post(ts.URL, "application/json", strings.NewReader(tc.Body))
			if err != nil {
				log.Fatalf("got error: %v", err)
			}
			if resp.StatusCode != tc.StatusCode {
				log.Fatalf("got: %v, expected: %v", resp.StatusCode, tc.StatusCode)
			}
		})
	}
}
|
package PDA
import (
"fmt"
"testing"
)
// TestPDARulebook_NextConfig drives a balanced-parentheses PDA rulebook
// through a few transitions and a free-move resolution, printing each
// configuration as it goes.
// NOTE(review): this test only prints; it makes no assertions, so it can
// never fail on wrong transitions — consider asserting expected states.
func TestPDARulebook_NextConfig(t *testing.T) {
	// Rules: push a 'b' per '(' on top of the '$' bottom marker, pop a 'b'
	// per ')', and a free move (character -1) back to state 1 on bare '$'.
	rulebook := PDARulebook{rules: []PDARule{{
		state:          1,
		character:      '(',
		nextState:      2,
		popCharacter:   '$',
		pushCharacters: []int32{'b', '$'},
	}, {
		state:          2,
		character:      '(',
		nextState:      2,
		popCharacter:   'b',
		pushCharacters: []int32{'b', 'b'},
	}, {
		state:          2,
		character:      ')',
		nextState:      2,
		popCharacter:   'b',
		pushCharacters: []int32{},
	}, {
		state:          2,
		character:      -1,
		nextState:      1,
		popCharacter:   '$',
		pushCharacters: []int32{'$'},
	}}}
	configuration := PDAConfiguration{
		state: 1,
		stack: Stack{contents: []int32{'$'}},
	}
	configuration = rulebook.NextConfig(configuration, '(')
	PrintConfig(configuration)
	configuration = rulebook.NextConfig(configuration, '(')
	PrintConfig(configuration)
	configuration = rulebook.NextConfig(configuration, ')')
	PrintConfig(configuration)
	// Reset to a bare-'$' stack and exercise the free move back to state 1.
	configuration = PDAConfiguration{2, Stack{contents: []int32{'$'}}}
	configuration = rulebook.followFreeMoves(configuration)
	PrintConfig(configuration)
}
// PrintConfig writes a configuration's state followed by its stack contents
// (top first) to stdout on a single line, e.g. "2 b$".
func PrintConfig(configuration PDAConfiguration) {
	fmt.Printf("%d ", configuration.state)
	for _, symbol := range configuration.stack.contents {
		fmt.Print(string(symbol))
	}
	fmt.Println()
}
|
package main
import (
"fmt"
"os"
)
// main repeatedly prompts for an integer and stops once an even number is
// entered — an even total can be split equally between two people.
// NOTE(review): the original did not compile (untyped `var num`/`var x`,
// undefined `n`, loop condition never changing); this implements the
// apparent intent of "keep asking until the number is divisible by two" —
// confirm with the author.
func main() {
	var num int
	for {
		fmt.Println("Insira um número:")
		if _, err := fmt.Fscanf(os.Stdin, "%d", &num); err != nil {
			// Bail out on EOF or malformed input instead of looping forever.
			return
		}
		if num%2 == 0 {
			break
		}
	}
	fmt.Println("Agora sim podemos dividir igualmente entre mim e você!")
}
|
package orm
import (
"video_server/api/defs"
"video_server/api/utils"
)
// AddNewComments inserts a new comment row (with a freshly generated UUID)
// for the given video and author. It returns any UUID, prepare, or exec
// error unchanged.
func AddNewComments(vid string, aid int, content string) error {
	id, err := utils.NewUUID()
	if err != nil {
		return err
	}
	stmt, err := conn.Prepare("INSERT INTO comments (id, video_id, author_id, content) VALUES (?,?,?,?)")
	if err != nil {
		return err
	}
	// Fix: defer immediately after Prepare — previously the defer sat after
	// the Exec error check, leaking the statement when Exec failed.
	defer stmt.Close()
	if _, err = stmt.Exec(id, vid, aid, content); err != nil {
		return err
	}
	return nil
}
// GetCommentList returns the comments on video vid whose create_time falls
// in the (from, to] unix-timestamp window, joined with each author's login
// name.
func GetCommentList(vid string, from, to int) (list []*defs.Comment, errs error) {
	var res []*defs.Comment
	stmt, err := conn.Prepare("SELECT a.id,b.login_name,a.content FROM comments a INNER JOIN user b ON a.author_id = b.id WHERE a.video_id = ? AND a.create_time > FROM_UNIXTIME(?) AND a.create_time <= FROM_UNIXTIME(?); ")
	if err != nil {
		return res, err
	}
	// Fix: close the statement on every path (was deferred after the scan
	// loop, leaking it on early returns).
	defer stmt.Close()
	rows, err := stmt.Query(vid, from, to)
	if err != nil {
		return res, err
	}
	// Fix: rows were never closed, pinning the underlying connection.
	defer rows.Close()
	for rows.Next() {
		var id, author, content string
		if err := rows.Scan(&id, &author, &content); err != nil {
			return res, err
		}
		comment := &defs.Comment{id, vid, author, content}
		res = append(res, comment)
	}
	// Fix: surface iteration errors that rows.Next() swallows.
	if err := rows.Err(); err != nil {
		return res, err
	}
	return res, nil
}
|
package runner
import (
"net"
"github.com/go-stack/stack"
"github.com/karlmutch/errors"
)
// Functions related to networking needs for the runner
// GetFreePort returns a TCP port number that was observed to be free at the
// time of the call, resolved from the supplied address hint.
func GetFreePort(hint string) (port int, err errors.Error) {
	resolved, errGo := net.ResolveTCPAddr("tcp", hint)
	if errGo != nil {
		return 0, errors.Wrap(errGo).With("stack", stack.Trace().TrimRuntime())
	}
	listener, errGo := net.ListenTCP("tcp", resolved)
	if errGo != nil {
		return 0, errors.Wrap(errGo).With("stack", stack.Trace().TrimRuntime())
	}
	port = listener.Addr().(*net.TCPAddr).Port
	// Close immediately rather than deferring so the port frees up at once.
	listener.Close()
	return port, nil
}
|
package requestBody
// Targeting keys accepted by the push API.
const (
	TAG             = "tag"
	TAG_AND         = "tag_and"
	TAG_NOT         = "tag_not"
	ALIAS           = "alias"
	REGISTRATION_ID = "registration_id"
	SEGMENT         = "segment"
	ABTEST          = "abtest"
)

// Audience describes who a push message is delivered to. Object holds the
// value serialized into the request: either the string "all" or a map of
// targeting key -> values.
type Audience struct {
	Object   interface{}
	audience map[string][]string
}

// set lazily initializes the underlying map (pointing Object at it on first
// use) and records the values for key. A later call with the same key
// overwrites the earlier values.
// Fix: removed dead commented-out code that referenced undeclared
// `this`/`ok` variables.
func (a *Audience) set(key string, v []string) {
	if a.audience == nil {
		a.audience = make(map[string][]string)
		a.Object = a.audience
	}
	a.audience[key] = v
}

// SetTag targets devices matching any of the given tags.
func (a *Audience) SetTag(tags []string) {
	a.set(TAG, tags)
}

// SetTagAnd targets devices matching all of the given tags.
func (a *Audience) SetTagAnd(tags []string) {
	a.set(TAG_AND, tags)
}

// SetTagNot targets devices matching none of the given tags.
func (a *Audience) SetTagNot(tags []string) {
	a.set(TAG_NOT, tags)
}

// SetAlias targets the given aliases.
func (a *Audience) SetAlias(alias []string) {
	a.set(ALIAS, alias)
}

// SetRegistrationId targets the given device registration IDs.
func (a *Audience) SetRegistrationId(ids []string) {
	a.set(REGISTRATION_ID, ids)
}

// SetAll targets every device. It replaces Object but does not clear a
// previously populated audience map.
func (a *Audience) SetAll() {
	a.Object = "all"
}
|
package weapp
import (
"fmt"
"io"
"net/http"
"net/http/httptest"
"os"
"path"
"testing"
)
// TestBankCardByURL exercises bankCardByURL against a mocked OCR endpoint
// (plus a /mediaurl image source) for both recognize modes.
func TestBankCardByURL(t *testing.T) {
	server := http.NewServeMux()
	server.HandleFunc(apiBankcard, func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "POST" {
			t.Fatalf("Expect 'POST' get '%s'", r.Method)
		}
		path := r.URL.EscapedPath()
		if path != apiBankcard {
			t.Fatalf("Except to path '%s',get '%s'", apiBankcard, path)
		}
		if err := r.ParseForm(); err != nil {
			t.Fatal(err)
		}
		queries := []string{"type", "access_token", "img_url"}
		for _, v := range queries {
			content := r.Form.Get(v)
			if content == "" {
				t.Fatalf("Params [%s] can not be empty", v)
			}
		}
		// Fix: set headers before WriteHeader — changes made afterwards are
		// silently ignored by net/http.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		raw := `{
"errcode": 0,
"errmsg": "ok",
"id": "622213XXXXXXXXX"
}`
		if _, err := w.Write([]byte(raw)); err != nil {
			t.Fatal(err)
		}
	})
	server.HandleFunc("/mediaurl", func(w http.ResponseWriter, r *http.Request) {
		filename := testIMGName
		file, err := os.Open(filename)
		if err != nil {
			t.Fatal(err)
		}
		defer file.Close()
		ext := path.Ext(filename)
		ext = ext[1:] // drop the leading dot
		w.Header().Set("Content-Type", "image/"+ext)
		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", path.Base(filename)))
		w.WriteHeader(http.StatusOK)
		if _, err := io.Copy(w, file); err != nil {
			t.Fatal(err)
		}
	})
	ts := httptest.NewServer(server)
	defer ts.Close()
	if _, err := bankCardByURL(ts.URL+apiBankcard, "mock-access-token", ts.URL+"/mediaurl", RecognizeModePhoto); err != nil {
		t.Fatal(err)
	}
	if _, err := bankCardByURL(ts.URL+apiBankcard, "mock-access-token", ts.URL+"/mediaurl", RecognizeModeScan); err != nil {
		t.Fatal(err)
	}
}
// TestBankCard exercises bankCard (multipart file upload) against a mocked
// OCR endpoint for both recognize modes.
func TestBankCard(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "POST" {
			t.Fatalf("Expect 'POST' get '%s'", r.Method)
		}
		path := r.URL.EscapedPath()
		if path != apiBankcard {
			t.Fatalf("Except to path '%s',get '%s'", apiBankcard, path)
		}
		if err := r.ParseForm(); err != nil {
			t.Fatal(err)
		}
		queries := []string{"type", "access_token"}
		for _, v := range queries {
			content := r.Form.Get(v)
			if content == "" {
				t.Fatalf("Params [%s] can not be empty", v)
			}
		}
		if _, _, err := r.FormFile("img"); err != nil {
			t.Fatal(err)
		}
		// Fix: set headers before WriteHeader — changes made afterwards are
		// silently ignored by net/http.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		raw := `{
"errcode": 0,
"errmsg": "ok",
"id": "622213XXXXXXXXX"
}`
		if _, err := w.Write([]byte(raw)); err != nil {
			t.Fatal(err)
		}
	}))
	defer ts.Close()
	if _, err := bankCard(ts.URL+apiBankcard, "mock-access-token", testIMGName, RecognizeModePhoto); err != nil {
		t.Fatal(err)
	}
	if _, err := bankCard(ts.URL+apiBankcard, "mock-access-token", testIMGName, RecognizeModeScan); err != nil {
		t.Fatal(err)
	}
}
// TestDriverLicenseByURL exercises driverLicenseByURL against a mocked OCR
// endpoint plus a /mediaurl image source.
func TestDriverLicenseByURL(t *testing.T) {
	server := http.NewServeMux()
	server.HandleFunc(apiDrivingLicense, func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "POST" {
			t.Fatalf("Expect 'POST' get '%s'", r.Method)
		}
		path := r.URL.EscapedPath()
		if path != apiDrivingLicense {
			t.Fatalf("Except to path '%s',get '%s'", apiDrivingLicense, path)
		}
		if err := r.ParseForm(); err != nil {
			t.Fatal(err)
		}
		queries := []string{"access_token", "img_url"}
		for _, v := range queries {
			content := r.Form.Get(v)
			if content == "" {
				t.Fatalf("Params [%s] can not be empty", v)
			}
		}
		// Fix: set headers before WriteHeader — changes made afterwards are
		// silently ignored by net/http.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		raw := `{
"errcode": 0,
"errmsg": "ok",
"id_num": "660601xxxxxxxx1234",
"name": "张三",
"sex": "男",
"nationality": "中国",
"address": "广东省东莞市xxxxx号",
"birth_date": "1990-12-21",
"issue_date": "2012-12-21",
"car_class": "C1",
"valid_from": "2018-07-06",
"valid_to": "2020-07-01",
"official_seal": "xx市公安局公安交通管理局"
}`
		if _, err := w.Write([]byte(raw)); err != nil {
			t.Fatal(err)
		}
	})
	server.HandleFunc("/mediaurl", func(w http.ResponseWriter, r *http.Request) {
		filename := testIMGName
		file, err := os.Open(filename)
		if err != nil {
			t.Fatal(err)
		}
		defer file.Close()
		ext := path.Ext(filename)
		ext = ext[1:] // drop the leading dot
		w.Header().Set("Content-Type", "image/"+ext)
		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", path.Base(filename)))
		w.WriteHeader(http.StatusOK)
		if _, err := io.Copy(w, file); err != nil {
			t.Fatal(err)
		}
	})
	ts := httptest.NewServer(server)
	defer ts.Close()
	if _, err := driverLicenseByURL(ts.URL+apiDrivingLicense, "mock-access-token", ts.URL+"/mediaurl"); err != nil {
		t.Fatal(err)
	}
}
// TestDriverLicense exercises driverLicense (multipart file upload) against
// a mocked OCR endpoint.
func TestDriverLicense(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "POST" {
			t.Fatalf("Expect 'POST' get '%s'", r.Method)
		}
		path := r.URL.EscapedPath()
		if path != apiDrivingLicense {
			t.Fatalf("Except to path '%s',get '%s'", apiDrivingLicense, path)
		}
		if err := r.ParseForm(); err != nil {
			t.Fatal(err)
		}
		if r.Form.Get("access_token") == "" {
			t.Fatalf("access_token can not be empty")
		}
		if _, _, err := r.FormFile("img"); err != nil {
			t.Fatal(err)
		}
		// Fix: set headers before WriteHeader — changes made afterwards are
		// silently ignored by net/http.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		raw := `{
"errcode": 0,
"errmsg": "ok",
"id_num": "660601xxxxxxxx1234",
"name": "张三",
"sex": "男",
"nationality": "中国",
"address": "广东省东莞市xxxxx号",
"birth_date": "1990-12-21",
"issue_date": "2012-12-21",
"car_class": "C1",
"valid_from": "2018-07-06",
"valid_to": "2020-07-01",
"official_seal": "xx市公安局公安交通管理局"
}`
		if _, err := w.Write([]byte(raw)); err != nil {
			t.Fatal(err)
		}
	}))
	defer ts.Close()
	if _, err := driverLicense(ts.URL+apiDrivingLicense, "mock-access-token", testIMGName); err != nil {
		t.Fatal(err)
	}
}
// TestBusinessLicenseByURL exercises businessLicenseByURL against a mocked
// OCR endpoint plus a /mediaurl image source.
func TestBusinessLicenseByURL(t *testing.T) {
	server := http.NewServeMux()
	server.HandleFunc("/cv/ocr/bizlicense", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "POST" {
			t.Fatalf("Expect 'POST' get '%s'", r.Method)
		}
		if r.URL.EscapedPath() != "/cv/ocr/bizlicense" {
			t.Error("Invalid request path")
		}
		if err := r.ParseForm(); err != nil {
			t.Fatal(err)
		}
		queries := []string{"access_token", "img_url"}
		for _, v := range queries {
			content := r.Form.Get(v)
			if content == "" {
				t.Fatalf("Params [%s] can not be empty", v)
			}
		}
		// Fix: set headers before WriteHeader — changes made afterwards are
		// silently ignored by net/http.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		raw := `{
"errcode": 0,
"errmsg": "ok",
"reg_num": "123123",
"serial": "123123",
"legal_representative": "张三",
"enterprise_name": "XX饮食店",
"type_of_organization": "个人经营",
"address": "XX市XX区XX路XX号",
"type_of_enterprise": "xxx",
"business_scope": "中型餐馆(不含凉菜、不含裱花蛋糕,不含生食海产品)。",
"registered_capital": "200万",
"paid_in_capital": "200万",
"valid_period": "2019年1月1日",
"registered_date": "2018年1月1日",
"cert_position": {
"pos": {
"left_top": {
"x": 155,
"y": 191
},
"right_top": {
"x": 725,
"y": 157
},
"right_bottom": {
"x": 743,
"y": 512
},
"left_bottom": {
"x": 164,
"y": 525
}
}
},
"img_size": {
"w": 966,
"h": 728
}
}`
		if _, err := w.Write([]byte(raw)); err != nil {
			t.Fatal(err)
		}
	})
	server.HandleFunc("/mediaurl", func(w http.ResponseWriter, r *http.Request) {
		filename := testIMGName
		file, err := os.Open(filename)
		if err != nil {
			t.Fatal(err)
		}
		defer file.Close()
		ext := path.Ext(filename)
		ext = ext[1:] // drop the leading dot
		w.Header().Set("Content-Type", "image/"+ext)
		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", path.Base(filename)))
		w.WriteHeader(http.StatusOK)
		if _, err := io.Copy(w, file); err != nil {
			t.Fatal(err)
		}
	})
	ts := httptest.NewServer(server)
	defer ts.Close()
	if _, err := businessLicenseByURL(ts.URL+apiBusinessLicense, "mock-access-token", ts.URL+"/mediaurl"); err != nil {
		t.Fatal(err)
	}
}
// TestBusinessLicense exercises businessLicense (multipart file upload)
// against a mocked OCR endpoint.
func TestBusinessLicense(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "POST" {
			t.Fatalf("Expect 'POST' get '%s'", r.Method)
		}
		if r.URL.EscapedPath() != "/cv/ocr/bizlicense" {
			t.Error("Invalid request path")
		}
		if err := r.ParseForm(); err != nil {
			t.Fatal(err)
		}
		if r.Form.Get("access_token") == "" {
			t.Fatalf("access_token can not be empty")
		}
		if _, _, err := r.FormFile("img"); err != nil {
			t.Fatal(err)
		}
		// Fix: set headers before WriteHeader — changes made afterwards are
		// silently ignored by net/http.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		raw := `{
"errcode": 0,
"errmsg": "ok",
"reg_num": "123123",
"serial": "123123",
"legal_representative": "张三",
"enterprise_name": "XX饮食店",
"type_of_organization": "个人经营",
"address": "XX市XX区XX路XX号",
"type_of_enterprise": "xxx",
"business_scope": "中型餐馆(不含凉菜、不含裱花蛋糕,不含生食海产品)。",
"registered_capital": "200万",
"paid_in_capital": "200万",
"valid_period": "2019年1月1日",
"registered_date": "2018年1月1日",
"cert_position": {
"pos": {
"left_top": {
"x": 155,
"y": 191
},
"right_top": {
"x": 725,
"y": 157
},
"right_bottom": {
"x": 743,
"y": 512
},
"left_bottom": {
"x": 164,
"y": 525
}
}
},
"img_size": {
"w": 966,
"h": 728
}
}`
		if _, err := w.Write([]byte(raw)); err != nil {
			t.Fatal(err)
		}
	}))
	defer ts.Close()
	if _, err := businessLicense(ts.URL+apiBusinessLicense, "mock-access-token", testIMGName); err != nil {
		t.Fatal(err)
	}
}
// TestPrintedTextByURL exercises printedTextByURL against a mocked OCR
// endpoint plus a /mediaurl image source.
func TestPrintedTextByURL(t *testing.T) {
	server := http.NewServeMux()
	server.HandleFunc("/cv/ocr/comm", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "POST" {
			t.Fatalf("Expect 'POST' get '%s'", r.Method)
		}
		if r.URL.EscapedPath() != "/cv/ocr/comm" {
			t.Error("Invalid request path")
		}
		if err := r.ParseForm(); err != nil {
			t.Fatal(err)
		}
		queries := []string{"access_token", "img_url"}
		for _, v := range queries {
			content := r.Form.Get(v)
			if content == "" {
				t.Fatalf("Params [%s] can not be empty", v)
			}
		}
		// Fix: set headers before WriteHeader — changes made afterwards are
		// silently ignored by net/http.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		raw := `{
"errcode": 0,
"errmsg": "ok",
"items": [
{
"text": "腾讯",
"pos": {
"left_top": {
"x": 575,
"y": 519
},
"right_top": {
"x": 744,
"y": 519
},
"right_bottom": {
"x": 744,
"y": 532
},
"left_bottom": {
"x": 573,
"y": 532
}
}
},
{
"text": "微信团队",
"pos": {
"left_top": {
"x": 670,
"y": 516
},
"right_top": {
"x": 762,
"y": 517
},
"right_bottom": {
"x": 762,
"y": 532
},
"left_bottom": {
"x": 670,
"y": 531
}
}
}
],
"img_size": {
"w": 1280,
"h": 720
}
}`
		if _, err := w.Write([]byte(raw)); err != nil {
			t.Fatal(err)
		}
	})
	server.HandleFunc("/mediaurl", func(w http.ResponseWriter, r *http.Request) {
		filename := testIMGName
		file, err := os.Open(filename)
		if err != nil {
			t.Fatal(err)
		}
		defer file.Close()
		ext := path.Ext(filename)
		ext = ext[1:] // drop the leading dot
		w.Header().Set("Content-Type", "image/"+ext)
		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", path.Base(filename)))
		w.WriteHeader(http.StatusOK)
		if _, err := io.Copy(w, file); err != nil {
			t.Fatal(err)
		}
	})
	ts := httptest.NewServer(server)
	defer ts.Close()
	if _, err := printedTextByURL(ts.URL+apiPrintedText, "mock-access-token", ts.URL+"/mediaurl"); err != nil {
		t.Fatal(err)
	}
}
// TestPrintedText exercises printedText (multipart file upload) against a
// mocked OCR endpoint.
func TestPrintedText(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "POST" {
			t.Fatalf("Expect 'POST' get '%s'", r.Method)
		}
		if r.URL.EscapedPath() != "/cv/ocr/comm" {
			t.Error("Invalid request path")
		}
		if err := r.ParseForm(); err != nil {
			t.Fatal(err)
		}
		if r.Form.Get("access_token") == "" {
			t.Fatalf("access_token can not be empty")
		}
		if _, _, err := r.FormFile("img"); err != nil {
			t.Fatal(err)
		}
		// Fix: set headers before WriteHeader — changes made afterwards are
		// silently ignored by net/http.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		raw := `{
"errcode": 0,
"errmsg": "ok",
"items": [
{
"text": "腾讯",
"pos": {
"left_top": {
"x": 575,
"y": 519
},
"right_top": {
"x": 744,
"y": 519
},
"right_bottom": {
"x": 744,
"y": 532
},
"left_bottom": {
"x": 573,
"y": 532
}
}
},
{
"text": "微信团队",
"pos": {
"left_top": {
"x": 670,
"y": 516
},
"right_top": {
"x": 762,
"y": 517
},
"right_bottom": {
"x": 762,
"y": 532
},
"left_bottom": {
"x": 670,
"y": 531
}
}
}
],
"img_size": {
"w": 1280,
"h": 720
}
}`
		if _, err := w.Write([]byte(raw)); err != nil {
			t.Fatal(err)
		}
	}))
	defer ts.Close()
	if _, err := printedText(ts.URL+apiPrintedText, "mock-access-token", testIMGName); err != nil {
		t.Fatal(err)
	}
}
// TestIDCardByURL exercises idCardByURL against a mocked OCR endpoint plus
// a /mediaurl image source, for both recognize modes.
func TestIDCardByURL(t *testing.T) {
	server := http.NewServeMux()
	server.HandleFunc(apiIDCard, func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "POST" {
			t.Fatalf("Expect 'POST' get '%s'", r.Method)
		}
		path := r.URL.EscapedPath()
		if path != apiIDCard {
			t.Fatalf("Except to path '%s',get '%s'", apiIDCard, path)
		}
		if err := r.ParseForm(); err != nil {
			t.Fatal(err)
		}
		queries := []string{"type", "access_token", "img_url"}
		for _, v := range queries {
			content := r.Form.Get(v)
			if content == "" {
				t.Fatalf("Params [%s] can not be empty", v)
			}
		}
		// Fix: set headers before WriteHeader — changes made afterwards are
		// silently ignored by net/http.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		raw := `{
"errcode": 0,
"errmsg": "ok",
"type": "Front",
"id": "44XXXXXXXXXXXXXXX1"
}`
		if _, err := w.Write([]byte(raw)); err != nil {
			t.Fatal(err)
		}
	})
	server.HandleFunc("/mediaurl", func(w http.ResponseWriter, r *http.Request) {
		filename := testIMGName
		file, err := os.Open(filename)
		if err != nil {
			t.Fatal(err)
		}
		defer file.Close()
		ext := path.Ext(filename)
		ext = ext[1:] // drop the leading dot
		w.Header().Set("Content-Type", "image/"+ext)
		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", path.Base(filename)))
		w.WriteHeader(http.StatusOK)
		if _, err := io.Copy(w, file); err != nil {
			t.Fatal(err)
		}
	})
	ts := httptest.NewServer(server)
	defer ts.Close()
	if _, err := idCardByURL(ts.URL+apiIDCard, "mock-access-token", ts.URL+"/mediaurl", RecognizeModePhoto); err != nil {
		t.Fatal(err)
	}
	if _, err := idCardByURL(ts.URL+apiIDCard, "mock-access-token", ts.URL+"/mediaurl", RecognizeModeScan); err != nil {
		t.Fatal(err)
	}
}
// TestIDCard exercises idCard (multipart file upload) against a mocked OCR
// endpoint for both recognize modes.
func TestIDCard(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "POST" {
			t.Fatalf("Expect 'POST' get '%s'", r.Method)
		}
		path := r.URL.EscapedPath()
		if path != apiIDCard {
			t.Fatalf("Except to path '%s',get '%s'", apiIDCard, path)
		}
		if err := r.ParseForm(); err != nil {
			t.Fatal(err)
		}
		queries := []string{"type", "access_token"}
		for _, v := range queries {
			content := r.Form.Get(v)
			if content == "" {
				t.Fatalf("Params [%s] can not be empty", v)
			}
		}
		if _, _, err := r.FormFile("img"); err != nil {
			t.Fatal(err)
		}
		// Fix: set headers before WriteHeader — changes made afterwards are
		// silently ignored by net/http.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		raw := `{
"errcode": 0,
"errmsg": "ok",
"type": "Front",
"id": "44XXXXXXXXXXXXXXX1"
}`
		if _, err := w.Write([]byte(raw)); err != nil {
			t.Fatal(err)
		}
	}))
	defer ts.Close()
	if _, err := idCard(ts.URL+apiIDCard, "mock-access-token", testIMGName, RecognizeModePhoto); err != nil {
		t.Fatal(err)
	}
	if _, err := idCard(ts.URL+apiIDCard, "mock-access-token", testIMGName, RecognizeModeScan); err != nil {
		t.Fatal(err)
	}
}
// TestVehicleLicenseByURL exercises vehicleLicenseByURL against a mocked
// OCR endpoint plus a /mediaurl image source, for both recognize modes.
func TestVehicleLicenseByURL(t *testing.T) {
	server := http.NewServeMux()
	server.HandleFunc(apiVehicleLicense, func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "POST" {
			t.Fatalf("Expect 'POST' get '%s'", r.Method)
		}
		path := r.URL.EscapedPath()
		if path != apiVehicleLicense {
			t.Fatalf("Except to path '%s',get '%s'", apiVehicleLicense, path)
		}
		if err := r.ParseForm(); err != nil {
			t.Fatal(err)
		}
		queries := []string{"type", "access_token", "img_url"}
		for _, v := range queries {
			content := r.Form.Get(v)
			if content == "" {
				t.Fatalf("Params [%s] can not be empty", v)
			}
		}
		// Fix: set headers before WriteHeader — changes made afterwards are
		// silently ignored by net/http.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		raw := `{
"vhicle_type": "小型普通客⻋",
"owner": "东莞市xxxxx机械厂",
"addr": "广东省东莞市xxxxx号",
"use_character": "非营运",
"model": "江淮牌HFCxxxxxxx",
"vin": "LJ166xxxxxxxx51",
"engine_num": "J3xxxxx3",
"register_date": "2018-07-06",
"issue_date": "2018-07-01",
"plate_num_b": "粤xxxxx",
"record": "441xxxxxx3",
"passengers_num": "7人",
"total_quality": "2700kg",
"prepare_quality": "1995kg"
}`
		if _, err := w.Write([]byte(raw)); err != nil {
			t.Fatal(err)
		}
	})
	server.HandleFunc("/mediaurl", func(w http.ResponseWriter, r *http.Request) {
		filename := testIMGName
		file, err := os.Open(filename)
		if err != nil {
			t.Fatal(err)
		}
		defer file.Close()
		ext := path.Ext(filename)
		ext = ext[1:] // drop the leading dot
		w.Header().Set("Content-Type", "image/"+ext)
		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", path.Base(filename)))
		w.WriteHeader(http.StatusOK)
		if _, err := io.Copy(w, file); err != nil {
			t.Fatal(err)
		}
	})
	ts := httptest.NewServer(server)
	defer ts.Close()
	if _, err := vehicleLicenseByURL(ts.URL+apiVehicleLicense, "mock-access-token", ts.URL+"/mediaurl", RecognizeModePhoto); err != nil {
		t.Fatal(err)
	}
	if _, err := vehicleLicenseByURL(ts.URL+apiVehicleLicense, "mock-access-token", ts.URL+"/mediaurl", RecognizeModeScan); err != nil {
		t.Fatal(err)
	}
}
// TestVehicleLicense exercises vehicleLicense (multipart file upload)
// against a mocked OCR endpoint for both recognize modes.
func TestVehicleLicense(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "POST" {
			t.Fatalf("Expect 'POST' get '%s'", r.Method)
		}
		path := r.URL.EscapedPath()
		if path != apiVehicleLicense {
			t.Fatalf("Except to path '%s',get '%s'", apiVehicleLicense, path)
		}
		if err := r.ParseForm(); err != nil {
			t.Fatal(err)
		}
		queries := []string{"type", "access_token"}
		for _, v := range queries {
			content := r.Form.Get(v)
			if content == "" {
				t.Fatalf("Params [%s] can not be empty", v)
			}
		}
		if _, _, err := r.FormFile("img"); err != nil {
			t.Fatal(err)
		}
		// Fix: set headers before WriteHeader — changes made afterwards are
		// silently ignored by net/http.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		raw := `{
"vhicle_type": "小型普通客⻋",
"owner": "东莞市xxxxx机械厂",
"addr": "广东省东莞市xxxxx号",
"use_character": "非营运",
"model": "江淮牌HFCxxxxxxx",
"vin": "LJ166xxxxxxxx51",
"engine_num": "J3xxxxx3",
"register_date": "2018-07-06",
"issue_date": "2018-07-01",
"plate_num_b": "粤xxxxx",
"record": "441xxxxxx3",
"passengers_num": "7人",
"total_quality": "2700kg",
"prepare_quality": "1995kg"
}`
		if _, err := w.Write([]byte(raw)); err != nil {
			t.Fatal(err)
		}
	}))
	defer ts.Close()
	if _, err := vehicleLicense(ts.URL+apiVehicleLicense, "mock-access-token", testIMGName, RecognizeModePhoto); err != nil {
		t.Fatal(err)
	}
	if _, err := vehicleLicense(ts.URL+apiVehicleLicense, "mock-access-token", testIMGName, RecognizeModeScan); err != nil {
		t.Fatal(err)
	}
}
|
package models
import (
"fmt"
"strings"
"testing"
"bytes"
"github.com/ONSdigital/dp-map-renderer/testdata"
. "github.com/smartystreets/goconvey/convey"
)
// reader is a stub io.Reader whose Read call always fails; it is used to
// exercise body-read error paths in the request constructors.
type reader struct{}

// Read reads nothing and always reports an error.
func (r reader) Read(p []byte) (int, error) {
	return 0, fmt.Errorf("Reader failed")
}
// TestCreateRenderRequestFromFile checks that the example request body
// parses into a RenderRequest that passes validation and carries the
// expected title and filename.
func TestCreateRenderRequestFromFile(t *testing.T) {
	Convey("When a render request is passed, a valid struct is returned", t, func() {
		reader := bytes.NewReader(testdata.LoadExampleRequest(t))
		request, err := CreateRenderRequest(reader)
		So(err, ShouldBeNil)
		So(request.ValidateRenderRequest(), ShouldBeNil)
		So(request.Title, ShouldEqual, "Non-UK born population, Great Britain, 2015")
		So(request.Filename, ShouldEqual, "abcd1234")
	})
}
// TestCreateRenderRequestWithNoBody checks the error paths: a failing
// reader yields ErrorReadingBody, and an empty JSON object yields
// ErrorNoData while still returning a non-nil request.
func TestCreateRenderRequestWithNoBody(t *testing.T) {
	Convey("When a render request has no body, an error is returned", t, func() {
		_, err := CreateRenderRequest(reader{})
		So(err, ShouldNotBeNil)
		So(err, ShouldEqual, ErrorReadingBody)
	})
	Convey("When a render request has an empty body, an error is returned", t, func() {
		filter, err := CreateRenderRequest(strings.NewReader("{}"))
		So(err, ShouldNotBeNil)
		So(err, ShouldResemble, ErrorNoData)
		So(filter, ShouldNotBeNil)
	})
}
// TestCreateRenderRequestWithInvalidJSON checks that truncated JSON input
// surfaces the decoder's "unexpected end of ..." error.
func TestCreateRenderRequestWithInvalidJSON(t *testing.T) {
	Convey("When a render request contains json with an invalid syntax, and error is returned", t, func() {
		_, err := CreateRenderRequest(strings.NewReader(`{"foo`))
		So(err, ShouldNotBeNil)
		So(err.Error(), ShouldContainSubstring, "unexpected end of")
	})
}
// TestValidateRenderRequestRejectsMissingFields checks that validation
// names each missing mandatory field, both for a zero-value request and for
// a valid request whose geography fields are blanked out.
func TestValidateRenderRequestRejectsMissingFields(t *testing.T) {
	Convey("When a Render request has missing fields, an error is returned", t, func() {
		request := RenderRequest{}
		err := request.ValidateRenderRequest()
		So(err, ShouldNotBeNil)
		So(err.Error(), ShouldContainSubstring, "Missing mandatory field(s)")
		So(err.Error(), ShouldContainSubstring, "geography")
		So(err.Error(), ShouldContainSubstring, "data")
	})
	Convey("When a Render request has missing geography fields, an error is returned", t, func() {
		reader := bytes.NewReader(testdata.LoadExampleRequest(t))
		request, _ := CreateRenderRequest(reader)
		request.Geography.Topojson = nil
		request.Geography.IDProperty = ""
		err := request.ValidateRenderRequest()
		So(err, ShouldNotBeNil)
		So(err.Error(), ShouldContainSubstring, "Missing mandatory field(s)")
		So(err.Error(), ShouldContainSubstring, "geography.topojson")
		So(err.Error(), ShouldContainSubstring, "geography.id_property")
	})
}
// TestCreateAnalyseRequestFromFile checks that the example analyse request
// parses into a valid AnalyseRequest with geography and CSV data present.
func TestCreateAnalyseRequestFromFile(t *testing.T) {
	Convey("When an analyse request is passed, a valid struct is returned", t, func() {
		reader := bytes.NewReader(testdata.LoadExampleAnalyseRequest(t))
		request, err := CreateAnalyseRequest(reader)
		So(err, ShouldBeNil)
		So(request.ValidateAnalyseRequest(), ShouldBeNil)
		So(request.Geography, ShouldNotBeNil)
		So(len(request.CSV), ShouldBeGreaterThan, 0)
	})
}
// TestCreateAnalyseRequestWithNoBody checks the error paths: a failing
// reader yields ErrorReadingBody, and an empty JSON object yields
// ErrorNoData while still returning a non-nil request.
func TestCreateAnalyseRequestWithNoBody(t *testing.T) {
	Convey("When an analyse request has no body, an error is returned", t, func() {
		_, err := CreateAnalyseRequest(reader{})
		So(err, ShouldNotBeNil)
		So(err, ShouldEqual, ErrorReadingBody)
	})
	Convey("When an analyse request has an empty body, an error is returned", t, func() {
		filter, err := CreateAnalyseRequest(strings.NewReader("{}"))
		So(err, ShouldNotBeNil)
		So(err, ShouldResemble, ErrorNoData)
		So(filter, ShouldNotBeNil)
	})
}
func TestCreateAnalyseRequestWithInvalidJSON(t *testing.T) {
Convey("When an analyse request contains json with an invalid syntax, an error is returned", t, func() {
_, err := CreateAnalyseRequest(strings.NewReader(`{"foo`))
So(err, ShouldNotBeNil)
So(err.Error(), ShouldResemble, "unexpected end of JSON input")
})
}
// TestValidateAnalyseRequestRejectsMissingFields checks that validation
// names the missing top-level fields, and the missing geography sub-fields
// when a geography struct is present but empty.
func TestValidateAnalyseRequestRejectsMissingFields(t *testing.T) {
Convey("When an analyse request has missing fields, an error is returned", t, func() {
request := AnalyseRequest{}
err := request.ValidateAnalyseRequest()
So(err, ShouldNotBeNil)
So(err.Error(), ShouldContainSubstring, "Missing mandatory field(s)")
So(err.Error(), ShouldContainSubstring, "geography")
So(err.Error(), ShouldContainSubstring, "csv")
})
Convey("When an analyse request has missing geography fields, an error is returned", t, func() {
// csv is populated here, so it must NOT be reported as missing.
request := AnalyseRequest{Geography: &Geography{}, CSV: "foo,bar"}
err := request.ValidateAnalyseRequest()
So(err, ShouldNotBeNil)
So(err.Error(), ShouldContainSubstring, "Missing mandatory field(s)")
So(err.Error(), ShouldNotContainSubstring, "csv")
So(err.Error(), ShouldContainSubstring, "geography.topojson")
So(err.Error(), ShouldContainSubstring, "geography.id_property")
})
}
// TestValidateAnalyseRequestRejectsInvalidValues checks that negative column
// indexes and identical id/value indexes are rejected.
func TestValidateAnalyseRequestRejectsInvalidValues(t *testing.T) {
Convey("When an analyse request has indexes below zero, an error is returned", t, func() {
reader := bytes.NewReader(testdata.LoadExampleAnalyseRequest(t))
request, _ := CreateAnalyseRequest(reader)
request.ValueIndex = -1
request.IDIndex = -2
err := request.ValidateAnalyseRequest()
So(err, ShouldNotBeNil)
So(err.Error(), ShouldContainSubstring, "value_index")
So(err.Error(), ShouldContainSubstring, "id_index")
})
Convey("When an analyse request has the same value for value and id indexes, an error is returned", t, func() {
reader := bytes.NewReader(testdata.LoadExampleAnalyseRequest(t))
request, _ := CreateAnalyseRequest(reader)
request.ValueIndex = 0
request.IDIndex = 0
err := request.ValidateAnalyseRequest()
So(err, ShouldNotBeNil)
So(err.Error(), ShouldContainSubstring, "id_index and value_index cannot refer to the same column")
})
}
|
package main
import "fmt"
import "os"
import "math"
import "math/rand"
import "strconv"
// randTheta returns a pseudo-random value in [0.0, 1.0), used by the walk
// as a fraction of a full turn when picking a step direction.
func randTheta() float64 {
	return rand.Float64()
}
// randomwalk simulates n steps of length d inside a w-by-h box, starting at
// the box centre. Each step component is re-drawn until the step stays
// inside the box. Every intermediate position is printed, followed by the
// straight-line distance between the final position and the starting point.
func randomwalk(w int, h float64, d float64, n float64) {
	width := float64(w)
	// Starting position: the centre of the box (the original computed these
	// as a and b but then never used them).
	startX, startY := width/2, h/2
	x, y := startX, startY
	for i := 0.0; i < n; i++ {
		// Re-draw the x component until the step stays within [0, width).
		dx := d * math.Cos(randTheta()*2*math.Pi)
		for x+dx < 0 || x+dx >= width {
			dx = d * math.Cos(randTheta()*2*math.Pi)
		}
		x = x + dx
		// Re-draw the y component until the step stays within [0, h).
		dy := d * math.Sin(randTheta()*2*math.Pi)
		for y+dy < 0 || y+dy >= h {
			dy = d * math.Sin(randTheta()*2*math.Pi)
		}
		y = y + dy
		fmt.Println(x, y)
	}
	// BUG FIX: the original measured the distance to the far corner (c, h).
	// The displacement of a random walk is measured from its starting point,
	// which is what the unused a/b variables were clearly set up for.
	distance := math.Sqrt(math.Pow(x-startX, 2) + math.Pow(y-startY, 2))
	fmt.Println("distance=", distance)
}
// main parses the five positional arguments (width, height, step length,
// step count, RNG seed), rejects non-positive values, seeds the global RNG
// and runs the walk.
func main() {
	w, _ := strconv.Atoi(os.Args[1])
	h, _ := strconv.ParseFloat(os.Args[2], 64)
	d, _ := strconv.ParseFloat(os.Args[3], 64)
	n, _ := strconv.ParseFloat(os.Args[4], 64)
	s, _ := strconv.ParseInt(os.Args[5], 10, 64)
	if w <= 0 || h <= 0 || d <= 0 || n <= 0 || s <= 0 {
		fmt.Println("error:argument is zero or negative")
		os.Exit(0)
	}
	rand.Seed(s)
	randomwalk(w, h, d, n)
}
|
/*
* @lc app=leetcode.cn id=85 lang=golang
*
* [85] 最大矩形
*/
package main
import (
"fmt"
)
// @lc code=start
// min returns the smaller of two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// max returns the larger of two ints.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
/*
将每一列高度统计,将参数传递给84题
func largestRectangleArea(heights []int) int {
var maxArea, left, right, cur, curHeight, width int
stack := []int{}
for i, v := range heights {
for len(stack) > 0 && stack[len(stack)-1] < v {
cur = stack[len(stack)-1]
stack = stack[:len(stack)-1]
right = i
left = stack[len(stack)-1]
width = right - left - 1
curHeight = heights[cur]
maxArea = max(maxArea, curHeight*width)
}
stack = append(stack, i)
}
return maxArea
}
func maximalRectangle1(martix [][]byte) int {
heights := make([]int, len(martix))
var maxArea int
for i := range martix {
for j, col := range martix[i] {
if col == '1' {
heights[j] += 1
}
}
maxArea = max(maxArea, largestRectangleArea(heights))
}
return maxArea
} */
// maximalRectangle returns the area of the largest rectangle containing
// only '1' cells in a binary matrix.
//
// For every cell it records how many consecutive '1's end at that cell on
// its row (the left table), then scans upwards from the cell, shrinking the
// usable width to the smallest run seen, keeping the best area found.
func maximalRectangle(matrix [][]byte) int {
	if len(matrix) == 0 {
		return 0
	}
	rows, cols := len(matrix), len(matrix[0])
	// left[r][c] = number of consecutive '1's ending at column c of row r.
	left := make([][]int, rows)
	best := 0
	for r := 0; r < rows; r++ {
		left[r] = make([]int, cols)
		for c := 0; c < cols; c++ {
			if matrix[r][c] == '0' {
				continue
			}
			if c > 0 {
				left[r][c] = left[r][c-1] + 1
			} else {
				left[r][c] = 1
			}
			width := left[r][c]
			area := width
			// Grow the rectangle upwards; a row with no run ends it.
			for k := r - 1; k >= 0; k-- {
				if left[k][c] == 0 {
					break
				}
				width = min(width, left[k][c])
				area = max(area, (r-k+1)*width)
			}
			best = max(best, area)
		}
	}
	return best
}
// @lc code=end
// main runs a tiny smoke check against a 1x1 all-zero matrix (expects 0).
func main() {
	grid := [][]byte{{'0'}}
	fmt.Println(maximalRectangle(grid))
}
|
/**
* Problem from leetcode.com
*
* Balanced Binary Tree
*
* Given a binary tree, determine if it is height-balanced.
*
* For this problem, a height-balanced binary tree is defined as a binary tree
* in which the depth of the two subtrees of every node never differ by more than 1.
*
*/
/**
* Definition for a binary tree node.
*/
// TreeNode is a node of a binary tree with integer values.
type TreeNode struct {
Val int
Left *TreeNode
Right *TreeNode
}
/**
* Naive approach: Top down approach, traversing the tree BFS, O(N^2)
*/
// depthBFS returns the height of the tree rooted at root (0 for an empty
// tree), recursing top-down into both subtrees.
//
// BUG FIX: the original called an undefined helper `depth`; the recursion
// must call depthBFS itself.
func depthBFS(root *TreeNode) float64 {
	// Fail fast: a tree with no nodes has no depth.
	if root == nil {
		return 0
	}
	return math.Max(depthBFS(root.Left), depthBFS(root.Right)) + 1
}
// isBalancedNaive reports whether the tree is height-balanced using the
// O(N^2) top-down definition: both subtrees are balanced and their depths
// differ by at most one.
//
// BUG FIX: the original called undefined helpers `isBalanced` and `depth`;
// the intended callees are isBalancedNaive (self-recursion) and depthBFS.
func isBalancedNaive(root *TreeNode) bool {
	// Fail fast: an empty tree is balanced.
	if root == nil {
		return true
	}
	return isBalancedNaive(root.Left) &&
		isBalancedNaive(root.Right) &&
		math.Abs(depthBFS(root.Left)-depthBFS(root.Right)) <= 1
}
/**
* Smart approach: Bottom up approach, traversing the tree DFS, O(N)
*/
// depthDFS returns the height of the tree, or -1 as soon as any subtree is
// found to be unbalanced — the O(N) bottom-up check.
//
// BUG FIX: the original called an undefined helper `depth`; the recursion
// must call depthDFS itself so the -1 sentinel propagates upwards.
func depthDFS(root *TreeNode) float64 {
	if root == nil {
		return 0
	}
	left := depthDFS(root.Left)
	if left == -1 {
		return -1
	}
	right := depthDFS(root.Right)
	if right == -1 {
		return -1
	}
	if math.Abs(left-right) > 1 {
		return -1
	}
	return math.Max(left, right) + 1
}
// isBalancedSmart reports whether the tree is height-balanced in O(N) by
// delegating to depthDFS, whose -1 return marks an unbalanced subtree.
// BUG FIX: the original called the undefined helper `depth`.
func isBalancedSmart(root *TreeNode) bool { return depthDFS(root) != -1 }
|
package store
import (
"github.com/aws/aws-sdk-go/service/s3"
"github.com/tobyjsullivan/event-store.v3/events"
"github.com/aws/aws-sdk-go/aws"
"encoding/base64"
"bytes"
"encoding/json"
)
// Store persists events as JSON objects in a single S3 bucket, one object
// per event, keyed by the event ID.
type Store struct {
s3svc *s3.S3
bucket string
}
// NewS3Store returns a Store that writes to the given bucket using svc.
func NewS3Store(svc *s3.S3, bucket string) *Store {
return &Store{
s3svc: svc,
bucket: bucket,
}
}
// eventFormat is the JSON layout of an event as stored in S3.
type eventFormat struct {
Prev string `json:"previous"` // ID of the previous event in the chain
Type string `json:"type"`
Data string `json:"data"` // event payload, base64-encoded
}
// Save serializes the event as JSON (with its payload base64-encoded) and
// writes it to the bucket under the event's ID.
func (s *Store) Save(e *events.Event) error {
	record := &eventFormat{
		Prev: e.PreviousEvent.String(),
		Type: e.Type,
		Data: base64.StdEncoding.EncodeToString(e.Data),
	}
	// Encode (rather than Marshal) so the stored object keeps the trailing
	// newline the original wrote.
	var payload bytes.Buffer
	if err := json.NewEncoder(&payload).Encode(record); err != nil {
		return err
	}
	id := e.ID()
	_, err := s.s3svc.PutObject(&s3.PutObjectInput{
		Body:   bytes.NewReader(payload.Bytes()),
		Bucket: aws.String(s.bucket),
		Key:    aws.String(id.String()),
	})
	return err
}
|
package main
import (
"encoding/json"
"fmt"
)
// person demonstrates JSON struct tags: fields must be exported to be
// (un)marshalled, and the tags supply the lowercase JSON key names.
type person struct {
Name string `json:"name"`
Age int `json:"age"`
}
// main demonstrates JSON (de)serialization of a struct, an anonymous struct
// literal, and mutation of a variable through a pointer.
func main() {
	p1 := person{Name: "周林", Age: 9000}
	// Serialize p1 to JSON.
	encoded, _ := json.Marshal(p1)
	fmt.Println(string(encoded))
	// Deserialize: a pointer is passed so Unmarshal can fill p2 in place.
	str := `{"name":"理想","age":18}`
	var p2 person
	json.Unmarshal([]byte(str), &p2)
	fmt.Println(p2)
	// Anonymous struct literal (unexported fields, not JSON-related).
	a := struct {
		x int
		y int
	}{10, 20}
	fmt.Println(a)
	// Writing through a pointer changes the pointee.
	a1 := 100
	b1 := &a1
	fmt.Println(a1, b1)
	*b1 = 20
	fmt.Println(a1)
}
|
package crawlers
// InfoarenaCrawler is a placeholder crawler type; it carries no state and
// has no behaviour yet.
type InfoarenaCrawler struct {
}
|
package utils
import (
"errors"
"net"
"net/http"
"strconv"
"time"
)
var (
// NotStartedErr is returned by Stop when the server was never started.
// NOTE(review): Go convention would name this ErrNotStarted; renaming the
// exported identifier would break callers, so it is left as-is.
NotStartedErr = errors.New("not started")
)
// Service is anything that can expose an http.Handler to be served.
type Service interface {
Router() http.Handler
}
// Options carries the HTTP server read/write timeouts.
type Options struct {
ReadTimeout time.Duration
WriteTimeout time.Duration
}
// defaultOpts is used by NewDefaultServer: one-second read/write timeouts.
var defaultOpts = &Options{
ReadTimeout: time.Second,
WriteTimeout: time.Second,
}
// Server manages the lifecycle of an HTTP server wrapped around a Service.
type Server interface {
// StartOnPort starts the server on the specified port, creating and managing a listener for it
StartOnPort(int) error
// StartAnywhere starts the server on _any_ available port, and returns the port to the caller
StartAnywhere() (int, error)
// StartListener accepts a listener and starts the service running on it
StartListener(net.Listener) error
// GetPort returns the port that this server is listening on
GetPort() (int, error)
// Stop closes the connections and rejects connections immediately
Stop() error
}
// NewDefaultServer returns a Server for service with default options
// (one-second read/write timeouts). doneCh is buffered so the serving
// goroutine can deliver its result even if Stop is never called.
func NewDefaultServer(service Service) Server {
return &server{
opts: defaultOpts,
service: service,
doneCh: make(chan error, 1),
}
}
// NewServer returns a Server for service with the supplied options.
func NewServer(service Service, opts *Options) Server {
	// BUG FIX: doneCh was left nil, unlike in NewDefaultServer. With a nil
	// channel the StartListener goroutine blocks forever trying to send the
	// run() result, and Stop deadlocks waiting to receive it.
	return &server{
		opts:    opts,
		service: service,
		doneCh:  make(chan error, 1),
	}
}
// server is the concrete Server implementation.
// NOTE(review): started is read/written without synchronization; the API
// appears intended for single-goroutine start/stop use — confirm.
type server struct {
opts *Options
service Service
listener net.Listener
started bool
doneCh chan error // receives the result of run(); buffered (size 1)
}
// StartOnPort opens a TCP listener on the given port and starts serving
// on it.
func (s *server) StartOnPort(port int) error {
	addr := ":" + strconv.Itoa(port)
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	return s.StartListener(listener)
}
// StartAnywhere starts the server on any free port (":0") and returns the
// port the OS actually assigned.
func (s *server) StartAnywhere() (int, error) {
	listener, err := net.Listen("tcp", ":0")
	if err != nil {
		return 0, err
	}
	port, err := GetListenerPort(listener)
	if err != nil {
		listener.Close()
		return 0, err
	}
	if err := s.StartListener(listener); err != nil {
		// BUG FIX: close the listener on this failure path too (mirroring
		// the GetListenerPort error path) so the socket is not leaked.
		listener.Close()
		return 0, err
	}
	return port, nil
}
// StartListener stores the listener and begins serving on it in a new
// goroutine; the goroutine's final error is delivered on doneCh for Stop
// to collect. Always returns nil.
func (s *server) StartListener(listener net.Listener) error {
s.listener = listener
go func() {
s.doneCh <- s.run()
}()
s.started = true
return nil
}
// Stop stops the currently listening server if it has been started
func (s *server) Stop() error {
if !s.started {
return NotStartedErr
}
s.started = false
// close the listener and then wait for any returned errors from the
// server; returning the listener error if it occurs
err := s.listener.Close()
// The serving goroutine exits once the listener closes; its error (if
// any) takes precedence over the Close error.
if e := <-s.doneCh; e != nil {
return e
}
return err
}
// run serves HTTP on the stored listener until it is closed, applying the
// configured read/write timeouts.
func (s *server) run() error {
	// http.Server accepts a *net.TCPListener; cast the Listener to the
	// explicit type to create an http.Server listener.
	if _, ok := s.listener.(*net.TCPListener); !ok {
		return errors.New("programming error; invalid listener")
	}
	srv := &http.Server{
		Handler: s.service.Router(),
	}
	// BUG FIX: the Options carried by the server (read/write timeouts) were
	// stored but never applied, so defaultOpts and NewServer options had no
	// effect. Guard against a nil opts from NewServer(service, nil).
	if s.opts != nil {
		srv.ReadTimeout = s.opts.ReadTimeout
		srv.WriteTimeout = s.opts.WriteTimeout
	}
	return srv.Serve(s.listener)
}
// GetPort returns the port the stored listener is bound to, delegating to
// the package helper GetListenerPort.
func (s *server) GetPort() (int, error) {
return GetListenerPort(s.listener)
}
|
package main
//思路:push时,先将x加入q2,然后将q1元素依次加入q2,再把q2元素一次加入q1
//pop,top直接取q1即可
// MyStack implements a LIFO stack on top of two queue slices: q1 always
// holds the stack contents with the top at index 0; q2 is scratch space
// used while pushing.
type MyStack struct {
q1 []int
q2 []int
}
// Constructor returns an empty MyStack with both backing queues allocated.
func Constructor() MyStack {
	return MyStack{
		q1: make([]int, 0),
		q2: make([]int, 0),
	}
}
// Push pushes x onto the stack: x is enqueued into q2 first, the old
// contents of q1 follow it, then everything is moved back into q1 so the
// newest element always sits at q1[0].
func (this *MyStack) Push(x int) {
	this.q2 = append(this.q2, x)
	this.q2 = append(this.q2, this.q1...)
	this.q1 = this.q1[:0]
	this.q1 = append(this.q1, this.q2...)
	this.q2 = this.q2[:0]
}
// Pop removes and returns the element on top of the stack (q1[0]).
func (this *MyStack) Pop() int {
	top := this.q1[0]
	this.q1 = this.q1[1:]
	return top
}
/** Get the top element. */
func (this *MyStack) Top() int {
// The top of the stack is always the front of q1.
return this.q1[0]
}
/** Returns whether the stack is empty. */
func (this *MyStack) Empty() bool {
return len(this.q1) == 0
}
|
package server
import "server/libs/log"
// Scheduler is a periodically updated task registered with the Kernel.
type Scheduler interface {
SetSchedulerID(id int32)
GetSchedulerID() int32
OnUpdate()
}
// SchedulerBase provides the ID bookkeeping half of Scheduler; embed it and
// implement OnUpdate to obtain a full Scheduler.
type SchedulerBase struct {
id int32
}
// SetSchedulerID records the ID assigned by the kernel.
func (sb *SchedulerBase) SetSchedulerID(id int32) {
sb.id = id
}
// GetSchedulerID returns the ID assigned by the kernel.
func (sb *SchedulerBase) GetSchedulerID() int32 {
return sb.id
}
// AddScheduler registers s under a freshly allocated ID; nil is ignored.
func (k *Kernel) AddScheduler(s Scheduler) {
	if s == nil {
		return
	}
	k.schedulerid++
	s.SetSchedulerID(k.schedulerid)
	k.scheduler[k.schedulerid] = s
	log.LogDebug("add scheduler:", k.schedulerid)
}

// GetScheduler returns the scheduler registered under id, or nil when
// there is none.
func (k *Kernel) GetScheduler(id int32) Scheduler {
	s, ok := k.scheduler[id]
	if !ok {
		return nil
	}
	return s
}

// RemoveScheduler unregisters s and resets its ID to -1; nil or unknown
// schedulers are ignored.
func (k *Kernel) RemoveScheduler(s Scheduler) {
	if s == nil {
		return
	}
	id := s.GetSchedulerID()
	if _, ok := k.scheduler[id]; !ok {
		return
	}
	delete(k.scheduler, id)
	log.LogDebug("remove scheduler:", id, " total:", len(k.scheduler))
	s.SetSchedulerID(-1)
}

// RemoveSchedulerById unregisters the scheduler with the given id, if any.
func (k *Kernel) RemoveSchedulerById(id int32) {
	if _, ok := k.scheduler[id]; !ok {
		return
	}
	delete(k.scheduler, id)
	log.LogDebug("remove scheduler:", id, " total:", len(k.scheduler))
}

// OnUpdate ticks every registered scheduler once per kernel update.
func (k *Kernel) OnUpdate() {
	for _, s := range k.scheduler {
		s.OnUpdate()
	}
}
|
package timingwheel
import (
"context"
"sync"
"sync/atomic"
"time"
"unsafe"
)
// WithTimeout returns a context value with time.Now().Add(timeout) as deadline.
//
// If the provided timeout is greater than 80 seconds, or the parent context
// already has deadline setup, this function simply calls
// `context.WithDeadline(parent, time.Now().Add(timeout))`.
//
// Otherwise, it uses a shared timing wheel in underlying to achieve high
// performance when managing massive contexts with deadlines. The timing
// wheel's tick interval is 10 milliseconds, thus the precision of the
// timeout is roughly ±10 milliseconds. When shared timing wheel
// implementation is used, the returned context does not support canceling,
// and does not propagate.
//
// NOTE: this context implementation is not intended to replace usage of the
// standard context package. Generally, the standard context package is
// preferred over this in most use cases, only use this where you are managing
// truly massive contexts, and super high performance really outweighted
// functionality and compatibility provided by the standard context package.
func WithTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
// Timeouts beyond the wheel's ~80s horizon fall back to the stdlib.
if timeout > ctxMaxTimeout {
if parent == nil {
parent = context.Background()
}
return context.WithDeadline(parent, time.Now().Add(timeout))
}
// The parent may already have a deadline set up; defer to the stdlib so
// its propagation semantics are preserved.
if parent != nil {
if _, ok := parent.Deadline(); ok {
return context.WithDeadline(parent, time.Now().Add(timeout))
}
}
// lazy initialization
initCtxWheel()
deadline := time.Now().Add(timeout)
// Contexts expiring within the same 10ms tick share one done channel.
done := ctxWheel.getOrCreateTimeoutChan(timeout)
ctx := &timeoutCtx{
parent: parent,
deadline: deadline,
done: done,
}
// The returned CancelFunc is a no-op: wheel contexts cannot be canceled.
return ctx, emptyCancel
}
// initCtxWheel lazily creates the shared wheel and starts its ticker
// goroutine exactly once.
func initCtxWheel() {
ctxOnce.Do(func() {
ctxWheel = &contextWheel{}
go ctxWheel.run()
})
}
const (
ctxWheelSize = 8192 // 81.92 seconds
ctxWheelMask = ctxWheelSize - 1 // power-of-two size, so & acts as modulo
ctxTickInterval = 10 * time.Millisecond
ctxMaxTimeout = 80 * time.Second // larger timeouts bypass the wheel
)
var (
ctxOnce sync.Once
ctxWheel *contextWheel // shared wheel, created by initCtxWheel
emptyCancel = func() {} // no-op CancelFunc handed out with wheel contexts
)
// contextWheel is a fixed-size timing wheel; each bucket lazily holds a
// *chan struct{} that run() closes when the bucket's tick comes around.
type contextWheel struct {
jiffies int64 // current wheel position; written by run(), read atomically
buckets [ctxWheelSize]unsafe.Pointer
}
// run drives the wheel forever on its own goroutine: every tick it expires
// the current bucket by stealing its channel pointer and closing it, waking
// every context parked on that bucket, then advances the published position.
func (p *contextWheel) run() {
var jiffies int64
var ticker = time.NewTicker(ctxTickInterval)
for range ticker.C {
idx := jiffies & ctxWheelMask
// Swap the bucket to nil so a concurrent getOrCreateTimeoutChan cannot
// hand out a channel that is about to be closed.
chptr := atomic.SwapPointer(&p.buckets[idx], nil)
if chptr != nil {
done := *(*chan struct{})(chptr)
close(done)
}
jiffies += 1
// Publish the new position for getOrCreateTimeoutChan readers.
atomic.StoreInt64(&p.jiffies, jiffies)
}
}
// getOrCreateTimeoutChan returns the shared done channel for the bucket
// that expires `timeout` from now, creating it with a lock-free CAS loop
// when it does not exist yet.
func (p *contextWheel) getOrCreateTimeoutChan(timeout time.Duration) <-chan struct{} {
// Number of whole ticks until expiry; subtracting one biases toward
// firing no later than the requested timeout.
expires := int64(timeout / ctxTickInterval)
if expires > 0 {
expires -= 1
}
for {
jiffies := atomic.LoadInt64(&p.jiffies)
idx := (jiffies + expires) & ctxWheelMask
ch := atomic.LoadPointer(&p.buckets[idx])
if ch != nil {
return *(*chan struct{})(ch)
}
newCh := make(chan struct{})
// Install our channel only if the bucket is still empty; on a lost
// race, loop and pick up whichever channel won.
swapped := atomic.CompareAndSwapPointer(&p.buckets[idx], nil, unsafe.Pointer(&newCh))
if swapped {
return newCh
}
}
}
// timeoutCtx implements the context.Context interface.
// It carries only a deadline and a shared done channel; it does not support
// cancelation and does not propagate Done/Err from its parent.
type timeoutCtx struct {
parent context.Context
deadline time.Time
done <-chan struct{} // closed by the wheel when the deadline tick fires
}
// Deadline always reports a deadline, so ok is always true.
func (c *timeoutCtx) Deadline() (deadline time.Time, ok bool) {
return c.deadline, true
}
// Done returns the shared channel closed at (roughly) the deadline.
func (c *timeoutCtx) Done() <-chan struct{} {
return c.done
}
// Err returns context.DeadlineExceeded once the deadline has passed and nil
// before that, per the context.Context contract.
//
// BUG FIX: the original did a blocking receive (`_, ok := <-c.done`), which
// stalls every caller until the deadline fires — and then, because a receive
// from a closed channel yields ok == false, it returned nil anyway. A
// non-blocking select on the closed-channel signal is required.
func (c *timeoutCtx) Err() error {
	select {
	case <-c.done:
		return context.DeadlineExceeded
	default:
		return nil
	}
}
// Value delegates to the parent context, if any.
func (c *timeoutCtx) Value(key interface{}) interface{} {
if c.parent == nil {
return nil
}
return c.parent.Value(key)
}
|
package zseek
import (
"io"
"io/ioutil"
"os"
"testing"
)
// testSetup creates a ZSeek over a fresh temp file and returns it together
// with a cleanup func that closes both and removes the file. Setup failures
// skip the test rather than failing it.
func testSetup(t *testing.T) (z *ZSeek, cleanup func()) {
	f, err := ioutil.TempFile(os.TempDir(), "zseek")
	if err != nil {
		// t.Skip already stops the test via runtime.Goexit; the original's
		// extra t.SkipNow() after it was unreachable and has been removed.
		t.Skip("temp file creation failed: ", err)
	}
	z, err = New(f)
	if err != nil {
		t.Skip("New failed: ", err)
	}
	cleanup = func() {
		if err := z.Close(); err != nil {
			t.Error("closing ZSeek: ", err)
		}
		name := f.Name()
		if err := f.Close(); err != nil {
			t.Error("closing file: ", err)
		}
		if err := os.Remove(name); err != nil {
			t.Error("removing file: ", err)
		}
	}
	return
}
// TestEmpty verifies a brand-new ZSeek: reads report EOF (except a
// zero-length read), zero-offset seeks succeed at offset 0, and any seek
// that would leave offset 0 fails with ErrInvalidSeek.
func TestEmpty(t *testing.T) {
	z, cleanup := testSetup(t)
	defer cleanup()

	b := make([]byte, 10)
	n, err := z.Read(b)
	if n != 0 {
		t.Error("expected 0 bytes, but got ", n, " bytes")
	}
	if err != io.EOF {
		t.Error("expected io.EOF, but got ", err)
	}

	// A zero-length read must succeed without signalling EOF.
	n, err = z.Read(b[:0])
	if n != 0 {
		t.Error("expected 0 bytes, but got ", n, " bytes")
	}
	if err != nil {
		t.Error("expected nil, but got ", err)
	}

	whences := []int{SeekStart, SeekCur, SeekEnd}

	// Seeking by 0 from any origin stays at offset 0 and succeeds.
	for _, whence := range whences {
		o, err := z.Seek(0, whence)
		if o != 0 {
			t.Error("expected offset 0, but got offset ", o)
		}
		if err != nil {
			t.Error("expected nil, but got ", err)
		}
	}

	// Any non-zero target offset is invalid in an empty stream.
	for _, off := range []int64{-1, 1} {
		for _, whence := range whences {
			o, err := z.Seek(off, whence)
			if o != 0 {
				t.Error("expected offset 0, but got offset ", o)
			}
			if err != ErrInvalidSeek {
				t.Error("expected ErrInvalidSeek, but got ", err)
			}
		}
	}
}
// TestWrite writes 1 MiB, checks the resulting offsets, reads across the end
// of the stream, then flushes and reopens the underlying file to confirm a
// fresh ZSeek sees the same length and content.
func TestWrite(t *testing.T) {
	z, cleanup := testSetup(t)
	defer cleanup()
	b := make([]byte, 1024*1024)
	n, err := z.Write(b)
	if n != 1024*1024 {
		t.Error("expected ", 1024*1024, " bytes, but got ", n, " bytes")
	}
	if err != nil {
		t.Error("expected nil, but got ", err)
	}
	o, err := z.Seek(0, SeekCur)
	if o != 1024*1024 {
		t.Error("expected offset ", 1024*1024, ", but got offset ", o)
	}
	if err != nil {
		t.Error("expected nil, but got ", err)
	}
	o, err = z.Seek(-1, SeekCur)
	if o != 1024*1024-1 {
		t.Error("expected offset ", 1024*1024-1, ", but got offset ", o)
	}
	if err != nil {
		t.Error("expected nil, but got ", err)
	}
	// Exactly one byte remains before end of stream.
	n, err = z.Read(b)
	if n != 1 {
		t.Error("expected ", 1, " bytes, but got ", n, " bytes")
	}
	if err != nil {
		t.Error("expected nil, but got ", err)
	}
	n, err = z.Read(b)
	if n != 0 {
		// BUG FIX: the original message claimed 1 byte was expected, but
		// this read is at EOF and the condition above expects 0.
		t.Error("expected ", 0, " bytes, but got ", n, " bytes")
	}
	if err != io.EOF {
		t.Error("expected io.EOF, but got ", err)
	}
	o, err = z.Seek(0, SeekEnd)
	if o != 1024*1024 {
		t.Error("expected offset ", 1024*1024, ", but got offset ", o)
	}
	if err != nil {
		t.Error("expected nil, but got ", err)
	}
	err = z.Flush()
	if err != nil {
		t.Error("expected nil, but got ", err)
	}
	// Reopen over the same underlying file: the new ZSeek must see the data.
	z2, err := New(z.f)
	if err != nil {
		t.Error("expected nil, but got ", err)
	}
	o, err = z2.Seek(0, SeekEnd)
	if o != 1024*1024 {
		t.Error("expected offset ", 1024*1024, ", but got offset ", o)
	}
	if err != nil {
		t.Error("expected nil, but got ", err)
	}
	z2, err = New(z.f)
	if err != nil {
		t.Error("expected nil, but got ", err)
	}
	n, err = io.ReadFull(z2, b)
	if n != 1024*1024 {
		t.Error("expected ", 1024*1024, " bytes, but got ", n, " bytes")
	}
	if err != nil {
		t.Error("expected nil, but got ", err)
	}
	n, err = io.ReadFull(z2, b)
	if n != 0 {
		t.Error("expected ", 0, " bytes, but got ", n, " bytes")
	}
	if err != io.EOF {
		t.Error("expected io.EOF, but got ", err)
	}
}
|
package dbops
import (
"database/sql"
"fmt"
_ "github.com/go-sql-driver/mysql"
"log"
"time"
"video_server/api/defs"
"video_server/api/utils"
)
// 添加用户
// AddUserCredential inserts a (login_name, pwd) row into users.
func AddUserCredential(loginName string, pwd string) error {
	stmtIns, err := dbConn.Prepare("INSERT INTO users(login_name,pwd) VALUES(?,?)")
	if err != nil {
		return err
	}
	defer stmtIns.Close()
	// BUG FIX: the original executed the INSERT twice (a stray duplicated
	// Exec call inserted every user two times) and discarded the Exec error,
	// always returning nil.
	_, err = stmtIns.Exec(loginName, pwd)
	return err
}
// 获取用户信息
// GetUserCredential returns the stored password for loginName, or the empty
// string (with a nil error) when the user does not exist.
func GetUserCredential(loginName string) (string, error) {
	stmtOut, err := dbConn.Prepare("SELECT pwd FROM users where login_name = ? ")
	if err != nil {
		log.Printf("%s", err)
		return "", err
	}
	// BUG FIX: register the Close right after Prepare; the original deferred
	// it after the early `return "", err`, leaking the statement on scan
	// errors.
	defer stmtOut.Close()
	var pwd string
	err = stmtOut.QueryRow(loginName).Scan(&pwd)
	if err != nil && err != sql.ErrNoRows {
		return "", err
	}
	return pwd, nil
}
// 获取用户
// GetUser returns the user record for loginName, or (nil, nil) when no such
// user exists.
func GetUser(loginName string) (*defs.User, error) {
	stmtOut, err := dbConn.Prepare("select id,pwd from users where login_name=?")
	if err != nil {
		log.Printf("%s", err)
		return nil, err
	}
	// BUG FIX: close the statement on every path; the original registered
	// its defer only after all early returns, leaking it on errors.
	defer stmtOut.Close()
	var id int
	var pwd string
	err = stmtOut.QueryRow(loginName).Scan(&id, &pwd)
	if err == sql.ErrNoRows {
		// Absent user is not an error for callers.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return &defs.User{Id: id, LoginName: loginName, Pwd: pwd}, nil
}
// 删除用户
// DeleteUser removes the users row matching both loginName and pwd.
func DeleteUser(loginName string, pwd string) error {
	stmDel, err := dbConn.Prepare("DELETE FROM users where login_name=? and pwd=?")
	if err != nil {
		log.Printf("DeleteUser error:%s", err)
		return err
	}
	defer stmDel.Close()
	_, err = stmDel.Exec(loginName, pwd)
	// BUG FIX: the original tested `err != err` (always false), so Exec
	// failures were silently swallowed.
	if err != nil {
		return err
	}
	return nil
}
// 添加视频
// AddNewVideo creates a video_info row with a fresh UUID and a formatted
// display timestamp, returning the resulting VideoInfo.
func AddNewVideo(aid int, name string) (*defs.VideoInfo, error) {
	vid, err := utils.NewUUID()
	if err != nil {
		return nil, err
	}
	// Display timestamp uses Go's reference-time layout.
	t := time.Now()
	ctime := t.Format("Jan 02 2006, 15:04:05")
	stmtIns, err := dbConn.Prepare(
		`INSERT INTO video_info (id, author_id, name,display_ctime) VALUES (?,?,?,?)`)
	if err != nil {
		return nil, err
	}
	// BUG FIX: defer immediately after Prepare; the original registered it
	// after the Exec-error return, leaking the statement on failure.
	defer stmtIns.Close()
	_, err = stmtIns.Exec(vid, aid, name, ctime)
	if err != nil {
		return nil, err
	}
	return &defs.VideoInfo{Id: vid, AuthorId: aid, Name: name, DisplayCtime: ctime}, nil
}
//// 获取视频
// GetVideoInfo returns the video_info row for vid, or (nil, nil) when the
// video does not exist.
func GetVideoInfo(vid string) (*defs.VideoInfo, error) {
	stmtOut, err := dbConn.Prepare("select author_id,name,display_ctime from video_info where id =?")
	// BUG FIX: the original ignored the Prepare error; on failure stmtOut is
	// nil and the QueryRow call below panics.
	if err != nil {
		return nil, err
	}
	defer stmtOut.Close()
	var aid int
	var dct string
	var name string
	err = stmtOut.QueryRow(vid).Scan(&aid, &name, &dct)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return &defs.VideoInfo{Id: vid, AuthorId: aid, Name: name, DisplayCtime: dct}, nil
}
// 删除视频
// DeleteVideoInfo removes the video_info row with the given id.
func DeleteVideoInfo(vid string) error {
	stmtDel, err := dbConn.Prepare("delete from video_info where id=? ")
	if err != nil {
		return err
	}
	// Close the statement on every path, including the Exec-error return the
	// original's late defer missed.
	defer stmtDel.Close()
	_, err = stmtDel.Exec(vid)
	return err
}
// 添加评论
// AddNewComments inserts a comment row with a fresh UUID for video vid by
// author aid.
func AddNewComments(vid string, aid int, content string) error {
	id, err := utils.NewUUID()
	if err != nil {
		return err
	}
	stmtIns, err := dbConn.Prepare("INSERT INTO comments(id,video_id,author_id,content) values (?,?,?,?)")
	if err != nil {
		return err
	}
	// BUG FIX: defer immediately after Prepare; the original registered it
	// after the Exec-error return, leaking the statement on failure.
	defer stmtIns.Close()
	_, err = stmtIns.Exec(id, vid, aid, content)
	return err
}
// 查看全部评论
// ListComments returns the comments on video vid whose timestamps fall in
// the (from, to] unix-time window, joined with each author's login name.
func ListComments(vid string, from, to int) ([]*defs.Comment, error) {
	stmtOut, err := dbConn.Prepare(`SELECT comments.id,users.Login_name,comments.content FROM comments
inner join users on comments.author_id = users.id
where comments.video_id = ? and comments.time > FROM_UNIXTIME(?) and comments.time <= FROM_UNIXTIME(?)`)
	// BUG FIX: the original never checked the Prepare error; on failure
	// stmtOut is nil and Query panics.
	if err != nil {
		return nil, err
	}
	defer stmtOut.Close()
	var res []*defs.Comment
	rows, err := stmtOut.Query(vid, from, to)
	if err != nil {
		return res, err
	}
	// BUG FIX: the original never closed the result set, leaking it.
	defer rows.Close()
	for rows.Next() {
		var id, name, content string
		if err := rows.Scan(&id, &name, &content); err != nil {
			return res, err
		}
		c := &defs.Comment{Id: id, VideoId: vid, Author: name, Content: content}
		res = append(res, c)
	}
	// BUG FIX: surface iteration errors instead of silently returning a
	// truncated list.
	if err := rows.Err(); err != nil {
		return res, err
	}
	// NOTE(review): leftover debug print, kept because the file-level fmt
	// import has no other user; remove together with the import.
	fmt.Println(res)
	return res, nil
}
// 所有视频
// ListVideoInfo returns the videos owned by uname that were created in the
// (from, to] unix-time window, newest first.
func ListVideoInfo(uname string, from, to int) ([]*defs.VideoInfo, error) {
	// BUG FIX: the WHERE clause read "video_info,create_time" (comma instead
	// of dot), which is invalid SQL and made every call fail at query time.
	stmtOut, err := dbConn.Prepare(`select video_info.id,video_info.author_id,video_info.name,video_info.display_ctime from video_info
INNER join users ON video_info.author_id=users.id where users.login_name=? and video_info.create_time>from_unixtime(?) and
video_info.create_time<= from_unixtime(?) order by video_info.create_time DESC `)
	var res []*defs.VideoInfo
	if err != nil {
		return res, err
	}
	defer stmtOut.Close()
	rows, err := stmtOut.Query(uname, from, to)
	if err != nil {
		return res, err
	}
	// BUG FIX: close the result set; the original leaked it.
	defer rows.Close()
	for rows.Next() {
		var id, name, ctime string
		var aid int
		if err := rows.Scan(&id, &aid, &name, &ctime); err != nil {
			return res, err
		}
		vi := &defs.VideoInfo{Id: id, AuthorId: aid, Name: name, DisplayCtime: ctime}
		res = append(res, vi)
	}
	if err := rows.Err(); err != nil {
		return res, err
	}
	return res, nil
}
//func DeleteVideoInfo(vid string) error{
// stmtDel, err := dbConn.Prepare("delete from video_info where id = ?")
// if err != nil{
// return err
// }
// _,err = stmtDel.Exec(vid)
// if err != nil{
// return err
// }
// defer stmtDel.Close()
// return nil
//}
//
//func AddNewComments(vid string,aid int,content string)error{
//
//}
|
package calldiv
import (
"syscall"
"github.com/b-2019-apt-test/divider/pkg/div"
)
var (
// math is the lazily loaded external DLL; mathDLLName is defined elsewhere
// in the package. NOTE(review): this shadows the standard "math" package
// name within this file.
math = syscall.NewLazyDLL(mathDLLName)
// divProc resolves the DLL's exported "Div" routine on first call.
divProc = math.NewProc("Div")
)
// Divider performs decimal division by calling external lib (math.dll).
var Divider = divider{}
type divider struct{}
// Div divides a by b with rounding down. The actual returned value is of type
// int32 regardless of the current platform arch.
func (d divider) Div(a, b int) (int, error) {
// Guard here rather than trusting the DLL to signal division by zero.
if b == 0 {
return 0, div.ErrDivZero
}
r, _, err := divProc.Call(uintptr(a), uintptr(b))
// LazyProc.Call always returns a non-nil error; a zero Errno means success.
if uintptr(err.(syscall.Errno)) != 0 {
return 0, err
}
// The DLL's result is 32-bit; truncate explicitly before widening to int.
return int(int32(r)), nil
}
|
package main
import (
"context"
"fmt"
"os"
"path"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/improbable-eng/thanos/pkg/compact"
"github.com/improbable-eng/thanos/pkg/compact/downsample"
"github.com/improbable-eng/thanos/pkg/objstore/client"
"github.com/improbable-eng/thanos/pkg/runutil"
"github.com/oklog/run"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/tsdb"
"gopkg.in/alecthomas/kingpin.v2"
)
// registerCompact wires up the compact sub-command: it declares all CLI
// flags and registers a setup function that launches runCompact with the
// parsed values.
func registerCompact(m map[string]setupFunc, app *kingpin.Application, name string) {
cmd := app.Command(name, "continuously compacts blocks in an object store bucket")
haltOnError := cmd.Flag("debug.halt-on-error", "Halt the process if a critical compaction error is detected.").
Hidden().Default("true").Bool()
httpAddr := regHTTPAddrFlag(cmd)
dataDir := cmd.Flag("data-dir", "Data directory in which to cache blocks and process compactions.").
Default("./data").String()
objStoreConfig := regCommonObjStoreFlags(cmd, "")
syncDelay := modelDuration(cmd.Flag("sync-delay", "Minimum age of fresh (non-compacted) blocks before they are being processed.").
Default("30m"))
// Per-resolution retention windows; "0d" disables the corresponding policy.
retentionRaw := modelDuration(cmd.Flag("retention.resolution-raw", "How long to retain raw samples in bucket. 0d - disables this retention").Default("0d"))
retention5m := modelDuration(cmd.Flag("retention.resolution-5m", "How long to retain samples of resolution 1 (5 minutes) in bucket. 0d - disables this retention").Default("0d"))
retention1h := modelDuration(cmd.Flag("retention.resolution-1h", "How long to retain samples of resolution 2 (1 hour) in bucket. 0d - disables this retention").Default("0d"))
wait := cmd.Flag("wait", "Do not exit after all compactions have been processed and wait for new work.").
Short('w').Bool()
// TODO(bplotka): Remove this flag once https://github.com/improbable-eng/thanos/issues/297 is fixed.
disableDownsampling := cmd.Flag("debug.disable-downsampling", "Disables downsampling. This is not recommended "+
"as querying long time ranges without non-downsampled data is not efficient and not useful (is not possible to render all for human eye).").
Hidden().Default("false").Bool()
m[name] = func(g *run.Group, logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, _ bool) error {
return runCompact(g, logger, reg,
*httpAddr,
*dataDir,
objStoreConfig,
time.Duration(*syncDelay),
*haltOnError,
*wait,
map[compact.ResolutionLevel]time.Duration{
compact.ResolutionLevelRaw: time.Duration(*retentionRaw),
compact.ResolutionLevel5m: time.Duration(*retention5m),
compact.ResolutionLevel1h: time.Duration(*retention1h),
},
name,
*disableDownsampling,
)
}
}
// runCompact runs the compactor: it syncs block metadata from the bucket,
// compacts, optionally downsamples in two passes, and applies the
// per-resolution retention policies — once, or repeatedly every 5 minutes
// when wait is true.
func runCompact(
	g *run.Group,
	logger log.Logger,
	reg *prometheus.Registry,
	httpBindAddr string,
	dataDir string,
	objStoreConfig *pathOrContent,
	syncDelay time.Duration,
	haltOnError bool,
	wait bool,
	retentionByResolution map[compact.ResolutionLevel]time.Duration,
	component string,
	disableDownsampling bool,
) error {
	halted := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "thanos_compactor_halted",
		Help: "Set to 1 if the compactor halted due to an unexpected error",
	})
	retried := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "thanos_compactor_retries_total",
		Help: "Total number of retries after retriable compactor error",
	})
	halted.Set(0)
	// BUG FIX: retried was created and incremented but never registered, so
	// thanos_compactor_retries_total was silently missing from /metrics.
	reg.MustRegister(halted, retried)
	bucketConfig, err := objStoreConfig.Content()
	if err != nil {
		return err
	}
	bkt, err := client.NewBucket(logger, bucketConfig, reg, component)
	if err != nil {
		return err
	}
	// Ensure we close up everything properly.
	defer func() {
		if err != nil {
			runutil.CloseWithLogOnErr(logger, bkt, "bucket client")
		}
	}()
	sy, err := compact.NewSyncer(logger, reg, bkt, syncDelay)
	if err != nil {
		return errors.Wrap(err, "create syncer")
	}
	// Instantiate the compactor with different time slices. Timestamps in TSDB
	// are in milliseconds.
	comp, err := tsdb.NewLeveledCompactor(reg, logger, []int64{
		int64(1 * time.Hour / time.Millisecond),
		int64(2 * time.Hour / time.Millisecond),
		int64(8 * time.Hour / time.Millisecond),
		int64(2 * 24 * time.Hour / time.Millisecond),  // 2 days
		int64(14 * 24 * time.Hour / time.Millisecond), // 2 weeks
	}, downsample.NewPool())
	if err != nil {
		return errors.Wrap(err, "create compactor")
	}
	var (
		compactDir      = path.Join(dataDir, "compact")
		downsamplingDir = path.Join(dataDir, "downsample")
	)
	if err := os.RemoveAll(dataDir); err != nil {
		return errors.Wrap(err, "clean working temporary directory")
	}
	compactor := compact.NewBucketCompactor(logger, sy, comp, compactDir, bkt)
	if retentionByResolution[compact.ResolutionLevelRaw].Seconds() != 0 {
		level.Info(logger).Log("msg", "retention policy of raw samples is enabled", "duration", retentionByResolution[compact.ResolutionLevelRaw])
	}
	if retentionByResolution[compact.ResolutionLevel5m].Seconds() != 0 {
		level.Info(logger).Log("msg", "retention policy of 5 min aggregated samples is enabled", "duration", retentionByResolution[compact.ResolutionLevel5m])
	}
	if retentionByResolution[compact.ResolutionLevel1h].Seconds() != 0 {
		level.Info(logger).Log("msg", "retention policy of 1 hour aggregated samples is enabled", "duration", retentionByResolution[compact.ResolutionLevel1h])
	}
	ctx, cancel := context.WithCancel(context.Background())
	// f is a single full iteration: compact, downsample twice, apply retention.
	f := func() error {
		if err := compactor.Compact(ctx); err != nil {
			return errors.Wrap(err, "compaction failed")
		}
		level.Info(logger).Log("msg", "compaction iterations done")
		// TODO(bplotka): Remove "disableDownsampling" once https://github.com/improbable-eng/thanos/issues/297 is fixed.
		if !disableDownsampling {
			// After all compactions are done, work down the downsampling backlog.
			// We run two passes of this to ensure that the 1h downsampling is generated
			// for 5m downsamplings created in the first run.
			level.Info(logger).Log("msg", "start first pass of downsampling")
			if err := downsampleBucket(ctx, logger, bkt, downsamplingDir); err != nil {
				return errors.Wrap(err, "first pass of downsampling failed")
			}
			level.Info(logger).Log("msg", "start second pass of downsampling")
			if err := downsampleBucket(ctx, logger, bkt, downsamplingDir); err != nil {
				return errors.Wrap(err, "second pass of downsampling failed")
			}
			level.Info(logger).Log("msg", "downsampling iterations done")
		} else {
			level.Warn(logger).Log("msg", "downsampling was explicitly disabled")
		}
		if err := compact.ApplyRetentionPolicyByResolution(ctx, logger, bkt, retentionByResolution); err != nil {
			return errors.Wrap(err, fmt.Sprintf("retention failed"))
		}
		return nil
	}
	g.Add(func() error {
		defer runutil.CloseWithLogOnErr(logger, bkt, "bucket client")
		if !wait {
			return f()
		}
		// --wait=true is specified.
		return runutil.Repeat(5*time.Minute, ctx.Done(), func() error {
			err := f()
			if err == nil {
				return nil
			}
			// The HaltError type signals that we hit a critical bug and should block
			// for investigation.
			// You should alert on this being halted.
			if compact.IsHaltError(err) {
				if haltOnError {
					level.Error(logger).Log("msg", "critical error detected; halting", "err", err)
					halted.Set(1)
					select {}
				} else {
					return errors.Wrap(err, "critical error detected")
				}
			}
			// The RetryError signals that we hit an retriable error (transient error, no connection).
			// You should alert on this being triggered to frequently.
			if compact.IsRetryError(err) {
				level.Error(logger).Log("msg", "retriable error", "err", err)
				retried.Inc()
				// TODO(bplotka): use actual "retry()" here instead of waiting 5 minutes?
				return nil
			}
			return errors.Wrap(err, "error executing compaction")
		})
	}, func(error) {
		cancel()
	})
	if err := metricHTTPListenGroup(g, logger, reg, httpBindAddr); err != nil {
		return err
	}
	level.Info(logger).Log("msg", "starting compact node")
	return nil
}
|
/*
A thin wrapper for the lmdb C library. These are low-level bindings for the C
API. The C documentation should be used as a reference while developing
(http://symas.com/mdb/doc/group__mdb.html).
Errors
The errors returned by the package API will, with few exceptions, be of type
Errno or syscall.Errno. The only errors of type Errno returned are those
defined in lmdb.h. Other errno values like EINVAL will be of type
syscall.Errno.
*/
package mdb
|
package types
// An Item represents an effective record crawled by Spider.
type Item interface {
	// Content returns the item's payload as a string.
	Content() string
}
|
// Package parser turns the source text into a sequence of blocks.
package parser
import (
"bytes"
"github.com/bouncepaw/mycomarkup/v2/blocks"
"github.com/bouncepaw/mycomarkup/v2/mycocontext"
"sync"
)
// Parse parses the Mycomarkup document in the given context. All parsed
// blocks are written to out, which is closed when the document is fully
// consumed or the context is cancelled.
func Parse(ctx mycocontext.Context, out chan blocks.Block) {
	defer close(out)
	for {
		// Non-blocking cancellation check before pulling the next token.
		select {
		case <-ctx.Done():
			return
		default:
		}
		token, done := nextToken(ctx)
		if token != nil {
			out <- token
		}
		if done {
			return
		}
	}
}
// parseSubdocumentForEachBlock replaces the buffer in the given context and
// parses the document contained in the buffer. f is called on every block.
func parseSubdocumentForEachBlock(ctx mycocontext.Context, buf *bytes.Buffer, f func(block blocks.Block)) {
	ch := make(chan blocks.Block)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		Parse(mycocontext.WithBuffer(ctx, buf), ch)
	}()
	// Parse closes ch when it is finished, which terminates this loop.
	for b := range ch {
		f(b)
	}
	wg.Wait()
}
// MakeHeading parses the heading on the given line and returns it. Set its
// level by yourself though.
func MakeHeading(line, hyphaName string, level uint) blocks.Heading {
	// TODO: figure out the level here.
	// Skip the level marker plus the following separator character before
	// formatting the heading contents.
	return blocks.Heading{
		Level:    level,
		Contents: MakeFormatted(line[level+1:], hyphaName),
		Src:      line,
	}
}
|
package parametrs
// ProcessBarcodeField holds the request parameters for the OCR SDK
// processBarcodeField method. Field values are serialized using the
// tag_name struct tags.
// http://ocrsdk.com/documentation/apireference/processBarcodeField/
type ProcessBarcodeField struct {
	IParamers
	// Region of the image containing the barcode.
	Region string `tag_name:"region"`
	// BarcodeType selects which barcode symbology to recognize.
	BarcodeType string `tag_name:"barcodeType"`
	// ContainsBinaryData indicates whether the barcode holds binary data.
	ContainsBinaryData string `tag_name:"containsBinaryData"`
	// Description is a free-form task description.
	Description string `tag_name:"description"`
	// PdfPassword is the password for protected PDF input.
	PdfPassword string `tag_name:"pdfPassword"`
}
// ToMap converts the parameter struct into a string map (tag_name keys to
// field values) via the shared TypeToMap helper.
func (p *ProcessBarcodeField) ToMap() map[string]string {
	return TypeToMap(p)
}
|
package drivestream
import "github.com/scjalliance/drivestream/resource"
// DriveMap is a map of drivestream drives.
type DriveMap interface {
	// List returns a list of all drives within the map.
	List() (ids []resource.ID, err error)
	// Ref returns a drive reference for the drive with the given ID.
	Ref(driveID resource.ID) DriveReference
}
|
// Copyright © 2018 Inanc Gumus
// Learn Go Programming Course
// License: https://creativecommons.org/licenses/by-nc-sa/4.0/
//
// For more tutorials : https://learngoprogramming.com
// In-person training : https://www.linkedin.com/in/inancgumus/
// Follow me on twitter: https://twitter.com/inancgumus
package main

import (
	"fmt"
	"os"
)
// ---------------------------------------------------------
// CHALLENGE #1
// Create a user/password protected program.
//
// EXAMPLE USER
// username: jack
// password: 1888
//
// EXPECTED OUTPUT
// go run main.go
// Usage: [username] [password]
//
// go run main.go albert
// Usage: [username] [password]
//
// go run main.go hacker 42
// Access denied for "hacker".
//
// go run main.go jack 6475
// Invalid password for "jack".
//
// go run main.go jack 1888
// Access granted to "jack".
// ---------------------------------------------------------
// main implements the challenge described above: it reads a username and
// password from the command line and prints one of four outcomes.
func main() {
	// Expect exactly two arguments: username and password.
	args := os.Args[1:]
	if len(args) != 2 {
		fmt.Println("Usage: [username] [password]")
		return
	}

	user, pass := args[0], args[1]
	switch {
	case user != "jack":
		// Unknown user: reject before checking the password.
		fmt.Printf("Access denied for %q.\n", user)
	case pass != "1888":
		fmt.Printf("Invalid password for %q.\n", user)
	default:
		fmt.Printf("Access granted to %q.\n", user)
	}
}
|
// +build agrasta383
package agrasta
import "fmt"
// Rounds is the number of rounds.
const Rounds = 6
// BlockSize is the block size in bits.
const BlockSize = 383
// BlockWords is the number of 64-bit words that hold one block.
const BlockWords = 6
// LTSize is the total size (in 64bit words) of a single matrix triangle.
// The series must go on up until BlockWords.
const LTSize = 64 + 64*2 + 64*3 + 64*4 + 64*5 + 64*6
// String renders the block as a 383-bit binary string: the most-significant
// word r[5] contributes 63 bits (%063b) and the remaining five words 64
// bits each, matching BlockSize = 383.
func (r *Block) String() string {
	return fmt.Sprintf("%063b%064b%064b%064b%064b%064b", r[5], r[4], r[3], r[2], r[1], r[0])
}
|
package services
// import (
// "errors"
// "github.com/messagedb/messagedb/meta/bindings"
// "github.com/messagedb/messagedb/meta/models"
// "github.com/messagedb/messagedb/meta/schema"
// log "github.com/Sirupsen/logrus"
// "gopkg.in/mgo.v2"
// )
// var (
// ErrTeamDuplicateKey = errors.New("Team duplicate key error")
// ErrTeamNameAlreadyExists = errors.New("Team name already exists")
// ErrTeamHasMembers = errors.New("Team has members")
// ErrForbiddenToDeleteOwnersTeam = errors.New("Forbidden to delete the organization Owners team")
// ErrForbiddenNotOrganizationOwner = errors.New("User is not part of the Organization Owners")
// ErrForbiddenNotOrganizationMember = errors.New("User is not part of the Organization Members")
// )
// var Team *teamService = &teamService{}
// type teamService struct{}
// func (s *teamService) CreateOwnersTeam(org *schema.Organization) (*schema.Team, error) {
// newTeam := bindings.CreateUpdateTeam{Name: "Owners", Description: ""}
// return s.CreateTeam(org, newTeam, schema.TeamTypeOwners)
// }
// func (s *teamService) CreateAdminsTeam(org *schema.Organization) (*schema.Team, error) {
// newTeam := bindings.CreateUpdateTeam{Name: "Admins", Description: ""}
// return s.CreateTeam(org, newTeam, schema.TeamTypeAdmins)
// }
// func (s *teamService) CreateTeam(org *schema.Organization, newTeam bindings.CreateUpdateTeam, teamType schema.TeamType) (*schema.Team, error) {
// var err error
// var existingTeam *schema.Team
// if existingTeam, err = s.getExistingTeam(org, newTeam.Name); err != nil {
// return nil, err
// }
// // make sure team name doesn't already exists
// if existingTeam != nil {
// log.Warnf("Trying creating team with same name: %s", newTeam.Name)
// return nil, ErrTeamNameAlreadyExists
// }
// team := models.Team.New()
// team.Name = newTeam.Name
// team.Description = newTeam.Description
// team.TeamType = teamType
// team.OrganizationId = org.Id
// err = team.Save()
// if err != nil {
// if mgo.IsDup(err) {
// log.Errorf("Duplicate key when saving team %v", team)
// return nil, ErrTeamDuplicateKey
// }
// log.Errorf("Error saving team %v", team)
// return nil, err
// }
// return team, nil
// }
// func (s *teamService) UpdateTeam(org *schema.Organization, team *schema.Team, newTeam bindings.CreateUpdateTeam) (*schema.Team, error) {
// var err error
// var existingTeam *schema.Team
// if existingTeam, err = s.getExistingTeam(org, newTeam.Name); err != nil {
// return nil, err
// }
// // make sure team name doesn't already exists and doesn't match the team we are trying to edit
// if existingTeam != nil && existingTeam.Id != team.Id {
// log.Warnf("Trying creating team with same name: %s", newTeam.Name)
// return nil, ErrTeamNameAlreadyExists
// }
// // copy fields from bindings payload into the target object
// team.Name = newTeam.Name
// team.Description = newTeam.Description
// err = team.Save()
// if err != nil {
// log.Errorf("Error saving Team %v", team)
// return nil, err
// }
// return team, nil
// }
// func (s *teamService) DeleteTeam(org *schema.Organization, team *schema.Team) error {
// // we should not be able to delete Owners Team
// if org.OwnersTeamId == team.Id {
// return ErrForbiddenToDeleteOwnersTeam
// }
// // we shouldn't delete a team if it has members
// // so we try to retrieve all members that belongs to the team first
// members, err := models.Member.FindByTeam(team)
// if err != nil {
// log.Errorf("Error retrieving members for Team %v", team)
// return err
// }
// // if we found any members, then we bail with an error
// if len(members) > 0 {
// return ErrTeamHasMembers
// }
// err = models.Team.RemoveID(team.Id)
// if err != nil {
// log.Errorf("Error deleting Team %v", team)
// return err
// }
// return nil
// }
// func (s *teamService) getOrganizationFromTeam(team *schema.Team) (*schema.Organization, error) {
// org := &schema.Organization{}
// err := models.Organization.FindId(team.OrganizationId).One(org)
// if err != nil {
// log.Errorf("Failed to retrieve organization (%s) from database: %v ", team.OrganizationId.Hex(), err)
// return nil, err
// }
// return org, nil
// }
// func (s *teamService) getExistingTeam(org *schema.Organization, name string) (*schema.Team, error) {
// existingTeam, err := models.Team.FindByOrganizationIdAndName(org.Id, name)
// if err != nil && err != storage.ErrNotFound {
// log.Errorf("Failed to find team by organization_id and name: %v", err)
// return nil, err
// }
// return existingTeam, nil
// }
// func (s *teamService) checkOrganizationOwnership(user *schema.User, org *schema.Organization) error {
// // check if the current user is part of the organization owners
// ok, err := models.Member.IsOrganizationOwner(org, user)
// if err != nil {
// log.Errorf("Error checking if user (%s) is an organization owner (%s): %v ", user.Id.Hex(), org.Id.Hex(), err)
// return err
// }
// if !ok {
// log.Warnf("Unauthorized user (%s) for organization (%s) ", user.Id.Hex(), org.Id.Hex())
// return ErrForbiddenNotOrganizationOwner
// }
// return nil
// }
// func (s *teamService) checkOrganizationMembership(user *schema.User, org *schema.Organization) error {
// // check if the current user is part of the organization owners
// ok, err := models.Member.IsOrganizationMember(org, user)
// if err != nil {
// log.Errorf("Error checking if user (%s) is an organization member (%s): %v ", user.Id.Hex(), org.Id.Hex(), err)
// return err
// }
// if !ok {
// log.Warnf("Unauthorized user (%s) for organization (%s) ", user.Id.Hex(), org.Id.Hex())
// return ErrForbiddenNotOrganizationMember
// }
// return nil
// }
|
package api
import (
"github.com/gocarina/gocsv"
"os"
)
// Route is a single route record decoded from a CSV file via the csv
// struct tags (route_id, route_short_name, route_long_name columns).
type Route struct {
	// ID is the unique route identifier.
	ID string `csv:"route_id"`
	// Name is the short display name of the route.
	Name string `csv:"route_short_name"`
	// LongName is the full descriptive name of the route.
	LongName string `csv:"route_long_name"`
}
// RouteReader provides lookup of routes by their ID.
type RouteReader struct {
	// routes indexes every loaded Route by its ID field.
	routes map[string]Route
}
// NewRouteReader loads ./data/routes.csv and returns a RouteReader that
// indexes every route by its ID.
//
// It panics if the file cannot be opened or parsed, so it is intended to
// be called once during startup.
func NewRouteReader() RouteReader {
	// Open read-only: the previous O_RDWR|O_CREATE flags silently created
	// an empty file (and required write permission) just to read data.
	routesFile, err := os.Open("./data/routes.csv")
	if err != nil {
		panic(err)
	}
	defer routesFile.Close()

	var routes []Route
	if err := gocsv.UnmarshalFile(routesFile, &routes); err != nil {
		panic(err)
	}

	r := RouteReader{routes: make(map[string]Route, len(routes))}
	for _, route := range routes {
		r.routes[route.ID] = route
	}
	return r
}
// GetRoute returns the route with the given ID, or the zero Route when the
// ID is unknown.
func (r RouteReader) GetRoute(id string) Route {
	return r.routes[id]
}
// Routes returns every known route. Order is unspecified because the
// backing storage is a map.
func (r RouteReader) Routes() []Route {
	all := make([]Route, 0, len(r.routes))
	for id := range r.routes {
		all = append(all, r.routes[id])
	}
	return all
}
|
package window
import (
"time"
"github.com/idiocracy/mrv/streams"
)
// Window holds window state
type Window struct {
	// stream is the ticker source consumed by the aggregation goroutine.
	stream chan streams.Ticker
	// size is the retention duration of ticks kept in the window.
	size time.Duration
	// state accumulates ticks per currency pair within the window.
	state slidingWindow
}
// New creates a window aggregator.
// Takes a Ticker channel and a window size as input.
//
// NOTE(review): the goroutine started here has no stop mechanism and the
// ticker is never stopped, so every Window leaks a goroutine for the
// process lifetime. The state map is written by this goroutine while
// ListCurrencyPairs/GetCurrencyPair may read it concurrently — this looks
// like a data race; confirm the intended usage.
func New(stream chan streams.Ticker, size time.Duration) Window {
	state := make(slidingWindow)
	// Prune expired ticks twice a second.
	retentionCheck := time.NewTicker(time.Millisecond * 500).C
	go func() {
		for {
			select {
			case <-retentionCheck:
				state.assertRetention(size)
			case tick := <-stream:
				state.addTick(tick)
			}
		}
	}()
	return Window{
		stream: stream,
		size:   size,
		state:  state,
	}
}
// ListCurrencyPairs lists all seen CurrencyPairs.
func (w Window) ListCurrencyPairs() (pairs []string) {
	for cp := range w.state {
		pairs = append(pairs, cp)
	}
	return pairs
}
// GetCurrencyPair returns all tickers in the current window for a
// CurrencyPair; nil when the pair has never been seen.
func (w Window) GetCurrencyPair(cp string) (t []streams.Ticker) {
	tickerMap, ok := w.state[cp]
	if !ok {
		return nil
	}
	for _, ticker := range tickerMap {
		t = append(t, ticker)
	}
	return t
}
// slidingWindow maps a currency pair to the tickers observed for it,
// keyed by arrival time.
type slidingWindow map[string]map[time.Time]streams.Ticker

// addTick records a ticker under the current time, creating the inner map
// the first time a currency pair is seen.
func (sw slidingWindow) addTick(tick streams.Ticker) {
	if _, ok := sw[tick.CurrencyPair]; !ok {
		sw[tick.CurrencyPair] = make(map[time.Time]streams.Ticker)
	}
	sw[tick.CurrencyPair][time.Now()] = tick
}
// assertRetention drops every tick older than the retention duration.
// Deleting from a map while ranging over it is well-defined in Go.
func (sw slidingWindow) assertRetention(retention time.Duration) {
	for _, ticks := range sw {
		for added := range ticks {
			if time.Since(added) <= retention {
				continue
			}
			delete(ticks, added)
		}
	}
}
|
package apiserver
import (
"io"
"log"
"net/http"
"github.com/chlins/obs/pkg/register"
)
// Start api server.
// Registers the object handler, prepares the register package, and blocks
// serving HTTP on listen address l; log.Fatal exits the process if the
// server stops with an error.
func Start(l string) {
	http.HandleFunc("/objects/", handle)
	register.Prepare()
	log.Fatal(http.ListenAndServe(l, nil))
}
// handle proxies every request under /objects/ to a randomly selected data
// server and relays the response back to the client.
func handle(w http.ResponseWriter, r *http.Request) {
	// select one data server
	dataserver, err := register.RandomSelectDataServer()
	if err != nil {
		// Previously this returned silently, which sent an implicit 200
		// with an empty body; report the failure to the client instead.
		log.Printf("%s\n", err)
		w.WriteHeader(http.StatusServiceUnavailable)
		return
	}

	// Rewrite the request URL to target the chosen data server.
	r.URL.Scheme = "http"
	r.URL.Host = dataserver

	req, err := http.NewRequest(r.Method, r.URL.String(), r.Body)
	if err != nil {
		log.Printf("%s\n", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Printf("%s\n", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	defer resp.Body.Close()

	// Relay the data server's status code before streaming the body.
	w.WriteHeader(resp.StatusCode)
	io.Copy(w, resp.Body)
}
|
package f3
import (
"bytes"
"net/http"
"time"
)
// AccountsEndpoint allows sending POST, GET, DELETE requests.
const AccountsEndpoint = "/organisation/accounts"
// HealthEndpoint allows sending GET requests.
const HealthEndpoint = "/health"
// HTTPParams is a map used for url parameters.
type HTTPParams map[string]string

// Headers is a set of headers used for querying the F3 api.
type Headers map[string]string

// NewRequest is a wrapper over http NewRequest function.
// Adds passed headers and query parameters to the request.
func NewRequest(method, url string, params HTTPParams, body []byte, h Headers) (*http.Request, error) {
	req, err := http.NewRequest(method, url, bytes.NewReader(body))
	if err != nil {
		return nil, err
	}

	// Query string and headers are independent, so the order in which they
	// are attached does not matter.
	query := req.URL.Query()
	for key, value := range params {
		query.Add(key, value)
	}
	req.URL.RawQuery = query.Encode()

	for key, value := range h {
		req.Header.Add(key, value)
	}
	return req, nil
}
func timeToRFC1123(t time.Time) string {
return t.Format(time.RFC1123)
}
// DefaultHeaders is a set of headers required for every f3 request.
// A fresh map is returned on every call, stamped with the current time.
func DefaultHeaders() Headers {
	h := make(Headers, 2)
	h["Date"] = timeToRFC1123(time.Now())
	h["Accept"] = "application/vnd.api+json"
	return h
}
// ContentHeaders is a set of headers required for f3 requests that contain
// a body.
func ContentHeaders() Headers {
	// DefaultHeaders returns a fresh map on every call, so extending it in
	// place is safe.
	h := DefaultHeaders()
	h["Content-Type"] = "application/vnd.api+json"
	return h
}
|
package accounts
import (
"fmt"
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
"github.com/iotaledger/wasp/packages/coretypes"
"github.com/iotaledger/wasp/packages/coretypes/cbalances"
"github.com/iotaledger/wasp/packages/kv"
"github.com/iotaledger/wasp/packages/kv/codec"
"github.com/iotaledger/wasp/packages/kv/collections"
"github.com/iotaledger/wasp/packages/kv/dict"
"github.com/iotaledger/wasp/packages/util"
)
const (
	// varStateAccounts keys the index map of all agent accounts.
	varStateAccounts = "a"
	// varStateTotalAssets keys the account mirroring the chain's total assets.
	varStateTotalAssets = "t"
)
// getAccountsMap returns the mutable index map of all accounts.
func getAccountsMap(state kv.KVStore) *collections.Map {
	return collections.NewMap(state, varStateAccounts)
}
// getAccountsMapR returns the read-only view of the accounts index map.
func getAccountsMapR(state kv.KVStoreReader) *collections.ImmutableMap {
	return collections.NewMapReadOnly(state, varStateAccounts)
}
// getAccount returns the mutable account map for the agent, keyed by the
// agent's serialized ID.
func getAccount(state kv.KVStore, agentID coretypes.AgentID) *collections.Map {
	return collections.NewMap(state, string(agentID[:]))
}
// getAccountR returns the read-only account map for the agent.
func getAccountR(state kv.KVStoreReader, agentID coretypes.AgentID) *collections.ImmutableMap {
	return collections.NewMapReadOnly(state, string(agentID[:]))
}
// getTotalAssetsAccount returns the mutable total-assets account.
func getTotalAssetsAccount(state kv.KVStore) *collections.Map {
	return collections.NewMap(state, varStateTotalAssets)
}
// getTotalAssetsAccountR returns the read-only total-assets account.
func getTotalAssetsAccountR(state kv.KVStoreReader) *collections.ImmutableMap {
	return collections.NewMapReadOnly(state, varStateTotalAssets)
}
// CreditToAccount brings new funds to the on chain ledger.
// The transfer is credited both to the agent's account and to the
// total-assets account, then the ledger invariant is re-checked.
func CreditToAccount(state kv.KVStore, agentID coretypes.AgentID, transfer coretypes.ColoredBalances) {
	creditToAccount(state, getAccount(state, agentID), transfer)
	creditToAccount(state, getTotalAssetsAccount(state), transfer)
	mustCheckLedger(state, "CreditToAccount")
}
// creditToAccount internal. Adds each colored balance of the transfer to
// the given account map. No-op for a nil or empty transfer.
func creditToAccount(state kv.KVStore, account *collections.Map, transfer coretypes.ColoredBalances) {
	if transfer == nil || transfer.Len() == 0 {
		return
	}
	// Keep the accounts index in sync once all balances are written.
	defer touchAccount(state, account)
	transfer.Iterate(func(col balance.Color, bal int64) bool {
		// Missing entries count as a zero balance.
		var currentBalance int64
		v := account.MustGetAt(col[:])
		if v != nil {
			currentBalance = int64(util.MustUint64From8Bytes(v))
		}
		account.MustSetAt(col[:], util.Uint64To8Bytes(uint64(currentBalance+bal)))
		return true
	})
}
// DebitFromAccount removes funds from the chain ledger.
// Returns false (changing nothing) if the agent's account has insufficient
// funds. Panics if the agent account could be debited but the total-assets
// account could not, since that indicates a corrupted ledger.
func DebitFromAccount(state kv.KVStore, agentID coretypes.AgentID, transfer coretypes.ColoredBalances) bool {
	if !debitFromAccount(state, getAccount(state, agentID), transfer) {
		return false
	}
	if !debitFromAccount(state, getTotalAssetsAccount(state), transfer) {
		panic("debitFromAccount: inconsistent accounts ledger state")
	}
	mustCheckLedger(state, "DebitFromAccount")
	return true
}
// debitFromAccount internal. Attempts to subtract the transfer from the
// account; returns false without modifying the store if any color has an
// insufficient balance. A nil/empty transfer always succeeds.
func debitFromAccount(state kv.KVStore, account *collections.Map, transfer coretypes.ColoredBalances) bool {
	if transfer == nil || transfer.Len() == 0 {
		return true
	}
	defer touchAccount(state, account)
	// Work on an in-memory copy first so the store is only modified once
	// the whole debit is known to succeed.
	current := getAccountBalances(account.Immutable())
	ok := true
	transfer.Iterate(func(col balance.Color, transferAmount int64) bool {
		bal := current[col]
		if bal < transferAmount {
			// Insufficient balance for this color: abort the iteration.
			ok = false
			return false
		}
		current[col] = bal - transferAmount
		return true
	})
	if !ok {
		return false
	}
	// Write back the reduced balances; zeroed colors are removed entirely.
	for col, rem := range current {
		if rem > 0 {
			account.MustSetAt(col[:], util.Uint64To8Bytes(uint64(rem)))
		} else {
			account.MustDelAt(col[:])
		}
	}
	return true
}
// MoveBetweenAccounts transfers funds between two on-chain accounts.
// Returns false when the source account has insufficient funds.
func MoveBetweenAccounts(state kv.KVStore, fromAgentID, toAgentID coretypes.AgentID, transfer coretypes.ColoredBalances) bool {
	if fromAgentID == toAgentID {
		// no need to move
		return true
	}
	// total assets account doesn't change
	if !debitFromAccount(state, getAccount(state, fromAgentID), transfer) {
		return false
	}
	creditToAccount(state, getAccount(state, toAgentID), transfer)
	return true
}
// touchAccount refreshes the accounts index after an account changed:
// empty accounts are removed from the index, non-empty ones (re)added.
// The total-assets account is not part of the index and is skipped.
func touchAccount(state kv.KVStore, account *collections.Map) {
	if account.Name() == varStateTotalAssets {
		return
	}
	// The account's map name is the serialized agent ID.
	agentid := []byte(account.Name())
	accounts := getAccountsMap(state)
	if account.MustLen() == 0 {
		accounts.MustDelAt(agentid)
	} else {
		accounts.MustSetAt(agentid, []byte{0xFF})
	}
}
// GetBalance returns the balance of the given color in the agent's
// account, or 0 when the account or the color entry is absent.
func GetBalance(state kv.KVStoreReader, agentID coretypes.AgentID, color balance.Color) int64 {
	b := getAccountR(state, agentID).MustGetAt(color[:])
	if b == nil {
		return 0
	}
	// NOTE(review): the decode error is discarded; malformed bytes yield 0.
	ret, _ := util.Int64From8Bytes(b)
	return ret
}
// getAccountsIntern returns a dict whose keys are the serialized agent IDs
// of all indexed accounts; the values are empty placeholders.
func getAccountsIntern(state kv.KVStoreReader) dict.Dict {
	ret := dict.New()
	getAccountsMapR(state).MustIterate(func(agentID []byte, val []byte) bool {
		ret.Set(kv.Key(agentID), []byte{})
		return true
	})
	return ret
}
// getAccountBalances materializes an account's colored balances into an
// in-memory map. Panics on iteration/decoding errors.
func getAccountBalances(account *collections.ImmutableMap) map[balance.Color]int64 {
	ret := make(map[balance.Color]int64)
	err := account.IterateBalances(func(col balance.Color, bal int64) bool {
		ret[col] = bal
		return true
	})
	if err != nil {
		panic(err)
	}
	return ret
}
// GetAccountBalances returns all colored balances belonging to the agentID on the state.
// Normally, the state is the partition of the 'accountsc'.
// The second result is false when the agent has no (non-empty) account.
func GetAccountBalances(state kv.KVStoreReader, agentID coretypes.AgentID) (map[balance.Color]int64, bool) {
	account := getAccountR(state, agentID)
	if account.MustLen() == 0 {
		return nil, false
	}
	return getAccountBalances(account), true
}
// getTotalAssetsIntern returns the stored total-assets account as colored
// balances.
func getTotalAssetsIntern(state kv.KVStoreReader) coretypes.ColoredBalances {
	return cbalances.NewFromMap(getAccountBalances(getTotalAssetsAccountR(state)))
}
// calcTotalAssets recomputes the chain's total assets by summing the
// balances of every account listed in the accounts index.
func calcTotalAssets(state kv.KVStoreReader) coretypes.ColoredBalances {
	ret := make(map[balance.Color]int64)
	getAccountsMapR(state).MustIterateKeys(func(key []byte) bool {
		// Index keys are serialized agent IDs.
		agentID, err := coretypes.NewAgentIDFromBytes([]byte(key))
		if err != nil {
			panic(err)
		}
		for col, b := range getAccountBalances(getAccountR(state, agentID)) {
			ret[col] = ret[col] + b
		}
		return true
	})
	return cbalances.NewFromMap(ret)
}
// mustCheckLedger panics unless the stored total-assets account equals the
// recomputed sum of all account balances. checkpoint names the caller for
// the panic message.
func mustCheckLedger(state kv.KVStore, checkpoint string) {
	a := getTotalAssetsIntern(state)
	c := calcTotalAssets(state)
	if !a.Equal(c) {
		panic(fmt.Sprintf("inconsistent on-chain account ledger @ checkpoint '%s'", checkpoint))
	}
}
// getAccountBalanceDict returns the account's balances encoded as a dict,
// logging them at debug level under the given tag.
func getAccountBalanceDict(ctx coretypes.SandboxView, account *collections.ImmutableMap, tag string) dict.Dict {
	balances := getAccountBalances(account)
	ctx.Log().Debugf("%s. balance = %s\n", tag, cbalances.NewFromMap(balances).String())
	return EncodeBalances(balances)
}
// EncodeBalances serializes a color->amount map into a dict, keying each
// entry by the raw color bytes and encoding amounts as int64.
func EncodeBalances(balances map[balance.Color]int64) dict.Dict {
	ret := dict.New()
	for col, bal := range balances {
		ret.Set(kv.Key(col[:]), codec.EncodeInt64(bal))
	}
	return ret
}
// DecodeBalances is the inverse of EncodeBalances; it returns an error if
// any key fails to decode as a color or any value as an int64.
func DecodeBalances(balances dict.Dict) (map[balance.Color]int64, error) {
	ret := map[balance.Color]int64{}
	for col, bal := range balances {
		c, _, err := codec.DecodeColor([]byte(col))
		if err != nil {
			return nil, err
		}
		b, _, err := codec.DecodeInt64(bal)
		if err != nil {
			return nil, err
		}
		ret[c] = b
	}
	return ret, nil
}
|
package humanize
import (
"fmt"
"go/ast"
"go/parser"
"go/token"
"os"
"path/filepath"
"strings"
)
// Import is one imported path
type Import struct {
	// Package is the name the imported package declares for itself.
	Package string
	// Canonical is the name the import is referenced by in the file: the
	// alias when one is given, otherwise the package name.
	Canonical string
	// Path is the import path as written in the source (without quotes).
	Path string
	// Docs are the comments attached to the import spec.
	Docs Docs
	// Folder is the resolved on-disk directory; empty when unresolved.
	Folder string
	// pkg caches the parsed package after LoadPackage.
	pkg *Package
}
// String renders the import spec as it would appear in Go source: the
// canonical name is emitted only when it differs from the package name.
func (i *Import) String() string {
	if i.Canonical == i.Package {
		return fmt.Sprintf(`"%s"`, i.Path)
	}
	return fmt.Sprintf(`%s "%s"`, i.Canonical, i.Path)
}
// importWalker is an ast.Visitor that captures a file's package clause.
type importWalker struct {
	// pkgName is the package name found in the walked file, if any.
	pkgName string
	// resolved is the folder in which the package clause was found.
	resolved string
}
// Visit records the package name when the walked node is a file; all other
// nodes (including nil) are ignored. It always returns the walker so the
// traversal continues.
func (iw *importWalker) Visit(node ast.Node) ast.Visitor {
	if f, ok := node.(*ast.File); ok {
		iw.pkgName = nameFromIdent(f.Name)
	}
	return iw
}
// LoadPackage is the function to load import package.
// The parsed package is cached on the Import, so repeated calls parse at
// most once. Returns an error when the import was never resolved to a
// folder on disk.
//
// The receiver must be a pointer: with the previous value receiver the
// assignment to i.pkg mutated a copy, so the cache was silently discarded
// and every call re-parsed the package.
func (i *Import) LoadPackage() (*Package, error) {
	if i.Folder == "" {
		return nil, fmt.Errorf("the package '%s' is not resolved", i.Path)
	}
	if i.pkg == nil {
		p, err := parsePackageFullPath(i.Path, i.Folder)
		if err != nil {
			return nil, err
		}
		i.pkg = p
	}
	return i.pkg, nil
}
// peekPackageName resolves the declared package name and on-disk folder
// for an import path. It falls back to the last path segment as the name
// (and an empty folder) when the folder cannot be located or no Go file
// there yields a package clause.
func peekPackageName(pkg string, base ...string) (string, string) {
	// Default name: the final element of the import path.
	_, name := filepath.Split(pkg)
	folder, err := translateToFullPath(pkg, base...)
	if err != nil {
		return name, ""
	}
	iw := &importWalker{}
	// Walk the folder until one file parses far enough to reveal its
	// package clause; SkipDir then aborts the rest of the walk.
	_ = filepath.Walk(
		folder,
		func(path string, f os.FileInfo, _ error) error {
			data, err := getGoFileContent(path, folder, f)
			if err != nil || data == "" {
				return err
			}
			fset := token.NewFileSet()
			// PackageClauseOnly: only the package line is needed.
			fle, err := parser.ParseFile(fset, "", data, parser.PackageClauseOnly)
			if err != nil {
				return nil // try another file?
			}
			iw.resolved = folder
			ast.Walk(iw, fle)
			// no need to continue
			return filepath.SkipDir
		},
	)
	resolved := ""
	if iw.pkgName != "" {
		name = iw.pkgName
		resolved = iw.resolved
	}
	// can not parse it, use the folder name
	return name, resolved
}
// newImport extract a new import entry.
// The canonical name comes from an explicit alias when present, otherwise
// from the package name resolved on disk.
func newImport(p *Package, f *File, i *ast.ImportSpec, c *ast.CommentGroup) *Import {
	res := &Import{
		Package: "",
		Path:    strings.Trim(i.Path.Value, `"`),
		Docs:    docsFromNodeDoc(c, i.Doc),
	}
	// An explicit alias in the import spec wins as the canonical name.
	if i.Name != nil {
		res.Canonical = i.Name.String()
	}
	res.Package, res.Folder = peekPackageName(res.Path, p.Path)
	if res.Canonical == "" {
		res.Canonical = res.Package
	}
	return res
}
|
package problem
import (
"fmt"
"time"
)
// number is a simple counter with no synchronization of its own.
type number struct {
	value int64
}

// Add increases the counter by x.
//
// The previous implementation ignored x and always incremented by one,
// which only coincidentally worked because all callers passed Add(1).
func (c *number) Add(x int64) {
	c.value += x
}
// syncWithMutex demonstrates an UNsynchronized counter: 100 goroutines
// each add 1 ten thousand times with no locking, so the printed value is
// typically below the expected 1000000 due to the data race.
// NOTE(review): despite its name this function uses no mutex, and the
// time.Sleep is not a reliable way to wait for the goroutines to finish.
func syncWithMutex() {
	counter := number{}
	for i := 0; i < 100; i++ {
		go func(no int) {
			for i := 0; i < 10000; i++ {
				counter.Add(1)
			}
		}(i)
	}
	// Crude wait; some goroutines may still be running afterwards.
	time.Sleep(time.Second)
	fmt.Printf("Not sync value %d must 1000000\n", counter.value)
}
|
package field_test
import (
"bytes"
"encoding/hex"
"io"
"reflect"
"testing"
"github.com/tombell/go-serato/serato/field"
)
// TestNewField69Field verifies that a well-formed payload parses without
// error and yields a non-nil field.
func TestNewField69Field(t *testing.T) {
	raw, _ := hex.DecodeString("000000450000000400000000")
	buf := bytes.NewBuffer(raw)

	hdr, err := field.NewHeader(buf)
	if err != nil {
		t.Fatalf("expected NewHeader err to be nil, got %v", err)
	}

	// Use f rather than shadowing the imported field package.
	f, err := field.NewField69Field(hdr, buf)
	if err != nil {
		t.Fatalf("expected NewField69Field err to be nil, got %v", err)
	}
	if f == nil {
		t.Fatal("expected field to not be nil")
	}
}
// TestNewField69FieldUnexpectedEOF verifies that a truncated payload
// surfaces io.ErrUnexpectedEOF.
func TestNewField69FieldUnexpectedEOF(t *testing.T) {
	raw, _ := hex.DecodeString("000000450000000400000")
	buf := bytes.NewBuffer(raw)

	hdr, err := field.NewHeader(buf)
	if err != nil {
		t.Fatalf("expected NewHeader err to be nil, got %v", err)
	}

	if _, err = field.NewField69Field(hdr, buf); err != io.ErrUnexpectedEOF {
		t.Fatalf("expected NewField69Field err to be io.ErrUnexpectedEOF, got %v", err)
	}
}
// TestNewField69FieldUnexpectedIdentifier verifies that a header with the
// wrong identifier is rejected with field.ErrUnexpectedIdentifier.
func TestNewField69FieldUnexpectedIdentifier(t *testing.T) {
	raw, _ := hex.DecodeString("000000440000000400000000")
	buf := bytes.NewBuffer(raw)

	hdr, err := field.NewHeader(buf)
	if err != nil {
		t.Fatalf("expected NewHeader err to be nil, got %v", err)
	}

	if _, err = field.NewField69Field(hdr, buf); err != field.ErrUnexpectedIdentifier {
		t.Fatalf("expected NewField69Field err to be field.ErrUnexpectedIdentifier, got %v", err)
	}
}
// TestField69Value verifies the raw value bytes of a parsed field.
func TestField69Value(t *testing.T) {
	raw, _ := hex.DecodeString("000000450000000400000000")
	buf := bytes.NewBuffer(raw)

	hdr, err := field.NewHeader(buf)
	if err != nil {
		t.Fatalf("expected NewHeader err to be nil, got %v", err)
	}

	// Use f rather than shadowing the imported field package.
	f, err := field.NewField69Field(hdr, buf)
	if err != nil {
		t.Fatalf("expected NewField69Field err to be nil, got %v", err)
	}

	expected := []byte{0, 0, 0, 0}
	if actual := f.Value(); !reflect.DeepEqual(actual, expected) {
		t.Fatalf("expected value to be %v, got %v", expected, actual)
	}
}
// TestField69String verifies the string rendering of a parsed field.
func TestField69String(t *testing.T) {
	raw, _ := hex.DecodeString("000000450000000400000000")
	buf := bytes.NewBuffer(raw)

	hdr, err := field.NewHeader(buf)
	if err != nil {
		t.Fatalf("expected NewHeader err to be nil, got %v", err)
	}

	// Use f rather than shadowing the imported field package.
	f, err := field.NewField69Field(hdr, buf)
	if err != nil {
		t.Fatalf("expected NewField69Field err to be nil, got %v", err)
	}

	expected := "[0 0 0 0]"
	if actual := f.String(); actual != expected {
		t.Fatalf("expected value to be %v, got %v", expected, actual)
	}
}
|
// Copyright 2015-2018 trivago N.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package format
import (
"strconv"
"sync/atomic"
"gollum/core"
)
// Sequence formatter
//
// This formatter prefixes data with a sequence number managed by the
// formatter. All messages passing through an instance of the
// formatter will get a unique number. The number is not persisted,
// i.e. it restarts at 0 after each restart of gollum.
//
// Parameters
//
// - Separator: Defines the separator string placed between number and data.
// By default this parameter is set to ":".
//
// Examples
//
// This example will insert the sequence number into an existing JSON payload.
//
// exampleProducer:
// Type: producer.Console
// Streams: "*"
// Modulators:
// - format.Trim:
// LeftSeparator: "{"
// RightSeparator: "}"
// - format.Sequence
// Separator: ","
// - format.Envelope:
// Prefix: "{\"seq\":"
// Postfix: "}"
type Sequence struct {
	core.SimpleFormatter `gollumdoc:"embed_type"`
	// separator is the byte sequence placed between the number and the
	// payload; presumably populated via its config struct tag — confirm.
	separator []byte `config:"Separator" default:":"`
	// seq points to the shared counter, incremented atomically per message.
	seq *int64
}
// init registers the Sequence formatter with gollum's type registry so it
// can be instantiated from configuration.
func init() {
	core.TypeRegistry.Register(Sequence{})
}
// Configure initializes this formatter with values from a plugin config.
// Only the sequence counter needs explicit setup here; separator appears
// to be filled in from its struct tag by the config reader — confirm.
func (format *Sequence) Configure(conf core.PluginConfigReader) {
	format.seq = new(int64)
}
// ApplyFormatter update message payload.
// A fresh sequence number is prepended to the source data, separated from
// it by the configured separator.
func (format *Sequence) ApplyFormatter(msg *core.Message) error {
	seq := atomic.AddInt64(format.seq, 1)
	content := format.GetSourceDataAsBytes(msg)

	// Build "<seq><separator><content>" with append-style composition.
	payload := strconv.AppendInt(nil, seq, 10)
	payload = append(payload, format.separator...)
	payload = append(payload, content...)

	format.SetTargetData(msg, payload)
	return nil
}
|
// Copyright 2018 Andrew Bates
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package insteon
import (
"time"
)
// PacketRequest is used to request that a packetized (marshaled) insteon
// message be sent to the network. Once the upstream device (PLM usually)
// has attempted to send the packet, the Err field will be assigned and
// DoneCh will be written to and closed
type PacketRequest struct {
	// Payload is the marshaled message to transmit.
	Payload []byte
	// Err reports the outcome of the send attempt.
	Err error
	// DoneCh receives this request once the send attempt has completed.
	DoneCh chan<- *PacketRequest
}
// MessageRequest is used to request a message be sent to a specific device.
// Once the connection has sent the message and either received an ack or
// encountered an error, the Ack and Err fields will be filled and DoneCh
// will be written to and closed
type MessageRequest struct {
	// Message is the message to deliver.
	Message *Message
	// timeout is the deadline for this request.
	timeout time.Time
	// Ack is the acknowledgment received, if any.
	Ack *Message
	// Err reports the outcome of the request.
	Err error
	// DoneCh receives this request once it has completed.
	DoneCh chan<- *MessageRequest
}
// Network is the main means to communicate with
// devices on the Insteon network
type Network struct {
	// timeout bounds how long to wait for expected responses.
	timeout time.Duration
	// DB caches product information learned from the network.
	DB ProductDatabase
	// connections are the subscriber channels that receive every message.
	connections []chan<- *Message
	// sendCh carries outbound packet requests to the upstream device.
	sendCh chan<- *PacketRequest
	// recvCh delivers raw inbound packets from the upstream device.
	recvCh <-chan []byte
	// connectCh registers a new subscriber channel.
	connectCh chan chan<- *Message
	// disconnectCh unregisters a subscriber channel.
	disconnectCh chan chan<- *Message
	// closeCh requests shutdown and returns the close error.
	closeCh chan chan error
}
// New creates a new Insteon network instance for the send and receive channels. The timeout
// indicates how long the network (and subsequent devices) should wait when expecting incoming
// messages/responses. The event loop goroutine is started immediately.
func New(sendCh chan<- *PacketRequest, recvCh <-chan []byte, timeout time.Duration) *Network {
	network := &Network{
		timeout: timeout,
		DB:      NewProductDB(),
		sendCh:  sendCh,
		recvCh:  recvCh,

		connectCh:    make(chan chan<- *Message),
		disconnectCh: make(chan chan<- *Message),
		closeCh:      make(chan chan error),
	}

	go network.process()
	return network
}
// process is the Network's event loop. It is the sole owner of the
// connections slice, serializing registration, de-registration and fan-out.
// The loop exits when recvCh is closed by the upstream link or when a close
// request arrives on closeCh.
func (network *Network) process() {
	// ensure connections are torn down even if recvCh closes unexpectedly
	defer network.close()
	for {
		select {
		case pkt, open := <-network.recvCh:
			if !open {
				return
			}
			network.receive(pkt)
		case connection := <-network.connectCh:
			network.connections = append(network.connections, connection)
		case connection := <-network.disconnectCh:
			network.disconnect(connection)
		case ch := <-network.closeCh:
			// report the close result to the Close() caller, then stop
			ch <- network.close()
			return
		}
	}
}
// receive unmarshals one raw packet from the upstream link, harvests device
// information from identifying messages into the product database, and fans
// the message out to every registered connection.
func (network *Network) receive(buf []byte) {
	msg := &Message{}
	err := msg.UnmarshalBinary(buf)
	if err != nil {
		// BUG FIX: the original called Log.Errorf unconditionally after the
		// success branch, emitting a spurious "Failed unmarshalling" log line
		// (with a nil error) for every successfully parsed message. The error
		// log now fires only when unmarshalling actually failed.
		Log.Errorf(err, "Failed unmarshalling message received from network: %v", err)
		return
	}
	Log.Tracef("Received Insteon Message %v", msg)
	if msg.Broadcast() {
		// Set Button Pressed Controller/Responder broadcasts carry the
		// device's DevCat and firmware version in the Dst field
		if msg.Command[1] == 0x01 || msg.Command[1] == 0x02 {
			network.DB.UpdateFirmwareVersion(msg.Src, FirmwareVersion(msg.Dst[2]))
			network.DB.UpdateDevCat(msg.Src, DevCat{msg.Dst[0], msg.Dst[1]})
		}
	} else if msg.Ack() && msg.Command[1] == 0x0d {
		// Engine Version Request ACK
		network.DB.UpdateEngineVersion(msg.Src, EngineVersion(msg.Command[2]))
	}
	for _, connection := range network.connections {
		connection <- msg
	}
}
// disconnect removes the given channel from the connection list, closing it
// so the receiver observes the shutdown. Unknown channels are ignored.
func (network *Network) disconnect(connection chan<- *Message) {
	for i := range network.connections {
		if network.connections[i] != connection {
			continue
		}
		close(network.connections[i])
		network.connections = append(network.connections[:i], network.connections[i+1:]...)
		return
	}
}
// sendMessage marshals msg, patches in an I2CS checksum when required, and
// hands the packet to the upstream send channel, blocking until the send
// attempt completes.
func (network *Network) sendMessage(msg *Message) error {
	buf, err := msg.MarshalBinary()
	if err == nil {
		Log.Tracef("Sending %v to network", msg)
		if info, found := network.DB.Find(msg.Dst); found {
			if msg.Flags.Extended() && info.EngineVersion == VerI2Cs {
				// I2CS devices require a checksum in the final payload byte;
				// assumes bytes 7..21 span the command and user-data fields —
				// TODO confirm offsets against the marshaled wire layout
				buf[len(buf)-1] = checksum(buf[7:22])
			}
		}
		// buffered so the upstream writer never blocks delivering our completion
		doneCh := make(chan *PacketRequest, 1)
		request := &PacketRequest{buf, nil, doneCh}
		network.sendCh <- request
		<-doneCh
		err = request.Err
	}
	return err
}
// EngineVersion will query the dst device to determine its Insteon engine
// version
func (network *Network) EngineVersion(dst Address) (engineVersion EngineVersion, err error) {
	conn := network.connect(dst, 1, CmdGetEngineVersion)
	// closing sendCh tears down the per-connection goroutine started by connect
	defer func() { close(conn.sendCh) }()
	doneCh := make(chan *MessageRequest, 1)
	request := &MessageRequest{Message: &Message{Command: CmdGetEngineVersion, Flags: StandardDirectMessage}, DoneCh: doneCh}
	conn.sendCh <- request
	<-doneCh
	if request.Err == nil {
		// the engine version is carried in command2 of the ACK
		engineVersion = EngineVersion(request.Ack.Command[2])
	}
	return engineVersion, request.Err
}
// IDRequest will send an ID Request message to the destination device and wait for
// either a "Set-button Pressed Controller" or "Set-button Pressed Responder" broadcast
// message. This message includes the device category and firmaware information which
// is then returned in the DeviceInfo object. It should be noted that the returned
// DeviceInfo object will not have the engine version field populated as this information
// is not included in the broadcast response.
func (network *Network) IDRequest(dst Address) (info DeviceInfo, err error) {
	info = DeviceInfo{
		Address: dst,
	}
	conn := network.connect(dst, 1, CmdSetButtonPressedResponder, CmdSetButtonPressedController)
	defer func() { close(conn.sendCh) }()
	doneCh := make(chan *MessageRequest, 1)
	request := &MessageRequest{Message: &Message{Command: CmdIDRequest, Flags: StandardDirectMessage}, DoneCh: doneCh}
	conn.sendCh <- request
	<-doneCh
	err = request.Err
	if err == nil {
		// wait for the broadcast response; receive() will already have
		// recorded DevCat/firmware in the product DB by the time it is
		// fanned out to this connection, so the DB lookup below is populated
		for {
			select {
			case msg := <-conn.recvCh:
				if msg.Broadcast() {
					info, _ = network.DB.Find(dst)
					return
				}
			case <-time.After(network.timeout):
				err = ErrReadTimeout
				return
			}
		}
	}
	return
}
// connect wires up a per-device connection: a goroutine drains sendCh,
// forwarding each request upstream, while recvCh is registered with the
// event loop so incoming messages are fanned out to it. Closing sendCh
// terminates the goroutine, which then unregisters recvCh.
func (network *Network) connect(dst Address, version EngineVersion, match ...Command) *connection {
	sendCh := make(chan *MessageRequest, 1)
	recvCh := make(chan *Message, 1)
	go func() {
		for request := range sendCh {
			request.Err = network.sendMessage(request.Message)
			request.DoneCh <- request
		}
		// sender side closed; detach this connection from the event loop
		network.disconnectCh <- recvCh
	}()
	connection := newConnection(sendCh, recvCh, dst, version, network.timeout, match...)
	network.connectCh <- recvCh
	return connection
}
// Dial will return a basic device object that can appropriately communicate
// with the physical device out on the insteon network. Dial will determine
// the engine version (1, 2, or 2CS) that the device is running and return
// either an I1Device, I2Device or I2CSDevice. For a fully initialized
// device (dimmer, switch, thermostat, etc) use Connect
func (network *Network) Dial(dst Address) (device Device, err error) {
	var info DeviceInfo
	var found bool
	if info, found = network.DB.Find(dst); !found {
		info.EngineVersion, err = network.EngineVersion(dst)
		// ErrNotLinked here is only returned by i2cs devices
		if err == ErrNotLinked {
			network.DB.UpdateEngineVersion(dst, VerI2Cs)
			info.EngineVersion = VerI2Cs
		}
	}
	// ErrNotLinked is tolerated: the device responded, it just has no link
	// record for us yet
	if err == nil || err == ErrNotLinked {
		connection := network.connect(dst, info.EngineVersion)
		switch info.EngineVersion {
		case VerI1:
			device = NewI1Device(dst, connection.sendCh, connection.recvCh, network.timeout)
		case VerI2:
			device = NewI2Device(dst, connection.sendCh, connection.recvCh, network.timeout)
		case VerI2Cs:
			device = NewI2CsDevice(dst, connection.sendCh, connection.recvCh, network.timeout)
		default:
			err = ErrVersion
		}
	}
	return device, err
}
// Connect will Dial the destination device and then determine the device category
// in order to return a category specific device (dimmer, switch, etc). If, for
// some reason, the devcat cannot be determined, then the device returned
// by Dial is returned
func (network *Network) Connect(dst Address) (device Device, err error) {
	var info DeviceInfo
	var found bool
	if info, found = network.DB.Find(dst); !found {
		// not cached: query engine version then devcat from the device itself
		info.EngineVersion, err = network.EngineVersion(dst)
		if err == nil {
			info, err = network.IDRequest(dst)
		}
	}
	if err == nil {
		if constructor, found := Devices.Find(info.DevCat.Category()); found {
			connection := network.connect(dst, info.EngineVersion)
			device, err = constructor(info, dst, connection.sendCh, connection.recvCh, network.timeout)
		} else {
			// no category-specific constructor registered; fall back to a
			// basic engine-version device
			device, err = network.Dial(dst)
		}
	}
	return
}
// close shuts down every registered connection channel and clears the list.
// It always reports success.
func (network *Network) close() error {
	for _, ch := range network.connections {
		close(ch)
	}
	network.connections = nil
	return nil
}
// Close will cleanup/close open connections and disconnect gracefully
func (network *Network) Close() error {
	ch := make(chan error)
	// hand the event loop a reply channel and wait for it to finish
	network.closeCh <- ch
	// NOTE(review): closing closeCh here means a second Close call would
	// panic on the send above/close below — confirm Close is only called once
	close(network.closeCh)
	err := <-ch
	if err == nil {
		// this network is the sender on sendCh, so it is the one to close it
		close(network.sendCh)
	}
	return err
}
|
/*
* Copyright 2018- The Pixie Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package scriptrunner
import (
"context"
"px.dev/pixie/src/shared/cvmsgspb"
)
// A Source provides an initial set of cron scripts and sends incremental updates to that set.
type Source interface {
	// Start sends updates on updatesCh and returns the initial set of scripts which the updates are based on. It does not block.
	// The map is presumably keyed by the script's ID — TODO confirm against implementations.
	Start(baseCtx context.Context, updatesCh chan<- *cvmsgspb.CronScriptUpdate) (map[string]*cvmsgspb.CronScript, error)
	// Stop sending updates on the updatesCh provided in Start.
	// This method must not be called before Start.
	Stop()
}
|
package main
import (
"fmt"
"go/wordfilter/trie"
"net/http"
"stayreal/goini"
"github.com/gorilla/mux"
"github.com/urfave/negroni"
log "common/log4go"
)
// WordFilterServer serves word-filtering HTTP endpoints backed by a trie of
// banned words.
type WordFilterServer struct {
	trie    *trie.Trie // banned-word dictionary
	listen  string     // host:port to serve on
	replace string     // replacement text for matched words
}
// NewWordFilterServer builds the server from config.ini: it reads the listen
// address and replacement string, then loads the word list — preferring the
// remote source and falling back to the local file when the remote is empty
// or fails to load.
func NewWordFilterServer() *WordFilterServer {
	cfg := goini.Init("config.ini")
	local := cfg.ReadString("COMMON", "local", "word.txt")
	remote := cfg.ReadString("COMMON", "remote", "http://127.0.0.1:8080/word.txt")
	timeout := cfg.ReadInt("COMMON", "timeout", 5)
	srv := &WordFilterServer{
		trie:    trie.NewTrie(local),
		listen:  cfg.ReadString("COMMON", "listen", "0.0.0.0:8080"),
		replace: cfg.ReadString("COMMON", "replace", "***"),
	}
	log.Info("NewWordFilterServer listen=%s local=%s remote=%s replace=%s timeout=%d",
		srv.listen, local, remote, srv.replace, timeout)
	var err error
	if remote != "" {
		log.Info("NewWordFilterServer server.trie.LoadRemote remote=%s", remote)
		if err = srv.trie.LoadRemote(remote, timeout); err != nil {
			log.Error("NewWordFilterServer server.trie.LoadRemote : %s", err.Error())
		}
	} else {
		log.Debug("NewWordFilterServer remote is empty")
	}
	if remote == "" || err != nil {
		log.Info("NewWordFilterServer server.trie.LoadLocal local=%s", local)
		if err = srv.trie.LoadLocal(); err != nil {
			log.Error("NewWordFilterServer server.trie.LoadLocal : %s", err.Error())
		}
	}
	return srv
}
// FindAndReplace handles GET /wordfilter/find_and_replace: it reads the
// "text" query parameter, replaces filtered words with the configured
// replacement string, and writes the result back to the client.
func (server *WordFilterServer) FindAndReplace(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	// idiom fix: read the query values directly instead of pre-allocating a
	// throwaway slice and bool and assigning into them
	texts, exist := r.URL.Query()["text"]
	if !exist || len(texts) <= 0 {
		log.Warn("WordFilterServer.FindAndReplace text empty!")
		w.Write([]byte("text is need!"))
		return
	}
	text := texts[0]
	replaced := server.trie.FindAndReplace(text, server.replace)
	log.Debug("WordFilterServer.FindAndReplace text=%s replaced=%s", text, replaced)
	w.Write([]byte(replaced))
}
// AddWord handles /wordfilter/add_word: it inserts the "word" query
// parameter into the trie and replies "OK" or the error text.
func (server *WordFilterServer) AddWord(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	// idiom fix: read the query values directly instead of pre-allocating a
	// throwaway slice and bool and assigning into them
	words, exist := r.URL.Query()["word"]
	if !exist || len(words) <= 0 {
		log.Warn("WordFilterServer.AddWord word empty!")
		w.Write([]byte("word is need!"))
		return
	}
	word := words[0]
	log.Debug("WordFilterServer.AddWord word=%s", word)
	err := server.trie.AddWord(word, true)
	if err != nil {
		log.Error("WordFilterServer.AddWord err=%s", err.Error())
		w.Write([]byte(err.Error()))
		return
	}
	w.Write([]byte("OK"))
}
// DeleteWord handles /wordfilter/delete_word: it removes the "word" query
// parameter from the trie and replies "OK" or the error text.
func (server *WordFilterServer) DeleteWord(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	// idiom fix: read the query values directly instead of pre-allocating a
	// throwaway slice and bool and assigning into them
	words, exist := r.URL.Query()["word"]
	if !exist || len(words) <= 0 {
		log.Warn("WordFilterServer.DeleteWord word empty!")
		w.Write([]byte("word is need!"))
		return
	}
	word := words[0]
	log.Debug("WordFilterServer.DeleteWord word=%s", word)
	err := server.trie.DeleteWord(word)
	if err != nil {
		log.Error("WordFilterServer.DeleteWord err=%s", err.Error())
		w.Write([]byte(err.Error()))
		return
	}
	w.Write([]byte("OK"))
}
// Info handles /wordfilter/info and reports how many words are loaded.
func (server *WordFilterServer) Info(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	w.Write([]byte(fmt.Sprintf("words total:%d\n", server.trie.WordCount())))
}
// Start registers the HTTP routes, mounts a static file server on "/" and
// blocks serving on the configured listen address.
func (server *WordFilterServer) Start() {
	m := mux.NewRouter()
	m.Handle("/wordfilter/find_and_replace", http.HandlerFunc(server.FindAndReplace))
	m.Handle("/wordfilter/add_word", http.HandlerFunc(server.AddWord))
	m.Handle("/wordfilter/delete_word", http.HandlerFunc(server.DeleteWord))
	m.Handle("/wordfilter/info", http.HandlerFunc(server.Info))
	// any other path serves static files from /data
	m.PathPrefix("/").Handler(http.FileServer(http.Dir("/data")))
	n := negroni.Classic()
	n.UseHandler(m)
	log.Debug("WordFilterServer.Start listen=%s", server.listen)
	// Run blocks for the lifetime of the process
	n.Run(server.listen)
}
|
package filecenter
import (
"log"
"moriaty.com/cia/cia-supporter/bean"
"moriaty.com/cia/cia-supporter/dao"
)
/**
* @author 16计算机 Moriaty
* @version 1.0
* @copyright :Moriaty 版权所有 © 2020
* @date 2020/4/7 14:25
* @Description TODO
* 文件中心服务 dao
*/
// InsertFile persists file to the file table and returns the auto-generated
// primary key of the new row.
func InsertFile(file *bean.File) (int64, error) {
	ret, err := dao.DB.Exec(
		"INSERT INTO file(file_name, file_location, secret, create_time) VALUES (?, ?, ?, ?)",
		file.FileName, file.FileLocation, file.Secret, file.CreateTime,
	)
	if err != nil {
		log.Printf("insert file failed, err: %v", err)
		return 0, err
	}
	id, err := ret.LastInsertId()
	if err != nil {
		log.Printf("get last insert Id failed, err: %v", err)
		return 0, err
	}
	return id, nil
}
// FindFileById looks up a single file row by its primary key.
func FindFileById(id int64) (*bean.File, error) {
	file := new(bean.File)
	query := "SELECT id, file_name, file_location, secret, create_time FROM file WHERE id = ?"
	if err := dao.DB.Get(file, query, id); err != nil {
		log.Printf("get file by id failed, err: %v", err)
		return nil, err
	}
	return file, nil
}
// FindFileByTaskAndSecret returns all file rows belonging to the given task
// with the given secret (permission) value.
func FindFileByTaskAndSecret(task int64, secret string) ([]*bean.File, error) {
	var files []*bean.File
	query := "SELECT id, file_name, file_location, secret, create_time FROM file WHERE task = ? AND secret = ?"
	if err := dao.DB.Select(&files, query, task, secret); err != nil {
		log.Printf("get file by task and secret failed, err: %v", err)
		return nil, err
	}
	return files, nil
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package task
import (
"encoding/json"
"fmt"
"net"
"net/http"
"net/http/httptest"
"testing"
"github.com/kubevela/workflow/pkg/cue/model/value"
"cuelang.org/go/cue/cuecontext"
cueJson "cuelang.org/go/pkg/encoding/json"
"github.com/stretchr/testify/assert"
"github.com/oam-dev/kubevela/pkg/cue/process"
)
// TaskTemplate is the CUE template under test: "processing.http" issues a GET
// to parameter.serviceURL and the returned token is patched into the output.
// The string body must stay byte-identical — it is evaluated by CUE at runtime.
const TaskTemplate = `
parameter: {
  serviceURL: string
}

processing: {
  output: {
    token ?: string
  }
  http: {
    method: *"GET" | string
    url: parameter.serviceURL
    request: {
      body ?: bytes
      header: {}
      trailer: {}
    }
  }
}

patch: {
  data: token: processing.output.token
}

output: {
  data: processing.output.token
}
`
// TestProcess compiles the task template against a mock token service and
// verifies that the rendered output carries the token the service returned.
func TestProcess(t *testing.T) {
	srv := NewMock()
	defer srv.Close()
	tmpl := cuecontext.New().CompileString(TaskTemplate)
	params := map[string]interface{}{
		"serviceURL": "http://127.0.0.1:8090/api/v1/token?val=test-token",
	}
	tmpl = tmpl.FillPath(value.FieldPath(process.ParameterFieldName), params)
	inst, err := Process(tmpl)
	if err != nil {
		t.Fatal(err)
	}
	rendered, _ := cueJson.Marshal(inst.LookupPath(value.FieldPath("output")))
	assert.Equal(t, "{\"data\":\"test-token\"}", rendered)
}
func NewMock() *httptest.Server {
ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method != "GET" {
fmt.Printf("Expected 'GET' request, got '%s'", r.Method)
}
if r.URL.EscapedPath() != "/api/v1/token" {
fmt.Printf("Expected request to '/person', got '%s'", r.URL.EscapedPath())
}
_ = r.ParseForm()
token := r.Form.Get("val")
tokenBytes, _ := json.Marshal(map[string]interface{}{"token": token})
w.WriteHeader(http.StatusOK)
w.Write(tokenBytes)
}))
l, _ := net.Listen("tcp", "127.0.0.1:8090")
_ = ts.Listener.Close()
ts.Listener = l
ts.Start()
return ts
}
|
package filemonitor
import (
"crypto/tls"
"crypto/x509"
"sync"
"github.com/fsnotify/fsnotify"
"github.com/sirupsen/logrus"
)
// certStore holds a TLS key pair loaded from disk and guards concurrent
// access to it while it is hot-reloaded on filesystem events.
type certStore struct {
	mutex      sync.RWMutex     // guards cert
	cert       *tls.Certificate // currently active key pair
	tlsCrtPath string           // path the certificate is (re)loaded from
	tlsKeyPath string           // path the private key is (re)loaded from
}
// NewCertStore returns a store for storing the certificate data and the
// ability to retrieve it safely. It fails if the initial key pair cannot be
// loaded from tlsCrt/tlsKey.
func NewCertStore(tlsCrt, tlsKey string) (*certStore, error) {
	initial, err := tls.LoadX509KeyPair(tlsCrt, tlsKey)
	if err != nil {
		return nil, err
	}
	// the zero-value RWMutex is ready to use; no explicit init needed
	store := &certStore{
		cert:       &initial,
		tlsCrtPath: tlsCrt,
		tlsKeyPath: tlsKey,
	}
	return store, nil
}
// HandleFilesystemUpdate is intended to be used as the OnUpdateFn for a watcher
// and expects the certificate files to be in the same directory.
func (k *certStore) HandleFilesystemUpdate(logger logrus.FieldLogger, event fsnotify.Event) {
	// BUG FIX: fsnotify.Event.Op is a bitmask and may carry several flags at
	// once; the original equality switch (case fsnotify.Create) missed events
	// where Create arrived combined with another op. Test the bit instead.
	if event.Op&fsnotify.Create == 0 {
		return
	}
	logger.Debugf("got fs event for %v", event.Name)
	if err := k.storeCertificate(k.tlsCrtPath, k.tlsKeyPath); err != nil {
		// this can happen if both certificates aren't updated at the same
		// time, but it's okay as replacement only occurs with a valid key pair
		logger.Debugf("certificates not in sync: %v", err)
		return
	}
	// NOTE(review): k.cert is read here without holding k.mutex, matching the
	// original code — confirm no concurrent writer can race this read.
	info, err := x509.ParseCertificate(k.cert.Certificate[0])
	if err != nil {
		logger.Debugf("certificates refreshed, but parsing returned error: %v", err)
		return
	}
	logger.Debugf("certificates refreshed: Subject=%v NotBefore=%v NotAfter=%v", info.Subject, info.NotBefore, info.NotAfter)
}
// storeCertificate replaces the cached certificate with a freshly loaded key
// pair; on load failure the previous certificate is kept and the error is
// returned.
func (k *certStore) storeCertificate(tlsCrt, tlsKey string) error {
	loaded, err := tls.LoadX509KeyPair(tlsCrt, tlsKey)
	if err != nil {
		return err
	}
	k.mutex.Lock()
	k.cert = &loaded
	k.mutex.Unlock()
	return nil
}
// GetCertificate returns the currently cached key pair under a read lock,
// safe for concurrent use with storeCertificate.
func (k *certStore) GetCertificate() *tls.Certificate {
	k.mutex.RLock()
	defer k.mutex.RUnlock()
	return k.cert
}
|
package models
import (
// "encoding/json"
"errors"
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/go-xorm/xorm"
"time"
)
type MyDate time.Time
func (self MyDate) MarshalJSON() ([]byte, error) {
t := time.Time(self)
if y := t.Year(); y < 0 || y >= 10000 {
if y < 2000 {
return []byte(`"2000-01-01 00:00:00"`), nil
}
return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]")
}
return []byte(t.Format(`"2006-01-02 15:04:05"`)), nil
}
// VersionTag maps one row of the version-tag table; each string field holds
// the deployed version of the named component.
// NOTE(review): Create_time and Update_time both carry json tag "ctime".
// encoding/json drops fields with conflicting names entirely, so neither
// timestamp appears in marshaled output — Update_time was presumably meant
// to be tagged "utime"; confirm with API consumers before changing.
type VersionTag struct {
	Versionid        string `json:"versionid"`
	Agent            string `json:"agent"`
	Interact         string `json:"interact"`
	Interactbusiness string `json:"interactbusiness"`
	Jycenter         string `json:"jycenter"`
	Jyresource       string `json:"jyresource"`
	Middlecas        string `json:"middlecas"`
	Middlecenter     string `json:"middlecenter"`
	Middlecenterfile string `json:"middlecenterfile"`
	Middlecenterres  string `json:"middlecenterres"`
	Middleclient     string `json:"middleclient"`
	Middledriver     string `json:"middledriver"`
	Middleresource   string `json:"middleresource"`
	Middlewaremcu    string `json:"middleware-mcu"`
	Middletherepair  string `json:"middletherepair"`
	Mysql            string `json:"mysql"`
	Nginx            string `json:"nginx"`
	Openfire         string `json:"openfire"`
	Redis            string `json:"redis"`
	Middledatabase   string `json:"middledatabase"`
	Filesrv          string `json:"filesrv"`
	Ftp              string `json:"ftp"`
	Mbs              string `json:"mbs"`
	// timestamps are maintained by xorm via the created/updated tags
	Create_time MyDate `json:"ctime" xorm:"created"`
	Update_time MyDate `json:"ctime" xorm:"updated"`
	// Versionid string `xorm:"versionid varchar(36)"`
	// Agent string `xorm:"'agent' varchar(100)"`
	// Interact string `xorm:"'interact' varchar(100)"`
	// Interactbusiness string `xorm:"'interactbusiness' varchar(100)"`
	// Jycenter string `xorm:"'jycenter' varchar(100)"`
	// Jyresource string `xorm:"'jyresource' varchar(100)"`
	// Middlecas string `xorm:"'middlecas' varchar(100)"`
	// Middlecenter string `xorm:"middlecenter varchar(36)"`
	// Middlecenterfile string `xorm:"middlecenterfile varchar(36)"`
	// Middlecenterres string `xorm:"middlecenterres varchar(36)"`
	// Middleclient string `xorm:"middleclient varchar(36)"`
	// Middledriver string `xorm:"middledriver varchar(36)"`
	// Middleresource string `xorm:"middleresource varchar(36)"`
	// Middlewaremcu string `xorm:"middlewaremcu varchar(36)"`
	// Middletherepair string `xorm:"middletherepair varchar(36)"`
	// Mysql string `xorm:"mysql varchar(36)"`
	// Nginx string `xorm:"nginx varchar(36)"`
	// Openfire string `xorm:"openfire varchar(36)"`
	// Redis string `xorm:"redis varchar(36)"`
	// Middledatabase string `xorm:"middledatabase varchar(36)"`
	// Filesrv string `xorm:"filesrv varchar(36)"`
	// Ftp string `xorm:"ftp varchar(36)"`
	// Mbs string `xorm:"mbs varchar(36)"`
	// Create_time time.Time `xorm:"create_time"`
	// Update_time time.Time `xorm:"update_time"`
}
// Add inserts this VersionTag row and returns the number of affected rows.
func (this *VersionTag) Add() (int64, error) {
	engine, err := xorm.NewEngine("mysql", "root:123456@tcp(localhost:3306)/casedb?charset=utf8")
	// BUG FIX: the original discarded this error and went on to Insert on a
	// possibly unusable engine
	if err != nil {
		return 0, err
	}
	// `&(*this)` in the original was a redundant dereference/re-address
	return engine.Insert(this)
}
// GetJson returns every row of case_info as a slice of column-name -> value
// maps, or nil if the database cannot be reached or queried.
func GetJson() []map[string]string {
	engine, err := xorm.NewEngine("mysql", "root:123456@tcp(localhost:3306)/casedb?charset=utf8")
	CheckErr(err)
	// BUG FIX: the original kept going and queried even when engine creation
	// had already failed
	if err != nil {
		return nil
	}
	res, err := engine.QueryString("select * from case_info;")
	CheckErr(err)
	if err != nil {
		return nil
	}
	return res
}
// GetAll returns all VersionTag rows, or nil if the database cannot be
// reached or the query fails.
func (this *VersionTag) GetAll() []VersionTag {
	engine, err := xorm.NewEngine("mysql", "root:123456@tcp(localhost:3306)/casedb?charset=utf8")
	CheckErr(err)
	// BUG FIX: the original configured and queried the engine even when
	// opening it had already failed
	if err != nil {
		return nil
	}
	// store and interpret timestamps in UTC
	engine.DatabaseTZ = time.UTC
	engine.TZLocation = time.UTC
	all := make([]VersionTag, 0)
	if err = engine.Find(&all); err != nil {
		CheckErr(err)
		return nil
	}
	return all
}
// DeleteOne removes the row matching versionid and reports whether the
// delete succeeded.
func (this *VersionTag) DeleteOne(versionid string) bool {
	engine, err := xorm.NewEngine("mysql", "root:123456@tcp(localhost:3306)/casedb?charset=utf8")
	// BUG FIX: the original discarded the engine-creation error and went on
	// to Delete on a possibly unusable engine
	if err != nil {
		return false
	}
	num, err := engine.Where("versionid=?", versionid).Delete(this)
	if err != nil {
		return false
	}
	fmt.Println("delete num:", num)
	return true
}
// GetVersionIds returns all rows with only the versionid column populated.
func GetVersionIds() []VersionTag {
	engine, err := xorm.NewEngine("mysql", "root:123456@tcp(localhost:3306)/casedb?charset=utf8")
	// BUG FIX: the original discarded this error and queried a possibly
	// unusable engine
	if err != nil {
		fmt.Println(err)
		return nil
	}
	tags := make([]VersionTag, 0)
	if err = engine.Cols("versionid").Find(&tags); err != nil {
		fmt.Println(err)
	}
	return tags
}
func CheckErr(err error) {
if err != nil {
fmt.Println(err)
}
}
|
package main
import (
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
)
const (
	// namespace is the fake-cluster namespace used throughout these tests
	namespace = "default"
	// taintNodeNotReadyName is the taint key the controller manages
	taintNodeNotReadyName = "notReady"
)
// setupMockKubernetes builds a fake clientset pre-populated with an optional
// node, an optional config map in the test namespace, and one pod
// ("default/foo", labeled foo=bar) for selector matching.
func setupMockKubernetes(t *testing.T, node *v1.Node, config *v1.ConfigMap) kubernetes.Interface {
	cs := fake.NewSimpleClientset()
	if node != nil {
		if _, err := cs.CoreV1().Nodes().Create(node); err != nil {
			t.Error(err)
		}
	}
	if config != nil {
		if _, err := cs.CoreV1().ConfigMaps(namespace).Create(config); err != nil {
			t.Error(err)
		}
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "foo",
			Labels:    map[string]string{"foo": "bar"},
		},
	}
	if _, err := cs.CoreV1().Pods(pod.Namespace).Create(pod); err != nil {
		t.Error(err)
	}
	return cs
}
// TestRunOnce verifies a single controller pass succeeds against a fake
// cluster seeded with a tainted node and a valid selector config map.
func TestRunOnce(t *testing.T) {
	for _, tc := range []struct {
		msg     string
		node    *v1.Node
		config  *v1.ConfigMap
		success bool
	}{
		{
			msg: "runOnce should succeed.",
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: "foo",
				},
				Spec: v1.NodeSpec{
					// one managed not-ready taint plus an unrelated taint
					Taints: []v1.Taint{
						{
							Key: taintNodeNotReadyName,
						},
						{
							Key: "foo",
						},
					},
				},
			},
			config: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "config",
					Namespace: namespace,
				},
				Data: map[string]string{ConfigMapSelectorsKey: `selectors:
- namespace: kube-system
  labels:
    foo: bar`},
			},
			success: true,
		},
	} {
		t.Run(tc.msg, func(t *testing.T) {
			controller := &NodeController{
				Interface: setupMockKubernetes(t, tc.node, tc.config),
				configMap: "",
				namespace: namespace,
			}
			// point the controller at the config map only when one exists
			if tc.config != nil {
				controller.configMap = tc.config.Name
			}
			err := controller.runOnce()
			if err != nil && tc.success {
				t.Errorf("should not fail: %s", err)
			}
		})
	}
}
// TestRun smoke-tests controller startup/shutdown: it launches the Run loop
// against a fake cluster and immediately signals it to stop.
func TestRun(t *testing.T) {
	// buffered so the send below cannot block even if Run has not yet
	// started receiving
	stopCh := make(chan struct{}, 1)
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foo",
		},
		Spec: v1.NodeSpec{
			Taints: []v1.Taint{
				{
					Key: taintNodeNotReadyName,
				},
				{
					Key: "foo",
				},
			},
		},
	}
	config := &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "config",
			Namespace: namespace,
		},
		Data: map[string]string{ConfigMapSelectorsKey: `selectors:
- namespace: kube-system
  labels:
    foo: bar`},
	}
	controller := &NodeController{
		Interface: setupMockKubernetes(t, node, config),
		configMap: config.Name,
		namespace: namespace,
	}
	go controller.Run(stopCh)
	stopCh <- struct{}{}
}
// TestNodeReady checks nodeReady against selectors that do and do not match
// the fake cluster's seeded pod (default/foo with labels foo=bar).
func TestNodeReady(t *testing.T) {
	for _, tc := range []struct {
		msg       string
		selectors []*PodSelector
		ready     bool
	}{
		{
			msg: "node should be ready when pod is found",
			selectors: []*PodSelector{
				{
					Namespace: "default",
					Labels:    map[string]string{"foo": "bar"},
				},
			},
			ready: true,
		},
		{
			msg: "node should not be ready when pod is not found",
			selectors: []*PodSelector{
				{
					// label value "baz" matches no seeded pod
					Namespace: "default",
					Labels:    map[string]string{"foo": "baz"},
				},
			},
			ready: false,
		},
	} {
		t.Run(tc.msg, func(t *testing.T) {
			controller := &NodeController{
				Interface: setupMockKubernetes(t, nil, nil),
				selectors: tc.selectors,
			}
			ready, _ := controller.nodeReady(&v1.Node{})
			if ready != tc.ready {
				t.Errorf("expected ready %t, got %t", tc.ready, ready)
			}
		})
	}
}
// TestSetNodeReady verifies setNodeReady removes the managed taint when the
// node is ready and adds it when the node is not ready, leaving unrelated
// taints untouched.
func TestSetNodeReady(t *testing.T) {
	for _, tc := range []struct {
		msg   string
		node  *v1.Node
		ready bool
	}{
		{
			msg: "taint should be removed when node is ready",
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: "foo",
				},
				Spec: v1.NodeSpec{
					// starts with the managed taint plus an unrelated one
					Taints: []v1.Taint{
						{
							Key: taintNodeNotReadyName,
						},
						{
							Key: "foo",
						},
					},
				},
			},
			ready: true,
		},
		{
			msg: "taint should be added when node is not ready",
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: "foo",
				},
				Spec: v1.NodeSpec{
					// starts without the managed taint
					Taints: []v1.Taint{
						{
							Key: "foo",
						},
					},
				},
			},
			ready: false,
		},
	} {
		t.Run(tc.msg, func(t *testing.T) {
			controller := &NodeController{
				Interface:             setupMockKubernetes(t, tc.node, nil),
				taintNodeNotReadyName: taintNodeNotReadyName,
			}
			_ = controller.setNodeReady(tc.node, tc.ready)
			// re-read the node from the fake API server to observe the change
			n, err := controller.CoreV1().Nodes().Get(tc.node.Name, metav1.GetOptions{})
			if err != nil {
				t.Errorf("should not fail: %s", err)
			}
			if tc.ready && hasTaint(n, taintNodeNotReadyName) {
				t.Errorf("node should not have taint when ready")
			}
			if !tc.ready && !hasTaint(n, taintNodeNotReadyName) {
				t.Errorf("node should have taint when not ready")
			}
		})
	}
}
// TestGetConfig exercises NodeController.getConfig against a valid config
// map, one with a wrong data key, one with unparsable content, and a missing
// config map.
func TestGetConfig(t *testing.T) {
	for _, tc := range []struct {
		msg     string
		config  *v1.ConfigMap
		success bool
	}{
		{
			msg: "valid config map should overwrite selectors",
			config: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "config",
					Namespace: namespace,
				},
				Data: map[string]string{ConfigMapSelectorsKey: `selectors:
- namespace: kube-system
  labels:
    foo: bar`},
			},
			success: true,
		},
		{
			msg: "config map with invalid key should fail",
			config: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "config",
					Namespace: namespace,
				},
				Data: map[string]string{"invalid": `selectors:
- namespace: kube-system
  labels:
    foo: bar`},
			},
			success: false,
		},
		{
			msg: "config map with invalid content should fail",
			config: &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "config",
					Namespace: namespace,
				},
				Data: map[string]string{ConfigMapSelectorsKey: `selectors`},
			},
			success: false,
		},
		{
			msg:     "no configMap exists should fail",
			config:  nil,
			success: false,
		},
	} {
		t.Run(tc.msg, func(t *testing.T) {
			controller := &NodeController{
				Interface: setupMockKubernetes(t, nil, tc.config),
				configMap: "config",
				namespace: namespace,
			}
			err := controller.getConfig()
			if err != nil && tc.success {
				t.Errorf("should not fail: %s", err)
			}
			if err == nil && !tc.success {
				t.Error("expected failure")
			}
			// cleanup: removed a stale commented-out assertion block that was
			// copy-pasted from TestSetNodeReady and referenced tc.node/tc.ready,
			// fields this test's table does not have
		})
	}
}
// TestContainLabels checks containLabels for one matching and one
// non-matching expected-label set.
func TestContainLabels(t *testing.T) {
	have := map[string]string{"foo": "bar"}
	match := map[string]string{"foo": "bar"}
	if !containLabels(have, match) {
		t.Errorf("expected %s to be contained in %s", match, have)
	}
	mismatch := map[string]string{"foo": "baz"}
	if containLabels(have, mismatch) {
		t.Errorf("did not expect %s to be contained in %s", mismatch, have)
	}
}
// TestPodReady verifies podReady tracks container readiness status.
func TestPodReady(t *testing.T) {
	pod := &v1.Pod{
		Status: v1.PodStatus{
			ContainerStatuses: []v1.ContainerStatus{{Ready: true}},
		},
	}
	if !podReady(pod) {
		t.Error("expected pod to be ready")
	}
	pod.Status.ContainerStatuses[0].Ready = false
	if podReady(pod) {
		t.Error("expected pod to not be ready")
	}
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"regexp"
"strings"
)
func readdata(fname string) (lines []string) {
f, err := os.Open(fname)
if err != nil {
log.Fatalf("Error opening dataset '%s': %s", fname, err)
}
defer f.Close()
scanner := bufio.NewScanner(f)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
lines = append(lines, strings.ToLower(strings.Trim(scanner.Text(), " ")))
}
if err := scanner.Err(); err != nil {
fmt.Fprintln(os.Stderr, "reading standard input:", err)
}
return lines
}
// splitall matches "name = value" or "name[index] = value" lines:
// group 1 = command name, group 3 = optional bracketed index, group 4 = value.
var splitall = regexp.MustCompile(`^([[:alpha:]]+)(\[(.+)\])? = (.+)$`)

// parsedata converts input lines into a program, dispatching each line to the
// parser registered for its command name (mem or mask). Malformed or unknown
// lines are logged and skipped rather than aborting the parse.
func parsedata(lines []string) (program []Command) {
	parsers := make(map[string]func([]string) (Command, error))
	parsers[MEM] = parseMem
	parsers[MASK] = parseMask
	for _, l := range lines {
		matches := splitall.FindStringSubmatch(l)
		// FindStringSubmatch returns the full match plus 4 groups on success
		if len(matches) != 5 {
			log.Printf("Warning: Command not in standard format: '%s'", l)
		} else {
			if f, ok := parsers[matches[1]]; ok {
				// pass the index and value groups to the command parser
				if c, err := f(matches[2:]); err == nil {
					program = append(program, c)
				} else {
					log.Printf("Warning: parseerror '%s' in line '%s'", err, l)
				}
			} else {
				log.Printf("Warning: unhandled command '%s'", l)
			}
		}
	}
	return program
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
// Creates or updates a component template
package putcomponenttemplate
import (
gobytes "bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/elastic/elastic-transport-go/v8/elastictransport"
"github.com/elastic/go-elasticsearch/v8/typedapi/types"
)
const (
	// nameMask flags (in paramSet) that the required "name" path parameter
	// has been provided
	nameMask = iota + 1
)

// ErrBuildPath is returned in case of missing parameters within the build of the request.
var ErrBuildPath = errors.New("cannot build path, check for missing path parameters")
// PutComponentTemplate builds and performs a "put component template" request
// against an Elasticsearch transport.
type PutComponentTemplate struct {
	transport elastictransport.Interface // transport used by Perform

	headers http.Header // extra headers applied to the built request
	values  url.Values  // query-string parameters
	path    url.URL     // request URL assembled by HttpRequest

	buf *gobytes.Buffer // serialized request body

	req      *Request                       // typed request payload
	deferred []func(request *Request) error // mutations applied to req just before build
	raw      io.Reader                      // raw JSON body; takes precedence over req

	paramSet int // bitmask of which path parameters were set (see nameMask)

	name string // component template name path parameter
}
// NewPutComponentTemplate type alias for index.
type NewPutComponentTemplate func(name string) *PutComponentTemplate

// NewPutComponentTemplateFunc returns a new instance of PutComponentTemplate with the provided transport.
// Used in the index of the library this allows to retrieve every apis in once place.
func NewPutComponentTemplateFunc(tp elastictransport.Interface) NewPutComponentTemplate {
	return func(name string) *PutComponentTemplate {
		n := New(tp)

		// the component template name is the only required path parameter
		n.Name(name)

		return n
	}
}
// Creates or updates a component template
//
// https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-component-template.html
func New(tp elastictransport.Interface) *PutComponentTemplate {
	r := &PutComponentTemplate{
		transport: tp,
		values:    make(url.Values),
		headers:   make(http.Header),
		buf:       gobytes.NewBuffer(nil),
		req:       NewRequest(),
	}

	return r
}
// Raw takes a json payload as input which is then passed to the http.Request
// If specified Raw takes precedence on Request method.
func (r *PutComponentTemplate) Raw(raw io.Reader) *PutComponentTemplate {
	r.raw = raw
	return r
}
// Request allows to set the request property with the appropriate payload.
func (r *PutComponentTemplate) Request(req *Request) *PutComponentTemplate {
	r.req = req
	return r
}
// HttpRequest returns the http.Request object built from the given
// parameters: deferred mutations are applied, the body is serialised
// (raw reader wins over the typed request), the path is assembled from
// the set parameters, and default content-negotiation headers are added.
func (r *PutComponentTemplate) HttpRequest(ctx context.Context) (*http.Request, error) {
	var path strings.Builder
	var method string
	var req *http.Request

	var err error

	// Apply any deferred request mutations before serialising the payload.
	if len(r.deferred) > 0 {
		for _, f := range r.deferred {
			if deferredErr := f(r.req); deferredErr != nil {
				return nil, deferredErr
			}
		}
	}

	if r.raw != nil {
		// FIX: the read error was previously discarded, which could silently
		// send a truncated body.
		if _, readErr := r.buf.ReadFrom(r.raw); readErr != nil {
			return nil, fmt.Errorf("could not read raw payload for PutComponentTemplate: %w", readErr)
		}
	} else if r.req != nil {
		data, err := json.Marshal(r.req)

		if err != nil {
			return nil, fmt.Errorf("could not serialise request for PutComponentTemplate: %w", err)
		}

		r.buf.Write(data)
	}

	r.path.Scheme = "http"

	// Build the URL path from the parameters that were set.
	switch {
	case r.paramSet == nameMask:
		path.WriteString("/")
		path.WriteString("_component_template")
		path.WriteString("/")
		path.WriteString(r.name)

		method = http.MethodPut
	}

	r.path.Path = path.String()
	r.path.RawQuery = r.values.Encode()

	if r.path.Path == "" {
		return nil, ErrBuildPath
	}

	if ctx != nil {
		req, err = http.NewRequestWithContext(ctx, method, r.path.String(), r.buf)
	} else {
		req, err = http.NewRequest(method, r.path.String(), r.buf)
	}
	// FIX: check the constructor error BEFORE touching req. The original
	// assigned req.Header first, which panics on the nil request returned
	// when method/URL construction fails.
	if err != nil {
		return nil, fmt.Errorf("could not build http.Request: %w", err)
	}

	req.Header = r.headers.Clone()

	// Default the 8.x compatibility content-negotiation headers unless the
	// caller supplied their own.
	if req.Header.Get("Content-Type") == "" {
		if r.buf.Len() > 0 {
			req.Header.Set("Content-Type", "application/vnd.elasticsearch+json;compatible-with=8")
		}
	}

	if req.Header.Get("Accept") == "" {
		req.Header.Set("Accept", "application/vnd.elasticsearch+json;compatible-with=8")
	}

	return req, nil
}
// Perform runs the http.Request through the provided transport and returns an http.Response.
// The caller is responsible for closing the response body.
func (r PutComponentTemplate) Perform(ctx context.Context) (*http.Response, error) {
	req, err := r.HttpRequest(ctx)
	if err != nil {
		return nil, err
	}

	res, err := r.transport.Perform(req)
	if err != nil {
		return nil, fmt.Errorf("an error happened during the PutComponentTemplate query execution: %w", err)
	}

	return res, nil
}
// Do runs the request through the transport, handle the response and returns a putcomponenttemplate.Response.
// On non-success status codes it decodes and returns the Elasticsearch error payload.
func (r PutComponentTemplate) Do(ctx context.Context) (*Response, error) {
	response := NewResponse()

	res, err := r.Perform(ctx)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	// NOTE(review): `< 299` excludes status 299 from the success path even
	// though it is a 2xx code — confirm whether `< 300` was intended.
	if res.StatusCode < 299 {
		err = json.NewDecoder(res.Body).Decode(response)
		if err != nil {
			return nil, err
		}

		return response, nil
	}

	// Non-success: decode the structured Elasticsearch error instead.
	errorResponse := types.NewElasticsearchError()
	err = json.NewDecoder(res.Body).Decode(errorResponse)
	if err != nil {
		return nil, err
	}

	// Some error payloads omit the status; fall back to the HTTP status code.
	if errorResponse.Status == 0 {
		errorResponse.Status = res.StatusCode
	}

	return nil, errorResponse
}
// Header set a key, value pair in the PutComponentTemplate headers map.
// An existing value for the key is replaced, not appended to.
func (r *PutComponentTemplate) Header(key, value string) *PutComponentTemplate {
	r.headers.Set(key, value)

	return r
}
// Name Name of the component template to create.
// Elasticsearch includes the following built-in component templates:
// `logs-mappings`; 'logs-settings`; `metrics-mappings`;
// `metrics-settings`;`synthetics-mapping`; `synthetics-settings`.
// Elastic Agent uses these templates to configure backing indices for its data
// streams.
// If you use Elastic Agent and want to overwrite one of these templates, set
// the `version` for your replacement template higher than the current version.
// If you don’t use Elastic Agent and want to disable all built-in component and
// index templates, set `stack.templates.enabled` to `false` using the cluster
// update settings API.
// API Name: name
func (r *PutComponentTemplate) Name(name string) *PutComponentTemplate {
	// Record that the mandatory path parameter is present (see HttpRequest).
	r.paramSet |= nameMask
	r.name = name

	return r
}
// Create If `true`, this request cannot replace or update existing component
// templates (the call fails if the template already exists).
// API name: create
func (r *PutComponentTemplate) Create(create bool) *PutComponentTemplate {
	r.values.Set("create", strconv.FormatBool(create))

	return r
}
// MasterTimeout Period to wait for a connection to the master node.
// If no response is received before the timeout expires, the request fails and
// returns an error. The duration is passed through verbatim (e.g. "30s").
// API name: master_timeout
func (r *PutComponentTemplate) MasterTimeout(duration string) *PutComponentTemplate {
	r.values.Set("master_timeout", duration)

	return r
}
// AllowAutoCreate This setting overrides the value of the `action.auto_create_index` cluster
// setting.
// If set to `true` in a template, then indices can be automatically created
// using that
// template even if auto-creation of indices is disabled via
// `actions.auto_create_index`.
// If set to `false` then data streams matching the template must always be
// explicitly created.
// API name: allow_auto_create
func (r *PutComponentTemplate) AllowAutoCreate(allowautocreate bool) *PutComponentTemplate {
	// Stored on the request body, not the query string.
	r.req.AllowAutoCreate = &allowautocreate

	return r
}
// Meta_ Optional user metadata about the component template.
// May have any contents. This map is not automatically generated by
// Elasticsearch.
// This information is stored in the cluster state, so keeping it short is
// preferable.
// To unset `_meta`, replace the template without specifying this information.
// API name: _meta
func (r *PutComponentTemplate) Meta_(metadata types.Metadata) *PutComponentTemplate {
	r.req.Meta_ = metadata

	return r
}
// Template The template to be applied which includes mappings, settings, or aliases
// configuration.
// API name: template
func (r *PutComponentTemplate) Template(template *types.IndexState) *PutComponentTemplate {
	// NOTE(review): dereferences without a nil check — passing a nil
	// *IndexState panics here. Confirm callers never pass nil.
	r.req.Template = *template

	return r
}
// Version Version number used to manage component templates externally.
// This number isn't automatically generated or incremented by Elasticsearch.
// To unset a version, replace the template without specifying a version.
// API name: version
func (r *PutComponentTemplate) Version(versionnumber int64) *PutComponentTemplate {
	r.req.Version = &versionnumber

	return r
}
|
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aggfuncs_test
import (
"testing"
"github.com/pingcap/tidb/executor/aggfuncs"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
)
// getEvaluatedMemDelta returns the extra heap bytes attributable to the value
// in column 0 of row: the byte length for string and JSON values, and 0 for
// all fixed-width types.
func getEvaluatedMemDelta(row *chunk.Row, dataType *types.FieldType) (memDelta int64) {
	switch dataType.GetType() {
	case mysql.TypeString:
		memDelta = int64(len(row.GetString(0)))
	case mysql.TypeJSON:
		memDelta = int64(len(row.GetJSON(0).Value))
	}
	return
}
// lastValueEvaluateRowUpdateMemDeltaGens produces the expected per-row memory
// deltas for the LAST_VALUE window function: after row i is appended, the
// tracked value is row i's, so each delta is the current row's evaluated size
// minus the previously tracked one.
func lastValueEvaluateRowUpdateMemDeltaGens(srcChk *chunk.Chunk, dataType *types.FieldType) (memDeltas []int64, err error) {
	memDeltas = make([]int64, 0, srcChk.NumRows())
	lastMemDelta := int64(0)
	for i := 0; i < srcChk.NumRows(); i++ {
		// FIX: the original read srcChk.GetRow(0) on every iteration, so all
		// rows after the first contributed a zero delta for variable-width
		// types. LAST_VALUE tracks row i, so row i must be evaluated.
		row := srcChk.GetRow(i)
		curMemDelta := getEvaluatedMemDelta(&row, dataType)
		memDeltas = append(memDeltas, curMemDelta-lastMemDelta)
		lastMemDelta = curMemDelta
	}
	return memDeltas, nil
}
// nthValueEvaluateRowUpdateMemDeltaGens returns an updateMemDeltaGens for the
// NTH_VALUE window function: only the nth row (1-based) contributes a memory
// delta; every other row contributes zero.
func nthValueEvaluateRowUpdateMemDeltaGens(nth int) updateMemDeltaGens {
	return func(srcChk *chunk.Chunk, dataType *types.FieldType) (memDeltas []int64, err error) {
		memDeltas = make([]int64, 0)
		for i := 0; i < srcChk.NumRows(); i++ {
			memDeltas = append(memDeltas, int64(0))
		}
		// NOTE(review): the guard `nth < srcChk.NumRows()` skips the case
		// nth == NumRows even though GetRow(nth-1) would still be in range —
		// confirm whether excluding the last row is intentional.
		if nth < srcChk.NumRows() {
			row := srcChk.GetRow(nth - 1)
			memDeltas[nth-1] = getEvaluatedMemDelta(&row, dataType)
		}
		return memDeltas, nil
	}
}
// TestMemValue checks the memory-delta accounting of the FIRST_VALUE,
// LAST_VALUE and NTH_VALUE window functions across fixed-width and
// variable-width (string/JSON) column types.
func TestMemValue(t *testing.T) {
	// NTH_VALUE(1) behaves like FIRST_VALUE for memory purposes.
	firstMemDeltaGens := nthValueEvaluateRowUpdateMemDeltaGens(1)
	secondMemDeltaGens := nthValueEvaluateRowUpdateMemDeltaGens(2)
	fifthMemDeltaGens := nthValueEvaluateRowUpdateMemDeltaGens(5)
	tests := []windowMemTest{
		buildWindowMemTester(ast.WindowFuncFirstValue, mysql.TypeLonglong, 0, 2, 1,
			aggfuncs.DefPartialResult4FirstValueSize+aggfuncs.DefValue4IntSize, firstMemDeltaGens),
		buildWindowMemTester(ast.WindowFuncFirstValue, mysql.TypeFloat, 0, 2, 1,
			aggfuncs.DefPartialResult4FirstValueSize+aggfuncs.DefValue4Float32Size, firstMemDeltaGens),
		buildWindowMemTester(ast.WindowFuncFirstValue, mysql.TypeDouble, 0, 2, 1,
			aggfuncs.DefPartialResult4FirstValueSize+aggfuncs.DefValue4Float64Size, firstMemDeltaGens),
		buildWindowMemTester(ast.WindowFuncFirstValue, mysql.TypeNewDecimal, 0, 2, 1,
			aggfuncs.DefPartialResult4FirstValueSize+aggfuncs.DefValue4DecimalSize, firstMemDeltaGens),
		buildWindowMemTester(ast.WindowFuncFirstValue, mysql.TypeString, 0, 2, 1,
			aggfuncs.DefPartialResult4FirstValueSize+aggfuncs.DefValue4StringSize, firstMemDeltaGens),
		buildWindowMemTester(ast.WindowFuncFirstValue, mysql.TypeDate, 0, 2, 1,
			aggfuncs.DefPartialResult4FirstValueSize+aggfuncs.DefValue4TimeSize, firstMemDeltaGens),
		buildWindowMemTester(ast.WindowFuncFirstValue, mysql.TypeDuration, 0, 2, 1,
			aggfuncs.DefPartialResult4FirstValueSize+aggfuncs.DefValue4DurationSize, firstMemDeltaGens),
		buildWindowMemTester(ast.WindowFuncFirstValue, mysql.TypeJSON, 0, 2, 1,
			aggfuncs.DefPartialResult4FirstValueSize+aggfuncs.DefValue4JSONSize, firstMemDeltaGens),
		buildWindowMemTester(ast.WindowFuncLastValue, mysql.TypeLonglong, 1, 2, 0,
			aggfuncs.DefPartialResult4LastValueSize+aggfuncs.DefValue4IntSize, lastValueEvaluateRowUpdateMemDeltaGens),
		buildWindowMemTester(ast.WindowFuncLastValue, mysql.TypeString, 1, 2, 0,
			aggfuncs.DefPartialResult4LastValueSize+aggfuncs.DefValue4StringSize, lastValueEvaluateRowUpdateMemDeltaGens),
		buildWindowMemTester(ast.WindowFuncLastValue, mysql.TypeJSON, 1, 2, 0,
			aggfuncs.DefPartialResult4LastValueSize+aggfuncs.DefValue4JSONSize, lastValueEvaluateRowUpdateMemDeltaGens),
		buildWindowMemTester(ast.WindowFuncNthValue, mysql.TypeLonglong, 2, 3, 0,
			aggfuncs.DefPartialResult4NthValueSize+aggfuncs.DefValue4IntSize, secondMemDeltaGens),
		buildWindowMemTester(ast.WindowFuncNthValue, mysql.TypeLonglong, 5, 3, 0,
			aggfuncs.DefPartialResult4NthValueSize+aggfuncs.DefValue4IntSize, fifthMemDeltaGens),
		buildWindowMemTester(ast.WindowFuncNthValue, mysql.TypeJSON, 2, 3, 0,
			aggfuncs.DefPartialResult4NthValueSize+aggfuncs.DefValue4JSONSize, secondMemDeltaGens),
		buildWindowMemTester(ast.WindowFuncNthValue, mysql.TypeString, 5, 3, 0,
			aggfuncs.DefPartialResult4NthValueSize+aggfuncs.DefValue4StringSize, fifthMemDeltaGens),
	}
	for _, test := range tests {
		testWindowAggMemFunc(t, test)
	}
}
|
package fuse
import (
"sync"
"fmt"
"unsafe"
"log"
)
var _ = log.Println
// This implements a pool of buffers that returns slices with capacity
// (2^e * PAGESIZE) for e=0,1,... which have possibly been used, and
// may contain random contents.
// This implements a pool of buffers that returns slices with capacity
// (2^e * PAGESIZE) for e=0,1,... which have possibly been used, and
// may contain random contents.
type BufferPool struct {
	lock sync.Mutex // guards all fields below

	// For each exponent a list of slice pointers.
	buffersByExponent [][][]byte

	// start of slice -> exponent.
	outstandingBuffers map[uintptr]uint

	// Total count of created buffers. Handy for finding memory
	// leaks.
	createdBuffers int
}
// IntToExponent returns the smallest exponent e such that 2^e >= z.
func IntToExponent(z int) uint {
	var e uint
	// Halving loop computes the floored base-2 logarithm of z.
	for v := z; v > 1; v >>= 1 {
		e++
	}
	// Round up once more when z is not an exact power of two.
	if z > 1<<e {
		e++
	}
	return e
}
// NewBufferPool returns an empty, ready-to-use pool; the size-class table
// starts with capacity for 8 exponents and grows on demand.
func NewBufferPool() *BufferPool {
	bp := new(BufferPool)
	bp.buffersByExponent = make([][][]byte, 0, 8)
	bp.outstandingBuffers = make(map[uintptr]uint)
	return bp
}
// String renders one "exponent = cached-buffer-count" line per size class,
// for debugging.
func (me *BufferPool) String() string {
	out := ""
	for e := range me.buffersByExponent {
		out += fmt.Sprintf("%d = %d\n", e, len(me.buffersByExponent[e]))
	}
	return out
}
// getBuffer pops one cached slice for the given size-class exponent, or
// returns nil when none is available. Callers (AllocBuffer) hold me.lock.
func (me *BufferPool) getBuffer(exponent uint) []byte {
	if int(exponent) >= len(me.buffersByExponent) {
		return nil
	}
	list := me.buffersByExponent[exponent]
	n := len(list)
	if n == 0 {
		return nil
	}
	// Pop from the tail of the free list.
	buf := list[n-1]
	me.buffersByExponent[exponent] = list[:n-1]
	return buf
}
// addBuffer files slice under size class exp, growing the size-class table as
// needed. Callers (FreeBuffer) hold me.lock.
func (me *BufferPool) addBuffer(slice []byte, exp uint) {
	for len(me.buffersByExponent) <= int(exp) {
		me.buffersByExponent = append(me.buffersByExponent, make([][]byte, 0))
	}
	me.buffersByExponent[exp] = append(me.buffersByExponent[exp], slice)
}
// AllocCount returns how many buffers this pool has ever created (not how
// many are currently cached) — useful for spotting leaks.
func (me *BufferPool) AllocCount() int {
	me.lock.Lock()
	defer me.lock.Unlock()
	return me.createdBuffers
}
// AllocBuffer returns a slice of length size whose capacity is rounded up to
// a power-of-two multiple of PAGESIZE, reusing a pooled buffer when possible.
// Contents may be stale. The buffer is recorded in outstandingBuffers so
// FreeBuffer can later recycle it.
func (me *BufferPool) AllocBuffer(size uint32) []byte {
	sz := int(size)
	if sz < PAGESIZE {
		sz = PAGESIZE
	}
	exp := IntToExponent(sz)
	rounded := 1 << exp

	// Size classes are stored relative to the page-size exponent.
	exp -= IntToExponent(PAGESIZE)

	me.lock.Lock()
	defer me.lock.Unlock()

	b := me.getBuffer(exp)
	if b == nil {
		me.createdBuffers++
		b = make([]byte, size, rounded)
	} else {
		// Reuse: shrink the pooled full-capacity slice to the requested length.
		b = b[:size]
	}

	// Key the buffer by the address of its first element.
	me.outstandingBuffers[uintptr(unsafe.Pointer(&b[0]))] = exp

	return b
}
// FreeBuffer takes back a buffer if it was allocated through AllocBuffer. It is
// not an error to call FreeBuffer() on a slice obtained elsewhere: unknown
// buffers are simply ignored.
func (me *BufferPool) FreeBuffer(slice []byte) {
	// Buffers smaller than a page never came from this pool.
	if cap(slice) < PAGESIZE {
		return
	}
	// Re-extend to full capacity so the cached slice can serve any request
	// in its size class, and so the key matches the one stored at Alloc time.
	slice = slice[:cap(slice)]
	key := uintptr(unsafe.Pointer(&slice[0]))

	me.lock.Lock()
	defer me.lock.Unlock()
	exp, ok := me.outstandingBuffers[key]
	if ok {
		me.addBuffer(slice, exp)
		// FIX: the original `me.outstandingBuffers[key] = 0, false` is the
		// pre-Go 1 map-deletion syntax and no longer compiles; delete() is
		// the modern equivalent.
		delete(me.outstandingBuffers, key)
	}
}
|
package mat
import (
"math"
"math/rand"
)
// NewCone returns an uncapped double cone extending infinitely along y, with
// fresh 4x4 transform/inverse matrices, the default material, a random id and
// shadow casting enabled.
func NewCone() *Cone {
	m1 := New4x4()  //NewMat4x4(make([]float64, 16))
	inv := New4x4() //NewMat4x4(make([]float64, 16))
	return &Cone{
		Id:         rand.Int63(),
		Transform:  m1,
		Inverse:    inv,
		Material:   NewDefaultMaterial(),
		MinY:       math.Inf(-1),
		MaxY:       math.Inf(1),
		CastShadow: true,
	}
}
// NewConeMMC returns a cone truncated to the open interval (min, max) on the
// y axis; closed controls whether end caps are rendered.
func NewConeMMC(min, max float64, closed bool) *Cone {
	c := NewCone()
	c.MinY = min
	c.MaxY = max
	c.Closed = closed
	return c
}
// Cone is a (possibly truncated and capped) double-napped cone shape.
type Cone struct {
	Id               int64  // random identifier assigned by NewCone
	Transform        Mat4x4 // object-to-world transform
	Inverse          Mat4x4 // cached inverse of Transform
	InverseTranspose Mat4x4 // cached transpose of Inverse
	Material         Material
	Label            string // user-facing name (returned by Name)
	parent           Shape  // enclosing shape, if any
	savedRay         Ray    // last local ray (returned by GetLocalRay)
	MinY             float64 // lower y truncation bound
	MaxY             float64 // upper y truncation bound
	Closed           bool    // whether end caps are rendered
	CastShadow       bool
}
// CastsShadow reports whether this cone should cast shadows.
func (c *Cone) CastsShadow() bool {
	return c.CastShadow
}

// ID returns the cone's identifier.
func (c *Cone) ID() int64 {
	return c.Id
}

// GetTransform returns the object-to-world transform.
func (c *Cone) GetTransform() Mat4x4 {
	return c.Transform
}

// GetInverse returns the cached inverse of the transform.
func (c *Cone) GetInverse() Mat4x4 {
	return c.Inverse
}

// GetInverseTranspose returns the cached transpose of the inverse transform.
func (c *Cone) GetInverseTranspose() Mat4x4 {
	return c.InverseTranspose
}
// SetTransform composes the given transform onto the current one (it
// multiplies rather than replaces) and refreshes the cached inverse and
// inverse-transpose.
func (c *Cone) SetTransform(transform Mat4x4) {
	c.Transform = Multiply(c.Transform, transform)
	c.Inverse = Inverse(c.Transform)
	c.InverseTranspose = Transpose(c.Inverse)
}

// GetMaterial returns the cone's material.
func (c *Cone) GetMaterial() Material {
	return c.Material
}

// SetMaterial replaces the cone's material.
func (c *Cone) SetMaterial(material Material) {
	c.Material = material
}
// IntersectLocal intersects an object-space ray with the double cone
// x² - y² + z² = 0, truncated to (MinY, MaxY), then adds any end-cap hits.
func (c *Cone) IntersectLocal(ray Ray) []Intersection {
	var xs []Intersection
	rdx2 := ray.Direction.Get(0) * ray.Direction.Get(0)
	rdy2 := ray.Direction.Get(1) * ray.Direction.Get(1)
	rdz2 := ray.Direction.Get(2) * ray.Direction.Get(2)

	// Quadratic coefficients (note the minus sign on the y terms).
	a := rdx2 - rdy2 + rdz2
	b := 2*ray.Origin.Get(0)*ray.Direction.Get(0) -
		2*ray.Origin.Get(1)*ray.Direction.Get(1) +
		2*ray.Origin.Get(2)*ray.Direction.Get(2)

	absA := math.Abs(a)
	absB := math.Abs(b)
	// Both a and b vanish: the ray misses the cone walls entirely.
	if absA < Epsilon && absB < Epsilon {
		return xs
	}

	rox2 := ray.Origin.Get(0) * ray.Origin.Get(0)
	roy2 := ray.Origin.Get(1) * ray.Origin.Get(1)
	roz2 := ray.Origin.Get(2) * ray.Origin.Get(2)

	c1 := rox2 - roy2 + roz2

	//if math.Abs(a) < Epsilon {
	//	return c.intercectCaps(ray, xs)
	//}
	disc := b*b - 4*a*c1

	// ray does not intersect the cone
	if disc < 0 {
		return xs
	}

	var t0, t1 float64
	if absA < Epsilon && absB > Epsilon {
		// Degenerate case: the ray is parallel to one half of the cone,
		// the quadratic collapses to the single root t = -c/(2b).
		t0 = -c1 / (2.0 * b)
		y0 := ray.Origin.Get(1) + t0*ray.Direction.Get(1)
		if y0 > c.MinY && y0 < c.MaxY {
			xs = append(xs, NewIntersection(t0, c))
		}
		//t1 = -c1 / (2.0 * b)
	} else {
		t0 = (-b - math.Sqrt(disc)) / (2 * a)
		t1 = (-b + math.Sqrt(disc)) / (2 * a)

		// Capping check: only keep hits whose y lies inside the truncation bounds.
		y0 := ray.Origin.Get(1) + t0*ray.Direction.Get(1)
		if y0 > c.MinY && y0 < c.MaxY {
			xs = append(xs, NewIntersection(t0, c))
		}
		y1 := ray.Origin.Get(1) + t1*ray.Direction.Get(1)
		if y1 > c.MinY && y1 < c.MaxY {
			xs = append(xs, NewIntersection(t1, c))
		}
	}

	// Lids on top and bottom
	return c.intercectCaps(ray, xs)
}
// NormalAtLocal returns the object-space normal at point: straight up/down on
// the end caps, otherwise the wall normal (x, ±sqrt(x²+z²), z).
func (c *Cone) NormalAtLocal(point Tuple4, intersection *Intersection) Tuple4 {
	// compute the square of the distance from the y axis
	dist := math.Pow(point.Get(0), 2) + math.Pow(point.Get(2), 2)
	if dist < 1 && point.Get(1) >= c.MaxY-Epsilon {
		return NewVector(0, 1, 0)
	} else if dist < 1 && point.Get(1) <= c.MinY+Epsilon {
		return NewVector(0, -1, 0)
	} else {
		y := math.Sqrt(math.Pow(point.Get(0), 2) + math.Pow(point.Get(2), 2))
		// Flip the y component on the upper half of the double cone.
		if point.Get(1) > 0.0 {
			y = -y
		}
		return NewVector(point.Get(0), y, point.Get(2))
	}
}
// GetLocalRay returns the stored object-space ray.
// NOTE(review): savedRay is never written in this file — confirm it is
// populated elsewhere before relying on it.
func (c *Cone) GetLocalRay() Ray {
	return c.savedRay
}
// checkCap for cones changes so the MinY / MaxY is used instead of 1.0 since the cone narrows down.
// (remember, we're in unit space)
// It reports whether the ray at parameter t lands within the end-cap disc.
func (c *Cone) checkCap(ray Ray, t float64, minMaxY float64) bool {
	x := ray.Origin.Get(0) + t*ray.Direction.Get(0)
	z := ray.Origin.Get(2) + t*ray.Direction.Get(2)
	// NOTE(review): this compares the SQUARED distance x²+z² against the
	// unsquared radius |minMaxY|; for cap radii != 1 one would expect
	// x²+z² <= minMaxY² — verify against the renderer's cap tests.
	return math.Pow(x, 2)+math.Pow(z, 2) <= math.Abs(minMaxY)
}
// intercectCaps appends intersections with the two end-cap discs at y=MinY and
// y=MaxY; it is a no-op when the cone is open or the ray runs parallel to the
// caps. (Name is a long-standing typo for "intersect", kept for compatibility.)
func (c *Cone) intercectCaps(ray Ray, xs []Intersection) []Intersection {
	if !c.Closed || math.Abs(ray.Direction.Get(1)) < Epsilon {
		return xs
	}
	// check for an intersection with the lower end cap by intersecting
	// the ray with the plane at y=cyl.minimum
	t := (c.MinY - ray.Origin.Get(1)) / ray.Direction.Get(1)
	if c.checkCap(ray, t, c.MinY) {
		xs = append(xs, NewIntersection(t, c))
	}
	// check for an intersection with the upper end cap by intersecting
	// the ray with the plane at y=cyl.maximum
	t = (c.MaxY - ray.Origin.Get(1)) / ray.Direction.Get(1)
	if c.checkCap(ray, t, c.MaxY) {
		xs = append(xs, NewIntersection(t, c))
	}
	return xs
}
// GetParent returns the shape this cone belongs to, or nil.
func (c *Cone) GetParent() Shape {
	return c.parent
}

// SetParent attaches this cone to an enclosing shape.
func (c *Cone) SetParent(shape Shape) {
	c.parent = shape
}

// Name returns the cone's user-assigned label.
func (c *Cone) Name() string {
	return c.Label
}
|
package main
import (
route3 "P-Learn-Go/001-Introduction/Rename/route"
route2 "P-Learn-Go/001-Introduction/Rename/route"
)
// main demonstrates import aliasing: route2 and route3 alias the SAME package,
// so both calls below invoke the identical Http function.
func main() {
	route2.Http()
	route3.Http()
}
|
package main
import "fmt"
// Point is a simple 2-D point with value semantics.
type Point struct {
	x int
	y int
}

// Rect embeds its corner Points by value: all four ints are laid out
// contiguously inside a Rect (demonstrated in main).
type Rect struct {
	leftUp, rightDown Point
}

// Rect2 stores pointers to its corners: the two pointer fields themselves are
// contiguous, but the Points they reference need not be.
type Rect2 struct {
	leftUp, rightDown *Point
}
// updateLeftUpX has a VALUE receiver: it mutates a copy of the Rect, so the
// caller's struct is left unchanged — this is the point of the demo in main.
func (rect Rect) updateLeftUpX(n int) {
	rect.leftUp.x = n
}
// main demonstrates struct memory layout: value-embedded Points are stored
// contiguously, while pointer fields only have contiguous pointer slots.
func main() {
	r1 := Rect{Point{1,2}, Point{3, 4}}
	// Value receiver: this call has no visible effect on r1.
	r1.updateLeftUpX(11);
	fmt.Println(r1)
	// r1 holds four ints laid out contiguously in memory;
	// print their addresses to show it
	fmt.Printf("r1.leftUp.x 地址=%p r1.leftUp.y 地址=%p r1.rightDown.x 地址=%p r1.rightDown.y 地址=%p \n",
		&r1.leftUp.x, &r1.leftUp.y, &r1.rightDown.x, &r1.rightDown.y)
	// r2 holds two *Point fields; the two pointer slots themselves are
	// contiguous, but the Points they point to need not be adjacent
	r2 := Rect2{&Point{10,20}, &Point{30,40}}
	// print the addresses of the pointer fields themselves
	fmt.Printf("r2.leftUp 本身地址=%p r2.rightDown 本身地址=%p \n",
		&r2.leftUp, &r2.rightDown)
	// where the pointers point depends on how the runtime allocates
	fmt.Printf("r2.leftUp 指向地址=%p r2.rightDown 指向地址=%p \n",
		r2.leftUp, r2.rightDown)
}
package main
import (
"errors"
"math"
"os"
"github.com/jcorbin/anansi"
"github.com/jcorbin/anansi/x/platform"
)
// schotterDemoUI wraps the shared schotterDemo state with the interactive
// per-frame Update method handed to p.Run in runInteractive.
type schotterDemoUI struct {
	*schotterDemo
}
// runInteractive drives the demo interactively on the terminal at 60 FPS,
// wrapping the package-level demo state `sd`.
func runInteractive() {
	platform.MustRun(os.Stdin, os.Stdout, func(p *platform.Platform) error {
		var ui schotterDemoUI
		ui.schotterDemo = &sd
		ui.squareSide = 20 // TODO push down, pre-compute based on initial width and squaresPerRow
		return p.Run(&ui)
	}, platform.FrameRate(60))
}
// Update is the per-frame callback: it handles input (Ctrl-C quit, Ctrl-Z
// suspend, scroll-to-zoom), resizes the canvas to fit the terminal, redraws
// the pattern and advances the animation angle.
func (sd *schotterDemoUI) Update(ctx *platform.Context) (err error) {
	// Ctrl-C interrupts
	if ctx.Input.HasTerminal('\x03') {
		// ... AFTER any other available input has been processed
		err = errors.New("interrupt")
		// ... NOTE err != nil will prevent wasting any time flushing the final
		// lame-duck frame
	}

	// Ctrl-Z suspends
	if ctx.Input.CountRune('\x1a') > 0 {
		defer func() {
			if err == nil {
				err = ctx.Suspend()
			} // else NOTE don't bother suspending, e.g. if Ctrl-C was also present
		}()
	}

	// Scroll input zooms by changing the square size (clamped to >= 1).
	zoomed := false
	if n := ctx.Input.TotalScrollIn(ctx.Output.Bounds()); n != 0 {
		sd.squareSide += n
		if sd.squareSide < 1 {
			sd.squareSide = 1
		}
		zoomed = true
	}

	// Re-derive canvas dimensions and per-row/col square counts whenever the
	// screen width changed or the user zoomed. The canvas is 2x wider and 4x
	// taller than the screen (see the *2 / *4 factors below).
	canvasSize := sd.canvas.Rect.Size()
	if screenSize := ctx.Output.Bounds().Size(); screenSize.X != canvasSize.X/2 || zoomed {
		sd.padding = 0
		if screenSize.X > 2 {
			sd.padding = 2
		}
		canvasSize.X = screenSize.X * 2
		canvasSize.Y = screenSize.Y * 4
		// roundUp makes the integer divisions below round up.
		roundUp := sd.squareSide - 1
		sd.squaresPerRow = ((screenSize.X-sd.padding)*2 + roundUp) / sd.squareSide
		sd.squaresPerCol = ((screenSize.Y-sd.padding)*4 + roundUp) / sd.squareSide
		// TODO resize if != nil
		sd.canvas.Resize(canvasSize)
	}

	// Clear the bitmap, redraw the scene, and blit it to the output grid.
	for i := range sd.canvas.Bit {
		sd.canvas.Bit[i] = false
	}
	sd.draw()
	ctx.Output.Clear()
	anansi.DrawBitmap(ctx.Output.Grid, sd.canvas)

	// Advance the animation, wrapping the angle at pi.
	sd.angleOffset += 0.01
	if sd.angleOffset > math.Pi {
		sd.angleOffset -= math.Pi
	}
	return err
}
|
package pv_monitor_controller
import (
"testing"
v1 "k8s.io/api/core/v1"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/kubernetes-csi/external-health-monitor/pkg/mock"
"github.com/kubernetes-csi/external-health-monitor/pkg/util"
)
// newMockVolume builds the volume fixture shared by every test in this file:
// a CSI volume with the given id and condition, plus the matching bound
// PV ("pv") / PVC ("pvc") pair in the default namespace.
func newMockVolume(volumeID, message string, abnormal bool) *mock.MockVolume {
	return &mock.MockVolume{
		CSIVolume: &mock.CSIVolume{
			Volume: &csi.Volume{
				VolumeId: volumeID,
			},
			Condition: &csi.VolumeCondition{
				Abnormal: abnormal,
				Message:  message,
			},
		},
		NativeVolume:      mock.CreatePV(2, "pvc", "pv", mock.DefaultNS, volumeID, "pvcuid", &mock.FSVolumeMode, v1.VolumeBound),
		NativeVolumeClaim: mock.CreatePVC(1, 2, "pvc", "pvcuid", mock.DefaultNS, "pv", v1.ClaimBound),
	}
}

// newAbnormalVolume is the "Volume not found" fixture used by the abnormal cases.
func newAbnormalVolume() *mock.MockVolume {
	return newMockVolume("abnormalVolume1", "Volume not found", true)
}

// Test_AbnormalVolumeWithoutNodeWatcher: an abnormal volume condition must
// produce an abnormal event (node watcher off, ListVolumes supported).
func Test_AbnormalVolumeWithoutNodeWatcher(t *testing.T) {
	testCase := &testCase{
		name:               "abnormal_volume_case1",
		enableNodeWatcher:  false,
		supportListVolumes: true,
		fakeNativeObjects: &fakeNativeObjects{
			MockVolume: newAbnormalVolume(),
		},
		wantAbnormalEvent: true,
	}
	runTest(t, testCase)
}

// Test_AbnormalVolumeWithNodeWatcher: same as above with the node watcher on.
func Test_AbnormalVolumeWithNodeWatcher(t *testing.T) {
	abnormalNodes := &mock.MockNode{
		NativeNode: mock.CreateNode("node1", ""),
	}
	testCase := &testCase{
		name:               "abnormal_volume_case1",
		enableNodeWatcher:  true,
		supportListVolumes: true,
		fakeNativeObjects: &fakeNativeObjects{
			MockVolume: newAbnormalVolume(),
			MockNode:   abnormalNodes,
		},
		wantAbnormalEvent: true,
	}
	runTest(t, testCase)
}

// Test_NormalVolumeWithoutNodeWatcher: a healthy volume must not raise events.
func Test_NormalVolumeWithoutNodeWatcher(t *testing.T) {
	testCase := &testCase{
		name:               "normal_volume_case1",
		enableNodeWatcher:  false,
		supportListVolumes: true,
		fakeNativeObjects: &fakeNativeObjects{
			MockVolume: newMockVolume("normalVolume1", "", false),
		},
		wantAbnormalEvent: false,
	}
	runTest(t, testCase)
}

// Test_AbnormalVolumeWithoutNodeWatcherAndListVolume: abnormal event is still
// raised when the driver does not support ListVolumes.
func Test_AbnormalVolumeWithoutNodeWatcherAndListVolume(t *testing.T) {
	testCase := &testCase{
		name:               "abnormal_volume_case1",
		enableNodeWatcher:  false,
		supportListVolumes: false,
		fakeNativeObjects: &fakeNativeObjects{
			MockVolume: newAbnormalVolume(),
		},
		wantAbnormalEvent: true,
	}
	runTest(t, testCase)
}

// Test_AbnormalVolumeWithNodeWatcherNoListVolume: node watcher on, no
// ListVolumes support — abnormal event still expected.
func Test_AbnormalVolumeWithNodeWatcherNoListVolume(t *testing.T) {
	abnormalNodes := &mock.MockNode{
		NativeNode: mock.CreateNode("node1", ""),
	}
	testCase := &testCase{
		name:               "abnormal_volume_case1",
		enableNodeWatcher:  true,
		supportListVolumes: false,
		fakeNativeObjects: &fakeNativeObjects{
			MockVolume: newAbnormalVolume(),
			MockNode:   abnormalNodes,
		},
		wantAbnormalEvent: true,
	}
	runTest(t, testCase)
}

// Test_NormalVolumeWithoutNodeWatcherAndListVolume: healthy volume, no
// ListVolumes — no events expected.
func Test_NormalVolumeWithoutNodeWatcherAndListVolume(t *testing.T) {
	testCase := &testCase{
		name:               "normal_volume_case1",
		enableNodeWatcher:  false,
		supportListVolumes: false,
		fakeNativeObjects: &fakeNativeObjects{
			MockVolume: newMockVolume("normalVolume1", "", false),
		},
		wantAbnormalEvent: false,
	}
	runTest(t, testCase)
}

// Test_RecoveryEventWithListVolume: a volume that returned to a healthy
// condition, with a stale abnormal event present, must emit a recovery event.
func Test_RecoveryEventWithListVolume(t *testing.T) {
	oldAbnormalEvent := &mock.MockEvent{
		NativeEvent: mock.CreateEvent("event", "", "pvcuid", v1.EventTypeWarning, "VolumeConditionAbnormal"),
	}
	testCase := &testCase{
		name:               "normal_volume_recovery_event1",
		enableNodeWatcher:  false,
		supportListVolumes: true,
		fakeNativeObjects: &fakeNativeObjects{
			MockVolume: newMockVolume("normalVolume1", util.DefaultRecoveryEventMessage, false),
			MockEvent:  oldAbnormalEvent,
		},
		wantAbnormalEvent: false,
		hasRecoveryEvent:  true,
	}
	runTest(t, testCase)
}

// Test_RecoveryEventWithoutListVolume: same recovery behavior without
// ListVolumes support.
func Test_RecoveryEventWithoutListVolume(t *testing.T) {
	oldAbnormalEvent := &mock.MockEvent{
		NativeEvent: mock.CreateEvent("event", "", "pvcuid", v1.EventTypeWarning, "VolumeConditionAbnormal"),
	}
	testCase := &testCase{
		name:               "normal_volume_recovery_event2",
		enableNodeWatcher:  false,
		supportListVolumes: false,
		fakeNativeObjects: &fakeNativeObjects{
			MockVolume: newMockVolume("normalVolume1", util.DefaultRecoveryEventMessage, false),
			MockEvent:  oldAbnormalEvent,
		},
		wantAbnormalEvent: false,
		hasRecoveryEvent:  true,
	}
	runTest(t, testCase)
}
|
package main
// Direction offsets shared by solve and DFS: right, left, up, down.
var dx []int
var dy []int

// solve flips every 'O' region NOT connected to the board edge into 'X'
// (LeetCode 130, Surrounded Regions). The board is modified in place.
func solve(board [][]byte) {
	/* 1. set up the four search directions */
	dx = []int{0, 0, -1, 1}
	dy = []int{1, -1, 0, 0}
	if len(board) == 0 {
		return
	}
	rows, cols := len(board), len(board[0])
	/* 2. flood-fill from every border cell, marking safe regions 'S' */
	for r := 0; r < rows; r++ {
		DFS(board, r, 0)
		DFS(board, r, cols-1)
	}
	for c := 0; c < cols; c++ {
		DFS(board, 0, c)
		DFS(board, rows-1, c)
	}
	// Unmarked 'O' cells are surrounded and get captured; 'S' cells are
	// restored to 'O'.
	for r := 0; r < rows; r++ {
		for c := 0; c < cols; c++ {
			switch board[r][c] {
			case 'S':
				board[r][c] = 'O'
			case 'O':
				board[r][c] = 'X'
			}
		}
	}
}

// DFS flood-fills the 'O' region containing (x, y) with the marker 'S',
// stopping at walls ('X'), already-marked cells and the board edge.
func DFS(board [][]byte, x, y int) {
	if x < 0 || y < 0 || x >= len(board) || y >= len(board[x]) || board[x][y] == 'X' || board[x][y] == 'S' {
		return
	}
	// Mark the cell so the recursion cannot revisit it.
	board[x][y] = 'S'
	for d := range dx {
		DFS(board, x+dx[d], y+dy[d])
	}
}
/*
题目链接:
https://leetcode-cn.com/problems/surrounded-regions/solution/ 被围绕的区域
*/
/*
总结
1. 这题的思路是:
(1) 扫描矩阵边界,利用DFS将边界岛屿的每一块变为'S'。 (字符不唯一,可以选择其他字符,只要不是'O','X')
(2) 扫描整个矩阵,将'O'变'X', 'S'变'O'
(3) 完毕~
2. 个人感觉DFS一般比较容易编写~
*/
|
package main
import "fmt"
// Reads n (price, grams) pairs from stdin and prints the lowest price per
// 1000 grams, formatted with two decimals.
func main() {
	var n int
	fmt.Scanf("%d", &n)
	var best float64
	for i := 0; i < n; i++ {
		var price, grams float64
		fmt.Scanf("%f %f", &price, &grams)
		// Normalise to the price of a full kilogram.
		perKilo := (1000.0 / grams) * price
		// The first sample always seeds the minimum.
		if i == 0 || perKilo < best {
			best = perKilo
		}
	}
	fmt.Printf("%.2f\n", best)
}
|
package gourmet
import (
"fmt"
"os"
"strconv"
"testing"
"github.com/joho/godotenv"
"github.com/stretchr/testify/assert"
)
// init loads ../.env into the process environment when that file exists, so
// the tests below can read configuration from it. The load error is
// deliberately ignored (best-effort, test-only setup).
func init() {
	_, err := os.Stat("../.env")
	if !os.IsNotExist(err) {
		_ = godotenv.Load("../.env")
	}
}
// TestQueryExec runs QueryExec for a fixed location and the keyword "ラーメン",
// asserting only that no error is returned and printing each result.
// NOTE(review): this looks like an integration test that depends on external
// configuration loaded in init — confirm it needs network/credentials to run.
func TestQueryExec(t *testing.T) {
	// assert := assert.New(t)
	q := QueryParam{
		// Coordinates formatted with full float64 precision.
		Latitude:  strconv.FormatFloat(35.531365, 'f', -1, 64),
		Longitude: strconv.FormatFloat(139.6947003, 'f', -1, 64),
		Freeword:  "ラーメン",
	}
	restaurantInfos, err := QueryExec(q)
	assert.Nil(t, err)
	for _, rest := range restaurantInfos {
		fmt.Println(rest.Name + ": " + rest.Link)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.