text
stringlengths 11
4.05M
|
|---|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/12/5 9:08 AM
# @File : offer09_栈实现队列.go
# @Description : Implement a queue using two stacks (剑指 Offer 09).
# @Attention :
*/
package offer2
// Key idea: one stack receives pushes, the other serves pops.
type CQueue struct {
	push []int // inbox stack: newly appended values, newest on top
	pop []int // outbox stack: values in dequeue order, oldest on top
}
// Constructor returns an empty CQueue ready for use.
func Constructor() CQueue {
	var q CQueue
	return q
}
// AppendTail enqueues value at the tail of the queue.
func (q *CQueue) AppendTail(value int) {
	q.push = append(q.push, value)
}
// DeleteHead dequeues and returns the head element, or -1 when the
// queue is empty.
func (q *CQueue) DeleteHead() int {
	// Refill the outbox only when it has run dry; moving elements
	// reverses their order so the oldest ends up on top.
	if len(q.pop) == 0 {
		for n := len(q.push); n > 0; n = len(q.push) {
			q.pop = append(q.pop, q.push[n-1])
			q.push = q.push[:n-1]
		}
	}
	if len(q.pop) == 0 {
		return -1
	}
	last := len(q.pop) - 1
	head := q.pop[last]
	q.pop = q.pop[:last]
	return head
}
/**
* Your CQueue object will be instantiated and called as such:
* obj := Constructor();
* obj.AppendTail(value);
* param_2 := obj.DeleteHead();
*/
|
package functions_demo
import "fmt"
/*
Functions can take zero or more parameters.
When two or more consecutive named parameters share the same type, the
type may be omitted for all but the last parameter.
*/
// FuncArgs prints the sum of its two arguments. The shared parameter
// type is written only once (x, y int).
func FuncArgs(x, y int) {
	sum := x + y
	fmt.Println(sum)
}
|
package main
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sns"
"flag"
"fmt"
"os"
)
// main subscribes an email address to an SNS topic.
//
// Usage: go run SnsSubscribe.go -e EMAIL -t TOPIC-ARN
func main() {
	email := flag.String("e", "", "The email address of the user subscribing to the topic")
	topicARN := flag.String("t", "", "The ARN of the topic to which the user subscribes")
	flag.Parse()
	if *email == "" || *topicARN == "" {
		fmt.Println("You must supply an email address and topic ARN")
		fmt.Println("Usage: go run SnsSubscribe.go -e EMAIL -t TOPIC-ARN")
		os.Exit(1)
	}
	// Load credentials from the shared credentials file (~/.aws/credentials).
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}))
	result, err := sns.New(sess).Subscribe(&sns.SubscribeInput{
		Endpoint:              email,
		Protocol:              aws.String("email"),
		ReturnSubscriptionArn: aws.Bool(true), // Return the ARN, even if user has yet to confirm
		TopicArn:              topicARN,
	})
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(1)
	}
	fmt.Println(*result.SubscriptionArn)
}
|
package main
import (
"database/sql"
"fmt"
"log"
"time"
"github.com/nitohu/err"
)
// Transaction model: one money movement between two accounts; either
// side may be external (ID 0, stored as NULL).
// TODO: Implement Forecasted and Booked in database
type Transaction struct {
	// Database fields
	ID int64
	Name string
	Description string
	Active bool
	TransactionDate time.Time // when the transaction takes effect
	CreateDate time.Time
	LastUpdate time.Time
	Amount float64
	FromAccount int64 // origin account ID; 0 means external account
	ToAccount int64 // destination account ID; 0 means external account
	TransactionType string
	CategoryID int64
	// Computed fields (filled by computeFields, not stored in the DB)
	FromAccountName string
	ToAccountName string
	TransactionDateStr string
	Category Category
}
// EmptyTransaction returns a zero-valued Transaction whose date fields
// are all initialized to the current local time.
func EmptyTransaction() Transaction {
	// Use a single timestamp so TransactionDate, CreateDate and
	// LastUpdate are exactly equal on a fresh object (three separate
	// time.Now() calls produced slightly different instants).
	now := time.Now().Local()
	return Transaction{
		TransactionDate: now,
		CreateDate:      now,
		LastUpdate:      now,
	}
}
// bookIntoAccount books transaction t into the account with the given
// id; invert flips the direction of the booking.
func bookIntoAccount(cr *sql.DB, id int64, t *Transaction, invert bool) err.Error {
	acc, bookErr := FindAccountByID(cr, id)
	if !bookErr.Empty() {
		bookErr.AddTraceback("bookIntoAccount()", "Error while finding account")
		return bookErr
	}
	if bookErr = acc.Book(cr, t, invert); !bookErr.Empty() {
		bookErr.AddTraceback("bookIntoAccount()", "Error while booking into account")
		return bookErr
	}
	return err.Error{}
}
// Create inserts the transaction into the database with the object's
// current values, stores the new row ID on t and books the amount into
// the linked accounts.
func (t *Transaction) Create(cr *sql.DB) err.Error {
	// Requirements for creating a transaction.
	if t.ID != 0 {
		var ne err.Error
		ne.Init("Transaction.Create()", "This object already has an id")
		return ne
	}
	if t.Amount == 0.0 {
		var ne err.Error
		ne.Init("Transaction.Create()", "The Amount of this transaction is 0")
		return ne
	}
	query := "INSERT INTO transactions ( name, active, transaction_date, last_update, create_date, amount,"
	query += " account_id, to_account, transaction_type, description, category_id"
	query += ") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) RETURNING id;"
	// One timestamp for both fields so they are exactly equal.
	now := time.Now().Local()
	t.CreateDate = now
	t.LastUpdate = now
	// Unset IDs are stored as NULL. This replaces the former four-way
	// duplication of the QueryRow call (one branch per nil/non-nil
	// combination of the two account columns).
	// TODO: FromAccount == 0 && ToAccount == 0 should be rejected.
	var categID, fromAcc, toAcc interface{}
	if t.CategoryID != 0 {
		categID = t.CategoryID
	}
	if t.FromAccount > 0 {
		fromAcc = t.FromAccount
	}
	if t.ToAccount > 0 {
		toAcc = t.ToAccount
	}
	var id int64
	e := cr.QueryRow(query,
		t.Name,
		t.Active,
		t.TransactionDate,
		t.LastUpdate,
		t.CreateDate,
		t.Amount,
		fromAcc,
		toAcc,
		t.TransactionType,
		t.Description,
		categID,
	).Scan(&id)
	if e != nil {
		var ne err.Error
		ne.Init("Transaction.Create()", e.Error())
		return ne
	}
	// Writing id to object.
	t.ID = id
	// Book the transaction into FromAccount if it's given.
	if t.FromAccount > 0 {
		if be := bookIntoAccount(cr, t.FromAccount, t, true); !be.Empty() {
			be.AddTraceback("Transaction.Create()", "Error while booking into FromAccount")
			return be
		}
	}
	// Book the transaction into ToAccount if it's given.
	if t.ToAccount > 0 {
		if be := bookIntoAccount(cr, t.ToAccount, t, false); !be.Empty() {
			be.AddTraceback("Transaction.Create()", "Error while booking into ToAccount")
			return be
		}
	}
	return err.Error{}
}
// Save writes the current values of the object to the database and
// keeps the balances of the linked accounts in sync when the origin
// account, the destination account or the amount changed.
func (t *Transaction) Save(cr *sql.DB) err.Error {
	if t.ID == 0 {
		var ne err.Error
		ne.Init("Transaction.Save()", "This transaction has no ID, maybe create it first?")
		return ne
	}
	if t.Amount == 0.0 {
		var ne err.Error
		ne.Init("Transaction.Save()", "The Amount of the transaction with the id "+fmt.Sprintf("%d", t.ID)+" is 0")
		return ne
	}
	// Fetch the previous amount and account links; they are needed to
	// undo the old bookings below. account_id/to_account may be NULL,
	// hence the interface{} scan targets.
	var oldAmount float64
	var accountID, toAccountID interface{}
	query := "SELECT amount, account_id, to_account FROM transactions WHERE id=$1"
	if e := cr.QueryRow(query, t.ID).Scan(&oldAmount, &accountID, &toAccountID); e != nil {
		var ne err.Error
		ne.Init("Transaction.Save()", e.Error())
		return ne
	}
	// Write values to database.
	query = "UPDATE transactions SET name=$2, active=$3, transaction_date=$4, last_update=$5, amount=$6, account_id=$7,"
	query += "to_account=$8, transaction_type=$9, description=$10, category_id=$11 WHERE id=$1"
	// Unset IDs are stored as NULL; this replaces the former four-way
	// duplication of the Exec call.
	var categID, fromAcc, toAcc interface{}
	if t.CategoryID != 0 {
		categID = t.CategoryID
	}
	if t.FromAccount > 0 {
		fromAcc = t.FromAccount
	}
	// TODO: FromAccount == 0 && ToAccount == 0 should not be allowed.
	if t.ToAccount > 0 {
		toAcc = t.ToAccount
	}
	if _, e := cr.Exec(query,
		t.ID,
		t.Name,
		t.Active,
		t.TransactionDate,
		t.LastUpdate,
		t.Amount,
		fromAcc,
		toAcc,
		t.TransactionType,
		t.Description,
		categID,
	); e != nil {
		var ne err.Error
		ne.Init("Transaction.Save()", e.Error())
		return ne
	}
	/*
		Change balance on accounts.
		temp carries the previous amount so old bookings can be undone.
	*/
	temp := Transaction{
		Name:   t.Name,
		Amount: oldAmount,
	}
	// The origin account changed.
	if accountID != t.FromAccount {
		// Remove the old amount from the old origin account (inverted).
		if accountID != nil {
			e := bookIntoAccount(cr, accountID.(int64), &temp, false)
			if !e.Empty() {
				// BUG FIX: accountID holds an int64 (or nil); the former
				// accountID.(string) assertion panicked on this path.
				e.AddTraceback("Transaction.Save()", "Error while redoing booking from old origin account: "+fmt.Sprintf("%v", accountID))
				return e
			}
		}
		// Book the amount into the new origin account.
		if t.FromAccount > 0 {
			e := bookIntoAccount(cr, t.FromAccount, t, true)
			if !e.Empty() {
				e.AddTraceback("Transaction.Save()", "Error while booking to new origin account: "+fmt.Sprintf("%d", t.FromAccount))
				return e
			}
		}
	}
	// The destination account changed.
	if toAccountID != t.ToAccount {
		// Remove the old amount from the old destination account.
		if toAccountID != nil {
			e := bookIntoAccount(cr, toAccountID.(int64), &temp, true)
			if !e.Empty() {
				// BUG FIX: same panic as above — toAccountID is not a string.
				e.AddTraceback("Transaction.Save()", "Error while removing transaction from the old receiving account: "+fmt.Sprintf("%v", toAccountID))
				return e
			}
		}
		// Book into the new destination account.
		if t.ToAccount > 0 {
			e := bookIntoAccount(cr, t.ToAccount, t, false)
			if !e.Empty() {
				e.AddTraceback("Transaction.Save()", "Error while booking transaction into new receiving account: "+fmt.Sprintf("%d", t.ToAccount))
				return e
			}
		}
	}
	// The amount changed: book only the difference into accounts whose
	// link did NOT change (changed links were fully re-booked above).
	if t.Amount != oldAmount {
		temp.Amount = t.Amount - oldAmount
		if accountID == t.FromAccount {
			e := bookIntoAccount(cr, t.FromAccount, &temp, true)
			if !e.Empty() {
				e.AddTraceback("Transaction.Save()", "Error while booking difference into origin account: "+fmt.Sprintf("%d", t.FromAccount))
				return e
			}
		}
		if toAccountID == t.ToAccount {
			e := bookIntoAccount(cr, t.ToAccount, &temp, false)
			if !e.Empty() {
				e.AddTraceback("Transaction.Save()", "Error while booking difference into destination account: "+fmt.Sprintf("%d", t.ToAccount))
				return e
			}
		}
	}
	return err.Error{}
}
// Delete removes the transaction from the database after reverting its
// bookings on the linked accounts.
func (t *Transaction) Delete(cr *sql.DB) err.Error {
	if t.ID == 0 {
		var ne err.Error
		ne.Init("Transaction.Delete()", "The transaction you want to delete does not have an id")
		return ne
	}
	// Revert the booking on the origin account.
	if t.FromAccount > 0 {
		e := bookIntoAccount(cr, t.FromAccount, t, false)
		if !e.Empty() {
			e.AddTraceback("Transaction.Delete()", "Redo booking from origin account: "+fmt.Sprintf("%d", t.FromAccount))
			return e
		}
	}
	// Revert the booking on the recipient account.
	if t.ToAccount > 0 {
		e := bookIntoAccount(cr, t.ToAccount, t, true)
		if !e.Empty() {
			// TODO: If FromAccount was bigger than 0, it's already booked
			// at this time. Make sure the booking is reverted again.
			// BUG FIX: the message printed t.FromAccount for the
			// recipient account.
			e.AddTraceback("Transaction.Delete()", "Redo booking from recipient account: "+fmt.Sprintf("%d", t.ToAccount))
			return e
		}
	}
	if _, e := cr.Exec("DELETE FROM transactions WHERE id=$1", t.ID); e != nil {
		var ne err.Error
		ne.Init("Transaction.Delete()", e.Error())
		return ne
	}
	return err.Error{}
}
// computeFields fills the derived fields (account display names, the
// formatted transaction date and the category) that are not read
// directly from the database. Lookup failures are logged as warnings
// and leave the affected field at its zero value.
func (t *Transaction) computeFields(cr *sql.DB) {
	// Compute: FromAccountName
	if t.FromAccount != 0 {
		fromAccount, e := FindAccountByID(cr, t.FromAccount)
		if !e.Empty() {
			e.AddTraceback("Transaction.computeFields()", "Error while finding origin account by ID.")
			log.Println("[WARN]", e)
		}
		t.FromAccountName = fromAccount.Name
	} else {
		t.FromAccountName = "External Account"
	}
	// Compute: ToAccountName
	if t.ToAccount != 0 {
		toAccount, e := FindAccountByID(cr, t.ToAccount)
		if !e.Empty() {
			// BUG FIX: the message printed t.CategoryID instead of the
			// account ID that failed to resolve.
			e.AddTraceback("Transaction.computeFields()", "Error while finding recipient account by ID: "+fmt.Sprintf("%d", t.ToAccount))
			log.Println("[WARN]", e)
		}
		t.ToAccountName = toAccount.Name
	} else {
		t.ToAccountName = "External Account"
	}
	// Compute: TransactionDateStr
	t.TransactionDateStr = t.TransactionDate.Format("02.01.2006 - 15:04")
	// Compute: Category
	if t.CategoryID > 0 {
		var e err.Error
		if t.Category, e = FindCategoryByID(cr, t.CategoryID); !e.Empty() {
			e.AddTraceback("Transaction.computeFields()", "Error while finding category by ID: "+fmt.Sprintf("%d", t.CategoryID))
			log.Println("[WARN]", e)
		}
	}
}
// FindByID loads the transaction with the given ID from the database
// into t and computes the derived display fields.
func (t *Transaction) FindByID(cr *sql.DB, transactionID int64) err.Error {
	query := "SELECT id, name, active, transaction_date, last_update, create_date, "
	query += "amount, account_id, to_account, transaction_type, description, category_id "
	query += "FROM transactions WHERE id=$1 "
	query += "ORDER BY transaction_date"
	// account_id, to_account and category_id may be NULL, so they are
	// scanned into interface{} holders first and copied below only when
	// present.
	var fromAccountID, toAccountID, categID interface{}
	e := cr.QueryRow(query, transactionID).Scan(
		&t.ID,
		&t.Name,
		&t.Active,
		&t.TransactionDate,
		&t.LastUpdate,
		&t.CreateDate,
		&t.Amount,
		&fromAccountID,
		&toAccountID,
		&t.TransactionType,
		&t.Description,
		&categID,
	)
	if e != nil {
		var err err.Error
		err.Init("Transaction.FindByID()", e.Error())
		return err
	}
	// NULL columns leave the corresponding int64 fields at 0.
	if fromAccountID != nil {
		t.FromAccount = fromAccountID.(int64)
	}
	if toAccountID != nil {
		t.ToAccount = toAccountID.(int64)
	}
	if categID != nil {
		t.CategoryID = categID.(int64)
	}
	t.computeFields(cr)
	return err.Error{}
}
// FindTransactionByID is similar to Transaction.FindByID but returns
// the transaction by value.
func FindTransactionByID(cr *sql.DB, transactionID int64) (Transaction, err.Error) {
	t := EmptyTransaction()
	e := t.FindByID(cr, transactionID)
	if !e.Empty() {
		// BUG FIX: report the ID that was requested — t.ID is still 0
		// when the lookup failed.
		e.AddTraceback("FindTransactionByID()", "Error while finding transaction by ID: "+fmt.Sprintf("%d", transactionID))
		return t, e
	}
	return t, err.Error{}
}
// GetAllTransactions returns every transaction in the database.
// Records that fail to scan or load are skipped with a warning.
func GetAllTransactions(cr *sql.DB) ([]Transaction, err.Error) {
	var transactions []Transaction
	idRows, e := cr.Query("SELECT id FROM transactions")
	if e != nil {
		var ne err.Error
		ne.Init("GetAllTransactions()", e.Error())
		return transactions, ne
	}
	// BUG FIX: the result set was never closed, leaking the underlying
	// connection.
	defer idRows.Close()
	for idRows.Next() {
		var id int64
		if e = idRows.Scan(&id); e != nil {
			log.Println("[INFO] GetAllTransactions(): Skipping record")
			log.Printf("[WARN] GetAllTransactions: %s\n", e)
			continue
		}
		t := EmptyTransaction()
		if fe := t.FindByID(cr, id); !fe.Empty() {
			log.Printf("[INFO] GetAllTransactions(): Skipping record with ID %d\n", t.ID)
			log.Printf("[WARN] GetAllTransactions(): %s\n", fe)
			continue
		}
		transactions = append(transactions, t)
	}
	// Surface iteration errors that ended the loop early.
	if e = idRows.Err(); e != nil {
		var ne err.Error
		ne.Init("GetAllTransactions()", e.Error())
		return transactions, ne
	}
	return transactions, err.Error{}
}
// GetLatestTransactions returns up to amount transactions ordered by
// transaction_date descending; amount <= 0 returns all of them.
func GetLatestTransactions(cr *sql.DB, amount int) ([]Transaction, err.Error) {
	var transactions []Transaction
	query := "SELECT id FROM transactions ORDER BY transaction_date DESC"
	var rows *sql.Rows
	var e error
	if amount > 0 {
		rows, e = cr.Query(query+" LIMIT $1", amount)
	} else {
		rows, e = cr.Query(query)
	}
	if e != nil {
		var ne err.Error
		ne.Init("GetLatestTransactions()", e.Error())
		return transactions, ne
	}
	// BUG FIX: the result set was never closed, leaking the underlying
	// connection.
	defer rows.Close()
	for rows.Next() {
		var id int64
		if e = rows.Scan(&id); e != nil {
			log.Println("[INFO] GetLatestTransactions(): Skipping record")
			log.Printf("[WARN] GetLatestTransactions: %s\n", e)
			continue
		}
		t := EmptyTransaction()
		if fe := t.FindByID(cr, id); !fe.Empty() {
			log.Printf("[INFO] GetLatestTransactions(): Skipping record with ID: %d\n", t.ID)
			log.Println("[WARN]", fe)
			continue
		}
		transactions = append(transactions, t)
	}
	// Surface iteration errors that ended the loop early.
	if e = rows.Err(); e != nil {
		var ne err.Error
		ne.Init("GetLatestTransactions()", e.Error())
		return transactions, ne
	}
	return transactions, err.Error{}
}
|
package aggregate
// v1Tov2MetricsConversion maps v1 perf-counter names to their v2
// metric names. Every counter listed here is aggregatable (see
// aggregatable below).
var v1Tov2MetricsConversion = map[string]string{
	"replica*app.pegasus*get_qps": "get_qps",
	"replica*app.pegasus*multi_get_qps": "multi_get_qps",
	"replica*app.pegasus*put_qps": "put_qps",
	"replica*app.pegasus*multi_put_qps": "multi_put_qps",
	"replica*app.pegasus*remove_qps": "remove_qps",
	"replica*app.pegasus*multi_remove_qps": "multi_remove_qps",
	"replica*app.pegasus*incr_qps": "incr_qps",
	"replica*app.pegasus*check_and_set_qps": "check_and_set_qps",
	"replica*app.pegasus*check_and_mutate_qps": "check_and_mutate_qps",
	"replica*app.pegasus*scan_qps": "scan_qps",
	"replica*eon.replica*backup_request_qps": "backup_request_qps",
	"replica*app.pegasus*duplicate_qps": "duplicate_qps",
	"replica*app.pegasus*dup_shipped_ops": "dup_shipped_ops",
	"replica*app.pegasus*dup_failed_shipping_ops": "dup_failed_shipping_ops",
	"replica*app.pegasus*get_bytes": "get_bytes",
	"replica*app.pegasus*multi_get_bytes": "multi_get_bytes",
	"replica*app.pegasus*scan_bytes": "scan_bytes",
	"replica*app.pegasus*put_bytes": "put_bytes",
	"replica*app.pegasus*multi_put_bytes": "multi_put_bytes",
	"replica*app.pegasus*check_and_set_bytes": "check_and_set_bytes",
	"replica*app.pegasus*check_and_mutate_bytes": "check_and_mutate_bytes",
	"replica*app.pegasus*recent.read.cu": "recent_read_cu",
	"replica*app.pegasus*recent.write.cu": "recent_write_cu",
	"replica*app.pegasus*recent.expire.count": "recent_expire_count",
	"replica*app.pegasus*recent.filter.count": "recent_filter_count",
	"replica*app.pegasus*recent.abnormal.count": "recent_abnormal_count",
	"replica*eon.replica*recent.write.throttling.delay.count": "recent_write_throttling_delay_count",
	"replica*eon.replica*recent.write.throttling.reject.count": "recent_write_throttling_reject_count",
	"replica*app.pegasus*disk.storage.sst(MB)": "sst_storage_mb",
	"replica*app.pegasus*disk.storage.sst.count": "sst_count",
	"replica*app.pegasus*rdb.block_cache.hit_count": "rdb_block_cache_hit_count",
	"replica*app.pegasus*rdb.block_cache.total_count": "rdb_block_cache_total_count",
	"replica*app.pegasus*rdb.index_and_filter_blocks.memory_usage": "rdb_index_and_filter_blocks_mem_usage",
	"replica*app.pegasus*rdb.memtable.memory_usage": "rdb_memtable_mem_usage",
	"replica*app.pegasus*rdb.estimate_num_keys": "rdb_estimate_num_keys",
	"replica*app.pegasus*rdb.bf_seek_negatives": "rdb_bf_seek_negatives",
	"replica*app.pegasus*rdb.bf_seek_total": "rdb_bf_seek_total",
	"replica*app.pegasus*rdb.bf_point_positive_true": "rdb_bf_point_positive_true",
	"replica*app.pegasus*rdb.bf_point_positive_total": "rdb_bf_point_positive_total",
	"replica*app.pegasus*rdb.bf_point_negatives": "rdb_bf_point_negatives",
}
// aggregatableSet lists additional counter names that are aggregated on
// the collector beyond those in v1Tov2MetricsConversion; used as a set,
// values are ignored.
var aggregatableSet = map[string]interface{}{
	"read_qps": nil,
	"write_qps": nil,
	"read_bytes": nil,
	"write_bytes": nil,
}
// aggregatable reports whether the counter is to be aggregated on the
// collector, consulting both v1Tov2MetricsConversion and
// aggregatableSet. When the counter has a v2 name, pc.name is rewritten
// to it as a side effect.
func aggregatable(pc *partitionPerfCounter) bool {
	if v2Name, converted := v1Tov2MetricsConversion[pc.name]; converted {
		pc.name = v2Name
		// Every counter listed in the conversion table is aggregatable.
		return true
	}
	_, ok := aggregatableSet[pc.name]
	return ok
}
// AllMetrics returns metrics tracked within this collector.
// The sets of metrics from cluster level and table level are completely
// equal.
func AllMetrics() []string {
	// Preallocate: the result size is known up front.
	res := make([]string, 0, len(v1Tov2MetricsConversion)+len(aggregatableSet))
	for _, newName := range v1Tov2MetricsConversion {
		res = append(res, newName)
	}
	for name := range aggregatableSet {
		res = append(res, name)
	}
	return res
}
|
/*
This is a simple challenge.
The task is to write code that outputs a 448*448 square image with 100% transparency. The output should follow the standard image rules.
*/
package main
import (
"flag"
"image"
"image/png"
"os"
)
// main writes a fully transparent w×h (default 448×448) PNG to stdout.
// image.NewRGBA zero-initializes every pixel, so all alpha values are
// already 0.
func main() {
	var w, h int
	flag.IntVar(&w, "width", 448, "window width")
	flag.IntVar(&h, "height", 448, "window height")
	flag.Parse()
	img := image.NewRGBA(image.Rect(0, 0, w, h))
	// BUG FIX: the encode error used to be silently discarded.
	if err := png.Encode(os.Stdout, img); err != nil {
		os.Stderr.WriteString("png encode failed: " + err.Error() + "\n")
		os.Exit(1)
	}
}
|
package main
import (
"fmt"
"house365.com/studyGo/lagagent/kafka"
"house365.com/studyGo/lagagent/taillog"
"math/rand"
"os"
"strings"
"time"
)
// run forwards tailed log lines to Kafka forever. When no line is
// ready it sleeps for one second before polling again.
// NOTE(review): the default+Sleep branch busy-polls; a blocking receive
// would be simpler, but confirm taillog.ReadChan returns a stable
// channel before changing it.
func run() {
	// 1. read the log
	for {
		select {
		case line := <-taillog.ReadChan():
			// 2. ship the line to kafka
			kafka.SendToKafka("web_log", line.Text)
		default:
			time.Sleep(time.Second)
		}
	}
}
// GetRandomString returns a random string of length l drawn from the
// characters [0-9a-z]. Non-positive lengths yield an empty string.
func GetRandomString(l int) string {
	const charset = "0123456789abcdefghijklmnopqrstuvwxyz"
	if l <= 0 {
		return ""
	}
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	// BUG FIX: the former time.Sleep(20) slept 20 *nanoseconds* per
	// character — an untyped constant Duration — doing nothing useful;
	// it has been removed.
	result := make([]byte, l)
	for i := range result {
		result[i] = charset[r.Intn(len(charset))]
	}
	return string(result)
}
// writelog continuously appends random demo lines to my.log so the
// tailer has something to collect.
func writelog() {
	for {
		fd, err := os.OpenFile("my.log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
		// BUG FIX: the open error used to be silently ignored; stop
		// instead of spinning on a broken file.
		if err != nil {
			fmt.Printf("open my.log failed,err:%v\n", err)
			return
		}
		content := strings.Join([]string{"====== ", "=====", GetRandomString(6), "\n"}, "")
		if _, err := fd.Write([]byte(content)); err != nil {
			fmt.Printf("write my.log failed,err:%v\n", err)
		}
		fd.Close()
	}
}
// main wires the demo pipeline together: connect to a local Kafka
// broker, start tailing ./my.log, spawn a goroutine that keeps
// appending random lines to that file, and forward every tailed line
// to Kafka via run.
func main() {
	// 1. initialize the kafka connection
	err := kafka.Init([]string{"127.0.0.1:9092"})
	if err != nil {
		fmt.Printf("init kafka failed,err:%v\n", err)
		return
	}
	fmt.Println("init kafka success.")
	// 2. open the log file and get ready to collect it
	err = taillog.Init("./my.log")
	if err != nil {
		fmt.Printf("init taillog failed,err:%v\n", err)
		return
	}
	fmt.Println("init taillog success.")
	go writelog()
	run()
}
|
package api
import (
"errors"
"fmt"
"net/http"
"regexp"
"strconv"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/golang/glog"
backend "github.com/zalando/chimp/backend"
"github.com/zalando/chimp/conf"
. "github.com/zalando/chimp/types"
"github.com/zalando/chimp/validators"
)
// Backend contains the current backend implementation together with
// its configured type name.
type Backend struct {
	BackendType string
	Backend backend.Backend
}
// se is the bootstrapped backend instance; its Backend field is filled
// in by Start.
var se = Backend{
	BackendType: conf.New().BackendType,
}
// Start initializes the current backend by constructing it and storing
// it on the package-level se instance.
func Start() {
	se.Backend = backend.New()
}
// BackendError is the error representation that should be consumed by
// the "frontend" serving layer. It resembles (but need not match 1:1)
// what the frontend gives to the user, which is based on our RESTful
// API Guidelines doc.
type BackendError struct {
	Status int
	Title string // error message coming from THIS layer
	Detail string // error message coming from backends
}
// rootHandler reports build metadata of the running chimp-server.
func rootHandler(ginCtx *gin.Context) {
	cfg := conf.New()
	version := fmt.Sprintf("Build Time: %s - Git Commit Hash: %s", cfg.VersionBuildStamp, cfg.VersionGitHash)
	ginCtx.JSON(http.StatusOK, gin.H{"chimp-server": version})
}
// healthHandler answers health probes with a plain "OK".
func healthHandler(ginCtx *gin.Context) {
	ginCtx.String(http.StatusOK, "OK")
}
// deployList returns the names of all running applications. Unless the
// ?all query parameter is set, the list is filtered to the calling
// user's uid and team.
func deployList(ginCtx *gin.Context) {
	team, uid := buildTeamLabel(ginCtx)
	all := ginCtx.Query("all")
	var filter map[string]string
	if all == "" {
		filter = map[string]string{
			"uid":  uid,
			"team": team,
		}
	}
	result, err := se.Backend.GetAppNames(filter)
	if err != nil {
		glog.Errorf("Could not get artifacts from backend for LIST request, caused by: %s", err.Error())
		// BUG FIX: the user-facing message claimed this was an INFO
		// request.
		ginCtx.JSON(http.StatusNotFound, gin.H{"error": fmt.Sprintf("Could not get artifacts from backend for LIST request, caused by: %s", err)})
		return
	}
	ginCtx.JSON(http.StatusOK, gin.H{"deployments": result})
}
// deployInfo returns details for a single deployment looked up by name.
func deployInfo(ginCtx *gin.Context) {
	name := ginCtx.Params.ByName("name")
	glog.Infof("retrieve info by name: %s", name)
	infoReq := ArtifactRequest{Action: INFO, Name: name}
	result, err := se.Backend.GetApp(&infoReq)
	if err == nil {
		ginCtx.JSON(http.StatusOK, result)
		return
	}
	glog.Errorf("Could not get artifact from backend for INFO request with name %s, caused by: %s", name, err.Error())
	ginCtx.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("Could not get artifact from backend for INFO request with name %s, caused by: %s", name, err)})
}
// deployCreate validates the JSON request body and creates a new
// deployment, tagging it with the caller's team and user labels.
func deployCreate(ginCtx *gin.Context) {
	givenDeploy, err := commonDeploy(ginCtx)
	// BUG FIX: err was previously inspected only after givenDeploy had
	// already been validated and stored; check it first.
	if err != nil {
		glog.Errorf("Could not create deploy, caused by: %s", err.Error())
		ginCtx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		ginCtx.Error(err)
		return
	}
	validator := validators.New()
	valid, err := validator.Validate(givenDeploy)
	if !valid {
		glog.Errorf("Invalid request, validation not passed.")
		ginCtx.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request."})
		ginCtx.Error(errors.New("Invalid request"))
		return
	}
	team, uid := buildTeamLabel(ginCtx)
	if givenDeploy.Labels == nil {
		givenDeploy.Labels = make(map[string]string, 2)
	}
	givenDeploy.Labels["team"] = team
	givenDeploy.Labels["user"] = uid
	ginCtx.Set("data", givenDeploy)
	memoryLimit, e := mapMemory(givenDeploy.MemoryLimit)
	if e != nil {
		glog.Errorf("Could not create a deploy, caused by: %s", e.Error())
		ginCtx.JSON(http.StatusBadRequest, gin.H{"error": e.Error()})
		// BUG FIX: the stale outer err (nil at this point) was attached
		// here instead of e.
		ginCtx.Error(e)
		return
	}
	volumes := make([]*Volume, len(givenDeploy.Volumes))
	for i, vol := range givenDeploy.Volumes {
		volumes[i] = &Volume{HostPath: vol.HostPath, ContainerPath: vol.ContainerPath, Mode: vol.Mode}
	}
	beReq := &CreateRequest{BaseRequest: BaseRequest{
		Name: givenDeploy.Name, Ports: givenDeploy.Ports, Labels: givenDeploy.Labels, ImageURL: givenDeploy.ImageURL, Env: givenDeploy.Env, Replicas: givenDeploy.Replicas,
		CPULimit: givenDeploy.CPULimit, MemoryLimit: memoryLimit, Force: givenDeploy.Force, Volumes: volumes}}
	beRes, err := se.Backend.Deploy(beReq)
	if err != nil {
		glog.Errorf("Could not create a deploy, caused by: %s", err.Error())
		ginCtx.JSON(http.StatusNotAcceptable, gin.H{"error": err.Error()})
		ginCtx.Error(err)
		return
	}
	glog.Infof("Deployed: %+v\n", beRes)
	ginCtx.JSON(http.StatusOK, gin.H{"name": beRes})
}
// deployUpsert validates the JSON request body and updates an existing
// deployment.
func deployUpsert(ginCtx *gin.Context) {
	deploy, err := commonDeploy(ginCtx)
	// BUG FIX: err was previously checked only after deploy had already
	// been validated and stored; check it first.
	if err != nil {
		glog.Errorf("Could not update deploy, caused by: %s", err.Error())
		ginCtx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		ginCtx.Error(err)
		return
	}
	validator := validators.New()
	valid, err := validator.Validate(deploy)
	if !valid {
		glog.Errorf("Invalid request, validation not passed.")
		ginCtx.JSON(http.StatusBadRequest, gin.H{"error": "Invalid request."})
		ginCtx.Error(errors.New("Invalid request"))
		return
	}
	ginCtx.Set("data", deploy)
	memoryLimit, err := mapMemory(deploy.MemoryLimit)
	if err != nil {
		// BUG FIX: this is the update path; the message used to say
		// "create".
		glog.Errorf("Could not update deploy, caused by: %s", err.Error())
		ginCtx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		ginCtx.Error(err)
		return
	}
	beReq := UpdateRequest{BaseRequest: BaseRequest{Name: deploy.Name, Ports: deploy.Ports, Labels: deploy.Labels, ImageURL: deploy.ImageURL, Env: deploy.Env, Replicas: deploy.Replicas,
		CPULimit: deploy.CPULimit, MemoryLimit: memoryLimit, Force: deploy.Force}}
	if _, err = se.Backend.UpdateDeployment(&beReq); err != nil {
		glog.Errorf("Could not update deploy, caused by: %s", err.Error())
		ginCtx.JSON(http.StatusNotAcceptable, gin.H{"error": err.Error()})
		ginCtx.Error(err)
		return
	}
	glog.Infof("Deployment updated")
	ginCtx.JSON(http.StatusOK, gin.H{})
}
// deployDelete removes the deployment with the given name.
func deployDelete(ginCtx *gin.Context) {
	name := ginCtx.Params.ByName("name")
	// BUG FIX: glog.Info does not interpret format verbs; Infof does.
	glog.Infof("delete by name: %s", name)
	var ar = ArtifactRequest{Action: DELETE, Name: name}
	_, err := se.Backend.Delete(&ar)
	if err != nil {
		glog.Errorf("Could not get artifact from backend for CANCEL request with name %s, caused by: %s", name, err.Error())
		ginCtx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		ginCtx.Error(err)
		return
	}
	ginCtx.JSON(http.StatusOK, gin.H{})
}
// deployReplicasModify scales the named deployment to the requested
// number of replicas; ?force=true forces the scale operation.
func deployReplicasModify(ginCtx *gin.Context) {
	name := ginCtx.Params.ByName("name")
	num := ginCtx.Params.ByName("num")
	fs := ginCtx.Query("force") == "true"
	// BUG FIX: glog.Info does not format, and num is a string, so the
	// former Info(... %d ...) logged garbage; use Infof with %s.
	glog.Infof("scaling %s to %s instances", name, num)
	replicas, err := strconv.Atoi(num)
	if err != nil {
		glog.Errorf("Could not change instances for %s, caused by %s", name, err)
		ginCtx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		ginCtx.Error(err)
		return
	}
	beReq := &ScaleRequest{Name: name, Replicas: replicas, Force: fs}
	if _, err = se.Backend.Scale(beReq); err != nil {
		glog.Errorf("Could not change instances for %s, caused by: %s", name, err.Error())
		ginCtx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		ginCtx.Error(err)
		return
	}
	ginCtx.JSON(http.StatusOK, gin.H{})
}
// commonDeploy parses the JSON request body into a DeployRequest.
func commonDeploy(ginCtx *gin.Context) (DeployRequest, error) {
	ginCtx.Request.ParseForm()
	var givenDeploy DeployRequest
	// BUG FIX: the bind error used to be discarded, so callers always
	// saw a nil error even for malformed request bodies.
	if err := ginCtx.BindWith(&givenDeploy, binding.JSON); err != nil {
		return givenDeploy, err
	}
	glog.Infof("given %+v", givenDeploy)
	return givenDeploy, nil
}
// mapMemory parses a memory limit string ("512", "512MB", "2GB") and
// returns the amount in MB. A bare number is assumed to already be MB.
//
// TODO this is a very hacky implementation of resource handling.
// A much better implementation is found in
// "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource".
// We only express the amount in MB because this is the only unit
// supported by marathon; a limitation in expressiveness we keep for
// simplicity.
func mapMemory(memory string) (int, error) {
	re, err := regexp.Compile(`^([0-9]*)(MB|GB){0,1}$`)
	if err != nil {
		return 0, err
	}
	res := re.FindStringSubmatch(memory)
	if len(res) == 0 {
		return 0, errors.New("Memory formatting is wrong")
	}
	val, err := strconv.Atoi(res[1])
	if err != nil {
		// e.g. "MB" alone: the number group matched the empty string.
		return 0, err
	}
	switch res[2] {
	case "GB":
		// NOTE(review): 1 GB is mapped to 1000 MB here, not 1024.
		return val * 1000, nil
	case "MB", "":
		return val, nil
	default:
		return 0, errors.New("Memory formatting is wrong")
	}
}
// buildTeamLabel extracts the team and uid values stored on the gin
// context, returning empty strings when either is missing.
func buildTeamLabel(ginCtx *gin.Context) (string, string) {
	uid, okUID := ginCtx.Get("uid")
	team, okTeam := ginCtx.Get("team")
	if !okUID || !okTeam {
		return "", ""
	}
	return team.(string), uid.(string)
}
|
package agents
import gophercloud "github.com/zhuqinghua/gophercloud"
// listURL returns the service URL for the agents collection.
func listURL(c *gophercloud.ServiceClient) string {
	return c.ServiceURL("agents")
}
// listDHCPNetworksURL returns the service URL listing the DHCP networks
// of the agent with the given id.
func listDHCPNetworksURL(client *gophercloud.ServiceClient, id string) string {
	return client.ServiceURL("agents", id, "dhcp-networks")
}
|
package main
import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)
// main reads the connection parameters from stdin, fetches the number
// array from the server and prints its sum.
func main() {
	url, port, a, b := getInputValues()
	nums, err := sendReqToServer(url, port, a, b)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(addArray(nums))
}
// getInputValues reads the server URL, port and the two query
// parameters from standard input.
func getInputValues() (string, string, string, string) {
	var url, port, a, b string
	fmt.Scan(&url, &port, &a, &b)
	return url, port, a, b
}
// sendReqToServer issues GET serverURL:serverPort?a=...&b=... and
// decodes the JSON array response into a slice of ints.
func sendReqToServer(serverURL, serverPort, queryParamA, queryParamB string) ([]int, error) {
	req, err := http.NewRequest(http.MethodGet, serverURL+":"+serverPort, nil)
	if err != nil {
		return nil, err
	}
	q := req.URL.Query()
	q.Add("a", queryParamA)
	q.Add("b", queryParamB)
	req.URL.RawQuery = q.Encode()
	// A client without a timeout can hang forever on a stalled server.
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// BUG FIX: a non-200 response used to be fed straight into the JSON
	// decoder, producing a confusing decode error.
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("server returned status %d", resp.StatusCode)
	}
	resultArr := make([]int, 0)
	if err := json.NewDecoder(resp.Body).Decode(&resultArr); err != nil {
		return nil, err
	}
	return resultArr, nil
}
// addArray returns the sum of all elements of numbersArr.
func addArray(numbersArr []int) int {
	total := 0
	for i := range numbersArr {
		total += numbersArr[i]
	}
	return total
}
|
package dft
// DataFileType identifies the kind of a game data file.
// Values start at iota+1, so the zero value is not a valid type.
type DataFileType int
const (
	CARCOLS DataFileType = iota + 1
	CARVARIATIONS
	CONTENTUNLOCKS
	HANDLING
	VEHICLELAYOUTS
	VEHICLEMODELSETS
	VEHICLES
	WEAPONSFILE
	INVALID
)
// String returns the name of the data file type. Values outside the
// declared range (including the zero value) yield "INVALID".
func (d DataFileType) String() string {
	names := [...]string{"CARCOLS", "CARVARIATIONS", "CONTENTUNLOCKS", "HANDLING", "VEHICLELAYOUTS", "VEHICLEMODELSETS", "VEHICLES", "WEAPONSFILE", "INVALID"}
	// BUG FIX: the unchecked names[d-1] panicked with "index out of
	// range" for d == 0 or any value past INVALID.
	if d < 1 || int(d) > len(names) {
		return "INVALID"
	}
	return names[d-1]
}
// EnumIndex returns the 1-based integer value of the enum.
func (d DataFileType) EnumIndex() int {
	return int(d)
}
// DataFile describes a game data file by its path, name and type.
type DataFile struct {
	Path string
	Name string
	Type DataFileType
}
// StreamFile describes a streamed asset file by its path and name.
type StreamFile struct {
	Path string
	Name string
}
|
package compute
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"sync"
"github.com/BurntSushi/toml"
"github.com/Masterminds/semver/v3"
"github.com/fastly/cli/pkg/api"
"github.com/fastly/cli/pkg/common"
"github.com/fastly/cli/pkg/errors"
"github.com/fastly/cli/pkg/text"
)
const (
	// RustToolchainVersion is the `rustup` toolchain string for the
	// compiler that we support.
	RustToolchainVersion = "1.46.0"
	// WasmWasiTarget is the Rust compilation target for Wasi capable Wasm.
	WasmWasiTarget = "wasm32-wasi"
)
// CargoPackage models the package configuration properties of a Rust
// Cargo package which we are interested in and is embedded within
// CargoManifest and CargoLock.
type CargoPackage struct {
	Name string `toml:"name" json:"name"`
	Version string `toml:"version" json:"version"`
	Dependencies []CargoPackage `toml:"-" json:"dependencies"`
}
// CargoManifest models the package configuration properties of a Rust
// Cargo manifest which we are interested in and are read from the
// Cargo.toml manifest file within the $PWD of the package.
type CargoManifest struct {
	Package CargoPackage
}
// Read decodes the Cargo.toml manifest at filename into m.
func (m *CargoManifest) Read(filename string) error {
	_, err := toml.DecodeFile(filename, m)
	return err
}
// CargoMetadata models information about the workspace members and resolved
// dependencies of the current package via `cargo metadata` command output.
// Note the field is singular while the JSON key is "packages".
type CargoMetadata struct {
	Package []CargoPackage `json:"packages"`
}
// Read populates m from the output of `cargo metadata --format-version 1`
// run in the current working directory.
func (m *CargoMetadata) Read() error {
	cmd := exec.Command("cargo", "metadata", "--format-version", "1")
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	if err = cmd.Start(); err != nil {
		return err
	}
	// Decode the JSON straight off the child's stdout stream.
	if err = json.NewDecoder(stdout).Decode(m); err != nil {
		return err
	}
	return cmd.Wait()
}
// Rust implements Toolchain for the Rust language.
type Rust struct {
	client api.HTTPClient // used to query crates.io for latest crate versions
}
// Verify implements the Toolchain interface and verifies whether the Rust
// language toolchain is correctly configured on the host.
func (r Rust) Verify(out io.Writer) error {
	// 1) Check `rustup` is on $PATH
	//
	// Rustup is Rust's toolchain installer and manager, it is needed to assert
	// that the correct WASI WASM compiler target is installed correctly. We
	// only check whether the binary exists on the users $PATH and error with
	// installation help text.
	fmt.Fprintf(out, "Checking if rustup is installed...\n")
	p, err := exec.LookPath("rustup")
	if err != nil {
		return errors.RemediationError{
			Inner:       fmt.Errorf("`rustup` not found in $PATH"),
			Remediation: fmt.Sprintf("To fix this error, run the following command:\n\n\t$ %s", text.Bold("curl https://sh.rustup.rs -sSf | sh")),
		}
	}
	fmt.Fprintf(out, "Found rustup at %s\n", p)

	// 2) Check that the RustToolchainVersion toolchain is installed.
	// (The comment here previously said 1.43.0, which had drifted from the
	// RustToolchainVersion constant.)
	//
	// We use rustup to assert that the toolchain is installed by streaming the
	// output of `rustup toolchain list` and looking for a toolchain whose
	// prefix matches our desired version.
	fmt.Fprintf(out, "Checking if Rust %s is installed...\n", RustToolchainVersion)
	cmd := exec.Command("rustup", "toolchain", "list")
	stdoutStderr, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("error executing rustup: %w", err)
	}
	scanner := bufio.NewScanner(strings.NewReader(string(stdoutStderr)))
	scanner.Split(bufio.ScanLines)
	var found bool
	for scanner.Scan() {
		if strings.HasPrefix(scanner.Text(), RustToolchainVersion) {
			found = true
			break
		}
	}
	if !found {
		return errors.RemediationError{
			Inner:       fmt.Errorf("rust toolchain %s not found", RustToolchainVersion),
			Remediation: fmt.Sprintf("To fix this error, run the following command:\n\n\t$ %s\n", text.Bold("rustup toolchain install "+RustToolchainVersion)),
		}
	}

	// 3) Check `wasm32-wasi` target exists
	//
	// We use rustup to assert that the target is installed for our toolchain
	// by streaming the output of `rustup target list` and looking for the
	// `wasm32-wasi` value. If not found, we error with help text suggesting
	// how to install.
	fmt.Fprintf(out, "Checking if %s target is installed...\n", WasmWasiTarget)
	cmd = exec.Command("rustup", "target", "list", "--installed", "--toolchain", RustToolchainVersion)
	stdoutStderr, err = cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("error executing rustup: %w", err)
	}
	scanner = bufio.NewScanner(strings.NewReader(string(stdoutStderr)))
	scanner.Split(bufio.ScanWords)
	found = false
	for scanner.Scan() {
		if scanner.Text() == WasmWasiTarget {
			found = true
			break
		}
	}
	if !found {
		return errors.RemediationError{
			Inner:       fmt.Errorf("rust target %s not found", WasmWasiTarget),
			Remediation: fmt.Sprintf("To fix this error, run the following command:\n\n\t$ %s\n", text.Bold(fmt.Sprintf("rustup target add %s --toolchain %s", WasmWasiTarget, RustToolchainVersion))),
		}
	}
	fmt.Fprintf(out, "Found wasm32-wasi target\n")

	// 4) Check Cargo.toml file exists in $PWD
	//
	// A valid Cargo.toml file is needed for the `cargo build` compilation
	// process. Therefore, we assert whether one exists in the current $PWD.
	fpath, err := filepath.Abs("Cargo.toml")
	if err != nil {
		return fmt.Errorf("error getting Cargo.toml path: %w", err)
	}
	if !common.FileExists(fpath) {
		return fmt.Errorf("%s not found", fpath)
	}
	fmt.Fprintf(out, "Found Cargo.toml at %s\n", fpath)

	// 5) Verify `fastly` and `fastly-sys` crate version
	//
	// A valid and up-to-date version of the fastly-sys crate is required.
	// (A duplicated Cargo.toml existence check was removed here: step 4 above
	// already guarantees the file exists.)
	var metadata CargoMetadata
	if err := metadata.Read(); err != nil {
		return fmt.Errorf("error reading cargo metadata: %w", err)
	}

	// Fetch the latest crate versions from the crates.io API.
	latestFastly, err := getLatestCrateVersion(r.client, "fastly")
	if err != nil {
		return fmt.Errorf("error fetching latest crate version: %w", err)
	}
	latestFastlySys, err := getLatestCrateVersion(r.client, "fastly-sys")
	if err != nil {
		return fmt.Errorf("error fetching latest crate version: %w", err)
	}

	// Create a semver constraint to be within the latest minor range or above.
	// TODO(phamann): Update this to major when fastly-sys hits 1.x.x.
	fastlySysConstraint, err := semver.NewConstraint(fmt.Sprintf("~%d.%d.0", latestFastlySys.Major(), latestFastlySys.Minor()))
	if err != nil {
		return fmt.Errorf("error parsing latest crate version: %w", err)
	}

	fastlySysVersion, err := getCrateVersionFromMetadata(metadata, "fastly-sys")
	// If fastly-sys crate not found, error with dual remediation steps.
	if err != nil {
		return newCargoUpdateRemediationErr(err, latestFastly.String())
	}
	// If fastly-sys version doesn't meet our constraint of being within the
	// minor range, error with dual remediation steps.
	if ok := fastlySysConstraint.Check(fastlySysVersion); !ok {
		return newCargoUpdateRemediationErr(fmt.Errorf("fastly crate not up-to-date"), latestFastly.String())
	}

	fastlyVersion, err := getCrateVersionFromMetadata(metadata, "fastly")
	// If fastly crate not found, error with dual remediation steps.
	if err != nil {
		return newCargoUpdateRemediationErr(err, latestFastly.String())
	}
	// If fastly crate version is lower than the latest, suggest user should
	// update, but don't error.
	if fastlyVersion.LessThan(latestFastly) {
		text.Break(out)
		text.Info(out, fmt.Sprintf(
			"an optional upgrade for the fastly crate is available, edit %s with:\n\n\t %s\n\nAnd then run the following command:\n\n\t$ %s\n",
			text.Bold("Cargo.toml"),
			text.Bold(fmt.Sprintf(`fastly = "^%s"`, latestFastly)),
			text.Bold("cargo update -p fastly"),
		))
		text.Break(out)
	}

	return nil
}
// Build implements the Toolchain interface and attempts to compile the package
// Rust source to a Wasm binary. Compiler output is streamed to out; when
// verbose is false the buffered stderr is returned as the error on failure.
func (r Rust) Build(out io.Writer, verbose bool) error {
	// Get binary name from Cargo.toml.
	var m CargoManifest
	if err := m.Read("Cargo.toml"); err != nil {
		return fmt.Errorf("error reading Cargo.toml manifest: %w", err)
	}
	binName := m.Package.Name

	// Specify the toolchain using the `cargo +<version>` syntax.
	toolchain := fmt.Sprintf("+%s", RustToolchainVersion)
	args := []string{
		toolchain,
		"build",
		"--bin",
		binName,
		"--release",
		"--target",
		WasmWasiTarget,
		"--color",
		"always",
	}
	if verbose {
		args = append(args, "--verbose")
	}

	// Call cargo build with Wasm Wasi target and release flags.
	// gosec flagged this:
	// G204 (CWE-78): Subprocess launched with variable
	// Disabling as the variables come from trusted sources.
	/* #nosec */
	cmd := exec.Command("cargo", args...)

	// Add debuginfo RUSTFLAGS to command environment to ensure DWARF debug
	// information (such as, source mappings) are compiled into the binary.
	cmd.Env = append(os.Environ(),
		`RUSTFLAGS=-C debuginfo=2`,
	)

	// Pipe the child process stdout and stderr to our own writer.
	// (The pipe errors were previously discarded with `_`; a failure there
	// would have passed a nil reader to io.Copy below and panicked.)
	var stdoutBuf, stderrBuf bytes.Buffer
	stdoutIn, err := cmd.StdoutPipe()
	if err != nil {
		return fmt.Errorf("error creating stdout pipe: %w", err)
	}
	stderrIn, err := cmd.StderrPipe()
	if err != nil {
		return fmt.Errorf("error creating stderr pipe: %w", err)
	}
	stdout := io.MultiWriter(out, &stdoutBuf)
	stderr := io.MultiWriter(out, &stderrBuf)

	// Start the command.
	if err := cmd.Start(); err != nil {
		return fmt.Errorf("failed to start compilation process: %w", err)
	}

	// Stream stdout concurrently with stderr so neither pipe blocks.
	var errStdout, errStderr error
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		_, errStdout = io.Copy(stdout, stdoutIn)
		wg.Done()
	}()
	_, errStderr = io.Copy(stderr, stderrIn)
	wg.Wait()

	if errStdout != nil {
		return fmt.Errorf("error streaming stdout output from child process: %w", errStdout)
	}
	if errStderr != nil {
		return fmt.Errorf("error streaming stderr output from child process: %w", errStderr)
	}

	// Wait for the command to exit.
	if err := cmd.Wait(); err != nil {
		// If we're not in verbose mode return the buffered stderr output
		// from cargo as the error.
		if !verbose && stderrBuf.Len() > 0 {
			return fmt.Errorf("error during compilation process:\n%s", strings.TrimSpace(stderrBuf.String()))
		}
		return fmt.Errorf("error during compilation process")
	}

	// Get working directory.
	dir, err := os.Getwd()
	if err != nil {
		return fmt.Errorf("error getting current working directory: %w", err)
	}
	src := filepath.Join(dir, "target", WasmWasiTarget, "release", fmt.Sprintf("%s.wasm", binName))
	dst := filepath.Join(dir, "bin", "main.wasm")

	// Check if bin directory exists and create if not.
	binDir := filepath.Join(dir, "bin")
	fi, err := os.Stat(binDir)
	switch {
	case err == nil && fi.IsDir():
		// no problem
	case err == nil && !fi.IsDir():
		return fmt.Errorf("error creating bin directory: target already exists as a regular file")
	case os.IsNotExist(err):
		if err := os.MkdirAll(binDir, 0750); err != nil {
			return err
		}
	case err != nil:
		return err
	}

	err = common.CopyFile(src, dst)
	if err != nil {
		return fmt.Errorf("error copying wasm binary: %w", err)
	}

	return nil
}
// CargoCrateVersion models a single Cargo crate version returned by the
// crates.io API (its "num" field holds the version string).
type CargoCrateVersion struct {
	Version string `json:"num"`
}
// CargoCrateVersions models the list of versions of a Cargo crate returned
// by the crates.io API.
type CargoCrateVersions struct {
	Versions []CargoCrateVersion `json:"versions"`
}
// getLatestCrateVersion fetches every published version of the named Rust
// crate from the crates.io HTTP API and returns the newest version that
// parses as valid semver.
func getLatestCrateVersion(client api.HTTPClient, name string) (*semver.Version, error) {
	url := fmt.Sprintf("https://crates.io/api/v1/crates/%s/versions", name)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("error fetching latest crate version: %s", resp.Status)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var crate CargoCrateVersions
	if err := json.Unmarshal(body, &crate); err != nil {
		return nil, err
	}
	// Keep only versions that parse as valid semver; yanked or oddly
	// formatted entries are skipped.
	valid := make([]*semver.Version, 0, len(crate.Versions))
	for _, candidate := range crate.Versions {
		v, err := semver.NewVersion(candidate.Version)
		if err != nil {
			continue
		}
		valid = append(valid, v)
	}
	if len(valid) == 0 {
		return nil, fmt.Errorf("no valid crate versions found")
	}
	sort.Sort(semver.Collection(valid))
	return valid[len(valid)-1], nil
}
// getCrateVersionFromMetadata searches for a crate inside a CargoMetadata tree
// and returns the crate's version as a semver.Version.
// (The comment previously referred to "getCrateVersionFromLockfile", a stale
// name.) Note: a direct workspace-member match breaks out of the outer loop,
// but a match found among a package's dependencies does not — a later
// matching package can overwrite an earlier dependency hit.
func getCrateVersionFromMetadata(metadata CargoMetadata, crate string) (*semver.Version, error) {
	// Search for crate in metadata tree.
	var c CargoPackage
	for _, p := range metadata.Package {
		if p.Name == crate {
			c = p
			break
		}
		for _, pp := range p.Dependencies {
			if pp.Name == crate {
				c = pp
				break
			}
		}
	}
	if c.Name == "" {
		return nil, fmt.Errorf("%s crate not found", crate)
	}
	// Parse the recorded version string to a semver.Version.
	version, err := semver.NewVersion(c.Version)
	if err != nil {
		return nil, fmt.Errorf("error parsing cargo metadata: %w", err)
	}
	return version, nil
}
// newCargoUpdateRemediationErr wraps a cargo error in a RemediationError
// whose remediation text tells the user to pin the fastly crate to the given
// version in Cargo.toml and then run `cargo update -p fastly`.
func newCargoUpdateRemediationErr(err error, version string) errors.RemediationError {
	remediation := fmt.Sprintf(
		"To fix this error, edit %s with:\n\n\t %s\n\nAnd then run the following command:\n\n\t$ %s\n",
		text.Bold("Cargo.toml"),
		text.Bold(fmt.Sprintf(`fastly = "^%s"`, version)),
		text.Bold("cargo update -p fastly"),
	)
	return errors.RemediationError{Inner: err, Remediation: remediation}
}
|
package main
import (
"github.com/g-xianhui/op/server/pb"
"github.com/golang/protobuf/proto"
)
// toRoleBasic converts an internal RoleBasic into its protobuf message form.
func toRoleBasic(r *RoleBasic) *pb.RoleBasic {
	return &pb.RoleBasic{
		Id:         proto.Uint32(r.id),
		Occupation: proto.Uint32(r.occupation),
		Level:      proto.Uint32(r.level),
		Name:       proto.String(r.name),
	}
}
// replyRolelist sends the agent's full role list back to the client.
func replyRolelist(agent *Agent) {
	rep := &pb.MRRolelist{}
	roles := make([]*pb.RoleBasic, 0, len(agent.rolelist))
	for _, r := range agent.rolelist {
		roles = append(roles, toRoleBasic(r))
	}
	rep.Rolelist = roles
	replyMsg(agent, pb.MROLELIST, rep)
}
// findRole returns the index of the role with the given id in the agent's
// role list, or -1 when no such role exists.
func findRole(agent *Agent, id uint32) int {
	for idx, role := range agent.rolelist {
		if role.id == id {
			return idx
		}
	}
	return -1
}
// setRole makes the role at index i the agent's active role and loads its
// data; it is a no-op when that role is already active.
func setRole(agent *Agent, i int) {
	if cur := agent.Role; cur != nil && cur.index == i {
		return
	}
	selected := agent.rolelist[i]
	agent.Role = &Role{id: selected.id, index: i}
	agent.Role.load()
}
// roleLogin activates the role with the given id for a connected agent and
// registers it with the agent center. Returns 0 on success, or an error code
// when the agent is in the wrong stage or the role does not exist.
func roleLogin(agent *Agent, id uint32) uint32 {
	if agent.getStatus() != CONNECTED {
		return ErrLoginAtWrongStage
	}
	idx := findRole(agent, id)
	if idx < 0 {
		return ErrRoleNotFound
	}
	setRole(agent, idx)
	agent.login(id)
	agentcenter.add(id, agent)
	return 0
}
// createRole creates a new role with the given occupation and name for the
// agent. The name is reserved up front and released again if the database
// insert fails. Returns the new role and 0, or nil and an error code
// (role list full, name taken, or a DB error).
func createRole(agent *Agent, occ uint32, name string) (*RoleBasic, uint32) {
	if len(agent.rolelist) >= 3 {
		return nil, ErrRolelistFull
	}
	if !agentcenter.bookName(name) {
		return nil, ErrNameAlreadyUsed
	}
	roleid, errno := dbCreateRole(occ, name)
	if errno != 0 {
		// DB insert failed: release the reservation.
		agentcenter.unbookName(name)
		return nil, errno
	}
	agentcenter.confirmName(name, roleid)
	newrole := &RoleBasic{id: roleid, occupation: occ, name: name}
	agent.rolelist = append(agent.rolelist, newrole)
	// if crash before save rolelist, this roleid will be waste, not a big deal though
	// The saved list always has 3 slots; unused slots stay zero.
	idlist := make([]uint32, 3)
	for i, r := range agent.rolelist {
		idlist[i] = r.id
	}
	saveRolelist(agent.getAccountId(), idlist)
	return newrole, 0
}
|
package main
import (
"encoding/xml"
"fmt"
)
// main demonstrates xml.Unmarshal's struct-tag mapping: attribute fields
// (`where,attr`), nested element paths (`Thing>one`), an embedded struct
// (Address), and pre-set fields that survive when absent from the input
// (Phone keeps "none").
func main() {
	// Email decodes one <Email> element: Where from the `where` attribute,
	// Addr from the child <Addr> element.
	type Email struct {
		Where string `xml:"where,attr"`
		Addr  string
	}
	// Address is embedded in Result, so top-level <City>/<State> decode here.
	type Address struct {
		City, State string
	}
	type Result struct {
		XMLName xml.Name `xml:"Person"`
		Name    string   `xml:"FullName"`
		T       []string `xml:"Thing>one"`
		T2      []string `xml:"Thing>more"`
		T3      []string `xml:"Thing>special>little"`
		Company string
		Phone   string
		Email   []Email
		Groups  []string `xml:"Group>Value"`
		Address
	}
	// Name is overwritten by <FullName>; Phone has no matching element and
	// keeps its preset value.
	v := Result{Name: "none", Phone: "none"}
	data := `
	<Person>
		<FullName>Grace R. Emlin</FullName>
		<Company>Example Inc.</Company>
		<Thing>
			<one>one</one>
			<one>two</one>
			<more>more</more>
			<more>here</more>
			<special> <little>hidden</little></special>
		</Thing>
		<Email where="home">
			<Addr>gre@example.com</Addr>
		</Email>
		<Email where='work'>
			<Addr>gre@work.com</Addr>
		</Email>
		<Group>
			<Value>Friends</Value>
			<Value>Squash</Value>
		</Group>
		<City>Hanga Roa</City>
		<State>Easter Island</State>
	</Person>
`
	err := xml.Unmarshal([]byte(data), &v)
	if err != nil {
		fmt.Printf("error: %v", err)
		return
	}
	fmt.Printf("XMLName: %#v\n", v.XMLName)
	fmt.Printf("Name: %q\n", v.Name)
	fmt.Printf("Company: %q\n", v.Company)
	fmt.Printf("Phone: %q\n", v.Phone)
	fmt.Printf("T: %q\n", v.T)
	fmt.Printf("T2: %q\n", v.T2)
	fmt.Printf("T3: %q\n", v.T3)
	fmt.Printf("Email: %v\n", v.Email)
	fmt.Printf("Groups: %v\n", v.Groups)
	fmt.Printf("Address: %v\n", v.Address)
	fmt.Printf("v=, %v\n", v)
}
|
package main
import (
"encoding/json"
"fmt"
gen "github.com/youknowbopu/attack_map_server/generator"
"log"
"math/rand"
"net/http"
"path/filepath"
"time"
"golang.org/x/net/websocket"
)
// bind is the address:port the websocket server listens on.
const bind = "127.0.0.1:9999"

// workerNumber is how many concurrent event-generator goroutines to start.
const workerNumber = 3

// wsConns holds all currently subscribed websocket connections.
// NOTE(review): appended/shrunk in dataHandler and iterated in generateData
// from separate goroutines with no synchronization — data race; confirm and
// guard with a mutex.
var wsConns []*websocket.Conn
// dataHandler subscribes the incoming websocket connection to the broadcast
// list, then blocks reading (and discarding) client data until the connection
// errors out, at which point the connection is removed from the list.
// NOTE(review): wsConns is shared with generateData goroutines without a
// lock — data race; a package-level mutex is needed.
func dataHandler(ws *websocket.Conn) {
	wsConns = append(wsConns, ws)
	// Remove this connection from the subscriber list on exit.
	defer func() {
		for k, v := range wsConns {
			if v == ws {
				wsConns = append(wsConns[:k], wsConns[k+1:]...)
				break
			}
		}
	}()
	// The previous code passed a nil buffer to Read, so it always received
	// 0 bytes and spun; give it real scratch space so Read can consume
	// inbound frames and block until the client disconnects.
	buf := make([]byte, 512)
	for {
		if _, err := ws.Read(buf); err != nil {
			break
		}
	}
}
// generateData produces random events from the city list forever,
// broadcasting each JSON-encoded event to every subscribed websocket and
// sleeping a random 0-2s between events.
// NOTE(review): wsConns is iterated here while dataHandler goroutines mutate
// it concurrently — data race; guard the slice with a mutex.
func generateData(cities []gen.City) {
	generator := new(gen.Generator)
	for {
		event := generator.New(cities)
		if b, err := json.Marshal(event); err == nil {
			// Best-effort broadcast: write failures are logged, not fatal.
			for _, ws := range wsConns {
				_, err := ws.Write(b)
				if err != nil {
					log.Println(err)
				}
			}
		}
		// Random pause of up to 2 seconds between events.
		d := time.Duration(rand.Float32()*2000) * time.Millisecond
		time.Sleep(d)
	}
}
// main wires the websocket endpoint, loads city metadata from cities.txt,
// starts the event-generator workers, and serves forever on bind.
func main() {
	http.Handle("/", websocket.Handler(dataHandler))

	dir, err := filepath.Abs("cities.txt")
	if err != nil {
		log.Fatal(err)
	}
	cities, err := gen.ReadCitiesFromFile(dir)
	if err != nil {
		log.Fatal(fmt.Errorf("error occurred when reading city informations: %v", err))
	}

	for i := 0; i < workerNumber; i++ {
		go generateData(cities)
	}

	log.Printf("Server is listening on %s...", bind)
	// The ListenAndServe error was previously discarded; surface it so a
	// failed bind doesn't exit silently with status 0.
	log.Fatal(http.ListenAndServe(bind, nil))
}
|
package mysqldb
import (
"path/filepath"
"strconv"
"testing"
"time"
"math/rand"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
context "golang.org/x/net/context"
)
// UserRegisterTypeLegacy is the register-type value used for legacy accounts.
const UserRegisterTypeLegacy = "LEGACY"
// UserTestSuite is the test suite for User storage operations.
type UserTestSuite struct {
	suite.Suite
	db *DbClient // shared DB client, opened once in SetupSuite
}
// SetupSuite prepares the test suite by opening a DB client configured from
// the local env file. (The connection error was previously discarded with
// `_`, which left suite.db nil and produced confusing panics later.)
func (suite *UserTestSuite) SetupSuite() {
	envFilepath := filepath.Join("testdata", "local.svc-biz-core.env")
	db, err := newTestingDbClientFromEnvFile(envFilepath)
	if err != nil {
		suite.T().Fatalf("creating testing db client from %s: %v", envFilepath, err)
	}
	suite.db = db
}
// TestCreateUserSuccess verifies that a user row can be created.
func (suite *UserTestSuite) TestCreateUserSuccess() {
	t := suite.T()
	ctx := context.Background()
	now := time.Now()
	user := &User{
		Username:     "abcabc",
		Nickname:     "abcabc",
		Gender:       "M",
		Birthday:     now.AddDate(-20, 0, 0).UTC(),
		CreatedAt:    now.UTC(),
		RegisterType: UserRegisterTypeLegacy,
		RegisterTime: now.UTC(),
		UpdatedAt:    now.UTC(),
	}
	_, err := suite.db.GetDB(ctx).CreateUser(ctx, user)
	assert.NoError(t, err)
}
// TestFindUserByUserIDSuccess verifies that looking up user ID 1 succeeds.
func (suite *UserTestSuite) TestFindUserByUserIDSuccess() {
	t := suite.T()
	ctx := context.Background()
	user, err := suite.db.GetDB(ctx).FindUserByUserID(ctx, 1)
	assert.NotNil(t, user)
	assert.NoError(t, err)
}
// TestUpdateUserProfile exercises updating a user's profile.
// NOTE(review): randName is generated but never written into p, which stays a
// zero-value ProtoUserProfile; the final assert.Equal compares randName
// against whatever that zero-value update produced and looks like it cannot
// pass — confirm and populate p's nickname with randName.
func (suite *UserTestSuite) TestUpdateUserProfile() {
	const userID = 1
	t := suite.T()
	ctx := context.Background()
	randName := strconv.Itoa(rand.Int())
	var p ProtoUserProfile
	err := suite.db.GetDB(ctx).UpdateUserProfile(ctx, p, userID)
	assert.NoError(t, err)
	u, _ := suite.db.GetDB(ctx).FindUserByUserID(ctx, userID)
	assert.Equal(t, randName, u.Nickname)
}
// TestUserTestSuite runs UserTestSuite through testify's suite runner.
// (The old comment said "TestTokenTestSuite", a stale name.)
func TestUserTestSuite(t *testing.T) {
	suite.Run(t, new(UserTestSuite))
}
|
/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"fmt"
"http"
"image"
"image/jpeg"
"image/png"
"log"
"io"
"os"
"camli/blobref"
"camli/blobserver"
"camli/misc/resize"
"camli/schema"
)
// ImageHandler serves scaled (and optionally square-cropped) thumbnails of
// image blobs. (Pre-Go1 codebase.)
type ImageHandler struct {
	Fetcher             blobref.StreamingFetcher
	Cache               blobserver.Storage // optional
	MaxWidth, MaxHeight int                // ServeHTTP rejects values outside (0, 2000]
	Square              bool               // center-crop to a square before scaling
}
// storageSeekFetcher adapts the handler's streaming fetcher into a
// seek-capable fetcher. (Pre-Go1 codebase: errors are os.Error.)
func (ih *ImageHandler) storageSeekFetcher() (blobref.SeekFetcher, os.Error) {
	return blobref.SeekerFromStreamingFetcher(ih.Fetcher) // TODO: pass ih.Cache?
}
type subImager interface {
SubImage(image.Rectangle) image.Image
}
func squareImage(i image.Image) image.Image {
si, ok := i.(subImager)
if !ok {
log.Fatalf("image %T isn't a subImager", i)
}
b := i.Bounds()
if b.Dx() > b.Dy() {
thin := (b.Dx() - b.Dy()) / 2
newB := b
newB.Min.X += thin
newB.Max.X -= thin
return si.SubImage(newB)
}
thin := (b.Dy() - b.Dx()) / 2
newB := b
newB.Min.Y += thin
newB.Max.Y -= thin
return si.SubImage(newB)
}
// ServeHTTP serves a thumbnail of the file blob: the image is optionally
// center-cropped to a square (ih.Square) and downscaled to fit within
// MaxWidth x MaxHeight; upscaling is never performed and unchanged images
// are served from the original bytes. Only GET/HEAD are allowed and both
// dimensions must be in (0, 2000].
// (Pre-Go1 codebase: os.Error / err.String() / the old "http" package.)
func (ih *ImageHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, file *blobref.BlobRef) {
	if req.Method != "GET" && req.Method != "HEAD" {
		http.Error(rw, "Invalid method", 400)
		return
	}
	mw, mh := ih.MaxWidth, ih.MaxHeight
	if mw == 0 || mh == 0 || mw > 2000 || mh > 2000 {
		http.Error(rw, "bogus dimensions", 400)
		return
	}
	fetchSeeker, err := ih.storageSeekFetcher()
	if err != nil {
		http.Error(rw, err.String(), 500)
		return
	}
	fr, err := schema.NewFileReader(fetchSeeker, file)
	if err != nil {
		http.Error(rw, "Can't serve file: "+err.String(), 500)
		return
	}
	// Buffer the whole file so it can be decoded and, when no transform is
	// needed, re-served verbatim from the same buffer.
	var buf bytes.Buffer
	n, err := io.Copy(&buf, fr)
	if err != nil {
		log.Printf("image resize: error reading image %s: %v", file, err)
		return
	}
	i, format, err := image.Decode(bytes.NewBuffer(buf.Bytes()))
	if err != nil {
		http.Error(rw, "Can't serve file: "+err.String(), 500)
		return
	}
	b := i.Bounds()
	// useBytesUnchanged tracks whether the original encoded bytes can be
	// served as-is (no crop and no resize happened).
	useBytesUnchanged := true
	isSquare := b.Dx() == b.Dy()
	if ih.Square && !isSquare {
		useBytesUnchanged = false
		i = squareImage(i)
		b = i.Bounds()
	}
	// only do downscaling, otherwise just serve the original image
	if mw < b.Dx() || mh < b.Dy() {
		useBytesUnchanged = false
		const huge = 2400
		// If it's gigantic, it's more efficient to downsample first
		// and then resize; resizing will smooth out the roughness.
		// (trusting the moustachio guys on that one).
		if b.Dx() > huge || b.Dy() > huge {
			w, h := mw*2, mh*2
			if b.Dx() > b.Dy() {
				w = b.Dx() * h / b.Dy()
			} else {
				h = b.Dy() * w / b.Dx()
			}
			i = resize.Resample(i, i.Bounds(), w, h)
			b = i.Bounds()
		}
		// conserve proportions. use the smallest of the two as the decisive one.
		if mw > mh {
			mw = b.Dx() * mh / b.Dy()
		} else {
			mh = b.Dy() * mw / b.Dx()
		}
	}
	if !useBytesUnchanged {
		i = resize.Resize(i, b, mw, mh)
		// Encode as a new image
		buf.Reset()
		switch format {
		case "jpeg":
			err = jpeg.Encode(&buf, i, nil)
		default:
			// Anything that isn't JPEG is re-encoded as PNG; this matches
			// imageContentTypeOfFormat below.
			err = png.Encode(&buf, i)
		}
		if err != nil {
			http.Error(rw, "Can't serve file: "+err.String(), 500)
			return
		}
	}
	rw.Header().Set("Content-Type", imageContentTypeOfFormat(format))
	size := buf.Len()
	rw.Header().Set("Content-Length", fmt.Sprintf("%d", size))
	n, err = io.Copy(rw, &buf)
	if err != nil {
		log.Printf("error serving thumbnail of file schema %s: %v", file, err)
		return
	}
	if n != int64(size) {
		log.Printf("error serving thumbnail of file schema %s: sent %d, expected size of %d",
			file, n, size)
		return
	}
}
// imageContentTypeOfFormat maps an image.Decode format name to the MIME type
// it is served with. Everything that is not JPEG is served as PNG, mirroring
// the encoder selection in ServeHTTP.
func imageContentTypeOfFormat(format string) string {
	switch format {
	case "jpeg":
		return "image/jpeg"
	default:
		return "image/png"
	}
}
|
package main
import (
"fmt"
"unicode/utf8"
)
// main walks a mixed ASCII/CJK/emoji string rune by rune, printing each
// rune together with its starting byte offset.
func main() {
	text := "abc一二三🌚"
	for idx := 0; idx < len(text); {
		r, width := utf8.DecodeRuneInString(text[idx:])
		fmt.Printf("---%d--%c---\n", idx, r)
		idx += width
	}
}
|
// Copyright (C) 2020 Cisco Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"bytes"
"fmt"
"net"
)
// WireguardTunnel groups the addressing and key material of a local
// wireguard tunnel interface.
type WireguardTunnel struct {
	Addr       net.IP // tunnel endpoint address
	Port       uint16 // listen port
	SwIfIndex  uint32 // software interface index of the tunnel interface
	PublicKey  []byte
	PrivateKey []byte
}
// String renders the tunnel as "[swifindex] addr:port" for logging.
func (t *WireguardTunnel) String() string {
	return fmt.Sprintf("[%d] %s:%d", t.SwIfIndex, t.Addr, t.Port)
}
// WireguardPeer describes one remote wireguard peer and the set of
// IP prefixes (AllowedIps) routed to it.
type WireguardPeer struct {
	PublicKey           []byte
	Port                uint16
	PersistentKeepalive int
	TableID             uint32
	Addr                net.IP
	SwIfIndex           uint32 // interface the peer is attached to
	Index               uint32 // peer index
	AllowedIps          []net.IPNet
}
// allowedIpsMap returns the peer's AllowedIps as a set keyed by the
// prefixes' string form, for order-insensitive membership checks.
func (t *WireguardPeer) allowedIpsMap() map[string]bool {
	set := make(map[string]bool, len(t.AllowedIps))
	for _, aip := range t.AllowedIps {
		set[aip.String()] = true
	}
	return set
}
// Equal reports whether o describes the same peer configuration as t:
// all scalar fields must match and the AllowedIps lists must contain the
// same prefixes (order-insensitive; entries are assumed unique).
func (t *WireguardPeer) Equal(o *WireguardPeer) bool {
	if o == nil {
		return false
	}
	scalarsMatch := o.Index == t.Index &&
		bytes.Equal(o.PublicKey, t.PublicKey) &&
		o.Port == t.Port &&
		o.TableID == t.TableID &&
		o.SwIfIndex == t.SwIfIndex &&
		o.Addr.Equal(t.Addr) &&
		o.PersistentKeepalive == t.PersistentKeepalive &&
		len(t.AllowedIps) == len(o.AllowedIps)
	if !scalarsMatch {
		return false
	}
	/* AllowedIps should be unique, so a set comparison suffices */
	set := t.allowedIpsMap()
	for _, aip := range o.AllowedIps {
		if !set[aip.String()] {
			return false
		}
	}
	return true
}
// AddAllowedIp appends addr to the peer's allowed prefixes unless an equal
// prefix is already present.
func (t *WireguardPeer) AddAllowedIp(addr net.IPNet) {
	if !t.allowedIpsMap()[addr.String()] {
		t.AllowedIps = append(t.AllowedIps, addr)
	}
}
// DelAllowedIp removes every prefix equal to addr from the peer's allowed
// prefixes; it is a no-op when addr is not present.
func (t *WireguardPeer) DelAllowedIp(addr net.IPNet) {
	target := addr.String()
	kept := make([]net.IPNet, 0, len(t.AllowedIps))
	for _, aip := range t.AllowedIps {
		if aip.String() != target {
			kept = append(kept, aip)
		}
	}
	t.AllowedIps = kept
}
// String renders the peer in a compact bracketed form for logging;
// TableID and PersistentKeepalive are shown only conditionally.
func (t *WireguardPeer) String() string {
	s := fmt.Sprintf("[id=%d swif=%d", t.Index, t.SwIfIndex)
	s += fmt.Sprintf(" addr=%s port=%d", t.Addr, t.Port)
	// Raw public key bytes rendered as-is (not base64).
	s += fmt.Sprintf(" pubKey=%s", string(t.PublicKey[:]))
	s += StrableListToString(" allowedIps=", t.AllowedIps)
	if t.TableID != 0 {
		s += fmt.Sprintf(" tbl=%d", t.TableID)
	}
	// NOTE(review): this hides the keepalive only when it equals 1, while
	// the TableID check above compares against the zero value; `!= 0` may
	// have been intended here — confirm.
	if t.PersistentKeepalive != 1 {
		s += fmt.Sprintf(" ka=%d", t.PersistentKeepalive)
	}
	s += "]"
	return s
}
|
/*****************************************************************
* Copyright©,2020-2022, email: 279197148@qq.com
* Version: 1.0.0
* @Author: yangtxiang
* @Date: 2020-08-25 18:35
* Description:
*****************************************************************/
package xhttpServer
import (
"github.com/gorilla/mux"
"strings"
)
// TSubRouter wraps a gorilla/mux router together with the base route string
// passed to the handlers it creates.
type TSubRouter struct {
	r          *mux.Router
	baseRouter string // base route prefix handed to newServerHandler/newServerHandleByFun
}
// newSubRouter wraps r in a TSubRouter.
// NOTE(review): baseRouter is left at its zero value ("") and no setter is
// visible in this chunk — confirm it is assigned elsewhere before Handle
// relies on it.
func newSubRouter(r *mux.Router) *TSubRouter {
	return &TSubRouter{
		r: r,
	}
}
// Handle registers handler for pattern. method is an optional
// comma-separated list of HTTP methods to restrict the route to; an empty
// string leaves the route unrestricted.
func (p *TSubRouter) Handle(method string, pattern string, handler HttpServerHandler) {
	route := p.r.Handle(pattern, newServerHandler(p.baseRouter, handler))
	if method == "" {
		return
	}
	route.Methods(strings.Split(method, ",")...)
}
// HandleFun registers the plain handler function fun for pattern. method is
// an optional comma-separated list of HTTP methods; an empty string leaves
// the route unrestricted.
func (p *TSubRouter) HandleFun(method string, pattern string, fun HttpServerHandleFun) {
	route := p.r.Handle(pattern, newServerHandleByFun(p.baseRouter, fun))
	if method == "" {
		return
	}
	route.Methods(strings.Split(method, ",")...)
}
|
// +build ignore
package frclient
import (
"fmt"
"time"
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/balance"
"github.com/iotaledger/wasp/client/chainclient"
"github.com/iotaledger/wasp/packages/coretypes"
"github.com/iotaledger/wasp/packages/kv/codec"
"github.com/iotaledger/wasp/packages/sctransaction"
"github.com/iotaledger/wasp/packages/util"
"github.com/iotaledger/wasp/packages/vm/examples/fairroulette"
"github.com/iotaledger/wasp/packages/webapi/model/statequery"
)
// FairRouletteClient wraps a generic chain client together with the
// FairRoulette contract's hname so requests are addressed to that contract.
type FairRouletteClient struct {
	*chainclient.Client
	contractHname coretypes.Hname
}
// NewClient builds a FairRouletteClient from a chain client and the target
// contract's hname.
func NewClient(scClient *chainclient.Client, contractHname coretypes.Hname) *FairRouletteClient {
	frc := &FairRouletteClient{}
	frc.Client = scClient
	frc.contractHname = contractHname
	return frc
}
// Status is a snapshot of the FairRoulette contract state as assembled by
// FetchStatus.
type Status struct {
	*chainclient.SCStatus
	CurrentBetsAmount uint16                   // reported length of the bets array
	CurrentBets       []*fairroulette.BetInfo  // decoded bets (first 100 fetched)
	LockedBetsAmount  uint16                   // reported length of the locked-bets array
	LockedBets        []*fairroulette.BetInfo  // decoded locked bets (first 100 fetched)
	LastWinningColor  int64
	PlayPeriodSeconds int64
	NextPlayTimestamp time.Time
	PlayerStats       map[address.Address]*fairroulette.PlayerStats
	WinsPerColor      []uint32
}
// NextPlayIn returns a human-readable duration until the next play, rounded
// down to whole seconds, or "unknown" when the play time is already in the
// past relative to when the status was fetched.
func (s *Status) NextPlayIn() string {
	remaining := s.NextPlayTimestamp.Sub(s.FetchedAt)
	remaining -= remaining % time.Second // truncate to whole seconds
	if remaining < 0 {
		return "unknown"
	}
	return remaining.String()
}
// FetchStatus queries the chain for the full FairRoulette contract state
// (bets, locked bets, winning color, play period, player stats, wins per
// color) and decodes it into a Status.
func (frc *FairRouletteClient) FetchStatus() (*Status, error) {
	scStatus, results, err := frc.FetchSCStatus(func(query *statequery.Request) {
		// Arrays are fetched with a hard cap of 100 entries.
		query.AddArray(fairroulette.StateVarBets, 0, 100)
		query.AddArray(fairroulette.StateVarLockedBets, 0, 100)
		query.AddScalar(fairroulette.StateVarLastWinningColor)
		query.AddScalar(fairroulette.ReqVarPlayPeriodSec)
		query.AddScalar(fairroulette.StateVarNextPlayTimestamp)
		query.AddMap(fairroulette.StateVarPlayerStats, 100)
		query.AddArray(fairroulette.StateArrayWinsPerColor, 0, fairroulette.NumColors)
	})
	if err != nil {
		return nil, err
	}
	status := &Status{SCStatus: scStatus}
	// NOTE(review): the discarded second return of MustInt64 is presumably an
	// "exists" flag; missing scalars silently decode as 0 — confirm intended.
	lastWinningColor, _ := results.Get(fairroulette.StateVarLastWinningColor).MustInt64()
	status.LastWinningColor = lastWinningColor
	playPeriodSeconds, _ := results.Get(fairroulette.ReqVarPlayPeriodSec).MustInt64()
	status.PlayPeriodSeconds = playPeriodSeconds
	if status.PlayPeriodSeconds == 0 {
		// Fall back to the contract default when the period was never set.
		status.PlayPeriodSeconds = fairroulette.DefaultPlaySecondsAfterFirstBet
	}
	// Timestamp is stored as nanoseconds since the Unix epoch.
	nextPlayTimestamp, _ := results.Get(fairroulette.StateVarNextPlayTimestamp).MustInt64()
	status.NextPlayTimestamp = time.Unix(0, nextPlayTimestamp).UTC()
	status.PlayerStats, err = decodePlayerStats(results.Get(fairroulette.StateVarPlayerStats).MustMapResult())
	if err != nil {
		return nil, err
	}
	status.WinsPerColor, err = decodeWinsPerColor(results.Get(fairroulette.StateArrayWinsPerColor).MustArrayResult())
	if err != nil {
		return nil, err
	}
	status.CurrentBetsAmount, status.CurrentBets, err = decodeBets(results.Get(fairroulette.StateVarBets).MustArrayResult())
	if err != nil {
		return nil, err
	}
	status.LockedBetsAmount, status.LockedBets, err = decodeBets(results.Get(fairroulette.StateVarLockedBets).MustArrayResult())
	if err != nil {
		return nil, err
	}
	return status, nil
}
// decodeBets decodes every value of an on-chain bet array. It returns the
// array's reported total length (which may exceed the number of fetched
// values) plus the decoded bets.
func decodeBets(result *statequery.ArrayResult) (uint16, []*fairroulette.BetInfo, error) {
	bets := make([]*fairroulette.BetInfo, 0, len(result.Values))
	for _, raw := range result.Values {
		bet, err := fairroulette.DecodeBetInfo(raw)
		if err != nil {
			return 0, nil, err
		}
		bets = append(bets, bet)
	}
	return result.Len, bets, nil
}
// decodeWinsPerColor converts the raw per-color win counters into uint32s;
// nil entries (colors that never won) decode as zero.
func decodeWinsPerColor(result *statequery.ArrayResult) ([]uint32, error) {
	wins := make([]uint32, 0, len(result.Values))
	for _, raw := range result.Values {
		var count uint32
		if raw != nil {
			count = util.MustUint32From4Bytes(raw)
		}
		wins = append(wins, count)
	}
	return wins, nil
}
// decodePlayerStats converts an on-chain stats map into per-address
// PlayerStats, validating that every key has the length of an address.
func decodePlayerStats(result *statequery.MapResult) (map[address.Address]*fairroulette.PlayerStats, error) {
	stats := make(map[address.Address]*fairroulette.PlayerStats, len(result.Entries))
	for _, entry := range result.Entries {
		if len(entry.Key) != address.Length {
			return nil, fmt.Errorf("not an address: %v", entry.Key)
		}
		addr, _, err := address.FromBytes(entry.Key)
		if err != nil {
			return nil, err
		}
		ps, err := fairroulette.DecodePlayerStats(entry.Value)
		if err != nil {
			return nil, err
		}
		stats[addr] = ps
	}
	return stats, nil
}
// Bet posts a RequestPlaceBet request to the contract, transferring `amount`
// IOTA tokens and carrying the chosen color as a request argument.
func (frc *FairRouletteClient) Bet(color int, amount int) (*sctransaction.Transaction, error) {
	return frc.PostRequest(
		frc.contractHname,
		fairroulette.RequestPlaceBet,
		chainclient.PostRequestParams{
			Transfer: map[balance.Color]int64{balance.ColorIOTA: int64(amount)},
			ArgsRaw:  codec.MakeDict(map[string]interface{}{fairroulette.ReqVarColor: int64(color)}),
		},
	)
}
// SetPeriod posts a RequestSetPlayPeriod request setting the play period of
// the contract to the given number of seconds.
func (frc *FairRouletteClient) SetPeriod(seconds int) (*sctransaction.Transaction, error) {
	return frc.PostRequest(
		frc.contractHname,
		fairroulette.RequestSetPlayPeriod,
		chainclient.PostRequestParams{
			ArgsRaw: codec.MakeDict(map[string]interface{}{fairroulette.ReqVarPlayPeriodSec: int64(seconds)}),
		},
	)
}
|
package project
import (
"fmt"
sdk "github.com/cosmos/cosmos-sdk/types"
)
// NewHandler returns the sdk.Handler for project requests. It currently just
// logs that it was reached and returns an empty result.
func NewHandler() sdk.Handler {
	handler := func(ctx sdk.Context, msg sdk.Msg) sdk.Result {
		fmt.Println("In Project Handler: *****************************************************")
		return sdk.Result{}
	}
	return handler
}
|
// Copyright 2018 Diego Bernardes. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package test
import (
"context"
"github.com/diegobernardes/flare"
)
// Trigger is a mock of the subscription.Trigger, this is used by tests.
// The configured err is returned unconditionally by Push.
type Trigger struct{ err error }
// Push is a mock of subscription.Push, this is used by tests; it ignores its
// arguments and returns the error the mock was configured with.
func (t *Trigger) Push(_ context.Context, _ *flare.Document, _ string) error { return t.err }
// NewTrigger returns a mock trigger whose Push always returns err.
func NewTrigger(err error) *Trigger {
	t := Trigger{err: err}
	return &t
}
|
package sysinit
/*
符号 含义 示例
* 表示任何时刻
, 表示分割 如第三段里:2,4,表示 2 点和 4 点执行
- 表示一个段 如第三端里: 1-5,就表示 1 到 5 点
/n 表示每个n的单位执行一次 如第三段里,1, 就表示每隔 1 个小时执行一次命令。也可以写成1-23/1
示例 详细含义
0/30 * * * * * 每 30 秒 执行
0 43 21 * * * 21:43 执行
0 0 17 * * 1 每周一的 17:00 执行
0 0,10 17 * * 0,2,3 每周日,周二,周三的 17:00和 17:10 执行
0 0 21 * * 1-6 周一到周六 21:00 执行
0 0/10 * * * 每隔 10 分 执行
*/
// Beego scheduled tasks are registered here.
// NOTE(review): no tasks are registered yet; this init is intentionally empty.
func init() {
}
|
package main
import (
"context"
"database/sql"
"fmt"
"github.com/alexliesenfeld/health"
_ "github.com/mattn/go-sqlite3"
"log"
"net/http"
"time"
)
// This is an example configuration that shows how Kubernetes liveness and readiness checks can be created with this
// library (for more info, please visit
// https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/).
//
// This file is accompanied by the file `example-pod-config.yaml` that is located in the same directory. It
// contains a Kubernetes pod configuration that complements this implementation.
//
// Note that Kubernetes readiness and especially liveness checks need to be designed with care to not cause
// any unintended behaviour (such as unexpected pod restarts, cascading failures, etc.). Please refer to the following
// articles for guidance:
// - https://www.innoq.com/en/blog/kubernetes-probes/
// - https://blog.colinbreck.com/kubernetes-liveness-and-readiness-probes-how-to-avoid-shooting-yourself-in-the-foot/
// - https://srcco.de/posts/kubernetes-liveness-probes-are-dangerous.html
// main wires up a readiness checker (database ping + an always-failing
// periodic disk check, for demonstration) and a trivial liveness checker,
// then serves both on :3000 for Kubernetes probes.
func main() {
	// BUG FIX: the original discarded the sql.Open error with `_`.
	// sql.Open only validates its arguments (it does not dial), so an
	// error here is a programming mistake worth failing fast on.
	db, err := sql.Open("sqlite3", "simple.sqlite")
	if err != nil {
		log.Fatalln(err)
	}
	defer db.Close()
	// Create a new Checker for our readiness check.
	readinessChecker := health.NewChecker(
		// Configure a global timeout that will be applied to all check functions.
		health.WithTimeout(10*time.Second),
		// A check configuration to see if our database connection is up.
		// Hint: Like with all external dependencies, this database instance is considered to be "service private".
		// If many of your services use the same database instance, the readiness checks
		// of all of these services will start failing at once on every database hick-up.
		// This is most likely not what you want. For guidance on how to design Kubernetes checks,
		// please refer to the links that are listed in the main function documentation above.
		health.WithCheck(health.Check{
			Name:  "database", // A unique check name.
			Check: db.PingContext,
		}),
		// The following check will be executed periodically every 15 seconds
		// started with an initial delay of 3 seconds. The check function will NOT
		// be executed for each HTTP request.
		health.WithPeriodicCheck(15*time.Second, 3*time.Second, health.Check{
			Name: "disk",
			// If the check function returns an error, this component will be considered unavailable ("down").
			// The context contains a deadline according to the configuration of the Checker.
			Check: func(ctx context.Context) error {
				return fmt.Errorf("this makes the check fail")
			},
		}),
		// Set a status listener that will be invoked when the health status changes.
		// More powerful hooks are also available (see docs). For guidance, please refer to the links
		// listed in the main function documentation above.
		health.WithStatusListener(func(ctx context.Context, state health.CheckerState) {
			// Use log.Printf directly instead of Println(fmt.Sprintf(...)).
			log.Printf("readiness status changed to %s", state.Status)
		}),
	)
	// Liveness check should mostly contain checks that identify if the service is locked up or in a state that it
	// cannot recover from (deadlocks, etc.). In most cases it should just respond with 200 OK to avoid unexpected
	// restarts.
	livenessChecker := health.NewChecker()
	// Create a new health check http.Handler that returns the health status
	// serialized as a JSON string. You can pass further configuration
	// options to NewHandler to modify the default configuration.
	http.Handle("/live", health.NewHandler(livenessChecker))
	http.Handle("/ready", health.NewHandler(readinessChecker))
	// Start the HTTP server
	log.Fatalln(http.ListenAndServe(":3000", nil))
}
|
package main
import (
"fmt"
"os"
"github.com/philippklemmer/discordbotoverview-service/bot"
"github.com/philippklemmer/discordbotoverview-service/config"
)
// main loads the bot configuration, starts the bot, and then blocks
// forever so the process keeps serving Discord events.
func main() {
	if err := config.ReadConfig(); err != nil {
		fmt.Println(err.Error())
		// BUG FIX: the original exited with status 0, which makes a
		// configuration failure look like a clean shutdown to
		// supervisors/orchestrators. Exit non-zero instead.
		os.Exit(1)
	}
	bot.Start()
	// Block forever; the bot runs on background goroutines.
	<-make(chan struct{})
}
|
// Copyright © 2020. All rights reserved.
// Author: Ilya Stroy.
// Contacts: qioalice@gmail.com, https://github.com/qioalice
// License: https://opensource.org/licenses/MIT
package privet
import (
"strings"
)
type (
	/*
	Locale is a storage of all translated phrases for one language.
	Getting locale by Client.LC() or Client.Default()
	allows you to get (from cache) Locale object,
	using which you may transform your translated key to the desired language's phrase.
	WARNING!
	You must not instantiate this class manually!
	It's useless but safely.
	So you won't get panicked or UB.
	Manually instantiated Locale objects are considered not initialized
	and provides to you the same behaviour as if it'd be nil.
	*/
	Locale struct {
		owner        *Client     // Client that owns/caches this Locale (used by MarkAsDefault)
		root         *localeNode // root of the translation tree walked by Tr
		name         string      // in format xx_YY
		phrasesCount uint64      // not only root localeNode but all nested also
	}
)
/*
Tr tries to get translated language phrase by the specified translation key
and then tries to interpolate this phrase using passed args, if any.
Nil safe.
If this method is called on nil object, the special string is returned.
Special returned strings.
All of special returned strings has the same format:
"i18nErr: <error_class>. Key: <translation_key>".
<translation_key> is your translation key,
<error_class> might be:
- _SPTR_LOCALE_IS_NIL: Current Locale object is nil,
- _SPTR_TRANSLATION_KEY_IS_EMPTY: Translation key is empty,
- _SPTR_TRANSLATION_KEY_IS_INCORRECT: Translation key is invalid (incorrect separator),
- _SPTR_TRANSLATION_NOT_FOUND: Translation not found.
*/
func (l *Locale) Tr(key string, args Args) string {
	// Guard clauses: an invalid/nil locale or an empty key short-circuits
	// to a special diagnostic string rather than panicking.
	switch {
	case !l.isValid():
		return sptr(_SPTR_LOCALE_IS_NIL, key)
	case key == "":
		return sptr(_SPTR_TRANSLATION_KEY_IS_EMPTY, key)
	}
	var (
		prefix      string
		originalKey = key // preserved for error reporting; `key` is consumed below
	)
	// Walk the locale tree, consuming one delimiter-separated segment of
	// the key per iteration. The loop ends when subNode returns nil
	// (unknown path segment).
	for node := l.root; node != nil; {
		if idx := strings.IndexByte(key, DEFAULT_DELIMITER); idx != -1 {
			// Split off the next path segment before the delimiter.
			prefix, key = key[:idx], key[idx+1:]
			if len(key) == 0 || len(prefix) == 0 {
				// Leading, trailing, or doubled delimiter: malformed key.
				return sptr(_SPTR_TRANSLATION_KEY_IS_INCORRECT, originalKey)
			}
			node = node.subNode(prefix, false)
			continue
		} else if translatedPhrase, found := node.content[key]; found {
			// Final segment found: interpolate only when args were supplied.
			if len(args) != 0 {
				return newInterpolator(translatedPhrase, args).interpolate()
			} else {
				return translatedPhrase
			}
		} else {
			return sptr(_SPTR_TRANSLATION_NOT_FOUND, originalKey)
		}
	}
	// Reached only when subNode returned nil for some intermediate segment.
	return sptr(_SPTR_TRANSLATION_NOT_FOUND, originalKey)
}
/*
MarkAsDefault marks the current Locale object as the owning Client's
default Locale, overwriting any previously marked default.
Nil safe.
Calling this method on a nil or not-initialized Locale is a no-op.
*/
func (l *Locale) MarkAsDefault() {
	if l.isValid() {
		l.owner.setDefaultLocale(l)
	}
}
/*
Name returns the current Locale's name.
The returned name is always in "xx_YY" format, where:
- xx is the lower-case language code ("en", "ru", "jp"),
- YY is the upper-case country code ("US", "GB", "RU").
Nil safe.
Calling this method on a nil or not-initialized Locale returns "".
*/
func (l *Locale) Name() string {
	if l.isValid() {
		return l.name
	}
	return ""
}
|
package main
import(
"fmt"
"net/http"
"io/ioutil"
)
// main fetches a fixed list of URLs concurrently and prints each body.
func main() {
	urls := []string{
		"http://python.org",
		"http://golang.org"}
	responses := make(chan string)
	for _, url := range urls {
		go func(url string) {
			// BUG FIX: the original ignored the http.Get error; on failure
			// resp is nil and the deferred resp.Body.Close() panics.
			resp, err := http.Get(url)
			if err != nil {
				responses <- fmt.Sprintf("error fetching %s: %v", url, err)
				return
			}
			defer resp.Body.Close()
			body, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				responses <- fmt.Sprintf("error reading %s: %v", url, err)
				return
			}
			responses <- string(body)
		}(url)
	}
	// BUG FIX: the original ranged over `responses` without ever closing
	// the channel, so after the last response the main goroutine blocked
	// forever ("all goroutines are asleep - deadlock"). Receive exactly
	// one message per URL instead.
	for range urls {
		fmt.Println(<-responses)
	}
}
|
package main
import (
//"github.com/zhoujiagen/ProgrammingInGo/examples/hello"
"github.com/zhoujiagen/ProgrammingInGo/examples/bigdigits"
)
func main() {
//hello.HelloWorld()
}
|
package main
import (
"github.com/igm/sockjs-go/sockjs"
)
// ClientSessionTransport abstracts a bidirectional string-message
// transport for a client session: Send delivers one message, Recv blocks
// for the next incoming one.
type ClientSessionTransport interface {
	Send(string) error
	Recv() (string, error)
}
// SockjsClientSessionTransport adapts a sockjs.Session to the
// ClientSessionTransport interface by delegating Send/Recv to the session.
// NOTE(review): embedding the interface alongside concrete Send/Recv
// implementations looks redundant — confirm nothing relies on the
// embedded (nil) field before removing it.
type SockjsClientSessionTransport struct {
	ClientSessionTransport
	sock sockjs.Session
}
// Send forwards one outbound message to the underlying sockjs session.
func (s *SockjsClientSessionTransport) Send(str string) error {
	return s.sock.Send(str)
}
// Recv blocks until the underlying sockjs session yields the next
// inbound message (or an error).
func (s *SockjsClientSessionTransport) Recv() (string, error) {
	return s.sock.Recv()
}
|
package jqer
import (
"context"
"encoding/json"
"fmt"
"reflect"
"sort"
"strings"
"time"
"github.com/bbuck/go-lexer"
"github.com/dop251/goja"
"github.com/itchyny/gojq"
)
// Package-level knobs controlling query parsing/evaluation.
// NOTE(review): only TrimWhitespaceOnQueryStrings is referenced in this
// chunk; the remaining flags and wrapping markers are presumably consumed
// elsewhere in the package — verify before changing them.
var (
	StringQueryRequiresWrappings bool
	TrimWhitespaceOnQueryStrings bool
	SearchInStrings              bool
	WrappingBegin                = ""
	WrappingIncrement            = "{{"
	WrappingDecrement            = "}}"
)
// Evaluate evaluates the data against the query provided and returns the
// result. A nil query acts as the identity transform: the data itself is
// returned as the single result element.
func Evaluate(data, query interface{}) ([]interface{}, error) {
	if query == nil {
		return []interface{}{data}, nil
	}
	return recursiveEvaluate(data, query)
}
// recursiveEvaluate dispatches on the dynamic type of query: strings are
// scanned for embedded jq()/js() expressions, maps and arrays are
// evaluated element-wise, and scalar values pass through unchanged.
func recursiveEvaluate(data, query interface{}) ([]interface{}, error) {
	var out []interface{}
	if query == nil {
		out = append(out, nil)
		return out, nil
	}
	switch q := query.(type) {
	// The empty scalar cases intentionally do nothing (Go switch cases do
	// not fall through); control continues after the switch, where the
	// scalar query value is returned as-is.
	case bool:
	case int:
	case float64:
	case string:
		return recurseIntoString(data, q)
	case map[string]interface{}:
		return recurseIntoMap(data, q)
	case []interface{}:
		return recurseIntoArray(data, q)
	default:
		return nil, fmt.Errorf("unexpected type: %s", reflect.TypeOf(query).String())
	}
	out = append(out, query)
	return out, nil
}
// Token types emitted by the JqState lexer state function.
const (
	JqStartToken lexer.TokenType = iota // body of a jq(...) expression
	JsStartToken                        // body of a js(...) expression
	StringToken                         // literal text outside jq()/js()
	ErrorToken                          // unterminated jq()/js() (missing closing bracket)
	NoToken                             // sentinel: current 3-rune window is not a marker
)
// JqState is the lexer state function. It scans for "jq(" / "js(" markers,
// emits any text preceding a marker as a StringToken, then emits the
// bracket-balanced content between the marker's parentheses as a
// JqStartToken or JsStartToken. It returns itself to keep lexing, or nil
// at EOF / on an unterminated marker.
func JqState(l *lexer.L) lexer.StateFunc {
	src := make([]string, 3)
	var jdxJ int
	// mover advances (forward=true) or rewinds the lexer by `rewind` runes.
	mover := func(rewind int, forward bool) {
		for a := 0; a < rewind; a++ {
			if forward {
				l.Next()
			} else {
				l.Rewind()
			}
		}
	}
	// Read a 3-rune window to test for a "jq(" or "js(" marker.
	for i := 0; i < 3; i++ {
		r := l.Next()
		if r == lexer.EOFRune {
			// emit string token if there is content in it
			if len(l.Current()) > 0 {
				l.Emit(StringToken)
			}
			return nil
		}
		src[i] = string(r)
		// if one of the strings has a j we store the index for rewind
		// this is only to save scanning
		if src[i] == "j" && i > 0 {
			jdxJ = i
		}
	}
	isJX := strings.Join(src, "")
	token := NoToken
	if isJX == "jq(" {
		token = JqStartToken
	} else if isJX == "js(" {
		token = JsStartToken
	}
	if token != NoToken {
		// this cuts out the 'jX(' bit
		mover(3, false)
		// emit string token if there is content in it
		if len(l.Current()) > 0 {
			l.Emit(StringToken)
		}
		mover(3, true)
		// counting the '()' so nested parentheses inside the body are kept
		var open int
		l.Ignore()
		for {
			n := l.Next()
			if n == lexer.EOFRune {
				// marker never closed: surface as an ErrorToken
				l.Emit(ErrorToken)
				return nil
			}
			switch n {
			case '(':
				open++
			case ')':
				open--
			}
			if open < 0 {
				// hit the ')' matching the marker itself: body complete
				l.Rewind()
				break
			}
		}
		l.Emit(token)
		// remove closing ')'
		mover(1, true)
		l.Ignore()
		return JqState
	}
	// only rewind to jdxJ, if there was no j in the runes, we can skip rewind all together
	if jdxJ > 0 {
		mover(3-jdxJ, false)
	}
	return JqState
}
// recurseIntoString lexes s for embedded jq(...) and js(...) expressions,
// evaluates each against data, and returns either the single evaluated
// value (when the whole string was one expression) or the string formed
// by joining all fragments (literals plus rendered results).
func recurseIntoString(data interface{}, s string) ([]interface{}, error) {
	out := make([]interface{}, 0)
	if TrimWhitespaceOnQueryStrings {
		s = strings.TrimSpace(s)
	}
	l := lexer.New(s, JqState)
	l.Start()
	for {
		tok, done := l.NextToken()
		if done {
			break
		}
		switch tok.Type {
		case ErrorToken:
			return nil, fmt.Errorf("jq/js script missing bracket")
		case JqStartToken:
			x, err := jq(data, tok.Value)
			if err != nil {
				return nil, fmt.Errorf("error executing jq query %s: %w", tok.Value, err)
			}
			if len(x) == 0 || len(x) > 0 && x[0] == nil {
				return nil, fmt.Errorf("error in jq query %s: no results", tok.Value)
			}
			if len(x) == 1 {
				out = append(out, x[0])
			} else {
				return nil, fmt.Errorf("jq query produced multiple outputs")
			}
		case JsStartToken:
			// Extracted into a helper so the per-evaluation defers
			// (timeout cancel, watchdog shutdown) fire after each script
			// instead of accumulating until this function returns.
			ret, err := evaluateJS(data, tok.Value)
			if err != nil {
				return nil, err
			}
			out = append(out, ret)
		default:
			out = append(out, tok.Value)
		}
	}
	if len(out) == 1 {
		return out, nil
	}
	// Multiple fragments: render each to a string and join them.
	// BUG FIX: the original used make([]string, len(out)) and then
	// appended, prepending len(out) zero-value strings to the join input.
	x := make([]string, 0, len(out))
	for i := range out {
		part := out[i]
		if _, ok := part.(string); ok {
			x = append(x, fmt.Sprintf("%v", part))
		} else {
			data, err := json.Marshal(part)
			if err != nil {
				return nil, err
			}
			x = append(x, string(data))
		}
	}
	s = strings.Join(x, "")
	out = make([]interface{}, 1)
	out[0] = s
	return out, nil
}

// evaluateJS runs one js(...) snippet against data inside a fresh goja VM
// with a 10s watchdog, returning the exported result. A nil result is an
// error ("no results").
func evaluateJS(data interface{}, script string) (interface{}, error) {
	vm := goja.New()
	fn := fmt.Sprintf("function fn(data) {\n %s \n}", script)
	_, err := vm.RunString(fn)
	if err != nil {
		return nil, fmt.Errorf("error loading js query %s: %w", script, err)
	}
	fnExe, ok := goja.AssertFunction(vm.Get("fn"))
	if !ok {
		// NOTE(review): err is always nil on this path, so %w wraps
		// nothing — kept as-is to preserve the original message.
		return nil, fmt.Errorf("error getting js query %s: %w", script, err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	done := make(chan bool, 1)
	// Watchdog: interrupt the VM if the script outlives the timeout.
	go func(ctx context.Context, rt *goja.Runtime, b chan bool) {
		select {
		case <-b:
			return
		case <-ctx.Done():
			rt.Interrupt("timeout")
		}
	}(ctx, vm, done)
	defer func(b chan bool) {
		b <- true
	}(done)
	// execute and get results
	v, err := fnExe(goja.Undefined(), vm.ToValue(data))
	if err != nil {
		return nil, fmt.Errorf("error running js query %s: %w", script, err)
	}
	ret := v.Export()
	if ret == nil {
		return nil, fmt.Errorf("error in js query %s: no results", script)
	}
	return ret, nil
}
// recurseIntoMap evaluates every value of m against data, visiting keys in
// sorted order for determinism. Each entry must produce exactly one
// result; the evaluated map is returned as the single output element.
func recurseIntoMap(data interface{}, m map[string]interface{}) ([]interface{}, error) {
	keys := make([]string, 0, len(m))
	for key := range m {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	evaluated := make(map[string]interface{}, len(m))
	for _, key := range keys {
		res, err := recursiveEvaluate(data, m[key])
		if err != nil {
			return nil, fmt.Errorf("error in '%s': %w", key, err)
		}
		switch {
		case len(res) == 0:
			return nil, fmt.Errorf("error in element '%s': no results", key)
		case len(res) > 1:
			return nil, fmt.Errorf("error in element '%s': more than one result", key)
		}
		evaluated[key] = res[0]
	}
	return []interface{}{evaluated}, nil
}
// recurseIntoArray evaluates each element of q against data in order.
// Every element must produce exactly one result; the evaluated slice is
// returned as the single output element.
func recurseIntoArray(data interface{}, q []interface{}) ([]interface{}, error) {
	evaluated := make([]interface{}, 0, len(q))
	for idx, element := range q {
		res, err := recursiveEvaluate(data, element)
		if err != nil {
			return nil, fmt.Errorf("error in element %d: %w", idx, err)
		}
		switch {
		case len(res) == 0:
			return nil, fmt.Errorf("error in element %d: no results", idx)
		case len(res) > 1:
			return nil, fmt.Errorf("error in element %d: more than one result", idx)
		}
		evaluated = append(evaluated, res[0])
	}
	return []interface{}{evaluated}, nil
}
// jq runs a gojq query against input. The input is round-tripped through
// JSON so gojq sees only plain map[string]interface{} / []interface{}
// values. Execution is bounded by a 10s timeout; every value produced by
// the query iterator is collected into the returned slice.
func jq(input interface{}, command string) ([]interface{}, error) {
	data, err := json.Marshal(input)
	if err != nil {
		return nil, err
	}
	var x interface{}
	if err = json.Unmarshal(data, &x); err != nil {
		return nil, err
	}
	query, err := gojq.Parse(command)
	if err != nil {
		return nil, err
	}
	var output []interface{}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	iter := query.RunWithContext(ctx, x)
	// CLEANUP: the original loop declared an index variable `i` that was
	// never used; a plain for-loop is equivalent.
	for {
		v, ok := iter.Next()
		if !ok {
			break
		}
		// gojq reports query-time failures as error values in the stream.
		if err, ok := v.(error); ok {
			return nil, err
		}
		output = append(output, v)
	}
	return output, nil
}
|
package routing_table
import (
"errors"
"github.com/cloudfoundry-incubator/runtime-schema/models"
)
type RoutesByProcessGuid map[string][]string
type ContainersByProcessGuid map[string][]Container
// RoutesByProcessGuidFromDesireds indexes the routes of each desired LRP
// by its process GUID.
func RoutesByProcessGuidFromDesireds(desireds []models.DesiredLRP) RoutesByProcessGuid {
	byGuid := RoutesByProcessGuid{}
	for _, lrp := range desireds {
		byGuid[lrp.ProcessGuid] = lrp.Routes
	}
	return byGuid
}
// ContainersByProcessGuidFromActuals groups the containers of each actual
// LRP by process GUID, silently skipping actuals that cannot be converted
// (i.e. those without any ports).
func ContainersByProcessGuidFromActuals(actuals []models.ActualLRP) ContainersByProcessGuid {
	grouped := ContainersByProcessGuid{}
	for _, lrp := range actuals {
		c, err := ContainerFromActual(lrp)
		if err != nil {
			// An actual without ports is not routable; leave it out.
			continue
		}
		grouped[lrp.ProcessGuid] = append(grouped[lrp.ProcessGuid], c)
	}
	return grouped
}
// ContainerFromActual converts an actual LRP into a routable Container
// using the host port of its first port mapping. It errors when the LRP
// exposes no ports at all.
func ContainerFromActual(actual models.ActualLRP) (Container, error) {
	if len(actual.Ports) == 0 {
		return Container{}, errors.New("missing ports")
	}
	first := actual.Ports[0]
	c := Container{
		Host: actual.Host,
		Port: int(first.HostPort),
	}
	return c, nil
}
|
package main
import (
"fmt"
"github.com/rossifedericoe/bootcamp/calculadora"
)
// main demonstrates the calculadora package by printing two sums.
func main() {
	suma := calculadora.Sumar(1, 2)
	fmt.Println(suma)
	sumaConstante := calculadora.SumarConConstante(1, 2)
	fmt.Println(sumaConstante)
}
|
package mempool
import (
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/meshplus/bitxhub-kit/storage"
"github.com/meshplus/bitxhub-kit/types"
"github.com/meshplus/bitxhub-model/pb"
raftproto "github.com/meshplus/bitxhub/pkg/order/etcdraft/proto"
"github.com/meshplus/bitxhub/pkg/peermgr"
"github.com/google/btree"
"github.com/sirupsen/logrus"
)
// mempoolImpl is the concrete mempool: it ingests transactions from the
// API and from peers, orders them per account by nonce, and produces
// batches (raftproto.Ready) for the consensus layer.
type mempoolImpl struct {
	localID       uint64
	leader        uint64 // leader node id
	batchSize     uint64 // max number of txs per generated batch
	batchSeqNo    uint64 // track the sequence number of block
	logger        logrus.FieldLogger
	batchC        chan *raftproto.Ready // generated batches are pushed here for consensus
	close         chan bool             // signals listenEvent to exit
	txStore       *transactionStore     // store all transactions info
	txCache       *TxCache              // cache the transactions received from api
	subscribe     *subscribeEvent       // channels feeding the listenEvent loop
	storage       storage.Storage       // persistent store — presumably backs batchStore/load; confirm
	peerMgr       peermgr.PeerManager   //network manager
	batchTimerMgr *timerManager         // drives timeout-based batch creation
	ledgerHelper  func(hash *types.Hash) (*pb.Transaction, error) // fetches a tx from the ledger by hash
}
// newMempoolImpl builds a mempool from config, wiring in the given
// persistent storage and the batch output channel. Zero values for
// BatchSize/BatchTick fall back to the package defaults.
func newMempoolImpl(config *Config, storage storage.Storage, batchC chan *raftproto.Ready) *mempoolImpl {
	mpi := &mempoolImpl{
		localID:      config.ID,
		peerMgr:      config.PeerMgr,
		batchSeqNo:   config.ChainHeight,
		ledgerHelper: config.GetTransactionFunc,
		logger:       config.Logger,
		batchC:       batchC,
		storage:      storage,
	}
	mpi.txStore = newTransactionStore()
	mpi.txCache = newTxCache(config.TxSliceTimeout, config.TxSliceSize, config.Logger)
	mpi.subscribe = newSubscribe()
	mpi.batchSize = config.BatchSize
	if mpi.batchSize == 0 {
		mpi.batchSize = DefaultBatchSize
	}
	batchTick := config.BatchTick
	if batchTick == 0 {
		batchTick = DefaultBatchTick
	}
	mpi.batchTimerMgr = newTimer(batchTick)
	return mpi
}
// listenEvent is the mempool's single event loop. It serializes leader
// updates, transaction ingestion (local and forwarded), batch requests,
// commit notifications, missing-tx recovery and nonce queries over the
// subscribe channels, and exits when the close channel fires.
func (mpi *mempoolImpl) listenEvent() {
	// waitC relays the outcome of an in-flight fetch-missing-txs request
	// back to the goroutine that initiated it.
	waitC := make(chan bool)
	for {
		select {
		case <-mpi.close:
			mpi.logger.Info("----- Exit listen loop -----")
			return
		case newLeader := <-mpi.subscribe.updateLeaderC:
			if newLeader == mpi.localID {
				mpi.logger.Info("----- Become the leader node -----")
			}
			mpi.leader = newLeader
		case txSet := <-mpi.txCache.txSetC:
			// 1. send transactions to other peer
			data, err := txSet.Marshal()
			if err != nil {
				mpi.logger.Errorf("Marshal failed, err: %s", err.Error())
				return
			}
			pbMsg := mpi.msgToConsensusPbMsg(data, raftproto.RaftMessage_BROADCAST_TX)
			mpi.broadcast(pbMsg)
			// 2. process transactions
			if err := mpi.processTransactions(txSet.TxList); err != nil {
				mpi.logger.Errorf("Process transactions failed, err: %s", err.Error())
			}
		case txSlice := <-mpi.subscribe.txForwardC:
			// Transactions forwarded by peers skip the broadcast step.
			if err := mpi.processTransactions(txSlice.TxList); err != nil {
				mpi.logger.Errorf("Process transactions failed, err: %s", err.Error())
			}
		case res := <-mpi.subscribe.getBlockC:
			// Synchronous batch lookup for consensus; reply on res.result.
			result := mpi.getBlock(res.ready)
			res.result <- result
		case <-mpi.batchTimerMgr.timeoutEventC:
			// Timeout-driven batching: cut a (possibly small) batch if any
			// priority txs are waiting.
			if mpi.isBatchTimerActive() {
				mpi.stopBatchTimer(StopReason1)
				mpi.logger.Debug("Batch timer expired, try to create a batch")
				if mpi.txStore.priorityNonBatchSize > 0 {
					ready, err := mpi.generateBlock(true)
					if err != nil {
						mpi.logger.Errorf("Generator batch failed")
						continue
					}
					mpi.batchC <- ready
				} else {
					mpi.logger.Debug("The length of priorityIndex is 0, skip the batch timer")
				}
			}
		case commitReady := <-mpi.subscribe.commitTxnC:
			// Remove committed txs from the pool; timed for GC diagnostics.
			gcStartTime := time.Now()
			mpi.processCommitTransactions(commitReady)
			duration := time.Now().Sub(gcStartTime).Nanoseconds()
			mpi.logger.Debugf("GC duration %v", duration)
		case lostTxnEvent := <-mpi.subscribe.localMissingTxnEvent:
			if err := mpi.sendFetchTxnRequest(lostTxnEvent.Height, lostTxnEvent.MissingTxnHashList); err != nil {
				mpi.logger.Errorf("Process fetch txn failed, err: %s", err.Error())
				lostTxnEvent.WaitC <- false
			} else {
				mpi.logger.Debug("Process fetch txn success")
				// Remember the waiter; it is answered when the fetch
				// response arrives below.
				waitC = lostTxnEvent.WaitC
			}
		case fetchRequest := <-mpi.subscribe.fetchTxnRequestC:
			if err := mpi.processFetchTxnRequest(fetchRequest); err != nil {
				mpi.logger.Error("Process fetchTxnRequest failed")
			}
		case fetchRes := <-mpi.subscribe.fetchTxnResponseC:
			if err := mpi.processFetchTxnResponse(fetchRes); err != nil {
				waitC <- false
				continue
			}
			waitC <- true
		case getNonceRequest := <-mpi.subscribe.pendingNonceC:
			pendingNonce := mpi.txStore.nonceCache.getPendingNonce(getNonceRequest.account)
			getNonceRequest.waitC <- pendingNonce
		}
	}
}
// processTransactions filters incoming txs (stale nonce, duplicate hash),
// groups the survivors per account, inserts them into the mempool store,
// and — on the leader — may trigger batch generation by size.
func (mpi *mempoolImpl) processTransactions(txs []*pb.Transaction) error {
	validTxs := make(map[string][]*pb.Transaction)
	for _, tx := range txs {
		// Reject txs whose nonce is below the account's pending nonce.
		txAccount := tx.Account()
		currentSeqNo := mpi.txStore.nonceCache.getPendingNonce(txAccount)
		if tx.Nonce < currentSeqNo {
			// NOTE(review): the logged "required" value is currentSeqNo+1
			// while the acceptance threshold above is currentSeqNo —
			// confirm which is intended.
			mpi.logger.Warningf("Account %s, current sequence number is %d, required %d", txAccount, tx.Nonce, currentSeqNo+1)
			continue
		}
		// Drop duplicates that are already tracked by hash.
		txHash := tx.TransactionHash.String()
		if txPointer := mpi.txStore.txHashMap[txHash]; txPointer != nil {
			mpi.logger.Warningf("Tx %s already received", txHash)
			continue
		}
		// append handles the nil slice for a first-seen account.
		validTxs[txAccount] = append(validTxs[txAccount], tx)
	}
	// Process all the new transactions and collect the touched accounts.
	dirtyAccounts := mpi.txStore.InsertTxs(validTxs)
	// Promote now-sequential txs into the priority index.
	mpi.processDirtyAccount(dirtyAccounts)
	if mpi.isLeader() {
		// start batch timer when this node receives the first transaction set of a batch
		if !mpi.isBatchTimerActive() {
			mpi.startBatchTimer(StartReason1)
		}
		// generate a batch once enough priority txs have accumulated
		if mpi.txStore.priorityNonBatchSize >= mpi.batchSize {
			ready, err := mpi.generateBlock(false)
			if err != nil {
				// BUG FIX: the original error string was the truncated
				// "generator batch fai".
				return errors.New("generate batch failed")
			}
			// stop batch timer
			mpi.stopBatchTimer(StopReason2)
			mpi.batchC <- ready
		}
	}
	return nil
}
// InsertTxs records every tx in the hash index and in its account's
// sorted map, incrementing the pool size per tx. It returns the set of
// accounts that received at least one new transaction.
func (txStore *transactionStore) InsertTxs(txs map[string][]*pb.Transaction) map[string]bool {
	dirtyAccounts := make(map[string]bool, len(txs))
	for account, list := range txs {
		for _, tx := range list {
			txHash := tx.TransactionHash.String()
			txStore.txHashMap[txHash] = &orderedIndexKey{
				account: account,
				nonce:   tx.Nonce,
			}
			// CLEANUP: the original shadowed the outer `list` variable and
			// looked the account map up twice; create-on-miss once instead.
			accountTxs, ok := txStore.allTxs[account]
			if !ok {
				// first tx from this account: create its sorted map
				accountTxs = newTxSortedMap()
				txStore.allTxs[account] = accountTxs
			}
			accountTxs.items[tx.Nonce] = &txItem{
				account: account,
				tx:      tx,
			}
			accountTxs.index.insertBySortedNonceKey(tx)
			atomic.AddInt32(&txStore.poolSize, 1)
		}
		dirtyAccounts[account] = true
	}
	return dirtyAccounts
}
// processDirtyAccount promotes each dirty account's now-sequential txs
// into priorityIndex and parks the out-of-order remainder in
// parkingLotIndex, advancing the account's pending nonce accordingly.
func (mpi *mempoolImpl) processDirtyAccount(dirtyAccounts map[string]bool) {
	for account := range dirtyAccounts {
		accountTxs, ok := mpi.txStore.allTxs[account]
		if !ok {
			continue
		}
		// Split the account's txs at the pending nonce: everything
		// consecutive from there is ready, the rest must wait.
		pendingNonce := mpi.txStore.nonceCache.getPendingNonce(account)
		readyTxs, nonReadyTxs, nextDemandNonce := accountTxs.filterReady(pendingNonce)
		mpi.txStore.nonceCache.setPendingNonce(account, nextDemandNonce)
		// Ready txs become candidates for the next batch.
		for _, tx := range readyTxs {
			mpi.txStore.priorityIndex.insertByOrderedQueueKey(account, tx)
		}
		mpi.txStore.priorityNonBatchSize += uint64(len(readyTxs))
		// Out-of-order txs wait in the parking lot for their gap to fill.
		for _, tx := range nonReadyTxs {
			mpi.txStore.parkingLotIndex.insertByOrderedQueueKey(account, tx)
		}
	}
}
// generateBlock fetches the next block of transactions for consensus.
// batchedTxs are all txs sent to consensus but not yet committed; mempool
// must filter those out. The priority index is walked in (account, nonce)
// order, admitting a tx only when its predecessor was already batched or
// its nonce equals the account's commit nonce.
func (mpi *mempoolImpl) generateBlock(isTimeout bool) (*raftproto.Ready, error) {
	result := make([]orderedIndexKey, 0, mpi.batchSize)
	// txs has lower nonce will be observed first in priority index iterator.
	mpi.logger.Infof("Length of priority index: %v", mpi.txStore.priorityIndex.data.Len())
	mpi.txStore.priorityIndex.data.Ascend(func(a btree.Item) bool {
		tx := a.(*orderedIndexKey)
		// skip txs already handed to consensus in a previous batch
		if _, ok := mpi.txStore.batchedTxs[orderedIndexKey{tx.account, tx.nonce}]; ok {
			return true
		}
		txSeq := tx.nonce
		commitNonce := mpi.txStore.nonceCache.getCommitNonce(tx.account)
		var seenPrevious bool
		if txSeq >= 1 {
			_, seenPrevious = mpi.txStore.batchedTxs[orderedIndexKey{account: tx.account, nonce: txSeq - 1}]
		}
		// include transaction if it's "next" for given account or
		// we've already sent its ancestor to Consensus
		if seenPrevious || (txSeq == commitNonce) {
			ptr := orderedIndexKey{account: tx.account, nonce: tx.nonce}
			mpi.txStore.batchedTxs[ptr] = true
			result = append(result, ptr)
			// stop iterating once the batch is full, or — on timeout —
			// once every non-batched priority tx has been taken
			condition1 := uint64(len(result)) == mpi.batchSize
			condition2 := isTimeout && uint64(len(result)) == mpi.txStore.priorityNonBatchSize
			if condition1 || condition2 {
				return false
			}
		}
		return true
	})
	// convert transaction pointers to real values
	hashList := make([]types.Hash, len(result))
	txList := make([]*pb.Transaction, len(result))
	for i, v := range result {
		rawTransaction := mpi.txStore.getTxByOrderKey(v.account, v.nonce)
		hashList[i] = *rawTransaction.TransactionHash
		txList[i] = rawTransaction
	}
	mpi.increaseBatchSeqNo()
	batchSeqNo := mpi.getBatchSeqNo()
	ready := &raftproto.Ready{
		TxHashes: hashList,
		Height:   batchSeqNo,
	}
	// refuse to overwrite an existing batch at this height
	if _, ok := mpi.txStore.batchedCache[batchSeqNo]; ok {
		mpi.logger.Errorf("Generate block with height %d, but there is already block at this height", batchSeqNo)
		return nil, errors.New("wrong block height ")
	}
	// store the batch to cache
	mpi.txStore.batchedCache[batchSeqNo] = txList
	// store the batch to db
	mpi.batchStore(txList)
	mpi.txStore.priorityNonBatchSize = mpi.txStore.priorityNonBatchSize - uint64(len(hashList))
	mpi.logger.Infof("Generated block %d with %d txs", batchSeqNo, len(txList))
	return ready, nil
}
// getBlock resolves a consensus Ready into its batch of transactions.
// The leader serves the batch straight from batchedCache (reporting all
// hashes as missing if the cache entry is absent); followers reconstruct
// the same batch from their own mempool.
func (mpi *mempoolImpl) getBlock(ready *raftproto.Ready) *mempoolBatch {
	if !mpi.isLeader() {
		// follower construct the same batch by given ready.
		return mpi.constructSameBatch(ready)
	}
	res := &mempoolBatch{}
	txList, ok := mpi.txStore.batchedCache[ready.Height]
	if !ok {
		mpi.logger.Warningf("Leader get block failed, can't find block %d from batchedCache", ready.Height)
		missing := make(map[uint64]string, len(ready.TxHashes))
		for i, txHash := range ready.TxHashes {
			missing[uint64(i)] = txHash.String()
		}
		res.missingTxnHashList = missing
		return res
	}
	// TODO (YH): check tx hash and length
	res.txList = txList
	return res
}
// constructSameBatch only be called by follower, constructs a batch by
// given ready info. Each requested hash is resolved through txHashMap ->
// allTxs -> items; hashes that cannot be resolved are reported back in
// missingTxnHashList so they can be fetched from the leader.
func (mpi *mempoolImpl) constructSameBatch(ready *raftproto.Ready) *mempoolBatch {
	res := &mempoolBatch{}
	// a previously reconstructed batch at this height is reused as-is
	if txList, ok := mpi.txStore.batchedCache[ready.Height]; ok {
		mpi.logger.Warningf("Batch %d already exists in batchedCache", ready.Height)
		// TODO (YH): check tx hash and length
		res.txList = txList
		return res
	}
	missingTxList := make(map[uint64]string)
	txList := make([]*pb.Transaction, 0)
	for index, txHash := range ready.TxHashes {
		var (
			txPointer *orderedIndexKey
			txMap     *txSortedMap
			txItem    *txItem
			ok        bool
		)
		strHash := txHash.String()
		// resolve hash -> (account, nonce) pointer
		if txPointer, _ = mpi.txStore.txHashMap[strHash]; txPointer == nil {
			missingTxList[uint64(index)] = strHash
			continue
		}
		// resolve account -> its sorted tx map
		if txMap, ok = mpi.txStore.allTxs[txPointer.account]; !ok {
			mpi.logger.Warningf("Transaction %s exist in txHashMap but not in allTxs", strHash)
			missingTxList[uint64(index)] = strHash
			continue
		}
		// resolve nonce -> the tx item itself
		if txItem, ok = txMap.items[txPointer.nonce]; !ok {
			mpi.logger.Warningf("Transaction %s exist in txHashMap but not in allTxs", strHash)
			missingTxList[uint64(index)] = strHash
			continue
		}
		txList = append(txList, txItem.tx)
		mpi.txStore.batchedTxs[*txPointer] = true
	}
	res.missingTxnHashList = missingTxList
	res.txList = txList
	// persist the correct batch
	if len(res.missingTxnHashList) == 0 {
		// store the batch to cache
		mpi.txStore.batchedCache[ready.Height] = txList
	}
	return res
}
// processCommitTransactions removes the transactions in ready from the
// mempool: it advances per-account commit nonces, drops the hash/batched
// indexes, prunes every index below the new commit nonce, and finally
// evicts the batch itself from cache (and, on the leader, from storage).
func (mpi *mempoolImpl) processCommitTransactions(ready *raftproto.Ready) {
	dirtyAccounts := make(map[string]bool)
	// update current cached commit nonce for account
	for _, txHash := range ready.TxHashes {
		strHash := txHash.String()
		// CLEANUP: the original performed this lookup twice, discarding
		// the first result.
		txPointer, ok := mpi.txStore.txHashMap[strHash]
		if !ok {
			mpi.logger.Warningf("Remove transaction %s failed, Can't find it from txHashMap", strHash)
			continue
		}
		preCommitNonce := mpi.txStore.nonceCache.getCommitNonce(txPointer.account)
		newCommitNonce := txPointer.nonce + 1
		if preCommitNonce < newCommitNonce {
			mpi.txStore.nonceCache.setCommitNonce(txPointer.account, newCommitNonce)
		}
		delete(mpi.txStore.txHashMap, strHash)
		delete(mpi.txStore.batchedTxs, *txPointer)
		dirtyAccounts[txPointer.account] = true
	}
	// clean related txs info in cache
	for account := range dirtyAccounts {
		commitNonce := mpi.txStore.nonceCache.getCommitNonce(account)
		if list, ok := mpi.txStore.allTxs[account]; ok {
			// remove all previous seq number txs for this account.
			removedTxs := list.forward(commitNonce)
			// prune the three indexes concurrently and wait for all.
			var wg sync.WaitGroup
			wg.Add(3)
			go func(removed map[string][]*pb.Transaction) {
				defer wg.Done()
				list.index.removeBySortedNonceKey(removed)
			}(removedTxs)
			go func(removed map[string][]*pb.Transaction) {
				defer wg.Done()
				mpi.txStore.priorityIndex.removeByOrderedQueueKey(removed)
			}(removedTxs)
			go func(removed map[string][]*pb.Transaction) {
				defer wg.Done()
				mpi.txStore.parkingLotIndex.removeByOrderedQueueKey(removed)
			}(removedTxs)
			wg.Wait()
			delta := int32(len(removedTxs))
			atomic.AddInt32(&mpi.txStore.poolSize, -delta)
		}
	}
	if mpi.isLeader() {
		mpi.batchDelete(ready.TxHashes)
	}
	delete(mpi.txStore.batchedCache, ready.Height)
	// restart batch timer for remain txs.
	if mpi.isLeader() {
		mpi.startBatchTimer(StartReason2)
	}
	mpi.logger.Debugf("Replica removes batch %d in mempool, and now there are %d batches, "+
		"priority len: %d, parkingLot len: %d", ready.Height, len(mpi.txStore.batchedCache),
		mpi.txStore.priorityIndex.size(), mpi.txStore.parkingLotIndex.size())
}
// sendFetchTxnRequest asks the leader for the transactions we are missing
// for the batch at the given height, and records the outstanding request
// in missingBatch so the response can be validated later.
func (mpi *mempoolImpl) sendFetchTxnRequest(height uint64, lostTxnHashList map[uint64]string) error {
	request := &FetchTxnRequest{
		ReplicaId:       mpi.localID,
		Height:          height,
		MissingTxHashes: lostTxnHashList,
	}
	payload, err := request.Marshal()
	if err != nil {
		mpi.logger.Error("Marshal MissingHashList fail")
		return err
	}
	pbMsg := mpi.msgToConsensusPbMsg(payload, raftproto.RaftMessage_GET_TX)
	mpi.logger.Debugf("Send fetch transactions request to replica %d", mpi.leader)
	mpi.unicast(mpi.leader, pbMsg)
	mpi.txStore.missingBatch[height] = lostTxnHashList
	return nil
}
// processFetchTxnRequest serves a peer's request for missing transactions,
// trying progressively slower sources: the batched cache, then storage,
// then the ledger. On success the resolved txs are unicast back.
func (mpi *mempoolImpl) processFetchTxnRequest(fetchTxnRequest *FetchTxnRequest) error {
	// CLEANUP: the original allocated a map here that was immediately
	// overwritten by the first load call below.
	var txList map[uint64]*pb.Transaction
	var err error
	if txList, err = mpi.loadTxnFromCache(fetchTxnRequest); err != nil {
		if txList, err = mpi.loadTxnFromStorage(fetchTxnRequest); err != nil {
			if txList, err = mpi.loadTxnFromLedger(fetchTxnRequest); err != nil {
				mpi.logger.Errorf("Process fetch txn request [peer: %s, block height: %d] failed",
					fetchTxnRequest.ReplicaId, fetchTxnRequest.Height)
				return err
			}
		}
	}
	fetchTxnResponse := &FetchTxnResponse{
		ReplicaId:      mpi.localID,
		Height:         fetchTxnRequest.Height,
		MissingTxnList: txList,
	}
	resBytes, err := fetchTxnResponse.Marshal()
	if err != nil {
		return err
	}
	pbMsg := mpi.msgToConsensusPbMsg(resBytes, raftproto.RaftMessage_GET_TX_ACK)
	mpi.logger.Debugf("Send fetch missing transactions response to replica %d", fetchTxnRequest.ReplicaId)
	mpi.unicast(fetchTxnRequest.ReplicaId, pbMsg)
	return nil
}
// loadTxnFromCache resolves the requested missing txs from the in-memory
// batchedCache, validating that every requested hash is known and that it
// sits at the expected index of the cached batch.
func (mpi *mempoolImpl) loadTxnFromCache(fetchTxnRequest *FetchTxnRequest) (map[uint64]*pb.Transaction, error) {
	missingHashList := fetchTxnRequest.MissingTxHashes
	targetHeight := fetchTxnRequest.Height
	for _, txHash := range missingHashList {
		if txPointer, _ := mpi.txStore.txHashMap[txHash]; txPointer == nil {
			return nil, fmt.Errorf("transaction %s doesn't exist in txHashMap", txHash)
		}
	}
	targetBatch, ok := mpi.txStore.batchedCache[targetHeight]
	if !ok {
		return nil, fmt.Errorf("batch %d doesn't exist in batchedCache", targetHeight)
	}
	targetBatchLen := uint64(len(targetBatch))
	txList := make(map[uint64]*pb.Transaction, len(missingHashList))
	for index, txHash := range missingHashList {
		// BUG FIX: the original guard was `index > targetBatchLen`, so a
		// request with index == len(targetBatch) passed the check and then
		// panicked on the out-of-range targetBatch[index] access.
		if index >= targetBatchLen || targetBatch[index].TransactionHash.String() != txHash {
			return nil, fmt.Errorf("find invalid transaction, index: %d, targetHash: %s", index, txHash)
		}
		txList[index] = targetBatch[index]
	}
	return txList, nil
}
// TODO (YH): restore txn from wal
// loadTxnFromStorage looks each missing tx hash up in the mempool's
// persistent storage, failing on the first hash that cannot be hex-decoded
// or is absent from storage. Keys of the returned map are the txs' positions
// inside the requested batch.
func (mpi *mempoolImpl) loadTxnFromStorage(fetchTxnRequest *FetchTxnRequest) (map[uint64]*pb.Transaction, error) {
    missingHashList := fetchTxnRequest.MissingTxHashes
    txList := make(map[uint64]*pb.Transaction)
    for index, txHash := range missingHashList {
        var (
            tx      *pb.Transaction
            rawHash []byte
            err     error
            ok      bool
        )
        // Storage is keyed by the raw (decoded) hash bytes.
        if rawHash, err = types.HexDecodeString(txHash); err != nil {
            return nil, err
        }
        if tx, ok = mpi.load(rawHash); !ok {
            return nil, errors.New("can't load tx from storage")
        }
        txList[index] = tx
    }
    return txList, nil
}
// loadTxnFromLedger finds missing transactions in the ledger (the last
// fallback after cache and storage). Keys of the returned map are the txs'
// positions inside the requested batch.
func (mpi *mempoolImpl) loadTxnFromLedger(fetchTxnRequest *FetchTxnRequest) (map[uint64]*pb.Transaction, error) {
    missingHashList := fetchTxnRequest.MissingTxHashes
    txList := make(map[uint64]*pb.Transaction)
    for index, txHash := range missingHashList {
        var (
            tx  *pb.Transaction
            err error
        )
        // NewHashByStr returns nil for a malformed hash string.
        hash := types.NewHashByStr(txHash)
        if hash == nil {
            return nil, errors.New("nil hash")
        }
        if tx, err = mpi.ledgerHelper(hash); err != nil {
            return nil, err
        }
        txList[index] = tx
    }
    return txList, nil
}
// processFetchTxnResponse validates a replica's answer to our missing-tx
// request: the batch must still be pending in missingBatch, the tx count must
// match, and every tx must carry the expected hash. On success the txs are
// replayed into the mempool and the pending entry is cleared.
//
// Fix: the "can't find batch" error used errors.New with a %d verb that was
// never formatted; it now reports the actual height via fmt.Errorf.
func (mpi *mempoolImpl) processFetchTxnResponse(fetchTxnResponse *FetchTxnResponse) error {
    mpi.logger.Debugf("Receive fetch transactions response from replica %d", fetchTxnResponse.ReplicaId)
    expectedHashes, ok := mpi.txStore.missingBatch[fetchTxnResponse.Height]
    if !ok {
        return fmt.Errorf("can't find batch %d from missingBatch", fetchTxnResponse.Height)
    }
    expectLen := len(expectedHashes)
    recvLen := len(fetchTxnResponse.MissingTxnList)
    if recvLen != expectLen {
        return fmt.Errorf("receive unmatched fetching txn response, expect length: %d, received length: %d", expectLen, recvLen)
    }
    validTxn := make([]*pb.Transaction, 0, recvLen)
    for index, tx := range fetchTxnResponse.MissingTxnList {
        if tx.TransactionHash.String() != expectedHashes[index] {
            return errors.New("find a hash mismatch tx")
        }
        validTxn = append(validTxn, tx)
    }
    if err := mpi.processTransactions(validTxn); err != nil {
        return err
    }
    delete(mpi.txStore.missingBatch, fetchTxnResponse.Height)
    return nil
}
|
package paillier
// PaillierConfig is the on/off switch for the paillier feature, populated
// from the `enable` key of the corresponding YAML configuration section.
type PaillierConfig struct {
    Enable bool `yaml:"enable"`
}
|
package middlewares
import (
"github.com/golang-jwt/jwt"
"github.com/labstack/echo/v4"
)
// JWTSuccessHandler runs after echo's JWT middleware has stored the validated
// token under "user": it extracts the numeric "id" claim and stashes it in
// the context as "userId" (uint) for downstream handlers.
// NOTE(review): the chained type assertions panic if the middleware is not
// configured with jwt.MapClaims or the "id" claim is absent — confirm the
// JWT middleware configuration upstream.
func JWTSuccessHandler(c echo.Context) {
    user := c.Get("user").(*jwt.Token).Claims.(jwt.MapClaims)
    // JSON numbers decode as float64; convert to uint for storage.
    userId := uint(user["id"].(float64))
    c.Set("userId", userId)
}
|
package algorithm
import "fmt"
// BiTree is the behavior of a binary tree rooted at a single node:
// mutation of the root and its direct children, three depth-first print
// orders, height, and two whole-tree traversals.
// NOTE(review): "BreathTraverse" is presumably a typo for "BreadthTraverse",
// but the name is part of the public interface and is kept.
type BiTree interface{
    // SetValue stores data in the root node.
    SetValue(data int)
    // SetLeft / SetRight replace the corresponding child with a new leaf.
    SetLeft(v int)
    SetRight(v int)
    // PreBiTree / PostBiTree / InBiTree print pre-, post- and in-order.
    PreBiTree()
    PostBiTree()
    InBiTree()
    // Layers returns the height of the tree (0 for an empty tree).
    Layers() int
    // GetLeft returns the root's left child.
    GetLeft()*BiTreeNode
    // BreathTraverse prints breadth-first; DepthTraverse depth-first.
    BreathTraverse()
    DepthTraverse()
}
// BiTreeNode is one binary-tree node; it implements BiTree, so a *BiTreeNode
// doubles as the whole (sub)tree rooted at itself. A nil child means the
// subtree is absent.
type BiTreeNode struct{
    data int
    left, right *BiTreeNode
}
// NewBiTree returns an empty tree whose root node holds the zero value.
func NewBiTree() BiTree {
    var root BiTreeNode
    return &root
}
// GetLeft returns the left child, or nil (with a console note) when called
// on a nil receiver.
func (node *BiTreeNode) GetLeft() *BiTreeNode {
    if node != nil {
        return node.left
    }
    fmt.Println("nil.node ignored.")
    return nil
}
// SetValue stores data in the node; a nil receiver is reported and skipped.
func (node *BiTreeNode) SetValue(data int) {
    if node != nil {
        node.data = data
        return
    }
    fmt.Println("setting value to nil.node ignored.")
}
// SetLeft replaces the left child with a new leaf holding v; a nil receiver
// is reported and skipped.
func (node *BiTreeNode) SetLeft(v int) {
    if node != nil {
        node.left = &BiTreeNode{data: v}
        return
    }
    fmt.Println("setting left to nil.node ignored.")
}
// SetRight replaces the right child with a new leaf holding v; a nil
// receiver is reported and skipped.
func (node *BiTreeNode) SetRight(v int) {
    if node != nil {
        node.right = &BiTreeNode{data: v}
        return
    }
    fmt.Println("setting right to nil.node ignored.")
}
// printNodeValue writes the node's payload followed by a single space.
func (node *BiTreeNode) printNodeValue() {
    fmt.Printf("%d ", node.data)
}
// PreBiTree prints the subtree in root-left-right (pre-order) sequence.
func (node *BiTreeNode) PreBiTree() {
    if node != nil {
        node.printNodeValue()
        node.left.PreBiTree()
        node.right.PreBiTree()
    }
}
// PostBiTree prints the subtree in left-right-root (post-order) sequence.
func (node *BiTreeNode) PostBiTree() {
    if node != nil {
        node.left.PostBiTree()
        node.right.PostBiTree()
        node.printNodeValue()
    }
}
// InBiTree prints the subtree in left-root-right (in-order) sequence.
func (node *BiTreeNode) InBiTree() {
    if node != nil {
        node.left.InBiTree()
        node.printNodeValue()
        node.right.InBiTree()
    }
}
// Layers reports the height of the subtree; an empty subtree has height 0.
func (node *BiTreeNode) Layers() int {
    if node == nil {
        return 0
    }
    depthL, depthR := node.left.Layers(), node.right.Layers()
    if depthL < depthR {
        return depthR + 1
    }
    return depthL + 1
}
// BreathTraverse prints the tree level by level (breadth-first) using a FIFO
// queue. (Original comment: 广度优先遍历二叉树 — breadth-first traversal.)
//
// Fix: the original enqueued the root's left and right children without nil
// checks, so a root with a missing child caused a nil-pointer dereference
// when that child's data field was read off the queue.
func (node *BiTreeNode) BreathTraverse() {
    if node == nil {
        return
    }
    queue := []*BiTreeNode{node}
    for len(queue) > 0 {
        cur := queue[0]
        queue = queue[1:]
        fmt.Print(cur.data, " ")
        if cur.left != nil {
            queue = append(queue, cur.left)
        }
        if cur.right != nil {
            queue = append(queue, cur.right)
        }
    }
}
// DepthTraverse prints the tree depth-first (pre-order) with an explicit
// stack; rights are pushed before lefts so lefts are visited first.
// (Original comment: 深度优先遍历二叉树 — depth-first traversal.)
//
// Fix: the original pushed the root's children without nil checks, so a
// missing child caused a nil-pointer dereference when popped and printed.
func (node *BiTreeNode) DepthTraverse() {
    if node == nil {
        fmt.Println("traverse nil.node ignored.")
        return
    }
    fmt.Print(node.data, " ")
    var stack []*BiTreeNode
    if node.right != nil {
        stack = append(stack, node.right)
    }
    if node.left != nil {
        stack = append(stack, node.left)
    }
    for len(stack) > 0 {
        top := stack[len(stack)-1]
        stack = stack[:len(stack)-1]
        fmt.Print(top.data, " ")
        if top.right != nil {
            stack = append(stack, top.right)
        }
        if top.left != nil {
            stack = append(stack, top.left)
        }
    }
}
|
package main
import (
"fmt"
"github.com/gin-contrib/cors" // Why do we need this package?
"github.com/gin-gonic/gin"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/sqlite" // If you want to use mysql or any other db, replace this line
)
var db *gorm.DB // process-wide gorm handle, opened in main
var err error   // main's db-open error
// loguser appears unused in this chunk — presumably a leftover; verify before removing.
var loguser string
// Login is a registered user row.
// NOTE(review): passwords are stored and compared in plain text — should be hashed.
type Login struct {
    Name string `json:"name"`
    Username string `json:"username"`
    Password string `json:"password"`
    City string `json:"city"`
}
// Ques is one multiple-choice question of a given genre (dn/ip/cg/usp).
type Ques struct {
    Genre string `json:"genre"`
    Question string `json:"question"`
    Opt1 string `json:"opt1"`
    Opt2 string `json:"opt2"`
    Opt3 string `json:"opt3"`
}
// Answers is an eight-question quiz submission; each answer is a string of
// chosen option numbers such as "2" or "1 3".
type Answers struct {
    Ans1 string `json:"ans1"`
    Ans2 string `json:"ans2"`
    Ans3 string `json:"ans3"`
    Ans4 string `json:"ans4"`
    Ans5 string `json:"ans5"`
    Ans6 string `json:"ans6"`
    Ans7 string `json:"ans7"`
    Ans8 string `json:"ans8"`
}
// HighScores is a user's best score per quiz genre.
type HighScores struct {
    Username string `json:"username"`
    DN int `json:"dn"`
    CG int `json:"cg"`
    IP int `json:"ip"`
    USP int `json:"usp"`
}
// Score wraps a single numeric score for JSON responses.
type Score struct {
    Score int `json:"score"`
}
// Last computed score and cached high score per genre, plus the username of
// the most recently logged-in user.
// NOTE(review): package-level mutable state means the server effectively
// supports only one concurrent player.
var dnhscore int
var dnscore int
var iphscore int
var ipscore int
var usphscore int
var uspscore int
var cghscore int
var cgscore int
var current string
// main opens the SQLite database, migrates the schema, and serves the quiz
// API on :8080.
//
// Fixes:
//   - cors.Default() was registered with r.Use AFTER the routes were added;
//     gin only applies middleware to routes registered after the Use call,
//     so CORS never ran. It is now registered first.
//   - The large commented-out one-time seed block (db.Create calls filling
//     the dn/ip/cg/usp question banks) was dead code and has been removed;
//     recover it from version control if the tables ever need reseeding.
func main() {
    db, err = gorm.Open("sqlite3", "./gorm.db")
    if err != nil {
        fmt.Println(err)
    }
    current = ""
    defer db.Close()
    db.AutoMigrate(&Login{}, &Ques{}, &Score{}, &HighScores{})
    r := gin.Default()
    r.Use(cors.Default())
    r.GET("/view", GetPeople)
    r.DELETE("/people/:username", DeletePerson)
    r.POST("/login", LoginFunc)
    r.POST("/register", RegisterFunc)
    r.GET("/LoggedIn/anime", AnimeScore)
    r.GET("/LoggedIn/anime/deathnote", DNFunc)
    r.GET("/LoggedIn/anime/deathnote/score", DNScoreFun)
    r.POST("/LoggedIn/anime/deathnote/score", DNScoreFunc)
    r.GET("/LoggedIn/anime/codegeass", CGFunc)
    r.GET("/LoggedIn/anime/codegeass/score", CGScoreFun)
    r.POST("/LoggedIn/anime/codegeass/score", CGScoreFunc)
    r.GET("/LoggedIn/politics", PoliticsScore)
    r.GET("/LoggedIn/politics/indian", IPFunc)
    r.GET("/LoggedIn/politics/indian/score", IPScoreFun)
    r.POST("/LoggedIn/politics/indian/score", IPScoreFunc)
    r.GET("/LoggedIn/politics/us", USFunc)
    r.GET("/LoggedIn/politics/us/score", USScoreFun)
    r.POST("/LoggedIn/politics/us/score", USScoreFunc)
    r.GET("/scoreboard/anime", AnimeBoard)
    r.GET("/scoreboard/politics", PoliticsBoard)
    r.Run(":8080")
}
// DeletePerson removes the Login row matching the :username path parameter
// and always answers 200 with a small confirmation object.
func DeletePerson(c *gin.Context) {
    username := c.Params.ByName("username")
    var person Login
    result := db.Where("username = ?", username).Delete(&person)
    fmt.Println(result)
    c.Header("access-control-allow-origin", "*")
    c.JSON(200, gin.H{"username #" + username: "deleted"})
}
// GetPeople returns every Login row, or 404 on a query error.
func GetPeople(c *gin.Context) {
    var people []Login
    findErr := db.Find(&people).Error
    if findErr != nil {
        c.AbortWithStatus(404)
        fmt.Println(findErr)
        return
    }
    c.Header("access-control-allow-origin", "*") // allow browser clients on other origins
    c.JSON(200, people)
}
// RegisterFunc creates a new user plus an all-zero HighScores row, unless
// the username is already taken (then 404).
func RegisterFunc(c *gin.Context) {
    var login Login
    c.BindJSON(&login)
    usernameFree := db.Where("username = ?", login.Username).Find(&login).RecordNotFound()
    if !usernameFree {
        c.Header("access-control-allow-origin", "*")
        fmt.Println("Already Exits")
        c.AbortWithStatus(404)
        return
    }
    scores := HighScores{Username: login.Username, DN: 0, CG: 0, IP: 0, USP: 0}
    db.Create(&login)
    db.Create(&scores)
    c.Header("access-control-allow-origin", "*")
    c.JSON(200, login)
}
// LoginFunc checks the posted username/password pair against the Login
// table; on success it records the user in the package-level `current`.
func LoginFunc(c *gin.Context) {
    var login Login
    c.BindJSON(&login)
    var person Login
    findErr := db.Where("Username = ? AND Password = ?", login.Username, login.Password).Find(&person).Error
    if findErr != nil {
        c.AbortWithStatus(404)
        fmt.Println(findErr)
        return
    }
    current = login.Username
    fmt.Println("Logged In")
    c.Header("access-control-allow-origin", "*")
    c.JSON(200, login)
}
// AnimeScore returns the HighScores row of the currently logged-in user.
func AnimeScore(c *gin.Context) {
    c.Header("access-control-allow-origin", "*")
    var scores HighScores
    db.Where("Username = ?", current).Find(&scores)
    c.JSON(200, scores)
}
// PoliticsScore returns the HighScores row of the currently logged-in user
// (identical to AnimeScore; both genres live in the same row).
func PoliticsScore(c *gin.Context) {
    c.Header("access-control-allow-origin", "*")
    var scores HighScores
    db.Where("Username = ?", current).Find(&scores)
    c.JSON(200, scores)
}
// DNFunc returns every Death Note ("dn") question, or 404 on a query error.
// Fix: the original ran the identical Find query a second time in the
// success branch; once is enough.
func DNFunc(c *gin.Context) {
    var dn []Ques
    if err := db.Where("Genre = ?", "dn").Find(&dn).Error; err != nil {
        c.AbortWithStatus(404)
        fmt.Println(err)
        return
    }
    c.Header("access-control-allow-origin", "*")
    c.JSON(200, dn)
}
// CGFunc returns every Code Geass ("cg") question, or 404 on a query error.
// Fix: removed the duplicated Find query from the success branch.
func CGFunc(c *gin.Context) {
    var cg []Ques
    if err := db.Where("Genre = ?", "cg").Find(&cg).Error; err != nil {
        c.AbortWithStatus(404)
        fmt.Println(err)
        return
    }
    c.Header("access-control-allow-origin", "*")
    c.JSON(200, cg)
}
// IPFunc returns every Indian-politics ("ip") question, or 404 on error.
// Fix: removed the duplicated Find query from the success branch.
func IPFunc(c *gin.Context) {
    var ip []Ques
    if err := db.Where("Genre = ?", "ip").Find(&ip).Error; err != nil {
        c.AbortWithStatus(404)
        fmt.Println(err)
        return
    }
    c.Header("access-control-allow-origin", "*")
    c.JSON(200, ip)
}
// USFunc returns every US-politics ("usp") question, or 404 on error.
// Fix: removed the duplicated Find query from the success branch.
func USFunc(c *gin.Context) {
    var usp []Ques
    if err := db.Where("Genre = ?", "usp").Find(&usp).Error; err != nil {
        c.AbortWithStatus(404)
        fmt.Println(err)
        return
    }
    c.Header("access-control-allow-origin", "*")
    c.JSON(200, usp)
}
// DNScoreFun returns the most recently computed Death Note quiz score.
func DNScoreFun(c *gin.Context) {
    c.Header("access-control-allow-origin", "*")
    result := Score{Score: dnscore}
    c.JSON(200, result)
}
// CGScoreFun returns the most recently computed Code Geass quiz score.
func CGScoreFun(c *gin.Context) {
    c.Header("access-control-allow-origin", "*")
    result := Score{Score: cgscore}
    fmt.Println(result.Score)
    c.JSON(200, result)
}
// IPScoreFun returns the most recently computed Indian-politics quiz score.
func IPScoreFun(c *gin.Context) {
    c.Header("access-control-allow-origin", "*")
    result := Score{Score: ipscore}
    c.JSON(200, result)
}
// USScoreFun returns the most recently computed US-politics quiz score.
func USScoreFun(c *gin.Context) {
    c.Header("access-control-allow-origin", "*")
    result := Score{Score: uspscore}
    c.JSON(200, result)
}
// DNScoreFunc grades a Death Note quiz submission against the answer key,
// refreshes the stored high score when beaten, and returns the user's
// HighScores row.
func DNScoreFunc(c *gin.Context) {
    var submitted Answers
    c.BindJSON(&submitted)
    key := []string{"2", "2", "1", "2", "1 2 3", "1 3", "2 3", "1 3"}
    given := []string{submitted.Ans1, submitted.Ans2, submitted.Ans3, submitted.Ans4,
        submitted.Ans5, submitted.Ans6, submitted.Ans7, submitted.Ans8}
    dnscore = 0
    for i, answer := range given {
        if answer == key[i] {
            dnscore++
        }
    }
    var hs HighScores
    db.Where("Username = ?", current).Find(&hs)
    dnhscore = hs.DN
    if dnscore > dnhscore {
        dnhscore = dnscore
        db.Model(&hs).Where("Username=?", current).Update("dn", dnhscore)
    }
    c.Header("access-control-allow-origin", "*")
    c.JSON(200, hs)
}
// CGScoreFunc grades a Code Geass quiz submission, refreshes the stored high
// score when beaten, and echoes the submitted answers back.
func CGScoreFunc(c *gin.Context) {
    var submitted Answers
    c.BindJSON(&submitted)
    key := []string{"2", "2", "1", "2", "1 2 3", "1 3", "2 3", "1 3"}
    given := []string{submitted.Ans1, submitted.Ans2, submitted.Ans3, submitted.Ans4,
        submitted.Ans5, submitted.Ans6, submitted.Ans7, submitted.Ans8}
    cgscore = 0
    for i, answer := range given {
        if answer == key[i] {
            cgscore++
        }
    }
    var hs HighScores
    db.Where("Username = ?", current).Find(&hs)
    cghscore = hs.CG
    if cgscore > cghscore {
        cghscore = cgscore
        db.Model(&hs).Where("Username=?", current).Update("cg", cghscore)
    }
    c.Header("access-control-allow-origin", "*")
    c.JSON(200, submitted)
}
// IPScoreFunc grades an Indian-politics quiz submission, refreshes the
// stored high score when beaten, and echoes the submitted answers back.
func IPScoreFunc(c *gin.Context) {
    var submitted Answers
    c.BindJSON(&submitted)
    key := []string{"2", "2", "1", "2", "1 2 3", "1 3", "2 3", "1 3"}
    given := []string{submitted.Ans1, submitted.Ans2, submitted.Ans3, submitted.Ans4,
        submitted.Ans5, submitted.Ans6, submitted.Ans7, submitted.Ans8}
    ipscore = 0
    for i, answer := range given {
        if answer == key[i] {
            ipscore++
        }
    }
    var hs HighScores
    db.Where("Username = ?", current).Find(&hs)
    iphscore = hs.IP
    if ipscore > iphscore {
        iphscore = ipscore
        db.Model(&hs).Where("Username=?", current).Update("ip", iphscore)
    }
    c.Header("access-control-allow-origin", "*")
    c.JSON(200, submitted)
}
// USScoreFunc grades a US-politics quiz submission, refreshes the stored
// high score when beaten, and echoes the submitted answers back.
func USScoreFunc(c *gin.Context) {
    var submitted Answers
    c.BindJSON(&submitted)
    key := []string{"2", "2", "1", "2", "1 2 3", "1 3", "2 3", "1 3"}
    given := []string{submitted.Ans1, submitted.Ans2, submitted.Ans3, submitted.Ans4,
        submitted.Ans5, submitted.Ans6, submitted.Ans7, submitted.Ans8}
    uspscore = 0
    for i, answer := range given {
        if answer == key[i] {
            uspscore++
        }
    }
    var hs HighScores
    db.Where("Username = ?", current).Find(&hs)
    usphscore = hs.USP
    if uspscore > usphscore {
        usphscore = uspscore
        db.Model(&hs).Where("Username=?", current).Update("usp", usphscore)
    }
    c.Header("access-control-allow-origin", "*")
    c.JSON(200, submitted)
}
// AnimeBoard returns every user's HighScores row as the scoreboard.
func AnimeBoard(c *gin.Context) {
    c.Header("access-control-allow-origin", "*")
    var board []HighScores
    db.Find(&board)
    c.JSON(200, board)
}
// PoliticsBoard returns every user's HighScores row (same data as
// AnimeBoard; both genres share one table).
func PoliticsBoard(c *gin.Context) {
    c.Header("access-control-allow-origin", "*")
    var board []HighScores
    db.Find(&board)
    c.JSON(200, board)
}
|
package actions
import (
"errors"
"github.com/barrydev/api-3h-shop/src/factories"
"github.com/barrydev/api-3h-shop/src/model"
)
// GetOrderCoupon returns the coupon attached to the given order, or
// (nil, nil) when the order has no coupon.
//
// Fix: the error returned by FindOrderById was silently ignored, so any
// lookup failure was misreported as "order does not exists"; it is now
// propagated to the caller.
func GetOrderCoupon(orderId int64) (*model.Coupon, error) {
    order, err := factories.FindOrderById(orderId)
    if err != nil {
        return nil, err
    }
    if order == nil {
        return nil, errors.New("order does not exists")
    }
    if order.CouponId == nil {
        return nil, nil
    }
    coupon, err := factories.FindCouponById(*order.CouponId)
    if err != nil {
        return nil, err
    }
    return coupon, nil
}
|
package geoserver
import (
// "encoding/json"
)
const (
    // GeoWorkSpace is the default GeoServer workspace name used by this client.
    GeoWorkSpace = "titangrm"
)
// Workspaces wraps the "workspace" member of a GeoServer REST payload.
type Workspaces struct {
    Workspaces *NameObject `json:"workspace"`
}
// DataStores wraps the "dataStore" member.
type DataStores struct {
    DataStore *DataStore `json:"dataStore"`
}
// DataStore names a vector data store and carries its connection settings.
type DataStore struct {
    Name string `json:"name"`
    ConnectionParameters *ConnectionParameters `json:"connectionParameters"`
}
// ConnectionParameters is GeoServer's "entry" list of key/value settings.
type ConnectionParameters struct {
    Entrys []*Entry `json:"entry"`
}
// Entry is one key/value connection parameter; GeoServer encodes the key as
// "@key" and the value as "$".
type Entry struct {
    Key string `json:"@key"`
    Value string `json:"$"`
}
// FeatureTypeJson wraps the "featureType" member.
type FeatureTypeJson struct {
    FeatureType *FeatureType `json:"featureType"`
}
// FeatureType names a vector feature type with its SRS and bounding boxes.
type FeatureType struct {
    Name string `json:"name,omitempty"`
    NativeName string `json:"nativeName,omitempty"`
    Srs string `json:"srs,omitempty"`
    NativeBoundingBox *NativeBBox `json:"nativeBoundingBox,omitempty"`
    LatLonBoundingBox *WGS84BBox `json:"latLonBoundingBox,omitempty"`
}
// WGS84BBox is a lat/lon bounding box (used for latLonBoundingBox).
type WGS84BBox struct {
    Minx float64 `json:"minx,omitempty"`
    Maxx float64 `json:"maxx,omitempty"`
    Miny float64 `json:"miny,omitempty"`
    Maxy float64 `json:"maxy,omitempty"`
    // Crs *Crs `json:"crs,omitempty"`
}
// Crs is a coordinate-reference-system reference ("@class"/"$" encoding).
type Crs struct {
    Class string `json:"@class,omitempty"`
    Value string `json:"$,omitempty"`
}
// NativeBBox is a bounding box in the layer's native projection.
type NativeBBox struct {
    Minx float64 `json:"minx,omitempty"`
    Maxx float64 `json:"maxx,omitempty"`
    Miny float64 `json:"miny,omitempty"`
    Maxy float64 `json:"maxy,omitempty"`
    // Crs string `json:"crs,omitempty"`
}
// CoverageStores wraps the "coverageStore" member.
type CoverageStores struct {
    CoverageStore *CoverageStore `json:"coverageStore"`
}
// CoverageStore names a raster store, its type, and its data URL.
type CoverageStore struct {
    Name string `json:"name"`
    Type string `json:"type"`
    Url string `json:"url"`
    Enabled bool `json:"enabled"`
    Workspaces *NameObject `json:"workspace"`
}
// CoverageJson wraps the "coverage" member.
type CoverageJson struct {
    Coverage *CoverageInfo `json:"coverage"`
}
// CoverageInfo names a raster coverage with its SRS and bounding boxes.
type CoverageInfo struct {
    Name string `json:"name"`
    NativeName string `json:"nativeName"`
    Srs string `json:"srs,omitempty"`
    NativeBoundingBox *NativeBBox `json:"nativeBoundingBox,omitempty"`
    LatLonBoundingBox *WGS84BBox `json:"latLonBoundingBox,omitempty"`
}
// NameObject is a bare {"name": ...} reference.
type NameObject struct {
    Name string `json:"name"`
}
// Layer ties a published layer to its backing resource and styles.
type Layer struct {
    Name string `json:"name,omitempty"`
    DefaultStyle *NameObject `json:"defaultStyle,omitempty"`
    Resource *Resource `json:"resource,omitempty"`
    Styles *StylesStruct `json:"styles,omitempty"`
}
// Resource points at the feature type or coverage behind a layer.
type Resource struct {
    Class string `json:"@class,omitempty"`
    Name string `json:"name,omitempty"`
    Href string `json:"href,omitempty"`
}
// LayerJson wraps the "layer" member.
type LayerJson struct {
    Layer *Layer `json:"layer"`
}
// Style is one style, by reference (Href) and/or inline SLD body (Sld).
type Style struct {
    Name string `json:"name,omitempty"`
    Href string `json:"href,omitempty"`
    Pic string `json:"pic,omitempty"`
    Title string `json:"title,omitempty"`
    Sld string `json:"sld,omitempty"`
}
// StylesStruct is the "style" list element.
type StylesStruct struct {
    Styles []*Style `json:"style"`
}
// StylesJson wraps the "styles" member.
type StylesJson struct {
    Styles *StylesStruct `json:"styles"`
}
|
package quizzee
import "strings"
// Answer is one candidate answer: the raw text plus the derived forms
// (cropped text, tokenized words, and the multi-character keyword subset)
// that Parse fills in and Score matches against.
type Answer struct {
    Text string `json:"text"`
    CroppedText string `json:"cropped_text"`
    Words []string `json:"words"`
    Keys []string `json:"keys"`
}
// NewAnswer wraps raw answer text; call Parse before scoring.
func NewAnswer(text string) *Answer {
    a := Answer{Text: text}
    return &a
}
// Parse derives the matchable forms of the answer: strips the first ":" and
// surrounding whitespace into CroppedText, tokenizes it into Words, and
// keeps only multi-character words as Keys.
// NOTE(review): cws.Tokenize is declared elsewhere in the package —
// presumably a word segmenter; confirm its contract before relying on it.
func (a *Answer) Parse() error {
    a.CroppedText = strings.TrimSpace(strings.Replace(a.Text,
        ":", "", 1))
    a.Words = cws.Tokenize(a.CroppedText)
    a.Keys = make([]string, 0, len(a.Words))
    for _, w := range a.Words {
        // Single-character tokens are too noisy to use as match keys.
        if len(w) > 1 {
            a.Keys = append(a.Keys, w)
        }
    }
    return nil
}
// Score counts occurrences of the cropped text and of each key within s.
func (a Answer) Score(s string) (count float64) {
    hits := strings.Count(s, a.CroppedText)
    for _, key := range a.Keys {
        hits += strings.Count(s, key)
    }
    return float64(hits)
}
// Answers is a set of candidate answers with one accumulated score slot per
// answer (parallel slices).
type Answers struct {
    Answers []*Answer
    Scores []float64
}
// NewAnswers builds an Answers collection, skipping empty texts, with one
// zeroed score slot per kept answer.
func NewAnswers(texts []string) *Answers {
    kept := make([]*Answer, 0, len(texts))
    for _, txt := range texts {
        if txt == "" {
            continue
        }
        kept = append(kept, NewAnswer(txt))
    }
    return &Answers{
        Answers: kept,
        Scores:  make([]float64, len(kept)),
    }
}
// Size reports how many answers are held.
func (a Answers) Size() int {
    n := len(a.Answers)
    return n
}
// Parse derives the matchable forms of every held answer, stopping at the
// first failure.
func (a *Answers) Parse() error {
    for _, ans := range a.Answers {
        if parseErr := ans.Parse(); parseErr != nil {
            return parseErr
        }
    }
    return nil
}
// Score adds factor-weighted match counts of s to every answer's running
// score; an empty string or non-positive factor is a no-op.
func (a *Answers) Score(s string, factor float64) {
    if s == "" || factor <= 0 {
        return
    }
    for i, ans := range a.Answers {
        a.Scores[i] += factor * ans.Score(s)
    }
}
// Rates returns each answer's share of the total accumulated score.
//
// Fix: when every score is zero the original divided by zero and returned
// NaN for every entry; an all-zero slice is returned instead.
func (a *Answers) Rates() []float64 {
    rates := make([]float64, len(a.Scores))
    var total float64
    for _, score := range a.Scores {
        total += score
    }
    if total == 0 {
        return rates
    }
    for i, score := range a.Scores {
        rates[i] = score / total
    }
    return rates
}
|
package main
import (
"fmt"
"github.com/jackytck/projecteuler/tools"
)
// solve sums every number from 2 upward that equals the sum of the fifth
// powers of its own digits (Project Euler style).
func solve() int {
    low := 2
    up := 354294 // 9**5 * 6 — a safe upper bound; see the note at file end
    var sum int
    for i := low; i <= up; i++ {
        // tools.DigitSum presumably returns the digit-power sum as a
        // *big.Int — confirm against the tools package.
        if int(tools.DigitSum(i, 5).Int64()) == i {
            sum += i
        }
    }
    return sum
}
// main prints the puzzle answer.
func main() {
    answer := solve()
    fmt.Println(answer)
}
// Sum of all the numbers that can be written as the sum of fifth powers of their digits.
// Note:
// A seven-digit candidate's digit-fifth-power sum is at most 9**5 * 7 = 413343,
// which has only six digits, so no number with seven or more digits can equal
// its own digit sum. Hence a safe upper search bound is 9**5 * 6 = 354294.
|
package san_test
import (
"net"
"testing"
"github.com/IPA-CyberLab/kmgm/san"
)
// TestAdd exercises Add with an IP, a DNS name, and a duplicate DNS name,
// checking that entries land in the right list and duplicates are dropped.
func TestAdd(t *testing.T) {
    var ns san.Names
    add := func(s string) {
        if err := ns.Add(s); err != nil {
            t.Fatalf("unexpected err: %v", err)
        }
    }
    add("192.168.0.1")
    if len(ns.IPAddrs) != 1 {
        t.Fatalf("unexpected")
    }
    add("example.com")
    if len(ns.IPAddrs) != 1 {
        t.Fatalf("unexpected")
    }
    if len(ns.DNSNames) != 1 {
        t.Fatalf("unexpected")
    }
    add("example.com") // duplicate must not grow the list
    if len(ns.DNSNames) != 1 {
        t.Fatalf("unexpected")
    }
}
// TestPunycode checks that an internationalized domain name is stored in
// its punycode (ACE) form.
func TestPunycode(t *testing.T) {
    var ns san.Names
    if err := ns.Add("日本語.example"); err != nil {
        t.Fatalf("unexpected: %v", err)
    }
    if got := len(ns.DNSNames); got != 1 {
        t.Fatalf("unexpected")
    }
    if got := ns.DNSNames[0]; got != "xn--wgv71a119e.example" {
        t.Fatalf("unexpected: %q", got)
    }
}
// TestForThisHost_IPAddr checks that a concrete host:port yields exactly
// that one IP address.
func TestForThisHost_IPAddr(t *testing.T) {
    ns := san.ForThisHost("192.168.0.100:12345")
    if got := len(ns.IPAddrs); got != 1 {
        t.Fatalf("unexpected len: %d", got)
    }
    want := net.ParseIP("192.168.0.100")
    if !ns.IPAddrs[0].Equal(want) {
        t.Fatalf("unexpected ip: %v", ns.IPAddrs[0])
    }
}
// TestForThisHost_0000 checks that 0.0.0.0 expands to the host's own
// addresses rather than echoing an arbitrary IP.
func TestForThisHost_0000(t *testing.T) {
    ns := san.ForThisHost("0.0.0.0:12345")
    if len(ns.IPAddrs) == 0 {
        t.Fatalf("unexpected len: %d", len(ns.IPAddrs))
    }
    unwanted := net.ParseIP("192.168.0.100")
    for _, addr := range ns.IPAddrs {
        if addr.Equal(unwanted) {
            t.Fatalf("unexpected ip: %v", addr)
        }
    }
}
// TestForThisHost_Empty checks that an empty host expands to the machine's
// own addresses, just like 0.0.0.0.
func TestForThisHost_Empty(t *testing.T) {
    ns := san.ForThisHost(":12345")
    if len(ns.IPAddrs) == 0 {
        t.Fatalf("unexpected len: %d", len(ns.IPAddrs))
    }
    unwanted := net.ParseIP("192.168.0.100")
    for _, addr := range ns.IPAddrs {
        if addr.Equal(unwanted) {
            t.Fatalf("unexpected ip: %v", addr)
        }
    }
}
|
package responses
type (
    // UserResponse is the JSON shape returned for a user profile.
    UserResponse struct {
        Name      string `json:"name"`
        AvatarURL string `json:"avatar_url"`
    }
)

// NewUserResponse assembles a UserResponse from its two fields.
func NewUserResponse(name, avatarURL string) UserResponse {
    resp := UserResponse{Name: name, AvatarURL: avatarURL}
    return resp
}
|
package main
import (
"fmt"
"log"
"net/http"
)
func Root(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "")
}
func Health(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "{\"result\": \"OK\"}")
}
func main() {
http.HandleFunc("/", Root)
http.HandleFunc("/health", Health)
log.Fatal(http.ListenAndServe(":8000", nil))
}
|
package main
import (
"log"
"net/http"
"github.com/michaldziurowski/tech-challenge-time/server/timetracking/infrastructure"
)
func corsHandler(h http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Headers", "*")
h.ServeHTTP(w, r)
}
}
func main() {
timeTrackingHandler := infrastructure.HttpHandler()
log.Println("The timetracking server is ON : http://localhost:8080")
log.Fatal(http.ListenAndServe(":8080", corsHandler(timeTrackingHandler)))
}
|
package protocol
// PingCmd handles the protocol's PING command, embedding the generic Cmd.
type PingCmd struct {
    *Cmd
}
// Deal replies with the canned Pong payload; PING carries no state.
func (c *PingCmd) Deal() []byte {
    return Pong
}
// paramInit is a no-op: PING takes no parameters.
func (c *PingCmd) paramInit() {
}
|
package dbi
import (
"database/sql"
"fmt"
"log"
"strings"
"time"
"github.com/bingoohuang/pump/util"
"github.com/bingoohuang/gou/str"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
)
// Batcher accumulates rows and writes them to the database in fixed-size
// batches.
type Batcher interface {
    // GetBatchNum returns the batch num
    GetBatchNum() int
    // AddRow adds a row to batcher and execute when rows accumulated to the batch num.
    AddRow(colValues []interface{}) error
    // Complete completes the left rows that less than batch num.
    Complete() (int, error)
}
// InsertBatcher is a Batcher that issues multi-row INSERT statements.
type InsertBatcher struct {
    batchNum      int            // rows per full batch
    columnCount   int            // columns per row
    rowsCount     int            // rows currently buffered
    batchExecuted int            // batches executed so far (gates the inter-batch sleep)
    rows          []interface{}  // flattened column values of the buffered rows
    db            *sql.DB
    batchSQL      string         // pre-built INSERT with batchNum value tuples
    completeSQL   func() string  // INSERT sized to the current rowsCount
    batchOp       func(int)      // callback invoked with each flushed row count
    sleep         time.Duration  // optional pause between batches (viper "sleep")
    verbose       int            // verbosity level for SQL/value logging
}
// NewInsertBatch builds an InsertBatcher for statements of the form
// `insert into table(cols) values (?,...),(?,...),...`. batchOp is invoked
// after each executed batch with the number of rows written; rows is the
// expected total row count, used only to decide whether the batch SQL is
// worth logging.
func NewInsertBatch(table string, columnNames []string, batchNum int, db *sql.DB,
    batchOp func(int), verbose, rows int,
) *InsertBatcher {
    b := &InsertBatcher{batchNum: batchNum, db: db, columnCount: len(columnNames)}
    b.rows = make([]interface{}, 0, b.batchNum*b.columnCount)
    // One "(?,?,...)" placeholder tuple per row.
    bind := "(" + str.Repeat("?", ",", b.columnCount) + ")"
    s := "insert into " + table + "(" + strings.Join(columnNames, ",") + ") values"
    b.batchSQL = s + str.Repeat(bind, ",", batchNum)
    if verbose > 0 && batchNum >= rows {
        logrus.Infof("batchSQL:%s", util.Abbr(b.batchSQL, verbose, 500))
    }
    // completeSQL is rebuilt on demand so it matches the leftover row count.
    b.completeSQL = func() string { return s + str.Repeat(bind, ",", b.rowsCount) }
    b.batchOp = batchOp
    b.verbose = verbose
    b.setSleepDuration()
    return b
}
// GetBatchNum returns the configured batch size.
func (b InsertBatcher) GetBatchNum() int {
    return b.batchNum
}
// setSleepDuration reads the optional "sleep" viper key (a time.Duration
// string such as "10ms") into b.sleep; an unparsable value aborts via
// log.Panicf. An empty value leaves sleeping disabled.
func (b *InsertBatcher) setSleepDuration() {
    sleepDuration := viper.GetString("sleep")
    if sleepDuration == "" {
        return
    }
    var err error
    b.sleep, err = time.ParseDuration(sleepDuration)
    if err != nil {
        log.Panicf("fail to parse sleep %s, error %v", sleepDuration, err)
    }
}
// AddRow buffers one row's column values and flushes with the pre-built
// batch INSERT once a full batch has accumulated.
func (b *InsertBatcher) AddRow(colValues []interface{}) error {
    b.rows = append(b.rows, colValues...)
    b.rowsCount++
    if b.rowsCount != b.batchNum {
        return nil
    }
    return b.executeBatch(b.batchSQL)
}
// Complete flushes the buffered rows that did not fill a whole batch and
// reports how many rows were written in this final flush.
//
// Fix: executeBatch resets rowsCount to 0 on success, so the original
// returned 0 instead of the number of completed rows; the count is now
// captured before executing.
func (b *InsertBatcher) Complete() (int, error) {
    left := b.rowsCount
    if left <= 0 {
        return 0, nil
    }
    if err := b.executeBatch(b.completeSQL()); err != nil {
        return 0, err
    }
    return left, nil
}
// executeBatch runs sql with the buffered values (honoring the optional
// inter-batch sleep after the first batch), reports the flushed row count to
// batchOp, and clears the buffer.
// NOTE(review): the buffer is cleared on the error path too, so failed rows
// are dropped rather than retried — confirm that is intentional.
func (b *InsertBatcher) executeBatch(sql string) error {
    if b.batchExecuted > 0 && b.sleep > 0 {
        time.Sleep(b.sleep)
    }
    if b.verbose > 0 {
        logrus.Info(util.Abbr(fmt.Sprintf("values:%v", b.rows), b.verbose, 500))
    }
    if _, err := b.db.Exec(sql, b.rows...); err != nil {
        b.resetBatcherRows()
        return err
    }
    b.batchExecuted++
    b.batchOp(b.rowsCount)
    b.resetBatcherRows()
    return nil
}
// resetBatcherRows clears the row buffer while keeping its capacity for
// reuse by the next batch.
func (b *InsertBatcher) resetBatcherRows() {
    b.rows = b.rows[:0]
    b.rowsCount = 0
}
|
package main
import (
"strings"
"bufio"
"os"
"fmt"
)
// inputPath is the puzzle-input file read by parseInput.
const inputPath = "input.txt"
// main runs a 10,000,000-burst "virus carrier" simulation (presumably
// Advent of Code 2017 day 22 part 2 — confirm) and prints how many bursts
// cause an infection.
//
// Cell states: 0 = clean, 1 = weakened, 2 = infected, 3 = flagged.
// Direction d: 0 = up, 1 = right, 2 = down, 3 = left (see move).
func main() {
    //import puzzle input into a 2d slice
    g := makeGrid(1001)
    tmp := parseInput(inputPath)
    // padding: copy the parsed input into the center of the 1001x1001 grid
    // so the carrier can wander without bounds checks.
    for i, j := (len(g)/2 - len(tmp)/2), 0; j < len(tmp); i, j = i+1, j+1 {
        for k, l := (len(g)/2 - len(tmp)/2), 0; l < len(tmp); k, l = k+1, l+1 {
            g[i][k] = tmp[j][l]
        }
    }
    count := 0
    d := 0
    x, y := len(g)/2, len(g)/2 // start at the grid center, facing up
    for i := 0; i < 10000000; i++ {
        if g[y][x] == 0 {
            // Clean: turn left (decrement direction, wrapping 0 -> 3).
            d--
            if d == -1 {
                d = 3
            }
        } else if g[y][x] == 2 {
            // Infected: turn right.
            d = (d + 1) % 4
        } else if g[y][x] == 3 {
            // Flagged: reverse. (Weakened keeps going straight.)
            d = (d + 2) % 4
        }
        // Advance the cell: clean -> weakened -> infected -> flagged -> clean.
        g[y][x] = (g[y][x] + 1) % 4
        if g[y][x] == 2 {
            // This burst just infected a cell.
            count++
        }
        x, y = move(x, y, d)
    }
    fmt.Println(count)
}
// move advances one grid cell in direction d (0=up, 1=right, 2=down, 3=left)
// and returns the new coordinates; any other d panics.
func move(x, y, d int) (int, int) {
    switch d {
    case 0:
        return x, y - 1
    case 1:
        return x + 1, y
    case 2:
        return x, y + 1
    case 3:
        return x - 1, y
    }
    panic("Unknown Direction")
}
func parseInput(path string) (input [][]int) {
file,_ := os.Open(path)
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := strings.Split(scanner.Text(),"")
tmp := make([]int,0)
for i := range line {
if line[i] == "." {
tmp = append(tmp,0)
} else {
tmp = append(tmp,2)
}
}
input = append(input,tmp)
}
return
}
// makeGrid allocates a size×size grid with every cell zero (clean).
func makeGrid(size int) [][]int {
    grid := make([][]int, size)
    for row := range grid {
        grid[row] = make([]int, size)
    }
    return grid
}
|
// Copyright 2020 Ant Group. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
package tests
import (
"fmt"
"os"
"os/exec"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// registryPort is the host port the throwaway docker registry listens on.
var registryPort = 5051
// run executes cmd through `sh -c`, streaming output to the test process's
// stdio; unless ignoreStatus is set, a non-zero exit fails the test.
func run(t *testing.T, cmd string, ignoreStatus bool) {
    proc := exec.Command("sh", "-c", cmd)
    proc.Stdout = os.Stdout
    proc.Stderr = os.Stderr
    runErr := proc.Run()
    if ignoreStatus {
        return
    }
    assert.Nil(t, runErr)
}
// runWithOutput executes cmd through `sh -c` and returns its stdout; a
// failure to run fails the test.
func runWithOutput(t *testing.T, cmd string) string {
    proc := exec.Command("sh", "-c", cmd)
    proc.Stderr = os.Stderr
    out, outErr := proc.Output()
    assert.Nil(t, outErr)
    return string(out)
}
// Registry models a disposable local Docker registry container used by the
// integration tests.
type Registry struct {
	id   string // container ID as printed by `docker run -d`
	host string // host:port the registry listens on
}
// NewRegistry starts a throw-away `registry:2` container published on
// registryPort and waits briefly for it to come up.
func NewRegistry(t *testing.T) *Registry {
	containerID := runWithOutput(t, fmt.Sprintf("docker run -p %d:5000 --rm -d registry:2", registryPort))
	// `docker run -d` prints the container ID followed by a newline; trim it
	// so commands built later (e.g. `docker rm -f <id>` in Destory) receive a
	// clean single-token argument.
	containerID = strings.TrimSpace(containerID)
	// Give the registry a moment to start accepting connections.
	time.Sleep(time.Second * 2)
	return &Registry{
		id:   containerID,
		host: fmt.Sprintf("localhost:%d", registryPort),
	}
}
// Destory force-removes the registry container, ignoring failures (e.g. if
// the container already exited). NOTE(review): the name is a pre-existing
// typo of "Destroy" in the exported API; renaming it would break callers.
func (registry *Registry) Destory(t *testing.T) {
	run(t, fmt.Sprintf("docker rm -f %s", registry.id), true)
}
// Build builds the image in ./texture/<source> and pushes it to this
// registry, removing any stale local tag first (ignoring removal failures).
func (registry *Registry) Build(t *testing.T, source string) {
	image := fmt.Sprintf("%s/%s", registry.Host(), source)
	run(t, fmt.Sprintf("docker rmi -f %s", image), true)
	run(t, fmt.Sprintf("docker build -t %s ./texture/%s", image, source), false)
	run(t, fmt.Sprintf("docker push %s", image), false)
}
// Host returns the registry's host:port address.
func (registry *Registry) Host() string {
	return registry.host
}
|
package main
import (
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
)
// main wires up the task HTTP API on :9999 with default CORS handling.
func main() {
	router := gin.Default()
	// Register middleware BEFORE the routes: gin snapshots each route's
	// handler chain at registration time, so a Use() call placed after the
	// routes (as in the original) never applied CORS to these endpoints.
	router.Use(cors.Default())
	router.GET("/tasks", ListTasks)
	router.POST("/tasks", NewTask)
	router.PUT("/tasks/:id", UpdateTask)
	// Run blocks; it only returns on a listen/serve failure.
	if err := router.Run(":9999"); err != nil {
		panic(err)
	}
}
|
package main
import (
"errors"
"github.com/pkgz/logg"
"github.com/spf13/cobra"
"log"
"os"
"strings"
)
var out string
var link string
var debug bool
// rootCmd is the single cobra command of the downloader. The project URL
// and destination directory can come from the -u/-o flags or from the
// positional arguments.
var rootCmd = &cobra.Command{
	Use:     "AOSDownloader",
	Short:   "Apple OpenSource download tool",
	Version: "0.0.1",
	// PreRunE validates and derives link/out before RunE executes.
	PreRunE: func(cmd *cobra.Command, args []string) error {
		if debug {
			logg.DebugMode()
		}
		// Fall back to the first positional argument for the URL.
		if link == "" && len(args) > 0 {
			link = args[0]
		}
		if link == "" {
			return errors.New("please provide project url")
		}
		// Destination: second positional argument wins; when the URL was the
		// only argument, derive the directory from the project name in it.
		// NOTE(review): if the URL comes from the -u flag and no positional
		// args are given, out is never derived and the check below errors
		// out — confirm that is intended.
		if out == "" && len(args) >= 2 {
			out = args[1]
		} else if out == "" && len(args) == 1 {
			out = strings.Replace(link, "https://opensource.apple.com/source/", "", 1)
			out = strings.Split(out, "/")[0]
		}
		if out == "" {
			return errors.New("please provide destination path")
		}
		if !strings.Contains(link, "https://opensource.apple.com/source") {
			return errors.New("project url must contain https://opensource.apple.com/source")
		}
		return nil
	},
	// RunE crawls the project listing, then downloads every file into out.
	RunE: func(cmd *cobra.Command, args []string) error {
		log.Printf("[INFO] Fetching project %s...", link)
		links, n, err := parseURI(strings.TrimSuffix(link, "/"), "/")
		if err != nil {
			log.Printf("[ERROR] parse: %v", err)
			return err
		}
		log.Printf("[INFO] Detect %d files in project", n)
		if err := download(links, out); err != nil {
			log.Printf("[ERROR] download: %v", err)
			return err
		}
		log.Printf("[INFO] Project successfully downloaded to `%s`", out)
		return nil
	},
}
// init configures global logging and registers the CLI flags; the flags are
// bound to the package-level variables consumed by rootCmd's PreRunE/RunE.
func init() {
	logg.NewGlobal(os.Stdout)
	logg.SetFlags(0)
	rootCmd.Flags().StringVarP(&link, "url", "u", "", "url to project which you want to download")
	rootCmd.Flags().StringVarP(&out, "out", "o", "", "destination path for project")
	rootCmd.Flags().BoolVarP(&debug, "debug", "d", false, "debug mode")
}
// main executes the root command and exits non-zero on failure.
func main() {
	err := rootCmd.Execute()
	if err == nil {
		return
	}
	log.Print(err)
	os.Exit(1)
}
|
package charts
import (
"github.com/go-echarts/go-echarts/v2/opts"
"github.com/go-echarts/go-echarts/v2/render"
"github.com/go-echarts/go-echarts/v2/types"
)
// Bar3D represents a 3D bar chart. It embeds Chart3D for the shared
// 3D-chart configuration and rendering plumbing.
type Bar3D struct {
	Chart3D
}
// Type returns the chart type identifier (types.ChartBar3D).
func (*Bar3D) Type() string { return types.ChartBar3D }
// NewBar3D creates a new 3D bar chart with the base and 3D defaults
// applied and a renderer attached.
func NewBar3D() *Bar3D {
	chart := new(Bar3D)
	chart.initBaseConfiguration()
	chart.Renderer = render.NewChartRender(chart, chart.Validate)
	chart.initChart3D()
	return chart
}
// AddSeries adds a named series of 3D data points, applying any per-series
// options, and returns the chart so calls can be chained.
func (c *Bar3D) AddSeries(name string, data []opts.Chart3DData, options ...SeriesOpts) *Bar3D {
	c.addSeries(types.ChartBar3D, name, data, options...)
	return c
}
|
package main
// main is intentionally empty: this file only hosts the floodFill
// implementation below so the package compiles as a runnable program.
func main() {
}
// dx/dy encode the four 4-connected neighbour offsets, paired by index.
var (
	dx = []int{1, 0, 0, -1}
	dy = []int{0, 1, -1, 0}
)

// floodFill recolours the 4-connected region of image containing (sr, sc)
// from its current colour to newColor and returns the image, which is
// modified in place. A region that already has newColor is left untouched
// (this also prevents infinite recursion).
func floodFill(image [][]int, sr int, sc int, newColor int) [][]int {
	if old := image[sr][sc]; old != newColor {
		dfs(image, sr, sc, old, newColor)
	}
	return image
}

// dfs paints (x, y) with newColor when it currently holds color, then
// recurses into every in-bounds neighbour.
func dfs(image [][]int, x, y, color, newColor int) {
	if image[x][y] != color {
		return
	}
	image[x][y] = newColor
	for i := range dx {
		nx, ny := x+dx[i], y+dy[i]
		if nx < 0 || nx >= len(image) || ny < 0 || ny >= len(image[0]) {
			continue
		}
		dfs(image, nx, ny, color, newColor)
	}
}
|
package main
import (
"encoding/json"
"mysql_byroad/model"
log "github.com/Sirupsen/logrus"
"github.com/nsqio/go-nsq"
)
// MessageHandler is an NSQ consumer handler that relays decoded notify
// events to the package-level send client.
type MessageHandler struct {
}
// HandleMessage consumes one NSQ message: it decodes the payload into a
// NotifyEvent, forwards it through sendClient, and on an unsuccessful send
// records the failure, raises an alert and requeues without backoff.
func (h *MessageHandler) HandleMessage(msg *nsq.Message) error {
	log.Debug(string(msg.Body))
	evt := new(model.NotifyEvent)
	// Bug fix: the original discarded this error, forwarding a zero-value
	// event when the payload was malformed. Surface it to NSQ instead.
	if err := json.Unmarshal(msg.Body, evt); err != nil {
		log.Errorf("unmarshal notify event: %v", err)
		return err
	}
	// Attempts counts the current delivery, so retries so far = Attempts-1.
	evt.RetryCount = int(msg.Attempts) - 1
	ret, err := sendClient.SendMessage(evt)
	log.Debugf("send message ret %s, error: %v", ret, err)
	if !isSuccessSend(ret) {
		// Prefer the transport error as the failure reason; otherwise use
		// the non-success return value itself.
		var reason string
		if err != nil {
			reason = err.Error()
		} else {
			reason = ret
		}
		handleAlert(evt, reason)
		sendClient.LogSendError(evt, reason)
		msg.RequeueWithoutBackoff(-1)
	}
	return nil
}
|
package binance
import (
"testing"
"github.com/stretchr/testify/suite"
)
// tickerServiceTestSuite bundles the shared mocked-transport fixtures from
// baseTestSuite for all ticker-endpoint tests.
type tickerServiceTestSuite struct {
	baseTestSuite
}
// TestTickerService runs the ticker-endpoint test suite.
func TestTickerService(t *testing.T) {
	suite.Run(t, new(tickerServiceTestSuite))
}
// TestListBookTickers checks that listing all book tickers issues a
// parameterless request and unmarshals every returned ticker.
func (s *tickerServiceTestSuite) TestListBookTickers() {
	// Canned exchange response served by the mocked transport.
	data := []byte(`[
		{
			"symbol": "LTCBTC",
			"bidPrice": "4.00000000",
			"bidQty": "431.00000000",
			"askPrice": "4.00000200",
			"askQty": "9.00000000"
		},
		{
			"symbol": "ETHBTC",
			"bidPrice": "0.07946700",
			"bidQty": "9.00000000",
			"askPrice": "100000.00000000",
			"askQty": "1000.00000000"
		}
	]`)
	s.mockDo(data, nil)
	defer s.assertDo()
	// The list endpoint takes no parameters.
	s.assertReq(func(r *request) {
		e := newRequest()
		s.assertRequestEqual(e, r)
	})
	tickers, err := s.client.NewListBookTickersService().Do(newContext())
	r := s.r()
	r.NoError(err)
	r.Len(tickers, 2)
	e1 := &BookTicker{
		Symbol:      "LTCBTC",
		BidPrice:    "4.00000000",
		BidQuantity: "431.00000000",
		AskPrice:    "4.00000200",
		AskQuantity: "9.00000000",
	}
	e2 := &BookTicker{
		Symbol:      "ETHBTC",
		BidPrice:    "0.07946700",
		BidQuantity: "9.00000000",
		AskPrice:    "100000.00000000",
		AskQuantity: "1000.00000000",
	}
	s.assertBookTickerEqual(e1, tickers[0])
	s.assertBookTickerEqual(e2, tickers[1])
}
// TestSingleBookTicker checks that requesting one symbol's book ticker
// sends the `symbol` parameter and decodes the single-object response.
func (s *tickerServiceTestSuite) TestSingleBookTicker() {
	data := []byte(`{
		"symbol": "LTCBTC",
		"bidPrice": "4.00000000",
		"bidQty": "431.00000000",
		"askPrice": "4.00000200",
		"askQty": "9.00000000"
	}`)
	s.mockDo(data, nil)
	defer s.assertDo()
	symbol := "LTCBTC"
	s.assertReq(func(r *request) {
		e := newRequest().setParam("symbol", symbol)
		s.assertRequestEqual(e, r)
	})
	// A single-symbol query still returns a slice of length 1.
	tickers, err := s.client.NewListBookTickersService().Symbol("LTCBTC").Do(newContext())
	r := s.r()
	r.NoError(err)
	r.Len(tickers, 1)
	e := &BookTicker{
		Symbol:      "LTCBTC",
		BidPrice:    "4.00000000",
		BidQuantity: "431.00000000",
		AskPrice:    "4.00000200",
		AskQuantity: "9.00000000",
	}
	s.assertBookTickerEqual(e, tickers[0])
}
// assertBookTickerEqual fails the test when any field of the actual book
// ticker a differs from the expected e.
func (s *tickerServiceTestSuite) assertBookTickerEqual(e, a *BookTicker) {
	s.r().Equal(e.Symbol, a.Symbol, "Symbol")
	s.r().Equal(e.BidPrice, a.BidPrice, "BidPrice")
	s.r().Equal(e.BidQuantity, a.BidQuantity, "BidQuantity")
	s.r().Equal(e.AskPrice, a.AskPrice, "AskPrice")
	s.r().Equal(e.AskQuantity, a.AskQuantity, "AskQuantity")
}
// TestListPrices checks that listing all symbol prices issues a
// parameterless request and decodes every returned price.
func (s *tickerServiceTestSuite) TestListPrices() {
	data := []byte(`[
		{
			"symbol": "LTCBTC",
			"price": "4.00000200"
		},
		{
			"symbol": "ETHBTC",
			"price": "0.07946600"
		}
	]`)
	s.mockDo(data, nil)
	defer s.assertDo()
	s.assertReq(func(r *request) {
		e := newRequest()
		s.assertRequestEqual(e, r)
	})
	prices, err := s.client.NewListPricesService().Do(newContext())
	r := s.r()
	r.NoError(err)
	r.Len(prices, 2)
	e1 := &SymbolPrice{
		Symbol: "LTCBTC",
		Price:  "4.00000200",
	}
	e2 := &SymbolPrice{
		Symbol: "ETHBTC",
		Price:  "0.07946600",
	}
	s.assertSymbolPriceEqual(e1, prices[0])
	s.assertSymbolPriceEqual(e2, prices[1])
}
// TestListPricesForMultipleSymbols checks that requesting prices for a list
// of symbols JSON-encodes them into the `symbols` request parameter.
func (s *tickerServiceTestSuite) TestListPricesForMultipleSymbols() {
	data := []byte(`[
		{
			"symbol": "LTCBTC",
			"price": "4.00000200"
		},
		{
			"symbol": "ETHUSDT",
			"price": "2856.76"
		}
	]`)
	s.mockDo(data, nil)
	defer s.assertDo()
	symbol1, symbol2 := "ETHUSDT", "LTCBTC"
	symbols := make([]string, 2)
	symbols[0] = symbol1
	symbols[1] = symbol2
	// The original registered an extra parameterless assertReq before this
	// one; it was dead code, immediately overwritten by the call below, and
	// has been removed.
	s.assertReq(func(r *request) {
		e := newRequest().setParam("symbols", `["ETHUSDT","LTCBTC"]`)
		s.assertRequestEqual(e, r)
	})
	prices, err := s.client.NewListPricesService().Symbols(symbols).Do(newContext())
	r := s.r()
	r.NoError(err)
	r.Len(prices, 2)
	e1 := &SymbolPrice{
		Symbol: "LTCBTC",
		Price:  "4.00000200",
	}
	e2 := &SymbolPrice{
		Symbol: "ETHUSDT",
		Price:  "2856.76",
	}
	s.assertSymbolPriceEqual(e1, prices[0])
	s.assertSymbolPriceEqual(e2, prices[1])
}
// TestListSinglePrice checks that a single-symbol price query sends the
// `symbol` parameter and decodes the single-object response into a
// one-element slice.
func (s *tickerServiceTestSuite) TestListSinglePrice() {
	data := []byte(`{
		"symbol": "LTCBTC",
		"price": "4.00000200"
	}`)
	s.mockDo(data, nil)
	defer s.assertDo()
	symbol := "LTCBTC"
	s.assertReq(func(r *request) {
		e := newRequest().setParam("symbol", symbol)
		s.assertRequestEqual(e, r)
	})
	prices, err := s.client.NewListPricesService().Symbol(symbol).Do(newContext())
	r := s.r()
	r.NoError(err)
	r.Len(prices, 1)
	e1 := &SymbolPrice{
		Symbol: "LTCBTC",
		Price:  "4.00000200",
	}
	s.assertSymbolPriceEqual(e1, prices[0])
}
// assertSymbolPriceEqual fails the test when the actual symbol price a
// differs from the expected e in either field.
func (s *tickerServiceTestSuite) assertSymbolPriceEqual(e, a *SymbolPrice) {
	s.r().Equal(e.Price, a.Price, "Price")
	s.r().Equal(e.Symbol, a.Symbol, "Symbol")
}
// TestPriceChangeStats checks the 24hr price-change-statistics endpoint for
// a single symbol. (FristID mirrors the sic-spelled field name declared on
// PriceChangeStats.)
func (s *tickerServiceTestSuite) TestPriceChangeStats() {
	data := []byte(`{
		"symbol": "BNBBTC",
		"priceChange": "-94.99999800",
		"priceChangePercent": "-95.960",
		"weightedAvgPrice": "0.29628482",
		"prevClosePrice": "0.10002000",
		"lastPrice": "4.00000200",
		"lastQty": "200.00000000",
		"bidPrice": "4.00000000",
		"askPrice": "4.00000200",
		"openPrice": "99.00000000",
		"highPrice": "100.00000000",
		"lowPrice": "0.10000000",
		"volume": "8913.30000000",
		"openTime": 1499783499040,
		"closeTime": 1499869899040,
		"firstId": 28385,
		"lastId": 28460,
		"count": 76,
		"bidQty": "300.00000000",
		"askQty": "400.00000000"
	}`)
	s.mockDo(data, nil)
	defer s.assertDo()
	symbol := "BNBBTC"
	s.assertReq(func(r *request) {
		e := newRequest().setParam("symbol", symbol)
		s.assertRequestEqual(e, r)
	})
	stats, err := s.client.NewListPriceChangeStatsService().Symbol(symbol).Do(newContext())
	r := s.r()
	r.NoError(err)
	r.Len(stats, 1)
	e := &PriceChangeStats{
		Symbol:             "BNBBTC",
		PriceChange:        "-94.99999800",
		PriceChangePercent: "-95.960",
		WeightedAvgPrice:   "0.29628482",
		PrevClosePrice:     "0.10002000",
		LastPrice:          "4.00000200",
		LastQty:            "200.00000000",
		BidPrice:           "4.00000000",
		AskPrice:           "4.00000200",
		OpenPrice:          "99.00000000",
		HighPrice:          "100.00000000",
		LowPrice:           "0.10000000",
		Volume:             "8913.30000000",
		OpenTime:           1499783499040,
		CloseTime:          1499869899040,
		FristID:            28385,
		LastID:             28460,
		Count:              76,
		BidQty:             "300.00000000",
		AskQty:             "400.00000000",
	}
	s.assertPriceChangeStatsEqual(e, stats[0])
}
// assertPriceChangeStatsEqual fails the test when any compared field of the
// actual stats a differs from the expected e.
func (s *tickerServiceTestSuite) assertPriceChangeStatsEqual(e, a *PriceChangeStats) {
	req := s.r()
	req.Equal(e.Symbol, a.Symbol, "Symbol")
	req.Equal(e.PriceChange, a.PriceChange, "PriceChange")
	req.Equal(e.PriceChangePercent, a.PriceChangePercent, "PriceChangePercent")
	req.Equal(e.WeightedAvgPrice, a.WeightedAvgPrice, "WeightedAvgPrice")
	req.Equal(e.PrevClosePrice, a.PrevClosePrice, "PrevClosePrice")
	req.Equal(e.LastPrice, a.LastPrice, "LastPrice")
	req.Equal(e.LastQty, a.LastQty, "LastQty")
	req.Equal(e.BidPrice, a.BidPrice, "BidPrice")
	req.Equal(e.AskPrice, a.AskPrice, "AskPrice")
	req.Equal(e.OpenPrice, a.OpenPrice, "OpenPrice")
	req.Equal(e.HighPrice, a.HighPrice, "HighPrice")
	req.Equal(e.LowPrice, a.LowPrice, "LowPrice")
	req.Equal(e.Volume, a.Volume, "Volume")
	req.Equal(e.OpenTime, a.OpenTime, "OpenTime")
	req.Equal(e.CloseTime, a.CloseTime, "CloseTime")
	req.Equal(e.FristID, a.FristID, "FristID")
	req.Equal(e.LastID, a.LastID, "LastID")
	req.Equal(e.Count, a.Count, "Count")
}
// TestMultiplePriceChangeStats checks the 24hr statistics endpoint when a
// list of symbols is supplied via the `symbols` parameter.
func (s *tickerServiceTestSuite) TestMultiplePriceChangeStats() {
	data := []byte(`[{
		"symbol": "BNBBTC",
		"priceChange": "-94.99999800",
		"priceChangePercent": "-95.960",
		"weightedAvgPrice": "0.29628482",
		"prevClosePrice": "0.10002000",
		"lastPrice": "4.00000200",
		"lastQty": "200.00000000",
		"bidPrice": "4.00000000",
		"askPrice": "4.00000200",
		"openPrice": "99.00000000",
		"highPrice": "100.00000000",
		"lowPrice": "0.10000000",
		"volume": "8913.30000000",
		"quoteVolume": "15.30000000",
		"openTime": 1499783499040,
		"closeTime": 1499869899040,
		"firstId": 28385,
		"lastId": 28460,
		"count": 76
	},{
		"symbol": "ETHBTC",
		"priceChange": "-194.99999800",
		"priceChangePercent": "-195.960",
		"weightedAvgPrice": "10.29628482",
		"prevClosePrice": "10.10002000",
		"lastPrice": "14.00000200",
		"lastQty": "1200.00000000",
		"bidPrice": "14.00000000",
		"askPrice": "14.00000200",
		"openPrice": "199.00000000",
		"highPrice": "1100.00000000",
		"lowPrice": "10.10000000",
		"volume": "18913.30000000",
		"quoteVolume": "115.30000000",
		"openTime": 1499783499041,
		"closeTime": 1499869899041,
		"firstId": 28381,
		"lastId": 28461,
		"count": 71
	}]`)
	s.mockDo(data, nil)
	defer s.assertDo()
	s.assertReq(func(r *request) {
		e := newRequest().setParam("symbols", `["BNBBTC","ETHBTC"]`)
		s.assertRequestEqual(e, r)
	})
	res, err := s.client.NewListPriceChangeStatsService().Symbols([]string{"BNBBTC", "ETHBTC"}).Do(newContext())
	r := s.r()
	r.NoError(err)
	// Expected values mirror the payload above; the helper only compares the
	// fields it lists (LastQty, for instance, is not asserted).
	e := []*PriceChangeStats{
		{
			Symbol:             "BNBBTC",
			PriceChange:        "-94.99999800",
			PriceChangePercent: "-95.960",
			WeightedAvgPrice:   "0.29628482",
			PrevClosePrice:     "0.10002000",
			LastPrice:          "4.00000200",
			BidPrice:           "4.00000000",
			AskPrice:           "4.00000200",
			OpenPrice:          "99.00000000",
			HighPrice:          "100.00000000",
			LowPrice:           "0.10000000",
			Volume:             "8913.30000000",
			QuoteVolume:        "15.30000000",
			OpenTime:           1499783499040,
			CloseTime:          1499869899040,
			FristID:            28385,
			LastID:             28460,
			Count:              76,
		},
		{
			Symbol:             "ETHBTC",
			PriceChange:        "-194.99999800",
			PriceChangePercent: "-195.960",
			WeightedAvgPrice:   "10.29628482",
			PrevClosePrice:     "10.10002000",
			LastPrice:          "14.00000200",
			BidPrice:           "14.00000000",
			AskPrice:           "14.00000200",
			OpenPrice:          "199.00000000",
			HighPrice:          "1100.00000000",
			LowPrice:           "10.10000000",
			Volume:             "18913.30000000",
			QuoteVolume:        "115.30000000",
			OpenTime:           1499783499041,
			CloseTime:          1499869899041,
			FristID:            28381,
			LastID:             28461,
			Count:              71,
		},
	}
	s.assertListPriceChangeStatsEqual(e, res)
}
// TestListPriceChangeStats checks the 24hr statistics endpoint with no
// symbol filter: the request carries no parameters and the array response
// is decoded in full.
func (s *tickerServiceTestSuite) TestListPriceChangeStats() {
	data := []byte(`[{
		"symbol": "BNBBTC",
		"priceChange": "-94.99999800",
		"priceChangePercent": "-95.960",
		"weightedAvgPrice": "0.29628482",
		"prevClosePrice": "0.10002000",
		"lastPrice": "4.00000200",
		"lastQty": "200.00000000",
		"bidPrice": "4.00000000",
		"askPrice": "4.00000200",
		"openPrice": "99.00000000",
		"highPrice": "100.00000000",
		"lowPrice": "0.10000000",
		"volume": "8913.30000000",
		"quoteVolume": "15.30000000",
		"openTime": 1499783499040,
		"closeTime": 1499869899040,
		"firstId": 28385,
		"lastId": 28460,
		"count": 76
	}]`)
	s.mockDo(data, nil)
	defer s.assertDo()
	s.assertReq(func(r *request) {
		e := newRequest()
		s.assertRequestEqual(e, r)
	})
	res, err := s.client.NewListPriceChangeStatsService().Do(newContext())
	r := s.r()
	r.NoError(err)
	e := []*PriceChangeStats{
		{
			Symbol:             "BNBBTC",
			PriceChange:        "-94.99999800",
			PriceChangePercent: "-95.960",
			WeightedAvgPrice:   "0.29628482",
			PrevClosePrice:     "0.10002000",
			LastPrice:          "4.00000200",
			BidPrice:           "4.00000000",
			AskPrice:           "4.00000200",
			OpenPrice:          "99.00000000",
			HighPrice:          "100.00000000",
			LowPrice:           "0.10000000",
			Volume:             "8913.30000000",
			QuoteVolume:        "15.30000000",
			OpenTime:           1499783499040,
			CloseTime:          1499869899040,
			FristID:            28385,
			LastID:             28460,
			Count:              76,
		},
	}
	s.assertListPriceChangeStatsEqual(e, res)
}
// assertListPriceChangeStatsEqual compares the expected and actual stats
// slices element by element over the asserted fields.
func (s *tickerServiceTestSuite) assertListPriceChangeStatsEqual(e, a []*PriceChangeStats) {
	req := s.r()
	for i := range e {
		exp, act := e[i], a[i]
		req.Equal(exp.Symbol, act.Symbol, "Symbol")
		req.Equal(exp.PriceChange, act.PriceChange, "PriceChange")
		req.Equal(exp.PriceChangePercent, act.PriceChangePercent, "PriceChangePercent")
		req.Equal(exp.WeightedAvgPrice, act.WeightedAvgPrice, "WeightedAvgPrice")
		req.Equal(exp.PrevClosePrice, act.PrevClosePrice, "PrevClosePrice")
		req.Equal(exp.LastPrice, act.LastPrice, "LastPrice")
		req.Equal(exp.BidPrice, act.BidPrice, "BidPrice")
		req.Equal(exp.AskPrice, act.AskPrice, "AskPrice")
		req.Equal(exp.OpenPrice, act.OpenPrice, "OpenPrice")
		req.Equal(exp.HighPrice, act.HighPrice, "HighPrice")
		req.Equal(exp.LowPrice, act.LowPrice, "LowPrice")
		req.Equal(exp.Volume, act.Volume, "Volume")
		req.Equal(exp.OpenTime, act.OpenTime, "OpenTime")
		req.Equal(exp.CloseTime, act.CloseTime, "CloseTime")
		req.Equal(exp.FristID, act.FristID, "FristID")
		req.Equal(exp.LastID, act.LastID, "LastID")
		req.Equal(exp.Count, act.Count, "Count")
	}
}
// TestAveragePrice checks the current-average-price endpoint: the request
// carries the symbol and the mins/price pair is decoded.
func (s *tickerServiceTestSuite) TestAveragePrice() {
	data := []byte(`{
		"mins": 5,
		"price": "9.35751834"
	}`)
	s.mockDo(data, nil)
	defer s.assertDo()
	symbol := "LTCBTC"
	s.assertReq(func(r *request) {
		e := newRequest().setParam("symbol", symbol)
		s.assertRequestEqual(e, r)
	})
	res, err := s.client.NewAveragePriceService().Symbol(symbol).Do(newContext())
	r := s.r()
	r.NoError(err)
	e := &AvgPrice{
		Mins:  5,
		Price: "9.35751834",
	}
	s.assertAvgPrice(e, res)
}
// assertAvgPrice fails the test when the actual average price a differs
// from the expected e in either field.
func (s *tickerServiceTestSuite) assertAvgPrice(e, a *AvgPrice) {
	r := s.r()
	r.Equal(e.Mins, a.Mins, "Mins")
	r.Equal(e.Price, a.Price, "Price")
}
// TestListSymbolTicker checks the rolling-window ticker endpoint: both the
// symbol and windowSize parameters are sent, and the response is decoded.
func (s *tickerServiceTestSuite) TestListSymbolTicker() {
	data := []byte(`[
		{
			"symbol": "ETHBTC",
			"priceChange": "0.00004700",
			"priceChangePercent": "0.066",
			"weightedAvgPrice": "0.07168666",
			"openPrice": "0.07093500",
			"highPrice": "0.07321800",
			"lowPrice": "0.07054200",
			"lastPrice": "0.07098200",
			"volume": "86992.33370000",
			"quoteVolume": "6236.18963157",
			"openTime": 1659097380000,
			"closeTime": 1659183780986,
			"firstId": 359930693,
			"lastId": 360209854,
			"count": 279162
		}
	]`)
	s.mockDo(data, nil)
	defer s.assertDo()
	symbol := "ETHBTC"
	windowSize := "1m" // 1 minute
	s.assertReq(func(r *request) {
		e := newRequest().setParam("symbol", symbol).setParam("windowSize", windowSize)
		s.assertRequestEqual(e, r)
	})
	res, err := s.client.NewListSymbolTickerService().Symbol(symbol).WindowSize(windowSize).Do(newContext())
	r := s.r()
	r.NoError(err)
	e := make([]*SymbolTicker, 0)
	e = append(e, &SymbolTicker{
		Symbol:             "ETHBTC",
		PriceChange:        "0.00004700",
		PriceChangePercent: "0.066",
		WeightedAvgPrice:   "0.07168666",
		OpenPrice:          "0.07093500",
		HighPrice:          "0.07321800",
		LowPrice:           "0.07054200",
		LastPrice:          "0.07098200",
		Volume:             "86992.33370000",
		QuoteVolume:        "6236.18963157",
		OpenTime:           1659097380000,
		CloseTime:          1659183780986,
		FirstId:            359930693,
		LastId:             360209854,
		Count:              279162,
	})
	s.assertSymbolTicker(e, res)
}
// assertSymbolTicker compares expected and actual rolling-window ticker
// slices field by field.
func (s *tickerServiceTestSuite) assertSymbolTicker(e, st []*SymbolTicker) {
	r := s.r()
	for i := range e {
		r.Equal(e[i].Symbol, st[i].Symbol, "Symbol")
		r.Equal(e[i].PriceChange, st[i].PriceChange, "PriceChange")
		r.Equal(e[i].PriceChangePercent, st[i].PriceChangePercent, "PriceChangePercent")
		r.Equal(e[i].WeightedAvgPrice, st[i].WeightedAvgPrice, "WeightedAvgPrice")
		r.Equal(e[i].OpenPrice, st[i].OpenPrice, "OpenPrice")
		r.Equal(e[i].HighPrice, st[i].HighPrice, "HighPrice")
		r.Equal(e[i].LowPrice, st[i].LowPrice, "LowPrice")
		r.Equal(e[i].LastPrice, st[i].LastPrice, "LastPrice")
		r.Equal(e[i].Volume, st[i].Volume, "Volume")
		r.Equal(e[i].QuoteVolume, st[i].QuoteVolume, "QuoteVolume")
		r.Equal(e[i].OpenTime, st[i].OpenTime, "OpenTime")
		r.Equal(e[i].CloseTime, st[i].CloseTime, "CloseTime")
		r.Equal(e[i].FirstId, st[i].FirstId, "FirstId")
		r.Equal(e[i].LastId, st[i].LastId, "LastId")
		r.Equal(e[i].Count, st[i].Count, "Count")
	}
}
|
package main
import (
"log"
"net/http"
"golang.org/x/time/rate"
)
// main serves a trivial rate-limited HTTP endpoint on :8080.
func main() {
	// Token bucket: refill 1 token/second, burst capacity of 3.
	limiter := rate.NewLimiter(1, 3)
	http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
		if !limiter.Allow() {
			writer.WriteHeader(http.StatusTooManyRequests)
			writer.Write([]byte("too many request"))
			return
		}
		log.Println("receiving http request")
		writer.Write([]byte("ok"))
	})
	// ListenAndServe only returns on failure; the original silently
	// discarded that error, so a bind failure exited with status 0.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
|
package abstractfactory
// SportMoterbike is the concrete sport-bike product of the abstract
// factory. (The "Moterbike" spelling matches the package's exported API.)
type SportMoterbike struct{}

// NumWheels reports how many wheels a sport bike has.
func (m *SportMoterbike) NumWheels() int {
	return 2
}

// NumSeats reports how many riders a sport bike seats.
func (m *SportMoterbike) NumSeats() int {
	return 1
}
// GetMoterbikeType returns this product's type tag, the package constant
// SportMoterbikeType.
func (m *SportMoterbike) GetMoterbikeType() int {
	return SportMoterbikeType
}
|
package main
import (
"errors"
"fmt"
)
// Employee is a minimal employee record used by the lookup example.
type Employee struct {
	ID        int
	FirstName string
	LastName  string
	Address   string
}
// main looks up an employee and prints either the record or a not-found
// notice, demonstrating sentinel-error matching with errors.Is.
func main() {
	employee, err := getInformation(1001)
	if errors.Is(err, ErrNotFound) {
		fmt.Printf("NOT FOUND: %v\n", err)
		return
	}
	fmt.Print(employee)
}
// ErrNotFound is the sentinel returned when an employee lookup fails;
// callers match it with errors.Is. NOTE(review): Go convention prefers
// lowercase, unpunctuated error text — changing it would alter output.
var ErrNotFound = errors.New("Employee not found!")
// getInformation looks up an employee by id, translating any downstream API
// failure into ErrNotFound so callers can test with errors.Is.
func getInformation(id int) (*Employee, error) {
	// Bug fix: forward the caller's id — the original always queried the
	// hard-coded id 1000 regardless of the argument.
	employee, err := apiCallEmployee(id)
	if err != nil {
		return nil, ErrNotFound
	}
	return employee, nil
}
// apiCallEmployee stands in for a real employee-service call; it always
// succeeds with a stubbed record (the id is currently unused).
func apiCallEmployee(id int) (*Employee, error) {
	stub := Employee{FirstName: "John", LastName: "Doe"}
	return &stub, nil
}
|
package kpatch
import (
"bytes"
"encoding/gob"
"io"
"os"
"github.com/spf13/afero"
)
// Fs is the afero filesystem used for all file access; tests may swap it
// for an in-memory implementation.
var Fs = afero.NewOsFs()
// init registers the container types deepCopy round-trips through gob so
// nested maps and slices survive interface-typed encoding.
func init() {
	gob.Register(map[interface{}]interface{}{})
	gob.Register([]interface{}{})
}
// deepCopy returns a fully independent copy of m by round-tripping it
// through a gob encode/decode pair. Nested container types must have been
// registered with gob (see init above).
func deepCopy(m map[interface{}]interface{}) (map[interface{}]interface{}, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(m); err != nil {
		return nil, err
	}
	var out map[interface{}]interface{}
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		return nil, err
	}
	return out, nil
}
// inputReaderFn returns a closure that yields one reader per input path on
// each call, in order. The special path "-" maps to stdin. When the inputs
// are exhausted it returns (nil, nil), which callers must treat as the
// end-of-inputs signal.
func inputReaderFn(inputs []string) func() (io.Reader, error) {
	current := 0
	return func() (io.Reader, error) {
		if current >= len(inputs) {
			return nil, nil
		}
		input := inputs[current]
		current++
		if input == "-" {
			return io.Reader(os.Stdin), nil
		}
		// Stat first so a missing file is reported as a not-exist error;
		// other stat failures fall through to Open, which surfaces them.
		_, err := Fs.Stat(input)
		if os.IsNotExist(err) {
			return nil, err
		}
		return Fs.Open(input)
	}
}
|
package entity
import "time"
// Task is a placeholder for a schedulable task; it currently carries no
// fields.
type Task struct {
}

// TaskResult is the JSON-serialisable outcome of one task execution.
type TaskResult struct {
	UUID   int         `json:"uuid"`   // identifier of the originating task
	Type   int         `json:"type"`   // task type tag
	Result interface{} `json:"result"` // arbitrary task-specific payload
	Time   time.Time   `json:"time"`   // when the result was produced
}
|
package main
import (
"fmt"
"io"
"io/ioutil"
"net"
"path"
"strconv"
"strings"
"github.com/3Blades/go-sdk/client/projects"
cssh "golang.org/x/crypto/ssh"
)
// CreateSSHTunnels creates defined in db ssh tunnels
func CreateSSHTunnels(args *Args) error {
sshKeyAuth, err := getSSHKeyAuthMethod(args.ResourceDir)
if err != nil {
return err
}
cli := NewAPIClient(args.ApiRoot, args.ApiKey)
params := projects.NewProjectsServersSSHTunnelsListParams()
params.SetNamespace(args.Namespace)
params.SetProject(args.ProjectID)
params.SetServer(args.ServerID)
res, err := cli.Projects.ProjectsServersSSHTunnelsList(params, cli.AuthInfo)
if err != nil {
return err
}
for _, apiTunnel := range res.Payload {
splitEndpoint := strings.Split(*apiTunnel.Endpoint, ":")
tunnel := &sshTunnel{
local: &endpoint{
port: int(*apiTunnel.LocalPort),
host: "0.0.0.0",
},
server: &endpoint{
host: *apiTunnel.Host,
port: int(*apiTunnel.RemotePort),
},
remote: &endpoint{
host: splitEndpoint[0],
},
config: &cssh.ClientConfig{
User: *apiTunnel.Username,
Auth: []cssh.AuthMethod{sshKeyAuth},
},
}
tunnel.remote.port, err = strconv.Atoi(splitEndpoint[1])
if err != nil {
return err
}
go tunnel.start()
}
return nil
}
// getSSHKeyAuthMethod loads <resourceDir>/.ssh/id_rsa and wraps it as a
// public-key SSH authentication method.
func getSSHKeyAuthMethod(resourceDir string) (cssh.AuthMethod, error) {
	keyPath := path.Join(resourceDir, ".ssh", "id_rsa")
	pem, err := ioutil.ReadFile(keyPath)
	if err != nil {
		return nil, err
	}
	signer, err := cssh.ParsePrivateKey(pem)
	if err != nil {
		return nil, err
	}
	return cssh.PublicKeys(signer), nil
}
// endpoint is a host:port pair describing one leg of a tunnel.
type endpoint struct {
	host string
	port int
}

// String renders the endpoint in dialable "host:port" form.
func (e *endpoint) String() string {
	return e.host + ":" + strconv.Itoa(e.port)
}
// sshTunnel forwards connections accepted on local through the SSH server
// endpoint to the remote endpoint, authenticating with config.
type sshTunnel struct {
	local  *endpoint // where we listen
	server *endpoint // the SSH host we dial through
	remote *endpoint // the final destination reached via the server
	config *cssh.ClientConfig
}
// start listens on the tunnel's local endpoint and forwards every accepted
// connection in its own goroutine. It blocks until listening or accepting
// fails; callers run it via `go` and currently discard the returned error.
func (tunnel *sshTunnel) start() error {
	listener, err := net.Listen("tcp", tunnel.local.String())
	if err != nil {
		return err
	}
	defer listener.Close()
	for {
		conn, err := listener.Accept()
		if err != nil {
			return err
		}
		go tunnel.forward(conn)
	}
}
// forward bridges one local connection to the remote endpoint: it dials the
// SSH server, dials the remote through it, then copies bytes in both
// directions until either side closes. NOTE(review): handleError terminates
// the whole process on a dial failure, and neither connection is explicitly
// closed when copying ends — confirm this is acceptable.
func (tunnel *sshTunnel) forward(localConn net.Conn) {
	serverConn, err := cssh.Dial("tcp", tunnel.server.String(), tunnel.config)
	handleError(err, "Server dial error")
	remoteConn, err := serverConn.Dial("tcp", tunnel.remote.String())
	handleError(err, "Remote dial error")
	copyConn := func(writer, reader net.Conn) {
		_, err := io.Copy(writer, reader)
		handleError(err, "io.Copy error")
	}
	go copyConn(localConn, remoteConn)
	go copyConn(remoteConn, localConn)
}
// handleError logs msg and err via the package logger and exits the process
// (Fatalf) when err is non-nil; it is a no-op otherwise.
func handleError(err error, msg string) {
	if err != nil {
		logger.Fatalf("%s: %s\n", msg, err)
	}
}
|
/*
Consider you have a hash function H which takes strings of length 2n and returns strings of length n and has the nice property that it is collision resistant, i.e. it is hard to find two different strings s≠s′ with the same hash H(s)=H(s′).
You would now like to build a new hash function H′ which takes strings of arbitrary length and maps them to strings of length n, while still being collision resistant.
Lucky for you, already in 1979 a method now known as the Merkle–Damgård construction was published which achieves exactly this.
The task of this challenge will be to implement this algorithm, so we'll first have a look at a formal description of the Merkle–Damgård construction,
before going through a step-by-step example which should show that the approach is simpler than it might appear at first.
Given some integer n>0, a hash function H as described above and an input string s of arbitrary length, the new hash function H′ does the following:
Set l=|s|, the length of s, and split s in chunks of length n, filling up the last chunk with trailing zeros if necessary. This yields m=⌈l/n⌉ many chunks which are labeled c1,c2,…,cm.
Add a leading and a trailing chunk c0 and c[m+1], where c0 is a string consisting of n zeros and c[m+1] is n in binary, padded with leading zeros to length n.
Now iteratively apply H to the current chunk ci appended to the previous result r[i-1]: r[i]=H(r[i-1]c[i]), where r0=c0. (This step might be more clear after looking at the example below.)
The output of H′ is the final result r[m+1].
The Task
Write a program or function which takes as input a positive integer n, a hash function H as black box and a non-empty string s and returns the same result as H′ on the same inputs.
This is code-golf, so the shortest answer in each language wins.
Example
Let's say n=5, so our given hash function H takes strings of length 10 and returns strings of length 5.
Given an input of s="Programming Puzzles", we get the following chunks: s1="Progr", s2="ammin", s3="g Puz" and s4="zles0". Note that s4 needed to be padded to length 5 with one trailing zero.
c0="00000" is just a string of five zeros and c5="00101" is five in binary (101), padded with two leading zeros.
Now the chunks are combined with H:
r0=c0="00000"
r1=H(r0c1)=H("00000Progr")
r2=H(r1c2)=H(H("00000Progr")"ammin")
r3=H(r2c3)=H(H(H("00000Progr")"ammin")"g Puz")
r4=H(r3c4)=H(H(H(H("00000Progr")"ammin")"g Puz")"zles0")
r5=H(r4c5)=H(H(H(H(H("00000Progr")"ammin")"g Puz")"zles0")"00101")
r5 is our output.
Let's have a look how this output would look depending on some choices1 for H:
If H("0123456789")="13579", i.e. H just returns every second character, we get:
r1=H("00000Progr")="00Por"
r2=H("00Porammin")="0oamn"
r3=H("0oamng Puz")="omgPz"
r4=H("omgPzzles0")="mPze0"
r5=H("mPze000101")="Pe011"
So "Pe011" needs to be the output if such a H is given as black box function.
If H simply returns the first 5 chars of its input, the output of H′ is "00000". Similarly if H returns the last 5 chars, the output is "00101".
If H multiplies the character codes of its input and returns the first five digits of this number, e.g. H("PPCG123456")="56613", then H′("Programming Puzzles")="91579".
1 For simplicity, those H are actually not collision resistant, though this does not matter for testing your submission.
*/
package main
import (
"bytes"
"fmt"
"math/big"
"strings"
)
// main exercises md against the four example compression functions from the
// challenge statement; each assert panics on a mismatch.
func main() {
	assert(md(hash1, "Programming Puzzles", 5) == "Pe011")
	assert(md(hash2, "Programming Puzzles", 5) == "00000")
	assert(md(hash3, "Programming Puzzles", 5) == "00101")
	assert(md(hash4, "Programming Puzzles", 5) == "91579")
}
// assert panics when the supplied condition is false; it makes the example
// self-checking.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// md implements the Merkle–Damgård construction described in the header
// comment: fold the n-sized chunks of s (with an all-zero chunk in front
// and a final chunk encoding n in binary) through the compression function
// h, returning the final n-character hash.
func md(h Hash, s string, n int) string {
	m := roundup(len(s), n) // number of data chunks
	r := chunk(s, 0, n)     // r0 = c0 = n zeros
	for i := 1; i <= m+1; i++ {
		r = h(r + chunk(s, i, n))
		if len(r) != n {
			panic("length of current hash is not valid")
		}
	}
	return r
}

// roundup returns ceil(x / n).
func roundup(x, n int) int {
	if x%n != 0 {
		return x/n + 1
	}
	return x / n
}

// chunk returns the i-th chunk of the construction for block size n:
// index 0 is the all-zero leading chunk, indices 1..m are the data chunks
// of s (the last one zero-padded to length n), and index m+1 is the final
// chunk holding n in binary, left-padded with zeros to length n.
func chunk(s string, i, n int) string {
	if i == 0 {
		return strings.Repeat("0", n)
	}
	// Bug fix: the final chunk is the one after the last data chunk
	// (i == m+1), not "any index >= n" as the original tested. The original
	// only worked when m+1 happened to equal n (as in the example); for
	// longer inputs it replaced real data chunks with binary indices, and
	// the true final chunk came out as zeros.
	if i > roundup(len(s), n) {
		return fmt.Sprintf("%0*b", n, n)
	}
	a := (i - 1) * n
	b := a + n
	if b > len(s) {
		b = len(s)
	}
	c := s[a:b]
	if len(c) < n {
		c += strings.Repeat("0", n-len(c))
	}
	return c
}

// hash1 keeps every second character of its input.
func hash1(s string) string {
	w := new(bytes.Buffer)
	for i := 1; i < len(s); i += 2 {
		w.WriteByte(s[i])
	}
	return w.String()
}

// hash2 keeps the first five characters of its input.
func hash2(s string) string {
	n := len(s)
	if n > 5 {
		n = 5
	}
	return s[:n]
}

// hash3 keeps the last five characters of its input.
func hash3(s string) string {
	l := len(s)
	n := 5
	if n > l {
		n = l
	}
	return s[l-n:]
}

// hash4 multiplies the character codes of its input and keeps the first
// five digits of the product.
func hash4(s string) string {
	x := big.NewInt(1)
	for i := 0; i < len(s); i++ {
		x.Mul(x, big.NewInt(int64(s[i])))
	}
	r := x.String()
	if len(r) > 5 {
		r = r[:5]
	}
	return r
}

// Hash is a black-box compression function mapping strings of length 2n to
// strings of length n.
type Hash func(string) string
|
// Package client provides common operations for files in cloud storage
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
*/
package client
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"strings"
"sync"
"time"
"github.com/NVIDIA/dfcpub/dfc"
"github.com/OneOfOne/xxhash"
)
var (
	// transport bounds connection setup for the shared client below.
	// NOTE(review): a 600s TLS handshake timeout is unusually long —
	// confirm it is intentional.
	transport = &http.Transport{
		Dial: (&net.Dialer{
			Timeout: 60 * time.Second,
		}).Dial,
		TLSHandshakeTimeout: 600 * time.Second,
	}
	// client is the shared HTTP client for all package operations.
	client = &http.Client{
		Timeout: 600 * time.Second,
		Transport: transport,
	}
	// Default proxy endpoint and REST API layout; callers may override.
	ProxyProto = "http"
	ProxyIP = "localhost"
	ProxyPort = 8080
	RestAPIVersion = "v1"
	RestAPIResource = "files"
)
// ReqError is an error that also carries the HTTP status code of the
// failed request.
type ReqError struct {
	code    int
	message string
}
// BucketProps describes a bucket: the cloud provider backing it and its
// versioning setting (string-encoded as returned by the proxy).
type BucketProps struct {
	CloudProvider string
	Versioning string
}
// Reader is the interface a client works with to read in data and send to a HTTP server.
// Open yields a fresh ReadCloser over the same data, XXHash returns the
// payload's checksum (xxhash, per the validation logic in Get) and
// Description is a human-readable label for logging.
type Reader interface {
	io.ReadCloser
	io.Seeker
	Open() (io.ReadCloser, error)
	XXHash() string
	Description() string
}
// bytesReaderCloser adapts bytes.Reader to io.ReadCloser with a no-op Close.
type bytesReaderCloser struct {
	bytes.Reader
}

// Close implements io.Closer; there is nothing to release.
func (q *bytesReaderCloser) Close() error {
	return nil
}
// Error implements the error interface for ReqError.
func (err ReqError) Error() string {
	return err.message
}

// newReqError builds a ReqError carrying the given message and HTTP status.
func newReqError(msg string, code int) ReqError {
	return ReqError{code: code, message: msg}
}
// Tcping checks basic TCP connectivity to url, stripping an http:// or
// https:// scheme prefix first; a nil return means the dial succeeded.
func Tcping(url string) error {
	addr := strings.TrimPrefix(url, "http://")
	if addr == url {
		addr = strings.TrimPrefix(url, "https://")
	}
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return err
	}
	conn.Close()
	return nil
}
// discardResponse drains r's body (returning the number of bytes read) and
// converts a transport error or an HTTP error status into a single error
// value; src names the operation for error messages.
func discardResponse(r *http.Response, err error, src string) (int64, error) {
	if err != nil {
		return 0, fmt.Errorf("%s failed, err: %v", src, err)
	}
	if r.StatusCode >= http.StatusBadRequest {
		return 0, fmt.Errorf("Bad status code from %s: http status %d", src, r.StatusCode)
	}
	// Renamed from `len` — the original shadowed the builtin.
	n, rerr := dfc.ReadToNull(bufio.NewReader(r.Body))
	if rerr != nil {
		return 0, fmt.Errorf("Failed to read http response, err: %v", rerr)
	}
	return n, nil
}
// emitError forwards err on errch (when both are non-nil), wrapping it in a
// ReqError that carries the HTTP status when a response is available.
func emitError(r *http.Response, err error, errch chan error) {
	if err == nil || errch == nil {
		return
	}
	if r == nil {
		errch <- err
		return
	}
	errch <- newReqError(err.Error(), r.StatusCode)
}
// Get fetches object keyname from bucket via the proxy, discards the body
// and returns its length. When validate is set it recomputes the body's
// xxhash and compares it against the response's checksum headers. Errors
// are both returned and emitted on errch (when non-nil); wg, if given, is
// marked done on return.
func Get(proxyurl, bucket string, keyname string, wg *sync.WaitGroup, errch chan error, silent bool, validate bool) (int64, error) {
	var (
		hash, hdhash, hdhashtype string
		errstr string
	)
	if wg != nil {
		defer wg.Done()
	}
	url := proxyurl + "/v1/files/" + bucket + "/" + keyname
	r, err := http.Get(url)
	defer func() {
		if r != nil {
			r.Body.Close()
		}
	}()
	// Validation consumes r.Body to compute the hash; it only runs when the
	// GET succeeded and the server advertised an xxhash checksum.
	if validate && err == nil {
		hdhash = r.Header.Get(dfc.HeaderDfcChecksumVal)
		hdhashtype = r.Header.Get(dfc.HeaderDfcChecksumType)
		if hdhashtype == dfc.ChecksumXXHash {
			xx := xxhash.New64()
			if hash, errstr = dfc.ComputeXXHash(r.Body, nil, xx); errstr != "" {
				if errch != nil {
					errch <- errors.New(errstr)
				}
			}
			if hdhash != hash {
				s := fmt.Sprintf("Header's hash %s doesn't match the file's %s \n", hdhash, hash)
				if errch != nil {
					errch <- errors.New(s)
				}
			} else {
				if !silent {
					fmt.Printf("Header's hash %s matches the file's %s \n", hdhash, hash)
				}
			}
		}
	}
	// Drain whatever remains of the body and normalise transport/HTTP
	// errors. NOTE(review): `len` shadows the builtin here.
	len, err := discardResponse(r, err, fmt.Sprintf("GET (object %s from bucket %s)", keyname, bucket))
	emitError(r, err, errch)
	return len, err
}
// Del issues a DELETE for bucket/keyname against the proxy. Failures are
// both returned and forwarded to errch (when non-nil); wg.Done is called on
// exit when a WaitGroup is supplied.
func Del(proxyurl, bucket string, keyname string, wg *sync.WaitGroup, errch chan error, silent bool) (err error) {
	if wg != nil {
		defer wg.Done()
	}
	if !silent {
		fmt.Printf("DEL: %s\n", keyname)
	}
	delurl := proxyurl + "/v1/files/" + bucket + "/" + keyname
	req, httperr := http.NewRequest(http.MethodDelete, delurl, nil)
	if httperr != nil {
		err = fmt.Errorf("Failed to create new http request, err: %v", httperr)
		emitError(nil, err, errch)
		return err
	}
	r, httperr := client.Do(req)
	if httperr != nil {
		err = fmt.Errorf("Failed to delete file, err: %v", httperr)
		emitError(nil, err, errch)
		return err
	}
	defer r.Body.Close()
	_, err = discardResponse(r, err, "DELETE")
	emitError(r, err, errch)
	return err
}
// ListBucket lists the objects in bucket via the proxy. When injson is
// non-empty it is sent as a JSON GetMsg body controlling the listing;
// otherwise a plain GET is issued. Returns the decoded BucketList.
func ListBucket(proxyurl, bucket string, injson []byte) (*dfc.BucketList, error) {
	var (
		url     = proxyurl + "/v1/files/" + bucket
		err     error
		request *http.Request
		r       *http.Response
	)
	if len(injson) == 0 {
		r, err = client.Get(url)
	} else {
		request, err = http.NewRequest("GET", url, bytes.NewBuffer(injson))
		if err == nil {
			request.Header.Set("Content-Type", "application/json")
			r, err = client.Do(request)
		}
	}
	if err != nil {
		return nil, err
	}
	// Close the body on every path; the original leaked it on the
	// bad-status return below.
	defer r.Body.Close()
	if r.StatusCode >= http.StatusBadRequest {
		return nil, fmt.Errorf("List bucket %s failed, HTTP status %d", bucket, r.StatusCode)
	}
	var reslist = &dfc.BucketList{}
	reslist.Entries = make([]*dfc.BucketEntry, 0, 1000)
	b, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return nil, fmt.Errorf("Failed to read json, err: %v", err)
	}
	if err = json.Unmarshal(b, reslist); err != nil {
		return nil, fmt.Errorf("Failed to json-unmarshal, err: %v [%s]", err, string(b))
	}
	return reslist, nil
}
// Evict asks DFC to evict bucket/fname from its cache by sending a DELETE
// carrying an ActEvict action message.
func Evict(proxyurl, bucket string, fname string) error {
	msg := dfc.ActionMsg{Action: dfc.ActEvict}
	msg.Name = bucket + "/" + fname
	injson, err := json.Marshal(msg)
	if err != nil {
		return fmt.Errorf("Failed to marshal EvictMsg: %v", err)
	}
	req, err := http.NewRequest("DELETE", proxyurl+"/v1/files/"+bucket+"/"+fname, bytes.NewBuffer(injson))
	if err != nil {
		return fmt.Errorf("Failed to create request: %v", err)
	}
	r, err := client.Do(req)
	if r != nil {
		r.Body.Close()
	}
	return err
}
// doListRangeCall wraps listrangemsg in an ActionMsg and sends it to the
// bucket endpoint with the given HTTP method. The wait flag only matters
// inside the marshaled message; the request itself is identical either way.
func doListRangeCall(proxyurl, bucket, action, method string, listrangemsg interface{}, wait bool) error {
	actionMsg := dfc.ActionMsg{Action: action, Value: listrangemsg}
	injson, err := json.Marshal(actionMsg)
	if err != nil {
		// Fixed typo in the original message ("marhsal").
		return fmt.Errorf("Failed to marshal ActionMsg: %v", err)
	}
	req, err := http.NewRequest(method, proxyurl+"/v1/files/"+bucket+"/", bytes.NewBuffer(injson))
	if err != nil {
		return fmt.Errorf("Failed to create request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")
	// The original branched on wait but issued the identical request in
	// both branches; the branch is collapsed here.
	r, err := client.Do(req)
	if r != nil {
		r.Body.Close()
	}
	return err
}
// PrefetchList asks the cluster to prefetch the named objects from bucket.
func PrefetchList(proxyurl, bucket string, fileslist []string, wait bool, deadline time.Duration) error {
	msg := dfc.ListMsg{
		Objnames:         fileslist,
		RangeListMsgBase: dfc.RangeListMsgBase{Deadline: deadline, Wait: wait},
	}
	return doListRangeCall(proxyurl, bucket, dfc.ActPrefetch, http.MethodPost, msg, wait)
}

// PrefetchRange asks the cluster to prefetch objects matching prefix, regex
// and range rng from bucket.
func PrefetchRange(proxyurl, bucket, prefix, regex, rng string, wait bool, deadline time.Duration) error {
	msg := dfc.RangeMsg{
		Prefix: prefix, Regex: regex, Range: rng,
		RangeListMsgBase: dfc.RangeListMsgBase{Deadline: deadline, Wait: wait},
	}
	return doListRangeCall(proxyurl, bucket, dfc.ActPrefetch, http.MethodPost, msg, wait)
}

// DeleteList deletes the named objects from bucket.
func DeleteList(proxyurl, bucket string, fileslist []string, wait bool, deadline time.Duration) error {
	msg := dfc.ListMsg{
		Objnames:         fileslist,
		RangeListMsgBase: dfc.RangeListMsgBase{Deadline: deadline, Wait: wait},
	}
	return doListRangeCall(proxyurl, bucket, dfc.ActDelete, http.MethodDelete, msg, wait)
}

// DeleteRange deletes objects matching prefix, regex and range rng from bucket.
func DeleteRange(proxyurl, bucket, prefix, regex, rng string, wait bool, deadline time.Duration) error {
	msg := dfc.RangeMsg{
		Prefix: prefix, Regex: regex, Range: rng,
		RangeListMsgBase: dfc.RangeListMsgBase{Deadline: deadline, Wait: wait},
	}
	return doListRangeCall(proxyurl, bucket, dfc.ActDelete, http.MethodDelete, msg, wait)
}

// EvictList evicts the named objects from bucket.
func EvictList(proxyurl, bucket string, fileslist []string, wait bool, deadline time.Duration) error {
	msg := dfc.ListMsg{
		Objnames:         fileslist,
		RangeListMsgBase: dfc.RangeListMsgBase{Deadline: deadline, Wait: wait},
	}
	return doListRangeCall(proxyurl, bucket, dfc.ActEvict, http.MethodDelete, msg, wait)
}

// EvictRange evicts objects matching prefix, regex and range rng from bucket.
func EvictRange(proxyurl, bucket, prefix, regex, rng string, wait bool, deadline time.Duration) error {
	msg := dfc.RangeMsg{
		Prefix: prefix, Regex: regex, Range: rng,
		RangeListMsgBase: dfc.RangeListMsgBase{Deadline: deadline, Wait: wait},
	}
	return doListRangeCall(proxyurl, bucket, dfc.ActEvict, http.MethodDelete, msg, wait)
}
// fastRandomFilename is taken from https://stackoverflow.com/questions/22892120/how-to-generate-a-random-string-of-a-fixed-length-in-golang
const (
letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
letterIdxBits = 6 // 6 bits to represent a letter index
letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
)
func FastRandomFilename(src *rand.Rand, fnlen int) string {
b := make([]byte, fnlen)
// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
for i, cache, remain := fnlen-1, src.Int63(), letterIdxMax; i >= 0; {
if remain == 0 {
cache, remain = src.Int63(), letterIdxMax
}
if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
b[i] = letterBytes[idx]
i--
}
cache >>= letterIdxBits
remain--
}
return string(b)
}
// HeadBucket issues a HEAD request for bucket and extracts the cloud
// provider and versioning properties from the response headers.
func HeadBucket(proxyurl, bucket string) (bucketprops *BucketProps, err error) {
	bucketprops = &BucketProps{}
	var r *http.Response
	if r, err = client.Head(proxyurl + "/v1/files/" + bucket); err != nil {
		return
	}
	defer r.Body.Close()
	if r.StatusCode >= http.StatusBadRequest {
		err = fmt.Errorf("Head bucket %s failed, HTTP status %d", bucket, r.StatusCode)
		return
	}
	bucketprops.CloudProvider = r.Header.Get(dfc.CloudProvider)
	bucketprops.Versioning = r.Header.Get(dfc.Versioning)
	return
}
// checkHTTPStatus converts a status code >= 400 into a ReqError naming op;
// any other status yields nil.
func checkHTTPStatus(resp *http.Response, op string) error {
	if resp.StatusCode < http.StatusBadRequest {
		return nil
	}
	return ReqError{
		code:    resp.StatusCode,
		message: fmt.Sprintf("Bad status code from %s", op),
	}
}
// discardHTTPResp drains resp.Body through a buffered reader, ignoring both
// the byte count and any read error.
func discardHTTPResp(resp *http.Response) {
	dfc.ReadToNull(bufio.NewReader(resp.Body))
}
// Put uploads reader's content as bucket/key via a PUT to the proxy.
// req.GetBody is supplied so the HTTP client can replay the body across
// redirects, and the reader's xxhash (when present) is attached as DFC
// checksum headers.
func Put(proxyURL string, reader Reader, bucket string, key string, silent bool) error {
	if !silent {
		fmt.Printf("PUT: %s/%s\n", bucket, key)
	}
	body, err := reader.Open()
	if err != nil {
		return fmt.Errorf("Failed to open reader, err: %v", err)
	}
	defer body.Close()
	req, err := http.NewRequest(http.MethodPut, proxyURL+"/v1/files/"+bucket+"/"+key, body)
	if err != nil {
		return fmt.Errorf("Failed to create new http request, err: %v", err)
	}
	// The HTTP package doesn't automatically set GetBody for non-file
	// bodies; without it the redirect would have to be handled manually.
	req.GetBody = func() (io.ReadCloser, error) {
		return reader.Open()
	}
	if reader.XXHash() != "" {
		req.Header.Set(dfc.HeaderDfcChecksumType, dfc.ChecksumXXHash)
		req.Header.Set(dfc.HeaderDfcChecksumVal, reader.XXHash())
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	statusErr := checkHTTPStatus(resp, "PUT")
	discardHTTPResp(resp)
	return statusErr
}
// PutAsync runs Put and reports a failure either on errch or, when no
// channel was given, on stdout. wg.Done is always called on exit.
func PutAsync(wg *sync.WaitGroup, proxyURL string, reader Reader, bucket string, key string, errch chan error, silent bool) {
	defer wg.Done()
	err := Put(proxyURL, reader, bucket, key, silent)
	if err == nil {
		return
	}
	if errch == nil {
		// Fixed message: the original read "do know how to report error".
		fmt.Println("Error channel is not given, don't know how to report error", err)
		return
	}
	errch <- err
}
// CreateLocalBucket sends a HTTP request to a proxy and asks it to create a
// local bucket.
func CreateLocalBucket(proxyURL, bucket string) error {
	msg, err := json.Marshal(dfc.ActionMsg{Action: dfc.ActCreateLB})
	if err != nil {
		return err
	}
	req, reqErr := http.NewRequest("POST", proxyURL+"/v1/files/"+bucket, bytes.NewBuffer(msg))
	if reqErr != nil {
		return reqErr
	}
	r, doErr := client.Do(req)
	if r != nil {
		r.Body.Close()
	}
	// FIXME: A few places are doing this already, need to address them
	time.Sleep(time.Second * 2)
	return doErr
}
// DestroyLocalBucket deletes a local bucket by sending an ActDestroyLB
// action message with a DELETE request.
func DestroyLocalBucket(proxyURL, bucket string) error {
	msg, err := json.Marshal(dfc.ActionMsg{Action: dfc.ActDestroyLB})
	if err != nil {
		return err
	}
	req, reqErr := http.NewRequest("DELETE", proxyURL+"/v1/files/"+bucket, bytes.NewBuffer(msg))
	if reqErr != nil {
		return reqErr
	}
	resp, doErr := client.Do(req)
	if resp != nil {
		resp.Body.Close()
	}
	return doErr
}
// ListObjects returns a slice of object names of all objects that match the
// prefix in a bucket.
func ListObjects(proxyURL, bucket, prefix string) ([]string, error) {
	msg, err := json.Marshal(&dfc.GetMsg{GetPrefix: prefix})
	if err != nil {
		return nil, err
	}
	data, err := ListBucket(proxyURL, bucket, msg)
	if err != nil {
		return nil, err
	}
	var objs []string
	for _, obj := range data.Entries {
		// Skip directory entries (names ending in '/'); the empty-name
		// guard prevents an index-out-of-range panic on malformed entries.
		if obj.Name == "" || obj.Name[len(obj.Name)-1] == '/' {
			continue
		}
		objs = append(objs, obj.Name)
	}
	return objs, nil
}
|
package crypto
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strconv"
"github.com/NavenduDuari/goinfo/crypto/utils"
)
// coinData mirrors one entry of the Nomics /currencies/ticker response;
// only the fields rendered by showPrice are mapped. Numeric values arrive
// as strings and are parsed on use.
type coinData struct {
	Id string `json:"id"`
	Name string `json:"name"`
	Price string `json:"price"`
	Rank string `json:"rank"`
	OneDay oneDay `json:"1D"`
}

// oneDay holds the one-day interval sub-object of a ticker entry.
type oneDay struct {
	PriceChange string `json:"price_change"`
}
var (
	// baseUrl is the Nomics ticker endpoint; the API key is appended first.
	baseUrl = "https://api.nomics.com/v1/currencies/ticker?key="
	// currencySymbol is the display symbol of the active conversion
	// currency; getPrice overwrites it when a conversion is requested.
	currencySymbol = utils.CurrencyDetails["INR"].Symbol
)
// getPrice fetches current prices for the requested coins and conversion
// currency from the Nomics ticker API and renders them to w.
// Defaults: coins BTC,ETH,XRP converted to INR.
func getPrice(w http.ResponseWriter, coin, conv string) {
	var (
		ids     = "&ids=BTC,ETH,XRP"
		convert = "&convert=INR"
		key     = utils.NomicsApiKey
	)
	if coin != "" {
		ids = "&ids=" + coin
	}
	if conv != "" {
		convert = "&convert=" + conv
		currencySymbol = utils.CurrencyDetails[conv].Symbol
	}
	finalUrl := baseUrl + key + ids + "&interval=1d" + convert
	res, err := http.Get(finalUrl)
	if err != nil {
		fmt.Println("Unable to get price")
		return
	}
	// Close the body (the original leaked it) and surface read/parse
	// failures instead of silently rendering an empty page.
	defer res.Body.Close()
	responseData, err := ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Println("Unable to read price response")
		return
	}
	var coinDataArrObj []coinData
	if err := json.Unmarshal(responseData, &coinDataArrObj); err != nil {
		fmt.Println("Unable to parse price response")
		return
	}
	showPrice(w, coinDataArrObj)
}
// showPrice renders one info/price/rank section per coin into w. The
// percentage shown is the 1-day change relative to the previous price
// (change / (change + current price)).
func showPrice(w http.ResponseWriter, coinDataArrObj []coinData) {
	content := ""
	for _, coin := range coinDataArrObj {
		change, _ := strconv.ParseFloat(coin.OneDay.PriceChange, 64)
		price, _ := strconv.ParseFloat(coin.Price, 64)
		percent := fmt.Sprintf("%.2f", change/(change+price)*100)
		content += PrintCoinInfo(coin.Id, coin.Name)
		if change < 0 {
			content += PrintPriceDown(currencySymbol, coin.Price, percent)
		} else {
			content += PrintPriceUp(currencySymbol, coin.Price, percent)
		}
		content += PrintRank(coin.Rank)
	}
	io.WriteString(w, content)
}
// getSuggestion writes the supported coin and conversion hints to w.
func getSuggestion(w http.ResponseWriter) {
	io.WriteString(w, PrintCoinSuggestion()+"\n"+PrintConvSuggestion())
}
// getHelp writes the command usage text (available flags plus examples) to w.
func getHelp(w http.ResponseWriter) {
	content := `*crypto* gives prices of crypto-currencies.` + " \n " + `
	commands available:
	*--coin* //to specify coin
	*--conv* //to specify conversion
	*--help* //to get help
	*--suggest* //to get suggestion
	Example:
	*crypto* //gives price default coins in default conversion
	*crypto --coin=BTC,LTC,BNB --conv=EUR* //gives price of LTC in EUR`
	io.WriteString(w, content)
}
// Check dispatches the crypto command: --suggest and --help short-circuit,
// an invalid command falls back to the help text, otherwise prices are
// fetched for the requested coins/conversion.
func Check(w http.ResponseWriter, args map[string]string, isCmdValid bool) {
	switch {
	case args["--suggest"] == "true":
		getSuggestion(w)
	case args["--help"] == "true" || !isCmdValid: // idiomatic negation, not "== false"
		getHelp(w)
	default:
		getPrice(w, args["--coin="], args["--conv="])
	}
}
|
/*
* @lc app=leetcode.cn id=1941 lang=golang
*
* [1941] 检查是否所有字符出现次数相同
*/
package main
// @lc code=start
// areOccurrencesEqual reports whether every character that appears in s
// occurs the same number of times. The empty string vacuously qualifies
// (the original indexed s[0] and panicked on it). s is assumed to contain
// only lowercase ASCII letters, per the problem constraints.
func areOccurrencesEqual(s string) bool {
	if len(s) == 0 {
		return true
	}
	counter := make([]int, 26)
	for i := 0; i < len(s); i++ {
		counter[s[i]-'a']++
	}
	want := counter[s[0]-'a']
	for _, c := range counter {
		if c != 0 && c != want {
			return false
		}
	}
	return true
}
// @lc code=end
|
package geo
import (
"math"
)
const (
EarthRadiusMi = 3959
EarthRadiusKm = 6371
)
// Calculate Haversine distance between two lat/lon points on Earth.
// Takes parameters in radians
// Returns result in meters
func Haversine(startLat, startLon, endLat, endLon float64) float64 {
dLat := endLat - startLat
dLon := endLon - startLon
a := math.Pow(math.Sin(dLat/2), 2) + math.Cos(startLat)*math.Cos(endLat)*math.Pow(math.Sin(dLon/2), 2)
c := 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a))
d := EarthRadiusKm * c
return d
}
// Round rounds val to `places` decimal places: if the fractional part of
// the shifted value is >= roundOn it rounds up (ceil), otherwise down.
// Rounding function courtesy David Vaini
// https://gist.github.com/DavidVaini/10308388
func Round(val float64, roundOn float64, places int) (newVal float64) {
	pow := math.Pow(10, float64(places))
	digit := pow * val
	_, frac := math.Modf(digit)
	rounded := math.Floor(digit)
	if frac >= roundOn {
		rounded = math.Ceil(digit)
	}
	return rounded / pow
}
// DistVicenty is a re-write of the function LatLong.distVincenty
// by Chris Veness ((c) 2002-2006 Chris Veness)
// http://www.5thandpenn.com/GeoMaps/GMapsExamples/distanceComplete2.html
// https://www.movable-type.co.uk/scripts/latlong-vincenty.html
// based on DIRECT AND INVERSE SOLUTIONS OF GEODESICS ON THE ELLIPSOID
// WITH APPLICATION OF NESTED EQUATIONS, T. Vincenty, April 1975
// https://www.ngs.noaa.gov/PUBS_LIB/inverse.pdf
// Calculate geodesic distance between two points specified by
// latitude/longitude (in radians) using Vincenty inverse formula for
// ellipsoids (WGS-84).
// NOTE(review): despite the original "metres" comment, the result is
// scaled by 1e-3 and rounded to 3 places before returning, i.e. it is in
// kilometres. Returns 0 for coincident points and -1 when the iteration
// fails to converge within 20 rounds.
func DistVincenty(startLat, startLon, endLat, endLon float64) float64 {
	a := 6378137.0    // WGS-84 semi-major axis, metres
	b := 6356752.3142 // WGS-84 semi-minor axis, metres
	f := 1 / 298.257223563 // WGS-84 ellipsiod
	L := endLon - startLon
	U1 := math.Atan((1 - f) * math.Tan(startLat)) // reduced latitudes
	U2 := math.Atan((1 - f) * math.Tan(endLat))
	sinU1 := math.Sin(U1)
	cosU1 := math.Cos(U1)
	sinU2 := math.Sin(U2)
	cosU2 := math.Cos(U2)
	lambda := L
	lambdaP := 2 * math.Pi
	iterLimit := 20
	cosSqAlpha := 1.0
	sinSigma := 1.0
	cos2SigmaM := 1.0
	cosSigma := 1.0
	sigma := 1.0
	// Iterate lambda until successive values agree to within 1e-12 rad.
	for ((math.Abs(lambda - lambdaP)) > math.Pow10(-12)) && (iterLimit > 0) {
		sinLambda := math.Sin(lambda)
		cosLambda := math.Cos(lambda)
		sinSigma = math.Sqrt((cosU2*sinLambda)*(cosU2*sinLambda) + (cosU1*sinU2-sinU1*cosU2*cosLambda)*(cosU1*sinU2-sinU1*cosU2*cosLambda))
		if sinSigma == 0 {
			return 0.0 // co-incident points
		}
		cosSigma = sinU1*sinU2 + cosU1*cosU2*cosLambda
		sigma = math.Atan2(sinSigma, cosSigma)
		sinAlpha := cosU1 * cosU2 * sinLambda / sinSigma
		cosSqAlpha = 1 - sinAlpha*sinAlpha
		cos2SigmaM = cosSigma - 2*sinU1*sinU2/cosSqAlpha
		if math.IsNaN(cos2SigmaM) {
			cos2SigmaM = 0.0 // equatorial line: cosSqAlpha=0 (§6)
		}
		C := f / 16 * cosSqAlpha * (4 + f*(4-3*cosSqAlpha))
		lambdaP = lambda
		lambda = L + (1-C)*f*sinAlpha*(sigma+C*sinSigma*(cos2SigmaM+C*cosSigma*(-1+2*cos2SigmaM*cos2SigmaM)))
		iterLimit = iterLimit - 1
	}
	if iterLimit == 0 {
		return -1.0 // formula failed to converge
	}
	uSq := cosSqAlpha * (a*a - b*b) / (b * b)
	A := 1 + uSq/16384*(4096+uSq*(-768+uSq*(320-175*uSq)))
	B := uSq / 1024 * (256 + uSq*(-128+uSq*(74-47*uSq)))
	deltaSigma := B * sinSigma * (cos2SigmaM + B/4*(cosSigma*(-1+2*cos2SigmaM*cos2SigmaM)-B/6*cos2SigmaM*(-3+4*sinSigma*sinSigma)*(-3+4*cos2SigmaM*cos2SigmaM)))
	s := b * A * (sigma - deltaSigma)
	// Convert metres -> kilometres, then round to 3 decimal places.
	s = s * math.Pow10(-3)
	s = (Round(s, 0.5, 3))
	return s
}
|
package main
import (
"fmt"
"strings"
)
// main walks through the Go tour topics in order: pointers, structs,
// struct pointers, arrays, slices (literals, re-slicing, length/capacity,
// nil slices, make, slices of slices), append, and range loops.
func main() {
	fmt.Println("****Pointers****")
	i, j := 42, 2701
	p := &i // p = address of i
	fmt.Println(p)
	fmt.Println(*p) // print value referenced by p
	*p = 21         //value referenced by p = 21
	fmt.Println(i)
	p = &j       // p = address of j
	*p = *p / 37 // value referenced by p = value referenced by p / 37 i.e. j = j/37
	fmt.Println(j)
	fmt.Println("****Structs****")
	v := Vertex{1, 2}
	fmt.Println(v)
	v.x = 98
	fmt.Println(v)
	//struct pointers
	ptr := &v   // ptr = address of v
	ptr.x = 1e9 // or (*ptr).x which is more accurate but this notation is too cumbersome
	fmt.Println(v)
	fmt.Println(avp)
	fmt.Println(avp.x)
	fmt.Println(avp.y)
	fmt.Println("****Arrays****")
	var a [10]int
	for j := 0; j < 10; j++ {
		a[j] = j * 2
	}
	u := [4]string{"hi", "I", "am", "xyz"}
	fmt.Println(u)
	primes := [6]int{2, 3, 5, 7, 11, 13}
	var s []int = primes[1:4] //slices are dynamically sized, flexible views into the elements of an array; in this context, the []int is unnecessary
	fmt.Println(s)
	// slices do not store any data of their own.
	// They only provide views into an array and are pretty much references to it.
	//So changes in slices mean changes in the array as well
	first := u[0:2]
	last := u[1:3]
	fmt.Println(first, last)
	first[0] = "Hello!!"
	fmt.Println(first, last)
	fmt.Println(u)
	//slice literals -> like array literals without having to mention the size
	slice1 := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}
	fmt.Println(slice1)
	slice2 := []bool{true, false, true, true, false, true}
	fmt.Println(slice2)
	slice3 := []struct { //** note this way of initializing a slice of structs
		i int
		b bool
	}{
		{2, false},
		{3, true},
		{4, true},
		{5, false},
		{76, false},
		{13, false},
	}
	fmt.Println(slice3)
	//for slices, the default bounds start from 0 for lower and the upper bound is at the length of the slice
	var slice4 = []int{1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89}
	fmt.Println(slice4)
	slice4 = slice4[1:6]
	fmt.Println("slice4[1:6]=", slice4)
	slice4 = slice4[:4]
	fmt.Println("slice4[:4]=", slice4)
	slice4 = slice4[1:]
	fmt.Println("slice4[1:]=", slice4)
	//length of the slice is the number of elements it contains
	//capacity is the size of the underlying array counting from the first element of the slice
	slice5 := []int{9, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	fmt.Println(slice5)
	slice5 = slice5[:0]
	fmt.Println("slice5[:0]=", slice5)
	slice5 = slice5[:4]
	fmt.Println("slice5[:4]=", slice5)
	slice5 = slice5[2:]
	fmt.Println("slice5[2:]=", slice5)
	fmt.Println("length: ", len(slice5))
	fmt.Println("Capacity: ", cap(slice5))
	//nil slices are those with zero value
	var nilSlice []int
	fmt.Println(nilSlice, len(nilSlice), cap(nilSlice))
	if nilSlice == nil {
		fmt.Println("nil!")
	}
	//slices can be made with the make() func. It allocates a zeroed array and returns a slice that refers to that array
	slice6 := make([]int, 5)    //length of this slice is 5
	slice7 := make([]int, 0, 5) //length of slice = 0 and capacity = 5
	fmt.Println("slice6", slice6)
	fmt.Println("slice7", slice7)
	//slices can contain any type. Including other slices
	board := [][]string{
		[]string{"_", "_", "_"},
		[]string{"_", "_", "_"},
		[]string{"_", "_", "_"},
	}
	board[0][0] = "X"
	board[2][2] = "O"
	board[1][2] = "X"
	board[1][0] = "O"
	board[0][2] = "X"
	for i := 0; i < len(board); i++ {
		fmt.Printf("%s\n", strings.Join(board[i], " "))
	}
	// append adds new elements to the slice
	slice8 := make([]int, 0)
	fmt.Println("slice8", slice8)
	slice8 = append(slice8, 0)
	fmt.Println("slice8", slice8)
	slice8 = append(slice8, 1)
	fmt.Println("slice8", slice8)
	slice8 = append(slice8, 2, 3, 4, 5, 6)
	fmt.Println("slice8", slice8)
	//the 'range' form for loop iterates over a slice or map. Sort of like iterators
	var pow = []int{1, 2, 4, 8, 16, 32, 64, 128}
	for i, r := range pow {
		fmt.Printf("%d = %d, ", i, r)
	}
	fmt.Println()
	//other alternatives here
	for i := range pow {
		fmt.Printf("%d, ", pow[i])
	}
	for _, value := range pow {
		fmt.Printf("%d\n", value)
	}
	fmt.Println()
}
var ( //struct literals -> you denote a newly allocated struct value by listing the values of its fields
	v1 = Vertex{1, 2}   // has type Vertex
	v2 = Vertex{x: 1}   // y:0 is implicit
	v3 = Vertex{}       // x:0 and y:0
	avp = &Vertex{1, 2} // has type *Vertex
)

// Vertex is a simple 2-D point with unexported integer coordinates.
type Vertex struct {
	x int
	y int
}
|
package lily
import (
"fmt"
"log"
"runtime"
"runtime/debug"
)
func ErrPanic(err error, msg ...string) {
if err != nil {
if len(msg) > 0 {
log.Panicln(err, msg[0])
} else {
log.Panicln(err)
}
}
}
func ErrPanicSilent(err error) {
if err != nil {
panic(err)
}
}
func ErrPanicWS(err error, msg ...string) {
if err != nil {
if len(msg) > 0 {
log.Panicln(err, msg[0], "\r\n", string(debug.Stack()))
} else {
log.Panicln(err, "\r\n", string(debug.Stack()))
}
}
}
// ErrFatal calls log.Fatalf on a non-nil err, reporting the caller's
// file:line, the optional messages (printed as a slice), and the error.
// A nil err is a no-op; a non-nil err terminates the process.
func ErrFatal(err error, msg ...string) {
	if err == nil {
		return
	}
	_, file, line, _ := runtime.Caller(1)
	log.Fatalf("fatal (%s:%d):\n\t%s\n\t%s\n", file, line, msg, err.Error())
}
// VErr is a structured error: a machine-readable Code plus a human-readable
// Detail, with optional detail parameters, the underlying error, and extra
// strings used when building HTTP responses.
type VErr struct {
	Err        error
	Code       string
	Detail     string
	DetailPars map[string]string
	Extras     []string // for http-responding
}

// Error implements the error interface as "<Code>, <Detail>".
func (e *VErr) Error() string {
	return fmt.Sprintf("%s, %s", e.Code, e.Detail)
}

// NewVErr bundles an underlying error together with its code, detail text
// and detail parameters into a *VErr.
func NewVErr(err error, code, det string, detP map[string]string) *VErr {
	v := VErr{Err: err, Code: code, Detail: det, DetailPars: detP}
	return &v
}
|
package irc
import (
"container/ring"
"fmt"
log "github.com/Sirupsen/logrus"
"github.com/fluffle/goirc/client"
"github.com/fluffle/goirc/state"
"go-chat-relay/src/globals"
"go-chat-relay/src/helpers"
"go-chat-relay/src/plug"
"go-chat-relay/src/plug/config"
"go-chat-relay/src/plugs"
"runtime"
"time"
)
// Tuning for the goirc client connection.
const (
	PING_FREQ       = 30 * time.Second // server ping interval
	SPLIT_LEN       = 255              // max message length before splitting
	TIMEOUT         = 60 * time.Second // connection timeout
	CHANGE_CAPACITY = 64               // buffer size of the changes channel
)
// init registers this plug's constructor under the name "irc" with the
// global plug registry.
func init() {
	plugs.Register("irc", New)
}
// nextNick returns a generator that cycles through the supplied nick list.
// Each call yields the next nick, starting from the second entry (the
// first is already in use via state.Nick.Nick); the string argument is
// ignored.
func nextNick(nicks []string) func(string) string {
	r := ring.New(len(nicks))
	for _, nick := range nicks {
		r.Value = nick
		r = r.Next()
	}
	return func(string) string {
		r = r.Next()
		return r.Value.(string)
	}
}
// logPanic is installed as the goirc Recover handler; it logs the panic
// value together with the file:line of the panicking handler.
func logPanic(conn *client.Conn, line *client.Line) {
	if err := recover(); err != nil {
		_, f, l, _ := runtime.Caller(2)
		// logrus's Error does not interpret format verbs; Errorf is
		// required for %s/%d/%+v to expand.
		log.Errorf("%s:%d: panic: %+v", f, l, err)
	}
}
// IRCPlug relays messages between the configured IRC channels and the rest
// of the relay via the changes channel.
type IRCPlug struct {
	config config.Config // connection/identity/channel configuration
	client *client.Conn // underlying goirc connection
	endloop chan bool // signaled on disconnect to release Loop
	changes chan plug.Change // buffered stream of relayed messages
}
// Connect dials the configured IRC server.
func (p *IRCPlug) Connect() error { return p.client.Connect() }

// Loop blocks until the connection signals disconnect via endloop.
func (p *IRCPlug) Loop() { <-p.endloop }

// Changes exposes the stream of relayed messages.
func (p *IRCPlug) Changes() <-chan plug.Change { return p.changes }
// Send broadcasts msg as a PRIVMSG to every configured channel.
// It always returns nil; goirc's Privmsg does not report errors.
func (p *IRCPlug) Send(msg string) error {
	for _, channel := range p.config.Channels {
		p.client.Privmsg(channel, msg)
	}
	return nil
}
// onDisconnected releases Loop by signaling endloop.
func (p *IRCPlug) onDisconnected(c *client.Conn, l *client.Line) { p.endloop <- true }

// onConnected joins every configured channel once the server accepts us.
func (p *IRCPlug) onConnected(c *client.Conn, l *client.Line) {
	for _, ch := range p.config.Channels {
		c.Join(ch)
	}
}
// onMessage relays a channel PRIVMSG as a plug.Change, ignoring traffic
// outside the configured channels and messages sent by one of our own
// nicks.
func (p *IRCPlug) onMessage(c *client.Conn, l *client.Line) {
	channel := l.Args[0]
	msg := l.Args[1]
	if !helpers.ContainsString(p.config.Channels, channel) {
		// not one of our channels (private message etc.)
		return
	}
	if helpers.ContainsString(p.config.Identity.Nick, l.Nick) {
		// never relay our own traffic
		return
	}
	change := plug.Change{
		User:    l.Nick,
		Channel: channel,
		Server:  p.config.Connection.Addr,
		Data:    msg,
	}
	p.changes <- change
}
// onTopic relays a topic change in one of the configured channels as a
// plug.Change; topics of other channels are ignored.
func (p *IRCPlug) onTopic(c *client.Conn, l *client.Line) {
	channel := l.Args[0]
	msg := l.Args[1]
	if !helpers.ContainsString(p.config.Channels, channel) {
		// not one of our channels
		return
	}
	p.changes <- plug.Change{
		User:    l.Nick,
		Channel: channel,
		Server:  p.config.Connection.Addr,
		Data:    fmt.Sprintf("Topic changed: %s", msg),
	}
}
// New assembles an IRCPlug from pc: it builds the goirc identity and client
// config (nick rotation, ping/split/timeout tuning, panic recovery), wires
// the lifecycle and message handlers, and returns the plug ready for
// Connect.
func New(pc config.Config) plug.Plug {
	// First nick is used directly; the rest rotate via nextNick.
	st := &state.Nick{
		Nick: pc.Identity.Nick[0],
		Ident: pc.Identity.RealName,
		Name: globals.Name(),
	}
	cf := &client.Config{
		Server: pc.Connection.Addr,
		Pass: pc.Identity.Password,
		Me: st,
		NewNick: nextNick(pc.Identity.Nick),
		Version: globals.Version(),
		PingFreq: PING_FREQ,
		SplitLen: SPLIT_LEN,
		Timeout: TIMEOUT,
		Recover: logPanic,
		Flood: true,
	}
	cl := client.Client(cf)
	p := &IRCPlug{
		config: pc,
		client: cl,
		endloop: make(chan bool),
		changes: make(chan plug.Change, CHANGE_CAPACITY),
	}
	cl.HandleFunc(client.CONNECTED, p.onConnected)
	cl.HandleFunc(client.DISCONNECTED, p.onDisconnected)
	cl.HandleFunc(client.PRIVMSG, p.onMessage)
	cl.HandleFunc(client.TOPIC, p.onTopic)
	return p
}
|
package gosnowth
import (
"bytes"
"context"
"encoding/json"
"fmt"
"path"
"strconv"
"time"
)
// NumericAllValueResponse values represent numeric data responses from IRONdb.
type NumericAllValueResponse struct {
	Data []NumericAllValue // one entry per [timestamp, rollup] tuple
}
// UnmarshalJSON decodes a JSON format byte slice into a
// NumericAllValueResponse. The wire format is a list of
// [timestamp, {rollup fields...}] tuples.
func (nv *NumericAllValueResponse) UnmarshalJSON(b []byte) error {
	var tuples [][]interface{}
	if err := json.Unmarshal(b, &tuples); err != nil {
		return fmt.Errorf("failed to deserialize numeric average response: %w",
			err)
	}
	nv.Data = []NumericAllValue{}
	for _, tuple := range tuples {
		value := NumericAllValue{}
		if fields, ok := tuple[1].(map[string]interface{}); ok {
			// Round-trip the map through JSON so the struct tags drive the
			// field mapping.
			fieldBytes, err := json.Marshal(fields)
			if err != nil {
				return fmt.Errorf(
					"failed to marshal intermediate value from tuple: %w", err)
			}
			if err := json.Unmarshal(fieldBytes, &value); err != nil {
				return fmt.Errorf("failed to unmarshal value from tuple: %w",
					err)
			}
		}
		// grab the timestamp
		if ts, ok := tuple[0].(float64); ok {
			value.Time = time.Unix(int64(ts), 0)
		}
		nv.Data = append(nv.Data, value)
	}
	return nil
}
// NumericAllValue values represent numeric data.
// Time carries the tuple timestamp; the remaining fields mirror the rollup
// JSON object field-for-field.
type NumericAllValue struct {
	Time time.Time `json:"-"`
	Count int64 `json:"count"`
	Value int64 `json:"value"`
	StdDev int64 `json:"stddev"`
	Derivative int64 `json:"derivative"`
	DerivativeStdDev int64 `json:"derivative_stddev"`
	Counter int64 `json:"counter"`
	CounterStdDev int64 `json:"counter_stddev"`
	Derivative2 int64 `json:"derivative2"`
	Derivative2StdDev int64 `json:"derivative2_stddev"`
	Counter2 int64 `json:"counter2"`
	Counter2StdDev int64 `json:"counter2_stddev"`
}

// NumericValueResponse values represent responses containing numeric data.
type NumericValueResponse struct {
	Data []NumericValue // one entry per [timestamp, value] tuple
}
// UnmarshalJSON decodes a JSON format byte slice into a NumericValueResponse.
// The wire format is a list of [timestamp, value] integer tuples.
func (nv *NumericValueResponse) UnmarshalJSON(b []byte) error {
	nv.Data = []NumericValue{}
	values := [][]int64{}
	if err := json.Unmarshal(b, &values); err != nil {
		return fmt.Errorf("failed to deserialize numeric average response: %w",
			err)
	}
	for _, tuple := range values {
		// Skip malformed short tuples instead of panicking on the index
		// accesses below (the original assumed len >= 2).
		if len(tuple) < 2 {
			continue
		}
		nv.Data = append(nv.Data, NumericValue{
			Time:  time.Unix(tuple[0], 0),
			Value: tuple[1],
		})
	}
	return nil
}
// NumericValue values represent individual numeric data values.
type NumericValue struct {
	Time time.Time // tuple timestamp (second precision)
	Value int64 // recorded value
}

// NumericWrite values represent numeric data.
// Metric, ID and Offset address the datum; the rollup fields and Parts
// carry the measurement itself.
type NumericWrite struct {
	Count int64 `json:"count"`
	Value int64 `json:"value"`
	Derivative int64 `json:"derivative"`
	Counter int64 `json:"counter"`
	StdDev int64 `json:"stddev"`
	DerivativeStdDev int64 `json:"derivative_stddev"`
	CounterStdDev int64 `json:"counter_stddev"`
	Metric string `json:"metric"`
	ID string `json:"id"`
	Offset int64 `json:"offset"`
	Parts NumericParts `json:"parts"`
}
// NumericPartsData values represent numeric base data parts.
type NumericPartsData struct {
Count int64 `json:"count"`
Value int64 `json:"value"`
Derivative int64 `json:"derivative"`
Counter int64 `json:"counter"`
StdDev int64 `json:"stddev"`
DerivativeStdDev int64 `json:"derivative_stddev"`
CounterStdDev int64 `json:"counter_stddev"`
}
// NumericParts values contain the NumericWrite submission parts of an
// numeric rollup.
type NumericParts struct {
Period int64 `json:"period"`
Data []NumericPartsData `json:"data"`
}
// MarshalJSON marshals a NumericParts value into a JSON format byte slice.
func (p *NumericParts) MarshalJSON() ([]byte, error) {
tuple := []interface{}{}
tuple = append(tuple, p.Period, p.Data)
buf := bytes.NewBuffer([]byte{})
enc := json.NewEncoder(buf)
if err := enc.Encode(tuple); err != nil {
return buf.Bytes(), err
}
return buf.Bytes(), nil
}
// WriteNumeric writes numeric data to a node.
// It is WriteNumericContext with a background context; see that method for
// how the target node is selected from nodes/data.
func (sc *SnowthClient) WriteNumeric(data []NumericWrite,
	nodes ...*SnowthNode,
) error {
	return sc.WriteNumericContext(context.Background(), data, nodes...)
}
// WriteNumericContext is the context aware version of WriteNumeric.
// The payload is JSON-encoded and POSTed to /write/numeric on the first
// non-nil entry of nodes or, failing that, the active node owning the first
// datum's metric.
func (sc *SnowthClient) WriteNumericContext(ctx context.Context,
	data []NumericWrite, nodes ...*SnowthNode,
) error {
	buf := new(bytes.Buffer)
	if err := json.NewEncoder(buf).Encode(data); err != nil {
		return fmt.Errorf("failed to encode NumericWrite for write: %w", err)
	}
	var node *SnowthNode
	switch {
	case len(nodes) > 0 && nodes[0] != nil:
		node = nodes[0]
	case len(data) > 0:
		node = sc.GetActiveNode(sc.FindMetricNodeIDs(data[0].ID,
			data[0].Metric))
	}
	if node == nil {
		return fmt.Errorf("unable to get active node")
	}
	_, _, err := sc.DoRequestContext(ctx, node, "POST",
		"/write/numeric", buf, nil)
	return err
}
// ReadNumericValues reads numeric data from a node.
// It is ReadNumericValuesContext with a background context; t is the rollup
// type segment of the read path.
func (sc *SnowthClient) ReadNumericValues(start, end time.Time, period int64,
	t, id, metric string, nodes ...*SnowthNode,
) ([]NumericValue, error) {
	return sc.ReadNumericValuesContext(context.Background(), start, end,
		period, t, id, metric, nodes...)
}
// ReadNumericValuesContext is the context aware version of ReadNumericValues.
// It GETs /read/<start>/<end>/<period>/<id>/<t>/<metric> from the first
// non-nil entry of nodes (or the active node owning the metric) and returns
// the decoded [timestamp, value] tuples.
func (sc *SnowthClient) ReadNumericValuesContext(ctx context.Context,
	start, end time.Time, period int64,
	t, id, metric string, nodes ...*SnowthNode,
) ([]NumericValue, error) {
	var node *SnowthNode
	if len(nodes) > 0 && nodes[0] != nil {
		node = nodes[0]
	} else {
		node = sc.GetActiveNode(sc.FindMetricNodeIDs(id, metric))
	}
	if node == nil {
		return nil, fmt.Errorf("unable to get active node")
	}
	endpoint := path.Join("/read",
		strconv.FormatInt(start.Unix(), 10),
		strconv.FormatInt(end.Unix(), 10),
		strconv.FormatInt(period, 10), id, t, metric)
	body, _, err := sc.DoRequestContext(ctx, node, "GET", endpoint, nil, nil)
	if err != nil {
		return nil, err
	}
	resp := &NumericValueResponse{}
	if err := decodeJSON(body, &resp); err != nil {
		return nil, fmt.Errorf("unable to decode IRONdb response: %w", err)
	}
	return resp.Data, nil
}
// ReadNumericAllValues reads all numeric data from a node.
// It is ReadNumericAllValuesContext with a background context; the rollup
// type segment is fixed to "all".
func (sc *SnowthClient) ReadNumericAllValues(start, end time.Time, period int64,
	id, metric string, nodes ...*SnowthNode,
) ([]NumericAllValue, error) {
	return sc.ReadNumericAllValuesContext(context.Background(), start, end,
		period, id, metric, nodes...)
}
// ReadNumericAllValuesContext is the context aware version of
// ReadNumericAllValues. It GETs /read/<start>/<end>/<period>/<id>/all/<metric>
// from the first non-nil entry of nodes (or the active node owning the
// metric) and returns the decoded rollup tuples.
func (sc *SnowthClient) ReadNumericAllValuesContext(ctx context.Context,
	start, end time.Time, period int64,
	id, metric string, nodes ...*SnowthNode,
) ([]NumericAllValue, error) {
	var node *SnowthNode
	if len(nodes) > 0 && nodes[0] != nil {
		node = nodes[0]
	} else {
		node = sc.GetActiveNode(sc.FindMetricNodeIDs(id, metric))
	}
	if node == nil {
		return nil, fmt.Errorf("unable to get active node")
	}
	endpoint := path.Join("/read",
		strconv.FormatInt(start.Unix(), 10),
		strconv.FormatInt(end.Unix(), 10),
		strconv.FormatInt(period, 10), id, "all", metric)
	body, _, err := sc.DoRequestContext(ctx, node, "GET", endpoint, nil, nil)
	if err != nil {
		return nil, err
	}
	resp := &NumericAllValueResponse{}
	if err := decodeJSON(body, &resp); err != nil {
		return nil, fmt.Errorf("unable to decode IRONdb response: %w", err)
	}
	return resp.Data, nil
}
|
package config
import (
"github.com/spf13/cobra"
"github.com/wish/ctl/cmd/util/config"
"github.com/wish/ctl/pkg/client"
)
// fetchCmd builds the "fetch" subcommand, which retrieves the ctl
// extensions from the cluster client and writes them to the local config.
func fetchCmd(c *client.Client) *cobra.Command {
	return &cobra.Command{
		Use: "fetch",
		Short: "Update extensions",
		Run: func(cmd *cobra.Command, args []string) {
			config.WriteCtlExt(c.GetCtlExt())
		},
	}
}
|
package leetcode
/*Assume you are an awesome parent and want to give your children some cookies. But, you should give each child at most one cookie. Each child i has a greed factor gi, which is the minimum size of a cookie that the child will be content with; and each cookie j has a size sj. If sj >= gi, we can assign the cookie j to the child i, and the child i will be content. Your goal is to maximize the number of your content children and output the maximum number.
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/assign-cookies
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/
import "sort"
// findContentChildren greedily assigns the smallest sufficient cookie to
// each child in ascending greed order and returns the number of content
// children. Both slices are sorted in place.
// Two-pointer scan: each child and each cookie is examined at most once,
// O(n+m) after sorting, unlike the original's rescan from leftCookie for
// every child (O(n*m) worst case).
func findContentChildren(g []int, s []int) int {
	sort.Ints(g)
	sort.Ints(s)
	res := 0
	for i, j := 0, 0; i < len(g) && j < len(s); j++ {
		if s[j] >= g[i] {
			res++
			i++
		}
	}
	return res
}
|
package main
import (
qclReader "github.com/kf8a/qclreader"
)
// qcl is a fan-out hub: it tracks active websocket connections and
// broadcasts instrument samples to all of them.
type qcl struct {
	// connections maps each active connection to its registration state.
	connections map[*connection]bool
	// register receives connections that want to start getting samples.
	register chan *connection
	// unregister receives connections that should stop getting samples.
	unregister chan *connection
	// host is the instrument host name this hub reads from.
	host string
}
// newQcl builds a hub for the given instrument host with empty
// connection state and fresh registration channels.
func newQcl(hostName string) *qcl {
	hub := &qcl{host: hostName}
	hub.connections = make(map[*connection]bool)
	hub.register = make(chan *connection)
	hub.unregister = make(chan *connection)
	return hub
}
// read pumps samples from the QCL sampler goroutine to every registered
// connection. For each sample it first services a pending (un)registration,
// then fans the sample out; connections whose send buffer is full are
// dropped and their channel closed.
func (q *qcl) read(test bool) {
	myqcl := qclReader.QCL{}
	cs := make(chan string)
	go myqcl.Sampler(test, cs)
	for {
		data := <-cs
		select {
		case c := <-q.register:
			q.connections[c] = true
		case c := <-q.unregister:
			// Remove the connection entirely. The previous code only set
			// the map value to false, so the broadcast loop below kept
			// iterating over (and sending to) unregistered connections.
			delete(q.connections, c)
		default:
			for c := range q.connections {
				select {
				case c.send <- []byte(data):
				default:
					// Slow consumer: drop it so one stuck client cannot
					// stall the hub.
					delete(q.connections, c)
					close(c.send)
				}
			}
		}
	}
}
|
package main
import "fmt"
// A is the base interface exposing Foo.
type A interface {
	Foo()
}

// B embeds A, so any implementation of B must also provide Foo.
type B interface {
	A
	Bar()
}

// T is a concrete type satisfying both A and B via value receivers.
type T struct {
}
// Foo satisfies interface A by printing which interface it came from.
func (t T) Foo() {
	// "form" was a typo for "from" in the original message.
	fmt.Println("Call Foo function from interface A")
}
// Bar satisfies interface B by printing which interface it came from.
func (t T) Bar() {
	// "form" was a typo for "from" in the original message.
	fmt.Println("Call Bar function from interface B")
}
// main demonstrates interface embedding: a single T value satisfies both
// the base interface A and the embedding interface B.
func main() {
	t := T{}
	var a A = t
	a.Foo()
	var b B = t
	b.Foo()
	b.Bar()
}
|
package myeth
import (
"bytes"
"encoding/json"
"errors"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/golang/glog"
"io/ioutil"
"runtime"
"strings"
)
/*
1.accountKey 对应的是要获取的账户地址的私钥信息,在节点的keystore下面。
2.passphrase是当前操作的账户在创建账户是输入的密码(字符串)。
*/
/*
ParseEthAuth builds a transactor (bind.TransactOpts) from a JSON
keystore blob.

key_string must be a JSON document containing "address", "id" and
"privatekey" fields. conn is only consulted when hasNonce is true, in
which case the account nonce is looked up and pre-set on the returned
auth.

NOTE(review): the recover below means a panic in the keystore/crypto
code makes this function return (nil, nil) — callers see neither an
auth nor an error. Confirm that is intended.
*/
func ParseEthAuth(key_string string, conn *ethclient.Client, hasNonce bool) (*bind.TransactOpts, error) {
	defer func() {
		if r := recover(); r != nil {
			buf := make([]byte, 1024)
			glog.Infoln("panic:", runtime.Stack(buf, true), r)
		}
	}()
	// First decode generically, only to verify the required fields exist
	// before the typed (and more expensive) decode below.
	var mp map[string]interface{}
	err := json.Unmarshal(bytes.NewBufferString(key_string).Bytes(), &mp)
	if err != nil {
		glog.Infoln("ParseEthAuth err", err)
		return nil, err
	}
	if _, exist := mp["address"]; !exist {
		return nil, errors.New("don't have address")
	}
	if _, exist := mp["id"]; !exist {
		return nil, errors.New("don't have id")
	}
	if _, exist := mp["privatekey"]; !exist {
		return nil, errors.New("don't have privatekey")
	}
	// Typed decode into the keystore key carrying the private key material.
	key := &keystore.Key{}
	err = json.Unmarshal(bytes.NewBufferString(key_string).Bytes(), key)
	if err != nil {
		glog.Infoln(err)
		return nil, err
	}
	auth := bind.NewKeyedTransactor(key.PrivateKey)
	if hasNonce {
		nonce := getNonce(auth, conn)
		auth.Nonce = &nonce
		glog.Infoln("ParseEthAuth Nonce Calc After:", auth.Nonce, "from:", auth.From.String())
	}
	return auth, nil
}
// ParseKeyStore decrypts a JSON keystore (accountKey) with the given
// passphrase and returns the resulting key.
//
// It returns nil when the key material cannot be read or decrypted
// (e.g. wrong passphrase). Note keystore.DecryptKey is very slow by
// design (scrypt key derivation).
func ParseKeyStore(accountKey string, passphrase string) *keystore.Key {
	data, err := ioutil.ReadAll(strings.NewReader(accountKey))
	if err != nil {
		return nil
	}
	k, err := keystore.DecryptKey(data, passphrase)
	if err != nil {
		// Previously the error was silently discarded; make the
		// nil-on-failure contract explicit.
		return nil
	}
	return k
}
// ParseKeyStoreToString decrypts a JSON keystore (userKeyStore) with the
// given passphrase and re-serializes the decrypted key as JSON.
//
// It returns "" on any failure (read, decrypt, or marshal).
func ParseKeyStoreToString(userKeyStore string, passphrase string) string {
	data, err := ioutil.ReadAll(strings.NewReader(userKeyStore))
	if err != nil {
		return ""
	}
	// Bug fix: the original discarded DecryptKey's error (`k, _ :=`) and
	// then checked the stale err from ReadAll, so decryption failures
	// slipped through and marshaled a nil key.
	k, err := keystore.DecryptKey(data, passphrase)
	if err != nil {
		return ""
	}
	b, err := json.Marshal(k)
	if err != nil {
		return ""
	}
	return string(b)
}
|
package myhttp
import (
"fmt"
"runtime"
"sync"
)
// Start fetches all urls concurrently using at most limit worker
// goroutines, hashing and printing each response.
//
// It returns the last error observed by any worker, or nil when all
// succeeded. The shared err result is guarded by a mutex because
// multiple workers can fail concurrently — the original wrote it
// unsynchronized, a data race. It also waits on the worker goroutines
// themselves (not just the WaitGroup released inside performTask) so
// every error write happens-before err is read on return.
func Start(limit int, urls []string) (err error) {
	// make available the maximum number possible of available cpu to execute go routine simultaneously
	runtime.GOMAXPROCS(runtime.NumCPU())
	// wg is decremented by performTask itself (one Done per worker).
	wg := &sync.WaitGroup{}
	wg.Add(limit)
	ch := fillChan(urls)
	var mu sync.Mutex // guards err
	var workers sync.WaitGroup
	workers.Add(limit)
	for i := 0; i < limit; i++ {
		go func() {
			defer workers.Done()
			if taskErr := performTask(ch, wg); taskErr != nil {
				mu.Lock()
				err = fmt.Errorf("%w", taskErr)
				mu.Unlock()
			}
		}()
	}
	// Waiting on workers guarantees the error assignments above have
	// completed before the named return value is read.
	workers.Wait()
	return
}
// fillChan returns a channel that yields each url in order; the channel
// is closed after the last url has been sent.
func fillChan(urls []string) <-chan string {
	out := make(chan string)
	go func() {
		for _, u := range urls {
			out <- u
		}
		close(out)
	}()
	return out
}
// performTask drains urls from ch until the channel is closed, issuing
// an HTTP GET for each, hashing the response body with MD5 and printing
// the url with its hash. The first request failure stops the worker and
// is returned; nil means all urls were processed.
func performTask(ch <-chan string, wg *sync.WaitGroup) error {
	defer wg.Done()
	for u := range ch {
		client := NewClient(u)
		body, reqErr := client.GetRequest()
		if reqErr != nil {
			return reqErr
		}
		HTTPPrinter(client.GetURL(), HashMD5(body))
	}
	return nil
}
|
/*
* @lc app=leetcode.cn id=32 lang=golang
*
* [32] 最长有效括号
*/
package main
import (
"fmt"
)
/*
栈
func longestValidParentheses(s string) int {
stack := []int{}
maxLen := 0
// 方便求差
stack = append(stack, -1)
for i, v := range s {
if v == '(' {
stack = append(stack, i)
} else {
stack = stack[:len(stack)-1]
if len(stack) == 0 {
stack = append(stack, i)
} else {
maxLen = max(maxLen, i-stack[len(stack)-1])
}
}
}
return maxLen
} */
/*
动态规划
func longestValidParentheses(s string) int {
sLen := len(s)
dp := make([]int, sLen+1)
maxLen := 0
for i := 1; i < sLen; i++ {
if s[i] == ')' {
if s[i-1] == '(' {
// 除去当前括号,将前面连续匹配的括号加上
if i >= 2 {
dp[i] = dp[i-2] + 2
} else {
dp[i] = 2
}
// i - dp[i-1]-1 最后一次匹配到括号的前一个
// 如果是'('说明当前括号匹配上
} else if i-dp[i-1] > 0 && s[i-dp[i-1]-1] == '(' {
if i-dp[i-1] >= 2 {
dp[i] = dp[i-1] + 2 + dp[i-dp[i-1]-2]
} else {
dp[i] = dp[i-1] + 2
}
}
}
maxLen = max(maxLen, dp[i])
}
return maxLen
} */
// @lc code=start
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// longestValidParentheses returns the length of the longest substring of
// s that forms a well-matched parenthesis sequence.
//
// Two O(n) counter sweeps, O(1) space. Forward: track '(' and ')' counts;
// equal counts mark a balanced candidate, excess ')' resets. The backward
// sweep (with the roles mirrored) catches cases like "(()" where excess
// '(' never triggers a reset going forward.
func longestValidParentheses(s string) int {
	best := 0
	open, closed := 0, 0
	for i := 0; i < len(s); i++ {
		if s[i] == '(' {
			open++
		} else {
			closed++
		}
		if open == closed {
			if n := 2 * closed; n > best {
				best = n
			}
		} else if closed > open {
			open, closed = 0, 0
		}
	}
	open, closed = 0, 0
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] == '(' {
			open++
		} else {
			closed++
		}
		if open == closed {
			if n := 2 * open; n > best {
				best = n
			}
		} else if open > closed {
			open, closed = 0, 0
		}
	}
	return best
}
// @lc code=end
// main prints the longest-valid-parentheses length for a sample input.
func main() {
	input := "()(())"
	fmt.Println(longestValidParentheses(input))
}
|
package dht
import (
"context"
"fmt"
"testing"
"time"
tu "github.com/libp2p/go-libp2p-testing/etc"
"github.com/stretchr/testify/require"
)
// TODO Debug test failures due to timing issue on windows
// Tests are timing dependent as can be seen in the 2 seconds timed context that we use in "tu.WaitFor".
// While the tests work fine on OSX and complete in under a second,
// they repeatedly fail to complete in the stipulated time on Windows.
// However, increasing the timeout makes them pass on Windows.
// TestRTEvictionOnFailedQuery checks that routing-table membership is
// decoupled from connectivity: peers remain in the RT while merely
// disconnected, and are only evicted after a query against them fails.
func TestRTEvictionOnFailedQuery(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	d1 := setupDHT(ctx, t, false)
	d2 := setupDHT(ctx, t, false)
	// Connect and immediately drop the connections repeatedly, so the
	// peers know about each other but end up disconnected.
	for i := 0; i < 10; i++ {
		connect(t, ctx, d1, d2)
		for _, conn := range d1.host.Network().ConnsToPeer(d2.self) {
			conn.Close()
		}
	}
	// peers should be in the RT because of fixLowPeers
	require.NoError(t, tu.WaitFor(ctx, func() error {
		if !checkRoutingTable(d1, d2) {
			return fmt.Errorf("should have routes")
		}
		return nil
	}))
	// close both hosts so query fails
	require.NoError(t, d1.host.Close())
	require.NoError(t, d2.host.Close())
	// peers will still be in the RT because we have decoupled membership from connectivity
	require.NoError(t, tu.WaitFor(ctx, func() error {
		if !checkRoutingTable(d1, d2) {
			return fmt.Errorf("should have routes")
		}
		return nil
	}))
	// failed queries should remove the peers from the RT
	_, err := d1.GetClosestPeers(ctx, "test")
	require.NoError(t, err)
	_, err = d2.GetClosestPeers(ctx, "test")
	require.NoError(t, err)
	require.NoError(t, tu.WaitFor(ctx, func() error {
		if checkRoutingTable(d1, d2) {
			return fmt.Errorf("should not have routes")
		}
		return nil
	}))
}
// TestRTAdditionOnSuccessfulQuery checks that two peers (d1 and d3) that
// only share a common neighbor (d2) discover each other and add each
// other to their routing tables as a side effect of a successful query.
func TestRTAdditionOnSuccessfulQuery(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	d1 := setupDHT(ctx, t, false)
	d2 := setupDHT(ctx, t, false)
	d3 := setupDHT(ctx, t, false)
	// Topology: d1 <-> d2 <-> d3 (no direct d1/d3 link yet).
	connect(t, ctx, d1, d2)
	connect(t, ctx, d2, d3)
	// validate RT states
	// d1 has d2
	require.NoError(t, tu.WaitFor(ctx, func() error {
		if !checkRoutingTable(d1, d2) {
			return fmt.Errorf("should have routes")
		}
		return nil
	}))
	// d2 has d3
	require.NoError(t, tu.WaitFor(ctx, func() error {
		if !checkRoutingTable(d2, d3) {
			return fmt.Errorf("should have routes")
		}
		return nil
	}))
	// however, d1 does not know about d3
	require.NoError(t, tu.WaitFor(ctx, func() error {
		if checkRoutingTable(d1, d3) {
			return fmt.Errorf("should not have routes")
		}
		return nil
	}))
	// but when d3 queries d2, d1 and d3 discover each other
	_, err := d3.GetClosestPeers(ctx, "something")
	require.NoError(t, err)
	require.NoError(t, tu.WaitFor(ctx, func() error {
		if !checkRoutingTable(d1, d3) {
			return fmt.Errorf("should have routes")
		}
		return nil
	}))
}
// checkRoutingTable reports whether a and b each have the other present
// in their routing table (mutual visibility).
func checkRoutingTable(a, b *IpfsDHT) bool {
	// loop until connection notification has been received.
	// under high load, this may not happen as immediately as we would like.
	return a.routingTable.Find(b.self) != "" && b.routingTable.Find(a.self) != ""
}
|
package main
import (
"fmt"
"math"
)
// Point3 is a point (or vector) in 3-dimensional space.
type Point3 struct{ x, y, z float64 }

// Abs returns the Euclidean distance from the origin to p,
// i.e. sqrt(x² + y² + z²).
func (p *Point3) Abs() float64 {
	squared := p.x*p.x + p.y*p.y + p.z*p.z
	return math.Sqrt(squared)
}
// main demonstrates two ways of building a Point3 and printing its
// distance from the origin.
func main() {
	p := new(Point3) // zero-valued instance
	p.x = 3          // set fields via selectors
	p.y = 4
	p.z = 5
	fmt.Println(p.Abs())
	q := &Point3{8, 9, 10} // construct and initialize in one step
	fmt.Println(q.Abs())
}
|
package oiio
import (
"testing"
)
// TestNewROIEmpty verifies a default-constructed ROI reports an
// undefined region.
func TestNewROIEmpty(t *testing.T) {
	roi := NewROI()
	if roi.Defined() {
		// Message grammar fixed: was "to returned Defined=false".
		t.Error("Expected empty ROI to return Defined=false")
	}
}
// TestNewROIRegion2D checks the dimensions derived from a 2D region ROI
// spanning x [100,500) and y [50,600).
func TestNewROIRegion2D(t *testing.T) {
	roi := NewROIRegion2D(100, 500, 50, 600)
	if !roi.Defined() {
		t.Error("Expected ROI to have a defined region")
	}
	if got := roi.Width(); got != 400 {
		t.Errorf("Expected width 400, got %v", got)
	}
	if got := roi.Height(); got != 550 {
		t.Errorf("Expected height 550, got %v", got)
	}
	if got := roi.Depth(); got != 1 {
		t.Errorf("Expected depth 1, got %v", got)
	}
	if got := roi.NumChannels(); got != 1000 {
		t.Errorf("Expected 1000 channels, got %v", got)
	}
	want := (500 - 100) * (600 - 50) * 1
	if got := roi.NumPixels(); got != want {
		t.Errorf("Expected %v pixels, got %v", want, got)
	}
}
// TestNewROIRegion3D checks the dimensions derived from a 3D region ROI
// spanning x [100,500), y [50,600), z [10,200), channels [3,10).
func TestNewROIRegion3D(t *testing.T) {
	roi := NewROIRegion3D(100, 500, 50, 600, 10, 200, 3, 10)
	if !roi.Defined() {
		t.Error("Expected ROI to have a defined region")
	}
	actual := roi.Width()
	if actual != 400 {
		t.Errorf("Expected width 400, got %v", actual)
	}
	actual = roi.Height()
	if actual != 550 {
		t.Errorf("Expected height 550, got %v", actual)
	}
	actual = roi.Depth()
	if actual != 190 {
		// Message fixed: it previously said "depth 1" while asserting 190.
		t.Errorf("Expected depth 190, got %v", actual)
	}
	actual = roi.NumChannels()
	if actual != 7 {
		// Message fixed: it previously said "1000 channels" while asserting 7.
		t.Errorf("Expected 7 channels, got %v", actual)
	}
	actual = roi.NumPixels()
	expected := (500 - 100) * (600 - 50) * (200 - 10)
	if actual != expected {
		t.Errorf("Expected %v pixels, got %v", expected, actual)
	}
}
// TestROIProperties exercises every getter/setter pair of the ROI type:
// each axis (X, Y, Z) plus the channel range, first reading the value
// passed to the constructor, then writing a new bound and reading it back.
func TestROIProperties(t *testing.T) {
	roi := NewROIRegion3D(100, 500, 50, 600, 10, 200, 3, 10)
	// X axis bounds.
	if roi.XBegin() != 100 {
		t.Errorf("Expected XBegin 100, got %v", roi.XBegin())
	}
	roi.SetXBegin(200)
	if roi.XBegin() != 200 {
		t.Errorf("Expected XBegin 200, got %v", roi.XBegin())
	}
	if roi.XEnd() != 500 {
		t.Errorf("Expected XEnd 500, got %v", roi.XEnd())
	}
	roi.SetXEnd(1000)
	if roi.XEnd() != 1000 {
		t.Errorf("Expected XEnd 1000, got %v", roi.XEnd())
	}
	// Y axis bounds.
	if roi.YBegin() != 50 {
		t.Errorf("Expected YBegin 50, got %v", roi.YBegin())
	}
	roi.SetYBegin(100)
	if roi.YBegin() != 100 {
		t.Errorf("Expected YBegin 100, got %v", roi.YBegin())
	}
	if roi.YEnd() != 600 {
		t.Errorf("Expected YEnd 600, got %v", roi.YEnd())
	}
	roi.SetYEnd(1200)
	if roi.YEnd() != 1200 {
		t.Errorf("Expected YEnd 1200, got %v", roi.YEnd())
	}
	// Z axis bounds.
	if roi.ZBegin() != 10 {
		t.Errorf("Expected ZBegin 10, got %v", roi.ZBegin())
	}
	roi.SetZBegin(20)
	if roi.ZBegin() != 20 {
		t.Errorf("Expected ZBegin 20, got %v", roi.ZBegin())
	}
	if roi.ZEnd() != 200 {
		t.Errorf("Expected ZEnd 200, got %v", roi.ZEnd())
	}
	roi.SetZEnd(400)
	if roi.ZEnd() != 400 {
		t.Errorf("Expected ZEnd 400, got %v", roi.ZEnd())
	}
	// Channel range bounds.
	if roi.ChannelsBegin() != 3 {
		t.Errorf("Expected ChannelsBegin 3, got %v", roi.ChannelsBegin())
	}
	roi.SetChannelsBegin(6)
	if roi.ChannelsBegin() != 6 {
		t.Errorf("Expected ChannelsBegin 6, got %v", roi.ChannelsBegin())
	}
	if roi.ChannelsEnd() != 10 {
		t.Errorf("Expected ChannelsEnd 10, got %v", roi.ChannelsEnd())
	}
	roi.SetChannelsEnd(20)
	if roi.ChannelsEnd() != 20 {
		t.Errorf("Expected ChannelsEnd 20, got %v", roi.ChannelsEnd())
	}
}
|
package relax
import (
"io/ioutil"
"math"
"github.com/mccoyst/vorbis"
)
// Track is a decoded, loopable audio stream with play/pause state and a
// volume slider applied at sample time.
type Track struct {
	// SampleRate is the rate reported by the Vorbis decoder.
	SampleRate int
	// slider scales every emitted sample by the current volume.
	slider *VolumeSlider
	// data holds decoded int16 PCM samples; Stream consumes them two at
	// a time (assumed interleaved stereo — see Stream).
	data []int16
	// curIdx is the current read offset into data.
	curIdx int
	// playing selects between real samples and silence in Stream.
	playing bool
}
// TrackFromOGGData decodes raw OGG/Vorbis bytes into a Track that starts
// in the playing state. It panics if decoding fails.
func TrackFromOGGData(raw []byte) *Track {
	data, _, sr, err := vorbis.Decode(raw)
	if err != nil {
		panic(err)
	}
	return &Track{
		SampleRate: sr,
		slider:     NewVolumeSlider(),
		data:       data,
		playing:    true,
	}
}
// TrackFromOGG reads the OGG file at filename and decodes it into a
// Track. It panics on read or decode failure.
func TrackFromOGG(filename string) *Track {
	contents, readErr := ioutil.ReadFile(filename)
	if readErr != nil {
		panic(readErr)
	}
	return TrackFromOGGData(contents)
}
// PlayPause toggles the track between playing and paused.
func (track *Track) PlayPause() {
	if track.playing {
		track.playing = false
	} else {
		track.playing = true
	}
}
// Stream fills samples with the next chunk of audio and reports how many
// frames were written. When paused it emits silence but keeps the stream
// alive. Playback loops: the read cursor wraps back to the start of the
// decoded data.
//
// NOTE(review): indexing myIdx+1 assumes track.data is interleaved
// stereo with an even length (complete L/R frames) — confirm the decoder
// guarantees this, otherwise the last frame could index out of range.
func (track *Track) Stream(samples [][2]float64) (n int, ok bool) {
	if !track.playing {
		// Paused: zero every frame (silence).
		for i := range samples {
			samples[i] = [2]float64{}
		}
		return len(samples), true
	}
	var myIdx int
	var val float64
	for i := range samples {
		// Two int16 values (left, right) are consumed per output frame.
		myIdx = track.curIdx + (i * 2)
		if myIdx >= len(track.data) {
			// loop
			myIdx -= len(track.data)
		}
		// Volume is sampled per frame so slider moves take effect mid-buffer.
		val = track.slider.Val()
		// Scale int16 PCM into [-1, 1] and apply the volume.
		samples[i][0] = val * (float64(track.data[myIdx]) / float64(math.MaxInt16))
		samples[i][1] = val * (float64(track.data[(myIdx)+1]) / float64(math.MaxInt16))
	}
	track.curIdx += len(samples) * 2
	if track.curIdx > len(track.data) {
		track.curIdx -= len(track.data)
	}
	return len(samples), true
}
// Err implements the error-reporting half of the streamer interface.
// A Track never enters an error state, so this always returns nil.
func (track *Track) Err() error {
	return nil
}
|
package mdb
import (
crand "crypto/rand"
"io/ioutil"
"math/rand"
"os"
"testing"
)
// repeatedly put (overwrite) keys.
//
// BenchmarkTxnPut seeds the database with benchDBNumKeys random entries,
// then measures overwriting randomly chosen existing keys inside a
// single write transaction.
func BenchmarkTxnPut(b *testing.B) {
	initRandSource(b)
	env, path := setupBenchDB(b)
	defer teardownBenchDB(b, env, path)
	dbi := openBenchDBI(b, env)
	var ps [][]byte
	rc := newRandSourceCursor()
	txn, err := env.BeginTxn(nil, 0)
	bMust(b, err, "starting transaction")
	for i := 0; i < benchDBNumKeys; i++ {
		k := makeBenchDBKey(&rc)
		v := makeBenchDBVal(&rc)
		err := txn.Put(dbi, k, v, 0)
		ps = append(ps, k, v)
		bTxnMust(b, txn, err, "putting data")
	}
	err = txn.Commit()
	bMust(b, err, "commiting transaction")
	txn, err = env.BeginTxn(nil, 0)
	// Bug fix: this error previously went unchecked; a failed BeginTxn
	// would panic on the nil transaction below.
	bMust(b, err, "starting transaction")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// ps holds keys at even indices and values at odd ones, so this
		// always selects an existing key to overwrite.
		k := ps[rand.Intn(len(ps)/2)*2]
		v := makeBenchDBVal(&rc)
		err := txn.Put(dbi, k, v, 0)
		bTxnMust(b, txn, err, "putting data")
	}
	b.StopTimer()
	err = txn.Commit()
	bMust(b, err, "commiting transaction")
}
// repeatedly get random keys.
//
// BenchmarkTxnGetRDONLY seeds the database, then measures txn.Get on
// randomly chosen entries inside a read-only transaction. Note ps holds
// keys and values interleaved, so about half the lookups probe a value
// as the key and are expected to miss — hence the NotFound skip below.
func BenchmarkTxnGetRDONLY(b *testing.B) {
	initRandSource(b)
	env, path := setupBenchDB(b)
	defer teardownBenchDB(b, env, path)
	dbi := openBenchDBI(b, env)
	var ps [][]byte
	rc := newRandSourceCursor()
	txn, err := env.BeginTxn(nil, 0)
	bMust(b, err, "starting transaction")
	for i := 0; i < benchDBNumKeys; i++ {
		k := makeBenchDBKey(&rc)
		v := makeBenchDBVal(&rc)
		err := txn.Put(dbi, k, v, 0)
		ps = append(ps, k, v)
		bTxnMust(b, txn, err, "putting data")
	}
	err = txn.Commit()
	bMust(b, err, "commiting transaction")
	txn, err = env.BeginTxn(nil, RDONLY)
	bMust(b, err, "starting transaction")
	defer txn.Abort()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := txn.Get(dbi, ps[rand.Intn(len(ps))])
		if err == NotFound {
			continue
		}
		if err != nil {
			b.Fatalf("error getting data: %v", err)
		}
	}
	b.StopTimer()
}
// like BenchmarkTxnGetRDONLY, but txn.GetVal() is called instead.
//
// As above, ps interleaves keys and values, so NotFound results are
// expected for roughly half of the random probes and are skipped.
func BenchmarkTxnGetValRDONLY(b *testing.B) {
	initRandSource(b)
	env, path := setupBenchDB(b)
	defer teardownBenchDB(b, env, path)
	dbi := openBenchDBI(b, env)
	var ps [][]byte
	rc := newRandSourceCursor()
	txn, err := env.BeginTxn(nil, 0)
	bMust(b, err, "starting transaction")
	for i := 0; i < benchDBNumKeys; i++ {
		k := makeBenchDBKey(&rc)
		v := makeBenchDBVal(&rc)
		err := txn.Put(dbi, k, v, 0)
		ps = append(ps, k, v)
		bTxnMust(b, txn, err, "putting data")
	}
	err = txn.Commit()
	bMust(b, err, "commiting transaction")
	txn, err = env.BeginTxn(nil, RDONLY)
	bMust(b, err, "starting transaction")
	defer txn.Abort()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := txn.GetVal(dbi, ps[rand.Intn(len(ps))])
		if err == NotFound {
			continue
		}
		if err != nil {
			b.Fatalf("error getting data: %v", err)
		}
	}
	b.StopTimer()
}
// repeatedly scan all the values in a database.
//
// BenchmarkCursorScanRDONLY seeds the database, then measures a full
// cursor walk over every key in a read-only transaction, b.N times.
func BenchmarkCursorScanRDONLY(b *testing.B) {
	initRandSource(b)
	env, path := setupBenchDB(b)
	defer teardownBenchDB(b, env, path)
	dbi := openBenchDBI(b, env)
	var ps [][]byte
	rc := newRandSourceCursor()
	txn, err := env.BeginTxn(nil, 0)
	bMust(b, err, "starting transaction")
	for i := 0; i < benchDBNumKeys; i++ {
		k := makeBenchDBKey(&rc)
		v := makeBenchDBVal(&rc)
		err := txn.Put(dbi, k, v, 0)
		ps = append(ps, k, v)
		bTxnMust(b, txn, err, "putting data")
	}
	err = txn.Commit()
	bMust(b, err, "commiting transaction")
	txn, err = env.BeginTxn(nil, RDONLY)
	bMust(b, err, "starting transaction")
	defer txn.Abort()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		func() {
			cur, err := txn.CursorOpen(dbi)
			bMust(b, err, "opening cursor")
			defer cur.Close()
			var count int64
			for {
				_, _, err := cur.Get(nil, nil, NEXT)
				if err == NotFound {
					// Bug fix: this was `return`, which made the
					// key-count verification below unreachable dead code.
					break
				}
				if err != nil {
					b.Fatalf("error getting data: %v", err)
				}
				count++
			}
			if count != benchDBNumKeys {
				b.Fatalf("unexpected number of keys: %d", count)
			}
		}()
	}
	b.StopTimer()
}
// like BenchmarkCursorScanRDONLY, but cursor.GetVal() is called instead.
func BenchmarkCursorScanValRDONLY(b *testing.B) {
	initRandSource(b)
	env, path := setupBenchDB(b)
	defer teardownBenchDB(b, env, path)
	dbi := openBenchDBI(b, env)
	var ps [][]byte
	rc := newRandSourceCursor()
	txn, err := env.BeginTxn(nil, 0)
	bMust(b, err, "starting transaction")
	for i := 0; i < benchDBNumKeys; i++ {
		k := makeBenchDBKey(&rc)
		v := makeBenchDBVal(&rc)
		err := txn.Put(dbi, k, v, 0)
		ps = append(ps, k, v)
		bTxnMust(b, txn, err, "putting data")
	}
	err = txn.Commit()
	bMust(b, err, "commiting transaction")
	txn, err = env.BeginTxn(nil, RDONLY)
	bMust(b, err, "starting transaction")
	defer txn.Abort()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		func() {
			cur, err := txn.CursorOpen(dbi)
			bMust(b, err, "opening cursor")
			defer cur.Close()
			var count int64
			for {
				_, _, err := cur.GetVal(nil, nil, NEXT)
				if err == NotFound {
					// Bug fix: this was `return`, which made the
					// key-count verification below unreachable dead code.
					break
				}
				if err != nil {
					b.Fatalf("error getting data: %v", err)
				}
				count++
			}
			if count != benchDBNumKeys {
				b.Fatalf("unexpected number of keys: %d", count)
			}
		}()
	}
	b.StopTimer()
}
// setupBenchDB creates and opens a fresh LMDB environment rooted in a
// new temp directory, returning the env and its path for teardown.
func setupBenchDB(b *testing.B) (*Env, string) {
	env, err := NewEnv()
	bMust(b, err, "creating env")
	bMust(b, env.SetMaxDBs(26), "setting max dbs")
	bMust(b, env.SetMapSize(1<<30), "sizing env") // 1GB map
	dir, err := ioutil.TempDir("", "mdb_test-bench-")
	bMust(b, err, "creating temp directory")
	err = env.Open(dir, 0, 0644)
	if err != nil {
		// Clean up the partially created env/dir before failing below.
		teardownBenchDB(b, env, dir)
	}
	bMust(b, err, "opening database")
	return env, dir
}
// openBenchDBI opens (creating if necessary) the "benchmark" database in
// its own write transaction and returns its handle.
func openBenchDBI(b *testing.B, env *Env) DBI {
	txn, err := env.BeginTxn(nil, 0)
	bMust(b, err, "starting transaction")
	name := "benchmark"
	dbi, err := txn.DBIOpen(&name, CREATE)
	if err != nil {
		txn.Abort()
		b.Fatalf("error opening dbi: %v", err)
	}
	bMust(b, txn.Commit(), "commiting transaction")
	return dbi
}
// teardownBenchDB closes the benchmark environment and removes its
// backing temp directory.
func teardownBenchDB(b *testing.B, env *Env, path string) {
	env.Close()
	os.RemoveAll(path)
}
// randBytes returns n cryptographically random bytes. It panics if the
// system entropy source fails, since benchmark data generation cannot
// proceed without randomness (the error was silently ignored before).
func randBytes(n int) []byte {
	p := make([]byte, n)
	if _, err := crand.Read(p); err != nil {
		panic(err)
	}
	return p
}
// bMust aborts the benchmark with a descriptive message when err is
// non-nil; action describes what was being attempted.
func bMust(b *testing.B, err error, action string) {
	if err == nil {
		return
	}
	b.Fatalf("error %s: %v", action, err)
}
// bTxnMust is like bMust but additionally aborts txn before failing, so
// the environment is left in a clean state.
func bTxnMust(b *testing.B, txn *Txn, err error, action string) {
	if err == nil {
		return
	}
	txn.Abort()
	b.Fatalf("error %s: %v", action, err)
}
const randSourceSize = 500 << 20 // size of the 'entropy pool' for random byte generation.
const benchDBNumKeys = 100000 // number of keys to store in benchmark databases
const benchDBMaxKeyLen = 30 // maximum length for database keys (size is limited by MDB)
const benchDBMaxValLen = 2000 // maximum length for database values
func makeBenchDBKey(c *randSourceCursor) []byte {
return c.NBytes(rand.Intn(benchDBMaxKeyLen) + 1)
}
func makeBenchDBVal(c *randSourceCursor) []byte {
return c.NBytes(rand.Intn(benchDBMaxValLen) + 1)
}
// holds a bunch of random bytes so repeated generation of 'random' slices is
// cheap. acts as a ring which can be read from (although doesn't implement io.Reader).
var randSource [randSourceSize]byte

// initRandSource lazily fills randSource with crypto-grade randomness on
// first use and fails the benchmark if the read comes up short.
//
// NOTE(review): "already initialized" is inferred from the first four
// bytes being non-zero; with probability ~2^-32 a filled pool starts
// with four zero bytes and would be refilled on the next call.
func initRandSource(b *testing.B) {
	if randSource[0] == 0 && randSource[1] == 0 && randSource[2] == 0 && randSource[3] == 0 {
		b.Logf("initializing random source data")
		n, err := crand.Read(randSource[:])
		bMust(b, err, "initializing random source")
		if n < len(randSource) {
			b.Fatalf("unable to read enough random source data %d", n)
		}
	}
}
// acts as a simple byte slice generator.
// randSourceCursor is an offset into the shared randSource ring buffer.
type randSourceCursor int

// newRandSourceCursor returns a cursor positioned at a uniformly random
// offset, so separate cursors produce different byte sequences.
func newRandSourceCursor() randSourceCursor {
	i := rand.Intn(randSourceSize)
	return randSourceCursor(i)
}
// NBytes returns the next n bytes from the randSource ring and advances
// the cursor (with wraparound). It panics when n is not smaller than the
// ring itself.
//
// When the span does not wrap, the returned slice aliases randSource
// directly (zero-copy) — callers must treat it as read-only. A wrapping
// span allocates and stitches the tail and head of the ring together.
func (c *randSourceCursor) NBytes(n int) []byte {
	i := int(*c)
	if n >= randSourceSize {
		panic("rand size too big")
	}
	// Advance the cursor modulo the ring size.
	*c = (*c + randSourceCursor(n)) % randSourceSize
	// _n > 0 means [i, i+n) runs off the end of the ring.
	_n := i + n - randSourceSize
	if _n > 0 {
		p := make([]byte, n)
		m := copy(p, randSource[i:])
		copy(p[m:], randSource[:])
		return p
	}
	return randSource[i : i+n]
}
|
package example
import "fmt"
// print writes a labeled, human-readable dump of a single Element to
// stdout, one line per field. A nil receiver is a no-op.
func (e *Element) print() {
	if e == nil {
		return
	}
	fmt.Println("Id:\t\t", e.Id)
	fmt.Println("Name:\t\t", e.Name)
	fmt.Println("Age:\t\t", e.Age)
	// NOTE(review): label "Statue" is probably a typo for "Status".
	fmt.Println("Statue:\t\t", e.Status)
	fmt.Println("CreateAt:\t", e.CreatedAt)
	// NOTE(review): the "UpdateAt" line prints e.CreatedAt — this looks
	// like a copy-paste bug; it presumably should print an UpdatedAt
	// field if one exists on Element. Confirm against the type definition.
	fmt.Println("UpdateAt:\t", e.CreatedAt)
}
// print dumps every element in the collection, separating entries with a
// dashed divider line. A nil receiver is a no-op.
func (es *Elements) print() {
	if es == nil {
		return
	}
	for i := 0; i < len(es.Elements); i++ {
		es.Elements[i].print()
		fmt.Println("-------")
	}
}
|
package pkg
import (
"testing"
"time"
"github.com/kyma-incubator/milv/cli"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNewFileConfig covers how NewFileConfig merges the global Config
// with per-file settings: merging of link-ignore lists, matching of
// internal-link ignore paths in their various relative spellings,
// defaulting when no file-specific entry exists, and overriding when a
// matching file entry is present.
func TestNewFileConfig(t *testing.T) {
	trueBool := true
	falseBool := false
	t.Run("Check if links to ignore are merged", func(t *testing.T) {
		//GIVEN
		commands := cli.Commands{
			ConfigFile: "test-markdowns/milv-test.config.yaml",
			BasePath: "test-markdowns",
		}
		expected := &FileConfig{
			ExternalLinksToIgnore: []string{"localhost", "abc.com", "github.com"},
			InternalLinksToIgnore: []string{"LICENSE", "#contributing"},
		}
		config, err := NewConfig(commands)
		require.NoError(t, err)
		//WHEN
		result := NewFileConfig("./src/foo.md", config)
		//THEN
		require.NoError(t, err)
		assert.ElementsMatch(t, expected.ExternalLinksToIgnore, result.ExternalLinksToIgnore)
		assert.ElementsMatch(t, expected.InternalLinksToIgnore, result.InternalLinksToIgnore)
	})
	// Table-driven cases for path normalization: with/without a leading
	// "./" on either side, substring non-matches, and nested paths.
	t.Run("Check different scenario for ignoring internal links paths", func(t *testing.T) {
		tcs := []struct {
			Name string
			FilePath string
			ShouldBeIgnored bool
			FilesToIgnoreInternalLinksIn []string
		}{
			{
				Name: "File and Ignore has relative path",
				FilePath: "ignore-me-internally/my-markdown.md",
				ShouldBeIgnored: true,
				FilesToIgnoreInternalLinksIn: []string{"ignore-me-internally"},
			}, {
				Name: "File has relative path with ./ path and Ignore has relative path",
				FilePath: "./ignore-me-internally/my-markdown.md",
				ShouldBeIgnored: true,
				FilesToIgnoreInternalLinksIn: []string{"ignore-me-internally"},
			}, {
				Name: "File and Ignore has relative path with ./",
				FilePath: "./ignore-me-internally/my-markdown.md",
				ShouldBeIgnored: true,
				FilesToIgnoreInternalLinksIn: []string{"./ignore-me-internally"},
			}, {
				Name: "File has relative path path and Ignore has relative path with ./",
				FilePath: "ignore-me-internally/my-markdown.md",
				ShouldBeIgnored: true,
				FilesToIgnoreInternalLinksIn: []string{"./ignore-me-internally"},
			}, {
				Name: "File should not be ignored when contains ignored substring in path",
				FilePath: "not-ignore-me/my-markdown.md",
				ShouldBeIgnored: false,
				FilesToIgnoreInternalLinksIn: []string{"ignore"},
			},
			{
				Name: "File should be ignored",
				FilePath: "./ignore-me-internally/not-ignore-me/my-markdown.md",
				ShouldBeIgnored: true,
				FilesToIgnoreInternalLinksIn: []string{"./ignore-me-internally"},
			}}
		for _, tc := range tcs {
			t.Run(tc.Name, func(t *testing.T) {
				//GIVEN
				cfg := &Config{
					FilesToIgnoreInternalLinksIn: tc.FilesToIgnoreInternalLinksIn,
				}
				//WHEN
				fileCfg := NewFileConfig(tc.FilePath, cfg)
				//THEN
				require.NotNil(t, fileCfg)
				require.NotNil(t, fileCfg.IgnoreInternal)
				require.Equal(t, tc.ShouldBeIgnored, *fileCfg.IgnoreInternal)
			})
		}
	})
	// With no per-file entries the global values should carry over 1:1.
	t.Run("Config without file Configs", func(t *testing.T) {
		//GIVEN
		timeout := 5
		requestRepeats := 6
		cfg := &Config{
			BasePath: "path",
			RequestRepeats: requestRepeats,
			Timeout: timeout,
			AllowRedirect: true,
			AllowCodeBlocks: true,
			IgnoreExternal: false,
			IgnoreInternal: true,
			Backoff: 5 * time.Hour,
		}
		expectedCfg := FileConfig{
			BasePath: "path",
			Timeout: &timeout,
			RequestRepeats: &requestRepeats,
			AllowRedirect: &trueBool,
			AllowCodeBlocks: &trueBool,
			IgnoreExternal: &falseBool,
			IgnoreInternal: &trueBool,
			Backoff: 5 * time.Hour,
		}
		//WHEN
		newConfig := NewFileConfig("any-path", cfg)
		//THEN
		require.NotNil(t, newConfig)
		assert.Equal(t, expectedCfg, newConfig)
	})
	// A matching per-file entry should override every global value.
	t.Run("Config with matching File Configs", func(t *testing.T) {
		//GIVEN
		timeout := 5
		requestRepeats := 6
		filePath := "path"
		files := []File{
			{RelPath: "some-random/documentation.md"},
			{
				RelPath: filePath,
				Config: &FileConfig{
					Timeout: &timeout,
					RequestRepeats: &requestRepeats,
					AllowRedirect: &trueBool,
					AllowCodeBlocks: &trueBool,
					IgnoreExternal: &trueBool,
					IgnoreInternal: &falseBool,
					Backoff: 10 * time.Second,
				}},
		}
		cfg := &Config{
			Files: files,
			Timeout: timeout,
			RequestRepeats: requestRepeats,
			AllowRedirect: false,
			AllowCodeBlocks: false,
			IgnoreExternal: false,
			IgnoreInternal: true,
			Backoff: 1 * time.Second,
		}
		expectedCfg := FileConfig{
			ExternalLinksToIgnore: []string{},
			InternalLinksToIgnore: []string{},
			Timeout: &timeout,
			RequestRepeats: &requestRepeats,
			AllowRedirect: &trueBool,
			AllowCodeBlocks: &trueBool,
			IgnoreExternal: &trueBool,
			IgnoreInternal: &falseBool,
			Backoff: 10 * time.Second,
		}
		//WHEN
		mergedFileConfig := NewFileConfig(filePath, cfg)
		//THEN
		require.NotNil(t, mergedFileConfig)
		assert.Equal(t, expectedCfg, mergedFileConfig)
	})
	t.Run("Test with several ignore paths", func(t *testing.T) {
		//GIVEN
		firstPathToIgnore := "./website/src"
		secondPathToIgnore := "./website/content"
		filePath := "./website/content/news.md"
		cfg := Config{FilesToIgnoreInternalLinksIn: []string{firstPathToIgnore, secondPathToIgnore}}
		//WHEN
		output := NewFileConfig(filePath, &cfg)
		//THEN
		require.NotNil(t, output)
		require.NotNil(t, output.IgnoreInternal)
		require.True(t, *output.IgnoreInternal)
	})
}
|
package v1
import (
"context"
"github.com/asecurityteam/nexpose-vuln-hydrator/pkg/domain"
)
// DependencyCheckHandler takes in a domain.DependencyChecker, which
// contains procedures that check external dependencies.
type DependencyCheckHandler struct {
	// DependencyChecker performs the actual dependency verification.
	DependencyChecker domain.DependencyChecker
}
// Handle calls the dependency checker, propagating ctx for cancellation,
// and returns whatever error the checker reports (nil when healthy).
func (h *DependencyCheckHandler) Handle(ctx context.Context) error {
	return h.DependencyChecker.CheckDependencies(ctx)
}
|
package gatekeeper
import (
"io"
"io/ioutil"
"net/http"
)
// ResponseType buckets HTTP status codes by class (2xx through 5xx).
type ResponseType uint

const (
	// OkResponse starts at 1 so the zero value is distinguishable as
	// "unset"/invalid.
	OkResponse ResponseType = iota + 1
	RedirectResponse
	UserErrorResponse
	InternalErrorResponse
)

// responseTypeMapping provides human-readable labels for each ResponseType.
var responseTypeMapping = map[ResponseType]string{
	OkResponse: "2xx: ok",
	RedirectResponse: "3xx: redirect",
	UserErrorResponse: "4xx: user error",
	InternalErrorResponse: "5xx: internal error",
}
// NewResponseType maps an HTTP status code to its ResponseType class by
// hundreds digit (2xx -> OkResponse, ... 5xx -> InternalErrorResponse).
// Codes outside 200-599 are flagged as a programming error and reported
// as InternalErrorResponse.
func NewResponseType(statusCode int) ResponseType {
	// A switch avoids allocating and populating a lookup map on every
	// call, as the previous implementation did.
	switch statusCode / 100 {
	case 2:
		return OkResponse
	case 3:
		return RedirectResponse
	case 4:
		return UserErrorResponse
	case 5:
		return InternalErrorResponse
	default:
		ProgrammingError("invalid status code for response type")
		return InternalErrorResponse
	}
}
// Response is a rpc compatible representation of an http.Response type which by default, _does_ not pass the _actual_ body of a request over RPC
type Response struct {
	// The following fields mirror their http.Response counterparts.
	Status string
	StatusCode int
	Proto string
	ProtoMajor int
	ProtoMinor int
	Header http.Header
	ContentLength int64
	TransferEncoding []string
	Close bool
	Trailer http.Header
	// if an error has occurred, its attached to the response and used as
	// the body. This is used to add additional context to a response in
	// the case that an error occurred.
	Error *Error
	// if a plugin would like to overwrite the body being returned, then a
	// reader can be passed along to read data back to the responseWriter
	// instead of using the default. If nil, then we don't copy over. NOTE
	// if override body is used, its recommended to use the SetBody(reader)
	// method which will accept a reader and write it into a buffer that is
	// compatible over the wire.
	Body []byte
}
// NewResponse copies the RPC-safe metadata out of an http.Response. The
// Body and Error fields are deliberately left unset; callers attach a
// body via SetBody when needed.
func NewResponse(resp *http.Response) *Response {
	r := &Response{}
	r.Status = resp.Status
	r.StatusCode = resp.StatusCode
	r.Proto = resp.Proto
	r.ProtoMajor = resp.ProtoMajor
	r.ProtoMinor = resp.ProtoMinor
	r.Header = resp.Header
	r.ContentLength = resp.ContentLength
	r.TransferEncoding = resp.TransferEncoding
	r.Close = resp.Close
	r.Trailer = resp.Trailer
	return r
}
// NewErrorResponse builds a Response representing a failed request: the
// error text becomes the body and the wrapped Error carries context.
func NewErrorResponse(statusCode int, err error) *Response {
	msg := err.Error()
	return &Response{
		StatusCode: statusCode,
		Error:      NewError(err),
		Body:       []byte(msg),
		// ContentLength must match the body exactly; the previous
		// len-1 value truncated the final byte of the message.
		ContentLength: int64(len(msg)),
	}
}
// SetCode updates the numeric status code and derives the matching
// human-readable status text from it.
func (r *Response) SetCode(code int) {
	r.StatusCode = code
	r.Status = http.StatusText(code)
}
// SetBody replaces the response body with the full contents of reader
// and keeps ContentLength in sync with the stored bytes. It returns any
// read error without modifying the response.
func (r *Response) SetBody(reader io.Reader) error {
	body, err := ioutil.ReadAll(reader)
	if err != nil {
		return err
	}
	r.Body = body
	// ContentLength must equal len(body); the previous len-1 value
	// caused consumers to drop the final byte of the body.
	r.ContentLength = int64(len(body))
	return nil
}
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package handle
import (
"context"
"encoding/json"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/statistics/handle/cache"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/mathutil"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/tikv/client-go/v2/oracle"
"go.uber.org/zap"
)
// GCStats will garbage collect the useless stats info. For dropped tables, we will first update their version so that
// other tidb could know that table is deleted.
//
// It only considers stats whose version is older than now minus ten
// leases (the larger of the stats lease and ddlLease), so every tidb
// instance has had time to learn about the dropped tables first.
func (h *Handle) GCStats(is infoschema.InfoSchema, ddlLease time.Duration) error {
	ctx := context.Background()
	// To make sure that all the deleted tables' schema and stats info have been acknowledged to all tidb,
	// we only garbage collect version before 10 lease.
	lease := mathutil.Max(h.Lease(), ddlLease)
	offset := DurationToTS(10 * lease)
	now := oracle.GoTimeToTS(time.Now())
	// Nothing is old enough to collect yet.
	if now < offset {
		return nil
	}
	gcVer := now - offset
	rows, _, err := h.execRestrictedSQL(ctx, "select table_id from mysql.stats_meta where version < %?", gcVer)
	if err != nil {
		return errors.Trace(err)
	}
	for _, row := range rows {
		// GC per-table stats; for tables no longer in the schema, also
		// drop their historical stats records.
		if err := h.gcTableStats(is, row.GetInt64(0)); err != nil {
			return errors.Trace(err)
		}
		_, existed := is.TableByID(row.GetInt64(0))
		if !existed {
			if err := h.gcHistoryStatsFromKV(row.GetInt64(0)); err != nil {
				return errors.Trace(err)
			}
		}
	}
	// Outdated historical stats cleanup is best-effort: log and continue.
	if err := h.ClearOutdatedHistoryStats(); err != nil {
		logutil.BgLogger().Warn("failed to gc outdated historical stats",
			zap.Duration("duration", variable.HistoricalStatsDuration.Load()),
			zap.Error(err))
	}
	return h.removeDeletedExtendedStats(gcVer)
}
// gcTableStats garbage collects the stats of a single physical table: it
// removes meta/histogram rows for dropped columns and indexes, drops all stats
// when the table itself is gone, and marks extended stats that reference
// dropped columns as deleted.
func (h *Handle) gcTableStats(is infoschema.InfoSchema, physicalID int64) error {
	ctx := context.Background()
	rows, _, err := h.execRestrictedSQL(ctx, "select is_index, hist_id from mysql.stats_histograms where table_id = %?", physicalID)
	if err != nil {
		return errors.Trace(err)
	}
	// The table has already been deleted in stats and acknowledged to all tidb,
	// we can safely remove the meta info now.
	if len(rows) == 0 {
		_, _, err = h.execRestrictedSQL(ctx, "delete from mysql.stats_meta where table_id = %?", physicalID)
		if err != nil {
			return errors.Trace(err)
		}
		cache.TableRowStatsCache.Invalidate(physicalID)
	}
	tbl, ok := h.getTableByPhysicalID(is, physicalID)
	if !ok {
		// The table/partition is gone from the schema: delete all of its stats.
		logutil.BgLogger().Info("remove stats in GC due to dropped table", zap.Int64("table_id", physicalID))
		return errors.Trace(h.DeleteTableStatsFromKV([]int64{physicalID}))
	}
	tblInfo := tbl.Meta()
	for _, row := range rows {
		isIndex, histID := row.GetInt64(0), row.GetInt64(1)
		// Check whether the histogram still maps to a live column or index.
		find := false
		if isIndex == 1 {
			for _, idx := range tblInfo.Indices {
				if idx.ID == histID {
					find = true
					break
				}
			}
		} else {
			for _, col := range tblInfo.Columns {
				if col.ID == histID {
					find = true
					break
				}
			}
		}
		if !find {
			// The column/index was dropped: delete its histogram-related rows.
			if err := h.deleteHistStatsFromKV(physicalID, histID, int(isIndex)); err != nil {
				return errors.Trace(err)
			}
		}
	}
	// Mark records in mysql.stats_extended as `deleted`.
	rows, _, err = h.execRestrictedSQL(ctx, "select name, column_ids from mysql.stats_extended where table_id = %? and status in (%?, %?)", physicalID, statistics.ExtendedStatsAnalyzed, statistics.ExtendedStatsInited)
	if err != nil {
		return errors.Trace(err)
	}
	if len(rows) == 0 {
		return nil
	}
	for _, row := range rows {
		statsName, strColIDs := row.GetString(0), row.GetString(1)
		// column_ids is stored as a JSON array of int64 column IDs.
		var colIDs []int64
		err = json.Unmarshal([]byte(strColIDs), &colIDs)
		if err != nil {
			logutil.BgLogger().Debug("decode column IDs failed", zap.String("column_ids", strColIDs), zap.Error(err))
			return errors.Trace(err)
		}
		for _, colID := range colIDs {
			found := false
			for _, col := range tblInfo.Columns {
				if colID == col.ID {
					found = true
					break
				}
			}
			if !found {
				// A referenced column was dropped, so the extended-stats
				// record is no longer valid: mark it deleted and move on.
				logutil.BgLogger().Info("mark mysql.stats_extended record as 'deleted' in GC due to dropped columns", zap.String("table_name", tblInfo.Name.L), zap.Int64("table_id", physicalID), zap.String("stats_name", statsName), zap.Int64("dropped_column_id", colID))
				err = h.MarkExtendedStatsDeleted(statsName, physicalID, true)
				if err != nil {
					logutil.BgLogger().Debug("update stats_extended status failed", zap.String("stats_name", statsName), zap.Error(err))
					return errors.Trace(err)
				}
				break
			}
		}
	}
	return nil
}
// ClearOutdatedHistoryStats clear outdated historical stats
func (h *Handle) ClearOutdatedHistoryStats() error {
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	h.mu.Lock()
	defer h.mu.Unlock()
	exec := h.mu.ctx.(sqlexec.SQLExecutor)
	// Count first so the delete pass only runs (and logs) when something is
	// actually older than the configured retention window.
	sql := "select count(*) from mysql.stats_meta_history use index (idx_create_time) where create_time <= NOW() - INTERVAL %? SECOND"
	rs, err := exec.ExecuteInternal(ctx, sql, variable.HistoricalStatsDuration.Load().Seconds())
	if err != nil {
		return err
	}
	if rs == nil {
		return nil
	}
	var rows []chunk.Row
	defer terror.Call(rs.Close)
	if rows, err = sqlexec.DrainRecordSet(ctx, rs, 8); err != nil {
		return errors.Trace(err)
	}
	// count(*) always yields exactly one row.
	count := rows[0].GetInt64(0)
	if count > 0 {
		// Purge both history tables past the retention window.
		sql = "delete from mysql.stats_meta_history use index (idx_create_time) where create_time <= NOW() - INTERVAL %? SECOND"
		_, err = exec.ExecuteInternal(ctx, sql, variable.HistoricalStatsDuration.Load().Seconds())
		if err != nil {
			return err
		}
		sql = "delete from mysql.stats_history use index (idx_create_time) where create_time <= NOW() - INTERVAL %? SECOND"
		_, err = exec.ExecuteInternal(ctx, sql, variable.HistoricalStatsDuration.Load().Seconds())
		logutil.BgLogger().Info("clear outdated historical stats")
		return err
	}
	return nil
}
// gcHistoryStatsFromKV deletes all historical stats records of the given
// physical table inside one pessimistic transaction.
//
// FIX: the result parameter must be named. The deferred finishTransaction
// call assigns to err; with an unnamed result (as before) that assignment was
// lost, so a commit/rollback failure was silently swallowed. The sibling
// functions (deleteHistStatsFromKV, removeDeletedExtendedStats) already use
// the named-result pattern.
func (h *Handle) gcHistoryStatsFromKV(physicalID int64) (err error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	exec := h.mu.ctx.(sqlexec.SQLExecutor)
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	_, err = exec.ExecuteInternal(ctx, "begin pessimistic")
	if err != nil {
		return errors.Trace(err)
	}
	defer func() {
		// Commit on success, roll back on error; the outcome becomes err.
		err = finishTransaction(ctx, exec, err)
	}()
	sql := "delete from mysql.stats_history where table_id = %?"
	_, err = exec.ExecuteInternal(ctx, sql, physicalID)
	if err != nil {
		return errors.Trace(err)
	}
	sql = "delete from mysql.stats_meta_history where table_id = %?"
	_, err = exec.ExecuteInternal(ctx, sql, physicalID)
	return err
}
// deleteHistStatsFromKV deletes all records about a column or an index and updates version.
// It runs inside a single transaction; the deferred finishTransaction commits
// on success and rolls back on failure, folding the result into err.
func (h *Handle) deleteHistStatsFromKV(physicalID int64, histID int64, isIndex int) (err error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	exec := h.mu.ctx.(sqlexec.SQLExecutor)
	_, err = exec.ExecuteInternal(ctx, "begin")
	if err != nil {
		return errors.Trace(err)
	}
	defer func() {
		err = finishTransaction(ctx, exec, err)
	}()
	txn, err := h.mu.ctx.Txn(true)
	if err != nil {
		return errors.Trace(err)
	}
	startTS := txn.StartTS()
	// First of all, we update the version. If this table doesn't exist, it won't have any problem. Because we cannot delete anything.
	if _, err = exec.ExecuteInternal(ctx, "update mysql.stats_meta set version = %? where table_id = %? ", startTS, physicalID); err != nil {
		return err
	}
	// delete histogram meta
	if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_histograms where table_id = %? and hist_id = %? and is_index = %?", physicalID, histID, isIndex); err != nil {
		return err
	}
	// delete top n data
	if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_top_n where table_id = %? and hist_id = %? and is_index = %?", physicalID, histID, isIndex); err != nil {
		return err
	}
	// delete all buckets
	if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_buckets where table_id = %? and hist_id = %? and is_index = %?", physicalID, histID, isIndex); err != nil {
		return err
	}
	// delete all fm sketch
	// FIX: use `=` instead of `:=` here. The original declared a new err with
	// `:=`, shadowing the named return value (flagged by go vet's shadow
	// check) and diverging from every sibling statement above.
	if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_fm_sketch where table_id = %? and hist_id = %? and is_index = %?", physicalID, histID, isIndex); err != nil {
		return err
	}
	if isIndex == 0 {
		// delete the record in mysql.column_stats_usage
		if _, err = exec.ExecuteInternal(ctx, "delete from mysql.column_stats_usage where table_id = %? and column_id = %?", physicalID, histID); err != nil {
			return err
		}
	}
	return nil
}
// DeleteTableStatsFromKV deletes table statistics from kv.
// A statsID refers to statistic of a table or a partition.
// All deletions happen in one transaction; finishTransaction commits or
// rolls back and reports the result through the named return value.
func (h *Handle) DeleteTableStatsFromKV(statsIDs []int64) (err error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	exec := h.mu.ctx.(sqlexec.SQLExecutor)
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	_, err = exec.ExecuteInternal(ctx, "begin")
	if err != nil {
		return errors.Trace(err)
	}
	defer func() {
		err = finishTransaction(ctx, exec, err)
	}()
	txn, err := h.mu.ctx.Txn(true)
	if err != nil {
		return errors.Trace(err)
	}
	startTS := txn.StartTS()
	for _, statsID := range statsIDs {
		// We only update the version so that other tidb will know that this table is deleted.
		if _, err = exec.ExecuteInternal(ctx, "update mysql.stats_meta set version = %? where table_id = %? ", startTS, statsID); err != nil {
			return err
		}
		if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_histograms where table_id = %?", statsID); err != nil {
			return err
		}
		if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_buckets where table_id = %?", statsID); err != nil {
			return err
		}
		if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_top_n where table_id = %?", statsID); err != nil {
			return err
		}
		// Extended stats are only marked deleted (with a bumped version), not removed.
		if _, err = exec.ExecuteInternal(ctx, "update mysql.stats_extended set version = %?, status = %? where table_id = %? and status in (%?, %?)", startTS, statistics.ExtendedStatsDeleted, statsID, statistics.ExtendedStatsAnalyzed, statistics.ExtendedStatsInited); err != nil {
			return err
		}
		if _, err = exec.ExecuteInternal(ctx, "delete from mysql.stats_fm_sketch where table_id = %?", statsID); err != nil {
			return err
		}
		if _, err = exec.ExecuteInternal(ctx, "delete from mysql.column_stats_usage where table_id = %?", statsID); err != nil {
			return err
		}
		if _, err = exec.ExecuteInternal(ctx, "delete from mysql.analyze_options where table_id = %?", statsID); err != nil {
			return err
		}
	}
	return nil
}
// removeDeletedExtendedStats physically purges extended-stats rows that were
// already marked deleted before the given version.
func (h *Handle) removeDeletedExtendedStats(version uint64) (err error) {
	h.mu.Lock()
	defer h.mu.Unlock()
	executor := h.mu.ctx.(sqlexec.SQLExecutor)
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats)
	if _, err = executor.ExecuteInternal(ctx, "begin pessimistic"); err != nil {
		return errors.Trace(err)
	}
	defer func() {
		// Commit on success, roll back otherwise; the result becomes err.
		err = finishTransaction(ctx, executor, err)
	}()
	_, err = executor.ExecuteInternal(ctx,
		"delete from mysql.stats_extended where status = %? and version < %?",
		statistics.ExtendedStatsDeleted, version)
	return
}
|
package main
import (
"fmt"
"sort"
)
// IntSlice adapts a slice of int64 to sort.Interface so it can be ordered
// ascending with sort.Sort.
type IntSlice []int64

// Len reports the number of elements.
func (s IntSlice) Len() int { return len(s) }

// Swap exchanges the elements at positions i and j.
func (s IntSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// Less reports whether the element at i orders before the element at j.
func (s IntSlice) Less(i, j int) bool { return s[i] < s[j] }
// main solves the street-lights problem: n lamps sit at positions a[i] on the
// segment [0, l]; print the minimum radius d so every point of the segment is
// lit. d is the max of: the first lamp's distance to 0, half of every gap
// between adjacent lamps, and the last lamp's distance to l.
func main() {
	var n int
	var l int64
	// FIX: use fmt.Scan instead of fmt.Scanf("%d", ...). Scanf treats
	// newlines strictly (they must match the format), so reading the numbers
	// failed when the input was newline-separated; Scan treats any
	// whitespace, including newlines, as a separator.
	fmt.Scan(&n, &l)
	a := make(IntSlice, n)
	for i := 0; i < n; i++ {
		fmt.Scan(&a[i])
	}
	sort.Sort(a)
	// The left edge must be reached by the first lamp.
	d := float64(a[0])
	// Each interior gap needs to be covered from both sides: half the gap.
	for next := 1; next < n; next++ {
		if gap := float64(a[next] - a[next-1]); gap > 2*d {
			d = gap / 2
		}
	}
	// The right edge must be reached by the last lamp.
	if end := float64(l - a[n-1]); end > d {
		d = end
	}
	fmt.Printf("%f", d)
}
|
package testfixtures
// TomlConfigPath is the relative path to the TOML configuration fixture.
const TomlConfigPath string = "testfixtures/testconfig.toml"

// TestAppPath is the relative path to the sample application fixture.
const TestAppPath string = "testfixtures/postapp"

// TestAppConfigPath maps a configuration kind ("app", "route", "middleware")
// to the file name that holds it inside the test application directory.
var TestAppConfigPath = map[string]string{
	"app":        "config.toml",
	"route":      "routes.toml",
	"middleware": "middlewares.toml",
}
|
package main
import (
"bufio"
"container/list"
"fmt"
"os"
)
// LRU is a fixed-capacity least-recently-used cache backed by a hash index
// and a doubly linked list (front = most recently used).
type LRU struct {
	Len  int                   // maximum number of entries
	Map  map[int]*list.Element // hash index: key -> list node
	link *list.List            // doubly linked list from the standard library
}

// Elem is the payload stored in each list node: the key (needed for index
// cleanup on eviction) and its value.
type Elem struct {
	key   int
	value interface{}
}
// NewLRU builds an empty cache that holds at most Len entries.
func NewLRU(Len int) *LRU {
	lru := &LRU{
		Len:  Len,
		Map:  make(map[int]*list.Element),
		link: list.New(),
	}
	return lru
}
// Get returns the cached value for key and promotes it to most recently used.
// ok reports whether the key was present.
//
// FIX: the original wrote `if e, ok := l.Map[key]; ok`, declaring a new ok
// that shadowed the named return value — Get therefore always reported
// ok == false, even on a cache hit. Assign to the named returns instead.
func (l *LRU) Get(key int) (value interface{}, ok bool) {
	var e *list.Element
	if e, ok = l.Map[key]; ok {
		// Move the hit entry to the front of the recency list.
		l.link.MoveToFront(e)
		value = e.Value.(*Elem).value
	}
	return
}
// Set inserts or updates key with value and promotes it to most recently
// used. If the cache grows past its capacity, the oldest entry is evicted.
func (l *LRU) Set(key int, value interface{}) {
	if existing, hit := l.Map[key]; hit {
		// Already cached: refresh recency and overwrite the value in place.
		l.link.MoveToFront(existing)
		existing.Value.(*Elem).value = value
		return
	}
	node := l.link.PushFront(&Elem{key, value})
	l.Map[key] = node
	if l.link.Len() > l.Len {
		// Over capacity: drop the least recently used entry.
		l.DeleteLast()
	}
}
// DeleteLast evicts the least recently used entry (the list tail), if any.
func (l *LRU) DeleteLast() {
	if tail := l.link.Back(); tail != nil {
		l.removeElem(tail)
	}
}
// delete removes key from the cache if it is present.
func (l *LRU) delete(key int) {
	if node, ok := l.Map[key]; ok {
		l.removeElem(node)
	}
}
// removeElem unlinks e from the recency list and drops its key from the
// hash index.
func (l *LRU) removeElem(e *list.Element) {
	// list.Remove returns the removed element's Value.
	payload := l.link.Remove(e).(*Elem)
	delete(l.Map, payload.key)
}
// print writes the cached values front (newest) to back (oldest) to stdout.
func (l *LRU) print() {
	for node := l.link.Front(); node != nil; node = node.Next() {
		fmt.Printf("%v -> ", node.Value.(*Elem).value)
	}
	fmt.Println()
}
// index is the auto-incrementing key used by enter for demo insertions.
var index int

// enter stores v under the next sequential key and prints the cache ordering.
func enter(l *LRU, v interface{}) {
	index++
	l.Set(index, v)
	l.print()
}
// main reads lines from stdin and feeds each one into a capacity-5 LRU
// cache, printing the cache contents after every insertion.
func main() {
	l := NewLRU(5)
	fmt.Println("please enter a value")
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		t := scanner.Text()
		enter(l, t)
	}
}
|
package mock
// RepositoryMock is a no-op repository test double.
type RepositoryMock struct{}

// ConnectMock is the canned value returned by ConnectName.
const ConnectMock = 66

// ConnectName returns the fixed connection identifier ConnectMock.
func (r RepositoryMock) ConnectName() int { return ConnectMock }
|
package failuredetector
import (
"time"
)
// EvtFailureDetector represents a Eventually Perfect Failure Detector as
// described at page 53 in:
// Christian Cachin, Rachid Guerraoui, and Luís Rodrigues: "Introduction to
// Reliable and Secure Distributed Programming" Springer, 2nd edition, 2011.
//
// NOTE(review): after Start, alive/suspected/delay appear to be touched only
// by the run-loop goroutine; external callers interact via hbIn and stop —
// confirm before adding other entry points.
type EvtFailureDetector struct {
	id            int              // the id of this node
	nodeIDs       []int            // node ids for every node in cluster
	alive         map[int]bool     // map of node ids considered alive
	suspected     map[int]bool     // map of node ids considered suspected
	sr            SuspectRestorer  // Provided SuspectRestorer implementation
	delay         time.Duration    // the current delay for the timeout procedure
	delta         time.Duration    // the delta value to be used when increasing delay
	timeoutSignal *time.Ticker     // the timeout procedure ticker
	hbSend        chan<- Heartbeat // channel for sending outgoing heartbeat messages
	hbIn          chan Heartbeat   // channel for receiving incoming heartbeat messages
	stop          chan struct{}    // channel for signaling a stop request to the main run loop
	testingHook   func()           // DO NOT REMOVE THIS LINE. A no-op when not testing.
}
// NewEvtFailureDetector returns a new Eventual Failure Detector. It takes the
// following arguments:
//
// id: The id of the node running this instance of the failure detector.
//
// nodeIDs: A list of ids for every node in the cluster (including the node
// running this instance of the failure detector).
//
// sr: A leader detector implementing the SuspectRestorer interface.
//
// delta: The initial value for the timeout interval. Also the value to be used
// when increasing delay.
//
// hbSend: A send only channel used to send heartbeats to other nodes.
func NewEvtFailureDetector(id int, nodeIDs []int, sr SuspectRestorer, delta time.Duration, hbSend chan<- Heartbeat) *EvtFailureDetector {
	// Initially every node in the cluster is presumed alive, none suspected.
	alive := make(map[int]bool, len(nodeIDs))
	for _, nodeID := range nodeIDs {
		alive[nodeID] = true
	}
	fd := &EvtFailureDetector{
		id:          id,
		nodeIDs:     nodeIDs,
		alive:       alive,
		suspected:   make(map[int]bool),
		sr:          sr,
		delay:       delta,
		delta:       delta,
		hbSend:      hbSend,
		hbIn:        make(chan Heartbeat, 8),
		stop:        make(chan struct{}),
		testingHook: func() {}, // DO NOT REMOVE THIS LINE. A no-op when not testing.
	}
	return fd
}
// Start starts e's main run loop as a separate goroutine. The main run loop
// handles incoming heartbeat requests and responses. The loop also trigger e's
// timeout procedure at an interval corresponding to e's internal delay
// duration variable.
func (e *EvtFailureDetector) Start() {
	e.timeoutSignal = time.NewTicker(e.delay)
	go func(timeout <-chan time.Time) {
		for {
			e.testingHook() // DO NOT REMOVE THIS LINE. A no-op when not testing.
			select {
			case z := <-e.hbIn:
				// delivering hearbeat reply
				if z.Request {
					// A request from another node: answer with a reply so
					// the sender can mark this node alive.
					e.hbSend <- Heartbeat{
						From:    e.id,
						To:      z.From,
						Request: false,
					}
				} else if !z.Request {
					// A reply to our own request: mark the sender alive.
					e.alive[z.From] = true
				}
			case <-timeout:
				// Ticker fired: run one round of the timeout procedure.
				e.timeout()
			case <-e.stop:
				return
			}
		}
	}(e.timeoutSignal.C)
}
// DeliverHeartbeat delivers heartbeat hb to failure detector e.
// The heartbeat is processed asynchronously by the main run loop.
func (e *EvtFailureDetector) DeliverHeartbeat(hb Heartbeat) {
	e.hbIn <- hb
}
// Stop stops e's main run loop.
// The send blocks until the run loop receives the stop signal.
func (e *EvtFailureDetector) Stop() {
	e.stop <- struct{}{}
}
// timeout runs one round of the failure-detection procedure: it adapts the
// delay, reports suspect/restore transitions, sends a heartbeat request to
// every node, and resets the alive set for the next round.
func (e *EvtFailureDetector) timeout() {
	// If a node is both alive and suspected we suspected it prematurely:
	// increase the timeout delay by delta.
	for i := range e.alive {
		if e.alive[i] && e.suspected[i] {
			e.delay = e.delay + e.delta
		}
	}
	for _, nodeID := range e.nodeIDs {
		if !e.alive[nodeID] && !e.suspected[nodeID] {
			// No heartbeat seen and not yet suspected: report the suspicion.
			e.suspected[nodeID] = true
			e.sr.Suspect(nodeID)
		} else if e.alive[nodeID] && e.suspected[nodeID] {
			// A suspected node answered again: restore it.
			e.suspected[nodeID] = false
			e.sr.Restore(nodeID)
		}
		// Ask every node to prove liveness before the next timeout.
		e.hbSend <- Heartbeat{
			To:      nodeID,
			From:    e.id,
			Request: true,
		}
	}
	if len(e.alive) == len(e.nodeIDs) {
		// Every node answered this round: clear all suspicions.
		for x := range e.suspected {
			delete(e.suspected, x)
		}
	} else if len(e.alive) < len(e.nodeIDs) {
		// Some nodes did not answer: mark each missing node as suspected.
		// FIX: the original wrote `for x := range e.nodeIDs`, which iterates
		// slice *indices* (0..n-1) and then used the index as a node id in
		// e.alive/e.suspected — wrong whenever node ids are not exactly
		// 0..n-1. Range over the node ids themselves, consistent with every
		// other loop in this file. (The pointless `_ = 1` branch is gone.)
		for _, nodeID := range e.nodeIDs {
			if !e.alive[nodeID] {
				e.suspected[nodeID] = true
			}
		}
	}
	// Start the next round with an empty alive set; replies repopulate it.
	e.alive = map[int]bool{}
}
|
package generate
// CONTROLLER_TEMPLATE is the Go text/template used to generate a Java Spring
// REST controller class for a FINT consumer model. The template data exposes
// .Package, .Name, .Identifiers and .Writable, plus helper functions
// (ToLower, ToUpper, ToTitle, modelPkg, resourcePkg, GetAction,
// GetActionPackage). The raw string below is emitted verbatim as Java source
// and must not be reformatted.
const CONTROLLER_TEMPLATE = `package no.fint.consumer.models.{{ modelPkg .Package }}{{ ToLower .Name }};
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.google.common.collect.ImmutableMap;
import io.swagger.annotations.Api;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import no.fint.audit.FintAuditService;
import no.fint.cache.exceptions.*;
import no.fint.consumer.config.Constants;
import no.fint.consumer.config.ConsumerProps;
import no.fint.consumer.event.ConsumerEventUtil;
import no.fint.consumer.event.SynchronousEvents;
import no.fint.consumer.exceptions.*;
import no.fint.consumer.status.StatusCache;
import no.fint.consumer.utils.EventResponses;
import no.fint.consumer.utils.RestEndpoints;
import no.fint.event.model.*;
import no.fint.relations.FintRelationsMediaType;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.util.UriComponentsBuilder;
import javax.servlet.http.HttpServletRequest;
import java.net.UnknownHostException;
import java.net.URI;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
import {{ resourcePkg .Package }}.{{ .Name }}Resource;
import {{ resourcePkg .Package }}.{{ .Name }}Resources;
import {{ GetActionPackage .Package }};
@Slf4j
@Api(tags = {"{{ .Name }}"})
@CrossOrigin
@RestController
@RequestMapping(name = "{{ .Name }}", value = RestEndpoints.{{ ToUpper .Name }}, produces = {FintRelationsMediaType.APPLICATION_HAL_JSON_VALUE, MediaType.APPLICATION_JSON_UTF8_VALUE})
public class {{ .Name }}Controller {
@Autowired(required = false)
private {{ .Name }}CacheService cacheService;
@Autowired
private FintAuditService fintAuditService;
@Autowired
private {{ .Name }}Linker linker;
@Autowired
private ConsumerProps props;
@Autowired
private StatusCache statusCache;
@Autowired
private ConsumerEventUtil consumerEventUtil;
@Autowired
private ObjectMapper objectMapper;
@Autowired
private SynchronousEvents synchronousEvents;
@GetMapping("/last-updated")
public Map<String, String> getLastUpdated(@RequestHeader(name = HeaderConstants.ORG_ID, required = false) String orgId) {
if (cacheService == null) {
throw new CacheDisabledException("{{ .Name }} cache is disabled.");
}
if (props.isOverrideOrgId() || orgId == null) {
orgId = props.getDefaultOrgId();
}
String lastUpdated = Long.toString(cacheService.getLastUpdated(orgId));
return ImmutableMap.of("lastUpdated", lastUpdated);
}
@GetMapping("/cache/size")
public ImmutableMap<String, Integer> getCacheSize(@RequestHeader(name = HeaderConstants.ORG_ID, required = false) String orgId) {
if (cacheService == null) {
throw new CacheDisabledException("{{ .Name }} cache is disabled.");
}
if (props.isOverrideOrgId() || orgId == null) {
orgId = props.getDefaultOrgId();
}
return ImmutableMap.of("size", cacheService.getCacheSize(orgId));
}
@GetMapping
public {{ .Name }}Resources get{{ .Name }}(
@RequestHeader(name = HeaderConstants.ORG_ID, required = false) String orgId,
@RequestHeader(name = HeaderConstants.CLIENT, required = false) String client,
@RequestParam(defaultValue = "0") long sinceTimeStamp,
@RequestParam(defaultValue = "0") int size,
@RequestParam(defaultValue = "0") int offset,
HttpServletRequest request) {
if (cacheService == null) {
throw new CacheDisabledException("{{ .Name }} cache is disabled.");
}
if (props.isOverrideOrgId() || orgId == null) {
orgId = props.getDefaultOrgId();
}
if (client == null) {
client = props.getDefaultClient();
}
log.debug("OrgId: {}, Client: {}", orgId, client);
Event event = new Event(orgId, Constants.COMPONENT, {{ GetAction .Package }}.GET_ALL_{{ ToUpper .Name }}, client);
event.setOperation(Operation.READ);
if (StringUtils.isNotBlank(request.getQueryString())) {
event.setQuery("?" + request.getQueryString());
}
fintAuditService.audit(event);
fintAuditService.audit(event, Status.CACHE);
Stream<{{ .Name }}Resource> resources;
if (size > 0 && offset >= 0 && sinceTimeStamp > 0) {
resources = cacheService.streamSliceSince(orgId, sinceTimeStamp, offset, size);
} else if (size > 0 && offset >= 0) {
resources = cacheService.streamSlice(orgId, offset, size);
} else if (sinceTimeStamp > 0) {
resources = cacheService.streamSince(orgId, sinceTimeStamp);
} else {
resources = cacheService.streamAll(orgId);
}
fintAuditService.audit(event, Status.CACHE_RESPONSE, Status.SENT_TO_CLIENT);
return linker.toResources(resources, offset, size, cacheService.getCacheSize(orgId));
}
{{ range $i, $ident := .Identifiers }}
@GetMapping("/{{ ToLower $ident.Name }}/{id:.+}")
public {{$.Name}}Resource get{{ $.Name }}By{{ ToTitle $ident.Name }}(
@PathVariable String id,
@RequestHeader(name = HeaderConstants.ORG_ID, required = false) String orgId,
@RequestHeader(name = HeaderConstants.CLIENT, required = false) String client) throws InterruptedException {
if (props.isOverrideOrgId() || orgId == null) {
orgId = props.getDefaultOrgId();
}
if (client == null) {
client = props.getDefaultClient();
}
log.debug("{{ $ident.Name }}: {}, OrgId: {}, Client: {}", id, orgId, client);
Event event = new Event(orgId, Constants.COMPONENT, {{ GetAction $.Package }}.GET_{{ ToUpper $.Name }}, client);
event.setOperation(Operation.READ);
event.setQuery("{{ $ident.Name }}/" + id);
if (cacheService != null) {
fintAuditService.audit(event);
fintAuditService.audit(event, Status.CACHE);
Optional<{{ $.Name }}Resource> {{ ToLower $.Name }} = cacheService.get{{ $.Name }}By{{ ToTitle $ident.Name }}(orgId, id);
fintAuditService.audit(event, Status.CACHE_RESPONSE, Status.SENT_TO_CLIENT);
return {{ ToLower $.Name }}.map(linker::toResource).orElseThrow(() -> new EntityNotFoundException(id));
} else {
BlockingQueue<Event> queue = synchronousEvents.register(event);
consumerEventUtil.send(event);
Event response = EventResponses.handle(queue.poll(5, TimeUnit.MINUTES));
if (response.getData() == null ||
response.getData().isEmpty()) throw new EntityNotFoundException(id);
{{ $.Name }}Resource {{ ToLower $.Name }} = objectMapper.convertValue(response.getData().get(0), {{ $.Name }}Resource.class);
fintAuditService.audit(response, Status.SENT_TO_CLIENT);
return linker.toResource({{ ToLower $.Name }});
}
}
{{ end }}
{{ if .Writable }}
// Writable class
@GetMapping("/status/{id}")
public ResponseEntity getStatus(
@PathVariable String id,
@RequestHeader(HeaderConstants.ORG_ID) String orgId,
@RequestHeader(HeaderConstants.CLIENT) String client) {
log.debug("/status/{} for {} from {}", id, orgId, client);
return statusCache.handleStatusRequest(id, orgId, linker, {{.Name}}Resource.class);
}
@PostMapping
public ResponseEntity post{{.Name}}(
@RequestHeader(name = HeaderConstants.ORG_ID) String orgId,
@RequestHeader(name = HeaderConstants.CLIENT) String client,
@RequestBody {{.Name}}Resource body,
@RequestParam(name = "validate", required = false) boolean validate
) {
log.debug("post{{.Name}}, Validate: {}, OrgId: {}, Client: {}", validate, orgId, client);
log.trace("Body: {}", body);
linker.mapLinks(body);
Event event = new Event(orgId, Constants.COMPONENT, {{ GetAction .Package}}.UPDATE_{{ ToUpper .Name }}, client);
event.addObject(objectMapper.disable(SerializationFeature.FAIL_ON_EMPTY_BEANS).convertValue(body, Map.class));
event.setOperation(validate ? Operation.VALIDATE : Operation.CREATE);
consumerEventUtil.send(event);
statusCache.put(event.getCorrId(), event);
URI location = UriComponentsBuilder.fromUriString(linker.self()).path("status/{id}").buildAndExpand(event.getCorrId()).toUri();
return ResponseEntity.status(HttpStatus.ACCEPTED).location(location).build();
}
{{ range $i, $ident := .Identifiers }}
@PutMapping("/{{ ToLower $ident.Name }}/{id:.+}")
public ResponseEntity put{{ $.Name }}By{{ ToTitle $ident.Name }}(
@PathVariable String id,
@RequestHeader(name = HeaderConstants.ORG_ID) String orgId,
@RequestHeader(name = HeaderConstants.CLIENT) String client,
@RequestBody {{$.Name}}Resource body
) {
log.debug("put{{$.Name}}By{{ ToTitle $ident.Name}} {}, OrgId: {}, Client: {}", id, orgId, client);
log.trace("Body: {}", body);
linker.mapLinks(body);
Event event = new Event(orgId, Constants.COMPONENT, {{ GetAction $.Package}}.UPDATE_{{ ToUpper $.Name }}, client);
event.setQuery("{{ ToLower $ident.Name }}/" + id);
event.addObject(objectMapper.disable(SerializationFeature.FAIL_ON_EMPTY_BEANS).convertValue(body, Map.class));
event.setOperation(Operation.UPDATE);
fintAuditService.audit(event);
consumerEventUtil.send(event);
statusCache.put(event.getCorrId(), event);
URI location = UriComponentsBuilder.fromUriString(linker.self()).path("status/{id}").buildAndExpand(event.getCorrId()).toUri();
return ResponseEntity.status(HttpStatus.ACCEPTED).location(location).build();
}
{{ end }}
{{- end }}
//
// Exception handlers
//
@ExceptionHandler(EventResponseException.class)
public ResponseEntity handleEventResponseException(EventResponseException e) {
return ResponseEntity.status(e.getStatus()).body(e.getResponse());
}
@ExceptionHandler(UpdateEntityMismatchException.class)
public ResponseEntity handleUpdateEntityMismatch(Exception e) {
return ResponseEntity.badRequest().body(ErrorResponse.of(e));
}
@ExceptionHandler(EntityNotFoundException.class)
public ResponseEntity handleEntityNotFound(Exception e) {
return ResponseEntity.status(HttpStatus.NOT_FOUND).body(ErrorResponse.of(e));
}
@ExceptionHandler(CreateEntityMismatchException.class)
public ResponseEntity handleCreateEntityMismatch(Exception e) {
return ResponseEntity.badRequest().body(ErrorResponse.of(e));
}
@ExceptionHandler(EntityFoundException.class)
public ResponseEntity handleEntityFound(Exception e) {
return ResponseEntity.status(HttpStatus.FOUND).body(ErrorResponse.of(e));
}
@ExceptionHandler(CacheDisabledException.class)
public ResponseEntity handleBadRequest(Exception e) {
return ResponseEntity.status(HttpStatus.SERVICE_UNAVAILABLE).body(ErrorResponse.of(e));
}
@ExceptionHandler(UnknownHostException.class)
public ResponseEntity handleUnkownHost(Exception e) {
return ResponseEntity.status(HttpStatus.SERVICE_UNAVAILABLE).body(ErrorResponse.of(e));
}
@ExceptionHandler(CacheNotFoundException.class)
public ResponseEntity handleCacheNotFound(Exception e) {
return ResponseEntity.status(HttpStatus.SERVICE_UNAVAILABLE).body(ErrorResponse.of(e));
}
}
`
|
package playfair
import "strings"
// Decrypt reverses a Playfair encryption of cipher using the table derived
// from key, then strips the padding character "X" from the plaintext.
func Decrypt(cipher, key string) (string, error) {
	table := GenerateTable(key)
	var result []rune
	// Walk the ciphertext one bigram (two characters) at a time.
	// FIX: bound the loop with i+1 < len(cipher); the original used i <
	// len(cipher) and panicked with index-out-of-range on odd-length input.
	// A trailing unpaired character is ignored.
	for i := 0; i+1 < len(cipher); i += 2 {
		a, b := rune(cipher[i]), rune(cipher[i+1])
		// Locate both characters in the 5x5 table.
		pos1, err := table.Find(a)
		if err != nil {
			return "", err
		}
		pos2, err := table.Find(b)
		if err != nil {
			return "", err
		}
		var newPos1, newPos2 int
		if pos1/5 == pos2/5 {
			// same row: shift back horizontally
			newPos1, newPos2 = table.ShiftHorizontal(pos1, pos2, -1)
		} else if pos1%5 == pos2%5 {
			// same col: shift back vertically
			newPos1, newPos2 = table.ShiftVertical(pos1, pos2, -1)
		} else {
			// different row & col
			newPos1, newPos2 = table.ShiftCycle(pos1, pos2)
		}
		result = append(result, rune(table[newPos1]), rune(table[newPos2]))
	}
	// Remove the filler character inserted during encryption.
	decrypted := strings.ReplaceAll(string(result), "X", "")
	return decrypted, nil
}
|
package SIMPLE
// Environment maps variable names to their bound operands.
type Environment map[string]Operand

// CopyEnvironment returns a shallow copy of environment; rebinding names in
// the copy does not affect the original.
func CopyEnvironment(environment Environment) Environment {
	clone := make(Environment, len(environment))
	for name, operand := range environment {
		clone[name] = operand
	}
	return clone
}
|
package systems
import (
c "arkanoid/components"
"github.com/ByteArena/ecs"
)
// Views contains references to all views
type Views struct {
	// SpriteView selects entities tagged with both the Sprite and the
	// Transform components.
	SpriteView *ecs.View
}

// InitViews initializes views
func InitViews(manager *ecs.Manager, components *c.Components) *Views {
	return &Views{
		SpriteView: manager.CreateView(ecs.BuildTag(components.Sprite, components.Transform)),
	}
}
|
package main
import "fmt"
// removeDuplicates compacts a sorted slice in place so each value appears
// once, and returns the count of unique elements. Only the first count
// entries of nums are meaningful afterwards; the tail is unspecified.
func removeDuplicates(nums []int) int {
	if len(nums) < 2 {
		return len(nums)
	}
	// write marks the last slot holding a unique value; read scans ahead.
	write := 0
	for read := 1; read < len(nums); read++ {
		if nums[write] != nums[read] {
			write++
			nums[write] = nums[read]
		}
	}
	return write + 1
}
// main demonstrates removeDuplicates on a sorted sample slice and prints the
// deduplicated prefix, one value per line.
func main() {
	nums := []int{0, 0, 1, 1, 1, 2, 2, 3, 3, 4}
	// nums is passed in by reference. (i.e., without making a copy)
	// FIX: renamed the result from `len` to `count` — naming a local `len`
	// shadowed the builtin for the rest of the function.
	count := removeDuplicates(nums)
	// any modification to nums in your function would be known by the caller.
	// using the length returned by your function, it prints the first count elements.
	for i := 0; i < count; i++ {
		fmt.Println(nums[i])
	}
}
|
package equinix
import (
"context"
"fmt"
"log"
"testing"
"github.com/equinix/ne-go"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
// Environment variable names that parameterize the network device acceptance
// tests: metro codes, license material, and Versa/CGENIX credentials.
const (
	networkDeviceMetroEnvVar               = "TF_ACC_NETWORK_DEVICE_METRO"
	networkDeviceSecondaryMetroEnvVar      = "TF_ACC_NETWORK_DEVICE_SECONDARY_METRO"
	networkDeviceLicenseFileEnvVar         = "TF_ACC_NETWORK_DEVICE_LICENSE_FILE"
	networkDeviceVersaController1EnvVar    = "TF_ACC_NETWORK_DEVICE_VERSA_CONTROLLER1"
	networkDeviceVersaController2EnvVar    = "TF_ACC_NETWORK_DEVICE_VERSA_CONTROLLER2"
	networkDeviceVersaLocalIDEnvVar        = "TF_ACC_NETWORK_DEVICE_VERSA_LOCALID"
	networkDeviceVersaRemoteIDEnvVar       = "TF_ACC_NETWORK_DEVICE_VERSA_REMOTEID"
	networkDeviceVersaSerialNumberEnvVar   = "TF_ACC_NETWORK_DEVICE_VERSA_SERIAL"
	networkDeviceCGENIXLicenseKeyEnvVar    = "TF_ACC_NETWORK_DEVICE_CGENIX_LICENSE_KEY"
	networkDeviceCGENIXLicenseSecretEnvVar = "TF_ACC_NETWORK_DEVICE_CGENIX_LICENSE_SECRET"
)
// init registers the sweeper that cleans up leftover NetworkDevice test
// resources.
func init() {
	resource.AddTestSweepers("NetworkDevice", &resource.Sweeper{
		Name: "NetworkDevice",
		F:    testSweepNetworkDevice,
	})
}
// testSweepNetworkDevice deletes sweepable primary network devices left over
// from acceptance tests in the given region. Secondary devices and resources
// whose names do not match the test prefix are skipped.
func testSweepNetworkDevice(region string) error {
	config, err := sharedConfigForRegion(region)
	if err != nil {
		return err
	}
	if err := config.Load(context.Background()); err != nil {
		log.Printf("[INFO][SWEEPER_LOG] error loading configuration: %s", err)
		return err
	}
	// Only devices in these lifecycle states are candidates for deletion.
	devices, err := config.ne.GetDevices([]string{
		ne.DeviceStateInitializing,
		ne.DeviceStateProvisioned,
		ne.DeviceStateProvisioning,
		ne.DeviceStateWaitingSecondary,
		ne.DeviceStateFailed})
	if err != nil {
		log.Printf("[INFO][SWEEPER_LOG] error fetching NetworkDevice list: %s", err)
		return err
	}
	nonSweepableCount := 0
	for _, device := range devices {
		if !isSweepableTestResource(ne.StringValue(device.Name)) {
			nonSweepableCount++
			continue
		}
		// Deleting the primary also covers its secondary; skip non-primaries.
		if ne.StringValue(device.RedundancyType) != "PRIMARY" {
			continue
		}
		// Deletion failures are logged but do not abort the sweep.
		if err := config.ne.DeleteDevice(ne.StringValue(device.UUID)); err != nil {
			log.Printf("[INFO][SWEEPER_LOG] error deleting NetworkDevice resource %s (%s): %s", ne.StringValue(device.UUID), ne.StringValue(device.Name), err)
		} else {
			log.Printf("[INFO][SWEEPER_LOG] sent delete request for NetworkDevice resource %s (%s)", ne.StringValue(device.UUID), ne.StringValue(device.Name))
		}
	}
	if nonSweepableCount > 0 {
		log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonSweepableCount)
	}
	return nil
}
// TestAccNetworkDevice_CSR100V_HA_Managed_Sub provisions a managed,
// subscription-licensed CSR1000V HA pair with an SSH user, then adds ACL
// templates in a second step and verifies they are attached to both devices.
func TestAccNetworkDevice_CSR100V_HA_Managed_Sub(t *testing.T) {
	t.Parallel()
	// Metro code comes from the environment, defaulting to "SV".
	metro, _ := schema.EnvDefaultFunc(networkDeviceMetroEnvVar, "SV")()
	// Template context for the first test step (device + SSH user).
	context := map[string]interface{}{
		"device-resourceName":            "test",
		"device-self_managed":            false,
		"device-byol":                    false,
		"device-name":                    fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-throughput":              500,
		"device-throughput_unit":         "Mbps",
		"device-metro_code":              metro.(string),
		"device-type_code":               "CSR1000V",
		"device-package_code":            "SEC",
		"device-notifications":           []string{"marry@equinix.com", "john@equinix.com"},
		"device-hostname":                fmt.Sprintf("tf-%s", randString(6)),
		"device-term_length":             1,
		"device-version":                 "16.09.05",
		"device-core_count":              2,
		"device-purchase_order_number":   randString(10),
		"device-order_reference":         randString(10),
		"device-interface_count":         24,
		"device-secondary_name":          fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-secondary_hostname":      fmt.Sprintf("tf-%s", randString(6)),
		"device-secondary_notifications": []string{"secondary@equinix.com"},
		"user-resourceName":              "tst-user",
		"user-username":                  fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"user-password":                  randString(10),
	}
	// Second step reuses the context and adds primary/secondary ACL templates.
	contextWithACLs := copyMap(context)
	contextWithACLs["acl-resourceName"] = "acl-pri"
	contextWithACLs["acl-name"] = fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6))
	contextWithACLs["acl-description"] = randString(50)
	contextWithACLs["acl-metroCode"] = metro.(string)
	contextWithACLs["acl-secondary_resourceName"] = "acl-sec"
	contextWithACLs["acl-secondary_name"] = fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6))
	contextWithACLs["acl-secondary_description"] = randString(50)
	contextWithACLs["acl-secondary_metroCode"] = metro.(string)
	deviceResourceName := fmt.Sprintf("equinix_network_device.%s", context["device-resourceName"].(string))
	userResourceName := fmt.Sprintf("equinix_network_ssh_user.%s", context["user-resourceName"].(string))
	priACLResourceName := fmt.Sprintf("equinix_network_acl_template.%s", contextWithACLs["acl-resourceName"].(string))
	secACLResourceName := fmt.Sprintf("equinix_network_acl_template.%s", contextWithACLs["acl-secondary_resourceName"].(string))
	var primary, secondary ne.Device
	var user ne.SSHUser
	var primaryACL, secondaryACL ne.ACLTemplate
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				// Step 1: create the HA pair and SSH user; verify attributes,
				// provisioning status, redundancy, and user/device binding.
				Config: newTestAccConfig(context).withDevice().withSSHUser().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNeDeviceExists(deviceResourceName, &primary),
					testAccNeDeviceAttributes(&primary, context),
					testAccNeDeviceStatusAttributes(&primary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateRegistered),
					testAccNeDeviceSecondaryExists(&primary, &secondary),
					testAccNeDeviceSecondaryAttributes(&secondary, context),
					testAccNeDeviceStatusAttributes(&secondary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateRegistered),
					testAccNeDeviceRedundancyAttributes(&primary, &secondary),
					testAccNeDeviceHAAttributes(deviceResourceName),
					testAccNeSSHUserExists(userResourceName, &user),
					testAccNeSSHUserAttributes(&user, []*ne.Device{&primary, &secondary}, context),
					resource.TestCheckResourceAttrSet(userResourceName, "uuid"),
				),
			},
			{
				// Step 2: add ACL templates and verify both devices use them.
				Config: newTestAccConfig(contextWithACLs).withDevice().
					withSSHUser().withACL().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNetworkACLTemplateExists(priACLResourceName, &primaryACL),
					testAccNetworkACLTemplateExists(secACLResourceName, &secondaryACL),
					testAccNeDeviceExists(deviceResourceName, &primary),
					testAccNeDeviceSecondaryExists(&primary, &secondary),
					testAccNeDeviceACLs(&primary, &secondary, &primaryACL, &secondaryACL),
				),
			},
		},
	})
}
// TestAccNetworkDevice_CSR100V_HA_Self_BYOL provisions a redundant (HA) pair of
// self-managed, BYOL-licensed CSR1000V devices together with a device-level SSH
// public key, verifies device, redundancy and license attributes, then in a
// second step adds primary and secondary ACL templates and verifies they are
// applied to both devices.
func TestAccNetworkDevice_CSR100V_HA_Self_BYOL(t *testing.T) {
	t.Parallel()
	// Target metro comes from the environment, defaulting to "SV".
	metro, _ := schema.EnvDefaultFunc(networkDeviceMetroEnvVar, "SV")()
	context := map[string]interface{}{
		"device-resourceName":            "test",
		"device-self_managed":            true,
		"device-byol":                    true,
		"device-name":                    fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-throughput":              500,
		"device-throughput_unit":         "Mbps",
		"device-metro_code":              metro.(string),
		"device-type_code":               "CSR1000V",
		"device-package_code":            "SEC",
		"device-notifications":           []string{"marry@equinix.com", "john@equinix.com"},
		"device-hostname":                fmt.Sprintf("tf-%s", randString(6)),
		"device-term_length":             1,
		"device-version":                 "16.09.05",
		"device-core_count":              2,
		"device-purchase_order_number":   randString(10),
		"device-order_reference":         randString(10),
		"device-secondary_name":          fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-secondary_hostname":      fmt.Sprintf("tf-%s", randString(6)),
		"device-secondary_notifications": []string{"secondary@equinix.com"},
		"sshkey-resourceName":            "test",
		"sshkey-name":                    fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"sshkey-public_key":              "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCXdzXBHaVpKpdO0udnB+4JOgUq7APO2rPXfrevvlZrps98AtlwXXVWZ5duRH5NFNfU4G9HCSiAPsebgjY0fG85tcShpXfHfACLt0tBW8XhfLQP2T6S50FQ1brBdURMDCMsD7duOXqvc0dlbs2/KcswHvuUmqVzob3bz7n1bQ48wIHsPg4ARqYhy5LN3OkllJH/6GEfqi8lKZx01/P/gmJMORcJujuOyXRB+F2iXBVYdhjML3Qg4+tEekBcVZOxUbERRZ0pvQ52Y6wUhn2VsjljixyqeOdmD0m6DayDQgSWms6bKPpBqN7zhXXk4qe8bXT4tQQba65b2CQ2A91jw2KgM/YZNmjyUJ+Rf1cQosJf9twqbAZDZ6rAEmj9zzvQ5vD/CGuzxdVMkePLlUK4VGjPu7cVzhXrnq4318WqZ5/lNiCST8NQ0fssChN8ANUzr/p/wwv3faFMVNmjxXTZMsbMFT/fbb2MVVuqNFN65drntlg6/xEao8gZROuRYiakBx8= user@host",
	}
	// Second step reuses the device/SSH-key config and layers primary and
	// secondary ACL templates on top of it.
	contextWithACLs := copyMap(context)
	contextWithACLs["acl-resourceName"] = "acl-pri"
	contextWithACLs["acl-name"] = fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6))
	contextWithACLs["acl-description"] = randString(50)
	contextWithACLs["acl-metroCode"] = metro.(string)
	contextWithACLs["acl-secondary_resourceName"] = "acl-sec"
	contextWithACLs["acl-secondary_name"] = fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6))
	contextWithACLs["acl-secondary_description"] = randString(50)
	contextWithACLs["acl-secondary_metroCode"] = metro.(string)
	deviceResourceName := fmt.Sprintf("equinix_network_device.%s", context["device-resourceName"].(string))
	priACLResourceName := fmt.Sprintf("equinix_network_acl_template.%s", contextWithACLs["acl-resourceName"].(string))
	secACLResourceName := fmt.Sprintf("equinix_network_acl_template.%s", contextWithACLs["acl-secondary_resourceName"].(string))
	var primary, secondary ne.Device
	var primaryACL, secondaryACL ne.ACLTemplate
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				// Step 1: create the HA pair with an SSH key; expect both devices
				// provisioned with the BYOL license applied.
				Config: newTestAccConfig(context).withDevice().withSSHKey().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNeDeviceExists(deviceResourceName, &primary),
					testAccNeDeviceAttributes(&primary, context),
					testAccNeDeviceStatusAttributes(&primary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateApplied),
					testAccNeDeviceSecondaryExists(&primary, &secondary),
					testAccNeDeviceSecondaryAttributes(&secondary, context),
					testAccNeDeviceStatusAttributes(&secondary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateApplied),
					testAccNeDeviceRedundancyAttributes(&primary, &secondary),
					testAccNeDeviceHAAttributes(deviceResourceName),
				),
			},
			{
				// Step 2: add ACL templates and verify both devices reference them.
				Config: newTestAccConfig(contextWithACLs).withDevice().
					withSSHKey().withACL().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNetworkACLTemplateExists(priACLResourceName, &primaryACL),
					testAccNetworkACLTemplateExists(secACLResourceName, &secondaryACL),
					testAccNeDeviceExists(deviceResourceName, &primary),
					testAccNeDeviceSecondaryExists(&primary, &secondary),
					testAccNeDeviceACLs(&primary, &secondary, &primaryACL, &secondaryACL),
				),
			},
		},
	})
}
// TestAccNetworkDevice_vSRX_HA_Managed_Sub provisions a redundant (HA) pair of
// Equinix-managed, subscription-licensed Juniper vSRX devices, then in a second
// step adds an SSH user to both devices and re-verifies device and user
// attributes.
func TestAccNetworkDevice_vSRX_HA_Managed_Sub(t *testing.T) {
	t.Parallel()
	// Target metro comes from the environment, defaulting to "SV".
	metro, _ := schema.EnvDefaultFunc(networkDeviceMetroEnvVar, "SV")()
	context := map[string]interface{}{
		"device-resourceName":            "test",
		"device-self_managed":            false,
		"device-byol":                    false,
		"device-name":                    fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-metro_code":              metro.(string),
		"device-type_code":               "VSRX",
		"device-package_code":            "STD",
		"device-notifications":           []string{"marry@equinix.com", "john@equinix.com"},
		"device-hostname":                fmt.Sprintf("tf-%s", randString(6)),
		"device-term_length":             1,
		"device-version":                 "19.2R2.7",
		"device-core_count":              2,
		"device-purchase_order_number":   randString(10),
		"device-order_reference":         randString(10),
		"device-secondary_name":          fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-secondary_hostname":      fmt.Sprintf("tf-%s", randString(6)),
		"device-secondary_notifications": []string{"secondary@equinix.com"},
	}
	// Second step keeps the device config and adds an SSH user resource.
	contextWithChanges := copyMap(context)
	contextWithChanges["user-resourceName"] = "test"
	contextWithChanges["user-username"] = fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6))
	contextWithChanges["user-password"] = randString(10)
	deviceResourceName := fmt.Sprintf("equinix_network_device.%s", context["device-resourceName"].(string))
	userResourceName := fmt.Sprintf("equinix_network_ssh_user.%s", contextWithChanges["user-resourceName"].(string))
	var primary, secondary ne.Device
	var user ne.SSHUser
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				// Step 1: create the HA pair; expect both devices provisioned with
				// the subscription license registered.
				Config: newTestAccConfig(context).withDevice().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNeDeviceExists(deviceResourceName, &primary),
					testAccNeDeviceAttributes(&primary, context),
					testAccNeDeviceStatusAttributes(&primary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateRegistered),
					testAccNeDeviceSecondaryExists(&primary, &secondary),
					testAccNeDeviceSecondaryAttributes(&secondary, context),
					testAccNeDeviceStatusAttributes(&secondary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateRegistered),
					testAccNeDeviceRedundancyAttributes(&primary, &secondary),
					testAccNeDeviceHAAttributes(deviceResourceName),
				),
			},
			{
				// Step 2: add the SSH user; verify it exists and is linked to both
				// the primary and secondary devices.
				Config: newTestAccConfig(contextWithChanges).withDevice().withSSHUser().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNeDeviceExists(deviceResourceName, &primary),
					testAccNeDeviceAttributes(&primary, contextWithChanges),
					testAccNeDeviceStatusAttributes(&primary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateRegistered),
					testAccNeDeviceSecondaryExists(&primary, &secondary),
					testAccNeDeviceSecondaryAttributes(&secondary, contextWithChanges),
					testAccNeDeviceStatusAttributes(&secondary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateRegistered),
					testAccNeSSHUserExists(userResourceName, &user),
					testAccNeSSHUserAttributes(&user, []*ne.Device{&primary, &secondary}, contextWithChanges),
				),
			},
		},
	})
}
// TestAccNetworkDevice_vSRX_HA_Managed_BYOL provisions a redundant (HA) pair of
// Equinix-managed, BYOL-licensed Juniper vSRX devices with license files and
// ACL templates, then in a second step updates names, notifications and
// additional bandwidth and adds an SSH user.
func TestAccNetworkDevice_vSRX_HA_Managed_BYOL(t *testing.T) {
	t.Parallel()
	// Metro and license-file path come from the environment, with local
	// fall-back defaults.
	metro, _ := schema.EnvDefaultFunc(networkDeviceMetroEnvVar, "SV")()
	licFile, _ := schema.EnvDefaultFunc(networkDeviceLicenseFileEnvVar, "jnpr.lic")()
	context := map[string]interface{}{
		"device-resourceName":            "test",
		"device-self_managed":            false,
		"device-byol":                    true,
		"device-name":                    fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-license_file":            licFile.(string),
		"device-metro_code":              metro.(string),
		"device-type_code":               "VSRX",
		"device-package_code":            "STD",
		"device-notifications":           []string{"marry@equinix.com", "john@equinix.com"},
		"device-hostname":                fmt.Sprintf("tf-%s", randString(6)),
		"device-term_length":             1,
		"device-version":                 "19.2R2.7",
		"device-core_count":              2,
		"device-purchase_order_number":   randString(10),
		"device-order_reference":         randString(10),
		"device-secondary_name":          fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-secondary_license_file":  licFile.(string),
		"device-secondary_hostname":      fmt.Sprintf("tf-%s", randString(6)),
		"device-secondary_notifications": []string{"secondary@equinix.com"},
		"acl-resourceName":               "acl-pri",
		"acl-name":                       fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"acl-description":                randString(50),
		"acl-metroCode":                  metro.(string),
		"acl-secondary_resourceName":     "acl-sec",
		"acl-secondary_name":             fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"acl-secondary_description":      randString(50),
		"acl-secondary_metroCode":        metro.(string),
	}
	// Second step: in-place updates (names, bandwidth, notifications) plus a
	// new SSH user resource.
	contextWithChanges := copyMap(context)
	contextWithChanges["device-name"] = fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6))
	contextWithChanges["device-additional_bandwidth"] = 100
	contextWithChanges["device-notifications"] = []string{"jerry@equinix.com", "tom@equinix.com"}
	contextWithChanges["device-secondary_name"] = fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6))
	contextWithChanges["device-secondary_additional_bandwidth"] = 100
	contextWithChanges["device-secondary_notifications"] = []string{"miki@equinix.com", "mini@equinix.com"}
	contextWithChanges["user-resourceName"] = "test"
	contextWithChanges["user-username"] = fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6))
	contextWithChanges["user-password"] = randString(10)
	deviceResourceName := fmt.Sprintf("equinix_network_device.%s", context["device-resourceName"].(string))
	userResourceName := fmt.Sprintf("equinix_network_ssh_user.%s", contextWithChanges["user-resourceName"].(string))
	var primary, secondary ne.Device
	var user ne.SSHUser
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				// Step 1: create the HA pair with ACLs; both devices should have
				// uploaded license file IDs set.
				Config: newTestAccConfig(context).withDevice().withACL().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNeDeviceExists(deviceResourceName, &primary),
					testAccNeDeviceAttributes(&primary, context),
					testAccNeDeviceStatusAttributes(&primary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateRegistered),
					testAccNeDeviceSecondaryExists(&primary, &secondary),
					testAccNeDeviceSecondaryAttributes(&secondary, context),
					testAccNeDeviceStatusAttributes(&secondary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateRegistered),
					testAccNeDeviceRedundancyAttributes(&primary, &secondary),
					testAccNeDeviceHAAttributes(deviceResourceName),
					resource.TestCheckResourceAttrSet(deviceResourceName, "license_file_id"),
					resource.TestCheckResourceAttrSet(deviceResourceName, "secondary_device.0.license_file_id"),
				),
			},
			{
				// Step 2: apply the updates and verify new attributes plus the
				// SSH user on both devices.
				Config: newTestAccConfig(contextWithChanges).withDevice().withACL().withSSHUser().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNeDeviceExists(deviceResourceName, &primary),
					testAccNeDeviceAttributes(&primary, contextWithChanges),
					testAccNeDeviceStatusAttributes(&primary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateRegistered),
					testAccNeDeviceSecondaryExists(&primary, &secondary),
					testAccNeDeviceSecondaryAttributes(&secondary, contextWithChanges),
					testAccNeDeviceStatusAttributes(&secondary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateRegistered),
					testAccNeSSHUserExists(userResourceName, &user),
					testAccNeSSHUserAttributes(&user, []*ne.Device{&primary, &secondary}, contextWithChanges),
				),
			},
		},
	})
}
// TestAccNetworkDevice_vSRX_HA_Self_BYOL provisions a redundant (HA) pair of
// self-managed, BYOL-licensed Juniper vSRX devices in a single step, together
// with ACL templates and a device-level SSH public key, and verifies device,
// redundancy and license attributes on both members of the pair.
func TestAccNetworkDevice_vSRX_HA_Self_BYOL(t *testing.T) {
	t.Parallel()
	// Target metro comes from the environment, defaulting to "SV".
	metro, _ := schema.EnvDefaultFunc(networkDeviceMetroEnvVar, "SV")()
	cfg := map[string]interface{}{
		"device-resourceName":            "test",
		"device-self_managed":            true,
		"device-byol":                    true,
		"device-name":                    fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-metro_code":              metro.(string),
		"device-type_code":               "VSRX",
		"device-package_code":            "STD",
		"device-notifications":           []string{"marry@equinix.com", "john@equinix.com"},
		"device-hostname":                fmt.Sprintf("tf-%s", randString(6)),
		"device-term_length":             1,
		"device-version":                 "19.2R2.7",
		"device-core_count":              2,
		"device-purchase_order_number":   randString(10),
		"device-order_reference":         randString(10),
		"device-secondary_name":          fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-secondary_hostname":      fmt.Sprintf("tf-%s", randString(6)),
		"device-secondary_notifications": []string{"secondary@equinix.com"},
		"acl-resourceName":               "acl-pri",
		"acl-name":                       fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"acl-description":                randString(50),
		"acl-metroCode":                  metro.(string),
		"acl-secondary_resourceName":     "acl-sec",
		"acl-secondary_name":             fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"acl-secondary_description":      randString(50),
		"acl-secondary_metroCode":        metro.(string),
		"sshkey-resourceName":            "test",
		"sshkey-name":                    fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"sshkey-public_key":              "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCXdzXBHaVpKpdO0udnB+4JOgUq7APO2rPXfrevvlZrps98AtlwXXVWZ5duRH5NFNfU4G9HCSiAPsebgjY0fG85tcShpXfHfACLt0tBW8XhfLQP2T6S50FQ1brBdURMDCMsD7duOXqvc0dlbs2/KcswHvuUmqVzob3bz7n1bQ48wIHsPg4ARqYhy5LN3OkllJH/6GEfqi8lKZx01/P/gmJMORcJujuOyXRB+F2iXBVYdhjML3Qg4+tEekBcVZOxUbERRZ0pvQ52Y6wUhn2VsjljixyqeOdmD0m6DayDQgSWms6bKPpBqN7zhXXk4qe8bXT4tQQba65b2CQ2A91jw2KgM/YZNmjyUJ+Rf1cQosJf9twqbAZDZ6rAEmj9zzvQ5vD/CGuzxdVMkePLlUK4VGjPu7cVzhXrnq4318WqZ5/lNiCST8NQ0fssChN8ANUzr/p/wwv3faFMVNmjxXTZMsbMFT/fbb2MVVuqNFN65drntlg6/xEao8gZROuRYiakBx8= user@host",
	}
	var pri, sec ne.Device
	deviceRes := fmt.Sprintf("equinix_network_device.%s", cfg["device-resourceName"].(string))
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				// Single step: devices, ACL templates and SSH key together; both
				// devices must end up provisioned with the BYOL license applied.
				Config: newTestAccConfig(cfg).withDevice().withACL().withSSHKey().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNeDeviceExists(deviceRes, &pri),
					testAccNeDeviceAttributes(&pri, cfg),
					testAccNeDeviceStatusAttributes(&pri, ne.DeviceStateProvisioned, ne.DeviceLicenseStateApplied),
					testAccNeDeviceSecondaryExists(&pri, &sec),
					testAccNeDeviceSecondaryAttributes(&sec, cfg),
					testAccNeDeviceStatusAttributes(&sec, ne.DeviceStateProvisioned, ne.DeviceLicenseStateApplied),
					testAccNeDeviceRedundancyAttributes(&pri, &sec),
					testAccNeDeviceHAAttributes(deviceRes),
				),
			},
		},
	})
}
// TestAccNetworkDevice_PaloAlto_HA_Managed_Sub provisions a redundant (HA) pair
// of Equinix-managed, subscription-licensed Palo Alto PA-VM devices with ACL
// templates, then in a second step adds additional bandwidth and an SSH user
// and re-verifies device and user attributes.
func TestAccNetworkDevice_PaloAlto_HA_Managed_Sub(t *testing.T) {
	t.Parallel()
	// Target metro comes from the environment, defaulting to "SV".
	metro, _ := schema.EnvDefaultFunc(networkDeviceMetroEnvVar, "SV")()
	context := map[string]interface{}{
		"device-resourceName":            "test",
		"device-self_managed":            false,
		"device-byol":                    false,
		"device-name":                    fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-metro_code":              metro.(string),
		"device-type_code":               "PA-VM",
		"device-package_code":            "VM100",
		"device-notifications":           []string{"marry@equinix.com", "john@equinix.com"},
		"device-hostname":                fmt.Sprintf("tf-%s", randString(6)),
		"device-term_length":             1,
		"device-version":                 "9.0.4",
		"device-core_count":              2,
		"device-purchase_order_number":   randString(10),
		"device-order_reference":         randString(10),
		"device-secondary_name":          fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-secondary_hostname":      fmt.Sprintf("tf-%s", randString(6)),
		"device-secondary_notifications": []string{"secondary@equinix.com"},
		"acl-resourceName":               "acl-pri",
		"acl-name":                       fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"acl-description":                randString(50),
		"acl-metroCode":                  metro.(string),
		"acl-secondary_resourceName":     "acl-sec",
		"acl-secondary_name":             fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"acl-secondary_description":      randString(50),
		"acl-secondary_metroCode":        metro.(string),
	}
	// Second step: add 50 Mbps additional bandwidth to each device and create
	// an SSH user.
	contextWithChanges := copyMap(context)
	contextWithChanges["device-additional_bandwidth"] = 50
	contextWithChanges["device-secondary_additional_bandwidth"] = 50
	contextWithChanges["user-resourceName"] = "tst-user"
	contextWithChanges["user-username"] = fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6))
	contextWithChanges["user-password"] = randString(10)
	var primary, secondary ne.Device
	var primaryACL, secondaryACL ne.ACLTemplate
	var user ne.SSHUser
	deviceResourceName := fmt.Sprintf("equinix_network_device.%s", context["device-resourceName"].(string))
	priACLResourceName := fmt.Sprintf("equinix_network_acl_template.%s", context["acl-resourceName"].(string))
	secACLResourceName := fmt.Sprintf("equinix_network_acl_template.%s", context["acl-secondary_resourceName"].(string))
	userResourceName := fmt.Sprintf("equinix_network_ssh_user.%s", contextWithChanges["user-resourceName"].(string))
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				// Step 1: create the HA pair with ACL templates; both devices must
				// be provisioned with the subscription license registered and have
				// the ACLs attached.
				Config: newTestAccConfig(context).withDevice().withACL().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNeDeviceExists(deviceResourceName, &primary),
					testAccNeDeviceAttributes(&primary, context),
					testAccNeDeviceStatusAttributes(&primary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateRegistered),
					testAccNeDeviceSecondaryExists(&primary, &secondary),
					testAccNeDeviceSecondaryAttributes(&secondary, context),
					testAccNeDeviceStatusAttributes(&secondary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateRegistered),
					testAccNeDeviceRedundancyAttributes(&primary, &secondary),
					testAccNeDeviceHAAttributes(deviceResourceName),
					testAccNetworkACLTemplateExists(priACLResourceName, &primaryACL),
					testAccNetworkACLTemplateExists(secACLResourceName, &secondaryACL),
					testAccNeDeviceACLs(&primary, &secondary, &primaryACL, &secondaryACL),
				),
			},
			{
				// Step 2: apply bandwidth changes and the SSH user; verify the user
				// is linked to both devices.
				Config: newTestAccConfig(contextWithChanges).withDevice().withACL().withSSHUser().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNeDeviceExists(deviceResourceName, &primary),
					testAccNeDeviceAttributes(&primary, contextWithChanges),
					testAccNeDeviceStatusAttributes(&primary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateRegistered),
					testAccNeDeviceSecondaryExists(&primary, &secondary),
					testAccNeDeviceSecondaryAttributes(&secondary, contextWithChanges),
					testAccNeDeviceStatusAttributes(&secondary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateRegistered),
					testAccNeSSHUserExists(userResourceName, &user),
					testAccNeSSHUserAttributes(&user, []*ne.Device{&primary, &secondary}, contextWithChanges),
				),
			},
		},
	})
}
// TestAccNetworkDevice_PaloAlto_HA_Self_BYOL provisions a redundant (HA) pair
// of self-managed, BYOL-licensed Palo Alto PA-VM devices with a device-level
// SSH public key, then in a second step adds primary and secondary ACL
// templates and verifies they are applied to both devices.
func TestAccNetworkDevice_PaloAlto_HA_Self_BYOL(t *testing.T) {
	t.Parallel()
	// Target metro comes from the environment, defaulting to "SV".
	metro, _ := schema.EnvDefaultFunc(networkDeviceMetroEnvVar, "SV")()
	context := map[string]interface{}{
		"device-resourceName":            "test",
		"device-self_managed":            true,
		"device-byol":                    true,
		"device-name":                    fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-metro_code":              metro.(string),
		"device-type_code":               "PA-VM",
		"device-package_code":            "VM100",
		"device-notifications":           []string{"marry@equinix.com", "john@equinix.com"},
		"device-hostname":                fmt.Sprintf("tf-%s", randString(6)),
		"device-term_length":             1,
		"device-version":                 "9.0.4",
		"device-core_count":              2,
		"device-purchase_order_number":   randString(10),
		"device-order_reference":         randString(10),
		"device-secondary_name":          fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-secondary_hostname":      fmt.Sprintf("tf-%s", randString(6)),
		"device-secondary_notifications": []string{"secondary@equinix.com"},
		"sshkey-resourceName":            "test",
		"sshkey-name":                    fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"sshkey-public_key":              "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCXdzXBHaVpKpdO0udnB+4JOgUq7APO2rPXfrevvlZrps98AtlwXXVWZ5duRH5NFNfU4G9HCSiAPsebgjY0fG85tcShpXfHfACLt0tBW8XhfLQP2T6S50FQ1brBdURMDCMsD7duOXqvc0dlbs2/KcswHvuUmqVzob3bz7n1bQ48wIHsPg4ARqYhy5LN3OkllJH/6GEfqi8lKZx01/P/gmJMORcJujuOyXRB+F2iXBVYdhjML3Qg4+tEekBcVZOxUbERRZ0pvQ52Y6wUhn2VsjljixyqeOdmD0m6DayDQgSWms6bKPpBqN7zhXXk4qe8bXT4tQQba65b2CQ2A91jw2KgM/YZNmjyUJ+Rf1cQosJf9twqbAZDZ6rAEmj9zzvQ5vD/CGuzxdVMkePLlUK4VGjPu7cVzhXrnq4318WqZ5/lNiCST8NQ0fssChN8ANUzr/p/wwv3faFMVNmjxXTZMsbMFT/fbb2MVVuqNFN65drntlg6/xEao8gZROuRYiakBx8= user@host",
	}
	// Second step reuses the device/SSH-key config and layers primary and
	// secondary ACL templates on top of it.
	contextWithACLs := copyMap(context)
	contextWithACLs["acl-resourceName"] = "acl-pri"
	contextWithACLs["acl-name"] = fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6))
	contextWithACLs["acl-description"] = randString(50)
	contextWithACLs["acl-metroCode"] = metro.(string)
	contextWithACLs["acl-secondary_resourceName"] = "acl-sec"
	contextWithACLs["acl-secondary_name"] = fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6))
	contextWithACLs["acl-secondary_description"] = randString(50)
	contextWithACLs["acl-secondary_metroCode"] = metro.(string)
	deviceResourceName := fmt.Sprintf("equinix_network_device.%s", context["device-resourceName"].(string))
	priACLResourceName := fmt.Sprintf("equinix_network_acl_template.%s", contextWithACLs["acl-resourceName"].(string))
	secACLResourceName := fmt.Sprintf("equinix_network_acl_template.%s", contextWithACLs["acl-secondary_resourceName"].(string))
	var primary, secondary ne.Device
	var primaryACL, secondaryACL ne.ACLTemplate
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				// Step 1: create the HA pair with the SSH key; expect both devices
				// provisioned with the BYOL license applied.
				Config: newTestAccConfig(context).withDevice().withSSHKey().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNeDeviceExists(deviceResourceName, &primary),
					testAccNeDeviceAttributes(&primary, context),
					testAccNeDeviceStatusAttributes(&primary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateApplied),
					testAccNeDeviceSecondaryExists(&primary, &secondary),
					testAccNeDeviceSecondaryAttributes(&secondary, context),
					testAccNeDeviceStatusAttributes(&secondary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateApplied),
					testAccNeDeviceRedundancyAttributes(&primary, &secondary),
					testAccNeDeviceHAAttributes(deviceResourceName),
				),
			},
			{
				// Step 2: add ACL templates and verify both devices reference them.
				Config: newTestAccConfig(contextWithACLs).withDevice().withSSHKey().withACL().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNetworkACLTemplateExists(priACLResourceName, &primaryACL),
					testAccNetworkACLTemplateExists(secACLResourceName, &secondaryACL),
					testAccNeDeviceExists(deviceResourceName, &primary),
					testAccNeDeviceSecondaryExists(&primary, &secondary),
					testAccNeDeviceACLs(&primary, &secondary, &primaryACL, &secondaryACL),
				),
			},
		},
	})
}
// TestAccNetworkDevice_CSRSDWAN_HA_Self_BYOL provisions a redundant (HA) pair
// of self-managed, BYOL-licensed Cisco SD-WAN (CSRSDWAN) devices in a single
// step, with per-device license files, vendor configuration (site ID / system
// IP) and ACL templates, then verifies device, redundancy and license
// attributes.
func TestAccNetworkDevice_CSRSDWAN_HA_Self_BYOL(t *testing.T) {
	t.Parallel()
	// Metro and license-file path come from the environment, with local
	// fall-back defaults.
	metro, _ := schema.EnvDefaultFunc(networkDeviceMetroEnvVar, "SV")()
	licFile, _ := schema.EnvDefaultFunc(networkDeviceLicenseFileEnvVar, "CSRSDWAN.cfg")()
	context := map[string]interface{}{
		"device-resourceName":                           "test",
		"device-self_managed":                           true,
		"device-byol":                                   true,
		"device-name":                                   fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-metro_code":                             metro.(string),
		"device-license_file":                           licFile.(string),
		"device-throughput":                             500,
		"device-throughput_unit":                        "Mbps",
		"device-type_code":                              "CSRSDWAN",
		"device-package_code":                           "ESSENTIALS",
		"device-notifications":                          []string{"marry@equinix.com", "john@equinix.com"},
		"device-term_length":                            1,
		"device-version":                                "16.12.3",
		"device-core_count":                             2,
		"device-purchase_order_number":                  randString(10),
		"device-order_reference":                        randString(10),
		"device-vendorConfig_enabled":                   true,
		"device-vendorConfig_siteId":                    "10",
		"device-vendorConfig_systemIpAddress":           "1.1.1.1",
		"device-secondary_name":                         fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-secondary_license_file":                 licFile.(string),
		"device-secondary_notifications":                []string{"secondary@equinix.com"},
		"device-secondary_vendorConfig_enabled":         true,
		"device-secondary_vendorConfig_siteId":          "20",
		"device-secondary_vendorConfig_systemIpAddress": "2.2.2.2",
		"acl-resourceName":                              "acl-pri",
		"acl-name":                                      fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"acl-description":                               randString(50),
		"acl-metroCode":                                 metro.(string),
		"acl-secondary_resourceName":                    "acl-sec",
		"acl-secondary_name":                            fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"acl-secondary_description":                     randString(50),
		"acl-secondary_metroCode":                       metro.(string),
	}
	deviceResourceName := fmt.Sprintf("equinix_network_device.%s", context["device-resourceName"].(string))
	priACLResourceName := fmt.Sprintf("equinix_network_acl_template.%s", context["acl-resourceName"].(string))
	secACLResourceName := fmt.Sprintf("equinix_network_acl_template.%s", context["acl-secondary_resourceName"].(string))
	var primary, secondary ne.Device
	var primaryACL, secondaryACL ne.ACLTemplate
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				// Single step: devices and ACL templates together; both devices
				// must end up provisioned with the BYOL license applied and must
				// expose uploaded license file IDs.
				Config: newTestAccConfig(context).withDevice().withACL().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNeDeviceExists(deviceResourceName, &primary),
					testAccNeDeviceAttributes(&primary, context),
					testAccNeDeviceStatusAttributes(&primary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateApplied),
					testAccNeDeviceSecondaryExists(&primary, &secondary),
					testAccNeDeviceSecondaryAttributes(&secondary, context),
					testAccNeDeviceStatusAttributes(&secondary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateApplied),
					testAccNeDeviceRedundancyAttributes(&primary, &secondary),
					testAccNetworkACLTemplateExists(priACLResourceName, &primaryACL),
					testAccNetworkACLTemplateExists(secACLResourceName, &secondaryACL),
					testAccNeDeviceACLs(&primary, &secondary, &primaryACL, &secondaryACL),
					testAccNeDeviceHAAttributes(deviceResourceName),
					resource.TestCheckResourceAttrSet(deviceResourceName, "license_file_id"),
					resource.TestCheckResourceAttrSet(deviceResourceName, "secondary_device.0.license_file_id"),
				),
			},
		},
	})
}
// TestAccNetworkDevice_Versa_HA_Self_BYOL provisions a redundant (HA) pair of
// self-managed, BYOL-licensed Versa SD-WAN devices in a single step, with
// vendor configuration (controllers, local/remote IDs, serial number) sourced
// from environment variables, plus ACL templates, and verifies device,
// redundancy and license attributes.
func TestAccNetworkDevice_Versa_HA_Self_BYOL(t *testing.T) {
	t.Parallel()
	// Metro and Versa vendor-config inputs come from the environment, with
	// placeholder defaults for each.
	metro, _ := schema.EnvDefaultFunc(networkDeviceMetroEnvVar, "SV")()
	controller1, _ := schema.EnvDefaultFunc(networkDeviceVersaController1EnvVar, "1.1.1.1")()
	controller2, _ := schema.EnvDefaultFunc(networkDeviceVersaController2EnvVar, "2.2.2.2")()
	localID, _ := schema.EnvDefaultFunc(networkDeviceVersaLocalIDEnvVar, "test@versa.com")()
	remoteID, _ := schema.EnvDefaultFunc(networkDeviceVersaRemoteIDEnvVar, "test@versa.com")()
	serialNumber, _ := schema.EnvDefaultFunc(networkDeviceVersaSerialNumberEnvVar, "Test")()
	context := map[string]interface{}{
		"device-resourceName":                        "test",
		"device-self_managed":                        true,
		"device-byol":                                true,
		"device-name":                                fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-metro_code":                          metro.(string),
		"device-type_code":                           "VERSA_SDWAN",
		"device-package_code":                        "FLEX_VNF_2",
		"device-notifications":                       []string{"marry@equinix.com", "john@equinix.com"},
		"device-term_length":                         1,
		"device-version":                             "16.1R2S8",
		"device-core_count":                          2,
		"device-purchase_order_number":               randString(10),
		"device-order_reference":                     randString(10),
		"device-vendorConfig_enabled":                true,
		"device-vendorConfig_controller1":            controller1.(string),
		"device-vendorConfig_controller2":            controller2.(string),
		"device-vendorConfig_localId":                localID.(string),
		"device-vendorConfig_remoteId":               remoteID.(string),
		"device-vendorConfig_serialNumber":           serialNumber.(string),
		"device-secondary_name":                      fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-secondary_notifications":             []string{"secondary@equinix.com"},
		"device-secondary_vendorConfig_enabled":      true,
		"device-secondary_vendorConfig_controller1":  controller1.(string),
		"device-secondary_vendorConfig_controller2":  controller2.(string),
		"device-secondary_vendorConfig_localId":      localID.(string),
		"device-secondary_vendorConfig_remoteId":     remoteID.(string),
		"device-secondary_vendorConfig_serialNumber": serialNumber.(string),
		"acl-resourceName":                           "acl-pri",
		"acl-name":                                   fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"acl-description":                            randString(50),
		"acl-metroCode":                              metro.(string),
		"acl-secondary_resourceName":                 "acl-sec",
		"acl-secondary_name":                         fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"acl-secondary_description":                  randString(50),
		"acl-secondary_metroCode":                    metro.(string),
	}
	deviceResourceName := fmt.Sprintf("equinix_network_device.%s", context["device-resourceName"].(string))
	priACLResourceName := fmt.Sprintf("equinix_network_acl_template.%s", context["acl-resourceName"].(string))
	secACLResourceName := fmt.Sprintf("equinix_network_acl_template.%s", context["acl-secondary_resourceName"].(string))
	var primary, secondary ne.Device
	var primaryACL, secondaryACL ne.ACLTemplate
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				// Single step: devices and ACL templates together; both devices
				// must end up provisioned with the BYOL license applied.
				Config: newTestAccConfig(context).withDevice().withACL().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNeDeviceExists(deviceResourceName, &primary),
					testAccNeDeviceAttributes(&primary, context),
					testAccNeDeviceStatusAttributes(&primary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateApplied),
					testAccNeDeviceSecondaryExists(&primary, &secondary),
					testAccNeDeviceSecondaryAttributes(&secondary, context),
					testAccNeDeviceStatusAttributes(&secondary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateApplied),
					testAccNeDeviceRedundancyAttributes(&primary, &secondary),
					testAccNetworkACLTemplateExists(priACLResourceName, &primaryACL),
					testAccNetworkACLTemplateExists(secACLResourceName, &secondaryACL),
					testAccNeDeviceACLs(&primary, &secondary, &primaryACL, &secondaryACL),
					testAccNeDeviceHAAttributes(deviceResourceName),
				),
			},
		},
	})
}
// TestAccNetworkDevice_CGENIX_HA_Self_BYOL provisions a redundant (HA) pair of
// self-managed, BYOL-licensed CloudGenix SD-WAN devices in a single step, with
// vendor license key/secret sourced from environment variables (random values
// as fall-back) plus ACL templates, and verifies device, redundancy and
// license attributes.
func TestAccNetworkDevice_CGENIX_HA_Self_BYOL(t *testing.T) {
	t.Parallel()
	// Metro and CloudGenix license credentials come from the environment;
	// license key/secret default to random strings when unset.
	metro, _ := schema.EnvDefaultFunc(networkDeviceMetroEnvVar, "SV")()
	licenseKey, _ := schema.EnvDefaultFunc(networkDeviceCGENIXLicenseKeyEnvVar, randString(10))()
	licenseSecret, _ := schema.EnvDefaultFunc(networkDeviceCGENIXLicenseSecretEnvVar, randString(10))()
	context := map[string]interface{}{
		"device-resourceName":                         "test",
		"device-self_managed":                         true,
		"device-byol":                                 true,
		"device-name":                                 fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-metro_code":                           metro.(string),
		"device-type_code":                            "CGENIXSDWAN",
		"device-package_code":                         "3102V",
		"device-notifications":                        []string{"marry@equinix.com", "john@equinix.com"},
		"device-term_length":                          1,
		"device-version":                              "5.2.1-b11",
		"device-core_count":                           2,
		"device-purchase_order_number":                randString(10),
		"device-order_reference":                      randString(10),
		"device-vendorConfig_enabled":                 true,
		"device-vendorConfig_licenseKey":              licenseKey.(string),
		"device-vendorConfig_licenseSecret":           licenseSecret.(string),
		"device-secondary_name":                       fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"device-secondary_notifications":              []string{"secondary@equinix.com"},
		"device-secondary_vendorConfig_enabled":       true,
		"device-secondary_vendorConfig_licenseKey":    licenseKey.(string),
		"device-secondary_vendorConfig_licenseSecret": licenseSecret.(string),
		"acl-resourceName":                            "acl-pri",
		"acl-name":                                    fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"acl-description":                             randString(50),
		"acl-metroCode":                               metro.(string),
		"acl-secondary_resourceName":                  "acl-sec",
		"acl-secondary_name":                          fmt.Sprintf("%s-%s", tstResourcePrefix, randString(6)),
		"acl-secondary_description":                   randString(50),
		"acl-secondary_metroCode":                     metro.(string),
	}
	deviceResourceName := fmt.Sprintf("equinix_network_device.%s", context["device-resourceName"].(string))
	priACLResourceName := fmt.Sprintf("equinix_network_acl_template.%s", context["acl-resourceName"].(string))
	secACLResourceName := fmt.Sprintf("equinix_network_acl_template.%s", context["acl-secondary_resourceName"].(string))
	var primary, secondary ne.Device
	var primaryACL, secondaryACL ne.ACLTemplate
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				// Single step: devices and ACL templates together; both devices
				// must end up provisioned with the BYOL license applied.
				Config: newTestAccConfig(context).withDevice().withACL().build(),
				Check: resource.ComposeTestCheckFunc(
					testAccNeDeviceExists(deviceResourceName, &primary),
					testAccNeDeviceAttributes(&primary, context),
					testAccNeDeviceStatusAttributes(&primary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateApplied),
					testAccNeDeviceSecondaryExists(&primary, &secondary),
					testAccNeDeviceSecondaryAttributes(&secondary, context),
					testAccNeDeviceStatusAttributes(&secondary, ne.DeviceStateProvisioned, ne.DeviceLicenseStateApplied),
					testAccNeDeviceRedundancyAttributes(&primary, &secondary),
					testAccNetworkACLTemplateExists(priACLResourceName, &primaryACL),
					testAccNetworkACLTemplateExists(secACLResourceName, &secondaryACL),
					testAccNeDeviceACLs(&primary, &secondary, &primaryACL, &secondaryACL),
					testAccNeDeviceHAAttributes(deviceResourceName),
				),
			},
		},
	})
}
// testAccNeDeviceExists verifies that the named device resource is present in
// Terraform state with an ID, confirms the device exists on the Network Edge
// API side, and stores the fetched device for use by follow-up checks.
func testAccNeDeviceExists(resourceName string, device *ne.Device) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		res, found := s.RootModule().Resources[resourceName]
		if !found {
			return fmt.Errorf("resource not found: %s", resourceName)
		}
		deviceID := res.Primary.ID
		if deviceID == "" {
			return fmt.Errorf("resource has no ID attribute set")
		}
		// Query the API to confirm the device really exists.
		api := testAccProvider.Meta().(*Config).ne
		fetched, err := api.GetDevice(deviceID)
		if err != nil {
			return fmt.Errorf("error when fetching network device '%s': %s", deviceID, err)
		}
		*device = *fetched
		return nil
	}
}
// testAccNeDeviceSecondaryExists fetches the secondary device referenced by
// the primary's redundant UUID and stores it for later attribute checks.
func testAccNeDeviceSecondaryExists(primary, secondary *ne.Device) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		secondaryID := ne.StringValue(primary.RedundantUUID)
		if secondaryID == "" {
			return fmt.Errorf("secondary device UUID is not set")
		}
		api := testAccProvider.Meta().(*Config).ne
		fetched, err := api.GetDevice(secondaryID)
		if err != nil {
			return fmt.Errorf("error when fetching network device '%s': %s", secondaryID, err)
		}
		*secondary = *fetched
		return nil
	}
}
// testAccNeDevicePairExists checks that the named device resource exists in
// Terraform state and that both the primary device and its redundant
// (secondary) counterpart can be fetched from the Network Edge API.
// The fetched devices are stored in the given pointers for follow-up checks.
func testAccNeDevicePairExists(resourceName string, primary, secondary *ne.Device) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[resourceName]
		if !ok {
			return fmt.Errorf("resource not found: %s", resourceName)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("resource has no ID attribute set")
		}
		client := testAccProvider.Meta().(*Config).ne
		resp, err := client.GetDevice(rs.Primary.ID)
		if err != nil {
			return fmt.Errorf("error when fetching primary network device '%s': %s", rs.Primary.ID, err)
		}
		*primary = *resp
		// Guard against an unset redundant UUID before querying the API,
		// consistent with testAccNeDeviceSecondaryExists.
		secondaryID := ne.StringValue(resp.RedundantUUID)
		if secondaryID == "" {
			return fmt.Errorf("secondary device UUID is not set")
		}
		resp, err = client.GetDevice(secondaryID)
		if err != nil {
			// Report the secondary UUID that was actually queried
			// (previously this message showed the primary's ID).
			return fmt.Errorf("error when fetching secondary network device '%s': %s", secondaryID, err)
		}
		*secondary = *resp
		return nil
	}
}
// testAccNeDeviceAttributes returns a check comparing a fetched device's
// attributes against the expected values in the test context map. Each
// comparison is applied only when its "device-*" key is present in ctx, so
// one helper serves many differently-shaped test configurations.
func testAccNeDeviceAttributes(device *ne.Device, ctx map[string]interface{}) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if v, ok := ctx["device-name"]; ok && ne.StringValue(device.Name) != v.(string) {
			return fmt.Errorf("name does not match %v - %v", ne.StringValue(device.Name), v)
		}
		if v, ok := ctx["device-self_managed"]; ok && ne.BoolValue(device.IsSelfManaged) != v.(bool) {
			return fmt.Errorf("self_managed does not match %v - %v", ne.BoolValue(device.IsSelfManaged), v)
		}
		if v, ok := ctx["device-byol"]; ok && ne.BoolValue(device.IsBYOL) != v.(bool) {
			return fmt.Errorf("byol does not match %v - %v", ne.BoolValue(device.IsBYOL), v)
		}
		if v, ok := ctx["device-throughput"]; ok && ne.IntValue(device.Throughput) != v.(int) {
			return fmt.Errorf("throughput does not match %v - %v", ne.IntValue(device.Throughput), v)
		}
		if v, ok := ctx["device-throughput_unit"]; ok && ne.StringValue(device.ThroughputUnit) != v.(string) {
			return fmt.Errorf("throughput_unit does not match %v - %v", ne.StringValue(device.ThroughputUnit), v)
		}
		if v, ok := ctx["device-metro_code"]; ok && ne.StringValue(device.MetroCode) != v.(string) {
			return fmt.Errorf("metro_code does not match %v - %v", ne.StringValue(device.MetroCode), v)
		}
		if v, ok := ctx["device-type_code"]; ok && ne.StringValue(device.TypeCode) != v.(string) {
			return fmt.Errorf("type_code does not match %v - %v", ne.StringValue(device.TypeCode), v)
		}
		if v, ok := ctx["device-package_code"]; ok && ne.StringValue(device.PackageCode) != v.(string) {
			return fmt.Errorf("device-package_code does not match %v - %v", ne.StringValue(device.PackageCode), v)
		}
		if v, ok := ctx["device-notifications"]; ok && !slicesMatch(device.Notifications, v.([]string)) {
			return fmt.Errorf("device-notifications does not match %v - %v", device.Notifications, v)
		}
		if v, ok := ctx["device-hostname"]; ok && ne.StringValue(device.HostName) != v.(string) {
			return fmt.Errorf("device-hostname does not match %v - %v", ne.StringValue(device.HostName), v)
		}
		if v, ok := ctx["device-term_length"]; ok && ne.IntValue(device.TermLength) != v.(int) {
			return fmt.Errorf("device-term_length does not match %v - %v", ne.IntValue(device.TermLength), v)
		}
		if v, ok := ctx["device-version"]; ok && ne.StringValue(device.Version) != v.(string) {
			return fmt.Errorf("device-version does not match %v - %v", ne.StringValue(device.Version), v)
		}
		if v, ok := ctx["device-core_count"]; ok && ne.IntValue(device.CoreCount) != v.(int) {
			return fmt.Errorf("device-core_count does not match %v - %v", ne.IntValue(device.CoreCount), v)
		}
		if v, ok := ctx["device-purchase_order_number"]; ok && ne.StringValue(device.PurchaseOrderNumber) != v.(string) {
			return fmt.Errorf("device-purchase_order_number does not match %v - %v", ne.StringValue(device.PurchaseOrderNumber), v)
		}
		if v, ok := ctx["device-order_reference"]; ok && ne.StringValue(device.OrderReference) != v.(string) {
			return fmt.Errorf("device-order_reference does not match %v - %v", ne.StringValue(device.OrderReference), v)
		}
		if v, ok := ctx["device-interface_count"]; ok && ne.IntValue(device.InterfaceCount) != v.(int) {
			return fmt.Errorf("device-interface_count does not match %v - %v", ne.IntValue(device.InterfaceCount), v)
		}
		// Vendor configuration entries are plain map lookups; a key absent
		// from device.VendorConfiguration compares as the empty string.
		if v, ok := ctx["device-vendorConfig_siteId"]; ok && device.VendorConfiguration["siteId"] != v.(string) {
			return fmt.Errorf("device-vendorConfig_siteId does not match %v - %v", device.VendorConfiguration["siteId"], v)
		}
		if v, ok := ctx["device-vendorConfig_systemIpAddress"]; ok && device.VendorConfiguration["systemIpAddress"] != v.(string) {
			return fmt.Errorf("device-vendorConfig_systemIpAddress does not match %v - %v", device.VendorConfiguration["systemIpAddress"], v)
		}
		if v, ok := ctx["device-vendorConfig_licenseKey"]; ok && device.VendorConfiguration["licenseKey"] != v.(string) {
			return fmt.Errorf("device-vendorConfig_licenseKey does not match %v - %v", device.VendorConfiguration["licenseKey"], v)
		}
		if v, ok := ctx["device-vendorConfig_licenseSecret"]; ok && device.VendorConfiguration["licenseSecret"] != v.(string) {
			return fmt.Errorf("device-vendorConfig_licenseSecret does not match %v - %v", device.VendorConfiguration["licenseSecret"], v)
		}
		if v, ok := ctx["device-vendorConfig_controller1"]; ok && device.VendorConfiguration["controller1"] != v.(string) {
			return fmt.Errorf("device-vendorConfig_controller1 does not match %v - %v", device.VendorConfiguration["controller1"], v)
		}
		if v, ok := ctx["device-vendorConfig_controller2"]; ok && device.VendorConfiguration["controller2"] != v.(string) {
			return fmt.Errorf("device-vendorConfig_controller2 does not match %v - %v", device.VendorConfiguration["controller2"], v)
		}
		if v, ok := ctx["device-vendorConfig_localId"]; ok && device.VendorConfiguration["localId"] != v.(string) {
			return fmt.Errorf("device-vendorConfig_localId does not match %v - %v", device.VendorConfiguration["localId"], v)
		}
		if v, ok := ctx["device-vendorConfig_remoteId"]; ok && device.VendorConfiguration["remoteId"] != v.(string) {
			return fmt.Errorf("device-vendorConfig_remoteId does not match %v - %v", device.VendorConfiguration["remoteId"], v)
		}
		if v, ok := ctx["device-vendorConfig_serialNumber"]; ok && device.VendorConfiguration["serialNumber"] != v.(string) {
			return fmt.Errorf("device-vendorConfig_serialNumber does not match %v - %v", device.VendorConfiguration["serialNumber"], v)
		}
		return nil
	}
}
// testAccNeDeviceSecondaryAttributes reuses testAccNeDeviceAttributes for the
// secondary device by building a copy of the context in which each
// "device-secondary_*" value (when present) overlays its primary
// "device-*" counterpart.
func testAccNeDeviceSecondaryAttributes(device *ne.Device, ctx map[string]interface{}) resource.TestCheckFunc {
	secCtx := make(map[string]interface{}, len(ctx))
	for key, value := range ctx {
		secCtx[key] = value
	}
	// Pairs of {secondary key, primary key it replaces}.
	overrides := [][2]string{
		{"device-secondary_name", "device-name"},
		{"device-secondary_hostname", "device-hostname"},
		{"device-secondary_notifications", "device-notifications"},
		{"device-secondary_vendorConfig_siteId", "device-vendorConfig_siteId"},
		{"device-secondary_vendorConfig_systemIpAddress", "device-vendorConfig_systemIpAddress"},
		{"device-secondary_vendorConfig_licenseKey", "device-vendorConfig_licenseKey"},
		{"device-secondary_vendorConfig_licenseSecret", "device-vendorConfig_licenseSecret"},
		{"device-secondary_vendorConfig_controller1", "device-vendorConfig_controller1"},
		{"device-secondary_vendorConfig_controller2", "device-vendorConfig_controller2"},
		{"device-secondary_vendorConfig_localId", "device-vendorConfig_localId"},
		{"device-secondary_vendorConfig_remoteId", "device-vendorConfig_remoteId"},
		{"device-secondary_vendorConfig_serialNumber", "device-vendorConfig_serialNumber"},
	}
	for _, o := range overrides {
		if v, ok := ctx[o[0]]; ok {
			secCtx[o[1]] = v
		}
	}
	return testAccNeDeviceAttributes(device, secCtx)
}
// testAccNeDeviceRedundancyAttributes verifies that the primary/secondary
// devices cross-reference each other: redundancy types are PRIMARY/SECONDARY
// and each device's redundant UUID points at the other device.
func testAccNeDeviceRedundancyAttributes(primary, secondary *ne.Device) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if ne.StringValue(primary.RedundancyType) != "PRIMARY" {
			return fmt.Errorf("redundancy_type does not match %v - %v", ne.StringValue(primary.RedundancyType), "PRIMARY")
		}
		if ne.StringValue(primary.RedundantUUID) != ne.StringValue(secondary.UUID) {
			// Dereference with ne.StringValue so the message shows the UUID,
			// not a *string pointer address.
			return fmt.Errorf("redundant_id does not match %v - %v", ne.StringValue(primary.RedundantUUID), ne.StringValue(secondary.UUID))
		}
		if ne.StringValue(secondary.RedundancyType) != "SECONDARY" {
			return fmt.Errorf("redundancy_type does not match %v - %v", ne.StringValue(secondary.RedundancyType), "SECONDARY")
		}
		if ne.StringValue(secondary.RedundantUUID) != ne.StringValue(primary.UUID) {
			return fmt.Errorf("redundant_id does not match %v - %v", ne.StringValue(secondary.RedundantUUID), ne.StringValue(primary.UUID))
		}
		return nil
	}
}
// testAccNeDeviceStatusAttributes checks that a device reached the expected
// provisioning status and license status.
func testAccNeDeviceStatusAttributes(device *ne.Device, provStatus, licStatus string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		uuid := ne.StringValue(device.UUID)
		if got := ne.StringValue(device.Status); got != provStatus {
			return fmt.Errorf("status for device %q does not match %v - %v", uuid, got, provStatus)
		}
		if got := ne.StringValue(device.LicenseStatus); got != licStatus {
			return fmt.Errorf("license_status for device %q does not match %v - %v", uuid, got, licStatus)
		}
		return nil
	}
}
// testAccNeDeviceACLs verifies each ACL template is attached to its
// respective device and has reached the PROVISIONED device-ACL status.
func testAccNeDeviceACLs(primary, secondary *ne.Device, primaryACL, secondaryACL *ne.ACLTemplate) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		// Attachment: each template's DeviceUUID must point at its device.
		if got, want := ne.StringValue(primaryACL.DeviceUUID), ne.StringValue(primary.UUID); got != want {
			return fmt.Errorf("Primary ACL %s device UUID does not match %v - %v", ne.StringValue(primaryACL.UUID), got, want)
		}
		if got, want := ne.StringValue(secondaryACL.DeviceUUID), ne.StringValue(secondary.UUID); got != want {
			return fmt.Errorf("Secondary ACL %s device UUID does not match %v - %v", ne.StringValue(secondaryACL.UUID), got, want)
		}
		// Provisioning: both templates must be fully applied on the devices.
		if got := ne.StringValue(primaryACL.DeviceACLStatus); got != ne.ACLDeviceStatusProvisioned {
			return fmt.Errorf("Primary ACL %s device_acl_status does not match %v - %v", ne.StringValue(primaryACL.UUID), got, ne.ACLDeviceStatusProvisioned)
		}
		if got := ne.StringValue(secondaryACL.DeviceACLStatus); got != ne.ACLDeviceStatusProvisioned {
			return fmt.Errorf("Secondary ACL %s device_acl_status does not match %v - %v", ne.StringValue(secondaryACL.UUID), got, ne.ACLDeviceStatusProvisioned)
		}
		return nil
	}
}
// testAccNeDeviceHAAttributes asserts that the computed attributes of an HA
// device pair (and its secondary_device block) are all populated in state.
func testAccNeDeviceHAAttributes(deviceResourceName string) resource.TestCheckFunc {
	attrs := []string{"uuid", "ibx", "region", "ssh_ip_address", "ssh_ip_fqdn"}
	checks := make([]resource.TestCheckFunc, 0, 2*len(attrs))
	// Primary device attributes first, then the secondary block's,
	// matching the original check order.
	for _, attr := range attrs {
		checks = append(checks, resource.TestCheckResourceAttrSet(deviceResourceName, attr))
	}
	for _, attr := range attrs {
		checks = append(checks, resource.TestCheckResourceAttrSet(deviceResourceName, "secondary_device.0."+attr))
	}
	return resource.ComposeTestCheckFunc(checks...)
}
// withDevice appends a network device resource definition to the test
// configuration and returns the builder for chaining.
func (t *testAccConfig) withDevice() *testAccConfig {
	t.config = t.config + testAccNetworkDevice(t.ctx)
	return t
}
// withACL appends ACL template resource definitions to the test
// configuration and returns the builder for chaining.
func (t *testAccConfig) withACL() *testAccConfig {
	t.config = t.config + testAccNetworkDeviceACL(t.ctx)
	return t
}
// withSSHKey appends an SSH key resource definition to the test
// configuration and returns the builder for chaining.
func (t *testAccConfig) withSSHKey() *testAccConfig {
	t.config = t.config + testAccNetworkDeviceSSHKey(t.ctx)
	return t
}
// testAccNetworkDevice renders Terraform configuration for an
// equinix_network_device resource (plus its account data sources) from the
// given test context map. Optional attributes are emitted only when the
// corresponding "device-*" key is present in ctx; "device-secondary_*" keys
// additionally produce a secondary_device block for HA setups.
func testAccNetworkDevice(ctx map[string]interface{}) string {
	var config string
	// Account data source for the primary metro.
	config += nprintf(`
data "equinix_network_account" "test" {
  metro_code = "%{device-metro_code}"
  status     = "Active"
}`, ctx)
	// Separate account lookup when the secondary device uses its own metro.
	if _, ok := ctx["device-secondary_metro_code"]; ok {
		config += nprintf(`
data "equinix_network_account" "test-secondary" {
  metro_code = "%{device-secondary_metro_code}"
  status     = "Active"
}`, ctx)
	}
	// Mandatory device attributes.
	config += nprintf(`
resource "equinix_network_device" "%{device-resourceName}" {
  self_managed   = %{device-self_managed}
  byol           = %{device-byol}
  name           = "%{device-name}"
  metro_code     = "%{device-metro_code}"
  type_code      = "%{device-type_code}"
  package_code   = "%{device-package_code}"
  notifications  = %{device-notifications}
  term_length    = %{device-term_length}
  account_number = data.equinix_network_account.test.number
  version        = "%{device-version}"
  core_count     = %{device-core_count}`, ctx)
	if _, ok := ctx["device-purchase_order_number"]; ok {
		config += nprintf(`
  purchase_order_number = "%{device-purchase_order_number}"`, ctx)
	}
	// BUG FIX: this guard previously tested "device-purchase_order_number",
	// so order_reference was skipped when only it was set and an unset
	// placeholder was rendered when only purchase_order_number was set.
	if _, ok := ctx["device-order_reference"]; ok {
		config += nprintf(`
  order_reference = "%{device-order_reference}"`, ctx)
	}
	if _, ok := ctx["device-additional_bandwidth"]; ok {
		config += nprintf(`
  additional_bandwidth = %{device-additional_bandwidth}`, ctx)
	}
	if _, ok := ctx["device-throughput"]; ok {
		config += nprintf(`
  throughput      = %{device-throughput}
  throughput_unit = "%{device-throughput_unit}"`, ctx)
	}
	if _, ok := ctx["device-hostname"]; ok {
		config += nprintf(`
  hostname = "%{device-hostname}"`, ctx)
	}
	if _, ok := ctx["device-interface_count"]; ok {
		config += nprintf(`
  interface_count = %{device-interface_count}`, ctx)
	}
	if _, ok := ctx["acl-resourceName"]; ok {
		config += nprintf(`
  acl_template_id = equinix_network_acl_template.%{acl-resourceName}.id`, ctx)
	}
	if _, ok := ctx["sshkey-resourceName"]; ok {
		config += nprintf(`
  ssh_key {
    username = "test"
    key_name = equinix_network_ssh_key.%{sshkey-resourceName}.name
  }`, ctx)
	}
	if _, ok := ctx["device-license_file"]; ok {
		config += nprintf(`
  license_file = "%{device-license_file}"`, ctx)
	}
	// Vendor configuration map: each entry is optional.
	if _, ok := ctx["device-vendorConfig_enabled"]; ok {
		config += nprintf(`
  vendor_configuration = {`, ctx)
		if _, ok := ctx["device-vendorConfig_siteId"]; ok {
			config += nprintf(`
    siteId = "%{device-vendorConfig_siteId}"`, ctx)
		}
		if _, ok := ctx["device-vendorConfig_systemIpAddress"]; ok {
			config += nprintf(`
    systemIpAddress = "%{device-vendorConfig_systemIpAddress}"`, ctx)
		}
		if _, ok := ctx["device-vendorConfig_licenseKey"]; ok {
			config += nprintf(`
    licenseKey = "%{device-vendorConfig_licenseKey}"`, ctx)
		}
		if _, ok := ctx["device-vendorConfig_licenseSecret"]; ok {
			config += nprintf(`
    licenseSecret = "%{device-vendorConfig_licenseSecret}"`, ctx)
		}
		if _, ok := ctx["device-vendorConfig_controller1"]; ok {
			config += nprintf(`
    controller1 = "%{device-vendorConfig_controller1}"`, ctx)
		}
		if _, ok := ctx["device-vendorConfig_controller2"]; ok {
			config += nprintf(`
    controller2 = "%{device-vendorConfig_controller2}"`, ctx)
		}
		if _, ok := ctx["device-vendorConfig_localId"]; ok {
			config += nprintf(`
    localId = "%{device-vendorConfig_localId}"`, ctx)
		}
		if _, ok := ctx["device-vendorConfig_remoteId"]; ok {
			config += nprintf(`
    remoteId = "%{device-vendorConfig_remoteId}"`, ctx)
		}
		if _, ok := ctx["device-vendorConfig_serialNumber"]; ok {
			config += nprintf(`
    serialNumber = "%{device-vendorConfig_serialNumber}"`, ctx)
		}
		config += nprintf(`
  }`, ctx)
	}
	// Secondary device (HA pair) is rendered when a secondary name is given.
	if _, ok := ctx["device-secondary_name"]; ok {
		config += nprintf(`
  secondary_device {
    name = "%{device-secondary_name}"`, ctx)
		// The secondary either lives in its own metro (with its own account)
		// or shares the primary's metro and account.
		if _, ok := ctx["device-secondary_metro_code"]; ok {
			config += nprintf(`
    metro_code     = "%{device-secondary_metro_code}"
    account_number = data.equinix_network_account.test-secondary.number`, ctx)
		} else {
			config += nprintf(`
    metro_code     = "%{device-metro_code}"
    account_number = data.equinix_network_account.test.number`, ctx)
		}
		config += nprintf(`
    notifications = %{device-secondary_notifications}`, ctx)
		if _, ok := ctx["device-secondary_additional_bandwidth"]; ok {
			config += nprintf(`
    additional_bandwidth = %{device-secondary_additional_bandwidth}`, ctx)
		}
		if _, ok := ctx["device-secondary_hostname"]; ok {
			config += nprintf(`
    hostname = "%{device-secondary_hostname}"`, ctx)
		}
		if _, ok := ctx["acl-secondary_resourceName"]; ok {
			config += nprintf(`
    acl_template_id = equinix_network_acl_template.%{acl-secondary_resourceName}.id`, ctx)
		}
		if _, ok := ctx["sshkey-resourceName"]; ok {
			config += nprintf(`
    ssh_key {
      username = "test"
      key_name = equinix_network_ssh_key.%{sshkey-resourceName}.name
    }`, ctx)
		}
		if _, ok := ctx["device-secondary_license_file"]; ok {
			config += nprintf(`
    license_file = "%{device-secondary_license_file}"`, ctx)
		}
		// Secondary vendor configuration mirrors the primary's key set.
		if _, ok := ctx["device-secondary_vendorConfig_enabled"]; ok {
			config += nprintf(`
    vendor_configuration = {`, ctx)
			if _, ok := ctx["device-secondary_vendorConfig_siteId"]; ok {
				config += nprintf(`
      siteId = "%{device-secondary_vendorConfig_siteId}"`, ctx)
			}
			if _, ok := ctx["device-secondary_vendorConfig_systemIpAddress"]; ok {
				config += nprintf(`
      systemIpAddress = "%{device-secondary_vendorConfig_systemIpAddress}"`, ctx)
			}
			if _, ok := ctx["device-secondary_vendorConfig_licenseKey"]; ok {
				config += nprintf(`
      licenseKey = "%{device-secondary_vendorConfig_licenseKey}"`, ctx)
			}
			if _, ok := ctx["device-secondary_vendorConfig_licenseSecret"]; ok {
				config += nprintf(`
      licenseSecret = "%{device-secondary_vendorConfig_licenseSecret}"`, ctx)
			}
			if _, ok := ctx["device-secondary_vendorConfig_controller1"]; ok {
				config += nprintf(`
      controller1 = "%{device-secondary_vendorConfig_controller1}"`, ctx)
			}
			if _, ok := ctx["device-secondary_vendorConfig_controller2"]; ok {
				config += nprintf(`
      controller2 = "%{device-secondary_vendorConfig_controller2}"`, ctx)
			}
			if _, ok := ctx["device-secondary_vendorConfig_localId"]; ok {
				config += nprintf(`
      localId = "%{device-secondary_vendorConfig_localId}"`, ctx)
			}
			if _, ok := ctx["device-secondary_vendorConfig_remoteId"]; ok {
				config += nprintf(`
      remoteId = "%{device-secondary_vendorConfig_remoteId}"`, ctx)
			}
			if _, ok := ctx["device-secondary_vendorConfig_serialNumber"]; ok {
				config += nprintf(`
      serialNumber = "%{device-secondary_vendorConfig_serialNumber}"`, ctx)
			}
			config += nprintf(`
    }`, ctx)
		}
		config += `
  }`
	}
	config += `
}`
	return config
}
// testAccNetworkDeviceACL renders Terraform configuration for the primary
// ACL template resource, plus a secondary template when
// "acl-secondary_name" is present in ctx.
func testAccNetworkDeviceACL(ctx map[string]interface{}) string {
	config := nprintf(`
resource "equinix_network_acl_template" "%{acl-resourceName}" {
name = "%{acl-name}"
description = "%{acl-description}"
metro_code = "%{acl-metroCode}"
inbound_rule {
subnets = ["10.0.0.0/24"]
protocol = "IP"
src_port = "any"
dst_port = "any"
}
}`, ctx)
	// Secondary template uses a different subnet so the two ACLs are distinguishable.
	if _, ok := ctx["acl-secondary_name"]; ok {
		config += nprintf(`
resource "equinix_network_acl_template" "%{acl-secondary_resourceName}" {
name = "%{acl-secondary_name}"
description = "%{acl-secondary_description}"
metro_code = "%{acl-secondary_metroCode}"
inbound_rule {
subnets = ["192.0.0.0/24"]
protocol = "IP"
src_port = "any"
dst_port = "any"
}
}`, ctx)
	}
	return config
}
// testAccNetworkDeviceSSHKey renders Terraform configuration for an SSH key
// resource from the "sshkey-*" entries of the test context map.
func testAccNetworkDeviceSSHKey(ctx map[string]interface{}) string {
	config := nprintf(`
resource "equinix_network_ssh_key" "%{sshkey-resourceName}" {
name = "%{sshkey-name}"
public_key = "%{sshkey-public_key}"
}
`, ctx)
	return config
}
|
package controllers
import (
"dispatch/utils"
"encoding/json"
"fmt"
"strings"
"github.com/astaxie/beego"
)
// DubboPostBody is the JSON request payload for modifying the dubbo traffic
// weight of a host.
type DubboPostBody struct {
	Host    string `json:"host"`    // target host whose weight is modified
	Env     string `json:"env"`     // environment name; selects the zookeeper connection
	Weight  int    `json:"weight"`  // desired weight; clamped to [0, 1000] by the handler
	Disable bool   `json:"disable"` // when true the host is disabled
}
// DubboController exposes HTTP endpoints for inspecting and modifying dubbo
// traffic weights through zookeeper.
type DubboController struct {
	beego.Controller
}
// GetFlowFromHost (GET) returns the current dubbo traffic weight information
// for the given comma-separated "host" list in the given "env" environment.
// Responses are hand-built JSON strings of the form
// {"ok": ..., "errorCode": ..., "errorMsg": ..., "data": ...}.
func (c *DubboController) GetFlowFromHost() {
	host := c.Input().Get("host")
	env := c.Input().Get("env")
	if host == "" {
		c.Ctx.WriteString("{\"ok\": false,\"errorCode\": 1, \"errorMsg\":\"没有输入host参数\"}")
		return
	}
	if env == "" {
		c.Ctx.WriteString("{\"ok\": false,\"errorCode\": 1, \"errorMsg\":\"没有输入env参数\"}")
		return
	}
	if _, ok := utils.ZkConn[env]; !ok {
		// BUG FIX: was "\"ok\": fasle" — a malformed JSON literal that broke
		// any client parsing this response.
		c.Ctx.WriteString("{\"ok\": false,\"errorCode\": 1, \"errorMsg\": \"当前运行的环境不存在\"}")
		return
	}
	srcdata := utils.WeightGet(utils.ZkConn[env], utils.ZkMap[env], strings.Split(host, ","))
	resdata, err := json.Marshal(srcdata)
	if err != nil {
		// NOTE(review): err is interpolated unescaped into a JSON string; an
		// error text containing quotes would produce invalid JSON — consider
		// building responses with json.Marshal.
		c.Ctx.WriteString(fmt.Sprintf("{\"ok\": false,\"errorCode\": 1, \"errorMsg\":\"%v\"}", err))
		return
	}
	c.Ctx.WriteString(fmt.Sprintf("{\"ok\": true,\"errorCode\": 0, \"errorMsg\":null, \"data\":%s}", string(resdata)))
}
// ModFlowFromHost (POST) updates the dubbo traffic weight of a host. The
// JSON body is decoded into DubboPostBody; the weight is clamped to
// [0, 1000], and a non-positive weight additionally disables the host.
func (c *DubboController) ModFlowFromHost() {
	var body DubboPostBody
	if err := json.Unmarshal(c.Ctx.Input.RequestBody, &body); err != nil {
		c.Ctx.WriteString(fmt.Sprintf("{\"ok\": false,\"errorCode\": 1, \"errorMsg\":\"%v\"}", err))
		return
	}
	// Required parameters.
	switch {
	case body.Host == "":
		c.Ctx.WriteString("{\"ok\": false,\"errorCode\": 1, \"errorMsg\":\"没有输入host参数\"}")
		return
	case body.Env == "":
		c.Ctx.WriteString("{\"ok\": false,\"errorCode\": 1, \"errorMsg\":\"没有输入env参数\"}")
		return
	}
	// Clamp weight into [0, 1000]; non-positive weight also disables the host.
	if body.Weight > 1000 {
		body.Weight = 1000
	} else if body.Weight <= 0 {
		body.Weight = 0
		body.Disable = true
	}
	if _, ok := utils.ZkConn[body.Env]; !ok {
		c.Ctx.WriteString("{\"ok\": false,\"errorCode\": 1, \"errorMsg\": \"当前运行的环境不存在\"}")
		return
	}
	if err := utils.WeightMod(body.Host, utils.ZkConn[body.Env], utils.ZkMap[body.Env], body.Weight, body.Disable); err != nil {
		c.Ctx.WriteString(fmt.Sprintf("{\"ok\": false,\"errorCode\": 1, \"errorMsg\": \"创建zk节点失败,原因:%v\"}", err))
		return
	}
	c.Ctx.WriteString("{\"ok\": true,\"errorCode\": 0, \"errorMsg\":null}")
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.