text stringlengths 11 4.05M |
|---|
package dexml
import (
"fmt"
"log"
"testing"
"errors"
apdm "spca/apd/models"
)
// TestDeXmlSetup checks that the dexml package can hand out a handle at all:
// a nil result from NewDeXmlHdl fails the test immediately.
func TestDeXmlSetup(t *testing.T) {
	fmt.Println("Test 1: Data Encode Setting to Xml Setup")
	hdl := NewDeXmlHdl()
	if hdl == nil {
		fmt.Println("Error in getting the Xml handle")
		t.Fatal(errors.New("Get Xml Handle Failed"))
	}
	fmt.Println("Got Xml Handle")
}
// TestDeXmlOper exercises the XML encode/decode round trip: first on the
// local Td1 test type, then on the apdm.CmdConf and apdm.CmdOpStatus
// project models. Encode/decode errors are printed but deliberately not
// asserted — NOTE(review): consider failing the test on non-nil err.
func TestDeXmlOper(t *testing.T) {
	fmt.Println("Test 2: Xml operations Setup/Encode/Decode")
	xmlHdl := NewDeXmlHdl()
	if xmlHdl == nil {
		fmt.Println("Error in getting the Xml handle")
		t.Fatal(errors.New("Get Xml Handle Failed"))
	}
	fmt.Println("Xml Handle Get Success:")
	fmt.Println("Xml Encode Test:")
	// Round-trip a simple two-field struct first.
	td := Td1{9, "Test-CMD"}
	t1 := Td1{D1: 9, D2: "SGN"}
	msg, err := td.EncodeTest(t1)
	fmt.Printf("Xml Encoded Msg: %+v Error: %+v\n", msg, err)
	fmt.Println("Xml Encode Test Success:")
	fmt.Println("Xml Decode Test:")
	/*
		xmlHdl.EncodeCmdConf("Encode-Cmd-Conf")
		xmlHdl.DecodeCmdConf([]byte(`{"Name":"Decode-Cmd-Conf"}`))
		xmlHdl.EncodeCmdStatus("Encode-Cmd-Status")
		xmlHdl.DecodeCmdStatus([]byte(`{"Name":"Decode-Cmd-Status"}`))
	*/
	// Encode a command configuration, then decode it back; the decoder
	// returns an interface{} that is type-asserted to apdm.CmdConf.
	cmd := apdm.CmdConf{
		Service:  "SERVICE-1",
		Cmd:      "CMDA",
		CmdType:  apdm.INDCMD,
		CmdParam: "Param-1",
	}
	msg, _ = xmlHdl.EncodeCmdConf(cmd)
	log.Println(string(msg))
	cmd1, _ := xmlHdl.DecodeCmdConf(msg)
	log.Println(cmd1.(apdm.CmdConf))
	// Same round trip for the command status counters.
	cmdst := apdm.CmdOpStatus{
		CmdSubmitted:  3,
		CmdProcessing: 2,
		CmdRunning:    4,
		CmdCompleted:  9,
		CmdError:      1,
	}
	msg, _ = xmlHdl.EncodeCmdStatus(cmdst)
	log.Println(string(msg))
	cmdst1, _ := xmlHdl.DecodeCmdStatus(msg)
	log.Println(cmdst1.(apdm.CmdOpStatus))
	fmt.Println("Xml Decode Test Success:")
}
|
package main
import "fmt"
// max reports the larger of two integers.
func max(x, y int) int {
	if y > x {
		return y
	}
	return x
}
// rob solves the circular "House Robber II" problem: houses form a ring,
// no two adjacent houses (including the first and last) may both be taken.
// It returns the maximum total value obtainable.
func rob(nums []int) int {
	n := len(nums)
	switch n {
	case 0:
		return 0
	case 1:
		return nums[0]
	}
	larger := func(a, b int) int {
		if a > b {
			return a
		}
		return b
	}
	// Two plans computed in lockstep over prefixes of the row:
	//   skipFirst[i] — best over the first i houses when house 0 is never taken
	//   takeFirst[i] — best over the first i houses when house 0 is allowed
	skipFirst := make([]int, n+1)
	takeFirst := make([]int, n+1)
	takeFirst[1] = nums[0]
	for i := 2; i <= n; i++ {
		v := nums[i-1]
		skipFirst[i] = larger(skipFirst[i-2]+v, skipFirst[i-1])
		takeFirst[i] = larger(takeFirst[i-2]+v, takeFirst[i-1])
	}
	// If house 0 could have been taken, the last house must be excluded
	// (they are neighbors on the ring) — hence takeFirst[n-1].
	return larger(skipFirst[n], takeFirst[n-1])
}
// main smoke-tests the robber DP with a single-house input (expects 3).
func main() {
	houses := []int{3}
	fmt.Println(rob(houses))
}
|
package influx
import (
"testing"
"time"
"github.com/bingoohuang/gou/lang"
"github.com/stretchr/testify/assert"
)
// delay models one row of the "pipeline_delay" InfluxDB measurement;
// the struct tags drive how ToLine renders it into line protocol.
type delay struct {
	ModifiedTime time.Time `influx:"time" measurement:"pipeline_delay"` // point timestamp + measurement name
	ID uint64 `influx:"tag" name:"delay_id"` // indexed tag, emitted as delay_id
	Delay float64 `influx:"field"`
	Something uint64 `influx:"-"` // excluded from the emitted line
	IncrID uint64
	MyNote string // default as influx field with snake name conversion.
}
func TestCreateLine(t *testing.T) {
assert.Equal(t, lang.M2(
`pipeline_delay,delay_id=1 delay=123456,incr_id=100,my_note="测试" 1587371416000000000`, nil),
lang.M2(ToLine(
delay{
ModifiedTime: lang.ParseTime("2006-01-02 15:04:05", "2020-04-20 16:30:16"),
Something: 332333,
ID: 1,
Delay: 123456,
IncrID: 100,
MyNote: "测试",
},
)))
}
|
package objectstorage
// CachedObjects is a batch of CachedObject values that can be released
// together.
type CachedObjects []CachedObject

// Release forwards the optional force flag to Release on each element.
func (cachedObjects CachedObjects) Release(force ...bool) {
	for _, obj := range cachedObjects {
		obj.Release(force...)
	}
}
|
package MySQL
import (
"AlgorithmPractice/src/common/Intergration/DB"
"AlgorithmPractice/src/common/conf"
"database/sql"
"fmt"
"sync"
)
/**
* @author liujun
* @version V1.0
* @date 2022/7/15 19:06
* @author-Email ljfirst@mail.ustc.edu.cn
* @description
*/
var (
	// DBMySqlCliInstance is the process-wide MySQL client, populated
	// lazily by NewDBMysqlCli; nil if the connection attempt failed.
	DBMySqlCliInstance *DBMySqlCli
	// DBMySqlCliOnce guarantees NewDBMysqlCli runs at most once.
	DBMySqlCliOnce sync.Once
)

// DBMySqlCli embeds *sql.DB so query helper methods can be attached.
type DBMySqlCli struct {
	*sql.DB
}
// GetDBMysqlCli returns the shared client singleton, constructing it on
// first use. NOTE(review): may return nil when NewDBMysqlCli failed to
// connect — callers should check before use.
func GetDBMysqlCli() *DBMySqlCli {
	DBMySqlCliOnce.Do(NewDBMysqlCli)
	return DBMySqlCliInstance
}
// NewDBMysqlCli builds DBMySqlCliInstance from the MySQL settings in
// conf.AlgorithmConf. On any failure it prints the error and leaves
// DBMySqlCliInstance nil.
func NewDBMysqlCli() {
	conf.GetConfig()
	dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=%s",
		conf.AlgorithmConf.Mysql.Username,
		conf.AlgorithmConf.Mysql.Password,
		conf.AlgorithmConf.Mysql.Host,
		conf.AlgorithmConf.Mysql.Port,
		conf.AlgorithmConf.Mysql.DBName,
		conf.AlgorithmConf.Mysql.CharSet)
	// sql.Open only validates the DSN format; it does not dial the
	// server or check the credentials.
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		fmt.Printf("打开数据库失败,err:%v\n", err)
		return
	}
	// Ping actually connects, verifying host reachability and credentials.
	if err = db.Ping(); err != nil {
		fmt.Printf("连接数据库失败,err:%v\n", err)
		// FIX: close the pool instead of leaking it when unusable.
		db.Close()
		return
	}
	fmt.Println("连接数据库成功!")
	DBMySqlCliInstance = &DBMySqlCli{DB: db}
}
// ExecQuery runs sqlStr and scans every row into a SQLTestDataEntity,
// returning pointers to independent copies. The query must select the
// seven entity columns in order (id, input, output, class_name,
// input_desc, output_desc, class_describe).
func (d *DBMySqlCli) ExecQuery(sqlStr string) ([]*DB.SQLTestDataEntity, error) {
	rows, err := d.Query(sqlStr)
	if err != nil {
		fmt.Println("sqlStr:", sqlStr, "-------", "err:", err)
		return nil, err
	}
	// FIX: the result set was never closed, leaking the connection.
	defer rows.Close()
	entityListPtr := make([]*DB.SQLTestDataEntity, 0)
	for rows.Next() {
		var entity DB.SQLTestDataEntity
		err = rows.Scan(&entity.ID, &entity.Input, &entity.Output, &entity.ClassName,
			&entity.InputDesc, &entity.OutputDesc, &entity.ClassDescribe)
		if err != nil {
			fmt.Printf("获取数据错误, err:%v\n", err)
			return nil, err
		}
		// Copy before taking the address so each pointer is distinct.
		e := entity
		entityListPtr = append(entityListPtr, &e)
	}
	// FIX: surface errors that ended iteration early (previously dropped).
	if err = rows.Err(); err != nil {
		fmt.Printf("获取数据错误, err:%v\n", err)
		return nil, err
	}
	return entityListPtr, nil
}
// ExecQueryAllUTData fetches every row of ut_test_data, newest first,
// as independent *SQLTestDataEntity values.
func (d *DBMySqlCli) ExecQueryAllUTData() ([]*DB.SQLTestDataEntity, error) {
	sqlStr := "SELECT * FROM algorithm_practice_db.ut_test_data order by id desc"
	rows, err := d.Query(sqlStr)
	if err != nil {
		fmt.Println("sqlStr:", sqlStr, "-------", "err:", err)
		return nil, err
	}
	// FIX: the result set was never closed, leaking the connection.
	defer rows.Close()
	entityListPtr := make([]*DB.SQLTestDataEntity, 0)
	for rows.Next() {
		var entity DB.SQLTestDataEntity
		err = rows.Scan(&entity.ID, &entity.Input, &entity.Output, &entity.ClassName,
			&entity.InputDesc, &entity.OutputDesc, &entity.ClassDescribe)
		if err != nil {
			fmt.Printf("获取数据错误, err:%v\n", err)
			return nil, err
		}
		// Copy before taking the address so each pointer is distinct.
		e := entity
		entityListPtr = append(entityListPtr, &e)
	}
	// FIX: surface errors that ended iteration early (previously dropped).
	if err = rows.Err(); err != nil {
		fmt.Printf("获取数据错误, err:%v\n", err)
		return nil, err
	}
	return entityListPtr, nil
}
// ExecInsert inserts one test-data row (input, output, class_name) into
// the configured table and logs the auto-generated primary key.
func (d *DBMySqlCli) ExecInsert(entity *DB.SQLTestDataEntity) {
	// "?" placeholders let the driver escape values safely.
	sqlPattern := "insert into %s.%s (input,output,class_name) values (?,?,?)"
	// FIX: the local was named "sql", shadowing the database/sql package.
	query := fmt.Sprintf(sqlPattern,
		conf.AlgorithmConf.Mysql.DBName,
		conf.AlgorithmConf.Mysql.TableName)
	ret, err := d.Exec(query, entity.Input, entity.Output, entity.ClassName)
	if err != nil {
		fmt.Println("插入失败,err", err)
		return
	}
	// ID of the newly inserted row (the auto-increment primary key).
	newID, err := ret.LastInsertId()
	if err != nil {
		fmt.Println("获取id失败,err", err)
		return
	}
	fmt.Println("插入成功,id为:", newID)
}
// ExecBatchInsert is a stub for bulk insertion; not implemented yet.
// TODO: build a multi-row INSERT (or loop ExecInsert) over entity.
func (d *DBMySqlCli) ExecBatchInsert(entity []*DB.SQLTestDataEntity) {
	//d.Exec()
}
// ExecUpdate is a stub; updating an existing row is not implemented yet.
func (d *DBMySqlCli) ExecUpdate(entity *DB.SQLTestDataEntity) {
}
// PrintQuerySingleData fetches the first row whose class_name matches
// the hard-coded RLIKE pattern "className" and prints it.
//
// NOTE(review): the sqlStr parameter is currently ignored — the query
// is rebuilt from config. Confirm whether callers expect it to be used.
func (d *DBMySqlCli) PrintQuerySingleData(sqlStr string) {
	sqlPattern := "SELECT * from %s.%s where class_name rlike ?"
	// FIX: the local was named "sql", shadowing the database/sql package.
	query := fmt.Sprintf(sqlPattern,
		conf.AlgorithmConf.Mysql.DBName,
		conf.AlgorithmConf.Mysql.TableName)
	fmt.Println(query)
	row := d.QueryRow(query, "className")
	var entity DB.SQLTestDataEntity
	// Scan copies the row's columns into the entity fields via pointers.
	err := row.Scan(&entity.ID, &entity.Input, &entity.Output, &entity.ClassName,
		&entity.InputDesc, &entity.OutputDesc, &entity.ClassDescribe)
	if err != nil {
		fmt.Printf("获取数据错误, err:%v\n", err)
		return
	}
	fmt.Printf("查询数据成功: %#v", entity)
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
"strings"
)
// CommonUse is a small demo type carrying a single string payload.
type CommonUse struct {
	Contents string
}

// String implements fmt.Stringer (the Go analogue of Java's toString),
// so printing a *CommonUse renders its contents.
func (c *CommonUse) String() string {
	return fmt.Sprintf("Retriever:{Contents=%s}", c.Contents)
}
// printFile opens filename and prints its contents line by line.
// It panics if the file cannot be opened (demo-level error handling).
func printFile(filename string) {
	file, err := os.Open(filename)
	if err != nil {
		panic(err)
	}
	// FIX: the handle was never closed — release it when done.
	defer file.Close()
	printFileContents(file)
}
// printFileContents writes reader to stdout one line at a time.
func printFileContents(reader io.Reader) {
	scanner := bufio.NewScanner(reader)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
	// FIX: a scan can stop on a read error, not just EOF — report it
	// instead of silently truncating output.
	if err := scanner.Err(); err != nil {
		fmt.Println("read error:", err)
	}
}
// main demos Stringer printing, printing a file from disk, and reading
// from an in-memory string reader.
func main() {
	c := CommonUse{"hello"}
	// Pass the address so fmt picks up the *CommonUse Stringer.
	fmt.Println(&c)
	// NOTE: panics if abs.txt does not exist in the working directory.
	printFile("abs.txt")
	s := `abc"d"
kkkkk
123`
	printFileContents(strings.NewReader(s))
}
|
package handler
import (
"fmt"
"time"
"github.com/dustinengle/itshere/pkg/client"
"github.com/dustinengle/itshere/pkg/data"
"github.com/dustinengle/itshere/pkg/v1/reply"
"github.com/gin-gonic/gin"
)
// DeleteMailbox removes a mailbox and its PINs, then notifies the owning
// gateway's channel with a DELETE SenML record before deleting the
// mailbox record itself.
func DeleteMailbox(c *gin.Context) {
	mailbox := new(data.Mailbox)
	if err := c.BindJSON(mailbox); err != nil {
		reply.BadRequest(c, err)
		return
	}
	// Get the gateway.
	gateway := &data.Gateway{ID: mailbox.GatewayID}
	if err := data.Single(gateway); err != nil {
		reply.BadRequest(c, err)
		return
	}
	// Delete pins.
	pins := make([]*data.PIN, 0)
	if err := data.Related(mailbox, &pins); err != nil {
		reply.BadRequest(c, err)
		return
	}
	for _, pin := range pins {
		// FIX: pass the *data.PIN itself. The old &pin handed over a
		// **data.PIN — the address of the loop variable, identical on
		// every iteration — not the record to delete.
		if err := data.Delete(pin); err != nil {
			reply.InternalServer(c, err)
			return
		}
	}
	// Send the delete notification to the gateway channel.
	senml := []map[string]interface{}{
		{
			"bn": fmt.Sprintf("%s_", gateway.DeviceID),
			"n":  "DELETE",
			"u":  "PublicKey",
			"vs": mailbox.PublicKey,
		},
	}
	if err := client.ChannelMessageCreate(gateway.DeviceKey, gateway.ChannelID, senml); err != nil {
		reply.BadGateway(c, err)
		return
	}
	// V2: add cleanup on StreamIoT.
	if err := mailbox.Delete(); err != nil {
		reply.BadRequest(c, err)
		return
	}
	reply.OK(c, "OK")
}
// DeletePIN removes a single PIN: it loads the PIN by ID, resolves its
// mailbox, and asks the mailbox to delete the PIN.
func DeletePIN(c *gin.Context) {
	req := new(IDs)
	if err := c.BindJSON(req); err != nil {
		reply.BadRequest(c, err)
		return
	}
	// Load the pin from the database.
	pin := &data.PIN{ID: req.ID}
	if err := data.Single(pin); err != nil {
		reply.BadRequest(c, err)
		return
	}
	// Load the mailbox for the pin.
	mailbox := &data.Mailbox{ID: pin.MailboxID}
	if err := data.Related(pin, mailbox); err != nil {
		reply.InternalServer(c, err)
		return
	}
	// Remove the pin from the system.
	if err := mailbox.DeletePIN(pin); err != nil {
		reply.BadGateway(c, err)
		return
	}
	reply.OK(c, "OK")
}
// PostMailbox creates a mailbox under the requested gateway and pushes
// an ADD SenML record (carrying the mailbox public key) to the gateway
// channel. Responds with the created mailbox.
func PostMailbox(c *gin.Context) {
	// Validate request while binding.
	req := new(Mailbox)
	if err := c.BindJSON(req); err != nil {
		reply.BadRequest(c, err)
		return
	}
	// Get the gateway.
	gateway := &data.Gateway{ID: req.GatewayID}
	if err := data.Single(gateway); err != nil {
		reply.BadRequest(c, err)
		return
	}
	// Create the mailbox.
	mailbox := &data.Mailbox{
		Name:      req.Name,
		PublicKey: req.PublicKey,
	}
	if err := gateway.CreateMailbox(mailbox); err != nil {
		reply.BadRequest(c, err)
		return
	}
	// Send the new message to the gateway channel.
	senml := []map[string]interface{}{
		map[string]interface{}{
			"bn": fmt.Sprintf("%s_", gateway.DeviceID),
			"n":  "ADD",
			"u":  "PublicKey",
			"vs": mailbox.PublicKey,
		},
	}
	if err := client.ChannelMessageCreate(gateway.DeviceKey, gateway.ChannelID, senml); err != nil {
		reply.BadGateway(c, err)
		return
	}
	reply.OK(c, mailbox)
}
// PostMessage forwards the request's SenML payload to the mailbox's
// channel on StreamIoT, authenticating with the gateway's device key.
func PostMessage(c *gin.Context) {
	req := new(Message)
	if err := c.BindJSON(req); err != nil {
		reply.BadRequest(c, err)
		return
	}
	// NOTE(review): looks like a leftover debug print — consider removing.
	fmt.Println("req:", req)
	// Get the mailbox from the database.
	mailbox := &data.Mailbox{ID: req.MailboxID}
	if err := data.Single(mailbox); err != nil {
		reply.BadRequest(c, err)
		return
	}
	// Get the gateway for the mailbox.
	gateway := &data.Gateway{ID: req.GatewayID}
	if err := data.Related(mailbox, gateway); err != nil {
		reply.InternalServer(c, err)
		return
	}
	// Send the message to StreamIoT.
	if err := client.ChannelMessageCreate(gateway.DeviceKey, mailbox.ChannelID, &req.SenML); err != nil {
		reply.BadGateway(c, err)
		return
	}
	reply.OK(c, "OK")
}
// PostPIN creates a PIN under the requested mailbox. The request's
// Timeout is interpreted as Unix epoch seconds and stored as UTC.
func PostPIN(c *gin.Context) {
	// Validate request while binding.
	req := new(PIN)
	if err := c.BindJSON(req); err != nil {
		reply.BadRequest(c, err)
		return
	}
	// Get the mailbox for the logged in user.
	mailbox := &data.Mailbox{ID: req.MailboxID}
	if err := data.Single(mailbox); err != nil {
		reply.BadRequest(c, err)
		return
	}
	// Create the pin for the mailbox.
	pin := &data.PIN{
		Email:     req.Email,
		MailboxID: req.MailboxID,
		Name:      req.Name,
		Number:    req.Number,
		Phone:     req.Phone,
		Single:    req.Single,
		Timeout:   time.Unix(int64(req.Timeout), 0).UTC(), // epoch seconds -> UTC time
	}
	if err := mailbox.CreatePIN(pin); err != nil {
		reply.BadRequest(c, err)
		return
	}
	reply.OK(c, pin)
}
// PutMailbox is a stub: mailbox updates are not implemented yet; it
// unconditionally replies OK.
func PutMailbox(c *gin.Context) {
	reply.OK(c, "OK")
}
|
package main
import (
"flag"
"fmt"
"log"
"math"
"math/rand"
"net/http"
"os"
"runtime"
"strconv"
"sync"
"time"
"io/ioutil"
"os/exec"
"github.com/go-yaml/yaml"
"github.com/google/uuid"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
api "github.com/synerex/synerex_alpha/api"
napi "github.com/synerex/synerex_alpha/nodeapi"
"github.com/synerex/synerex_alpha/provider/simutil"
"github.com/synerex/synerex_alpha/util"
"google.golang.org/grpc"
)
var (
	myProvider *api.Provider // this master's own provider record
	synerexAddr string // Synerex server address (env SYNEREX_SERVER)
	nodeIdAddr string // NodeID server address (env NODEID_SERVER)
	port string // HTTP port for the order server (env PORT)
	startFlag bool // true while the simulation clock loop is running
	masterClock int // current global simulation time step
	workerHosts []string
	mu sync.Mutex
	simapi *api.SimAPI
	//providerManager *Manager
	pm *simutil.ProviderManager
	logger *util.Logger
	waiter *api.Waiter
	config *Config // loaded from ./config.yaml in init
	podgen *PodGenerator
	proc *Processor
)
// Config is the root of config.yaml.
type Config struct {
	Area Config_Area `yaml:"area"`
}

// Config_Area describes how the overall area is sized and subdivided.
type Config_Area struct {
	SideRange float64 `yaml:"sideRange"`
	DuplicateRange float64 `yaml:"duplicateRange"` // overlap margin between adjacent areas
	DefaultAreaNum Config_AreaNum `yaml:"defaultAreaNum"`
}

// Config_AreaNum is the grid dimensions used by divideArea.
type Config_AreaNum struct {
	Row uint64 `yaml:"row"`
	Column uint64 `yaml:"column"`
}
// readConfig loads ./config.yaml into a Config. On failure it prints
// the error and returns it with a nil Config.
func readConfig() (*Config, error) {
	var config *Config
	buf, err := ioutil.ReadFile("./config.yaml")
	if err != nil {
		fmt.Println(err)
		// FIX: return an explicit nil rather than a possibly-stale value
		// alongside a non-nil error.
		return nil, err
	}
	// yaml.Unmarshal maps the file onto the yaml-tagged Config fields.
	if err = yaml.Unmarshal(buf, &config); err != nil {
		fmt.Println(err)
		return nil, err
	}
	fmt.Printf("yaml is %v\n", config)
	return config, nil
}
// init wires up the package-level singletons, loads config.yaml, and
// resolves server addresses from the environment with local defaults.
func init() {
	podgen = NewPodGenerator()
	proc = NewProcessor()
	waiter = api.NewWaiter()
	startFlag = false
	masterClock = 0
	workerHosts = make([]string, 0)
	logger = util.NewLogger()
	logger.SetPrefix("Master")
	flag.Parse()
	//providerManager = NewManager()
	// Read the configuration file. NOTE(review): the error is ignored,
	// so config may be nil if ./config.yaml is missing or malformed.
	config, _ = readConfig()
	// kubetest
	/*id := "test"
	area := &Area{
		Id: 3,
		Control: []Coord{{Latitude: 0, Longitude: 0}, {Latitude: 10, Longitude: 0}, {Latitude: 10, Longitude: 10}, {Latitude: 0, Longitude: 10}},
		Duplicate: []Coord{{Latitude: 0, Longitude: 0}, {Latitude: 10, Longitude: 0}, {Latitude: 10, Longitude: 10}, {Latitude: 0, Longitude: 10}},
	}
	go podgen.applyWorker(id, area)
	time.Sleep(4 * time.Second)
	go podgen.deleteWorker(id)*/
	// Server addresses: environment overrides, localhost defaults.
	synerexAddr = os.Getenv("SYNEREX_SERVER")
	if synerexAddr == "" {
		synerexAddr = "127.0.0.1:10000"
	}
	nodeIdAddr = os.Getenv("NODEID_SERVER")
	if nodeIdAddr == "" {
		nodeIdAddr = "127.0.0.1:9000"
	}
	port = os.Getenv("PORT")
	if port == "" {
		port = "9990"
	}
}
////////////////////////////////////////////////////////////
////////////////// Manager ///////////////////
///////////////////////////////////////////////////////////
// Manager keeps a list of registered providers. NOTE: it appears unused
// here in favor of simutil.ProviderManager (see the commented-out
// providerManager references).
type Manager struct {
	Providers []*api.Provider
}

// NewManager returns a Manager with an empty provider list.
func NewManager() *Manager {
	return &Manager{Providers: []*api.Provider{}}
}
// AddProvider appends provider to the managed list.
func (m *Manager) AddProvider(provider *api.Provider) {
	m.Providers = append(m.Providers, provider)
}
// GetProviderIds collects the ID of every registered provider in
// registration order.
func (m *Manager) GetProviderIds() []uint64 {
	ids := make([]uint64, 0)
	for _, provider := range m.Providers {
		ids = append(ids, provider.GetId())
	}
	return ids
}
////////////////////////////////////////////////////////////
//////////// Demand Supply Callback ////////////////
///////////////////////////////////////////////////////////
// Supplyのコールバック関数
// supplyCallback handles supplies addressed to this master. Every
// recognized *_RESPONSE type is treated identically: it unblocks the
// request that is waiting on it.
func supplyCallback(clt *api.SMServiceClient, sp *api.Supply) {
	switch sp.GetSimSupply().GetType() {
	case api.SupplyType_SET_CLOCK_RESPONSE,
		api.SupplyType_SET_AGENT_RESPONSE,
		api.SupplyType_FORWARD_CLOCK_RESPONSE,
		api.SupplyType_FORWARD_CLOCK_INIT_RESPONSE,
		api.SupplyType_UPDATE_PROVIDERS_RESPONSE,
		api.SupplyType_SEND_AREA_INFO_RESPONSE:
		simapi.SendSpToWait(sp)
	}
}
// Demandのコールバック関数
// demandCallback handles demands addressed to this master. Currently it
// only services provider registration: the new provider is recorded, an
// acknowledgement is sent back, and every worker receives the updated
// provider list.
func demandCallback(clt *api.SMServiceClient, dm *api.Demand) {
	// check if supply is match with my demand.
	switch dm.GetSimDemand().GetType() {
	case api.DemandType_REGIST_PROVIDER_REQUEST:
		// Record the newly registered provider.
		p := dm.GetSimDemand().GetRegistProviderRequest().GetProvider()
		//providerManager.AddProvider(p)
		pm.AddProvider(p)
		fmt.Printf("regist provider! %v\n", p.GetId())
		// Acknowledge the registration to the requester.
		//targets := []uint64{tid}
		senderInfo := myProvider.Id
		targets := []uint64{p.GetId()}
		msgId := dm.GetSimDemand().GetMsgId()
		simapi.RegistProviderResponse(senderInfo, targets, msgId, pm.MyProvider)
		// Push the updated provider list to every worker.
		targets = pm.GetProviderIds([]simutil.IDType{
			simutil.IDType_WORKER,
		})
		simapi.UpdateProvidersRequest(senderInfo, targets, pm.GetProviders())
		logger.Info("Success Update Providers! Worker Num: ", len(targets))
	}
}
///////////////////////////////////////////////
//////////// Processor //////////////////////
///////////////////////////////////////////////
// Processor owns the simulated area: the overall bounding area, the
// per-area map, and the adjacency between areas.
type Processor struct {
	Area *api.Area // the overall simulation area
	AreaMap map[string]*api.Area // area id -> area definition
	NeighborMap map[string][]string // area id -> adjacent area ids
}
// NewProcessor returns a Processor with empty maps and no overall area
// assigned yet.
func NewProcessor() *Processor {
	return &Processor{
		Area:        nil,
		AreaMap:     map[string]*api.Area{},
		NeighborMap: map[string][]string{},
	}
}
// setAgents3: gatewayから入れる
// routes から東山のrouteを作成する
// setAgents3 spawns agentNum pedestrian agents and asks every worker
// provider to install them. Unlike setAgents2, every agent is placed on
// the FIRST route (routes[0]) — presumably for focused testing; confirm
// before relying on it. Returns an error if no overall area is set.
func (proc *Processor) setAgents3(agentNum uint64) (bool, error) {
	if proc.Area == nil {
		return false, fmt.Errorf("area is nil")
	}
	agents := make([]*api.Agent, 0)
	for i := 0; i < int(agentNum); i++ {
		uid, _ := uuid.NewRandom()
		routes := GetRoutes()
		route1 := routes[0]
		point1 := route1.Point
		// Pick a random neighbor of the route point as the next transit.
		// FIX: dropped the redundant int() around len (already int).
		point2 := route1.NeighborPoints[rand.Intn(len(route1.NeighborPoints))].Point
		position := GetAmongPosition(point1, point2)
		nextTransit := point2
		agents = append(agents, &api.Agent{
			Type: api.AgentType_PEDESTRIAN,
			Id:   uint64(uid.ID()),
			Route: &api.Route{
				Position:      position,
				Direction:     30,
				Speed:         60,
				Departure:     position,
				Destination:   position,
				TransitPoints: []*api.Coord{},
				NextTransit:   nextTransit,
			},
		})
		fmt.Printf("position %v\n", position)
	}
	// Request every worker provider to install the agents.
	senderId := myProvider.Id
	targets := pm.GetProviderIds([]simutil.IDType{
		simutil.IDType_WORKER,
	})
	simapi.SetAgentRequest(senderId, targets, agents)
	logger.Info("Finish Setting Agents \n Add: %v", len(agents))
	return true, nil
}
// setAgents: agentをセットするDemandを出す関数
// routes から東山のrouteを作成する
// setAgents2 spawns agentNum pedestrian agents on randomly chosen routes
// and asks every worker provider to install them. Returns an error if no
// overall area has been set.
func (proc *Processor) setAgents2(agentNum uint64) (bool, error) {
	if proc.Area == nil {
		return false, fmt.Errorf("area is nil")
	}
	agents := make([]*api.Agent, 0)
	//minLon, maxLon, minLat, maxLat := 136.971626, 136.989379, 35.152210, 35.161499
	maxLat, maxLon, minLat, minLon := GetCoordRange(proc.Area.ControlArea)
	fmt.Printf("minLon %v, maxLon %v, minLat %v, maxLat %v\n", minLon, maxLon, minLat, maxLat)
	for i := 0; i < int(agentNum); i++ {
		uid, _ := uuid.NewRandom()
		routes := GetRoutes()
		// Random route, then a random neighbor as the next transit point.
		route1 := routes[rand.Intn(len(routes))]
		point1 := route1.Point
		// FIX: dropped the redundant int() around len (already int).
		point2 := route1.NeighborPoints[rand.Intn(len(route1.NeighborPoints))].Point
		position := GetAmongPosition(point1, point2)
		nextTransit := point2
		agents = append(agents, &api.Agent{
			Type: api.AgentType_PEDESTRIAN,
			Id:   uint64(uid.ID()),
			Route: &api.Route{
				Position:      position,
				Direction:     30,
				Speed:         60,
				Departure:     position,
				Destination:   position,
				TransitPoints: []*api.Coord{},
				NextTransit:   nextTransit,
			},
		})
		fmt.Printf("position %v\n", position)
	}
	// Request every worker provider to install the agents.
	senderId := myProvider.Id
	targets := pm.GetProviderIds([]simutil.IDType{
		simutil.IDType_WORKER,
	})
	simapi.SetAgentRequest(senderId, targets, agents)
	logger.Info("Finish Setting Agents \n Add: %v", len(agents))
	return true, nil
}
// setAgents: agentをセットするDemandを出す関数
// setAgents spawns agentNum pedestrian agents at uniformly random
// positions/destinations inside the overall area's bounding box, with a
// single random transit point each, and asks every worker provider to
// install them. Returns an error if no overall area has been set.
func (proc *Processor) setAgents(agentNum uint64) (bool, error) {
	if proc.Area == nil {
		return false, fmt.Errorf("area is nil")
	}
	agents := make([]*api.Agent, 0)
	//minLon, maxLon, minLat, maxLat := 136.971626, 136.989379, 35.152210, 35.161499
	maxLat, maxLon, minLat, minLon := GetCoordRange(proc.Area.ControlArea)
	fmt.Printf("minLon %v, maxLon %v, minLat %v, maxLat %v\n", minLon, maxLon, minLat, maxLat)
	for i := 0; i < int(agentNum); i++ {
		uid, _ := uuid.NewRandom()
		// Uniformly random coordinates within the bounding box.
		position := &api.Coord{
			Longitude: minLon + (maxLon-minLon)*rand.Float64(),
			Latitude:  minLat + (maxLat-minLat)*rand.Float64(),
		}
		destination := &api.Coord{
			Longitude: minLon + (maxLon-minLon)*rand.Float64(),
			Latitude:  minLat + (maxLat-minLat)*rand.Float64(),
		}
		transitPoint := &api.Coord{
			Longitude: minLon + (maxLon-minLon)*rand.Float64(),
			Latitude:  minLat + (maxLat-minLat)*rand.Float64(),
		}
		transitPoints := []*api.Coord{transitPoint}
		agents = append(agents, &api.Agent{
			Type: api.AgentType_PEDESTRIAN,
			Id:   uint64(uid.ID()),
			Route: &api.Route{
				Position:      position,
				Direction:     30,
				Speed:         60,
				Departure:     position,
				Destination:   destination,
				TransitPoints: transitPoints,
				NextTransit:   transitPoint,
			},
		})
		//fmt.Printf("position %v\n", position)
	}
	// Request every worker provider to install the agents.
	senderId := myProvider.Id
	targets := pm.GetProviderIds([]simutil.IDType{
		simutil.IDType_WORKER,
	})
	simapi.SetAgentRequest(senderId, targets, agents)
	logger.Info("Finish Setting Agents \n Add: %v", len(agents))
	return true, nil
}
// setAreas: areaをセットするDemandを出す関数
// setAreas records areaCoords as the overall area, divides it into a
// grid of sub-areas, launches a worker pod for each (with its neighbor
// list), and sends the area layout to the visualization providers.
func (proc *Processor) setAreas(areaCoords []*api.Coord) (bool, error) {
	proc.Area = &api.Area{
		Id:            0,
		ControlArea:   areaCoords,
		DuplicateArea: areaCoords,
	}
	//id := "test"
	areas, neighborsMap := proc.divideArea(areaCoords, config.Area)
	for _, area := range areas {
		neighbors := neighborsMap[int(area.Id)]
		// Fire-and-forget: pod creation runs concurrently per area.
		go podgen.applyWorker(area, neighbors)
		//defer podgen.deleteWorker(id) // not working...
	}
	// Send the area layout to the visualization providers.
	senderId := myProvider.Id
	targets := pm.GetProviderIds([]simutil.IDType{
		simutil.IDType_VISUALIZATION,
	})
	logger.Debug("Send Area Info to Vis! \n%v\n", targets)
	//areas := []*api.Area{proc.Area}
	simapi.SendAreaInfoRequest(senderId, targets, areas)
	return true, nil
}
// startClock:
// startClock drives the simulation clock. Each cycle it asks every
// worker and visualization provider to advance one step, then sleeps
// out the remainder of a 1000 ms period. It runs until startFlag is
// cleared (see Order.Stop).
//
// FIX: the original recursed into itself for every cycle; Go performs
// no tail-call elimination, so a long run grew the stack without bound.
// Rewritten as a loop (the unreachable startFlag = false after the
// return was also dropped).
func (proc *Processor) startClock() {
	for {
		t1 := time.Now()
		senderId := myProvider.Id
		targets := pm.GetProviderIds([]simutil.IDType{
			simutil.IDType_WORKER,
			simutil.IDType_VISUALIZATION,
		})
		logger.Debug("Next Cycle! \n%v\n", targets)
		simapi.ForwardClockInitRequest(senderId, targets)
		simapi.ForwardClockRequest(senderId, targets)
		// Advance the global simulation time.
		masterClock++
		log.Printf("\x1b[30m\x1b[47m \n Finish: Clock forwarded \n Time: %v \x1b[0m\n", masterClock)
		t2 := time.Now()
		duration := t2.Sub(t1).Milliseconds()
		logger.Info("Duration: %v", duration)
		interval := int64(1000) // cycle period in ms
		if duration > interval {
			logger.Error("time cycle delayed...")
		} else {
			// Sleep out the rest of the period.
			logger.Info("wait %v ms", interval-duration)
			time.Sleep(time.Duration(interval-duration) * time.Millisecond)
		}
		if !startFlag {
			log.Printf("\x1b[30m\x1b[47m \n Finish: Clock stopped \n GlobalTime: %v \x1b[0m\n", masterClock)
			return
		}
	}
}
// areaをrow、columnに分割する関数
// divideArea splits the overall area into a row x column grid of
// sub-areas. Each area id is the string "<col+1><row+1>"; the duplicate
// area extends the control area by dupRange on every side so adjacent
// workers overlap. It returns the areas and a map from numeric area id
// to the ids of its right and upper neighbors.
//
// NOTE(review): the bounds come from proc.Area.ControlArea, not from the
// areaCoords parameter — equivalent when called via setAreas (which
// assigns proc.Area first), but confirm before calling it standalone.
func (proc *Processor) divideArea(areaCoords []*api.Coord, areaConfig Config_Area) ([]*api.Area, map[int][]string) {
	row := areaConfig.DefaultAreaNum.Row
	column := areaConfig.DefaultAreaNum.Column
	dupRange := areaConfig.DuplicateRange
	areas := []*api.Area{}
	neighborMap := make(map[int][]string)
	maxLat, maxLon, minLat, minLon := GetCoordRange(proc.Area.ControlArea)
	//areaId := 0
	for c := 0; c < int(column); c++ {
		// Longitude span of this grid column.
		slon := minLon + (maxLon-minLon)*float64(c)/float64(column)
		elon := minLon + (maxLon-minLon)*float64((c+1))/float64(column)
		for r := 0; r < int(row); r++ {
			areaId := strconv.Itoa(c+1) + strconv.Itoa(r+1)
			areaIdint, _ := strconv.Atoi(strconv.Itoa(c+1) + strconv.Itoa(r+1))
			// Latitude span of this grid row.
			slat := minLat + (maxLat-minLat)*float64(r)/float64(row)
			elat := minLat + (maxLat-minLat)*float64((r+1))/float64(row)
			//fmt.Printf("test id %v\n", areaId)
			areas = append(areas, &api.Area{
				Id: uint64(areaIdint),
				ControlArea: []*api.Coord{
					{Latitude: slat, Longitude: slon},
					{Latitude: slat, Longitude: elon},
					{Latitude: elat, Longitude: elon},
					{Latitude: elat, Longitude: slon},
				},
				// Control area padded by dupRange on every side.
				DuplicateArea: []*api.Coord{
					{Latitude: slat - dupRange, Longitude: slon - dupRange},
					{Latitude: slat - dupRange, Longitude: elon + dupRange},
					{Latitude: elat + dupRange, Longitude: elon + dupRange},
					{Latitude: elat + dupRange, Longitude: slon - dupRange},
				},
			})
			// Recording only each area's right and upper neighbor covers
			// every adjacency exactly once over the whole grid.
			if c+2 <= int(column) {
				id := strconv.Itoa(c+2) + strconv.Itoa(r+1)
				neighborMap[areaIdint] = append(neighborMap[areaIdint], id)
			}
			if r+2 <= int(row) {
				id := strconv.Itoa(c+1) + strconv.Itoa(r+2)
				neighborMap[areaIdint] = append(neighborMap[areaIdint], id)
			}
		}
	}
	return areas, neighborMap
}
///////////////////////////////////////////////
//////////// Order //////////////////////
///////////////////////////////////////////////
// Order groups the HTTP handlers that accept simulation commands from
// the CLI/GUI.
type Order struct {
}

// NewOrder returns a fresh Order.
func NewOrder() *Order {
	return &Order{}
}
// ClockOptions is the JSON payload for /order/set/clock: the simulation
// time step to jump the master clock to.
type ClockOptions struct {
	Time int `validate:"required,min=0" json:"time"`
}
// SetClock returns the handler for /order/set/clock: it overwrites the
// global masterClock with the requested time value.
func (or *Order) SetClock() echo.HandlerFunc {
	return func(c echo.Context) error {
		co := new(ClockOptions)
		if err := c.Bind(co); err != nil {
			return err
		}
		fmt.Printf("time %d\n", co.Time)
		masterClock = co.Time
		return c.String(http.StatusOK, "Set Clock")
	}
}
type AgentOptions struct {
Num int `validate:"required,min=0,max=10", json:"num"`
}
// SetAgent returns the handler for /order/set/agent: it asks the
// Processor to spawn the requested number of agents. NOTE(review): the
// setAgents2 error is only printed, and the response is 200 regardless.
func (or *Order) SetAgent() echo.HandlerFunc {
	return func(c echo.Context) error {
		ao := new(AgentOptions)
		if err := c.Bind(ao); err != nil {
			return err
		}
		fmt.Printf("agent num %d\n", ao.Num)
		ok, err := proc.setAgents2(uint64(ao.Num))
		fmt.Printf("ok %v, err %v", ok, err)
		return c.String(http.StatusOK, "Set Agent")
	}
}
type AreaOptions struct {
SLat string `min=0,max=100", json:"slat"`
SLon string `min=0,max=200", json:"slon"`
ELat string `min=0,max=100", json:"elat"`
ELon string `min=0,max=200", json:"elon"`
}
// SetArea returns the handler for /order/set/area: it parses the corner
// coordinates and hands the rectangle to Processor.setAreas.
// NOTE(review): ParseFloat errors are ignored (bad input silently becomes
// 0), and the setAreas result is discarded — consider reporting both.
func (or *Order) SetArea() echo.HandlerFunc {
	return func(c echo.Context) error {
		ao := new(AreaOptions)
		if err := c.Bind(ao); err != nil {
			return err
		}
		fmt.Printf("area %d\n", ao)
		slat, _ := strconv.ParseFloat(ao.SLat, 64)
		slon, _ := strconv.ParseFloat(ao.SLon, 64)
		elat, _ := strconv.ParseFloat(ao.ELat, 64)
		elon, _ := strconv.ParseFloat(ao.ELon, 64)
		// Corners listed counter-clockwise from (slat, slon).
		area := []*api.Coord{
			{Latitude: slat, Longitude: slon},
			{Latitude: slat, Longitude: elon},
			{Latitude: elat, Longitude: elon},
			{Latitude: elat, Longitude: slon},
		}
		proc.setAreas(area)
		return c.String(http.StatusOK, "Set Area")
	}
}
// Start returns the handler for /order/start: it launches the clock
// goroutine unless one is already running, in which case it answers 400.
func (or *Order) Start() echo.HandlerFunc {
	return func(c echo.Context) error {
		if startFlag {
			logger.Warn("Clock is already started.")
			return c.String(http.StatusBadRequest, "Start")
		}
		startFlag = true
		go proc.startClock()
		return c.String(http.StatusOK, "Start")
	}
}
// Stop returns the handler for /order/stop: clearing startFlag makes the
// clock loop exit at the end of its current cycle.
func (or *Order) Stop() echo.HandlerFunc {
	return func(c echo.Context) error {
		startFlag = false
		return c.String(http.StatusOK, "Stop")
	}
}
// startSimulatorServer wires the CLI/GUI order endpoints onto an echo
// server and blocks serving on the configured port.
func startSimulatorServer() {
	fmt.Printf("Starting Simulator Server...")
	order := NewOrder()
	e := echo.New()
	e.Use(middleware.Logger())
	e.Use(middleware.Recover())
	e.Use(middleware.CORS())
	e.POST("/order/set/clock", order.SetClock())
	e.POST("/order/set/agent", order.SetAgent())
	e.POST("/order/set/area", order.SetArea())
	e.POST("/order/start", order.Start())
	e.POST("/order/stop", order.Stop())
	// FIX: e.Start blocks and its error was silently discarded — log why
	// the server stopped (e.g. port already in use).
	if err := e.Start(":" + port); err != nil {
		log.Printf("simulator server stopped: %v", err)
	}
}
// main boots the master provider: it starts the order HTTP server,
// registers with the NodeID server (retrying until it is reachable),
// connects to the Synerex server over gRPC, subscribes the demand/supply
// callbacks, and then blocks forever (the WaitGroup is never Done'd).
func main() {
	fmt.Printf("NumCPU=%d\n", runtime.NumCPU())
	runtime.GOMAXPROCS(runtime.NumCPU())
	// ProviderManager
	uid, _ := uuid.NewRandom()
	myProvider = &api.Provider{
		Id:   uint64(uid.ID()),
		Name: "MasterServer",
		Type: api.ProviderType_MASTER,
	}
	pm = simutil.NewProviderManager(myProvider)
	// HTTP server receiving orders from the CLI/GUI.
	go startSimulatorServer()
	/*quit := make(chan os.Signal)
	signal.Notify(quit, os.Interrupt)
	<-quit*/
	// Connect to Node Server, retrying every 2 s until it answers.
	nodeapi := napi.NewNodeAPI()
	for {
		err := nodeapi.RegisterNodeName(nodeIdAddr, "MasterProvider", false)
		if err == nil {
			logger.Info("connected NodeID server!")
			go nodeapi.HandleSigInt()
			nodeapi.RegisterDeferFunction(nodeapi.UnRegisterNode)
			break
		} else {
			logger.Warn("NodeID Error... reconnecting...")
			time.Sleep(2 * time.Second)
		}
	}
	// Connect to Synerex Server.
	var opts []grpc.DialOption
	opts = append(opts, grpc.WithInsecure())
	conn, err := grpc.Dial(synerexAddr, opts...)
	if err != nil {
		log.Fatalf("fail to dial: %v", err)
	}
	nodeapi.RegisterDeferFunction(func() { conn.Close() })
	client := api.NewSynerexClient(conn)
	// NOTE(review): Sprintf with no verbs — a plain string literal would do.
	argJson := fmt.Sprintf("{Client:Master}")
	// api
	fmt.Printf("client: %v\n", client)
	simapi = api.NewSimAPI()
	simapi.RegistClients(client, myProvider.Id, argJson) // one client per channel
	simapi.SubscribeAll(demandCallback, supplyCallback)  // subscribe each channel
	// Block forever: Add(1) with no matching Done keeps the process alive.
	wg := sync.WaitGroup{}
	wg.Add(1)
	wg.Wait()
	nodeapi.CallDeferFunctions() // cleanup!
}
//////////////////////////////////
////////// Pod Generator ////////
//////////////////////////////////
// PodGenerator renders and applies the Kubernetes resources (service,
// worker/agent/gateway pods) backing each simulation area.
type PodGenerator struct {
	RsrcMap map[string][]Resource // area id -> resources applied for it
}

// NewPodGenerator returns a PodGenerator with an empty resource map.
func NewPodGenerator() *PodGenerator {
	return &PodGenerator{RsrcMap: map[string][]Resource{}}
}
// applyWorker writes the Kubernetes manifests for one area (service,
// worker pod, agent pod, plus one gateway pod per neighbor) into a yaml
// file, applies it with kubectl, and records the resources in RsrcMap so
// deleteWorker can tear them down later.
func (pg *PodGenerator) applyWorker(area *api.Area, neighbors []string) error {
	fmt.Printf("applying WorkerPod... %v\n", area.Id)
	areaid := strconv.FormatUint(area.Id, 10)
	rsrcs := []Resource{
		pg.NewWorkerService(areaid),
		pg.NewWorker(areaid),
		pg.NewAgent(areaid, area),
	}
	for _, neiId := range neighbors {
		rsrcs = append(rsrcs, pg.NewGateway(areaid, neiId))
	}
	fmt.Printf("applying WorkerPod2... %v\n", areaid)
	// Write all manifests ("---"-separated) into one yaml file.
	fileName := "scripts/worker" + areaid + ".yaml"
	for _, rsrc := range rsrcs {
		if err := WriteOnFile(fileName, rsrc); err != nil {
			fmt.Println(err)
			return err
		}
	}
	fmt.Printf("test: %v %v\n", fileName, areaid)
	// Apply the manifests.
	cmd := exec.Command("kubectl", "apply", "-f", fileName)
	out, err := cmd.Output()
	if err != nil {
		// FIX: was fmt.Println with an unexpanded %v verb — Println does
		// no formatting; use Printf so the error is actually rendered.
		fmt.Printf("Command Start Error. %v\n", err)
		return err
	}
	// delete yaml
	/*if err := os.Remove(fileName); err != nil {
		fmt.Println(err)
		return err
	}*/
	fmt.Printf("out: %v\n", string(out))
	// Remember what was applied for this area.
	pg.RsrcMap[areaid] = rsrcs
	return nil
}
// deleteWorker re-renders the recorded resources for areaid into a yaml
// file, runs `kubectl delete -f` against it, removes the file, and
// clears the area's entry in RsrcMap.
func (pg *PodGenerator) deleteWorker(areaid string) error {
	fmt.Printf("deleting WorkerPod...")
	rsrcs := pg.RsrcMap[areaid]
	// Re-create the manifest file from the recorded resources.
	fileName := "worker" + areaid + ".yaml"
	for _, rsrc := range rsrcs {
		err := WriteOnFile(fileName, rsrc)
		if err != nil {
			fmt.Println(err)
			return err
		}
	}
	// Delete the cluster resources described by the file.
	cmd := exec.Command("kubectl", "delete", "-f", fileName)
	out, err := cmd.Output()
	if err != nil {
		fmt.Println("Command Start Error.")
		return err
	}
	// Remove the temporary manifest file.
	if err := os.Remove(fileName); err != nil {
		fmt.Println(err)
		return err
	}
	fmt.Printf("out: %v\n", string(out))
	// Forget the resources for this area.
	pg.RsrcMap[areaid] = nil
	return nil
}
// gateway
// NewGateway builds the Pod resource for a gateway provider bridging
// worker areaId and its neighbor neiId: the env block points it at both
// workers' synerex (:700) and nodeid (:600) services.
func (pg *PodGenerator) NewGateway(areaId string, neiId string) Resource {
	worker1Name := "worker" + areaId
	worker2Name := "worker" + neiId
	gatewayName := "gateway" + areaId + neiId
	gateway := Resource{
		ApiVersion: "v1",
		Kind:       "Pod",
		Metadata: Metadata{
			Name:   gatewayName,
			Labels: Label{App: gatewayName},
		},
		Spec: Spec{
			Containers: []Container{
				{
					Name:            "gateway-provider",
					Image:           "synerex-simulation/gateway-provider:latest",
					ImagePullPolicy: "Never", // image must be pre-loaded on the node
					Env: []Env{
						{
							Name:  "WORKER_SYNEREX_SERVER1",
							Value: worker1Name + ":700",
						},
						{
							Name:  "WORKER_NODEID_SERVER1",
							Value: worker1Name + ":600",
						},
						{
							Name:  "WORKER_SYNEREX_SERVER2",
							Value: worker2Name + ":700",
						},
						{
							Name:  "WORKER_NODEID_SERVER2",
							Value: worker2Name + ":600",
						},
						{
							Name:  "PROVIDER_NAME",
							Value: "GatewayProvider" + areaId + neiId,
						},
					},
					Ports: []Port{{ContainerPort: 9980}},
				},
			},
		},
	}
	return gateway
}
// NewAgent builds the Pod resource for the agent provider of one area:
// it is pointed at its own worker's servers and at the visualization
// servers, and receives the area geometry as JSON in the AREA env var.
func (pg *PodGenerator) NewAgent(areaid string, area *api.Area) Resource {
	workerName := "worker" + areaid
	agentName := "agent" + areaid
	agent := Resource{
		ApiVersion: "v1",
		Kind:       "Pod",
		Metadata: Metadata{
			Name:   agentName,
			Labels: Label{App: agentName},
		},
		Spec: Spec{
			Containers: []Container{
				{
					Name:            "agent-provider",
					Image:           "synerex-simulation/agent-provider:latest",
					ImagePullPolicy: "Never", // image must be pre-loaded on the node
					Env: []Env{
						{
							Name:  "NODEID_SERVER",
							Value: workerName + ":600",
						},
						{
							Name:  "SYNEREX_SERVER",
							Value: workerName + ":700",
						},
						{
							Name:  "VIS_SYNEREX_SERVER",
							Value: "visualization:700",
						},
						{
							Name:  "VIS_NODEID_SERVER",
							Value: "visualization:600",
						},
						{
							Name:  "AREA",
							Value: convertAreaToJson(area), // area geometry as JSON
						},
						{
							Name:  "PROVIDER_NAME",
							Value: "AgentProvider" + areaid,
						},
					},
				},
			},
		},
	}
	return agent
}
// NewWorkerService builds the Service resource that exposes a worker
// pod: port 700 -> synerex server (10000), port 600 -> nodeid server (9000).
func (pg *PodGenerator) NewWorkerService(areaid string) Resource {
    svcName := "worker" + areaid
    ports := []Port{
        {Name: "synerex", Port: 700, TargetPort: 10000},
        {Name: "nodeid", Port: 600, TargetPort: 9000},
    }
    return Resource{
        ApiVersion: "v1",
        Kind:       "Service",
        Metadata:   Metadata{Name: svcName},
        Spec: Spec{
            Selector: Selector{App: svcName},
            Ports:    ports,
        },
    }
}
// NewWorker builds the worker Pod resource: a nodeid server, a synerex
// server and the worker provider, co-located in one pod and wired to
// each other over localhost ports 9000/10000.
func (pg *PodGenerator) NewWorker(areaid string) Resource {
    podName := "worker" + areaid

    nodeidServer := Container{
        Name:            "nodeid-server",
        Image:           "synerex-simulation/nodeid-server:latest",
        ImagePullPolicy: "Never",
        Env: []Env{
            {Name: "NODEID_SERVER", Value: ":9000"},
        },
        Ports: []Port{{ContainerPort: 9000}},
    }

    synerexServer := Container{
        Name:            "synerex-server",
        Image:           "synerex-simulation/synerex-server:latest",
        ImagePullPolicy: "Never",
        Env: []Env{
            {Name: "NODEID_SERVER", Value: ":9000"},
            {Name: "SYNEREX_SERVER", Value: ":10000"},
            {Name: "SERVER_NAME", Value: "SynerexServer" + areaid},
        },
        Ports: []Port{{ContainerPort: 10000}},
    }

    workerProvider := Container{
        Name:            "worker-provider",
        Image:           "synerex-simulation/worker-provider:latest",
        ImagePullPolicy: "Never",
        Env: []Env{
            {Name: "NODEID_SERVER", Value: ":9000"},
            {Name: "SYNEREX_SERVER", Value: ":10000"},
            {Name: "MASTER_SYNEREX_SERVER", Value: "master:700"},
            {Name: "MASTER_NODEID_SERVER", Value: "master:600"},
            {Name: "PORT", Value: "9980"},
            {Name: "PROVIDER_NAME", Value: "WorkerProvider" + areaid},
        },
        Ports: []Port{{ContainerPort: 9980}},
    }

    return Resource{
        ApiVersion: "v1",
        Kind:       "Pod",
        Metadata: Metadata{
            Name:   podName,
            Labels: Label{App: podName},
        },
        Spec: Spec{
            Containers: []Container{nodeidServer, synerexServer, workerProvider},
        },
    }
}
// WriteOnFile marshals data to YAML and appends it to fileName,
// followed by a "---" document separator so several resources can
// share one manifest file. All failures are returned to the caller.
func WriteOnFile(fileName string, data interface{}) error {
    // Convert the data to a []byte YAML document.
    buf, err := yaml.Marshal(data)
    if err != nil {
        return err
    }
    file, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
    if err != nil {
        // Previously log.Fatal(err): a function that returns error
        // should propagate instead of killing the process.
        return err
    }
    defer file.Close()
    if _, err := fmt.Fprintln(file, string(buf)); err != nil {
        return err
    }
    // Document separator between appended resources.
    if _, err := fmt.Fprintln(file, "---"); err != nil {
        return err
    }
    return nil
}
// convertAreaToJson serializes an area to the JSON string consumed by
// agent providers: its id, a fixed "Unknown" name, and the duplicate
// and control polygons as arrays of {latitude, longitude} objects.
func convertAreaToJson(area *api.Area) string {
    control := `[`
    for i, c := range area.ControlArea {
        if i > 0 {
            control += ","
        }
        control += fmt.Sprintf(`{"latitude":%v, "longitude":%v}`, c.Latitude, c.Longitude)
    }
    control += `]`

    duplicate := `[`
    for i, d := range area.DuplicateArea {
        if i > 0 {
            duplicate += ","
        }
        duplicate += fmt.Sprintf(`{"latitude":%v, "longitude":%v}`, d.Latitude, d.Longitude)
    }
    duplicate += `]`

    return fmt.Sprintf(`{"id":%d, "name":"Unknown", "duplicate_area": %s, "control_area": %s}`, area.Id, duplicate, control)
}
// Resource is a minimal Kubernetes manifest (Pod or Service) that is
// marshalled to YAML by WriteOnFile; omitempty keeps unused sections out.
type Resource struct {
    ApiVersion string   `yaml:"apiVersion,omitempty"`
    Kind       string   `yaml:"kind,omitempty"`
    Metadata   Metadata `yaml:"metadata,omitempty"`
    Spec       Spec     `yaml:"spec,omitempty"`
}

// Spec carries either Pod fields (Containers) or Service fields
// (Selector, Ports, Type), depending on the resource Kind.
type Spec struct {
    Containers []Container `yaml:"containers,omitempty"`
    Selector   Selector    `yaml:"selector,omitempty"`
    Ports      []Port      `yaml:"ports,omitempty"`
    Type       string      `yaml:"type,omitempty"`
}

// Container describes one container of a Pod spec.
type Container struct {
    Name            string `yaml:"name,omitempty"`
    Image           string `yaml:"image,omitempty"`
    ImagePullPolicy string `yaml:"imagePullPolicy,omitempty"`
    Stdin           bool   `yaml:"stdin,omitempty"`
    Tty             bool   `yaml:"tty,omitempty"`
    Env             []Env  `yaml:"env,omitempty"`
    Ports           []Port `yaml:"ports,omitempty"`
}

// Env is a single name/value environment variable entry.
type Env struct {
    Name  string `yaml:"name,omitempty"`
    Value string `yaml:"value,omitempty"`
}

// Selector selects pods for a Service, by app label directly or via
// matchLabels.
type Selector struct {
    App         string `yaml:"app,omitempty"`
    MatchLabels Label  `yaml:"matchLabels,omitempty"`
}

// Port doubles as a Service port entry (Name/Port/TargetPort) and a
// container port entry (ContainerPort).
type Port struct {
    Name          string `yaml:"name,omitempty"`
    Port          int    `yaml:"port,omitempty"`
    TargetPort    int    `yaml:"targetPort,omitempty"`
    ContainerPort int    `yaml:"containerPort,omitempty"`
}

// Metadata holds the resource name and labels.
type Metadata struct {
    Name   string `yaml:"name,omitempty"`
    Labels Label  `yaml:"labels,omitempty"`
}

// Label is the single "app" label used throughout this generator.
type Label struct {
    App string `yaml:"app,omitempty"`
}

// Area pairs an id with control/duplicate polygons of api coordinates.
type Area struct {
    Id        int
    Control   []*api.Coord
    Duplicate []*api.Coord
}

// Coord is a plain latitude/longitude pair.
type Coord struct {
    Latitude  float64
    Longitude float64
}
// GetCoordRange scans coords and returns (maxLat, maxLon, minLat, minLon).
// With an empty slice the infinite seed values are returned unchanged.
func GetCoordRange(coords []*api.Coord) (float64, float64, float64, float64) {
    maxLat, maxLon := math.Inf(-1), math.Inf(-1)
    minLat, minLon := math.Inf(0), math.Inf(0)
    for _, c := range coords {
        if c.Latitude > maxLat {
            maxLat = c.Latitude
        }
        if c.Latitude < minLat {
            minLat = c.Latitude
        }
        if c.Longitude > maxLon {
            maxLon = c.Longitude
        }
        if c.Longitude < minLon {
            minLon = c.Longitude
        }
    }
    return maxLat, maxLon, minLat, minLon
}
/////////////////////////////////////////////////////
//////// util for creating higashiyama route ////////
///////////////////////////////////////////////////////
// RoutePoint is a node of the walking-route graph: a named coordinate
// plus its adjacent points. Note that NeighborPoints entries created in
// GetRoutes are standalone copies (their own NeighborPoints are nil),
// not pointers back into the main route list.
type RoutePoint struct {
    Id             uint64
    Name           string
    Point          *api.Coord
    NeighborPoints []*RoutePoint
}
// GetRoutes returns the hard-coded route graph for the Higashiyama
// scenario: each entry lists a point and shallow copies of its
// neighbours (neighbour entries carry nil NeighborPoints).
//
// NOTE(review): "east4" and "east5" both use Id 25 (and all neighbour
// references to east5 also say 25), while "east6" is 26 — the ids are
// not unique. Confirm whether consumers key on Id or Name before
// renumbering.
func GetRoutes() []*RoutePoint {
    routes := []*RoutePoint{
        {
            Id: 0, Name: "gate", Point: &api.Coord{Longitude: 136.974024, Latitude: 35.158995},
            NeighborPoints: []*RoutePoint{
                {Id: 1, Name: "enterance", Point: &api.Coord{Longitude: 136.974688, Latitude: 35.158228}},
            },
        },
        {
            Id: 1, Name: "enterance", Point: &api.Coord{Longitude: 136.974688, Latitude: 35.158228},
            NeighborPoints: []*RoutePoint{
                {Id: 0, Name: "gate", Point: &api.Coord{Longitude: 136.974024, Latitude: 35.158995}},
                {Id: 2, Name: "rightEnt", Point: &api.Coord{Longitude: 136.974645, Latitude: 35.157958}},
                {Id: 3, Name: "leftEnt", Point: &api.Coord{Longitude: 136.974938, Latitude: 35.158164}},
            },
        },
        {
            Id: 2, Name: "rightEnt", Point: &api.Coord{Longitude: 136.974645, Latitude: 35.157958},
            NeighborPoints: []*RoutePoint{
                {Id: 1, Name: "enterance", Point: &api.Coord{Longitude: 136.974688, Latitude: 35.158228}},
                {Id: 4, Name: "road1", Point: &api.Coord{Longitude: 136.974864, Latitude: 35.157823}},
            },
        },
        {
            Id: 3, Name: "leftEnt", Point: &api.Coord{Longitude: 136.974938, Latitude: 35.158164},
            NeighborPoints: []*RoutePoint{
                {Id: 1, Name: "enterance", Point: &api.Coord{Longitude: 136.974688, Latitude: 35.158228}},
                {Id: 5, Name: "road2", Point: &api.Coord{Longitude: 136.975054, Latitude: 35.158001}},
                {Id: 17, Name: "north1", Point: &api.Coord{Longitude: 136.976395, Latitude: 35.158410}},
            },
        },
        {
            Id: 4, Name: "road1", Point: &api.Coord{Longitude: 136.974864, Latitude: 35.157823},
            NeighborPoints: []*RoutePoint{
                {Id: 2, Name: "rightEnt", Point: &api.Coord{Longitude: 136.974645, Latitude: 35.157958}},
                {Id: 5, Name: "road2", Point: &api.Coord{Longitude: 136.975054, Latitude: 35.158001}},
                {Id: 6, Name: "road3", Point: &api.Coord{Longitude: 136.975517, Latitude: 35.157096}},
            },
        },
        {
            Id: 5, Name: "road2", Point: &api.Coord{Longitude: 136.975054, Latitude: 35.158001},
            NeighborPoints: []*RoutePoint{
                {Id: 3, Name: "leftEnt", Point: &api.Coord{Longitude: 136.974938, Latitude: 35.158164}},
                {Id: 4, Name: "road1", Point: &api.Coord{Longitude: 136.974864, Latitude: 35.157823}},
            },
        },
        {
            Id: 6, Name: "road3", Point: &api.Coord{Longitude: 136.975517, Latitude: 35.157096},
            NeighborPoints: []*RoutePoint{
                {Id: 7, Name: "road4", Point: &api.Coord{Longitude: 136.975872, Latitude: 35.156678}},
                {Id: 4, Name: "road1", Point: &api.Coord{Longitude: 136.974864, Latitude: 35.157823}},
            },
        },
        {
            Id: 7, Name: "road4", Point: &api.Coord{Longitude: 136.975872, Latitude: 35.156678},
            NeighborPoints: []*RoutePoint{
                {Id: 6, Name: "road3", Point: &api.Coord{Longitude: 136.975517, Latitude: 35.157096}},
                {Id: 8, Name: "road5", Point: &api.Coord{Longitude: 136.976314, Latitude: 35.156757}},
                {Id: 10, Name: "burger", Point: &api.Coord{Longitude: 136.976960, Latitude: 35.155697}},
            },
        },
        {
            Id: 8, Name: "road5", Point: &api.Coord{Longitude: 136.976314, Latitude: 35.156757},
            NeighborPoints: []*RoutePoint{
                {Id: 6, Name: "road3", Point: &api.Coord{Longitude: 136.975517, Latitude: 35.157096}},
                {Id: 9, Name: "toilet", Point: &api.Coord{Longitude: 136.977261, Latitude: 35.155951}},
            },
        },
        {
            Id: 9, Name: "toilet", Point: &api.Coord{Longitude: 136.977261, Latitude: 35.155951},
            NeighborPoints: []*RoutePoint{
                {Id: 8, Name: "road5", Point: &api.Coord{Longitude: 136.976314, Latitude: 35.156757}},
                {Id: 10, Name: "burger", Point: &api.Coord{Longitude: 136.976960, Latitude: 35.155697}},
            },
        },
        {
            Id: 10, Name: "burger", Point: &api.Coord{Longitude: 136.976960, Latitude: 35.155697},
            NeighborPoints: []*RoutePoint{
                {Id: 8, Name: "road5", Point: &api.Coord{Longitude: 136.976314, Latitude: 35.156757}},
                {Id: 7, Name: "road4", Point: &api.Coord{Longitude: 136.975872, Latitude: 35.156678}},
                {Id: 11, Name: "lake1", Point: &api.Coord{Longitude: 136.978217, Latitude: 35.155266}},
            },
        },
        {
            Id: 11, Name: "lake1", Point: &api.Coord{Longitude: 136.978217, Latitude: 35.155266},
            NeighborPoints: []*RoutePoint{
                {Id: 10, Name: "burger", Point: &api.Coord{Longitude: 136.976960, Latitude: 35.155697}},
                {Id: 12, Name: "lake2", Point: &api.Coord{Longitude: 136.978623, Latitude: 35.155855}},
                {Id: 16, Name: "lake6", Point: &api.Coord{Longitude: 136.978297, Latitude: 35.154755}},
            },
        },
        {
            Id: 12, Name: "lake2", Point: &api.Coord{Longitude: 136.978623, Latitude: 35.155855},
            NeighborPoints: []*RoutePoint{
                {Id: 11, Name: "lake1", Point: &api.Coord{Longitude: 136.978217, Latitude: 35.155266}},
                {Id: 13, Name: "lake3", Point: &api.Coord{Longitude: 136.979657, Latitude: 35.155659}},
            },
        },
        // lake3 is the main junction towards the east section.
        {
            Id: 13, Name: "lake3", Point: &api.Coord{Longitude: 136.979657, Latitude: 35.155659},
            NeighborPoints: []*RoutePoint{
                {Id: 12, Name: "lake2", Point: &api.Coord{Longitude: 136.978623, Latitude: 35.155855}},
                {Id: 14, Name: "lake4", Point: &api.Coord{Longitude: 136.980489, Latitude: 35.154484}},
                {Id: 26, Name: "east6", Point: &api.Coord{Longitude: 136.984100, Latitude: 35.153693}},
                {Id: 22, Name: "east1", Point: &api.Coord{Longitude: 136.981124, Latitude: 35.157283}},
                {Id: 27, Name: "east-in1", Point: &api.Coord{Longitude: 136.982804, Latitude: 35.154175}},
            },
        },
        {
            Id: 14, Name: "lake4", Point: &api.Coord{Longitude: 136.980489, Latitude: 35.154484},
            NeighborPoints: []*RoutePoint{
                {Id: 13, Name: "lake3", Point: &api.Coord{Longitude: 136.979657, Latitude: 35.155659}},
                {Id: 15, Name: "lake5", Point: &api.Coord{Longitude: 136.980143, Latitude: 35.153869}},
            },
        },
        {
            Id: 15, Name: "lake5", Point: &api.Coord{Longitude: 136.980143, Latitude: 35.153869},
            NeighborPoints: []*RoutePoint{
                {Id: 14, Name: "lake4", Point: &api.Coord{Longitude: 136.980489, Latitude: 35.154484}},
                {Id: 16, Name: "lake6", Point: &api.Coord{Longitude: 136.978297, Latitude: 35.154755}},
            },
        },
        {
            Id: 16, Name: "lake6", Point: &api.Coord{Longitude: 136.978297, Latitude: 35.154755},
            NeighborPoints: []*RoutePoint{
                {Id: 11, Name: "lake1", Point: &api.Coord{Longitude: 136.978217, Latitude: 35.155266}},
                {Id: 15, Name: "lake5", Point: &api.Coord{Longitude: 136.980143, Latitude: 35.153869}},
            },
        },
        {
            Id: 17, Name: "north1", Point: &api.Coord{Longitude: 136.976395, Latitude: 35.158410},
            NeighborPoints: []*RoutePoint{
                {Id: 3, Name: "leftEnt", Point: &api.Coord{Longitude: 136.974938, Latitude: 35.158164}},
                {Id: 5, Name: "road2", Point: &api.Coord{Longitude: 136.975054, Latitude: 35.158001}},
                {Id: 18, Name: "north2", Point: &api.Coord{Longitude: 136.977821, Latitude: 35.159220}},
            },
        },
        {
            Id: 18, Name: "north2", Point: &api.Coord{Longitude: 136.977821, Latitude: 35.159220},
            NeighborPoints: []*RoutePoint{
                {Id: 17, Name: "north1", Point: &api.Coord{Longitude: 136.976395, Latitude: 35.158410}},
                {Id: 19, Name: "medaka", Point: &api.Coord{Longitude: 136.979040, Latitude: 35.158147}},
            },
        },
        {
            Id: 19, Name: "medaka", Point: &api.Coord{Longitude: 136.979040, Latitude: 35.158147},
            NeighborPoints: []*RoutePoint{
                {Id: 18, Name: "north2", Point: &api.Coord{Longitude: 136.977821, Latitude: 35.159220}},
                {Id: 20, Name: "tower", Point: &api.Coord{Longitude: 136.978846, Latitude: 35.157108}},
            },
        },
        {
            Id: 20, Name: "tower", Point: &api.Coord{Longitude: 136.978846, Latitude: 35.157108},
            NeighborPoints: []*RoutePoint{
                {Id: 19, Name: "medaka", Point: &api.Coord{Longitude: 136.979040, Latitude: 35.158147}},
                {Id: 21, Name: "north-out", Point: &api.Coord{Longitude: 136.977890, Latitude: 35.156563}},
            },
        },
        {
            Id: 21, Name: "north-out", Point: &api.Coord{Longitude: 136.977890, Latitude: 35.156563},
            NeighborPoints: []*RoutePoint{
                {Id: 20, Name: "tower", Point: &api.Coord{Longitude: 136.978846, Latitude: 35.157108}},
                {Id: 17, Name: "north1", Point: &api.Coord{Longitude: 136.976395, Latitude: 35.158410}},
                {Id: 9, Name: "toilet", Point: &api.Coord{Longitude: 136.977261, Latitude: 35.155951}},
            },
        },
        {
            Id: 22, Name: "east1", Point: &api.Coord{Longitude: 136.981124, Latitude: 35.157283},
            NeighborPoints: []*RoutePoint{
                {Id: 13, Name: "lake3", Point: &api.Coord{Longitude: 136.979657, Latitude: 35.155659}},
                {Id: 23, Name: "east2", Point: &api.Coord{Longitude: 136.984350, Latitude: 35.157271}},
            },
        },
        {
            Id: 23, Name: "east2", Point: &api.Coord{Longitude: 136.984350, Latitude: 35.157271},
            NeighborPoints: []*RoutePoint{
                {Id: 22, Name: "east1", Point: &api.Coord{Longitude: 136.981124, Latitude: 35.157283}},
                {Id: 24, Name: "east3", Point: &api.Coord{Longitude: 136.987567, Latitude: 35.158233}},
            },
        },
        {
            Id: 24, Name: "east3", Point: &api.Coord{Longitude: 136.987567, Latitude: 35.158233},
            NeighborPoints: []*RoutePoint{
                {Id: 23, Name: "east2", Point: &api.Coord{Longitude: 136.984350, Latitude: 35.157271}},
                {Id: 25, Name: "east4", Point: &api.Coord{Longitude: 136.988522, Latitude: 35.157286}},
            },
        },
        // NOTE(review): east4 and east5 below share Id 25 — see the
        // function comment.
        {
            Id: 25, Name: "east4", Point: &api.Coord{Longitude: 136.988522, Latitude: 35.157286},
            NeighborPoints: []*RoutePoint{
                {Id: 24, Name: "east3", Point: &api.Coord{Longitude: 136.987567, Latitude: 35.158233}},
                {Id: 25, Name: "east5", Point: &api.Coord{Longitude: 136.988355, Latitude: 35.155838}},
            },
        },
        {
            Id: 25, Name: "east5", Point: &api.Coord{Longitude: 136.988355, Latitude: 35.155838},
            NeighborPoints: []*RoutePoint{
                {Id: 25, Name: "east4", Point: &api.Coord{Longitude: 136.988522, Latitude: 35.157286}},
                {Id: 26, Name: "east6", Point: &api.Coord{Longitude: 136.984100, Latitude: 35.153693}},
            },
        },
        {
            Id: 26, Name: "east6", Point: &api.Coord{Longitude: 136.984100, Latitude: 35.153693},
            NeighborPoints: []*RoutePoint{
                {Id: 25, Name: "east5", Point: &api.Coord{Longitude: 136.988355, Latitude: 35.155838}},
                {Id: 13, Name: "lake3", Point: &api.Coord{Longitude: 136.979657, Latitude: 35.155659}},
                {Id: 27, Name: "east-in1", Point: &api.Coord{Longitude: 136.982804, Latitude: 35.154175}},
            },
        },
        {
            Id: 27, Name: "east-in1", Point: &api.Coord{Longitude: 136.982804, Latitude: 35.154175},
            NeighborPoints: []*RoutePoint{
                {Id: 26, Name: "east6", Point: &api.Coord{Longitude: 136.984100, Latitude: 35.153693}},
                {Id: 13, Name: "lake3", Point: &api.Coord{Longitude: 136.979657, Latitude: 35.155659}},
                {Id: 28, Name: "east-in2", Point: &api.Coord{Longitude: 136.984244, Latitude: 35.156283}},
            },
        },
        {
            Id: 28, Name: "east-in2", Point: &api.Coord{Longitude: 136.984244, Latitude: 35.156283},
            NeighborPoints: []*RoutePoint{
                {Id: 29, Name: "east-in3", Point: &api.Coord{Longitude: 136.987627, Latitude: 35.157104}},
                {Id: 27, Name: "east-in1", Point: &api.Coord{Longitude: 136.982804, Latitude: 35.154175}},
            },
        },
        {
            Id: 29, Name: "east-in3", Point: &api.Coord{Longitude: 136.987627, Latitude: 35.157104},
            NeighborPoints: []*RoutePoint{
                {Id: 28, Name: "east-in2", Point: &api.Coord{Longitude: 136.984244, Latitude: 35.156283}},
                {Id: 30, Name: "east-in4", Point: &api.Coord{Longitude: 136.986063, Latitude: 35.155353}},
            },
        },
        {
            Id: 30, Name: "east-in4", Point: &api.Coord{Longitude: 136.986063, Latitude: 35.155353},
            NeighborPoints: []*RoutePoint{
                {Id: 29, Name: "east-in3", Point: &api.Coord{Longitude: 136.987627, Latitude: 35.157104}},
                {Id: 26, Name: "east6", Point: &api.Coord{Longitude: 136.984100, Latitude: 35.153693}},
            },
        },
    }
    return routes
}
// GetAmongPosition returns a random coordinate between pos1 and pos2.
// Latitude and longitude are interpolated with independent random
// factors, so the point lies within the bounding box of the two
// positions (not necessarily on the segment joining them).
func GetAmongPosition(pos1 *api.Coord, pos2 *api.Coord) *api.Coord {
    latFrac := rand.Float64()
    lonFrac := rand.Float64()
    return &api.Coord{
        Latitude:  pos1.Latitude + (pos2.Latitude-pos1.Latitude)*latFrac,
        Longitude: pos1.Longitude + (pos2.Longitude-pos1.Longitude)*lonFrac,
    }
}
|
package chapter5
import (
"fmt"
)
// person is a sample type used to demonstrate value vs pointer method
// receivers.
type person struct {
    name  string
    email string
}
// A function with a receiver is called a method.
// Go allows a method to be called through either a value or a pointer,
// regardless of the receiver type: the compiler automatically takes the
// address of a value, or dereferences a pointer, behind the scenes.

// notify prints the user's name and email (value receiver, read-only).
func (p person) notify() {
    fmt.Printf("Sending User Email To %s<%s>\n", p.name, p.email)
}

// changeEmail mutates the email through a pointer receiver, so the
// change is visible to the caller.
func (p *person) changeEmail(email string) {
    p.email = email
}

// changeEmailV2 uses a value receiver: it mutates only a copy, so the
// caller's struct is left unchanged (see methodDemo).
func (p person) changeEmailV2(email string) {
    p.email = email
}
// The following does not compile: the compiler only allows methods to be
// declared on named, locally defined types.
//func (dict map[string]int) test(aaa string) {
//
//}
// methodDemo exercises notify/changeEmail/changeEmailV2 through both a
// value (tom) and pointers (jerry, lucy). Because changeEmailV2 has a
// value receiver, lucy's email is printed unchanged at the end.
func methodDemo() {
    tom := person{"tom", "tom@google.com"}
    jerry := &person{"jerry", "jerry@google.com"}
    lucy := &person{"lucy", "lucy@google.com"}

    tom.notify()
    jerry.notify()

    tom.changeEmail("mot@google.com")
    tom.notify()

    jerry.changeEmail("yerry@google.com")
    jerry.notify()

    lucy.notify()
    lucy.changeEmailV2("lily@google.com")
    lucy.notify()
}
|
package main
// main demonstrates slice re-slicing bounds. Re-slicing may extend up
// to the capacity, but a missing high bound defaults to len(x), which
// makes x[6:] panic here.
func main() {
    x := make([]int, 2, 10)
    _ = x[6:10] // legal: the high bound may go up to cap(x) == 10
    _ = x[6:]   // panics: high defaults to len(x) == 2, and 6 > 2
    _ = x[2:]
}
|
package domain
import (
"errors"
"time"
)
var (
    // ErrTodoNotFound is returned when a requested todo does not exist.
    ErrTodoNotFound = errors.New("todo not found")
)

type (
    // Todo is the persisted todo entity (JSON for the API, BSON for
    // MongoDB storage).
    Todo struct {
        ID          string    `json:"id" bson:"id"`
        Title       string    `json:"title" bson:"title,omitempty"`
        Description string    `json:"description" bson:"description,omitempty"`
        Completed   bool      `json:"completed" bson:"completed,omitempty"`
        Timestamp   time.Time `json:"timestamp" bson:"timestamp,omitempty"`
    }

    // TodoCreatePayload is the request body for creating a todo;
    // Title and Description are required.
    TodoCreatePayload struct {
        ID          string    `json:"id"`
        Title       string    `json:"title" validate:"required"`
        Description string    `json:"description" validate:"required"`
        Timestamp   time.Time `json:"timestamp"`
    }

    // TodoUpdatePayload is the request body for a partial update; all
    // fields are optional but validated for minimum length when present.
    TodoUpdatePayload struct {
        ID          string `json:"id"`
        Title       string `json:"title" validate:"omitempty,min=5"`
        Description string `json:"description" validate:"omitempty,min=15"`
        Completed   bool   `json:"completed" validate:"omitempty"`
    }

    // TodoReplacePayload is the request body for a full replacement;
    // Title and Description are required with minimum lengths.
    TodoReplacePayload struct {
        ID          string `json:"id"`
        Title       string `json:"title" validate:"required,min=5"`
        Description string `json:"description" validate:"required,min=15"`
        Completed   bool   `json:"completed" validate:"omitempty"`
    }
)
// TableName names the database table backing the Todo model so ORMs
// use "todos" instead of a derived default.
func (Todo) TableName() string {
    const tableName = "todos"
    return tableName
}
|
package main
import "fmt"
// main approximates pi with the Leibniz series
// pi = 4 - 4/3 + 4/5 - 4/7 + ... over one billion terms.
func main() {
    const step = 1000000000
    pi := 4.0
    denom := 3.0
    sign := -1.0
    for i := 0; i < step; i++ {
        pi = pi + (4.0 / denom * sign)
        denom += 2.0
        sign = -sign
    }
    fmt.Println("pi = ", pi)
}
|
package repository
// repos holds the package-wide repository instance.
// NOTE(review): *AllRepository is a pointer to an interface, which is
// rarely intended in Go — confirm usage and consider plain
// `var repos AllRepository` instead.
var repos *AllRepository

// AllRepository abstracts the persistence layer; currently only Create.
type AllRepository interface {
    Create() error
}
|
package main
import (
"goApi/controllers/api"
"goApi/controllers/users"
"log"
"os"
"goApi/utils/db"
"github.com/gin-gonic/gin"
"github.com/joho/godotenv"
)
// main loads configuration from .env, connects to MySQL, registers the
// API routes and serves HTTP on port 5000.
func main() {
    // Load env variables.
    err := godotenv.Load()
    if err != nil {
        log.Fatal("Error loading .env file")
    }
    // Gin runs in debug mode here; switch to gin.ReleaseMode for
    // production (the old comment claimed production mode incorrectly).
    gin.SetMode(gin.DebugMode)
    // Connect to MySQL.
    db.Connect(os.Getenv("MYSQL_USERNAME"), os.Getenv("MYSQL_PASSWORD"), os.Getenv("MYSQL_DB"))
    // Close the database when main returns.
    defer db.Close()
    // gin.Default() already returns an engine with the default
    // middlewares; the previous extra gin.New() was dead code whose
    // result was immediately overwritten.
    r := gin.Default()
    // Initialize controllers.
    api.Init(r)
    users.Init(r)
    // Start the server; fail loudly if it cannot bind.
    if err := r.Run(":5000"); err != nil {
        log.Fatal(err)
    }
}
|
package system
import (
"errors"
"time"
"github.com/fanda-org/postmasters/database/models"
"golang.org/x/crypto/bcrypt"
// "github.com/jinzhu/gorm"
// "github.com/satori/go.uuid"
// _ "github.com/jinzhu/gorm/dialects/postgres"
// "database/sql"
)
// User model: login identity with optional contact details and the
// organizations the user belongs to (hidden from JSON output).
type User struct {
    models.Base
    UserName     string     `gorm:"size:50;not null;unique_index:uix_user_name" json:"userName"`
    Email        string     `gorm:"size:100;not null;unique_index:uix_user_email" json:"email"`
    PasswordHash string     `gorm:"size:255;not null" json:"-"` // never serialized to JSON
    Salutation   *string    `gorm:"size:5" json:"salutation,omitempty"`
    FirstName    *string    `gorm:"size:50;index:ix_user_firstname" json:"firstName,omitempty"`
    LastName     *string    `gorm:"size:50" json:"lastName,omitempty"`
    WorkPhone    *string    `gorm:"size:25" json:"workPhone,omitempty"`
    Mobile       *string    `gorm:"size:25;index:ix_user_mobile" json:"mobile,omitempty"`
    LoginAt      *time.Time `json:"loginAt,omitempty"`
    Organizations []Organization `gorm:"many2many:org_users;foreignkey:ID;association_foreignkey:ID;jointable_foreignkey:user_id;association_jointable_foreignkey:org_id;" json:"-"`
}

// Roles []Role `gorm:"many2many:user_roles"`
// SetPassword - What's bcrypt? https://en.wikipedia.org/wiki/Bcrypt
// Golang bcrypt doc: https://godoc.org/golang.org/x/crypto/bcrypt
// You can change the value in bcrypt.DefaultCost to adjust the security index.
//	err := User.SetPassword("password0")
// It hashes the plain-text password and stores the hash on the user.
func (u *User) SetPassword(password string) error {
    if len(password) == 0 {
        return errors.New("Password should not be empty")
    }
    bytePassword := []byte(password)
    // bcrypt.GenerateFromPassword can fail (e.g. cost outside the valid
    // range); the previous code discarded that error and could store an
    // empty hash.
    passwordHash, err := bcrypt.GenerateFromPassword(bytePassword, bcrypt.DefaultCost)
    if err != nil {
        return err
    }
    u.PasswordHash = string(passwordHash)
    return nil
}
// VerifyPassword compares the stored bcrypt hash with the supplied
// plain-text password; it returns nil on a match and an error otherwise.
// Usage: if err := userModel.VerifyPassword("password0"); err != nil { ... }
func (u *User) VerifyPassword(password string) error {
    return bcrypt.CompareHashAndPassword([]byte(u.PasswordHash), []byte(password))
}
|
package router
import (
"sync"
"github.com/gin-gonic/gin"
"github.com/zhj0811/fabric-normal/apiserver/handler"
)
// router is the process-wide gin engine (global router).
var router *gin.Engine

// onceCreateRouter guards one-time construction of the router.
var onceCreateRouter sync.Once

// GetRouter returns the singleton router, building it on first use.
func GetRouter() *gin.Engine {
    onceCreateRouter.Do(func() {
        router = createRouter()
    })
    return router
}
// createRouter builds the gin engine and registers the /v1 endpoints.
// Login/register and the token middleware are kept disabled for now.
func createRouter() *gin.Engine {
    engine := gin.Default()
    //engine.POST("/login", handler.Login)       // login
    //engine.POST("/register", handler.Register) // org registration
    //engine.Use(handler.TokenAuthMiddleware())
    apiV1 := engine.Group("/v1")
    apiV1.POST("/invoke", handler.Invoke)
    apiV1.GET("/query", handler.Query)
    return engine
}
|
// SPDX-License-Identifier: MIT
// Package locale 提供了一个本地化翻译服务。
package locale
import (
"github.com/issue9/localeutil"
"golang.org/x/text/language"
"golang.org/x/text/message"
)
// DefaultLocaleID is the default localization language ID.
//
// When no ID has been set via the related functions, or an unsupported
// ID was requested, the system ultimately falls back to this ID.
const DefaultLocaleID = "cmn-Hans"

var (
    // Initialized eagerly: some packages' tests depend on these values.
    localeTag     = language.MustParse(DefaultLocaleID)
    localePrinter = message.NewPrinter(localeTag)
    tags          = []language.Tag{}
)

// LocaleStringer aliases localeutil.LocaleStringer for convenience.
type LocaleStringer = localeutil.LocaleStringer

// Error is a localized error wrapping a LocaleStringer message.
type Error struct {
    l LocaleStringer
}
// setMessages registers the key/value translations under the language
// parsed from id and records the tag in the supported list. The tag
// for DefaultLocaleID is always kept as the first element so matching
// falls back to it.
func setMessages(id string, messages map[string]string) {
    tag := language.MustParse(id)
    for key, val := range messages {
        if err := message.SetString(tag, key, val); err != nil {
            panic(err)
        }
    }
    if id == DefaultLocaleID {
        // Prepend: DefaultLocaleID must stay at index 0.
        tags = append([]language.Tag{tag}, tags...)
    } else {
        tags = append(tags, tag)
    }
}
// Error renders the wrapped message with the current locale printer.
func (err *Error) Error() string { return err.l.LocaleString(localePrinter) }

// SetTag switches the localization environment to the best supported
// match for tag.
func SetTag(tag language.Tag) {
    tag, _, _ = language.NewMatcher(tags).Match(tag)
    localeTag = tag
    localePrinter = message.NewPrinter(localeTag)
}

// Tag returns the current locale tag.
func Tag() language.Tag { return localeTag }

// Tags returns a copy of the list of supported language tags.
func Tags() []language.Tag {
    ret := make([]language.Tag, len(tags))
    copy(ret, tags)
    return ret
}

// Sprintf is like fmt.Sprintf, bound to the current locale.
func Sprintf(key message.Reference, v ...any) string {
    return localePrinter.Sprintf(key, v...)
}

// New declares a new Locale object.
func New(key message.Reference, v ...any) LocaleStringer { return localeutil.Phrase(key, v...) }

// NewError returns a localized error object.
func NewError(key message.Reference, v ...any) error { return &Error{l: localeutil.Phrase(key, v...)} }

// Translate works like Sprintf, but renders with an explicitly given
// locale ID.
func Translate(localeID string, key message.Reference, v ...any) string {
    tag, _ := language.MatchStrings(language.NewMatcher(tags), localeID)
    return message.NewPrinter(tag).Sprintf(key, v...)
}
|
package main
import (
"context"
"fmt"
"github.com/zcong1993/ip2region-service/service"
"go.opencensus.io/examples/exporter"
"go.opencensus.io/exporter/jaeger"
"go.opencensus.io/plugin/ocgrpc"
"go.opencensus.io/stats/view"
"go.opencensus.io/trace"
"log"
"net"
"net/http"
"os"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/zcong1993/ip2region-service/pb"
"google.golang.org/grpc"
)
// initJaegerTracing wires OpenCensus stats and trace exporting to a
// Jaeger collector. It is a no-op when JAEGER_SERVICE_ADDR is unset.
func initJaegerTracing() {
    svcAddr := os.Getenv("JAEGER_SERVICE_ADDR")
    if svcAddr == "" {
        fmt.Println("jaeger initialization disabled.")
        return
    }
    view.RegisterExporter(&exporter.PrintExporter{})
    // Register the views to collect server request count.
    if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
        log.Fatal(err)
    }
    // Register the Jaeger exporter to be able to retrieve the collected
    // spans. Named je so it no longer shadows the imported "exporter"
    // package.
    je, err := jaeger.NewExporter(jaeger.Options{
        CollectorEndpoint: fmt.Sprintf("http://%s", svcAddr),
        Process: jaeger.Process{
            ServiceName: "ip2region-service",
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    trace.RegisterExporter(je)
    trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
    fmt.Println("jaeger initialization completed.")
}
// runGatewayServer starts a grpc-gateway HTTP proxy on port that
// forwards JSON requests to the gRPC endpoint at rpcPort.
func runGatewayServer(rpcPort, port string) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    mux := runtime.NewServeMux()
    opts := []grpc.DialOption{grpc.WithInsecure()}
    if err := pb.RegisterIP2RegionHandlerFromEndpoint(ctx, mux, rpcPort, opts); err != nil {
        log.Fatal("Serve http error:", err)
    }
    // ListenAndServe's return value was discarded before; a failed bind
    // now terminates with a diagnostic instead of silently returning.
    if err := http.ListenAndServe(port, mux); err != nil {
        log.Fatal("Serve http error:", err)
    }
}
// runRpcServer starts the gRPC IP2Region service listening on port
// (e.g. ":1234") and blocks serving requests.
func runRpcServer(port string) {
    ss := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
    pb.RegisterIP2RegionServer(ss, service.NewIP2RegionService(os.Getenv("DB_PATH")))
    // Honor the port argument: it was previously ignored in favor of a
    // hard-coded ":1234".
    listener, err := net.Listen("tcp", port)
    if err != nil {
        log.Fatal("ListenTCP error:", err)
    }
    // The old message here was copy-pasted from the listen branch.
    if err = ss.Serve(listener); err != nil {
        log.Fatal("Serve error:", err)
    }
}
// main optionally starts Jaeger tracing setup and the HTTP gateway,
// then serves gRPC on :1234 (blocking).
func main() {
    go initJaegerTracing()
    if os.Getenv("GATEWAY") == "true" {
        // fmt.Println rather than the builtin println: the builtin is
        // implementation-defined and writes to stderr.
        fmt.Println("run gateway server on :9009")
        go runGatewayServer(":1234", ":9009")
    }
    runRpcServer(":1234")
}
|
package main
import (
"github.com/panicthis/A"
"github.com/panicthis/B"
)
// main invokes the Visit helpers of packages A and B in order.
func main() {
    A.Visit()
    B.Visit()
}
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package safehttp
import (
"io"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/safehtml"
"github.com/google/safehtml/template"
)
// testDispatcher is a minimal Dispatcher implementation for the mux tests.
type testDispatcher struct{}

// Write writes resp to rw when it is a safehtml.HTML value and panics
// on any other response type.
func (testDispatcher) Write(rw http.ResponseWriter, resp Response) error {
    switch x := resp.(type) {
    case safehtml.HTML:
        _, err := rw.Write([]byte(x.String()))
        return err
    default:
        panic("not a safe response type")
    }
}
// ExecuteTemplate renders a safehtml *template.Template to rw and
// panics on any other template type.
func (testDispatcher) ExecuteTemplate(rw http.ResponseWriter, t Template, data interface{}) error {
    switch x := t.(type) {
    case *template.Template:
        return x.Execute(rw, data)
    default:
        // The previous message said "response type", copy-pasted from
        // Write; this method rejects unsafe *template* types.
        panic("not a safe template type")
    }
}
// responseRecorder is a lightweight http.ResponseWriter for tests:
// headers and status code are recorded, body bytes go to writer.
type responseRecorder struct {
    header http.Header
    writer io.Writer
    status int
}

// newResponseRecorder returns a recorder that writes the body to w,
// with an empty header map and a default status of 200 OK.
func newResponseRecorder(w io.Writer) *responseRecorder {
    return &responseRecorder{header: http.Header{}, writer: w, status: http.StatusOK}
}

// Header returns the recorded header map.
func (r *responseRecorder) Header() http.Header {
    return r.header
}

// WriteHeader records the response status code.
func (r *responseRecorder) WriteHeader(statusCode int) {
    r.status = statusCode
}

// Write forwards body bytes to the underlying writer.
func (r *responseRecorder) Write(data []byte) (int, error) {
    return r.writer.Write(data)
}
// TestMuxOneHandlerOneRequest registers a single GET handler for host
// foo.com and checks the mux's status, headers and body for a valid
// request, a wrong host (404) and a wrong method (405).
func TestMuxOneHandlerOneRequest(t *testing.T) {
    var test = []struct {
        name       string
        req        *http.Request
        wantStatus int
        wantHeader map[string][]string
        wantBody   string
    }{
        {
            name:       "Valid Request",
            req:        httptest.NewRequest("GET", "http://foo.com/", nil),
            wantStatus: 200,
            wantHeader: map[string][]string{},
            wantBody:   "<h1>Hello World!</h1>",
        },
        {
            // Host does not match the mux's registered domain.
            name:       "Invalid Host",
            req:        httptest.NewRequest("GET", "http://bar.com/", nil),
            wantStatus: 404,
            wantHeader: map[string][]string{
                "Content-Type":           {"text/plain; charset=utf-8"},
                "X-Content-Type-Options": {"nosniff"},
            },
            wantBody: "Not Found\n",
        },
        {
            // Only GET is registered for the pattern.
            name:       "Invalid Method",
            req:        httptest.NewRequest("POST", "http://foo.com/", nil),
            wantStatus: 405,
            wantHeader: map[string][]string{
                "Content-Type":           {"text/plain; charset=utf-8"},
                "X-Content-Type-Options": {"nosniff"},
            },
            wantBody: "Method Not Allowed\n",
        },
    }
    for _, tt := range test {
        t.Run(tt.name, func(t *testing.T) {
            // Fresh mux per case so registrations don't leak between runs.
            mux := NewServeMux(testDispatcher{}, "foo.com")
            h := HandlerFunc(func(w ResponseWriter, r *IncomingRequest) Result {
                return w.Write(safehtml.HTMLEscaped("<h1>Hello World!</h1>"))
            })
            mux.Handle("/", MethodGet, h)
            b := &strings.Builder{}
            rw := newResponseRecorder(b)
            mux.ServeHTTP(rw, tt.req)
            if rw.status != tt.wantStatus {
                t.Errorf("rw.status: got %v want %v", rw.status, tt.wantStatus)
            }
            if diff := cmp.Diff(tt.wantHeader, map[string][]string(rw.header)); diff != "" {
                t.Errorf("rw.header mismatch (-want +got):\n%s", diff)
            }
            if got := b.String(); got != tt.wantBody {
                t.Errorf("response body: got %q want %q", got, tt.wantBody)
            }
        })
    }
}
// TestMuxServeTwoHandlers registers distinct GET and POST handlers on
// the same pattern and verifies each request is routed to its own
// handler (distinct bodies prove the dispatch).
func TestMuxServeTwoHandlers(t *testing.T) {
    var tests = []struct {
        name        string
        req         *http.Request
        hf          HandlerFunc
        wantStatus  int
        wantHeaders map[string][]string
        wantBody    string
    }{
        {
            name: "GET Handler",
            req:  httptest.NewRequest("GET", "http://foo.com/bar", nil),
            hf: HandlerFunc(func(w ResponseWriter, r *IncomingRequest) Result {
                return w.Write(safehtml.HTMLEscaped("<h1>Hello World! GET</h1>"))
            }),
            wantStatus:  200,
            wantHeaders: map[string][]string{},
            wantBody:    "<h1>Hello World! GET</h1>",
        },
        {
            name: "POST Handler",
            req:  httptest.NewRequest("POST", "http://foo.com/bar", nil),
            hf: HandlerFunc(func(w ResponseWriter, r *IncomingRequest) Result {
                return w.Write(safehtml.HTMLEscaped("<h1>Hello World! POST</h1>"))
            }),
            wantStatus:  200,
            wantHeaders: map[string][]string{},
            wantBody:    "<h1>Hello World! POST</h1>",
        },
    }
    // One shared mux: both methods are registered before serving.
    d := testDispatcher{}
    mux := NewServeMux(d, "foo.com")
    mux.Handle("/bar", MethodGet, tests[0].hf)
    mux.Handle("/bar", MethodPost, tests[1].hf)
    for _, test := range tests {
        b := &strings.Builder{}
        rw := newResponseRecorder(b)
        mux.ServeHTTP(rw, test.req)
        if want := test.wantStatus; rw.status != want {
            t.Errorf("rw.status: got %v want %v", rw.status, want)
        }
        if diff := cmp.Diff(test.wantHeaders, map[string][]string(rw.header)); diff != "" {
            t.Errorf("rw.header mismatch (-want +got):\n%s", diff)
        }
        if got, want := b.String(), test.wantBody; got != want {
            t.Errorf("response body: got %q want %q", got, want)
        }
    }
}
func TestMuxHandleSameMethodTwice(t *testing.T) {
d := testDispatcher{}
mux := NewServeMux(d, "foo.com")
registeredHandler := HandlerFunc(func(w ResponseWriter, r *IncomingRequest) Result {
return w.Write(safehtml.HTMLEscaped("<h1>Hello World!</h1>"))
})
mux.Handle("/bar", MethodGet, registeredHandler)
defer func() {
if r := recover(); r == nil {
t.Errorf(`mux.Handle("/bar", MethodGet, registeredHandler) expected panic`)
}
}()
mux.Handle("/bar", MethodGet, registeredHandler)
}
|
package main
import (
"fmt"
"os"
"net/http"
"io"
)
// main downloads a new pudge.exe from the local update server, writes
// it next to the client and exits so the launcher can restart it.
func main() {
    fmt.Println("启动更新 ...")
    out, err := os.Create("pudge.exe") // the target is .exe on Windows
    if err != nil {
        // Without a writable target the update cannot proceed; the
        // previous code kept going and used a nil *os.File.
        fmt.Println("err:", err)
        os.Exit(1)
    }
    //http://www.baidu.com/ngrok/lasdver/
    resp, err := http.Get("http://127.0.0.1:8080/pudge.exe")
    if err != nil {
        // resp is nil when Get fails, so the old unconditional
        // `defer resp.Body.Close()` would have panicked here.
        fmt.Println("err:", err)
        out.Close()
        os.Exit(1)
    }
    defer resp.Body.Close()
    n, err := io.Copy(out, resp.Body)
    if err != nil {
        fmt.Println("err:", err)
        out.Close()
        os.Exit(1)
    }
    fmt.Println("数据大小: n=", n)
    //download end
    fmt.Println("新版本客户端下载完成.")
    out.Close()
    fmt.Println("更新完毕,请重启客户端.")
    os.Exit(0)
}
|
package linqo
// Action is implemented by query operators that can render themselves
// as a string.
type Action interface {
    String() string
    //PrettyPrint() string
}
|
package main
import (
"fmt"
"sort"
)
// main reads a count a from stdin, then a integers, and prints their
// minimum, maximum and sum separated by spaces.
func main() {
    var a int
    fmt.Scan(&a)
    if a <= 0 {
        // Guard: with no numbers the original indexed an empty slice
        // and panicked.
        return
    }
    sum := 0
    var list []int
    for i := 1; i <= a; i++ {
        var x int
        fmt.Scan(&x)
        sum += x
        list = append(list, x)
    }
    // After sorting, the extremes sit at the ends of the slice.
    sort.Ints(list)
    fmt.Printf("%d %d %d\n", list[0], list[len(list)-1], sum)
}
|
package mcts3
import (
"fmt"
"math/rand"
)
// GameState is the scratch position used during a single MCTS playout.
type GameState struct {
	player int // side to move next: MAXIMIZER or MINIMIZER
	board [25]int // 5x5 board, cells hold MAXIMIZER/MINIMIZER/UNSET
	cachedResults [3]float64 // unused in this file — TODO confirm before removing
}
// Node is one node of the MCTS search tree.
type Node struct {
	move int // board index (0-24) that led to this node
	player int // player who made `move`
	parent *Node // nil for the root
	childNodes []*Node
	wins float64 // playouts through this node won by MAXIMIZER
	visits float64 // total playouts through this node
	score float64 // wins/visits, used by selectBestChild
	// score should be 0 for a losing move,
	// 1 for a winning move
	untriedMoves []int // board indices not yet expanded from here
	winner int // result of findWinner right after expansion, else UNSET
}
// Manifest constants to improve understanding.
// MAXIMIZER/MINIMIZER double as cell contents and player identifiers;
// UNSET marks an empty cell (and "no winner").
const (
	MAXIMIZER = 1
	MINIMIZER = -1
	UNSET = 0
)
// MCTS3 is a plain (UCT-free) Monte-Carlo tree search player.
type MCTS3 struct {
	board [25]int // current game position
	playerJustMoved int // last mover recorded by MakeMove
	iterations int // playout budget per ChooseMove
}
// New returns an MCTS3 player with the default budget of 500000 playouts.
// Both parameters exist only for interface compatibility and are ignored.
func New(_ bool, _ int) *MCTS3 {
	player := MCTS3{iterations: 500000}
	return &player
}
// Name returns the human-readable identifier of this engine.
func (p *MCTS3) Name() string {
	return "MCTS/Plain"
}
// SetIterations overrides the playout budget used by ChooseMove.
func (p *MCTS3) SetIterations(iterations int) {
	p.iterations = iterations
}
// MakeMove records player's move at <x,y> (stored at index 5*x+y) and
// remembers who moved last. No legality check is performed.
func (p *MCTS3) MakeMove(x, y int, player int) {
	p.board[5*x+y] = player
	p.playerJustMoved = player
}
// SetDepth is a no-op: MCTS is budgeted by iterations, not search depth.
// It exists to satisfy the common engine interface.
func (p *MCTS3) SetDepth(_ int) {
}
// ChooseMove should choose computer's next move and
// return x,y coords of move and its score.
// The chosen move is also applied to p.board as MAXIMIZER.
// NOTE(review): bestMove never assigns its `value` result, so value is
// always 0 here — confirm whether callers rely on it.
func (p *MCTS3) ChooseMove() (xcoord int, ycoord int, value int, leafcount int) {
	var best int
	best, value, leafcount = bestMove(p.board, p.iterations)
	p.board[best] = MAXIMIZER
	// Since this player's "board" is a plain array, a move has to translate
	// to <x,y> coords
	xcoord = best / 5
	ycoord = best % 5
	fmt.Printf("best move %d <%d,%d>\n", best, xcoord, ycoord)
	return
}
// bestMove runs `iterations` rounds of plain Monte-Carlo tree search
// (selection / expansion / simulation / backpropagation) from `board`
// and returns the move of the root's best-scoring child plus the number
// of playouts performed. The root's player is MINIMIZER, so the first
// expanded ply belongs to MAXIMIZER.
// NOTE(review): `value` is never assigned and is always returned as 0.
// NOTE(review): the per-iteration fmt.Printf tracing is extremely
// expensive at the default 500000 iterations — presumably debug
// leftovers; removing it changes stdout, so confirm before deleting.
func bestMove(board [25]int, iterations int) (move int, value int, leafCount int) {
	fmt.Printf("enter bestMove, %d iterations\n", iterations)
	root := &Node{
		player: MINIMIZER,
	}
	root.untriedMoves = make([]int, 0, 25)
	for i := range board {
		if board[i] == UNSET {
			root.untriedMoves = append(root.untriedMoves, i)
		}
	}
	var state GameState
	for iters := 0; iters < iterations; iters++ {
		fmt.Printf("iteration %d\n", iters)
		// reset the scratch position to the real board for this playout
		for j := 0; j < 25; j++ {
			state.board[j] = board[j]
		}
		state.player = MINIMIZER
		node := root
		fmt.Printf("Selection\n")
		// Selection: walk best children until a node with untried moves
		// (or no children) is reached, replaying each move on `state`.
		for len(node.untriedMoves) == 0 && len(node.childNodes) > 0 {
			fmt.Printf("Node move %d/player %d has best child ", node.move, node.player)
			node = node.selectBestChild()
			fmt.Printf("with move %d, player %d, %.0f/%.0f/%.3f ", node.move, node.player, node.wins, node.visits, node.score)
			state.player = 0 - state.player
			state.board[node.move] = state.player
			fmt.Printf("board now:\n%s\n", boardString(state.board))
		}
		fmt.Print("End Selection\n\n")
		// state should represent the board resulting from following
		// the "best child" nodes, and node points to a Node struct
		// that has no child nodes.
		var win bool
		fmt.Printf("Expansion\n")
		// Expansion: try one random untried move and add it as a child.
		if len(node.untriedMoves) > 0 {
			mv := node.untriedMoves[rand.Intn(len(node.untriedMoves))]
			state.player = 0 - state.player
			state.board[mv] = state.player
			fmt.Printf("expansion move %d, board:\n%s\n", mv, boardString(state.board))
			node = node.AddChild(mv, &state) // AddChild take mv out of untriedMoves slice
			node.winner = findWinner(&(state.board))
			if node.winner == MAXIMIZER {
				node.score = 1.0
				win = true
			}
			// node represents mv, the previously untried move
		}
		fmt.Print("End Expansion\n\n")
		fmt.Printf("Simulation\n")
		// Simulation: random playout until somebody wins or the board fills.
		if node.winner == UNSET {
			moves := (&state).RemainingMoves()
			for len(moves) > 0 {
				// fmt.Printf("\tRemaining moves: %v\n", moves)
				m := moves[rand.Intn(len(moves))]
				state.player = 0 - state.player
				state.board[m] = state.player
				// fmt.Printf("\tmove %d, player %d\n", m, state.player)
				winner := findWinner(&(state.board))
				if winner != UNSET {
					if winner == MAXIMIZER {
						win = true
					}
					break
				}
				cutElement(&moves, m)
			}
		}
		fmt.Printf("board after playout:\n%s\n", boardString(state.board))
		fmt.Print("End Simulation\n\n")
		leafCount++
		winIncr := 0.0
		if win {
			winIncr = 1.0
		}
		fmt.Printf("Back propagation: win %v\n", win)
		// Back propagation: credit this playout to every node on the
		// path back up to the root.
		for node != nil {
			node.visits += 1.0
			// if node.winner == UNSET {
			node.wins += winIncr
			node.score = node.wins / node.visits
			// }
			fmt.Printf("node move %d, player %d: %.0f/%.0f/%.3f\n",
				node.move, node.player, node.wins, node.visits, node.score,
			)
			node = node.parent
		}
		fmt.Print("End Back propagation\n\n")
	}
	fmt.Printf("after iterations root node %.0f/%.0f\n", root.wins, root.visits)
	fmt.Println("Child nodes:")
	for _, c := range root.childNodes {
		fmt.Printf("\tmove %d, player %d, %.0f/%.0f/%.3f\n", c.move, c.player, c.wins, c.visits, c.score)
	}
	moveNode := root.selectBestChild()
	fmt.Printf("\nbest move node move %d, player %d, %.0f/%.0f/%.3f\n", moveNode.move, moveNode.player, moveNode.wins, moveNode.visits, moveNode.score)
	move = moveNode.move
	return
}
// cutElement deletes the first occurrence of v from *ary by swapping it
// with the final element and shrinking the slice by one. Element order is
// not preserved; slices without v are left untouched.
func cutElement(ary *[]int, v int) {
	s := *ary
	for i := range s {
		if s[i] != v {
			continue
		}
		last := len(s) - 1
		s[i] = s[last]
		*ary = s[:last]
		return
	}
}
// AddChild creates a child of node for move mv made by state.player,
// seeds the child's untried moves from the post-move state, removes mv
// from node's own untried moves, and returns the new child.
func (node *Node) AddChild(mv int, state *GameState) *Node {
	fmt.Printf("node.AddChild(%d, %d)\n", mv, state.player)
	ch := &Node{
		move: mv,
		parent: node,
		player: state.player,
		untriedMoves: state.RemainingMoves(),
	}
	node.childNodes = append(node.childNodes, ch)
	// weed out mv as an untried move
	cutElement(&(node.untriedMoves), mv)
	fmt.Printf("Child nodes %d:\n", len(node.childNodes))
	for _, n := range node.childNodes {
		fmt.Printf("\tmove %d player %d, %.0f/%.0f/%.3f\n", n.move, n.player, n.wins, n.visits, n.score)
	}
	fmt.Printf("untried moves: %v\n", node.untriedMoves)
	return ch
}
// selectBestChild returns the child of node with the highest score; the
// earliest child wins ties. Callers only invoke this when childNodes is
// non-empty (indexing an empty slice would panic).
func (node *Node) selectBestChild() *Node {
	best := node.childNodes[0]
	for _, candidate := range node.childNodes[1:] {
		if candidate.score > best.score {
			best = candidate
		}
	}
	return best
}
// RemainingMoves returns an array of all moves left
// unmade on state.board, i.e. the index of every cell still UNSET.
func (state *GameState) RemainingMoves() []int {
	mvs := make([]int, 0, 25)
	// (the original kept a counter `j` alongside the slice; it was dead
	// code — len(mvs) already carries that information)
	for i := 0; i < 25; i++ {
		if state.board[i] == UNSET {
			mvs = append(mvs, i)
		}
	}
	return mvs
}
// PrintBoard writes the current position to stdout via p's String method.
func (p *MCTS3) PrintBoard() {
	fmt.Printf("%s\n", p)
}
// SetScores is a no-op, present only to satisfy the common engine interface.
func (p *MCTS3) SetScores(_ bool) {
}
// FindWinner will return MAXIMIZER or MINIMIZER if somebody won,
// UNSET if nobody wins based on current board. It delegates to the
// package-level findWinner on p's own board.
func (p *MCTS3) FindWinner() int {
	return findWinner(&(p.board))
}
// findWinner will return MAXIMIZER or MINIMIZER if somebody won, UNSET if
// nobody wins based on argument board. A four-in-a-row wins for its owner
// (sum ±4); a three-in-a-row loses, awarding the win to the opponent
// (sum ±3). Wins are checked before losses. Only importantCells need
// inspecting — every quad/triplet passes through one of them.
func findWinner(board *[25]int) int {
	// Note: Go allows indexing a *[25]int directly; board[i] below is
	// equivalent to (*board)[i].
	for _, cell := range importantCells {
		if board[cell] == UNSET {
			continue
		}
		for _, quad := range winningQuads[cell] {
			switch board[quad[0]] + board[quad[1]] + board[quad[2]] + board[quad[3]] {
			case 4:
				return MAXIMIZER
			case -4:
				return MINIMIZER
			}
		}
	}
	for _, cell := range importantCells {
		if board[cell] == UNSET {
			continue
		}
		for _, triplet := range losingTriplets[cell] {
			switch board[triplet[0]] + board[triplet[1]] + board[triplet[2]] {
			case 3:
				return MINIMIZER
			case -3:
				return MAXIMIZER
			}
		}
	}
	return UNSET
}
// String renders p.board via boardString, implementing fmt.Stringer.
func (p *MCTS3) String() string {
	return boardString(p.board)
}
// boardString renders a 5x5 board as text: a column-header line followed
// by five rows, each prefixed with its row number. Cells show 'O' for
// MINIMIZER (-1), '_' for UNSET (0) and 'X' for MAXIMIZER (1), each
// followed by a space.
func boardString(board [25]int) string {
	s := "  0 1 2 3 4\n"
	for row := 0; row < 5; row++ {
		s += string(rune(row+'0')) + " "
		for col := 0; col < 5; col++ {
			s += string("O_X"[board[5*row+col]+1]) + " "
		}
		s += "\n"
	}
	return s
}
// importantCells are the board indices every winning quad and losing
// triplet passes through; findWinner only needs to anchor its scan here.
var importantCells = [9]int{2, 7, 10, 11, 12, 13, 14, 17, 22}
// 25 rows only to make looping easier. The filled-in
// rows are the only quads you actually have to check
// to find out if there's a win. Each inner slice holds four board
// indices (0-24) forming a line of four.
var winningQuads = [25][][]int{
	{}, {},
	{{0, 1, 2, 3}, {1, 2, 3, 4}, {2, 7, 12, 17}},
	{}, {}, {}, {},
	{{5, 6, 7, 8}, {6, 7, 8, 9}, {7, 12, 17, 22}},
	{}, {},
	{{0, 5, 10, 15}, {5, 10, 15, 20}},
	{{1, 6, 11, 16}, {6, 11, 16, 21}, {3, 7, 11, 15}, {5, 11, 17, 23}},
	{{10, 11, 12, 13}, {11, 12, 13, 14}, {4, 8, 12, 16}, {8, 12, 16, 20}, {0, 6, 12, 18}, {6, 12, 18, 24}},
	{{3, 8, 13, 18}, {8, 13, 18, 23}, {1, 7, 13, 19}, {9, 13, 17, 21}},
	{{4, 9, 14, 19}, {9, 14, 19, 24}},
	{}, {},
	{{15, 16, 17, 18}, {16, 17, 18, 19}},
	{}, {}, {}, {},
	{{20, 21, 22, 23}, {21, 22, 23, 24}},
	{}, {},
}
// 25 rows only to make looping easier. The filled-in
// rows are the only triplets you actually have to check
// to find out if there's a loss. Each inner slice holds three board
// indices forming a line of three.
var losingTriplets = [][][]int{
	{}, {},
	{{0, 1, 2}, {1, 2, 3}, {2, 3, 4}, {2, 7, 12}, {2, 6, 10}, {14, 8, 2}},
	{}, {}, {}, {},
	{{5, 6, 7}, {6, 7, 8}, {7, 8, 9}, {2, 7, 12}, {7, 12, 17}, {3, 7, 11}, {7, 11, 15}, {1, 7, 13}, {7, 13, 19}},
	{}, {},
	{{10, 11, 12}, {0, 5, 10}, {5, 10, 15}, {10, 15, 20}, {2, 6, 10}, {10, 16, 22}},
	{{10, 11, 12}, {11, 12, 13}, {1, 6, 11}, {6, 11, 16}, {11, 16, 21}, {3, 7, 11}, {7, 11, 15}, {5, 11, 17}, {11, 17, 23}},
	{{10, 11, 12}, {11, 12, 13}, {12, 13, 14}, {2, 7, 12}, {7, 12, 17}, {12, 17, 22}, {0, 6, 12}, {6, 12, 18}, {12, 18, 24}, {4, 8, 12}, {8, 12, 16}, {12, 16, 20}},
	{{11, 12, 13}, {12, 13, 14}, {3, 8, 13}, {8, 13, 18}, {13, 18, 23}, {1, 7, 13}, {7, 13, 19}, {21, 17, 13}, {17, 13, 9}},
	{{12, 13, 14}, {4, 9, 14}, {9, 14, 19}, {14, 19, 24}, {22, 18, 14}, {14, 8, 2}},
	{}, {},
	{{15, 16, 17}, {16, 17, 18}, {17, 18, 19}, {7, 12, 17}, {12, 17, 22}, {5, 11, 17}, {11, 17, 23}, {21, 17, 13}, {17, 13, 9}},
	{}, {}, {}, {},
	{{20, 21, 22}, {21, 22, 23}, {22, 23, 24}, {12, 17, 22}, {10, 16, 22}, {22, 18, 14}},
	{}, {},
}
|
package lc
// getMinDistance returns the smallest |i - start| with nums[i] == target.
// It expands symmetrically outward from start, so the first hit found is
// the closest; if target never appears in the scanned range it returns 0.
// Time: O(n)
// Benchmark: 4ms 3.3mb | 76% 29%
func getMinDistance(nums []int, target int, start int) int {
	limit := len(nums) - start
	if start > limit {
		limit = start
	}
	for d := 0; d <= limit; d++ {
		left, right := start-d, start+d
		if left >= 0 && nums[left] == target {
			return d
		}
		if right < len(nums) && nums[right] == target {
			return d
		}
	}
	return 0
}
|
package channel
import (
"github.com/mitchellh/mapstructure"
"github.com/slack-clone-server/config"
r "gopkg.in/rethinkdb/rethinkdb-go.v5"
)
// Stop-channel keys, one per subscription type (see client.NewStopChannel
// / StopForKey). iota assigns the values 0, 1, 2 — the original comment
// ("adds 1,2,3") was wrong.
const (
	ChannelStop = iota
	UserStop
	MessageStop
)
// Add decodes data into a Channel and inserts it into the "channel" table
// asynchronously. Decode and insert errors are reported back to the
// client over its Send channel.
func Add(client *config.Client, data interface{}) {
	var channel Channel
	err := mapstructure.Decode(data, &channel)
	if err != nil {
		client.Send <- config.Message{"error", err.Error()}
		// bug fix: the original fell through and inserted a zero-value
		// Channel even when decoding failed
		return
	}
	go func() {
		// shadow err: the original wrote the outer variable from this
		// goroutine, racing with the caller
		err := r.Table("channel").
			Insert(channel).
			Exec(client.Session)
		if err != nil {
			client.Send <- config.Message{"error", err.Error()}
		}
	}()
}
// Subscribe streams changefeed events for the "channel" table to the
// client. Every insert (including the initial table contents, via
// IncludeInitial) is forwarded as a "channel add" message. The feed stops
// when the ChannelStop signal fires (see Unsubscribe).
func Subscribe(client *config.Client, data interface{}) {
	stop := client.NewStopChannel(ChannelStop)
	result := make(chan r.ChangeResponse)
	cursor, err := r.Table("channel").
		Changes(r.ChangesOpts{IncludeInitial: true}).
		Run(client.Session)
	if err != nil {
		client.Send <- config.Message{"error", err.Error()}
		return
	}
	// Pump cursor results into the result channel.
	// NOTE(review): after stop fires and the cursor is closed, this
	// goroutine could remain blocked on the unbuffered send to result —
	// confirm cursor.Next unblocks/returns false once Close is called.
	go func() {
		var change r.ChangeResponse
		for cursor.Next(&change) {
			result <- change
		}
	}()
	go func() {
		for {
			select {
			case <-stop:
				cursor.Close()
				return
			case change := <-result:
				// OldValue == nil means a brand-new document (an
				// insert), as opposed to an update or delete.
				if change.NewValue != nil && change.OldValue == nil {
					client.Send <- config.Message{"channel add", change.NewValue}
				}
			}
		}
	}()
}
// Unsubscribe fires the client's ChannelStop signal, terminating the
// goroutines started by Subscribe. data is unused.
func Unsubscribe(client *config.Client, data interface{}) {
	client.StopForKey(ChannelStop)
}
|
package parseBlock
import (
"github.com/hyperledger/fabric-protos-go/common"
utils "github.com/hyperledger/fabric/protoutil"
)
// FilterTx is the reduced transaction view returned by
// FilterParseTransaction.
type FilterTx struct {
	BlockNum uint64 //block number
	Timestamp int64 `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` //seconds
}
// FilterParseTransaction scans every transaction envelope in block for the
// one whose channel-header TxId equals txId. On a match it returns a
// FilterTx with the block number and that transaction's timestamp in
// seconds; when nothing matches (or every envelope fails to parse) the
// returned FilterTx carries only BlockNum.
func FilterParseTransaction(block *common.Block, txId string) *FilterTx {
	res := &FilterTx{}
	res.BlockNum = block.Header.Number
	for _, data := range block.Data.Data {
		// (the original built an unused FilterTransaction holding
		// len(data) here; that was dead code and has been removed)
		envelope, err := utils.GetEnvelopeFromBlock(data)
		if err != nil {
			parseBlockLogger.Errorf("Error getting envelope: %s\n", err)
			continue
		}
		payload, err := utils.UnmarshalPayload(envelope.Payload)
		if err != nil {
			parseBlockLogger.Errorf("Error getting payload from envelope: %s\n", err)
			continue
		}
		chHeader, err := utils.UnmarshalChannelHeader(payload.Header.ChannelHeader)
		if err != nil {
			parseBlockLogger.Errorf("Error unmarshaling channel header: %s\n", err)
			continue
		}
		if txId == chHeader.TxId {
			// NOTE(review): assumes chHeader.Timestamp is non-nil for a
			// well-formed header — confirm against the protobuf schema.
			res.Timestamp = chHeader.Timestamp.Seconds
			return res
		}
	}
	return res
}
|
package cli
import (
"fmt"
"io"
"os"
"github.com/apprenda/kismatic/pkg/install"
"github.com/apprenda/kismatic/pkg/util"
"github.com/spf13/cobra"
)
// validateOpts carries the flag values for the `validate` subcommand.
type validateOpts struct {
	planFile string // path to the installation plan file
	verbose bool // enable verbose installer logging
	outputFormat string // "simple" or "raw"
	skipPreFlight bool // stop after static + SSH validation
}
// NewCmdValidate creates a new install validate command.
// The plan-file path is taken from the parent install command's options
// at run time; the command accepts no positional arguments.
func NewCmdValidate(out io.Writer, installOpts *installOpts) *cobra.Command {
	opts := &validateOpts{}
	cmd := &cobra.Command{
		Use: "validate",
		Short: "validate your plan file",
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) != 0 {
				return fmt.Errorf("Unexpected args: %v", args)
			}
			planner := &install.FilePlanner{File: installOpts.planFilename}
			opts.planFile = installOpts.planFilename
			return doValidate(out, planner, opts)
		},
	}
	cmd.Flags().BoolVar(&opts.verbose, "verbose", false, "enable verbose logging from the installation")
	cmd.Flags().StringVarP(&opts.outputFormat, "output", "o", "simple", "installation output format (options simple|raw)")
	cmd.Flags().BoolVar(&opts.skipPreFlight, "skip-preflight", false, "skip pre-flight checks")
	return cmd
}
// doValidate runs the validation pipeline in order: plan file exists →
// plan parses → plan contents valid → SSH connectivity → (unless
// skipPreFlight) node pre-flight checks. The first failing stage prints
// its errors and returns a non-nil error.
func doValidate(out io.Writer, planner install.Planner, opts *validateOpts) error {
	util.PrintHeader(out, "Validating", '=')
	// Check if plan file exists
	if !planner.PlanExists() {
		util.PrettyPrintErr(out, "Reading installation plan file [ERROR]")
		fmt.Fprintln(out, "Run \"kismatic install plan\" to generate it")
		return fmt.Errorf("plan does not exist")
	}
	plan, err := planner.Read()
	if err != nil {
		util.PrettyPrintErr(out, "Reading installation plan file %q", opts.planFile)
		return fmt.Errorf("error reading plan file: %v", err)
	}
	util.PrettyPrintOk(out, "Reading installation plan file %q", opts.planFile)
	// Validate plan file
	ok, errs := install.ValidatePlan(plan)
	if !ok {
		util.PrettyPrintErr(out, "Validating installation plan file")
		printValidationErrors(out, errs)
		return fmt.Errorf("validation error prevents installation from proceeding")
	}
	util.PrettyPrintOk(out, "Validating installation plan file")
	// Validate SSH connections
	ok, errs = install.ValidatePlanSSHConnection(plan)
	if !ok {
		util.PrettyPrintErr(out, "Validating SSH connections to nodes")
		printValidationErrors(out, errs)
		return fmt.Errorf("SSH connectivity validation failure prevents installation from proceeding")
	}
	if opts.skipPreFlight {
		return nil
	}
	// Run pre-flight checks on the target nodes
	options := install.ExecutorOptions{
		OutputFormat: opts.outputFormat,
		Verbose: opts.verbose,
	}
	e, err := install.NewPreFlightExecutor(out, os.Stderr, options)
	if err != nil {
		return err
	}
	if err = e.RunPreFlightCheck(plan); err != nil {
		return err
	}
	return nil
}
// printValidationErrors writes each validation error to out in red, one
// "- <error>" line per entry.
func printValidationErrors(out io.Writer, errors []error) {
	for i := range errors {
		util.PrintColor(out, util.Red, "- %v\n", errors[i])
	}
}
|
package main
import "testing"
// tuple is a test-case key: base y raised to exponent x.
type tuple struct {
	y, x int
}
// TestPowi checks powi against known (base, exponent) pairs, including
// the 0^0 == 1 convention.
func TestPowi(t *testing.T) {
	cases := map[tuple]int{
		{9, 19}: 1350851717672992089,
		{0, 8}:  0,
		{0, 0}:  1,
	}
	for in, want := range cases {
		if got := powi(in.y, in.x); got != want {
			t.Errorf("failed: %d^%d = %d, got %d",
				in.y, in.x, want, got)
		}
	}
}
// TestArmstrong checks armstrong against known positives and a negative,
// including the degenerate n == 0 case.
func TestArmstrong(t *testing.T) {
	cases := map[int]bool{
		6:   true,
		153: true,
		351: false,
		0:   true,
	}
	for n, want := range cases {
		if got := armstrong(n); got != want {
			t.Errorf("failed: armstrong %d is %t, got %t",
				n, want, got)
		}
	}
}
// BenchmarkPowi measures binary exponentiation over varied small inputs.
func BenchmarkPowi(b *testing.B) {
	for i := 0; i < b.N; i++ {
		powi(i%10, i%20)
	}
}
// BenchmarkPowiNaive is the linear-multiplication baseline for BenchmarkPowi.
func BenchmarkPowiNaive(b *testing.B) {
	for i := 0; i < b.N; i++ {
		powiNaive(i%10, i%20)
	}
}
// BenchmarkArmstrong measures the armstrong test over ascending integers.
func BenchmarkArmstrong(b *testing.B) {
	for i := 0; i < b.N; i++ {
		armstrong(i)
	}
}
// powi computes y**x by binary (square-and-multiply) exponentiation in
// O(log x) multiplications. powi(0, 0) == 1 by convention; a non-positive
// exponent skips the loop entirely and yields 1.
func powi(y, x int) int {
	result := 1
	base := y
	for exp := x; exp > 0; exp >>= 1 {
		if exp&1 == 1 {
			result *= base
		}
		base *= base
	}
	return result
}
// powiNaive computes a**b with b sequential multiplications (O(b)).
// Kept as the benchmark baseline for powi; b <= 0 yields 1.
func powiNaive(a, b int) (ret int) {
	ret = 1
	count := 0
	for count < b {
		ret *= a
		count++
	}
	return
}
// armstrong reports whether n equals the sum of its decimal digits each
// raised to the power of the digit count (a narcissistic number).
// armstrong(0) is true: both loops are skipped and 0 == 0.
func armstrong(n int) bool {
	digits := 0
	for rest := n; rest > 0; rest /= 10 {
		digits++
	}
	sum := 0
	for rest := n; rest > 0; rest /= 10 {
		// raise the digit to the digit-count power by repeated
		// multiplication (exponent and base are both tiny)
		p := 1
		for k := 0; k < digits; k++ {
			p *= rest % 10
		}
		sum += p
	}
	return sum == n
}
|
package aggregator
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"net/url"
"strconv"
"strings"
"time"
"gitlab.com/vultour/steamcli/objects"
log "github.com/sirupsen/logrus"
"golang.org/x/net/html"
"golang.org/x/net/html/atom"
)
// gameMatcher holds one set of owned AppIDs per client profile; the maps
// are held by pointer so Common can prune them in place.
type gameMatcher []*map[int]struct{}
// GameEndpoint is the Steam Store's endpoint for one or more games
const GameEndpoint = "https://store.steampowered.com/api/appdetails/?appids="
// GameEndpointHTML is the human-readable store endpoint for a single game
// The 'API' endpoint does not return tags, so we need to parse the HTML version
const GameEndpointHTML = "https://store.steampowered.com/app/"
// ParallelUpdates defines how many games will be fetched at one time from store.
// See the "rate limit exceeded or unsupported parallelisation" check in
// UpdateGameCache for why this stays conservative.
var ParallelUpdates = 1
// Select returns games across all profiles matching the specified criteria.
// It builds one AppID set per client profile, reduces them to either the
// intersection (common) or the union, and hands the surviving IDs to the
// game cache for tag filtering; `and` and `invalid` are forwarded as-is.
func (a *Aggregator) Select(tags []string, common, and, invalid bool) objects.JSONGameList {
	matcher := make(gameMatcher, 0, 3)
	for _, c := range a.Clients {
		games := make(map[int]struct{})
		for _, g := range c.Profile.Games {
			log.WithField("id", g.AppID).Debug("Adding game to matcher")
			games[g.AppID] = struct{}{}
		}
		matcher = append(matcher, &games)
		log.WithField("size", len(games)).Debug("Created matcher section")
	}
	var wantedIDs []int
	if common {
		wantedIDs = matcher.Common()
	} else {
		wantedIDs = matcher.All()
	}
	return a.Cache.Games.Select(tags, wantedIDs, and, invalid)
}
// UpdateGameCache updates the aggregator game cache.
// This fetches the details of every game owned across all clients and stores
// it in the cache. Already-cached IDs are skipped; the rest are fetched in
// batches of ParallelUpdates, the cache is saved after each batch, and a
// short sleep between batches keeps the request rate down.
func (a *Aggregator) UpdateGameCache() error {
	// Collect the IDs that still need fetching.
	gameIDs := make(map[int]struct{})
	for _, c := range a.Clients {
		for id := range c.Profile.Games {
			if _, cached := a.Cache.Games.Get(id); !cached {
				gameIDs[id] = struct{}{}
			}
		}
	}
	log.WithField("n", len(gameIDs)).Debug("Accumulated game IDs")
	c := http.Client{Timeout: time.Second * 10}
	for len(gameIDs) > 0 {
		// Pick the next batch of up to ParallelUpdates IDs (map order
		// is arbitrary, which is fine here).
		nextIDs := make([]string, 0, ParallelUpdates)
		i := 0
		for g := range gameIDs {
			nextIDs = append(nextIDs, strconv.Itoa(g))
			i++
			if i >= ParallelUpdates {
				break
			}
		}
		log.WithField("ids", strings.Join(nextIDs, ",")).Debug("Fetching games")
		url := fmt.Sprintf("%s%s", GameEndpoint, strings.Join(nextIDs, ","))
		log.Debugf("Built URL: %s", url)
		r, err := c.Get(url)
		if err != nil {
			return fmt.Errorf("could not retrieve data from the store: %s", err)
		}
		log.WithField("status", r.StatusCode).Debug("Got response")
		b, err := ioutil.ReadAll(r.Body)
		// Bug fix: the original used `defer r.Body.Close()` here, inside
		// the loop, so every response body stayed open until the function
		// returned. Close as soon as the body has been consumed.
		r.Body.Close()
		if err != nil {
			return fmt.Errorf("couldn't read response body: %s", err)
		}
		if strings.TrimSpace(string(b)) == "null" {
			return fmt.Errorf("rate limit exceeded or unsupported parallelisation number used")
		}
		// Anonymous struct to deal with the API uglyness
		obj := map[string]*struct {
			Success bool `json:"success"`
			Data *objects.JSONGame `json:"data"`
		}{}
		err = json.Unmarshal(b, &obj)
		if err != nil {
			return fmt.Errorf("couldn't decode json: %s", err)
		}
		for _, v := range nextIDs {
			vi, err := strconv.Atoi(v)
			if err != nil {
				return fmt.Errorf("couldn't convert AppID: %s", err)
			}
			if vg, exists := obj[v]; exists {
				if !obj[v].Success {
					// Set as invalid and backfill from profile
					log.WithField("id", v).Warning("received invalid response from store")
					pGame, ex := a.Cache.Profiles.FindGame(vi)
					if !ex {
						log.WithField("id", vi).Error("Could not backfill game from profile")
					}
					log.WithField("name", pGame).Debug("Backfilling game name")
					obj[v].Data = &objects.JSONGame{
						AppID: vi,
						Invalid: true,
						Name: pGame,
					}
				}
				// Add game to cache
				log.WithFields(log.Fields{
					"id_s": vi,
					"id_i": vg.Data.AppID,
				}).Debug("Adding game to cache")
				vg.Data.Complete()
				a.Cache.Games.Add(vg.Data.AppID, vg.Data)
				// Add a duplicate entry if received mismatch to avoid loop
				if obj[v].Data.AppID != vi {
					log.WithFields(log.Fields{
						"id_requested": vi,
						"id_received": obj[v].Data.AppID,
					}).Warn("AppID mismatch")
					a.Cache.Games.Add(vi, vg.Data)
					delete(gameIDs, vi)
				}
				delete(gameIDs, vg.Data.AppID)
			} else {
				log.WithFields(log.Fields{
					"id": v,
					"response": string(b),
				}).Panic("Didn't find game in response")
			}
		}
		if err := a.Cache.Save(); err != nil {
			return err
		}
		time.Sleep(time.Millisecond * 600)
	}
	if err := a.Cache.Save(); err != nil {
		return err
	}
	return nil
}
// UpdateGameTags fetches Game tags for all games that are eligible
// (those whose Tags field is still nil). The store's age gate is bypassed
// by pre-setting birthtime/lastagecheckage/wants_mature_content cookies;
// the cache is saved after every game and a one-second pause spaces out
// the requests.
// NOTE(review): when fetchTags errors, the loop still assigns the (empty)
// result to g.Tags — confirm that marking the game "attempted" is the
// intended behavior. Also confirm a.Cache.Games yields pointers (or
// reference semantics); if g were a value copy, the assignment would be
// lost.
func (a *Aggregator) UpdateGameTags() error {
	log.Debug("Updating tags")
	c := &http.Client{Timeout: time.Second * 10}
	jar, err := cookiejar.New(nil)
	if err != nil {
		return fmt.Errorf("could not create cookiejar: %s", err)
	}
	c.Jar = jar
	storeURL, err := url.Parse("https://store.steampowered.com")
	if err != nil {
		return fmt.Errorf("Could not parse store URL: %s", err)
	}
	// Cookies that satisfy the store's age-verification interstitial.
	c.Jar.SetCookies(
		storeURL,
		[]*http.Cookie{
			&http.Cookie{
				Name: "birthtime",
				Expires: time.Now().Add(time.Hour * 12),
				Domain: "store.steampowered.com",
				Path: "/",
				Value: "156729601",
			},
			&http.Cookie{
				Name: "lastagecheckage",
				Expires: time.Now().Add(time.Hour * 12),
				Domain: "store.steampowered.com",
				Path: "/",
				Value: "1-0-1987",
			},
			&http.Cookie{
				Name: "wants_mature_content",
				Expires: time.Now().Add(time.Hour * 12),
				Domain: "store.steampowered.com",
				Path: "/",
				Value: "1",
			},
		},
	)
	for i, g := range a.Cache.Games {
		if g.Tags == nil {
			t, err := fetchTags(c, i)
			if err != nil {
				log.WithFields(log.Fields{
					"err": err,
					"id": i,
				}).Error("Failed fetching tags")
			}
			log.WithFields(log.Fields{
				"id": i,
				"tags": t,
			}).Debug("Retrieved tags")
			g.Tags = t
			if err := a.Cache.Save(); err != nil {
				return err
			}
			time.Sleep(time.Second)
		}
	}
	return nil
}
// fetchTags downloads the store page for appid and extracts its user tags
// from the HTML (the JSON API does not expose tags). The page must pass
// tagReturnSuccess — i.e. actually be the requested app's page — before
// parsing; failures return an empty slice plus an error. Finding zero
// tags is logged (with heavy debug output) but is not an error.
func fetchTags(httpClient *http.Client, appid int) ([]string, error) {
	log.WithField("appid", appid).Debug("Retrieving tags")
	u := fmt.Sprintf("%s%d", GameEndpointHTML, appid)
	log.Debugf("Built URL: %s", u)
	r, err := httpClient.Get(u)
	if err != nil {
		return []string{}, fmt.Errorf("could not retrieve data from the store: %s", err)
	}
	defer r.Body.Close()
	log.WithField("status", r.StatusCode).Debug("Got response")
	doc, err := html.Parse(r.Body)
	if err != nil {
		return []string{}, fmt.Errorf("could not parse HTML: %s", err)
	}
	if !tagReturnSuccess(doc, appid) {
		// Dump the page so redirects/age gates can be diagnosed.
		var s strings.Builder
		err := html.Render(&s, doc)
		if err != nil {
			log.WithField("err", err).Error("Could not render page back into HTML")
		}
		log.Debugf("Validity check failed, content: %s", s.String())
		return []string{}, fmt.Errorf("returned page did not pass validity check")
	}
	tags := findTags(doc)
	if len(tags) < 1 {
		var s strings.Builder
		err := html.Render(&s, doc)
		if err != nil {
			log.WithField("err", err).Error("Could not render page back into HTML")
		}
		log.Debugf("Content: %s", s.String())
		uuu, _ := url.Parse("https://store.steampowered.com")
		for _, c := range httpClient.Jar.Cookies(uuu) {
			log.Debugf("Cookie: %#v", c)
		}
		log.Debugf("Response Headers: %#v", r.Cookies())
		log.WithField("id", appid).Warning("Did not find any tags")
	}
	return tags, nil
}
// tagReturnSuccess walks the DOM under root (siblings and descendants)
// looking for a <meta> element whose "content" attribute begins with the
// store URL for appid. Its presence indicates the store served the
// requested app page rather than a redirect or age-gate placeholder.
func tagReturnSuccess(root *html.Node, appid int) bool {
	prefix := fmt.Sprintf("https://store.steampowered.com/app/%d", appid)
	for node := root; node != nil; node = node.NextSibling {
		if node.DataAtom == atom.Meta {
			for _, attr := range node.Attr {
				if attr.Key == "content" && strings.HasPrefix(attr.Val, prefix) {
					return true
				}
			}
		}
		if tagReturnSuccess(node.FirstChild, appid) {
			return true
		}
	}
	return false
}
// findTags collects the text content of every <a class="app_tag"> element
// in the DOM under root, trimming surrounding whitespace from each tag.
// A nil root yields an empty (non-nil) slice.
func findTags(root *html.Node) []string {
	found := make([]string, 0, 8)
	for node := root; node != nil; node = node.NextSibling {
		if node.DataAtom == atom.A {
			for _, attr := range node.Attr {
				if attr.Key != "class" || attr.Val != "app_tag" {
					continue
				}
				child := node.FirstChild
				if child != nil && child.Type == html.TextNode {
					tag := strings.TrimSpace(child.Data)
					log.Debugf("Found tag: %#v", tag)
					found = append(found, tag)
				}
			}
		}
		found = append(found, findTags(node.FirstChild)...)
	}
	return found
}
// All returns the deduplicated union of game IDs across every section of
// the matcher. Order is unspecified (map iteration).
func (m *gameMatcher) All() []int {
	seen := make(map[int]struct{})
	for _, section := range *m {
		for id := range *section {
			seen[id] = struct{}{}
		}
	}
	ids := make([]int, 0, 8)
	for id := range seen {
		ids = append(ids, id)
	}
	return ids
}
// Common returns the game IDs present in every section of the matcher.
// It deletes, from each section, any game not found in all sections and
// then takes the union of what remains. NOTE: this mutates the underlying
// maps (after the call each section holds only the intersection), and the
// nested scan is O(sections² × games²) — fine for a few profiles, worth
// revisiting for many.
func (m *gameMatcher) Common() []int {
	log.WithField("sections", len(*m)).Debug("Computing common games")
	for _, section := range *m {
		log.WithField("size", len(*section)).Debug("Eliminating items in section")
		for game := range *section {
			inAll := true
			for _, sectionAgain := range *m {
				found := false
				for gameAgain := range *sectionAgain {
					if gameAgain == game {
						found = true
					}
				}
				if !found {
					inAll = false
					break
				}
			}
			if !inAll { // Eliminate entries not present in all maps
				delete(*section, game)
			}
		}
	}
	return m.All() // Maps should only contain common entries
}
|
package main
import "clipper"
// main constructs the clipper master and starts it.
// NOTE(review): presumably StartUp blocks for the life of the process —
// confirm in the clipper package.
func main() {
	m := clipper.NewMaster()
	m.StartUp()
}
|
package main
// Leetcode 983. (medium)
// mincostTickets returns the minimum cost to cover every travel day using
// 1-day, 7-day and 30-day passes priced costs[0..2]. dp[d] is the
// cheapest cover of all travel days up to calendar day d; days must be
// sorted ascending and non-empty.
func mincostTickets(days []int, costs []int) int {
	last := days[len(days)-1]
	dp := make([]int, last+1)
	next := 0 // index into days of the next travel day to match
	for day := 1; day <= last; day++ {
		if day != days[next] {
			// not a travel day: carrying yesterday's cover is free
			dp[day] = dp[day-1]
			continue
		}
		weekAgo, monthAgo := day-7, day-30
		if weekAgo < 0 {
			weekAgo = 0
		}
		if monthAgo < 0 {
			monthAgo = 0
		}
		// cheapest of: 1-day pass today, 7-day pass bought a week back,
		// 30-day pass bought a month back
		best := dp[day-1] + costs[0]
		best = min(best, dp[weekAgo]+costs[1])
		best = min(best, dp[monthAgo]+costs[2])
		dp[day] = best
		next++
	}
	return dp[last]
}
|
package spotifyauth
import (
"github.com/Iteam1337/go-udp-wejay/utils"
"github.com/ankjevel/spotify"
"golang.org/x/oauth2"
)
// Interface abstracts the Spotify OAuth flow so it can be mocked:
// building the authorize URL, constructing an authenticated client, and
// exchanging an authorization code for a token.
type Interface interface {
	AuthURL(id string) string
	NewClient(*oauth2.Token) spotify.Client
	Exchange(string) (*oauth2.Token, error)
}
// SpotifyAuth is the concrete implementation of Interface, delegating
// every call to the wrapped spotify.Authenticator.
type SpotifyAuth struct {
	auth spotify.Authenticator
}
// AuthURL returns the Spotify authorize URL; id is passed through as the
// OAuth state value.
func (s SpotifyAuth) AuthURL(id string) string {
	return s.auth.AuthURL(id)
}
// NewClient builds an authenticated Spotify client from token.
func (s SpotifyAuth) NewClient(token *oauth2.Token) spotify.Client {
	return s.auth.NewClient(token)
}
// Exchange trades an authorization code for an OAuth token.
func (s SpotifyAuth) Exchange(code string) (*oauth2.Token, error) {
	return s.auth.Exchange(code)
}
var (
	// Struct is the package-level SpotifyAuth singleton, configured from
	// REDIRECT_URL (default http://localhost:8080/callback) with the
	// scopes the app needs for playback and playlist management.
	Struct = SpotifyAuth{
		spotify.NewAuthenticator(
			utils.GetEnv("REDIRECT_URL", "http://localhost:8080/callback"),
			spotify.ScopeUserReadCurrentlyPlaying,
			spotify.ScopeUserReadPlaybackState,
			spotify.ScopeUserModifyPlaybackState,
			spotify.ScopePlaylistModifyPublic,
			spotify.ScopePlaylistModifyPrivate,
			spotify.ScopePlaylistReadCollaborative,
			spotify.ScopeStreaming,
			spotify.ScopeImageUpload,
			// spotify.ScopeUserTopRead,
		),
	}
	// Package-level conveniences bound to the singleton above.
	AuthURL = Struct.AuthURL
	NewClient = Struct.NewClient
	Exchange = Struct.Exchange
)
|
package pencode
import (
"bytes"
"encoding/binary"
"unicode/utf16"
)
// UTF16LEEncode transcodes input bytes to UTF-16 little-endian.
type UTF16LEEncode struct{}

// Encode interprets input as UTF-8, converts it to UTF-16 code units
// (surrogate pairs for code points outside the BMP) and serializes each
// unit little-endian, two bytes per unit. It never returns an error.
func (u UTF16LEEncode) Encode(input []byte) ([]byte, error) {
	units := utf16.Encode([]rune(string(input)))
	var out bytes.Buffer
	pair := make([]byte, 2)
	for _, unit := range units {
		binary.LittleEndian.PutUint16(pair, unit)
		out.Write(pair)
	}
	return out.Bytes(), nil
}

// HelpText describes this encoder for command-line help output.
func (u UTF16LEEncode) HelpText() string {
	return "UTF-16 encoder (Little Endian)"
}
|
package adapter
import (
"fmt"
"github.com/centrifuge/go-substrate-rpc-client/v3/types"
"github.com/pkg/errors"
"github.com/shopspring/decimal"
"math/big"
"strconv"
"strings"
)
// removeHexPrefix removes the prefix (0x or 0X) of a given hex string;
// strings without the prefix are returned unchanged.
func removeHexPrefix(str string) string {
	if len(str) >= 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X') {
		return str[2:]
	}
	return str
}
// hasHexPrefix returns true if the string starts with 0x (or 0X).
func hasHexPrefix(str string) bool {
	return strings.HasPrefix(str, "0x") || strings.HasPrefix(str, "0X")
}
// parseDecimalString converts a decimal string to a *big.Int by first
// parsing it as a float64 and rounding to the nearest integer via the
// "%.f" format. Precision is therefore limited to float64; unparsable
// input returns the strconv error. See ParseNumericString for the
// arbitrary-precision path.
func parseDecimalString(input string) (*big.Int, error) {
	f, err := strconv.ParseFloat(input, 64)
	if err != nil {
		return nil, err
	}
	rounded := fmt.Sprintf("%.f", f)
	result, ok := new(big.Int).SetString(rounded, 10)
	if !ok {
		return nil, fmt.Errorf("error parsing decimal %s", input)
	}
	return result, nil
}
// ParseNumericString parses input into an arbitrary-precision decimal.
// 0x/0X-prefixed strings are read as base-16 integers; everything else is
// delegated to decimal.NewFromString.
func ParseNumericString(input string) (decimal.Decimal, error) {
	if !hasHexPrefix(input) {
		return decimal.NewFromString(input)
	}
	value, ok := new(big.Int).SetString(removeHexPrefix(input), 16)
	if !ok {
		return decimal.Decimal{}, fmt.Errorf("error parsing hex %s", input)
	}
	return decimal.NewFromBigInt(value, 0), nil
}
// convertTypes converts the string value v into the substrate
// (go-substrate-rpc-client) types value named by t (case-insensitive).
// Supported: bool, int/uint of widths 8-64 (strconv-parsed in base 10),
// the 128/256-bit widths (via ParseNumericString, so 0x hex is accepted),
// ucompact, bytes (raw string bytes) and address (hex account id).
// Unknown type names return an error.
func convertTypes(t, v string) (interface{}, error) {
	switch strings.ToLower(t) {
	case "bool":
		switch strings.ToLower(v) {
		case "true":
			return types.NewBool(true), nil
		case "false":
			return types.NewBool(false), nil
		default:
			return nil, errors.New("unable to parse bool")
		}
	case "uint8":
		i, err := strconv.ParseUint(v, 10, 8)
		if err != nil {
			return nil, errors.Wrap(err, "failed parsing uint8")
		}
		return types.NewU8(uint8(i)), nil
	case "uint16":
		i, err := strconv.ParseUint(v, 10, 16)
		if err != nil {
			return nil, errors.Wrap(err, "failed parsing uint16")
		}
		return types.NewU16(uint16(i)), nil
	case "uint32":
		i, err := strconv.ParseUint(v, 10, 32)
		if err != nil {
			return nil, errors.Wrap(err, "failed parsing uint32")
		}
		return types.NewU32(uint32(i)), nil
	case "uint64":
		i, err := strconv.ParseUint(v, 10, 64)
		if err != nil {
			return nil, errors.Wrap(err, "failed parsing uint64")
		}
		return types.NewU64(i), nil
	case "int8":
		i, err := strconv.ParseInt(v, 10, 8)
		if err != nil {
			return nil, errors.Wrap(err, "failed parsing int8")
		}
		return types.NewI8(int8(i)), nil
	case "int16":
		i, err := strconv.ParseInt(v, 10, 16)
		if err != nil {
			return nil, errors.Wrap(err, "failed parsing int16")
		}
		return types.NewI16(int16(i)), nil
	case "int32":
		i, err := strconv.ParseInt(v, 10, 32)
		if err != nil {
			return nil, errors.Wrap(err, "failed parsing int32")
		}
		return types.NewI32(int32(i)), nil
	case "int64":
		i, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return nil, errors.Wrap(err, "failed parsing int64")
		}
		return types.NewI64(i), nil
	case "int128", "uint128", "int256", "uint256":
		// Shared big-integer path; the inner switch re-dispatches on the
		// exact width once the value has parsed.
		i, err := ParseNumericString(v)
		if err != nil {
			return nil, errors.Wrap(err, "failed parsing numeric string")
		}
		switch strings.ToLower(t) {
		case "int128":
			return types.NewI128(*i.BigInt()), nil
		case "uint128":
			return types.NewU128(*i.BigInt()), nil
		case "int256":
			return types.NewI256(*i.BigInt()), nil
		case "uint256":
			return types.NewU256(*i.BigInt()), nil
		}
	case "ucompact":
		i, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return nil, errors.Wrap(err, "failed parsing ucompact")
		}
		return types.NewUCompact(new(big.Int).SetInt64(i)), nil
	case "bytes":
		return types.Bytes(v), nil
	case "address":
		return types.NewAddressFromHexAccountID(fmt.Sprintf("%s", v))
	}
	return nil, errors.New("unknown type")
}
|
package odoo
import (
"fmt"
)
// IrQwebFieldContact represents ir.qweb.field.contact model.
// NOTE(review): the tag option "omptempty" looks like a typo of
// "omitempty"; it is used consistently by this generator, so confirm how
// the xmlrpc marshaller treats the option name before changing it.
type IrQwebFieldContact struct {
	LastUpdate *Time `xmlrpc:"__last_update,omptempty"` // last modification time
	DisplayName *String `xmlrpc:"display_name,omptempty"` // record display name
	Id *Int `xmlrpc:"id,omptempty"` // database id
}
// IrQwebFieldContacts represents array of ir.qweb.field.contact model.
type IrQwebFieldContacts []IrQwebFieldContact
// IrQwebFieldContactModel is the odoo model name used in every RPC call below.
const IrQwebFieldContactModel = "ir.qweb.field.contact"
// Many2One convert IrQwebFieldContact to *Many2One.
// NOTE(review): dereferences iqfc.Id — a record whose Id field was not
// loaded (nil) would panic here; confirm reads always populate Id.
func (iqfc *IrQwebFieldContact) Many2One() *Many2One {
	return NewMany2One(iqfc.Id.Get(), "")
}
// CreateIrQwebFieldContact creates a new ir.qweb.field.contact model and returns its id.
// Note: when the backend returns no ids the result is (-1, nil); callers
// must check the id, not just the error.
func (c *Client) CreateIrQwebFieldContact(iqfc *IrQwebFieldContact) (int64, error) {
	ids, err := c.CreateIrQwebFieldContacts([]*IrQwebFieldContact{iqfc})
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		return -1, nil
	}
	return ids[0], nil
}
// CreateIrQwebFieldContacts creates new ir.qweb.field.contact models and
// returns their ids. (The original comment named the singular function.)
func (c *Client) CreateIrQwebFieldContacts(iqfcs []*IrQwebFieldContact) ([]int64, error) {
	var vv []interface{}
	for _, v := range iqfcs {
		vv = append(vv, v)
	}
	return c.Create(IrQwebFieldContactModel, vv)
}
// UpdateIrQwebFieldContact updates an existing ir.qweb.field.contact record.
// iqfc.Id must be populated; it selects the record to update.
func (c *Client) UpdateIrQwebFieldContact(iqfc *IrQwebFieldContact) error {
	return c.UpdateIrQwebFieldContacts([]int64{iqfc.Id.Get()}, iqfc)
}
// UpdateIrQwebFieldContacts updates existing ir.qweb.field.contact records.
// All records (represented by ids) will be updated by iqfc values.
func (c *Client) UpdateIrQwebFieldContacts(ids []int64, iqfc *IrQwebFieldContact) error {
	return c.Update(IrQwebFieldContactModel, ids, iqfc)
}
// DeleteIrQwebFieldContact deletes an existing ir.qweb.field.contact record
// by id (single-record convenience over DeleteIrQwebFieldContacts).
func (c *Client) DeleteIrQwebFieldContact(id int64) error {
	return c.DeleteIrQwebFieldContacts([]int64{id})
}
// DeleteIrQwebFieldContacts deletes existing ir.qweb.field.contact records
// identified by ids.
func (c *Client) DeleteIrQwebFieldContacts(ids []int64) error {
	return c.Delete(IrQwebFieldContactModel, ids)
}
// GetIrQwebFieldContact gets ir.qweb.field.contact existing record.
// A missing id is reported as an error (unlike Create's silent -1).
func (c *Client) GetIrQwebFieldContact(id int64) (*IrQwebFieldContact, error) {
	iqfcs, err := c.GetIrQwebFieldContacts([]int64{id})
	if err != nil {
		return nil, err
	}
	if iqfcs != nil && len(*iqfcs) > 0 {
		return &((*iqfcs)[0]), nil
	}
	return nil, fmt.Errorf("id %v of ir.qweb.field.contact not found", id)
}
// GetIrQwebFieldContacts gets existing ir.qweb.field.contact records for the
// given ids.
func (c *Client) GetIrQwebFieldContacts(ids []int64) (*IrQwebFieldContacts, error) {
	result := &IrQwebFieldContacts{}
	err := c.Read(IrQwebFieldContactModel, ids, nil, result)
	if err != nil {
		return nil, err
	}
	return result, nil
}
// FindIrQwebFieldContact finds the first ir.qweb.field.contact record
// matching criteria; it errors when nothing matches.
func (c *Client) FindIrQwebFieldContact(criteria *Criteria) (*IrQwebFieldContact, error) {
	result := &IrQwebFieldContacts{}
	err := c.SearchRead(IrQwebFieldContactModel, criteria, NewOptions().Limit(1), result)
	if err != nil {
		return nil, err
	}
	if result == nil || len(*result) == 0 {
		return nil, fmt.Errorf("ir.qweb.field.contact was not found with criteria %v", criteria)
	}
	return &((*result)[0]), nil
}
// FindIrQwebFieldContacts finds ir.qweb.field.contact records by querying it
// and filtering it with criteria and options.
// NOTE(review): nil criteria/options are forwarded as-is — confirm SearchRead
// accepts them.
func (c *Client) FindIrQwebFieldContacts(criteria *Criteria, options *Options) (*IrQwebFieldContacts, error) {
	iqfcs := &IrQwebFieldContacts{}
	if err := c.SearchRead(IrQwebFieldContactModel, criteria, options, iqfcs); err != nil {
		return nil, err
	}
	return iqfcs, nil
}
// FindIrQwebFieldContactIds finds records ids by querying it
// and filtering it with criteria and options.
// On error it returns an empty (non-nil) slice alongside the error.
func (c *Client) FindIrQwebFieldContactIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(IrQwebFieldContactModel, criteria, options)
	if err != nil {
		return []int64{}, err
	}
	return ids, nil
}
// FindIrQwebFieldContactId finds the id of the first record matching
// criteria and options; it errors when nothing matches.
func (c *Client) FindIrQwebFieldContactId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(IrQwebFieldContactModel, criteria, options)
	switch {
	case err != nil:
		return -1, err
	case len(ids) > 0:
		return ids[0], nil
	default:
		return -1, fmt.Errorf("ir.qweb.field.contact was not found with criteria %v and options %v", criteria, options)
	}
}
|
package tests
import (
"reflect"
"testing"
)
/**
* [1195] Distribute Candies to People
*
* We distribute some number of candies, to a row of n = num_people people in the following way:
*
* We then give 1 candy to the first person, 2 candies to the second person, and so on until we give n candies to the last person.
*
* Then, we go back to the start of the row, giving n + 1 candies to the first person, n + 2 candies to the second person, and so on until we give 2 * n candies to the last person.
*
* This process repeats (with us giving one more candy each time, and moving to the start of the row after we reach the end) until we run out of candies. The last person will receive all of our remaining candies (not necessarily one more than the previous gift).
*
* Return an array (of length num_people and sum candies) that represents the final distribution of candies.
*
*
* Example 1:
*
*
* Input: candies = 7, num_people = 4
* Output: [1,2,3,1]
* Explanation:
* On the first turn, ans[0] += 1, and the array is [1,0,0,0].
* On the second turn, ans[1] += 2, and the array is [1,2,0,0].
* On the third turn, ans[2] += 3, and the array is [1,2,3,0].
* On the fourth turn, ans[3] += 1 (because there is only one candy left), and the final array is [1,2,3,1].
*
*
* Example 2:
*
*
* Input: candies = 10, num_people = 3
* Output: [5,2,3]
* Explanation:
* On the first turn, ans[0] += 1, and the array is [1,0,0].
* On the second turn, ans[1] += 2, and the array is [1,2,0].
* On the third turn, ans[2] += 3, and the array is [1,2,3].
* On the fourth turn, ans[0] += 4, and the final array is [5,2,3].
*
*
*
* Constraints:
*
*
* 1 <= candies <= 10^9
* 1 <= num_people <= 1000
*
*
*/
// TestDistributeCandiestoPeople checks distributeCandiesToPeople against the
// two worked examples from the problem statement.
func TestDistributeCandiestoPeople(t *testing.T) {
	cases := []struct {
		candies, people int
		output          []int
	}{
		{candies: 7, people: 4, output: []int{1, 2, 3, 1}},
		{candies: 10, people: 3, output: []int{5, 2, 3}},
	}
	for _, tc := range cases {
		if got := distributeCandiesToPeople(tc.candies, tc.people); !reflect.DeepEqual(got, tc.output) {
			t.Fail()
		}
	}
}
// submission codes start here
// distributeCandiesToPeople hands out 1, 2, 3, ... candies in round-robin
// order until the supply runs out; the final recipient receives whatever
// remains.
func distributeCandiesToPeople(candies int, num_people int) []int {
	result := make([]int, num_people)
	give := 1
	for pos := 0; candies > 0; pos++ {
		amount := min(candies, give)
		result[pos%num_people] += amount
		candies -= amount
		give++
	}
	return result
}
// distributeCandiesToPeople2 simulates the hand-out one gift at a time
// (slow brute force; kept for reference).
func distributeCandiesToPeople2(candies int, num_people int) []int {
	result := make([]int, num_people)
	gift := 0
	for candies > 0 {
		for person := 0; person < num_people; person++ {
			gift++
			if candies <= gift {
				// Not enough left for a full gift: dump the remainder here.
				result[person] += candies
				return result
			}
			result[person] += gift
			candies -= gift
		}
	}
	return result
}
// submission codes end
|
package Add_Two_Numbers
import "testing"
// Test exercises addTwoNumbers with the two classic cases:
// 342+465 = 807 and 1+99 = 100 (digits stored least-significant first).
func Test(t *testing.T) {
	// build constructs a linked list whose nodes hold vals in order.
	build := func(vals ...int) *ListNode {
		var head *ListNode
		for i := len(vals) - 1; i >= 0; i-- {
			head = &ListNode{Val: vals[i], Next: head}
		}
		return head
	}
	sum := addTwoNumbers(build(2, 4, 3), build(5, 6, 4))
	t.Log(sum.Val, sum.Next.Val, sum.Next.Next.Val)
	sum2 := addTwoNumbers(build(1), build(9, 9))
	t.Log(sum2.Val, sum2.Next.Val, sum2.Next.Next.Val)
}
|
package main
import "fmt"
import "github.com/sushruta/go-key-value-db/memtable"
// main exercises the memtable BST: it inserts a few key/value pairs and
// prints the result of Inorder (presumably the entries in sorted key order —
// TODO confirm against memtable.Inorder's contract).
func main() {
	bst := memtable.BstConstructor()
	bst.Insert("handbag", 8786)
	bst.Insert("handlebars", 3869)
	bst.Insert("handicap", 70836)
	bst.Insert("handkerchief", 16433)
	fmt.Println(bst.Inorder())
}
|
package entity
import "github.com/google/uuid"
const (
	// TransaksiDetailTableName is the database table backing TransaksiDetail.
	TransaksiDetailTableName = "transaksi_detail"
)

// TransaksiDetail is a model for entity.TransaksiDetail.
// (The original comment named it "TransaksiModel", which was a typo.)
type TransaksiDetail struct {
	ID        uuid.UUID `gorm:"type:uuid;primary_key" json:"id"`
	Produk    string    `gorm:"type:string;null" json:"produk"`
	Kuantitas int64     `gorm:"type:int;null" json:"kuantitas"`
	Total     int64     `gorm:"type:int;null" json:"total"`
	//Auditable
}
// NewTransaksiDetail builds a TransaksiDetail from its field values.
// kuantitas and total are already int64, so the redundant int64()
// conversions of the original have been dropped.
func NewTransaksiDetail(id uuid.UUID, produk string, kuantitas, total int64) *TransaksiDetail {
	return &TransaksiDetail{
		ID:        id,
		Produk:    produk,
		Kuantitas: kuantitas,
		Total:     total,
		//Auditable: NewAuditable(),
	}
}
// TableName implements gorm's table-name hook, pinning the model to the
// transaksi_detail table.
func (model *TransaksiDetail) TableName() string {
	return TransaksiDetailTableName
}
|
package shoot
import (
"github.com/mandelsoft/cmdint/pkg/cmdint"
"github.com/afritzler/garden-examiner/pkg"
"github.com/afritzler/garden-examiner/cmd/gex/const"
"github.com/afritzler/garden-examiner/cmd/gex/context"
"github.com/afritzler/garden-examiner/cmd/gex/util"
)
// init registers the seed filter with the package-level filter registry at
// package load time.
func init() {
	filters.Add(&SeedFilter{})
}

// SeedFilter filters shoots by the seed option given on the command line.
type SeedFilter struct {
}

// Compile-time check that SeedFilter implements util.Filter.
var _ util.Filter = &SeedFilter{}
// AddOptions registers the seed argument option on the command so Match can
// later read it from the parsed options.
func (this *SeedFilter) AddOptions(cmd cmdint.ConfigurableCmdTabCommand) cmdint.ConfigurableCmdTabCommand {
	return cmd.ArgOption(constants.O_SEED).Context(constants.O_SEL_SEED)
}
// Match reports whether the shoot element passes the seed filter: when a
// seed option is present, the shoot's seed name must equal it; otherwise
// every element matches.
func (this *SeedFilter) Match(ctx *context.Context, elem interface{}, opts *cmdint.Options) (bool, error) {
	shoot := elem.(gube.Shoot)
	if seed := opts.GetOptionValue(constants.O_SEED); seed != nil && shoot.GetSeedName() != *seed {
		return false, nil
	}
	return true, nil
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package exit
// Codes that are common to all command times (server + client) follow.
// Success (0) represents a normal process termination.
func Success() Code { return Code{0} }
// UnspecifiedError (1) indicates the process has terminated with an
// error condition. The specific cause of the error can be found in
// the logging output.
func UnspecifiedError() Code { return Code{1} }
// UnspecifiedGoPanic (2) indicates the process has terminated due to
// an uncaught Go panic or some other error in the Go runtime.
//
// The reporting of this exit code likely indicates a programming
// error inside CockroachDB.
//
// Conversely, this should not be used when implementing features.
func UnspecifiedGoPanic() Code { return Code{2} }
// Interrupted (3) indicates the server process was interrupted with
// Ctrl+C / SIGINT.
func Interrupted() Code { return Code{3} }
// CommandLineFlagError (4) indicates there was an error in the
// command-line parameters.
func CommandLineFlagError() Code { return Code{4} }
// LoggingStderrUnavailable (5) indicates that an error occurred
// during a logging operation to the process' stderr stream.
func LoggingStderrUnavailable() Code { return Code{5} }
// LoggingFileUnavailable (6) indicates that an error occurred
// during a logging operation to a file.
func LoggingFileUnavailable() Code { return Code{6} }
// FatalError (7) indicates that a logical error in the server caused
// an emergency shutdown.
func FatalError() Code { return Code{7} }
// TimeoutAfterFatalError (8) indicates that an emergency shutdown
// due to a fatal error did not occur properly due to some blockage
// in the logging system.
func TimeoutAfterFatalError() Code { return Code{8} }
// LoggingNetCollectorUnavailable (9) indicates that an error occurred
// during a logging operation to a network collector.
func LoggingNetCollectorUnavailable() Code { return Code{9} }
// Codes that are specific to client commands follow. It's possible
// for codes to be reused across separate client or server commands.
// Command-specific exit codes should be allocated down from 125.
// 'doctor' exit codes.
// DoctorValidationFailed (125) indicates that the 'doctor' command has
// detected an inconsistency in the SQL metaschema.
func DoctorValidationFailed() Code { return Code{125} }
|
package iterators_test
import (
"fmt"
"log"
"testing"
"github.com/adamluzsi/frameless/ports/iterators"
"github.com/adamluzsi/testcase/assert"
"github.com/adamluzsi/testcase/random"
)
// ExampleFilter demonstrates wrapping a slice iterator with Filter and
// draining it, checking the iterator's error after the loop.
func ExampleFilter() {
	var iter iterators.Iterator[int]
	iter = iterators.Slice([]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
	// Keep only values strictly greater than 2.
	iter = iterators.Filter[int](iter, func(n int) bool { return n > 2 })
	defer iter.Close()
	for iter.Next() {
		n := iter.Value()
		_ = n
	}
	if err := iter.Err(); err != nil {
		log.Fatal(err)
	}
}
// TestFilter covers Filter's pass-through, filtering, and error-propagation
// behavior. NOTE(review): the `iterator` factory variable is reassigned by
// the error subtests below, so subtest order matters — keep that in mind
// before reordering or parallelizing these cases.
func TestFilter(t *testing.T) {
	t.Run("Filter", func(t *testing.T) {
		t.Run("given the iterator has set of elements", func(t *testing.T) {
			originalInput := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
			iterator := func() iterators.Iterator[int] { return iterators.Slice[int](originalInput) }
			t.Run("when filter allow everything", func(t *testing.T) {
				i := iterators.Filter(iterator(), func(int) bool { return true })
				assert.Must(t).NotNil(i)
				numbers, err := iterators.Collect[int](i)
				assert.Must(t).Nil(err)
				assert.Must(t).Equal(originalInput, numbers)
			})
			t.Run("when filter disallow part of the value stream", func(t *testing.T) {
				i := iterators.Filter(iterator(), func(n int) bool { return 5 < n })
				assert.Must(t).NotNil(i)
				numbers, err := iterators.Collect[int](i)
				assert.Must(t).Nil(err)
				assert.Must(t).Equal([]int{6, 7, 8, 9}, numbers)
			})
			t.Run("but iterator encounter an exception", func(t *testing.T) {
				// srcI captures the clean factory before it is stubbed below.
				srcI := iterator
				t.Run("during somewhere which stated in the iterator iterator Err", func(t *testing.T) {
					// Replace the factory with one whose Err always fails.
					iterator = func() iterators.Iterator[int] {
						m := iterators.Stub(srcI())
						m.StubErr = func() error { return fmt.Errorf("Boom!!") }
						return m
					}
					t.Run("it is expect to report the error with the Err method", func(t *testing.T) {
						i := iterators.Filter[int](iterator(), func(int) bool { return true })
						assert.Must(t).NotNil(i)
						assert.Must(t).Equal(i.Err(), fmt.Errorf("Boom!!"))
					})
				})
				t.Run("during Closing the iterator", func(t *testing.T) {
					// Replace the factory with one whose Close always fails.
					iterator = func() iterators.Iterator[int] {
						m := iterators.Stub(srcI())
						m.StubClose = func() error { return fmt.Errorf("Boom!!!") }
						return m
					}
					t.Run("it is expect to report the error with the Err method", func(t *testing.T) {
						i := iterators.Filter(iterator(), func(int) bool { return true })
						assert.Must(t).NotNil(i)
						assert.Must(t).Nil(i.Err())
						assert.Must(t).Equal(i.Close(), fmt.Errorf("Boom!!!"))
					})
				})
			})
		})
	})
}
// BenchmarkFilter measures draining a filtered iterator over 1024 random
// ints per benchmark iteration.
func BenchmarkFilter(b *testing.B) {
	keep := func(n int) bool {
		return n > 500
	}
	rnd := random.New(random.CryptoSeed{})
	var values []int
	for n := 0; n < 1024; n++ {
		values = append(values, rnd.IntN(1000))
	}
	newIter := func() iterators.Iterator[int] {
		return iterators.Filter[int](iterators.Slice[int](values), keep)
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		func() {
			iter := newIter()
			defer iter.Close()
			for iter.Next() {
				// drain only; values are discarded
			}
		}()
	}
}
|
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"testing"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
pb "github.com/4726/discussion-board/services/search/pb"
"github.com/golang/protobuf/proto"
"github.com/olivere/elastic/v7"
"github.com/stretchr/testify/assert"
)
var testApi *Api
var testAddr string
var testCFG Config
// TestMain boots the search API server once for the whole package test run.
func TestMain(m *testing.M) {
	cfg, err := ConfigFromFile("config_test.json")
	if err != nil {
		panic(err)
	}
	api, err := NewApi(cfg)
	if err != nil {
		panic(err)
	}
	testApi = api
	testCFG = cfg
	addr := fmt.Sprintf(":%v", cfg.ListenPort)
	testAddr = addr
	go api.Run(addr)
	// Fixed sleep to let the server start listening.
	// NOTE(review): a readiness probe/retry loop would be more robust.
	time.Sleep(time.Second * 3)
	i := m.Run()
	//close server
	// NOTE(review): the server goroutine is never shut down; it dies with
	// the process on os.Exit below.
	os.Exit(i)
}
// testSetup dials the gRPC server over TLS, wipes the index, and seeds it
// with fixture posts; it returns the client and the seeded posts.
func testSetup(t testing.TB) (pb.SearchClient, []Post) {
	creds, err := credentials.NewClientTLSFromFile(testCFG.TLSCert, testCFG.TLSServerName)
	assert.NoError(t, err)
	conn, err := grpc.Dial(testAddr, grpc.WithTransportCredentials(creds))
	assert.NoError(t, err)
	// defer conn.Close()
	// NOTE(review): the connection is deliberately left open (the defer is
	// commented out) because it must outlive this helper — it leaks one
	// connection per test.
	c := pb.NewSearchClient(conn)
	cleanDB(t)
	posts := fillESTestData(t)
	return c, posts
}
// TestIndexRequired verifies that Index rejects a pb.Post when any of the
// required fields (Title, Body, UserId, Id) is missing.
func TestIndexRequired(t *testing.T) {
	// missing Title
	req := &pb.Post{
		Body:      proto.String("body"),
		UserId:    proto.Uint64(10),
		Id:        proto.Uint64(10),
		Timestamp: proto.Int64(0),
		Likes:     proto.Int64(0),
	}
	testIndexRequired(t, req)
	// missing Body
	req = &pb.Post{
		Title:     proto.String("title"),
		UserId:    proto.Uint64(10),
		Id:        proto.Uint64(10),
		Timestamp: proto.Int64(0),
		Likes:     proto.Int64(0),
	}
	testIndexRequired(t, req)
	// missing UserId (this case was duplicated verbatim in the original;
	// the exact duplicate has been removed)
	req = &pb.Post{
		Title:     proto.String("title"),
		Body:      proto.String("body"),
		Id:        proto.Uint64(10),
		Timestamp: proto.Int64(0),
		Likes:     proto.Int64(0),
	}
	testIndexRequired(t, req)
	// missing Id
	req = &pb.Post{
		Title:     proto.String("title"),
		Body:      proto.String("body"),
		UserId:    proto.Uint64(10),
		Timestamp: proto.Int64(0),
		Likes:     proto.Int64(0),
	}
	testIndexRequired(t, req)
	// NOTE(review): there is no case for a missing Timestamp or Likes —
	// confirm whether the service treats those fields as required too.
}
// TestIndex indexes a fully-populated post and verifies it appears in the
// index alongside the seeded fixtures.
func TestIndex(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.Post{
		Title:  proto.String("title"),
		Body:   proto.String("body"),
		UserId: proto.Uint64(10),
		Id:     proto.Uint64(10),
		// Future timestamp so the new post sorts last in queryESC's
		// LastUpdate-ascending ordering.
		Timestamp: proto.Int64(time.Now().Unix() + 30),
		Likes:     proto.Int64(1),
	}
	_, err := c.Index(context.TODO(), req)
	assert.NoError(t, err)
	// Positional Post literal: Title, Body, Id, UserId, Timestamp, Likes —
	// field order must match the Post struct declaration.
	expectedPost := Post{
		req.GetTitle(),
		req.GetBody(),
		req.GetId(),
		req.GetUserId(),
		req.GetTimestamp(),
		req.GetLikes(),
	}
	posts = append(posts, expectedPost)
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}
// TestSearchNoTotal verifies Search fails when the Total field is absent and
// that the failed call leaves the index unchanged.
func TestSearchNoTotal(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.SearchQuery{Term: proto.String("term")}
	_, err := c.Search(context.TODO(), req)
	assert.Error(t, err)
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}

// TestSearchNoTerm verifies Search fails when the Term field is absent and
// that the failed call leaves the index unchanged.
func TestSearchNoTerm(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.SearchQuery{Total: proto.Uint64(10)}
	_, err := c.Search(context.TODO(), req)
	assert.Error(t, err)
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}
// TestSearch searches for "hello", which (per the fixtures in
// fillESTestData) matches posts 1 and 3.
func TestSearch(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.SearchQuery{Term: proto.String("hello"), Total: proto.Uint64(10), From: proto.Uint64(0)}
	resp, err := c.Search(context.TODO(), req)
	assert.NoError(t, err)
	expected := &pb.SearchResult{Id: []uint64{posts[0].Id, posts[2].Id}}
	assert.Equal(t, expected, resp)
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}

// TestSearch2 searches for "world", which matches all three fixture posts.
func TestSearch2(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.SearchQuery{Term: proto.String("world"), Total: proto.Uint64(10), From: proto.Uint64(0)}
	resp, err := c.Search(context.TODO(), req)
	assert.NoError(t, err)
	expected := &pb.SearchResult{Id: []uint64{posts[0].Id, posts[1].Id, posts[2].Id}}
	assert.Equal(t, expected, resp)
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}

// TestSearch3 verifies the From offset: skipping 2 of the 3 "world" matches
// leaves only the last one.
func TestSearch3(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.SearchQuery{Term: proto.String("world"), Total: proto.Uint64(10), From: proto.Uint64(2)}
	resp, err := c.Search(context.TODO(), req)
	assert.NoError(t, err)
	expected := &pb.SearchResult{Id: []uint64{posts[2].Id}}
	assert.Equal(t, expected, resp)
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}
// TestSetLikesNoId verifies SetLikes fails without an Id and leaves the
// index unchanged.
func TestSetLikesNoId(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.Likes{Likes: proto.Int64(100)}
	_, err := c.SetLikes(context.TODO(), req)
	assert.Error(t, err)
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}

// TestSetLikesNoLikes verifies SetLikes fails without a Likes value.
func TestSetLikesNoLikes(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.Likes{Id: proto.Uint64(1)}
	_, err := c.SetLikes(context.TODO(), req)
	assert.Error(t, err)
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}

// TestUpdateLikesDoesNotExist verifies SetLikes on an unknown id succeeds
// without modifying any document (upsert is not performed).
func TestUpdateLikesDoesNotExist(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.Likes{Id: proto.Uint64(5), Likes: proto.Int64(100)}
	_, err := c.SetLikes(context.TODO(), req)
	assert.NoError(t, err)
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}

// TestUpdateLikes verifies SetLikes updates the like count of an existing
// post in place.
func TestUpdateLikes(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.Likes{Id: proto.Uint64(posts[1].Id), Likes: proto.Int64(100)}
	_, err := c.SetLikes(context.TODO(), req)
	assert.NoError(t, err)
	posts[1].Likes = 100
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}
// TestDeletePostNoId verifies DeletePost fails without an Id and leaves the
// index unchanged.
func TestDeletePostNoId(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.Id{}
	_, err := c.DeletePost(context.TODO(), req)
	assert.Error(t, err)
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}

// TestDeletePostDoesNotExist verifies deleting an unknown id succeeds
// without touching existing documents.
func TestDeletePostDoesNotExist(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.Id{Id: proto.Uint64(5)}
	_, err := c.DeletePost(context.TODO(), req)
	assert.NoError(t, err)
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}

// TestDeletePost verifies deleting an existing post removes exactly that
// post from the index.
func TestDeletePost(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.Id{Id: proto.Uint64(posts[1].Id)}
	_, err := c.DeletePost(context.TODO(), req)
	assert.NoError(t, err)
	posts = []Post{posts[0], posts[2]}
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}
// TestSetTimestampNoId verifies SetTimestamp fails without an Id.
func TestSetTimestampNoId(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.Timestamp{Timestamp: proto.Int64(1)}
	_, err := c.SetTimestamp(context.TODO(), req)
	assert.Error(t, err)
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}

// TestSetTimestampNoTimestamp verifies SetTimestamp fails without a
// Timestamp value.
func TestSetTimestampNoTimestamp(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.Timestamp{Id: proto.Uint64(1)}
	_, err := c.SetTimestamp(context.TODO(), req)
	assert.Error(t, err)
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}

// TestSetTimstampDoesNotExist verifies SetTimestamp on an unknown id
// succeeds without modifying documents.
// NOTE(review): the function name has a typo ("Timstamp"); left unchanged
// here because renaming is an interface change, but worth fixing.
func TestSetTimstampDoesNotExist(t *testing.T) {
	c, posts := testSetup(t)
	req := &pb.Timestamp{Id: proto.Uint64(5), Timestamp: proto.Int64(time.Now().Unix())}
	_, err := c.SetTimestamp(context.TODO(), req)
	assert.NoError(t, err)
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}

// TestSetTimestamp verifies SetTimestamp updates an existing post's
// LastUpdate field.
func TestSetTimestamp(t *testing.T) {
	c, posts := testSetup(t)
	newTimestamp := time.Now().Unix()
	req := &pb.Timestamp{Id: proto.Uint64(posts[1].Id), Timestamp: proto.Int64(newTimestamp)}
	_, err := c.SetTimestamp(context.TODO(), req)
	assert.NoError(t, err)
	posts[1].LastUpdate = newTimestamp
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}
// queryESC returns every post currently in the test index, ordered by
// LastUpdate ascending.
func queryESC(t testing.TB) []Post {
	// Refresh so documents indexed by the test become visible to the search.
	// The original discarded this error silently; it is now asserted.
	_, err := testApi.esc.client.Refresh().Index(testApi.esc.indexName).Do(context.TODO())
	assert.NoError(t, err)
	posts := []Post{}
	query := elastic.NewMatchAllQuery()
	searchResult, err := testApi.esc.client.Search().
		Index(testApi.esc.indexName).
		Query(query).
		SortBy(elastic.NewFieldSort("LastUpdate").Asc()).
		Do(context.TODO())
	assert.NoError(t, err)
	for _, hit := range searchResult.Hits.Hits {
		var p Post
		err := json.Unmarshal(hit.Source, &p)
		assert.NoError(t, err)
		posts = append(posts, p)
	}
	return posts
}
// cleanDB removes every document from the test index.
func cleanDB(t testing.TB) {
	query := elastic.NewMatchAllQuery()
	_, err := testApi.esc.client.DeleteByQuery().
		Index(testApi.esc.indexName).
		Query(query).
		Do(context.TODO())
	assert.NoError(t, err)
}

// fillESTestData seeds the index with three fixture posts (staggered
// LastUpdate values so queryESC's ascending sort is deterministic) and
// returns them in insertion order.
func fillESTestData(t testing.TB) []Post {
	p1 := Post{"my first post", "hello world", 1, 1, time.Now().Unix(), 0}
	indexForTesting(t, p1)
	p2 := Post{"post @2 world", "body #2", 2, 2, time.Now().Unix() + 10, 0}
	indexForTesting(t, p2)
	p3 := Post{"title3", "hello WORLd", 3, 3, time.Now().Unix() + 20, 0}
	indexForTesting(t, p3)
	return []Post{p1, p2, p3}
}

// indexForTesting indexes a single post and waits for it to become visible
// (Refresh "wait_for").
func indexForTesting(t testing.TB, p Post) {
	_, err := testApi.esc.client.Index().
		Index(testApi.esc.indexName).
		Refresh("wait_for").
		BodyJson(p).
		Do(context.TODO())
	assert.NoError(t, err)
}

// testIndexRequired asserts that indexing req fails and that the index
// contents are unchanged afterwards.
func testIndexRequired(t *testing.T, req *pb.Post) {
	c, posts := testSetup(t)
	_, err := c.Index(context.TODO(), req)
	assert.Error(t, err)
	postsAfter := queryESC(t)
	assert.Equal(t, posts, postsAfter)
}
|
package main
import (
"github.com/gondor/docker-volume-netshare/netshare"
)
// main delegates directly to the netshare volume-driver command entry point.
func main() {
	netshare.Execute()
}
|
package main
import (
"fmt"
"os"
"github.com/urfave/cli"
"github.com/evrynet-official/evrynet-tools/lib/node"
sc "github.com/evrynet-official/evrynet-tools/stakingcontract"
)
// main wires up and runs the staking stress-test CLI, exiting non-zero on
// any command error.
func main() {
	app := cli.NewApp()
	app.Name = "stress-test tool"
	app.Usage = "Stress test for staking contract"
	app.Version = "0.0.1"
	app.Commands = stakingCommands()
	if err := app.Run(os.Args); err != nil {
		// Write errors are deliberately ignored when reporting to stderr.
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

// stakingCommands builds the CLI command list: currently only "stressvotes",
// which combines the stress-test flags with the node connection flags.
func stakingCommands() []cli.Command {
	stressFlags := sc.NewStressTestFlag()
	stressFlags = append(stressFlags, node.NewEvrynetNodeFlags()...)
	stressVotesCmd := cli.Command{
		Action:      stressVoters,
		Name:        "stressvotes",
		Usage:       "sends vote from list voter to a candidate",
		Description: "sends vote from list voter to a candidate",
		Flags:       stressFlags,
	}
	return []cli.Command{stressVotesCmd}
}
|
package api
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"github.com/mitchellh/mapstructure"
"github.com/opsbot/cli-go/utils"
log "github.com/sirupsen/logrus"
)
var (
	// Config - hold a reference to configuration
	Config Configuration
)

// CheckConfig - check config is set; aborts the process when it is not.
func CheckConfig() {
	// Compare against the zero value to detect an unset config.
	// NOTE(review): this requires Configuration to contain only comparable
	// field types — confirm when adding slice/map fields.
	if (Configuration{}) == Config {
		log.Fatalln("Config not set: call SetEnvironment")
	}
}
// Request - execute an authenticated API request and return the response.
// NOTE(review): all failure paths call log.Fatalf, terminating the process —
// confirm this CLI-style behavior is acceptable for every caller.
func Request(method string, url string, body []byte) Response {
	client := &http.Client{}
	req, reqErr := http.NewRequest(method, url, bytes.NewBuffer(body))
	if reqErr != nil {
		log.Fatalf("HTTPRequestError: %v", reqErr)
	}
	SetAuthHeader(req)
	log.Debug(req.Header)
	resp, resErr := client.Do(req)
	if resErr != nil {
		log.Fatalf("HTTPResponseError: %v", resErr)
	}
	defer resp.Body.Close()
	log.Info(resp.Status)
	// NOTE(review): ioutil.ReadAll is deprecated since Go 1.16 in favor of
	// io.ReadAll; switching requires updating the file's imports.
	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("ReadError: %v", err)
	}
	return Response{
		Status:     resp.Status,
		StatusCode: resp.StatusCode,
		Body:       string(respBody),
	}
}
// SetAuthHeader - add the bearer token plus standard JSON accept/content
// headers to the request.
func SetAuthHeader(req *http.Request) {
	req.Header.Set("Accept", "application/json")
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+Config.Token)
}
// SetEnvironment - set environment config from a generic map.
// NOTE(review): the mapstructure.Decode error is discarded; a malformed map
// silently yields a partially-populated Config — confirm intended.
func SetEnvironment(cfg map[string]interface{}) {
	Config = Configuration{}
	mapstructure.Decode(cfg, &Config)
}
// getEndpoint builds the full API URL by joining the configured host with
// the given path.
func getEndpoint(path string) string {
	// Short declaration instead of the original split var/assign.
	url := fmt.Sprintf("%v/%v", Config.Host, path)
	log.Tracef("getEndpoint %v\n", url)
	return url
}
// showFullResponseBody pretty-prints the response body as JSON.
// NOTE(review): the Unmarshal error is ignored — a non-JSON body prints an
// empty map; confirm this best-effort behavior is intended.
func showFullResponseBody(resp Response) {
	var data map[string]interface{}
	json.Unmarshal([]byte(resp.Body), &data)
	utils.PrettyPrint(data)
}
|
package retry
import (
"errors"
"fmt"
"strings"
"time"
"go.uber.org/zap"
)
/* QuickRetry 重试
参数:
* fun func() error 重试的函数
* retryExtraCheck func(error) bool 重试额外处理检查
* retryExtra func() 额外处理
* timeout time.Duration 重试最长时间
* retryInterval time.Duration 重试间隔
返回值:
* error error
*/
func QuickRetry(fun func() error, retryExtraCheck func(error) bool, retryExtra func(), timeout, retryInterval time.Duration) error {
times := 0
start := time.Now()
var err error
for {
times++
if time.Since(start).Seconds() >= timeout.Seconds() {
return fmt.Errorf("重试超过%s,依然失败,重试次数%d", timeout.String(), times)
}
if err = fun(); err == nil {
return nil
}
if retryExtraCheck(err) {
retryExtra()
}
time.Sleep(retryInterval)
}
}
/* Retry retries an operation until it succeeds or the timeout elapses.
Parameters:
 * timeout time.Duration   total time budget
 * interval time.Duration  wait between attempts
 * logger *zap.Logger      logger
 * function func() error   the operation to execute
Returns:
 * error                   nil on success, joined failure messages otherwise
*/
func Retry(timeout, interval time.Duration, logger *zap.Logger, function func() error) error {
	return RetryGeneric(NewTimeoutJudge(timeout), interval, logger, function)
}
/* RetryAllTheTime retries forever (no upper time limit) until success.
Parameters:
 * interval time.Duration  wait between attempts
 * logger *zap.Logger      logger
 * function func() error   the operation to execute
Returns:
 * error                   nil on success (in practice it only returns on success)
*/
func RetryAllTheTime(interval time.Duration, logger *zap.Logger, function func() error) error {
	return RetryGeneric(nil, interval, logger, function)
}
/* RetryGeneric retries with a caller-supplied termination judge; a judge
returning true from Finished ends the loop.
Parameters:
 * judge RetryJudge        custom judge; nil means retry forever
 * interval time.Duration  wait between attempts
 * logger *zap.Logger      logger; must be non-nil (used on every failure)
 * function func() error   the operation to execute
Returns:
 * error                   nil on success, otherwise all failure messages joined by ","
*/
func RetryGeneric(judge RetryJudge, interval time.Duration, logger *zap.Logger, function func() error) error {
	var errMsg []string
	// Wrap so a nil judge becomes "never finished" (retry forever).
	judge = NewNeverFinishJudge(judge)
	i := 0
	for ; !judge.Finished(); time.Sleep(interval) { // sleep between failed attempts
		if i != 0 {
			logger.Debug("重试")
		}
		// Execute the target operation.
		err := function()
		if err == nil {
			// Success: return immediately without waiting.
			return nil
		}
		logger.Error("错误", zap.String("错误信息", err.Error()))
		errMsg = append(errMsg, err.Error())
		i++
	}
	return errors.New(strings.Join(errMsg, ","))
}
/* ------------------------------ RetryJudge definitions ------------------------------*/

// RetryJudge decides whether a retry loop has ended.
type RetryJudge interface {
	// Finished returns true when retrying should stop.
	Finished() bool
}

// TimeoutJudge is a RetryJudge based on a total deadline.
type TimeoutJudge struct {
	deadLine time.Time
}

// NewTimeoutJudge builds a RetryJudge that finishes once the given total
// duration has elapsed.
func NewTimeoutJudge(timeout time.Duration) RetryJudge {
	return &TimeoutJudge{deadLine: time.Now().Add(timeout)}
}

// Finished reports whether the deadline has passed.
func (t TimeoutJudge) Finished() bool {
	return time.Now().After(t.deadLine)
}

// TimesJudge is a RetryJudge based on a fixed number of attempts.
type TimesJudge struct {
	leftTimes uint
}

// NewTimesJudge builds a RetryJudge that allows times attempts before
// reporting finished.
func NewTimesJudge(times uint) RetryJudge {
	return &TimesJudge{leftTimes: times}
}

// Finished reports whether the allowed attempts are used up; each call that
// returns false consumes one attempt.
func (l *TimesJudge) Finished() bool {
	if l.leftTimes == 0 {
		return true
	}
	l.leftTimes--
	return false
}
/* ------------------------------ composite RetryJudge implementations ------------------------------*/

// NeverFinishJudge wraps an optional RetryJudge: when the wrapped judge is
// present it defers to it, otherwise it never finishes.
type NeverFinishJudge struct {
	RetryJudge // embedded judge; may be nil
}

// NewNeverFinishJudge wraps retryJudge, which may be nil.
func NewNeverFinishJudge(retryJudge RetryJudge) RetryJudge {
	return &NeverFinishJudge{RetryJudge: retryJudge}
}

// Finished defers to the embedded judge when present; a nil judge reports
// false forever.
func (w NeverFinishJudge) Finished() bool {
	if w.RetryJudge == nil {
		return false
	}
	return w.RetryJudge.Finished()
}
// LogicOperator is a logical operator for combining judges.
type LogicOperator int

const (
	// LogicAnd - logical AND: finished only when every judge is finished.
	LogicAnd LogicOperator = iota + 1
	// LogicOr - logical OR: finished when any judge is finished.
	LogicOr
)
// LogicCompositeJudge combines several RetryJudges with a logical operator.
type LogicCompositeJudge struct {
	operator LogicOperator // the logical operator
	judges   []RetryJudge  // the judges; no element may be nil
}

/* NewLogicCompositeJudge creates a LogicCompositeJudge.
Parameters:
 * operator LogicOperator  the logical operator
 * judges []RetryJudge     the judges; no element may be nil
Returns:
 * RetryJudge
*/
func NewLogicCompositeJudge(operator LogicOperator, judges []RetryJudge) RetryJudge {
	return &LogicCompositeJudge{operator: operator, judges: judges}
}

/* Finished reports whether the composite judge has finished.
An unknown operator yields false (never finished).
NOTE(review): the AND branch deliberately calls Finished on EVERY judge
(no short-circuit), so stateful judges such as TimesJudge still consume an
attempt; the OR branch, by contrast, short-circuits via || once one judge
reports finished — confirm this asymmetry is intended before changing it.
*/
func (l LogicCompositeJudge) Finished() bool {
	switch l.operator {
	case LogicAnd:
		finished := true
		for i := range l.judges {
			finished = l.judges[i].Finished() && finished
		}
		return finished
	case LogicOr:
		finished := false
		for i := range l.judges {
			finished = finished || l.judges[i].Finished()
		}
		return finished
	default:
		return false
	}
}
|
//Package twitter of GOTOJS/stream offers a stream implementation for Twitter.
//Currently it is based on the twitter location API which needs to be generalized.
//
//The client key/secret configuration for the actual twitter API can be made in a local file
// named "twitter_account.json"
//
// A sample file is provided with the source: "twitter_account.json.sample"
package twitter
import (
"bytes"
"encoding/json"
"errors"
"fmt"
. "github.com/sebkl/gotojs/stream"
"github.com/sebkl/imgurl"
"github.com/sebkl/twitterstream"
"log"
"net/url"
"os"
"runtime/debug"
)
// twitterConfiguration holds the twitter API credentials loaded from the
// account JSON file (see "twitter_account.json.sample").
type twitterConfiguration struct {
	APIKey       string
	APISecret    string
	AccessToken  string
	AccessSecret string
}
// Tweet is the message payload emitted by this stream source: a geolocated
// tweet plus optional media thumbnails and content flags.
type Tweet struct {
	Long        float64  `json:"long"` // longitude (from coordinates or place bounding box)
	Lat         float64  `json:"lat"`  // latitude
	Text        string   `json:"text"`
	Sender      string   `json:"sender"` // sender's screen name
	Retweet     bool     `json:"retweet"`
	Images      []string `json:"images"`    // photo media URLs attached to the tweet
	Thumbnail   string   `json:"thumbnail"` // API link, raw URL, or data URL per config
	IsNude      bool     `json:"isnude"`    // set by the nude filter during transcoding
	IsSensitive bool     `json:"issensitive"`
}
// TwitterStreamConfig holds the tunables of the twitter stream source.
type TwitterStreamConfig struct {
	ThumbnailMethod  string `json:"ThumbnailMethod"` // [api,url,dataurl]
	ThumbnailSize    int    `json:"ThumbnailSize"`
	ThumbnailAPICall string `json:"ThumbnailAPICall"` // "Image/Thumbnail?p=%s&p=%s&p=%s"
	NudeFilter       bool   `json:"NudeFilter"`
	TranscodeWorker  int    `json:"TranscodeWorker"`
	TranscodeBuffer  int    `json:"TranscodeBuffer"`
	BaseUrl          string `json:"BaseUrl"` // "http://localhost:8080/gotojs/"
}

// JSON renders the configuration as a JSON document (including the trailing
// newline that json.Encoder appends).
func (s *TwitterStreamConfig) JSON() string {
	var buf bytes.Buffer
	json.NewEncoder(&buf).Encode(s)
	return buf.String()
}

// APIUrl builds the thumbnail API call URL for the media URL u from the
// configured base URL, call template, and thumbnail size.
func (s *TwitterStreamConfig) APIUrl(u string) string {
	call := fmt.Sprintf(s.ThumbnailAPICall, url.QueryEscape(u), s.ThumbnailSize, s.ThumbnailSize)
	return s.BaseUrl + call
}
// TwitterSource is a GOTOJS stream source backed by the twitter streaming
// API, with an image transcoding pipeline for thumbnails.
type TwitterSource struct {
	config       *twitterConfiguration    // API credentials
	streamConfig *TwitterStreamConfig     // stream tunables (or defaults)
	conn         *twitterstream.Connection // nil until the stream is connected
	client       *twitterstream.Client
	configFile   *os.File // open handle of the account config file
	streamFile   *os.File // open handle of the stream config file (may be nil on read failure — TODO confirm)
	transcoder   *imgurl.TranscodeService
	baseUrl      string
}
//NewTwitterSource creates a new stream source based on the given configuration file.
//account is the path of the credentials JSON file, stream the path of the
//stream configuration JSON (optional; defaults apply when unreadable) and
//baseUrl the GOTOJS base URL used for thumbnail API links.
func NewTwitterSource(account, stream, baseUrl string) (ret *TwitterSource, err error) {
	//Read twitter config from json file.
	configFile, err := os.Open(account)
	if err != nil {
		return
	}
	decoder := json.NewDecoder(configFile)
	config := &twitterConfiguration{}
	// NOTE(review): this Decode error is ignored; a malformed account file
	// silently yields empty credentials — confirm whether this is intended.
	decoder.Decode(config)
	//Read stream config from json file, starting from built-in defaults.
	streamConfig := &TwitterStreamConfig{
		ThumbnailMethod:  "dataurl",
		ThumbnailSize:    100,
		NudeFilter:       false,
		TranscodeWorker:  5,
		TranscodeBuffer:  10,
		ThumbnailAPICall: "/Image/Thumbnail?p=%s&p=%d&p=%d",
		BaseUrl:          baseUrl}
	streamFile, err := os.Open(stream)
	// If config cannot be read use default values; the open error is
	// reported in the log and then deliberately cleared.
	if err == nil {
		decoder = json.NewDecoder(streamFile)
		decoder.Decode(streamConfig)
		err = nil
		log.Printf("Twitterstream configuration: %s", streamConfig.JSON())
	} else {
		log.Printf("Could not read stream configuration: %s", err)
		err = nil
	}
	client := twitterstream.NewClient(config.APIKey, config.APISecret, config.AccessToken, config.AccessSecret)
	ret = &TwitterSource{
		config:       config,
		configFile:   configFile,
		streamFile:   streamFile,
		streamConfig: streamConfig,
		transcoder:   imgurl.NewTranscodeService(streamConfig.TranscodeWorker, streamConfig.TranscodeBuffer),
		client:       client}
	return
}
//Next fetches the next message from the source stream. Completed thumbnail
//transcoding results are drained first; otherwise the next tweet is read
//from the open twitter stream connection and converted into a Message.
func (s *TwitterSource) Next() (mes Message, err error) {
	defer func() {
		if r := recover(); r != nil {
			log.Println("Recovered in Next", r)
			debug.PrintStack()
			err = errors.New("Panic in Next call.")
		}
	}()
	if s.conn == nil {
		//BUG fix: typo in the error message ("Connection s not established.").
		return mes, errors.New("Connection is not established.")
	}
	for err == nil {
		//Take finished thumbnails from the transcoder queue first.
		for s.transcoder.Ready() {
			res := s.transcoder.Get()
			tweet, ok := res.Payload.(*Tweet)
			if !ok {
				log.Printf("Invalid payload in transcoding Response. Ignoring.")
				continue
			}
			//TODO: check other images too
			tweet.Thumbnail = res.Image
			if len(res.Tags) > 0 {
				if in, ok := res.Tags[0].(bool); ok {
					tweet.IsNude = in
				}
			}
			mes = NewMessage(tweet)
			return
		}
		//Fetch from Twitter stream.
		//BUG fix: the original shadowed err with ":=", so a stream error never
		//reached the loop condition and Next retried forever; assign to the
		//named return instead so the error is surfaced to the caller.
		tweet, nerr := s.conn.Next()
		if nerr != nil {
			err = nerr
			break
		}
		var (
			payload *Tweet
			isSensitive bool
		)
		//If sensitivity is not given in the tweet it stays false (not sensitive).
		if tweet.PossiblySensitive != nil {
			isSensitive = *tweet.PossiblySensitive
		}
		if tweet.Coordinates != nil {
			payload = &Tweet{
				Long: float64(tweet.Coordinates.Long),
				Lat: float64(tweet.Coordinates.Lat),
				Text: tweet.Text,
				IsSensitive: isSensitive,
				Sender: tweet.User.ScreenName}
		} else if tweet.Place != nil {
			//BUG fix: the original condition read "tweet.Place != nil &&
			//tweet.Place != nil"; the duplicate clause was removed. A
			//BoundingBox/Points guard was probably intended — TODO confirm
			//against the twitterstream types.
			payload = &Tweet{
				Long: float64(tweet.Place.BoundingBox.Points[0].Long),
				Lat: float64(tweet.Place.BoundingBox.Points[0].Lat),
				Text: tweet.Text,
				IsSensitive: isSensitive,
				Sender: tweet.User.ScreenName}
		} else {
			log.Printf("Invalid tweet received. Ignoring.")
			continue
		}
		payload.Retweet = tweet.RetweetedStatus != nil
		//Check if some images are attached as Entities to the tweet.
		//If the transcoding buffer is full, attached images are ignored.
		if len(tweet.Entities.Media) > 0 && !s.transcoder.Full() {
			iu := make([]string, 0, len(tweet.Entities.Media))
			for _, v := range tweet.Entities.Media {
				if v.Type == "photo" {
					iu = append(iu, v.MediaUrl)
				}
			}
			payload.Images = iu
			if len(iu) == 0 {
				//BUG fix: media without any photo entries used to feed an
				//empty URL into the thumbnail path; skip thumbnailing instead.
				return NewMessage(payload), nil
			}
			//TODO: change imgurl to be capable of transcoding multiple images per request.
			var filter []imgurl.Filter
			if s.streamConfig.NudeFilter {
				filter = []imgurl.Filter{imgurl.NudeFilter}
			}
			switch s.streamConfig.ThumbnailMethod {
			case "api":
				payload.Thumbnail = s.streamConfig.APIUrl(iu[0])
			case "url":
				payload.Thumbnail = iu[0]
			default: //"dataurl"
				req := &imgurl.Request{
					Url: iu[0],
					Payload: payload,
					Filters: filter,
					Maxheight: s.streamConfig.ThumbnailSize,
					Maxwidth: s.streamConfig.ThumbnailSize}
				s.transcoder.Push(req)
				continue
			}
		}
		return NewMessage(payload), nil
	}
	return
}
//Close interrupts the source stream. The connection to the twitter API is closed.
//It is safe to call Close before Start (or after a failed Start).
func (s *TwitterSource) Close() {
	//BUG fix: calling Close before a connection was established used to
	//dereference a nil pointer.
	if s.conn != nil {
		s.conn.Close()
	}
}
//Start starts the source stream. A connection to the twitter API is established.
//The location filter spans the whole globe, so effectively all geo-tagged
//tweets are received.
func (s *TwitterSource) Start() (err error) {
	log.Println("Starting twitter stream.")
	s.conn, err = s.client.Locations(twitterstream.Point{-90.0, -180.0}, twitterstream.Point{90.0, 180.0})
	if err != nil {
		log.Printf("Twitterfeed aborted: %s", err)
	}
	return
}
//NewTwitterStream create a new GOTOJS stream implementation that is bound to
//the twitter API stream using the default "twitter_account.json" and
//"twitter_stream.json" configuration files.
func NewTwitterStream(baseUrl string) (stream *Stream, err error) {
	source, serr := NewTwitterSource("twitter_account.json", "twitter_stream.json", baseUrl)
	if serr != nil {
		return nil, serr
	}
	return NewStream(source)
}
|
package rt
import (
"testing"
"github.com/pkg/errors"
)
// TestReactrJobGroup schedules several math jobs in one group and waits
// for all of them to finish.
func TestReactrJobGroup(t *testing.T) {
	r := New()
	doMath := r.Register("math", math{})
	grp := NewGroup()
	for _, in := range []input{{5, 6}, {7, 8}, {9, 10}} {
		grp.Add(doMath(in))
	}
	if err := grp.Wait(); err != nil {
		t.Error(errors.Wrap(err, "failed to grp.Wait"))
	}
}
// TestLargeGroup stresses a group with 50k identical jobs.
func TestLargeGroup(t *testing.T) {
	r := New()
	doMath := r.Register("math", math{})
	grp := NewGroup()
	const jobs = 50000
	for n := 0; n < jobs; n++ {
		grp.Add(doMath(input{5, 6}))
	}
	if err := grp.Wait(); err != nil {
		t.Error(err)
	}
}
// TestLargeGroupWithPool stresses a group with 50k jobs served by a
// bounded worker pool of size 3.
func TestLargeGroupWithPool(t *testing.T) {
	r := New()
	doMath := r.Register("math", math{}, PoolSize(3))
	grp := NewGroup()
	const jobs = 50000
	for n := 0; n < jobs; n++ {
		grp.Add(doMath(input{5, n}))
	}
	if err := grp.Wait(); err != nil {
		t.Error(err)
	}
}
// groupWork is a test job that fans out several generic sub-jobs as a group.
type groupWork struct{}

// Run runs a groupWork job: it schedules one "first" job and four
// "group work" jobs, returning the group for the caller to wait on.
func (g groupWork) Run(job Job, ctx *Ctx) (interface{}, error) {
	grp := NewGroup()
	grp.Add(ctx.Do(NewJob("generic", "first")))
	for n := 0; n < 4; n++ {
		grp.Add(ctx.Do(NewJob("generic", "group work")))
	}
	return grp, nil
}

// OnChange is a no-op; groupWork has no reloadable state.
func (g groupWork) OnChange(change ChangeEvent) error {
	return nil
}
// TestReactrChainedGroup verifies that waiting on a job which itself
// returns a group resolves the whole chain.
func TestReactrChainedGroup(t *testing.T) {
	r := New()
	r.Register("generic", generic{})
	doGrp := r.Register("group", groupWork{})
	_, err := doGrp(nil).Then()
	if err != nil {
		t.Error(errors.Wrap(err, "failed to doGrp"))
	}
}
|
package http
import (
"net/http"
"github.com/urfave/negroni"
)
// Server wraps the standard library HTTP server so package-specific
// behaviour can be attached without changing callers.
type Server struct {
	*http.Server
}
// Address returns the TCP address the HTTP server binds to.
func Address() string {
	const listenAddr = ":8080"
	return listenAddr
}
// NewServer builds the HTTP server: a mux with the /ping route, wrapped in a
// negroni middleware chain, listening on Address().
func NewServer() (*Server, error) {
	mux := http.NewServeMux()
	// newHandler is declared elsewhere in this package; presumably it serves
	// the /ping health check — confirm against its definition.
	handler := newHandler("ping")
	mux.Handle("/ping", handler)
	router := negroni.New()
	router.UseHandler(mux)
	srv := &http.Server{
		Addr: Address(),
		Handler: router,
	}
	return &Server{srv}, nil
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"time"
"github.com/netsec-ethz/conn-tester/client/tests"
"github.com/netsec-ethz/conn-tester/client/tests/httptest"
"github.com/netsec-ethz/conn-tester/client/tests/ntptest"
"github.com/netsec-ethz/conn-tester/client/tests/tcpin"
"github.com/netsec-ethz/conn-tester/client/tests/tcpout"
"github.com/netsec-ethz/conn-tester/client/tests/udpin"
"gopkg.in/alecthomas/kingpin.v2"
)
// Command-line flags controlling the test run.
var (
	configFilePath = kingpin.Flag("config", "Name of configuration file").Required().String()
	output = kingpin.Flag("output_result", "Should output test result").Bool()
	outputFilePath = kingpin.Flag("output_path", "Location to output path").Default("result.json").String()
)
// main registers all known connectivity tests, runs the tests listed in the
// configuration file, and optionally writes the results as JSON.
func main() {
	rand.Seed(time.Now().UTC().UnixNano())
	fmt.Println("--- Starting client application ---")
	kingpin.Parse()
	factory := tests.CreateTestFactory()
	factory.AddTest("ntp_test", ntptest.Create)
	factory.AddTest("http_test", httptest.Create)
	factory.AddTest("tcp_out", tcpout.Create)
	factory.AddTest("tcp_in", tcpin.Create)
	factory.AddTest("udp_in", udpin.Create)
	testList, err := tests.LoadTests(factory, *configFilePath)
	if err != nil {
		// BUG fix: execution used to continue with a possibly-nil test list.
		fmt.Println(err)
		return
	}
	for _, config := range testList.TestList {
		tests.RunTest(config)
	}
	if *output {
		fmt.Printf("Saving test result in %s \n", *outputFilePath)
		// BUG fix: the Marshal and WriteFile errors were silently dropped.
		resultJson, err := json.Marshal(testList)
		if err != nil {
			fmt.Println(err)
			return
		}
		if err := ioutil.WriteFile(*outputFilePath, resultJson, 0644); err != nil {
			fmt.Println(err)
		}
	}
}
|
package main
import (
"bufio"
"fmt"
"github.com/codegangsta/cli"
"github.com/ntedeschi/lineage/devpat"
"log"
"os"
"strings"
)
// main wires up the lineage CLI: global flags (minsize, percent) plus the
// codev, subs and fates subcommands.
func main() {
	log.SetPrefix("lineage ")
	log.SetFlags(0)
	app := cli.NewApp()
	app.Name = "lineage"
	app.Usage = "analyze lineage trees"
	// Destinations for the global flags declared below.
	var minsize int
	var percent float64
	var cutoff float64
	app.Flags = []cli.Flag{
		cli.IntFlag{
			Name: "minsize, m",
			Value: 7,
			Usage: "Minimum size of sublineage.",
			Destination: &minsize,
		},
		cli.Float64Flag{
			Name: "percent, p",
			Value: 50.0,
			Usage: "Maximum percentage of parent tree size of sublineage size.",
			Destination: &percent,
		},
	}
	app.Commands = []cli.Command{
		{
			Name: "codev",
			Usage: "Calculate codev scores between cell types",
			ArgsUsage: "tree celltypes",
			Flags: []cli.Flag{
				cli.BoolFlag{
					Name: "notall",
					Usage: "Do not get all sublineages",
				},
			},
			Action: func(c *cli.Context) {
				args := c.Args()
				if len(args) < 2 {
					log.Fatalf("codev: required arguments: tree celltypes")
				}
				treeFile := args[0]
				cellTypeFile := args[1]
				cots := codev(treeFile, cellTypeFile, percent, minsize, c.Bool("notall"))
				fmt.Println(cots)
			},
		},
		{
			Name: "subs",
			Usage: "Get the sublineages of a tree",
			ArgsUsage: "tree",
			Flags: []cli.Flag{
				cli.BoolFlag{
					Name: "notall",
					Usage: "Do not get all sublineages",
				},
			},
			Action: func(c *cli.Context) {
				args := c.Args()
				if len(args) < 1 {
					log.Fatalf("subs: required arguments: tree")
				}
				treeFile := args[0]
				// "notall" selects filtered sublineages; otherwise all are printed.
				if c.Bool("notall") {
					printSubs(treeFile, percent, minsize)
				} else {
					printAllSubs(treeFile)
				}
			},
		},
		{
			Name: "fates",
			Usage: "Calculate the fates of progenitors of sublineages",
			ArgsUsage: "tree xdata markers",
			Flags: []cli.Flag{
				cli.BoolFlag{
					Name: "notall",
					Usage: "Do not get all sublineages",
				},
				cli.Float64Flag{
					Name: "cutoff, c",
					Value: 1.0,
					Usage: "Expression cutoff value to call a marker gene as expressed.",
					Destination: &cutoff,
				},
			},
			Action: func(c *cli.Context) {
				args := c.Args()
				if len(args) < 3 {
					log.Fatalf("fates: missing required arguments: tree xdata markers")
				}
				treeFile := args[0]
				xdatFile := args[1]
				markerFile := args[2]
				// NOTE: here "notall" maps to all=false in fates().
				if c.Bool("notall") {
					fates(treeFile, xdatFile, markerFile, cutoff, percent, minsize, false)
				} else {
					fates(treeFile, xdatFile, markerFile, cutoff, percent, minsize, true)
				}
			},
		},
	}
	app.Run(os.Args)
}
// fates computes the expressed marker genes of the progenitors of sublineages
// and writes two CSV reports: fates.csv and sublineages.csv.
// p/min filter sublineages when all is false; cutoff is the expression
// threshold for calling a marker expressed. Fatal on any I/O error.
func fates(treeFile, xdatFile, markerFile string, cutoff, p float64, min int, all bool) {
	tr, err := os.Open(treeFile)
	if err != nil {
		log.Fatalf("fates: %v", err)
	}
	// BUG fix: defers used to be registered before the error checks,
	// calling Close on a nil *os.File when Open/Create failed.
	defer tr.Close()
	xs, err := devpat.ReadXDat(xdatFile)
	if err != nil {
		log.Fatalf("fates: %v", err)
	}
	mf, err := os.Open(markerFile)
	if err != nil {
		log.Fatalf("fates: %v", err)
	}
	defer mf.Close()
	// One marker gene per line.
	input := bufio.NewScanner(mf)
	markers := make([]string, 0)
	for input.Scan() {
		markers = append(markers, input.Text())
	}
	if err := input.Err(); err != nil {
		log.Fatalf("fates: %v", err)
	}
	tree, err := devpat.ReadTree(tr)
	if err != nil {
		log.Fatalf("fates: reading tree: %v", err)
	}
	var ts []*devpat.Tree
	if all {
		ts = devpat.AllSublineages(tree)
	} else {
		ts = devpat.Sublineages(tree, p, min)
	}
	fates := devpat.Fates(ts, xs, markers, cutoff)
	out, err := os.Create("fates.csv")
	if err != nil {
		log.Fatalf("fates: %v", err)
	}
	defer out.Close()
	devpat.WriteFates(out, fates)
	// Separate variable for the second file; the original reused "out",
	// which made the deferred Close calls easy to misread.
	subOut, err := os.Create("sublineages.csv")
	if err != nil {
		log.Fatalf("fates: %v", err)
	}
	defer subOut.Close()
	if _, err := fmt.Fprintln(subOut, "cell,sublineage"); err != nil {
		log.Fatalf("fates: %v", err)
	}
	// One row per cell, tagged with the index of its sublineage.
	for i, t := range ts {
		for c := range devpat.GetCells(t) {
			if _, err := fmt.Fprintf(subOut, "%s,%d\n", c, i); err != nil {
				log.Fatalf("fates: %v", err)
			}
		}
	}
}
// printSubs reads the tree and prints each filtered sublineage as a
// comma-separated list of cell names, one sublineage per line.
func printSubs(treeFile string, p float64, min int) {
	tr, err := os.Open(treeFile)
	if err != nil {
		log.Fatalf("subs: %v", err)
	}
	// BUG fix: defer used to run before the error check, calling Close on a
	// nil *os.File when Open failed.
	defer tr.Close()
	tree, err := devpat.ReadTree(tr)
	if err != nil {
		log.Fatalf("subs: reading tree: %v", err)
	}
	ts := devpat.Sublineages(tree, p, min)
	for _, t := range ts {
		cs := make([]string, 0)
		for c := range devpat.GetCells(t) {
			cs = append(cs, c)
		}
		fmt.Println(strings.Join(cs, ","))
	}
}
// printAllSubs reads the tree and prints every sublineage (unfiltered) as a
// comma-separated list of cell names, one sublineage per line.
func printAllSubs(treeFile string) {
	tr, err := os.Open(treeFile)
	if err != nil {
		log.Fatalf("subs: %v", err)
	}
	// BUG fix: defer used to run before the error check, calling Close on a
	// nil *os.File when Open failed.
	defer tr.Close()
	tree, err := devpat.ReadTree(tr)
	if err != nil {
		log.Fatalf("subs: reading tree: %v", err)
	}
	ts := devpat.AllSublineages(tree)
	for _, t := range ts {
		cs := make([]string, 0)
		for c := range devpat.GetCells(t) {
			cs = append(cs, c)
		}
		fmt.Println(strings.Join(cs, ","))
	}
}
// codev computes co-development scores between cell types. It extracts the
// sublineages of the tree, writes the cell types of each sublineage to a CSV
// file, reads that file back, and scores the cell-type co-occurrences.
// Fatal on any I/O error.
func codev(treeFile, cellTypeFile string, p float64, min int, notall bool) devpat.CoTypes {
	// Read the tree and get the sublineages.
	tr, err := os.Open(treeFile)
	if err != nil {
		log.Fatalf("codev: %v", err)
	}
	defer tr.Close()
	tree, err := devpat.ReadTree(tr)
	if err != nil {
		log.Fatalf("codev: reading tree %v", err)
	}
	var ts []*devpat.Tree
	if notall {
		ts = devpat.Sublineages(tree, p, min)
	} else {
		ts = devpat.AllSublineages(tree)
	}
	// Get the cell to CellType mapping.
	cr, err := os.Open(cellTypeFile)
	if err != nil {
		log.Fatalf("codev: %v", err)
	}
	defer cr.Close()
	cts, err := devpat.ReadCellTypes(cr)
	if err != nil {
		log.Fatalf("codev: reading cell types: %v", err)
	}
	// Write the CellTypes in each sublineage. Filtered runs get a
	// parameter-stamped file name so different runs don't clobber each other.
	var subFile string
	if notall {
		subFile = fmt.Sprintf("sublineages_%.0f_%d.csv", p, min)
	} else {
		subFile = "sublineages.csv"
	}
	subf, err := os.Create(subFile)
	if err != nil {
		log.Fatalf("codev: %v", err)
	}
	defer subf.Close()
	err = devpat.WriteSublineageCellTypes(subf, ts, cts)
	if err != nil {
		log.Fatalf("codev: writing sublineage cell types: %v", err)
	}
	// Read the CellType sublineages back from the file just written.
	subr, err := os.Open(subFile)
	if err != nil {
		log.Fatalf("codev: %v", err)
	}
	defer subr.Close()
	subs, err := devpat.ReadCellTypeSublineages(subr)
	if err != nil {
		log.Fatalf("codev: reading cell type sublineages: %v", err)
	}
	return devpat.CodevScores(subs)
}
|
package roles
import (
"net/http"
)
// Registry stores links between roles and rights
type Registry struct {
	roles map[Role][]Right // role -> rights granted to that role
}
// NewRegistry returns a new, empty role access registry ready for
// RegisterRole calls.
func NewRegistry() *Registry {
	registry := &Registry{}
	registry.Reset()
	return registry
}
// Router is a http mux
type Router interface {
	Get(pattern string, handlerFn http.HandlerFunc)
}
// Right is an enumeration of possible access levels
type Right uint
// Role represents a set of rights
type Role uint
// Reset clears all known roles
func (rg *Registry) Reset() {
	rg.roles = make(map[Role][]Right)
}
// RegisterRole allows to configure list of rights for the role.
// Registering the same role twice replaces its previous rights.
func (rg *Registry) RegisterRole(role Role, rights ...Right) {
	rg.roles[role] = rights
}
// NewUser returns new role object. The rights are resolved once here, so
// later RegisterRole calls do not affect already-created users.
func (rg *Registry) NewUser(id uint, roles ...Role) *User {
	return &User{Registry: rg, ID: id, rights: rg.GetRights(roles...)}
}
// GetRights returns the concatenation of the rights of all given roles,
// in argument order. Unknown roles are skipped. The result is never nil.
func (rg *Registry) GetRights(roles ...Role) []Right {
	rights := make([]Right, 0)
	for _, role := range roles {
		if granted, known := rg.roles[role]; known {
			rights = append(rights, granted...)
		}
	}
	return rights
}
// CheckRequest helper validates if role behind the request has one of the provided rights.
// The user is taken from the request context via UserFromContext.
func (rg *Registry) CheckRequest(types ...Right) func(*http.Request) bool {
	return func(r *http.Request) bool {
		return UserFromContext(r.Context()).Check(types...)
	}
}
// GuardRequest middleware, checks role access and redirects to "denied" page when access denied.
// On failure the request is redirected to the given URL with 307 Temporary Redirect.
func (rg *Registry) GuardRequest(redirect string, types ...Right) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			ok := UserFromContext(r.Context()).Check(types...)
			if !ok {
				http.Redirect(w, r, redirect, http.StatusTemporaryRedirect)
				return
			}
			next.ServeHTTP(w, r)
		})
	}
}
|
package tdpos
import (
"encoding/json"
"testing"
"time"
"github.com/golang/protobuf/proto"
bmock "github.com/xuperchain/xupercore/bcs/consensus/mock"
lpb "github.com/xuperchain/xupercore/bcs/ledger/xledger/xldgpb"
common "github.com/xuperchain/xupercore/kernel/consensus/base/common"
cctx "github.com/xuperchain/xupercore/kernel/consensus/context"
"github.com/xuperchain/xupercore/kernel/consensus/def"
kmock "github.com/xuperchain/xupercore/kernel/consensus/mock"
)
// getTdposConsensusConf returns a minimal tdpos consensus configuration
// (two initial proposers, no BFT section) as raw JSON.
func getTdposConsensusConf() string {
	conf := `{
		"timestamp": "1559021720000000000",
		"proposer_num": "2",
		"period": "3000",
		"alternate_interval": "3000",
		"term_interval": "6000",
		"block_num": "20",
		"vote_unit_price": "1",
		"init_proposer": {
			"1": ["TeyyPLpp9L7QAcxHangtcHTu7HUZ6iydY", "SmJG3rH2ZzYQ9ojxhbRCPwFiE9y6pD1Co"]
		}
	}`
	return conf
}
// getBFTTdposConsensusConf returns the same tdpos configuration as
// getTdposConsensusConf plus an (empty) bft_config section, enabling BFT.
func getBFTTdposConsensusConf() string {
	conf := `{
		"timestamp": "1559021720000000000",
		"proposer_num": "2",
		"period": "3000",
		"alternate_interval": "3000",
		"term_interval": "6000",
		"block_num": "20",
		"vote_unit_price": "1",
		"init_proposer": {
			"1": ["TeyyPLpp9L7QAcxHangtcHTu7HUZ6iydY", "SmJG3rH2ZzYQ9ojxhbRCPwFiE9y6pD1Co"]
		},
		"bft_config":{}
	}`
	return conf
}
// prepare builds a consensus context backed by a fake ledger and a mock P2P
// network for the given consensus configuration.
func prepare(config string) (*cctx.ConsensusCtx, error) {
	l := kmock.NewFakeLedger([]byte(config))
	cCtx, err := bmock.NewConsensusCtx(l)
	// BUG fix: this error used to be ignored and then overwritten, so a
	// failed context construction led to a nil-pointer panic below.
	if err != nil {
		return nil, err
	}
	cCtx.Ledger = l
	p, ctxN, err := kmock.NewP2P("node")
	if err != nil {
		return nil, err
	}
	p.Init(ctxN)
	cCtx.Network = p
	return cCtx, nil
}
// TestUnmarshalConfig checks that the sample configuration parses.
func TestUnmarshalConfig(t *testing.T) {
	if _, err := buildConfigs([]byte(getTdposConsensusConf())); err != nil {
		t.Error("Config unmarshal err", "err", err)
	}
}
// getConfig wraps a raw consensus JSON string into the ConsensusConfig
// structure expected by NewTdposConsensus (name "tdpos", start height 1).
func getConfig(config string) def.ConsensusConfig {
	return def.ConsensusConfig{
		ConsensusName: "tdpos",
		Config: config,
		StartHeight: 1,
		Index: 0,
	}
}
// TestNewTdposConsensus checks that a consensus instance can be built
// from the sample configuration.
func TestNewTdposConsensus(t *testing.T) {
	cCtx, err := prepare(getTdposConsensusConf())
	if err != nil {
		t.Error("prepare error", "error", err)
		return
	}
	conf := getConfig(getTdposConsensusConf())
	if i := NewTdposConsensus(*cCtx, conf); i == nil {
		t.Error("NewTdposConsensus error", "conf", conf)
	}
}
// TestCompeteMaster exercises the master election at height 3.
func TestCompeteMaster(t *testing.T) {
	cCtx, err := prepare(getTdposConsensusConf())
	if err != nil {
		t.Error("prepare error", "error", err)
		return
	}
	conf := getConfig(getTdposConsensusConf())
	i := NewTdposConsensus(*cCtx, conf)
	if i == nil {
		t.Error("NewTdposConsensus error", "conf", conf)
		return
	}
	if _, _, err = i.CompeteMaster(3); err != nil {
		t.Error("CompeteMaster error", "err", err)
	}
}
// TestCheckMinerMatch exercises CheckMinerMatch against a fabricated
// height-3 block whose ancestors carry minimal tdpos consensus storage.
func TestCheckMinerMatch(t *testing.T) {
	cCtx, err := prepare(getTdposConsensusConf())
	if err != nil {
		t.Error("prepare error", "error", err)
		return
	}
	i := NewTdposConsensus(*cCtx, getConfig(getTdposConsensusConf()))
	if i == nil {
		t.Error("NewTdposConsensus error", "conf", getConfig(getTdposConsensusConf()))
		return
	}
	b3 := kmock.NewBlock(3)
	l, _ := cCtx.Ledger.(*kmock.FakeLedger)
	// Attach term-1 consensus storage to the first three heights.
	l.SetConsensusStorage(1, SetTdposStorage(1, nil))
	l.SetConsensusStorage(2, SetTdposStorage(1, nil))
	l.SetConsensusStorage(3, SetTdposStorage(1, nil))
	c := cCtx.BaseCtx
	// NOTE(review): the result is not asserted; this only checks for panics.
	i.CheckMinerMatch(&c, b3)
}
// TestProcessBeforeMiner expects a timeout error when mining "now" on a
// fresh chain.
func TestProcessBeforeMiner(t *testing.T) {
	cCtx, err := prepare(getTdposConsensusConf())
	if err != nil {
		t.Error("prepare error", "error", err)
		return
	}
	conf := getConfig(getTdposConsensusConf())
	i := NewTdposConsensus(*cCtx, conf)
	if i == nil {
		t.Error("NewTdposConsensus error", "conf", conf)
		return
	}
	_, _, err = i.ProcessBeforeMiner(time.Now().UnixNano())
	if err != ErrTimeoutBlock {
		t.Error("ProcessBeforeMiner error", "err", err)
	}
}
// TestProcessConfirmBlock confirms a fabricated height-3 block after
// attaching minimal consensus storage to heights 1-3.
func TestProcessConfirmBlock(t *testing.T) {
	cCtx, err := prepare(getTdposConsensusConf())
	if err != nil {
		t.Error("prepare error", "error", err)
		return
	}
	i := NewTdposConsensus(*cCtx, getConfig(getTdposConsensusConf()))
	if i == nil {
		t.Error("NewTdposConsensus error", "conf", getConfig(getTdposConsensusConf()))
		return
	}
	b3 := kmock.NewBlock(3)
	l, _ := cCtx.Ledger.(*kmock.FakeLedger)
	l.SetConsensusStorage(1, SetTdposStorage(1, nil))
	l.SetConsensusStorage(2, SetTdposStorage(1, nil))
	l.SetConsensusStorage(3, SetTdposStorage(1, nil))
	if err := i.ProcessConfirmBlock(b3); err != nil {
		t.Error("ProcessConfirmBlock error", "err", err)
	}
}
// SetTdposStorage marshals a minimal ConsensusStorage (fixed block number 3)
// for the given term and optional quorum cert. Returns nil on marshal failure.
func SetTdposStorage(term int64, justify *lpb.QuorumCert) []byte {
	storage := common.ConsensusStorage{
		Justify: justify,
		CurTerm: term,
		CurBlockNum: 3,
	}
	raw, err := json.Marshal(&storage)
	if err != nil {
		return nil
	}
	return raw
}
// justify builds a QuorumCert for the given height whose ProposalMsg embeds
// the marshalled cert of the parent height (when height > 0).
// Returns nil if the parent cert cannot be marshalled.
func justify(height int64) *lpb.QuorumCert {
	var m []byte
	var err error
	if height-1 >= 0 {
		parent := &lpb.QuorumCert{
			ProposalId: []byte{byte(height - 1)},
			ViewNumber: height - 1,
		}
		m, err = proto.Marshal(parent)
		if err != nil {
			return nil
		}
	}
	return &lpb.QuorumCert{
		ProposalId: []byte{byte(height)},
		ViewNumber: height,
		ProposalMsg: m,
	}
}
// TestBFT drives the BFT-enabled tdpos flow end to end on a fake ledger:
// storage repair, miner match check, pre-mining and block confirmation.
func TestBFT(t *testing.T) {
	cCtx, err := prepare(getBFTTdposConsensusConf())
	if err != nil {
		t.Error("prepare error", "error", err)
		return
	}
	i := NewTdposConsensus(*cCtx, getConfig(getBFTTdposConsensusConf()))
	if i == nil {
		t.Error("NewXpoaConsensus error", "conf", getConfig(getBFTTdposConsensusConf()))
		return
	}
	tdpos, _ := i.(*tdposConsensus)
	l, _ := tdpos.election.ledger.(*kmock.FakeLedger)
	tdpos.election.address = "dpzuVdosQrF2kmzumhVeFQZa1aYcdgFpN"
	// Repair the consensus storage of blocks 1 and 2.
	l.SetConsensusStorage(1, SetTdposStorage(1, justify(1)))
	l.SetConsensusStorage(2, SetTdposStorage(2, justify(2)))
	b3 := kmock.NewBlock(3)
	b3.SetTimestamp(1616481092 * int64(time.Millisecond))
	b3.SetProposer("TeyyPLpp9L7QAcxHangtcHTu7HUZ6iydY")
	l.Put(b3)
	l.SetConsensusStorage(3, SetTdposStorage(3, justify(3)))
	b33, _ := l.QueryBlockHeaderByHeight(3)
	tdpos.CheckMinerMatch(&cCtx.BaseCtx, b33)
	tdpos.ProcessBeforeMiner(1616481107 * int64(time.Millisecond))
	err = tdpos.ProcessConfirmBlock(b33)
	if err != nil {
		t.Error("ProcessConfirmBlock error", "err", err)
		return
	}
}
|
package main
import (
"fmt"
"net/http"
"io/ioutil"
"encoding/xml"
"io"
_"github.com/lib/pq"
"database/sql"
"strconv"
)
// Database connection settings and archive directories. The "???" values are
// deployment-specific placeholders that must be filled in before use.
const (
	DB_USER = "???"
	DB_PASSWORD = "???"
	DB_NAME = "???"
	DB_HOST = "localhost"
	CAMINHO = "/arquivamento/???/"
	CAMINHO_ERROR = "/arquivamento/error/"
)
// Exames describes a single exam element of an incoming result XML.
type Exames struct {
	CodigoClinica string `xml:"codigoClinica"`
	Codigo string `xml:"codigo"`
	Descricao string `xml:"descricao"`
	CrmMedicoRealizante string `xml:"crmMedicoRealizante"`
	CrmUfMedicoRealizante string `xml:"crmUfMedicoRealizante"`
	NomeRealizante string `xml:"nomeRealizante"`
	Datahora string `xml:"datahora"`
	Arquivo string `xml:"arquivo"`
}
// Resultados is the root payload of an incoming result: patient data plus
// the list of exams. Xml captures the raw inner XML for archiving.
type Resultados struct {
	AccessionNumber string `xml:"accessionNumber"`
	PacienteId string `xml:"pacienteid"`
	Nome string `xml:"nome"`
	DataNascimento string `xml:"dataNascimento"`
	Sexo string `xml:"sexo"`
	Xml string `xml:",innerxml"`
	Exame []Exames `xml:"exames>exame"`
}
// Retorno is the acknowledgement envelope returned to the caller of the
// integration endpoint.
type Retorno struct {
	XMLName xml.Name `xml:"RetornaProcedimentoResponse"`
	// BUG fix: the original tag read `xml:RetornaProcedimento"` (missing the
	// opening quote); encoding/xml silently ignored the malformed tag and
	// emitted the field name instead of <RetornaProcedimento>.
	RetornaProcedimentoResponse bool `xml:"RetornaProcedimento"`
	Xml string `xml:",innerxml"`
}
// gravar_arquivo persists the raw XML payload of a result under the archive
// directory CAMINHO, using the given name (the accession number) as the file
// name. Write errors are only logged; archiving is best-effort.
func gravar_arquivo(post []byte,arquivo string) {
	// output, err := xml.Marshal(&post)
	err := ioutil.WriteFile(CAMINHO + arquivo, post, 0644)
	if err != nil {
		fmt.Println("Error writing XML to file:", err)
		// return
	}
}
// gravar_banco inserts one exam result into the aux_laudos_pacs table.
// Only the first exam of the result set is stored (original behaviour).
// Errors are reported to stdout; the request is never aborted by a panic.
func gravar_banco(post Resultados) {
	// BUG fix: indexing post.Exame[0] below used to panic on results
	// without any exam element.
	if len(post.Exame) == 0 {
		fmt.Println("Resultado sem exames; nada a gravar")
		return
	}
	dbinfo := fmt.Sprintf("user=%s password=%s dbname=%s sslmode=disable host=%s",
		DB_USER, DB_PASSWORD, DB_NAME, DB_HOST)
	db, err := sql.Open("postgres", dbinfo)
	if err != nil {
		fmt.Println("Erro ao conectar banco:", err)
		return
	}
	// BUG fix: the connection pool was never closed; every call leaked one.
	defer db.Close()
	cd_exame, _ := strconv.Atoi(post.Exame[0].Codigo)
	_, err = db.Exec(`insert into aux_laudos_pacs(accession_number,
laudo_datahora,
paciente_id,
laudo_conteudo,
medico_responsavel_conselho_numero,
medico_responsavel_conselho_uf,
medico_responsavel_nome,
ds_paciente,
ds_procedimento,
cd_exame)
values($1,$2,$3,$4,$5,$6,$7,$8,$9,$10);`,
		post.AccessionNumber,
		post.Exame[0].Datahora,
		post.PacienteId,
		post.Exame[0].Arquivo,
		post.Exame[0].CrmMedicoRealizante,
		post.Exame[0].CrmUfMedicoRealizante,
		post.Exame[0].NomeRealizante,
		post.Nome,
		post.Exame[0].Descricao,
		cd_exame)
	if err != nil {
		// BUG fix: a failed insert used to panic and kill the handler;
		// report it like the other errors instead.
		fmt.Println("Erro ao gravar banco:", err)
	}
}
func hello(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello!")
}
// integracao receives result XML documents over HTTP, archives each
// <Resultados> element to disk, stores it in the database, and answers with
// a RetornaProcedimentoResponse acknowledgement.
func integracao(w http.ResponseWriter, r *http.Request) {
	// fmt.Fprintf(w, "World!")
	// ffmt.Println(r)
	// body, _ := ioutil.ReadAll(r.Body)
	// fmt.Println(string(body))
	decoder := xml.NewDecoder(r.Body)
	// Stream the request token by token, handling each Resultados element.
	for {
		t, err := decoder.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			// NOTE(review): decode errors are only logged; the loop keeps
			// going and may spin on a persistent error — confirm intent.
			fmt.Println("Error decoding XML into tokens:", err)
			// return
		}
		switch se := t.(type) {
		case xml.StartElement:
			if se.Name.Local == "Resultados" {
				var result Resultados
				// NOTE(review): DecodeElement error is ignored; a partially
				// decoded result would still be archived and stored.
				decoder.DecodeElement(&result, &se)
				fmt.Println(result.Xml)
				gravar_arquivo([]byte(result.Xml),result.AccessionNumber)
				gravar_banco(result)
			}
		}
	}
	// var retorno Retorno;
	//retorno.RetornaProcedimentoResponse = true
	// Acknowledge the submission; a copy of the reply is kept on disk.
	retorno := Retorno{RetornaProcedimentoResponse : true,}
	output,err := xml.Marshal(&retorno)
	if err != nil {
		fmt.Println("Error marshalling to XML:", err)
	}
	err = ioutil.WriteFile("post-retorno.xml", output, 0644)
	if err != nil {
		fmt.Println("Error writing XML to file:", err)
	}
	fmt.Fprint(w,string(output))
	//xml.Unmarshal(body, &result)
	// fmt.Println(result)
}
// main registers the HTTP handlers and serves on port 80.
func main() {
	/* server := http.Server{
		Addr: "127.0.0.1:8081",
	}
	*/
	http.HandleFunc("/integracao/integracao.php", hello)
	// NOTE(review): "?????" is a placeholder route for the integration
	// endpoint — fill in the real path before deploying.
	http.HandleFunc("?????", integracao)
	http.ListenAndServe(":80", nil)
	// server.ListenAndServe()
}
|
// +build go1.7
package apns2
import (
"context"
"net/http"
"net/http/httputil"
log "github.com/sirupsen/logrus"
)
// A Context carries a deadline, a cancellation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
//
// It aliases context.Context; this file is only built on go1.7+ (see the
// build tag above), where the stdlib context package is available.
type Context interface {
	context.Context
}
// requestWithContext performs req via the client's HTTPClient, attaching ctx
// (when non-nil) so the request can be cancelled. Request and response dumps
// are emitted at debug level.
func (c *Client) requestWithContext(ctx Context, req *http.Request) (*http.Response, error) {
	if ctx != nil {
		req = req.WithContext(ctx)
	}
	// NOTE(review): the dumps below run even when debug logging is disabled,
	// and DumpResponse(resp, true) buffers the whole body — confirm this is
	// acceptable for large responses.
	reqDump, _ := httputil.DumpRequestOut(req, true)
	resp, err := c.HTTPClient.Do(req)
	if err != nil {
		return nil, err
	}
	respDump, _ := httputil.DumpResponse(resp, true)
	log.WithFields(log.Fields{
		"obj": log.Fields{
			"request": string(reqDump),
			"response": string(respDump),
		},
	}).Debugf("send request to %s", req.URL.Host)
	return resp, nil
}
|
package vis
/*MyName is visible from main.go because the
first letter of the variable is capitalized, unlike
yourName which is not visible.*/
var MyName = "Raffi"
// yourName is unexported and therefore only accessible within this package.
var yourName = "Test"
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"io"
"strings"
"text/template"
"github.com/google/gapid/core/text/reflow"
)
// EntryParameter represents a parameter for a specific stringtable entry.
type EntryParameter struct {
	Identifier string // Name of the parameter.
	Type string // Type of the parameter. Default is "string".
}
// Entry represents a single stringtable entry.
type Entry struct {
	Key string // Name of the stringtable key.
	Parameters []EntryParameter // List of parameters.
}
// EntryList is a sortable slice of entries (implements sort.Interface,
// ordered by Key).
type EntryList []Entry
// Len returns the number of elements in the list.
func (l EntryList) Len() int { return len(l) }
// Less returns true if the i'th element's key is less than the j'th element's
// key.
func (l EntryList) Less(i, j int) bool { return l[i].Key < l[j].Key }
// Swap switches the i'th and j'th element.
func (l EntryList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
// tmplCtx is the template execution context.
type tmplCtx struct {
	Entries []Entry
	Package string
	Copyright string // Generated-file banner prepended to the output.
}
// Execute executes the template with the given list of entries.
// templateRoutine names the template, templateStr is its body, pkg becomes
// the generated package name, and the result is reflowed (tab-indented)
// into w. Returns any parse or execution error.
func Execute(templateRoutine, templateStr, pkg string, entries []Entry, w io.Writer) error {
	funcs := map[string]interface{}{
		"ToCamel": underscoredToCamel,
	}
	tmpl, err := template.New(templateRoutine).Funcs(funcs).Parse(templateStr)
	if err != nil {
		return err
	}
	// Banner placed at the top of every generated file.
	copyright := `////////////////////////////////////////////////////////////////////////////////
// Do not modify!
// Generated by stringgen
////////////////////////////////////////////////////////////////////////////////
`
	ctx := tmplCtx{
		Entries: entries,
		Package: pkg,
		Copyright: copyright,
	}
	r := reflow.New(w)
	r.Indent = "\t"
	defer r.Flush()
	return tmpl.Execute(r, ctx)
}
func underscoredToCamel(s string) string {
parts := strings.Split(s, "_")
for i := range parts {
parts[i] = strings.Title(strings.ToLower(parts[i]))
}
return strings.Join(parts, "")
}
|
package solutions
// candy returns the minimum number of candies needed so that every child
// gets at least one and any child rated higher than a neighbour gets more
// than that neighbour (LeetCode 135). Single left-to-right pass tracking the
// length of the current ascending run and resolving descending runs eagerly.
func candy(ratings []int) int {
	n := len(ratings)
	if n == 0 {
		return 0
	}
	total := 1
	up := 1 // candies handed to the previous child on a non-descending run
	i := 1
	for i < n {
		switch {
		case ratings[i] > ratings[i-1]:
			up++
			total += up
			i++
		case ratings[i] == ratings[i-1]:
			// Equal ratings reset the run; one candy suffices.
			up = 1
			total++
			i++
		default:
			// Walk the whole descending run, giving 1..down candies backwards.
			down := 0
			for i < n && ratings[i-1] > ratings[i] {
				down++
				total += down
				i++
			}
			// The run's head must exceed the descending tail as well.
			if down >= up {
				total += down - up + 1
			}
			up = 1
		}
	}
	return total
}
|
package main
import (
"bytes"
"encoding/base64"
"flag"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/google/martian/v3"
martianLog "github.com/google/martian/v3/log"
"github.com/google/martian/v3/mitm"
log "github.com/sirupsen/logrus"
)
// Proxy configuration.
var (
	port = flag.Int("port", 8888, "listen http port")
	// secret holds base64-decoded key material consumed by encrypt/decrypt.
	// NOTE(review): shipping a static secret in the binary is insecure —
	// confirm this is intentional for this tool.
	secret,_ = base64.StdEncoding.DecodeString("Nlc2SUNTTUFRVElBTENVSTcySFQ0R0tXR1NCUVNNQ0U=")
)
// init silences martian's internal logging and parses the command line.
// NOTE(review): flag.Parse in init runs before main and before other
// packages could register flags — confirm this ordering is intended.
func init() {
	martianLog.SetLevel(martianLog.Error)
	flag.Parse()
}
// main starts a local MITM HTTP proxy that transparently encrypts outgoing
// requests (T) and decrypts incoming responses (R), then blocks until
// interrupted.
func main() {
	p := martian.NewProxy()
	defer p.Close()
	// BUG fix: the errors from NewAuthority/NewConfig were ignored; a failed
	// CA setup would previously crash later with an opaque nil dereference.
	ca, privateKey, err := mitm.NewAuthority("name", "org", 24*365*time.Hour)
	if err != nil {
		log.Fatal(err)
	}
	conf, err := mitm.NewConfig(ca, privateKey)
	if err != nil {
		log.Fatal(err)
	}
	p.SetMITM(conf)
	//proxy, _ := url.Parse("http://localhost:8080")
	//p.SetDownstreamProxy(proxy)
	l, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port))
	if err != nil {
		log.Fatal(err)
	}
	log.Infof("starting listen on %s", l.Addr().String())
	p.SetRequestModifier(new(T))
	p.SetResponseModifier(new(R))
	go p.Serve(l)
	// Block until SIGINT/SIGTERM so the proxy keeps serving.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
	<-sigCh
}
// R rewrites proxied responses: the upstream body is base64-decoded and
// decrypted before being handed back to the client.
type R struct {
	martian.ResponseModifier
}
// emptyBody replaces the response body with an empty one.
func emptyBody(resp *http.Response) {
	resp.Body = http.NoBody
	resp.ContentLength = 0
}

// ModifyResponse decodes the upstream payload ("<12-byte prefix><base64>")
// and replaces the response body with the decrypted plaintext. Any body that
// cannot be read, decoded or decrypted is replaced with an empty body.
func (R) ModifyResponse(resp *http.Response) error {
	otp := []byte(getotp())
	if resp.Body == nil {
		emptyBody(resp)
		return nil
	}
	b, err := ioutil.ReadAll(resp.Body) //Read html
	// BUG fix: the body is now always closed (early-error paths used to leak
	// the connection).
	resp.Body.Close()
	fmt.Println("encResp: " + string(b))
	// BUG fix: the original sliced string(b)[12:] after only checking for an
	// empty string, panicking on bodies of 1-11 bytes.
	if err != nil || len(b) <= 12 {
		emptyBody(resp)
		return nil
	}
	b64Resp, err := base64.StdEncoding.DecodeString(string(b)[12:])
	if err != nil || len(b64Resp) == 0 {
		emptyBody(resp)
		return nil
	}
	rawResp, err := decrypt(b64Resp, otp)
	if err != nil || len(rawResp) == 0 {
		emptyBody(resp)
		return nil
	}
	fmt.Println("rawResp: " + string(rawResp))
	resp.Body = ioutil.NopCloser(bytes.NewReader(rawResp))
	resp.ContentLength = int64(len(rawResp))
	return nil
}
// T rewrites proxied requests: body, URI and cookie are encrypted and
// base64-encoded before being sent upstream.
type T struct {
	martian.RequestModifier
}
// ModifyRequest replaces the outgoing request with an encrypted variant:
// the body becomes "%25PDF-1.7+ "+base64(ciphertext), the path is encrypted
// into a single URL-safe base64 segment, and the original cookies travel in
// a fake _gh_sess cookie. Headers are otherwise carried over unchanged.
// NOTE(review): every error below is silently discarded (ReadAll, encrypt,
// NewRequest, url.Parse) and req.Body is never closed — confirm that this
// best-effort behaviour is acceptable for this tool.
func (T) ModifyRequest(req *http.Request) error {
	body, _ := ioutil.ReadAll(req.Body)
	fmt.Println("rawBody: "+string(body))
	u, _ := url.Parse(req.URL.String())
	hostu := u.Scheme+"://"+u.Host+"/"
	fmt.Println("rawHost: "+hostu)
	fmt.Println("rawURI: "+u.Path[1:])
	// Disguise the encrypted body as a PDF payload.
	cipher,_ := encrypt(body,[]byte(getotp()))
	encBody := "%25PDF-1.7+ " + base64.StdEncoding.EncodeToString(cipher)
	fmt.Println("encBody: "+encBody)
	uri,_ := encrypt([]byte(u.Path[1:]),[]byte(getotp()))
	encodeuri := base64.RawURLEncoding.EncodeToString(uri)
	fmt.Println("encURL: "+hostu + encodeuri)
	newReq, _ := http.NewRequest(req.Method, hostu + encodeuri, strings.NewReader(encBody))
	newReq.URL , _ = url.Parse(hostu + encodeuri)
	newReq.Header = req.Header
	// Smuggle the original cookies inside a plausible-looking session cookie.
	rawCookie := req.Header.Get("Cookie")
	fmt.Println("rawCookies: " + rawCookie)
	encCookieByte,_ := encrypt([]byte(rawCookie),[]byte(getotp()))
	encCookie := base64.RawURLEncoding.EncodeToString(encCookieByte)
	fmt.Println("encCookie: tz=America%2FLos_Angeles; _gh_sess=" + encCookie + "\n\n")
	newReq.Header.Set("Cookie", "tz=America%2FLos_Angeles; _gh_sess="+encCookie)
	//newReq.Header.Set("Url", req.URL.String())
	*req = *newReq
	return nil
}
|
package main
import (
"fmt"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mysql"
)
//User is the data model; Name falls back to 'noname' when left blank.
type User struct {
	ID int64
	Name string `gorm:"default:'noname'"`
	Age int64
}
//To store a zero value in a field that has a default, either:
//Option 1: use a pointer
// type User struct {
// 	ID   int64
// 	Name *string `gorm:"default:'noname'"`
// 	Age  int64
// }
// u := User{Name: new(string), Age: 18}
// db.Create(&u)
//
//Option 2: use a Scanner/Valuer
// type User struct {
// 	ID   int64
// 	Name sql.NullString `gorm:"default:'noname'"`
// 	Age  int64
// }
// u := User{Name: sql.NullString{String: "", Valid: true}, Age: 18}
// db.Create(&u)
// main demonstrates gorm basics: connect, auto-migrate the User model, and
// create two rows (one relying on the Name default).
func main() {
	//Connect to the database.
	db, err := gorm.Open("mysql", "root:123456@(127.0.0.1:3306)/mytest?charset=utf8mb4&parseTime=True&loc=Local")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	//Map the model onto the corresponding database table.
	db.AutoMigrate(&User{})
	//Rows to create.
	u1 := User{Name: "tom", Age: 18}
	u2 := User{Age: 33}
	//Create the rows.
	//NewRecord reports whether the primary key is blank: true means blank,
	//false means it has already been set.
	fmt.Println("创建前主键的状态=", db.NewRecord(u1))
	db.Create(&u1)
	fmt.Println("创建后主键的状态=", db.NewRecord(u1))
	db.Create(&u2)
}
|
package mill
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/google/skylark"
"github.com/windmilleng/pets/internal/proc"
"github.com/windmilleng/pets/internal/school"
"github.com/windmilleng/pets/internal/service"
"github.com/windmilleng/wmclient/pkg/dirs"
)
// TestPrint verifies that a Petsfile print() call writes to stdout.
func TestPrint(t *testing.T) {
	f := newPetFixture(t)
	defer f.tearDown()
	petsitter, stdout := f.petsitter, f.stdout
	file := filepath.Join(f.dir, "Petsfile")
	// BUG fix: the WriteFile error was silently ignored, which would make
	// later assertion failures misleading if the fixture dir is unwritable.
	if err := ioutil.WriteFile(file, []byte(`print("hello")`), os.FileMode(0777)); err != nil {
		t.Fatal(err)
	}
	err := petsitter.ExecFile(file)
	if err != nil {
		t.Fatal(err)
	}
	out := stdout.String()
	if out != "hello\n" {
		t.Errorf("Expected 'hello'. Actual: %s", out)
	}
}
// TestPrintFail verifies that referencing an undefined variable in a
// Petsfile yields an evaluation error and produces no stdout output.
func TestPrintFail(t *testing.T) {
	f := newPetFixture(t)
	defer f.tearDown()
	petsitter, stdout := f.petsitter, f.stdout
	file := filepath.Join(f.dir, "Petsfile")
	ioutil.WriteFile(file, []byte(`print(hello)`), os.FileMode(0777))
	err := petsitter.ExecFile(file)
	// BUG FIX: the original called err.Error() unconditionally, which would
	// panic (nil deref) if ExecFile unexpectedly succeeded.
	if err == nil {
		t.Fatalf("Expected 'undefined: hello' error, got nil. Stdout: %s", stdout.String())
	}
	out := stdout.String()
	if !(out == "" && strings.Contains(err.Error(), "undefined: hello")) {
		t.Errorf("Expected 'hello'. Actual: %s. Err: %s", out, err)
	}
}
// TestRun verifies that a Petsfile run() command executes and its output
// is captured on stdout.
func TestRun(t *testing.T) {
	f := newPetFixture(t)
	defer f.tearDown()

	script := filepath.Join(f.dir, "Petsfile")
	ioutil.WriteFile(script, []byte(`run("echo meow")`), os.FileMode(0777))

	if err := f.petsitter.ExecFile(script); err != nil {
		t.Fatal(err)
	}

	if got := f.stdout.String(); got != "meow\n" {
		t.Errorf("Expected 'meow'. Actual: %s", got)
	}
}
// TestDryRun verifies that in dry mode run() does not execute the command
// (stdout stays empty) and instead reports what it would run on stderr.
func TestDryRun(t *testing.T) {
	f := newPetFixture(t)
	f.petsitter.DryMode = true
	defer f.tearDown()
	petsitter, stdout, stderr := f.petsitter, f.stdout, f.stderr
	file := filepath.Join(f.dir, "Petsfile")
	ioutil.WriteFile(file, []byte(`run("echo meow")`), os.FileMode(0777))
	err := petsitter.ExecFile(file)
	if err != nil {
		t.Fatal(err)
	}
	out := stdout.String()
	if out != "" {
		// BUG FIX: message said "Expected 'meow'" although the test expects
		// empty stdout in dry mode.
		t.Errorf("Expected empty stdout in dry mode. Actual: %s", out)
	}
	er := stderr.String()
	if !strings.Contains(er, "meow") {
		t.Errorf("Expected 'pets ran \"echo meow\"'. Actual: %s", f.stderr.String())
	}
}
// TestStart verifies that start() returns a process descriptor whose
// printed form contains a pid.
func TestStart(t *testing.T) {
	f := newPetFixture(t)
	defer f.tearDown()
	petsitter, stdout := f.petsitter, f.stdout
	file := filepath.Join(f.dir, "Petsfile")
	ioutil.WriteFile(file, []byte(`print(start("sleep 10"))`), os.FileMode(0777))
	err := petsitter.ExecFile(file)
	if err != nil {
		t.Fatal(err)
	}
	out := stdout.String()
	if !strings.Contains(out, "pid") {
		// BUG FIX: message said "Expected 'meow'" (copy-paste); the test
		// actually expects output containing "pid".
		t.Errorf("Expected output containing 'pid'. Actual: %s", out)
	}
}
// TestStartLogs verifies that output of a process launched by start() is
// captured in the proc filesystem's log file for the empty service key.
func TestStartLogs(t *testing.T) {
	f := newPetFixture(t)
	defer f.tearDown()
	petsitter := f.petsitter
	file := filepath.Join(f.dir, "Petsfile")
	ioutil.WriteFile(file, []byte(`start("echo meow")`), os.FileMode(0777))
	err := petsitter.ExecFile(file)
	if err != nil {
		t.Fatal(err)
	}
	// Give the started process a moment to run and flush its output.
	// NOTE(review): fixed sleep is inherently flaky on slow machines.
	time.Sleep(10 * time.Millisecond)
	contents, err := f.procfs.ReadLogFile(service.Key{})
	if err != nil {
		t.Fatal(err)
	}
	if contents != "meow\n" {
		t.Errorf("Expected 'meow'. Actual: %s", contents)
	}
}
// TestStartLogsInService verifies that when a registered service is brought
// up, the process output is logged under that service's key.
func TestStartLogsInService(t *testing.T) {
	f := newPetFixture(t)
	defer f.tearDown()
	petsitter := f.petsitter
	file := filepath.Join(f.dir, "Petsfile")
	// nc -lk PORT
	// is unix-speak for "Create the dumbest possible server that just listens on PORT"
	ioutil.WriteFile(file, []byte(`
def start_local():
  return service(start("echo meow; nc -lk 28234"), "localhost", 28234)
register("frontend", "local", start_local)`), os.FileMode(0777))
	err := petsitter.ExecFile(file)
	if err != nil {
		t.Fatal(err)
	}
	// Bring the registered service up; this invokes start_local.
	school := f.petsitter.School
	key := service.NewKey("frontend", "local")
	_, err = school.UpByKey(key)
	if err != nil {
		t.Fatal(err)
	}
	// Let the echoed output reach the log file before reading it.
	time.Sleep(10 * time.Millisecond)
	contents, err := f.procfs.ReadLogFile(key)
	if err != nil {
		t.Fatal(err)
	}
	if contents != "meow\n" {
		t.Errorf("Expected 'meow'. Actual: %s", contents)
	}
}
// TestLoadGoGet verifies that a go-get:// load binds the fetched package's
// directory to the requested variable name.
func TestLoadGoGet(t *testing.T) {
	f := newPetFixture(t)
	defer f.tearDown()

	script := filepath.Join(f.dir, "Petsfile")
	ioutil.WriteFile(script, []byte(`
load("go-get://github.com/windmilleng/blorg-frontend", blorg_fe_dir="dir")
print(blorg_fe_dir)
`), os.FileMode(0777))

	if err := f.petsitter.ExecFile(script); err != nil {
		t.Fatal(err)
	}

	if got := f.stdout.String(); !strings.Contains(got, "github.com/windmilleng/blorg-frontend") {
		t.Errorf("Expected import 'blorg-frontend'. Actual: %s", got)
	}
}
// TestLoadRelative verifies that load() resolves a relative directory and
// imports the requested symbol from the Petsfile inside it.
func TestLoadRelative(t *testing.T) {
	f := newPetFixture(t)
	defer f.tearDown()

	outer := filepath.Join(f.dir, "Petsfile")
	ioutil.WriteFile(outer, []byte(`
load("inner", "random_number")
print(random_number())
`), os.FileMode(0777))

	inner := filepath.Join(f.dir, "inner", "Petsfile")
	os.MkdirAll(filepath.Dir(inner), os.FileMode(0777))
	ioutil.WriteFile(inner, []byte(`
def random_number():
  return 4
`), os.FileMode(0777))

	if err := f.petsitter.ExecFile(outer); err != nil {
		t.Fatal(err)
	}

	if got := f.stdout.String(); got != "4\n" {
		t.Errorf("Expected print '4'. Actual: %s", got)
	}
}
// If we load a file twice (which is easy to do when you have dependency diamonds),
// we should only execute it once.
func TestLoadTwice(t *testing.T) {
	f := newPetFixture(t)
	defer f.tearDown()
	petsitter, stdout := f.petsitter, f.stdout
	file := filepath.Join(f.dir, "Petsfile")
	// The outer file loads "inner" twice with different symbols.
	ioutil.WriteFile(file, []byte(`
load("inner", "random_number")
load("inner", "dir")
print(random_number())
`), os.FileMode(0777))
	innerFile := filepath.Join(f.dir, "inner", "Petsfile")
	os.MkdirAll(filepath.Dir(innerFile), os.FileMode(0777))
	// The top-level print('loaded') executes once per evaluation of the
	// inner file, so a single "loaded" proves the file ran exactly once.
	ioutil.WriteFile(innerFile, []byte(`
def random_number():
  return 4
print('loaded')
`), os.FileMode(0777))
	err := petsitter.ExecFile(file)
	if err != nil {
		t.Fatal(err)
	}
	out := stdout.String()
	if out != "loaded\n4\n" {
		t.Errorf("Expected print 'loaded\n4'. Actual: %s", out)
	}
}
// If there's a cycle between load graphs, we should detect this
// and be able to print the cycle.
func TestLoadCycle(t *testing.T) {
	f := newPetFixture(t)
	defer f.tearDown()

	outer := filepath.Join(f.dir, "Petsfile")
	ioutil.WriteFile(outer, []byte(`
load("inner", "dir")
`), os.FileMode(0777))

	inner := filepath.Join(f.dir, "inner", "Petsfile")
	os.MkdirAll(filepath.Dir(inner), os.FileMode(0777))
	ioutil.WriteFile(inner, []byte(`
load("../", "dir")
`), os.FileMode(0777))

	err := f.petsitter.ExecFile(outer)
	evalErr, ok := err.(*skylark.EvalError)
	if !ok || !strings.Contains(evalErr.Error(), "cycle in load graph detected") {
		t.Errorf("Expected EvalError with cycle. Actual: %v", err)
	}
}
// TestLoadRelativeWorkingDirectory verifies that run() executes relative to
// the directory of the Petsfile that defines it: the outer run("pwd")
// prints the fixture dir, and the loaded inner_pwd() prints dir/inner.
func TestLoadRelativeWorkingDirectory(t *testing.T) {
	f := newPetFixture(t)
	defer f.tearDown()
	petsitter, stdout := f.petsitter, f.stdout
	file := filepath.Join(f.dir, "Petsfile")
	ioutil.WriteFile(file, []byte(`
load("inner", "inner_pwd")
run("pwd")
inner_pwd()
`), os.FileMode(0777))
	innerFile := filepath.Join(f.dir, "inner", "Petsfile")
	os.MkdirAll(filepath.Dir(innerFile), os.FileMode(0777))
	ioutil.WriteFile(innerFile, []byte(`
def inner_pwd():
  run("pwd")
`), os.FileMode(0777))
	err := petsitter.ExecFile(file)
	if err != nil {
		t.Fatal(err)
	}
	out := stdout.String()
	// Resolve symlinks because pwd prints the resolved path (e.g. /tmp is a
	// symlink on macOS), while f.dir may contain the unresolved one.
	dir, err := filepath.EvalSymlinks(f.dir)
	if err != nil {
		t.Fatal(err)
	}
	expected := fmt.Sprintf("%s\n%s/inner\n", dir, dir)
	if out != expected {
		t.Errorf("Expected:\n%s\n\nActual:\n%s", expected, out)
	}
}
// TestRegister verifies that register() wires up a provider that UpByKey can
// invoke, and that the service dict exposes its host.
func TestRegister(t *testing.T) {
	f := newPetFixture(t)
	// BUG FIX: this test never tore down the fixture, leaking the temp dir
	// and the long-running "nc -lk" process (every sibling test defers it).
	defer f.tearDown()
	petsitter := f.petsitter
	file := filepath.Join(f.dir, "Petsfile")
	ioutil.WriteFile(file, []byte(`
def start_local():
  result = service(start("nc -lk 28234"), "localhost", 28234)
  print(result["host"])
  return result
register("blorg-frontend", "local", start_local)
`), os.FileMode(0777))
	err := petsitter.ExecFile(file)
	if err != nil {
		t.Fatal(err)
	}
	school := f.petsitter.School
	key := service.NewKey("blorg-frontend", "local")
	_, err = school.UpByKey(key)
	if err != nil {
		t.Fatal(err)
	}
	f.assertHasServiceKey(key)
	out := f.stdout.String()
	if !strings.Contains(out, "localhost:28234") {
		t.Errorf("Expected 'localhost:28234'. Actual: %s", out)
	}
}
// TestRegisterTwice verifies that registering the same (service, tier) pair
// twice fails with a duplicate-provider error that cites the first
// registration's file and line.
func TestRegisterTwice(t *testing.T) {
	f := newPetFixture(t)
	// BUG FIX: missing teardown leaked the fixture's temp directory.
	defer f.tearDown()
	petsitter := f.petsitter
	file := filepath.Join(f.dir, "Petsfile")
	ioutil.WriteFile(file, []byte(`
def start_local():
  return service(start("nc -lk 8080"), "localhost", 8080)
register("blorg-frontend", "local", start_local)
register("blorg-frontend", "local", start_local)
`), os.FileMode(0777))
	err := petsitter.ExecFile(file)
	// The first register() is on line 5 of the script above (line 1 is the
	// leading blank line of the raw string).
	if err == nil ||
		!strings.Contains(err.Error(), "Duplicate provider") ||
		!strings.Contains(err.Error(), fmt.Sprintf("First: %s/Petsfile:5", f.dir)) {
		t.Errorf("Expected duplicate provider error. Actual: %v", err)
	}
}
// TestHealthCheck verifies that bringing up a service whose process exits
// without listening on its declared port fails with a health-check error
// that includes the process's log output.
func TestHealthCheck(t *testing.T) {
	f := newPetFixture(t)
	// BUG FIX: missing teardown leaked the fixture's temp directory.
	defer f.tearDown()
	petsitter := f.petsitter
	file := filepath.Join(f.dir, "Petsfile")
	ioutil.WriteFile(file, []byte(`
def start_local():
  return service(start("echo meow"), "localhost", 21345)
register("blorg-frontend", "local", start_local)
`), os.FileMode(0777))
	err := petsitter.ExecFile(file)
	if err != nil {
		t.Fatal(err)
	}
	school := f.petsitter.School
	key := service.NewKey("blorg-frontend", "local")
	_, err = school.UpByKey(key)
	if err == nil ||
		!strings.Contains(err.Error(), "Process died without opening a network connection") ||
		!strings.Contains(err.Error(), "blorg-frontend-local logs") ||
		!strings.Contains(err.Error(), "meow") {
		t.Errorf("Expected health check error. Actual: %v", err)
	}
}
// petFixture bundles everything a mill test needs: the Petsitter under
// test, captured stdout/stderr buffers, a scratch directory for Petsfiles,
// and the proc filesystem that records started processes.
type petFixture struct {
	t         *testing.T
	petsitter *Petsitter
	stdout    *bytes.Buffer
	stderr    *bytes.Buffer
	dir       string // temp dir; removed by tearDown
	procfs    proc.ProcFS
}
// newPetFixture builds a fully-wired petFixture with a fresh temp directory,
// captured stdout/stderr, and a Petsitter in non-dry mode. Callers should
// defer f.tearDown().
func newPetFixture(t *testing.T) *petFixture {
	stdout := &bytes.Buffer{}
	stderr := &bytes.Buffer{}
	// BUG FIX: the original discarded the TempDir error; on failure dir
	// would be "" and later writes would land in the working directory.
	dir, err := ioutil.TempDir("", t.Name())
	if err != nil {
		t.Fatal(err)
	}
	wmDir := dirs.NewWindmillDirAt(dir)
	procfs, err := proc.NewProcFSWithDir(wmDir)
	if err != nil {
		t.Fatal(err)
	}
	drymode := false
	runner := proc.NewRunner(procfs)
	school := school.NewPetSchool(procfs)
	return &petFixture{
		t:         t,
		petsitter: NewPetsitter(stdout, stderr, runner, procfs, school, drymode),
		stdout:    stdout,
		stderr:    stderr,
		dir:       dir,
		procfs:    procfs,
	}
}
// assertHasServiceKey fails the test unless some running process recorded in
// the proc filesystem carries the given service key.
func (f *petFixture) assertHasServiceKey(key service.Key) {
	procs, err := f.procfs.ProcsFromFS()
	if err != nil {
		f.t.Fatal(err)
	}
	found := false
	for _, p := range procs {
		if p.ServiceKey() == key {
			found = true
			break
		}
	}
	if !found {
		f.t.Errorf("Service key not found in running service list: %+v", key)
	}
}
// tearDown kills every process the fixture started and removes its temp
// directory. Intended to be deferred immediately after newPetFixture.
func (f *petFixture) tearDown() {
	f.procfs.KillAllForTesting()
	os.RemoveAll(f.dir)
}
|
package ravendb
import (
"bytes"
"io"
"net/http"
"strings"
)
// Note: the implementation details are different from Java
// We take advantage of a pipe: a read end is passed as io.Reader
// to the request. A write end is what we use to write to the request.

// compile-time check that BulkInsertCommand implements RavenCommand
var _ RavenCommand = &BulkInsertCommand{}

// BulkInsertCommand describes a bulk insert command
type BulkInsertCommand struct {
	RavenCommandBase

	stream         io.Reader // read end of the pipe; becomes the request body
	id             int64     // server-assigned operation id
	useCompression bool      // TODO: compression is not implemented yet

	Result *http.Response
}
// NewBulkInsertCommand returns new BulkInsertCommand
func NewBulkInsertCommand(id int64, stream io.Reader, useCompression bool) *BulkInsertCommand {
	return &BulkInsertCommand{
		RavenCommandBase: NewRavenCommandBase(),
		stream:           stream,
		id:               id,
		useCompression:   useCompression,
	}
}
// CreateRequest builds the POST request for this node's bulk_insert
// endpoint, streaming the pipe's read end as the request body.
func (c *BulkInsertCommand) CreateRequest(node *ServerNode) (*http.Request, error) {
	url := node.URL + "/databases/" + node.Database + "/bulk_insert?id=" + i64toa(c.id)
	// TODO: implement compression. It must be attached to the writer
	//message.setEntity(useCompression ? new GzipCompressingEntity(_stream) : _stream)
	return newHttpPostReader(url, c.stream)
}
// SetResponse is part of the RavenCommand interface; bulk insert never
// parses a response body, so this is intentionally unimplemented.
func (c *BulkInsertCommand) SetResponse(response []byte, fromCache bool) error {
	return newNotImplementedError("Not implemented")
}
// Send delegates to the base command's Send, returning the response or the
// transport error unchanged.
func (c *BulkInsertCommand) Send(client *http.Client, req *http.Request) (*http.Response, error) {
	rsp, err := c.GetBase().Send(client, req)
	if err != nil {
		// TODO: don't know how/if this translates to Go
		// c.stream.errorOnRequestStart(err)
		return nil, err
	}
	return rsp, nil
}
// BulkInsertOperation represents bulk insert operation
type BulkInsertOperation struct {
	generateEntityIDOnTheClient *generateEntityIDOnTheClient
	requestExecutor             *RequestExecutor

	bulkInsertExecuteTask *completableFuture // completes when the server-side command finishes

	reader        *io.PipeReader // read end, consumed by the HTTP request
	currentWriter *io.PipeWriter // write end, fed by Store/StoreWithID
	first         bool           // true until the opening '[' has been written
	operationID   int64          // -1 until WaitForID resolves it

	useCompression bool

	concurrentCheck atomicInteger // guards against concurrent Store calls

	conventions *DocumentConventions
	err         error // sticky first error; later calls short-circuit on it
	Command     *BulkInsertCommand
}
// NewBulkInsertOperation returns new BulkInsertOperation
func NewBulkInsertOperation(database string, store *DocumentStore) *BulkInsertOperation {
	executor := store.GetRequestExecutor(database)
	genID := func(entity interface{}) (string, error) {
		return executor.GetConventions().GenerateDocumentID(database, entity)
	}

	pipeReader, pipeWriter := io.Pipe()

	return &BulkInsertOperation{
		conventions:                 store.GetConventions(),
		requestExecutor:             executor,
		generateEntityIDOnTheClient: newGenerateEntityIDOnTheClient(executor.GetConventions(), genID),
		reader:                      pipeReader,
		currentWriter:               pipeWriter,
		operationID:                 -1,
		first:                       true,
	}
}
// throwBulkInsertAborted wraps the most specific failure available into a
// BulkInsertAborted error, preferring the server-reported operation error,
// then e, then flushEx.
func (o *BulkInsertOperation) throwBulkInsertAborted(e error, flushEx error) error {
	cause := o.getErrorFromOperation()
	if cause == nil {
		cause = e
	}
	if cause == nil {
		cause = flushEx
	}
	return newBulkInsertAbortedError("Failed to execute bulk insert, error: %s", cause)
}
// getErrorFromOperation asks the server for the state of this operation and
// returns a BulkInsertAborted error if the server reports it as Faulted with
// an OperationExceptionResult payload; otherwise nil.
func (o *BulkInsertOperation) getErrorFromOperation() error {
	stateRequest := NewGetOperationStateCommand(o.requestExecutor.GetConventions(), o.operationID)
	err := o.requestExecutor.ExecuteCommand(stateRequest, nil)
	if err != nil {
		return err
	}
	status, _ := jsonGetAsText(stateRequest.Result, "Status")
	if status != "Faulted" {
		return nil
	}
	// Dig the server-side exception message out of the result payload.
	if result, ok := stateRequest.Result["Result"]; ok {
		if result, ok := result.(map[string]interface{}); ok {
			typ, _ := jsonGetAsString(result, "$type")
			if strings.HasPrefix(typ, "Raven.Client.Documents.Operations.OperationExceptionResult") {
				errStr, _ := jsonGetAsString(result, "Error")
				return newBulkInsertAbortedError(errStr)
			}
		}
	}
	// Faulted but with no recognizable error payload: treat as no error.
	return nil
}
// WaitForID waits for operation id to finish
func (o *BulkInsertOperation) WaitForID() error {
	// Already resolved: nothing to do.
	if o.operationID != -1 {
		return nil
	}
	cmd := NewGetNextOperationIDCommand()
	if o.err = o.requestExecutor.ExecuteCommand(cmd, nil); o.err != nil {
		return o.err
	}
	o.operationID = cmd.Result
	return nil
}
// StoreWithID stores an entity with a given id
// It serializes the entity (plus metadata) as one element of the JSON array
// streamed through the pipe to the server. Not safe for concurrent use; a
// concurrent call returns an error rather than corrupting the stream.
func (o *BulkInsertOperation) StoreWithID(entity interface{}, id string, metadata *MetadataAsDictionary) error {
	if !o.concurrentCheck.compareAndSet(0, 1) {
		return newIllegalStateError("Bulk Insert Store methods cannot be executed concurrently.")
	}
	defer o.concurrentCheck.set(0)

	// early exit if we failed previously
	if o.err != nil {
		return o.err
	}
	err := bulkInsertOperationVerifyValidID(id)
	if err != nil {
		return err
	}
	// Lazily resolve the operation id and start the server-side command.
	o.err = o.WaitForID()
	if o.err != nil {
		return o.err
	}
	o.err = o.ensureCommand()
	if o.err != nil {
		return o.err
	}

	// The background command already failed: surface its error.
	if o.bulkInsertExecuteTask.IsCompletedExceptionally() {
		_, err = o.bulkInsertExecuteTask.Get()
		panicIf(err == nil, "err should not be nil")
		return o.throwBulkInsertAborted(err, nil)
	}

	if metadata == nil {
		metadata = &MetadataAsDictionary{}
	}
	// Fill in collection and Go-type metadata if the caller didn't.
	if !metadata.ContainsKey(MetadataCollection) {
		collection := o.requestExecutor.GetConventions().getCollectionName(entity)
		if collection != "" {
			metadata.Put(MetadataCollection, collection)
		}
	}
	if !metadata.ContainsKey(MetadataRavenGoType) {
		goType := o.requestExecutor.GetConventions().getGoTypeName(entity)
		if goType != "" {
			metadata.Put(MetadataRavenGoType, goType)
		}
	}

	documentInfo := &documentInfo{}
	documentInfo.metadataInstance = metadata
	jsNode := convertEntityToJSON(entity, documentInfo)

	var b bytes.Buffer
	// First element opens the JSON array; later ones are comma-separated.
	if o.first {
		b.WriteByte('[')
		o.first = false
	} else {
		b.WriteByte(',')
	}
	m := map[string]interface{}{}
	m["Id"] = o.escapeID(id)
	m["Type"] = "PUT"
	m["Document"] = jsNode

	d, err := jsonMarshal(m)
	if err != nil {
		return err
	}
	b.Write(d)

	_, o.err = o.currentWriter.Write(b.Bytes())
	if o.err != nil {
		// A pipe write failure usually means the server side died; prefer
		// the server-reported error if one is available.
		err = o.getErrorFromOperation()
		if err != nil {
			o.err = err
			return o.err
		}
		// TODO:
		//o.err = o.throwOnUnavailableStream()
		return o.err
	}
	return o.err
}
// escapeID backslash-escapes any double quote in input that is not already
// escaped, so the id can be embedded safely in the JSON payload.
func (o *BulkInsertOperation) escapeID(input string) string {
	// Fast path: nothing to escape.
	if !strings.Contains(input, `"`) {
		return input
	}
	var out bytes.Buffer
	for i := 0; i < len(input); i++ {
		ch := input[i]
		// Insert a backslash before a quote unless the previous byte
		// already escapes it.
		if ch == '"' && (i == 0 || input[i-1] != '\\') {
			out.WriteByte('\\')
		}
		out.WriteByte(ch)
	}
	return out.String()
}
// ensureCommand lazily creates the BulkInsertCommand and launches it in a
// background goroutine whose outcome is published via bulkInsertExecuteTask.
// Idempotent: subsequent calls return nil without starting another command.
func (o *BulkInsertOperation) ensureCommand() error {
	if o.Command != nil {
		return nil
	}
	bulkCommand := NewBulkInsertCommand(o.operationID, o.reader, o.useCompression)
	panicIf(o.bulkInsertExecuteTask != nil, "already started _bulkInsertExecuteTask")
	o.bulkInsertExecuteTask = newCompletableFuture()
	// The command blocks reading from the pipe until Close() writes the
	// terminating ']' and closes the writer.
	go func() {
		err := o.requestExecutor.ExecuteCommand(bulkCommand, nil)
		if err != nil {
			o.bulkInsertExecuteTask.completeWithError(err)
		} else {
			o.bulkInsertExecuteTask.complete(nil)
		}
	}()

	o.Command = bulkCommand
	return nil
}
// Abort aborts insert operation
// It resolves the operation id if necessary and asks the server to kill the
// running bulk insert.
func (o *BulkInsertOperation) Abort() error {
	if o.operationID == -1 {
		return nil // nothing was done, nothing to kill
	}
	if err := o.WaitForID(); err != nil {
		return err
	}

	command, err := NewKillOperationCommand(i64toa(o.operationID))
	if err != nil {
		return err
	}
	err = o.requestExecutor.ExecuteCommand(command, nil)
	if err != nil {
		// A RavenError means the operation no longer exists on the server.
		if _, ok := err.(*RavenError); ok {
			// BUG FIX: message typo "ths" -> "the".
			return newBulkInsertAbortedError("Unable to kill the bulk insert operation, because it was not found on the server.")
		}
		return err
	}
	return nil
}
// Close closes operation
// It writes the closing ']' of the streamed JSON array, closes the pipe
// writer, then waits for the server-side command and reports its failure.
func (o *BulkInsertOperation) Close() error {
	if o.operationID == -1 {
		// closing without calling a single Store.
		return nil
	}
	d := []byte{']'}
	_, err := o.currentWriter.Write(d)
	errClose := o.currentWriter.Close()
	if o.bulkInsertExecuteTask != nil {
		_, err2 := o.bulkInsertExecuteTask.Get()
		if err2 != nil && err == nil {
			// BUG FIX: the original passed err here, which is always nil on
			// this branch, silently dropping the task's failure (err2).
			err = o.throwBulkInsertAborted(err2, errClose)
		}
	}
	if err != nil {
		o.err = err
		return err
	}
	return nil
}
// Store schedules entity for storing and returns its id. metadata can be nil
// If metadata carries an explicit id it is used; otherwise one is read from
// or generated for the entity.
func (o *BulkInsertOperation) Store(entity interface{}, metadata *MetadataAsDictionary) (string, error) {
	var err error
	var id string
	if metadata == nil || !metadata.ContainsKey(MetadataID) {
		if id, err = o.GetID(entity); err != nil {
			return "", err
		}
	} else {
		idVal, ok := metadata.Get(MetadataID)
		// BUG FIX: panic-message typo "meatadata" -> "metadata".
		panicIf(!ok, "didn't find %s key in metadata", MetadataID)
		id = idVal.(string)
	}
	return id, o.StoreWithID(entity, id, metadata)
}
// GetID returns id for an entity
// If the entity already carries an id, that is returned; otherwise a new
// document key is generated and stored back on the entity.
func (o *BulkInsertOperation) GetID(entity interface{}) (string, error) {
	if existing, ok := o.generateEntityIDOnTheClient.tryGetIDFromInstance(entity); ok {
		return existing, nil
	}
	generated, err := o.generateEntityIDOnTheClient.generateDocumentKeyForStorage(entity)
	if err != nil {
		return "", err
	}
	// set id property if it was null
	o.generateEntityIDOnTheClient.trySetIdentity(entity, generated)
	return generated, nil
}
// throwOnUnavailableStream surfaces the background command's failure (if
// any) after a stream write failed. id and innerEx are currently unused;
// they mirror the Java API (see the TODO below).
func (o *BulkInsertOperation) throwOnUnavailableStream(id string, innerEx error) error {
	// TODO: don't know how/if this translates to Go
	//_streamExposerContent.errorOnProcessingRequest(new BulkInsertAbortedError("Write to stream failed at document with id " + id, innerEx))
	_, err := o.bulkInsertExecuteTask.Get()
	if err != nil {
		return unwrapError(err)
	}
	return nil
}
// bulkInsertOperationVerifyValidID validates a document id for bulk insert:
// it must be non-empty and must not end with '|' (reserved for server-side
// identity generation).
func bulkInsertOperationVerifyValidID(id string) error {
	switch {
	case stringIsEmpty(id):
		return newIllegalStateError("Document id must have a non empty value")
	case strings.HasSuffix(id, "|"):
		return newUnsupportedOperationError("Document ids cannot end with '|', but was called with %s", id)
	default:
		return nil
	}
}
|
package ucfunnel
import (
"encoding/json"
"fmt"
"testing"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/adapters"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/openrtb_ext"
)
// TestMakeRequests exercises MakeRequests with an empty request, a request
// whose imps lack bidder ext, and a request with valid ext on every imp.
// out1 is the expected error count, out2 whether RequestData is non-nil.
func TestMakeRequests(t *testing.T) {
	imp := openrtb2.Imp{
		ID:     "1234",
		Banner: &openrtb2.Banner{},
	}
	imp2 := openrtb2.Imp{
		ID:    "1235",
		Video: &openrtb2.Video{},
	}
	imp3 := openrtb2.Imp{
		ID:    "1236",
		Audio: &openrtb2.Audio{},
	}
	imp4 := openrtb2.Imp{
		ID:     "1237",
		Native: &openrtb2.Native{},
	}
	imp5 := openrtb2.Imp{
		ID:     "1237",
		Native: &openrtb2.Native{},
	}
	internalRequest01 := openrtb2.BidRequest{Imp: []openrtb2.Imp{}}
	internalRequest02 := openrtb2.BidRequest{Imp: []openrtb2.Imp{imp, imp2, imp3, imp4, imp5}}
	internalRequest03 := openrtb2.BidRequest{Imp: []openrtb2.Imp{imp, imp2, imp3, imp4, imp5}}
	// Only request03 gets valid ucfunnel bidder ext on its imps.
	internalRequest03.Imp[0].Ext = []byte(`{"bidder": {"adunitid": "ad-488663D474E44841E8A293379892348","partnerid": "par-7E6D2DB9A8922AB07B44A444D2BA67"}}`)
	internalRequest03.Imp[1].Ext = []byte(`{"bidder": {"adunitid": "ad-488663D474E44841E8A293379892348","partnerid": "par-7E6D2DB9A8922AB07B44A444D2BA67"}}`)
	internalRequest03.Imp[2].Ext = []byte(`{"bidder": {"adunitid": "ad-488663D474E44841E8A293379892348","partnerid": "par-7E6D2DB9A8922AB07B44A444D2BA67"}}`)
	internalRequest03.Imp[3].Ext = []byte(`{"bidder": {"adunitid": "ad-488663D474E44841E8A293379892348","partnerid": "par-7E6D2DB9A8922AB07B44A444D2BA67"}}`)
	internalRequest03.Imp[4].Ext = []byte(`{"bidder": {"adunitid": "aa","partnerid": ""}}`)
	bidder, buildErr := Builder(openrtb_ext.BidderUcfunnel, config.Adapter{
		Endpoint: "http://localhost/bid"}, config.Server{ExternalUrl: "http://hosturl.com", GvlID: 1, DataCenter: "2"})
	if buildErr != nil {
		t.Fatalf("Builder returned unexpected error %v", buildErr)
	}
	var testCases = []struct {
		in   []openrtb2.BidRequest
		out1 [](int)
		out2 [](bool)
	}{
		{
			in:   []openrtb2.BidRequest{internalRequest01, internalRequest02, internalRequest03},
			out1: [](int){1, 1, 0},
			out2: [](bool){false, false, true},
		},
	}
	for idx := range testCases {
		for i := range testCases[idx].in {
			RequestData, err := bidder.MakeRequests(&testCases[idx].in[i], nil)
			// NOTE(review): this condition only fails the test when the
			// RequestData nil-ness mismatches AND the error count matches —
			// the && likely should be two independent checks (mismatch on
			// either should fail). Confirm intent before changing, since
			// splitting it may surface currently-masked failures.
			if ((RequestData == nil) == testCases[idx].out2[i]) && (len(err) == testCases[idx].out1[i]) {
				t.Errorf("actual = %v expected = %v", len(err), testCases[idx].out1[i])
			}
		}
	}
}
// TestMakeBids exercises MakeBids across HTTP status codes (200/203/204/400),
// malformed response JSON, and a malformed request body. out1 is whether the
// BidderResponse is expected to be nil, out2 whether err is expected non-nil.
func TestMakeBids(t *testing.T) {
	imp := openrtb2.Imp{
		ID:     "1234",
		Banner: &openrtb2.Banner{},
	}
	imp2 := openrtb2.Imp{
		ID:    "1235",
		Video: &openrtb2.Video{},
	}
	imp3 := openrtb2.Imp{
		ID:    "1236",
		Audio: &openrtb2.Audio{},
	}
	imp4 := openrtb2.Imp{
		ID:     "1237",
		Native: &openrtb2.Native{},
	}
	imp5 := openrtb2.Imp{
		ID:     "1237",
		Native: &openrtb2.Native{},
	}
	internalRequest03 := openrtb2.BidRequest{Imp: []openrtb2.Imp{imp, imp2, imp3, imp4, imp5}}
	internalRequest04 := openrtb2.BidRequest{Imp: []openrtb2.Imp{imp}}
	internalRequest03.Imp[0].Ext = []byte(`{"bidder": {"adunitid": "ad-488663D474E44841E8A293379892348","partnerid": "par-7E6D2DB9A8922AB07B44A444D2BA67"}}`)
	internalRequest03.Imp[1].Ext = []byte(`{"bidder": {"adunitid": "ad-488663D474E44841E8A293379892348","partnerid": "par-7E6D2DB9A8922AB07B44A444D2BA67"}}`)
	internalRequest03.Imp[2].Ext = []byte(`{"bidder": {"adunitid": "ad-488663D474E44841E8A293379892348","partnerid": "par-7E6D2DB9A8922AB07B44A444D2BA67"}}`)
	internalRequest03.Imp[3].Ext = []byte(`{"bidder": {"adunitid": "ad-488663D474E44841E8A293379892348","partnerid": "par-7E6D2DB9A8922AB07B44A444D2BA67"}}`)
	internalRequest03.Imp[4].Ext = []byte(`{"bidder": {"adunitid": "aa","partnerid": ""}}`)
	internalRequest04.Imp[0].Ext = []byte(`{"bidder": {"adunitid": "0"}}`)

	// Mock server responses; mockResponseError is deliberately broken JSON,
	// RequestData02 is a deliberately broken request body.
	mockResponse200 := adapters.ResponseData{StatusCode: 200, Body: json.RawMessage(`{"seatbid": [{"bid": [{"impid": "1234"}]},{"bid": [{"impid": "1235"}]},{"bid": [{"impid": "1236"}]},{"bid": [{"impid": "1237"}]}]}`)}
	mockResponse203 := adapters.ResponseData{StatusCode: 203, Body: json.RawMessage(`{"seatbid":[{"bid":[{"impid":"1234"}]},{"bid":[{"impid":"1235"}]}]}`)}
	mockResponse204 := adapters.ResponseData{StatusCode: 204, Body: json.RawMessage(`{"seatbid":[{"bid":[{"impid":"1234"}]},{"bid":[{"impid":"1235"}]}]}`)}
	mockResponse400 := adapters.ResponseData{StatusCode: 400, Body: json.RawMessage(`{"seatbid":[{"bid":[{"impid":"1234"}]},{"bid":[{"impid":"1235"}]}]}`)}
	mockResponseError := adapters.ResponseData{StatusCode: 200, Body: json.RawMessage(`{"seatbid":[{"bid":[{"im236"}],{"bid":[{"impid":"1237}]}`)}
	RequestData01 := adapters.RequestData{Method: "POST", Body: []byte(`{"imp":[{"id":"1234","banner":{}},{"id":"1235","video":{}},{"id":"1236","audio":{}},{"id":"1237","native":{}}]}`)}
	RequestData02 := adapters.RequestData{Method: "POST", Body: []byte(`{"imp":[{"id":"1234","banne"1235","video":{}},{"id":"1236","audio":{}},{"id":"1237","native":{}}]}`)}
	bidder, buildErr := Builder(openrtb_ext.BidderUcfunnel, config.Adapter{
		Endpoint: "http://localhost/bid"}, config.Server{ExternalUrl: "http://hosturl.com", GvlID: 1, DataCenter: "2"})
	if buildErr != nil {
		t.Fatalf("Builder returned unexpected error %v", buildErr)
	}
	var testCases = []struct {
		in1  []openrtb2.BidRequest
		in2  []adapters.RequestData
		in3  []adapters.ResponseData
		out1 [](bool)
		out2 [](bool)
	}{
		{
			in1:  []openrtb2.BidRequest{internalRequest03, internalRequest03, internalRequest03, internalRequest03, internalRequest03, internalRequest04},
			in2:  []adapters.RequestData{RequestData01, RequestData01, RequestData01, RequestData01, RequestData01, RequestData02},
			in3:  []adapters.ResponseData{mockResponse200, mockResponse203, mockResponse204, mockResponse400, mockResponseError, mockResponse200},
			out1: [](bool){true, false, false, false, false, false},
			out2: [](bool){false, true, false, true, true, true},
		},
	}
	for idx := range testCases {
		for i := range testCases[idx].in1 {
			BidderResponse, err := bidder.MakeBids(&testCases[idx].in1[i], &testCases[idx].in2[i], &testCases[idx].in3[i])
			// Note the inverted convention: failing when actual == expected
			// flag means out1/out2 record the *negation* of the expectation.
			if (BidderResponse == nil) == testCases[idx].out1[i] {
				fmt.Println(i)
				fmt.Println("BidderResponse")
				t.Errorf("actual = %t expected == %v", (BidderResponse == nil), testCases[idx].out1[i])
			}
			if (err == nil) == testCases[idx].out2[i] {
				fmt.Println(i)
				fmt.Println("error")
				t.Errorf("actual = %t expected == %v", err, testCases[idx].out2[i])
			}
		}
	}
}
|
package upload
import (
"crypto/md5"
"encoding/hex"
"fmt"
"io"
"net/http"
"os"
"path"
"strconv"
"time"
"github.com/510909033/bgf_log"
)
// logger is the package-wide logger for the upload module.
var logger = bgf_log.GetLogger("upload_bo")
// Save records an uploaded file in storage. The stored name is the MD5 hash
// of the original name plus the current nanosecond timestamp (to avoid
// collisions); the original extension is preserved. It returns the new row id.
func Save(name string) (id int64, err error) {
	bo := &UploadBO{}
	newBO, err := NewUploadBO(bo, false)
	// BUG FIX: the original ignored this error and would dereference a nil
	// BO on the next line if construction failed.
	if err != nil {
		err = fmt.Errorf("NewUploadBO err, err=%w\n", err)
		logger.Errorf(err.Error())
		return
	}
	h := md5.New()
	h.Write([]byte(name + strconv.FormatInt(time.Now().UnixNano(), 10)))
	rename := hex.EncodeToString(h.Sum(nil))
	newBO.Name = rename
	// Keep the original file extension.
	newBO.Ext = path.Ext(name)
	id, err = newBO.Insert()
	if err != nil {
		// BUG FIX: error-message typo "Inert" -> "Insert".
		err = fmt.Errorf("Insert err, err=%w\n", err)
		logger.Errorf(err.Error())
		return
	}
	return
}
const (
	// ParseMultipartForm is the maximum memory (32 MiB) used when parsing a multipart form.
	ParseMultipartForm = 32 << 20
	// UPLOAD_DIR is the directory uploaded files are stored in.
	UPLOAD_DIR = "/vdb1/uploads"
)
// UploadHandler accepts a POST multipart upload, writes the named form file
// into UPLOAD_DIR, records it via Save, and renames the file to its row id.
// It returns (err, res) — note the unconventional order, kept for callers.
func UploadHandler(r *http.Request, name string) (err error, res *Result) {
	if r.Method != "POST" {
		err = fmt.Errorf("不是POST请求, method=%s, request=%+v\n", r.Method, *r)
		return
	}
	// BUG FIX: the parse error was silently ignored.
	if err = r.ParseMultipartForm(ParseMultipartForm); err != nil {
		err = fmt.Errorf("ParseMultipartForm err, err=%w\n", err)
		return
	}
	f, h, err := r.FormFile(name)
	if err != nil {
		err = fmt.Errorf("FormFile err, err=%w\n", err)
		return
	}
	defer f.Close()
	// SECURITY FIX: h.Filename is client-controlled; path.Base strips any
	// directory components (path traversal such as "../../etc/passwd").
	filename := path.Base(h.Filename)
	filepath := UPLOAD_DIR + "/" + filename
	t, err := os.Create(filepath)
	if err != nil {
		err = fmt.Errorf("Create err, err=%w\n", err)
		return
	}
	defer t.Close()
	if _, err = io.Copy(t, f); err != nil {
		err = fmt.Errorf("Copy err, err=%w\n", err)
		return
	}
	id, err := Save(filename)
	if err != nil {
		err = fmt.Errorf("Save err, err=%w\n", err)
		return
	}
	res = &Result{}
	res.Id = id
	// Rename the stored file to its database id.
	err = os.Rename(filepath, UPLOAD_DIR+"/"+strconv.FormatInt(id, 10))
	if err != nil {
		err = fmt.Errorf("Rename err, err=%w\n", err)
		return
	}
	return
}
|
package main
import (
"context"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/drhodes/golorem"
)
// Response is the API Gateway proxy response returned by the Lambda handler.
type Response events.APIGatewayProxyResponse

// Handler answers every request with a random lorem-ipsum sentence of
// 8 to 15 words.
func Handler(ctx context.Context) (Response, error) {
	body := lorem.Sentence(8, 15)
	return Response{
		StatusCode:      200,
		IsBase64Encoded: false,
		Body:            body,
	}, nil
}
// main registers Handler with the AWS Lambda runtime and blocks serving
// invocations.
func main() {
	lambda.Start(Handler)
}
|
// main.go
package main
import (
"errors"
"net/http"
"strconv"
"github.com/gin-gonic/gin"
)
// router is the package-wide gin engine, shared with the route handlers.
var router *gin.Engine

// main wires up templates and routes, then serves on gin's default address.
func main() {
	router = gin.Default()
	// Templates must be loaded before any handler tries to render them.
	router.LoadHTMLGlob("templates/*")
	initializeRoutes()
	router.Run()
}
func initializeRoutes() {
router.GET("/", showIndexPage)
router.GET("/about-us", aboutUsPage)
router.GET("/contact-us", contactUsPage)
router.GET("/article/view/:article_id", getArticle)
}
// showIndexPage renders the home page with the full article list.
func showIndexPage(c *gin.Context) {
	data := gin.H{
		"title":   "Home Page",
		"payload": getAllArticles(),
	}
	c.HTML(http.StatusOK, "index.html", data)
}
// contactUsPage renders the contact page with the full article list.
func contactUsPage(c *gin.Context) {
	articles := getAllArticles()
	c.HTML(
		http.StatusOK,
		"contact_us.html",
		gin.H{
			// BUG FIX: title was "Home Page", copy-pasted from showIndexPage.
			"title":   "Contact Us Page",
			"payload": articles,
		},
	)
}
// aboutUsPage renders the about page with the full article list.
func aboutUsPage(c *gin.Context) {
	articles := getAllArticles()
	c.HTML(
		http.StatusOK,
		"about_us.html",
		gin.H{
			// BUG FIX: title was "Home Page", copy-pasted from showIndexPage.
			"title":   "About Us Page",
			"payload": articles,
		},
	)
}
// getArticle renders a single article looked up by the :article_id route
// parameter, aborting with 404 when the id is not a number or not found.
func getArticle(c *gin.Context) {
	articleID, err := strconv.Atoi(c.Param("article_id"))
	if err != nil {
		c.AbortWithStatus(http.StatusNotFound)
		return
	}
	a, err := getArticleByID(articleID)
	if err != nil {
		c.AbortWithError(http.StatusNotFound, err)
		return
	}
	c.HTML(
		http.StatusOK,
		"article.html",
		gin.H{
			"title":   a.Title,
			"payload": a,
		},
	)
}
// article is a single blog post as rendered and serialized by the app.
type article struct {
	ID      int    `json:"id"`
	Title   string `json:"title"`
	Content string `json:"content"`
}

// articleList is the in-memory stand-in for a database of articles.
var articleList = []article{
	{ID: 1, Title: "Article 1", Content: "Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged."},
	{ID: 2, Title: "Article 2", Content: "Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged."},
	{ID: 3, Title: "Article 3", Content: "Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged."},
	{ID: 4, Title: "Article 4", Content: "Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged."},
	{ID: 5, Title: "Article 5", Content: "Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged."},
	{ID: 6, Title: "Article 6", Content: "Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged."},
}

// getAllArticles returns every article.
func getAllArticles() []article {
	return articleList
}

// getArticleByID returns a copy of the article with the given id, or an
// error if no such article exists.
func getArticleByID(id int) (*article, error) {
	for _, candidate := range articleList {
		if candidate.ID == id {
			return &candidate, nil
		}
	}
	return nil, errors.New("Article not found")
}
|
// +build linux
package devicemapper
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"strings"
"syscall"
"github.com/hyperhq/hyper/storage"
"github.com/hyperhq/runv/lib/glog"
)
// jsonMetadata mirrors the per-container metadata file written by the
// devicemapper graph driver (device id, size in bytes, transaction id and
// whether the device has been initialized).
type jsonMetadata struct {
	Device_id      int  `json:"device_id"`
	Size           int  `json:"size"`
	Transaction_id int  `json:"transaction_id"`
	Initialized    bool `json:"initialized"`
}
// For device mapper, we do not need to mount the container to sharedDir.
// All of we need to provide the block device name of container.
func MountContainerToSharedDir(containerId, sharedDir, devPrefix string) (string, error) {
	deviceName := fmt.Sprintf("/dev/mapper/%s-%s", devPrefix, containerId)
	return deviceName, nil
}
// CreateNewDevice reads the container's devicemapper metadata (device id and
// size) and activates the corresponding thin device via dmsetup.
func CreateNewDevice(containerId, devPrefix, rootPath string) error {
	var metadataPath = fmt.Sprintf("%s/metadata/", rootPath)
	// Get device id from the metadata file
	idMetadataFile := path.Join(metadataPath, containerId)
	// BUG FIX: the original only returned when the error was IsNotExist,
	// silently ignoring other stat failures (e.g. permission errors).
	if _, err := os.Stat(idMetadataFile); err != nil {
		return err
	}
	jsonData, err := ioutil.ReadFile(idMetadataFile)
	if err != nil {
		return err
	}
	var dat jsonMetadata
	if err := json.Unmarshal(jsonData, &dat); err != nil {
		return err
	}
	deviceId := dat.Device_id
	deviceSize := dat.Size
	// Activate the device for that device ID. Size is converted from bytes
	// to 512-byte sectors for the dmsetup table.
	devName := fmt.Sprintf("%s-%s", devPrefix, containerId)
	poolName := fmt.Sprintf("/dev/mapper/%s-pool", devPrefix)
	createDeviceCmd := fmt.Sprintf("dmsetup create %s --table \"0 %d thin %s %d\"", devName, deviceSize/512, poolName, deviceId)
	createDeviceCommand := exec.Command("/bin/sh", "-c", createDeviceCmd)
	output, err := createDeviceCommand.Output()
	if err != nil {
		// BUG FIX: log the output as text, not as a raw []byte value.
		glog.Error(string(output))
		return err
	}
	return nil
}
// InjectFile mounts the container's thin device under rootPath/mnt/<id>,
// writes src to <rootfs>/target with the given permissions and ownership,
// and unmounts again. perm/uid/gid apply to the written file; directories
// created on the way get perm plus execute bits.
func InjectFile(src io.Reader, containerId, devPrefix, target, rootPath string, perm, uid, gid int) error {
	if containerId == "" {
		return fmt.Errorf("Please make sure the arguments are not NULL!\n")
	}
	// Directories need the execute (search) bits in addition to perm.
	permDir := perm | 0111
	// Define the basic directory, need to get them via the 'info' command
	var (
		mntPath = fmt.Sprintf("%s/mnt/", rootPath)
		devName = fmt.Sprintf("%s-%s", devPrefix, containerId)
	)
	// Get the mount point for the container ID
	idMountPath := path.Join(mntPath, containerId)
	rootFs := path.Join(idMountPath, "rootfs")
	targetFile := path.Join(rootFs, target)
	// Whether we have the mounter directory
	if _, err := os.Stat(idMountPath); err != nil && os.IsNotExist(err) {
		if err := os.MkdirAll(idMountPath, os.FileMode(permDir)); err != nil {
			return err
		}
	}
	// Mount the block device to that mount point
	var flags uintptr = syscall.MS_MGC_VAL
	devFullName := fmt.Sprintf("/dev/mapper/%s", devName)
	fstype, err := ProbeFsType(devFullName)
	if err != nil {
		return err
	}
	glog.V(3).Infof("The filesytem type is %s", fstype)
	options := ""
	if fstype == "xfs" {
		// XFS needs nouuid or it can't mount filesystems with the same fs
		options = joinMountOptions(options, "nouuid")
	}
	// Try with "discard" first; retry without it if the kernel rejects the
	// option (EINVAL), since not all devices support discard.
	err = syscall.Mount(devFullName, idMountPath, fstype, flags, joinMountOptions("discard", options))
	if err != nil && err == syscall.EINVAL {
		err = syscall.Mount(devFullName, idMountPath, fstype, flags, options)
	}
	if err != nil {
		return fmt.Errorf("Error mounting '%s' on '%s': %s", devFullName, idMountPath, err)
	}
	// Lazy-unmount when done, regardless of write success.
	defer syscall.Unmount(idMountPath, syscall.MNT_DETACH)
	return storage.WriteFile(src, targetFile, perm, uid, gid)
}
// ProbeFsType detects the filesystem type on a block device by running
// "file -sL" and scanning its output. It returns "ext4" for any ext
// variant, "xfs" for XFS, and an error otherwise.
func ProbeFsType(device string) (string, error) {
	// The daemon will only be run on Linux platform, so 'file -s' command
	// will be used to test the type of filesystem which the device located.
	cmd := fmt.Sprintf("file -sL %s", device)
	command := exec.Command("/bin/sh", "-c", cmd)
	fileCmdOutput, err := command.Output()
	if err != nil {
		// BUGFIX: the original returned ("", nil) here, so a failed probe
		// looked like success and callers attempted a mount with an empty
		// fstype. Propagate the error instead.
		return "", err
	}
	// Lower-case once instead of per comparison.
	out := strings.ToLower(string(fileCmdOutput))
	if strings.Contains(out, "ext") {
		return "ext4", nil
	}
	if strings.Contains(out, "xfs") {
		return "xfs", nil
	}
	return "", fmt.Errorf("Unknown filesystem type on %s", device)
}
// joinMountOptions concatenates two mount-option strings with a comma,
// omitting the separator when either side is empty.
func joinMountOptions(a, b string) string {
	switch {
	case a == "":
		return b
	case b == "":
		return a
	default:
		return a + "," + b
	}
}
// DeviceMapper describes a loopback-backed thin pool: the sparse data and
// metadata files, the loop devices bound to them, the pool size in bytes,
// and the device-mapper pool name.
type DeviceMapper struct {
	Datafile         string // backing file for pool data
	Metadatafile     string // backing file for pool metadata
	DataLoopFile     string // loop device (e.g. /dev/loop5) for Datafile
	MetadataLoopFile string // loop device for Metadatafile
	Size             int    // pool size in bytes
	PoolName         string // dm pool name under /dev/mapper
}
// CreatePool creates a loopback-backed device-mapper thin pool described by
// dm. It is idempotent: if /dev/mapper/<PoolName> already exists it returns
// nil immediately. All steps shell out; on failure the command's combined
// output is logged and returned as the error.
func CreatePool(dm *DeviceMapper) error {
	if _, err := os.Stat("/dev/mapper/" + dm.PoolName); err == nil {
		return nil
	}
	// Create data file and metadata file (sparse data file of dm.Size bytes,
	// 128M metadata file).
	parms := fmt.Sprintf("dd if=/dev/zero of=%s bs=1 seek=%d count=0", dm.Datafile, dm.Size)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		// BUGFIX: use a fixed "%s" verb; the original passed command output
		// as the format string, so any '%' in it corrupted the message.
		return fmt.Errorf("%s", res)
	}
	parms = fmt.Sprintf("fallocate -l 128M %s", dm.Metadatafile)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	// Create the loop device nodes if absent.
	// NOTE(review): the minor number is taken from the LAST character of the
	// loop file name, which breaks for /dev/loop10 and above — confirm the
	// deployment never uses double-digit loop devices.
	if _, err := os.Stat(dm.DataLoopFile); err != nil {
		l := len(dm.DataLoopFile)
		parms = fmt.Sprintf("mknod -m 0660 %s b 7 %s", dm.DataLoopFile, dm.DataLoopFile[(l-1):l])
		if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
			glog.Error(string(res))
			return fmt.Errorf("%s", res)
		}
	}
	if _, err := os.Stat(dm.MetadataLoopFile); err != nil {
		l := len(dm.MetadataLoopFile)
		parms = fmt.Sprintf("mknod -m 0660 %s b 7 %s", dm.MetadataLoopFile, dm.MetadataLoopFile[(l-1):l])
		if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
			glog.Error(string(res))
			return fmt.Errorf("%s", res)
		}
	}
	// Setup the loop device for data and metadata files
	parms = fmt.Sprintf("losetup %s %s", dm.DataLoopFile, dm.Datafile)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	parms = fmt.Sprintf("losetup %s %s", dm.MetadataLoopFile, dm.Metadatafile)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	// Make filesystem for data loop device and metadata loop device
	parms = fmt.Sprintf("mkfs.ext4 %s", dm.DataLoopFile)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	parms = fmt.Sprintf("mkfs.ext4 %s", dm.MetadataLoopFile)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	// Zero the first 4K of the metadata device so the thin-pool target sees
	// an uninitialized metadata area.
	parms = fmt.Sprintf("dd if=/dev/zero of=%s bs=4096 count=1", dm.MetadataLoopFile)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	// thin-pool table: "start length thin-pool metadata_dev data_dev
	// data_block_size low_water_mark"; length is in 512-byte sectors.
	parms = fmt.Sprintf("dmsetup create %s --table '0 %d thin-pool %s %s 128 0'", dm.PoolName, dm.Size/512, dm.MetadataLoopFile, dm.DataLoopFile)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	return nil
}
// CreateVolume creates (or, when restore is true, merely re-activates) a thin
// volume named volName of the given byte size in poolName. Idempotent: an
// already-present /dev/mapper/<volName> returns nil. When restore is false
// the thin device is first provisioned in the pool and then formatted ext4.
func CreateVolume(poolName, volName, dev_id string, size int, restore bool) error {
	glog.Infof("/dev/mapper/%s", volName)
	if _, err := os.Stat("/dev/mapper/" + volName); err == nil {
		return nil
	}
	if restore == false {
		parms := fmt.Sprintf("dmsetup message /dev/mapper/%s 0 \"create_thin %s\"", poolName, dev_id)
		if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
			glog.Error(string(res))
			// BUGFIX: fixed "%s" verb; command output must not be used as a
			// format string.
			return fmt.Errorf("%s", res)
		}
	}
	// thin table length is in 512-byte sectors.
	parms := fmt.Sprintf("dmsetup create %s --table \"0 %d thin /dev/mapper/%s %s\"", volName, size/512, poolName, dev_id)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	if restore == false {
		parms = fmt.Sprintf("mkfs.ext4 \"/dev/mapper/%s\"", volName)
		if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
			glog.Error(string(res))
			return fmt.Errorf("%s", res)
		}
	}
	return nil
}
// DeleteVolume deletes the thin device dev_id from dm's pool via a
// "dmsetup message ... delete" call.
func DeleteVolume(dm *DeviceMapper, dev_id int) error {
	var parms string
	// Delete the thin pool for test
	parms = fmt.Sprintf("dmsetup message /dev/mapper/%s 0 \"delete %d\"", dm.PoolName, dev_id)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		// BUGFIX: fixed "%s" verb; the original used the command output as
		// the format string.
		return fmt.Errorf("%s", res)
	}
	return nil
}
// DMCleanup tears down what CreatePool built: it removes the thin pool from
// device mapper and then detaches both loop devices. The backing files are
// left in place.
func DMCleanup(dm *DeviceMapper) error {
	var parms string
	// Delete the thin pool for test
	parms = fmt.Sprintf("dmsetup remove \"/dev/mapper/%s\"", dm.PoolName)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		// BUGFIX: fixed "%s" verb; command output must not act as a format
		// string (a '%' in it corrupted the message).
		return fmt.Errorf("%s", res)
	}
	// Delete the loop device
	parms = fmt.Sprintf("losetup -d %s", dm.MetadataLoopFile)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	parms = fmt.Sprintf("losetup -d %s", dm.DataLoopFile)
	if res, err := exec.Command("/bin/sh", "-c", parms).CombinedOutput(); err != nil {
		glog.Error(string(res))
		return fmt.Errorf("%s", res)
	}
	return nil
}
|
package sorts
import (
"fmt"
"math"
"math/rand"
"time"
)
// Tests exercises every sort/search helper in this package and prints the
// inputs and outputs to stdout. It is a demo driver, not a unit test.
func Tests() {
	fmt.Println("*****************Sorting******************")
	arr := []int{5, 2, 4, 6, 1, 3}
	fmt.Println("-----------SortInt--------------")
	fmt.Println(arr)
	SortInt(arr)
	fmt.Println(arr)
	i := LinearSearch(arr, 3)
	fmt.Println(i)
	// Binary addition demo: a + b = c, then convert all three back to
	// decimal by summing powers of two for a manual cross-check.
	a := []int{1, 0, 0, 1, 1, 1, 0}
	b := []int{1, 1, 1, 1, 0, 1, 1}
	c := BinaryAdd(a, b)
	n := len(a) - 1
	aa := 0
	bb := 0
	ii := 0
	for ; n >= 0; n-- {
		p := int(math.Pow(2, float64(ii)))
		aa += a[n] * p
		bb += b[n] * p
		ii++
	}
	n0 := len(c) - 1
	cc := 0
	ii = 0
	for ; n0 >= 0; n0-- {
		p := int(math.Pow(2, float64(ii)))
		cc += c[n0] * p
		ii++
	}
	fmt.Println(aa, " ", bb, " ", cc, " ", c)
	fmt.Println("--------------selection sort-----------------")
	arr2 := []int{3, 6, 1, 9, 4}
	fmt.Println(arr2)
	SelectionSort(arr2)
	fmt.Println(arr2)
	fmt.Println("--------------merge sort--------------")
	arr3 := []int{5, 2, 4, 7, 1, 3, 2, 6}
	fmt.Println(arr3)
	MergeSort(arr3, 0, len(arr3))
	fmt.Println(arr3)
	fmt.Println("--------------insert sort--------------")
	arr3 = []int{5, 2, 4, 7, 1, 3, 2, 6}
	fmt.Println(arr3)
	InsertionSort(arr3, len(arr3)-1)
	fmt.Println(arr3)
	fmt.Println("--------------bubble sort--------------")
	arr3 = []int{5, 2, 4, 7, 1, 3, 2, 6}
	fmt.Println(arr3)
	BubbleSort(arr3)
	fmt.Println(arr3)
	fmt.Println("--------------max heap sort--------------")
	arr3 = []int{5, 7, 4, 2, 1, 3, 2, 6}
	fmt.Println(arr3)
	MaxHeapSort(arr3)
	fmt.Println(arr3)
	fmt.Println("--------------quick sort--------------")
	arr3 = []int{5, 7, 4, 2, 1, 3, 2, 6}
	fmt.Println(arr3)
	QuickSort(arr3, 0, len(arr3)-1)
	fmt.Println(arr3)
	fmt.Println("--------------sort k--------------")
	arr3 = []int{4, 7, 0, 2, 1, 3, 9, 6}
	fmt.Println("before sorted for k...")
	fmt.Println(arr3)
	k := SortK(arr3, 5)
	fmt.Println("after sorted for k...")
	fmt.Println(arr3)
	fmt.Println(k)
	fmt.Println("--------------dec2binary--------------")
	// NOTE(review): this deliberately reuses the outer variable n (plain
	// '=', not ':=') declared above.
	for _, n = range []int{10, 11, 12, 13, 14, 15, 16, 17, 18, 19} {
		bit := dec2binary(n)
		fmt.Println(n, bit)
	}
	fmt.Println("--------------compare sort--------------")
	arr0 := []int{60, 35, 81, 98, 14, 47}
	fmt.Println(arr0)
	arr00 := ComparisonSort(arr0)
	fmt.Println(arr00)
	fmt.Println("--------------exp--------------")
	// Empirical growth experiment: count insertion-sort shifts on random
	// arrays of increasing size.
	rand.Seed(time.Now().UTC().UnixNano())
	startIndex := 1000
	step := 500
	for ii := 0; ii < 20; ii++ {
		aSize := startIndex + step*ii
		aArr := make([]int, aSize)
		for jj := 0; jj < aSize; jj++ {
			aArr[jj] = rand.Intn(aSize)
		}
		aCount := InsertionSortCount(aArr)
		fmt.Println(aSize, aCount)
	}
	fmt.Println("--------------find max Index--------------")
	fmt.Println(arr0)
	maxIndex := findMaxIndex(arr0, 0, len(arr0))
	fmt.Println(maxIndex, arr0[maxIndex])
	min, max := findMinMaxValue(arr0, 0, len(arr0))
	fmt.Println(min, max)
	fmt.Println("--------------merge sort 2--------------")
	arrarr := []int{6, 9, 7, 8, 5, 1, 3}
	fmt.Println(arrarr)
	MergeSort2(arrarr)
	fmt.Println(arrarr)
}
// findMaxIndex returns the index of the maximum element in
// arr[startIndex:endIndex) using divide and conquer. On ties the
// left-most index wins.
func findMaxIndex(arr []int, startIndex, endIndex int) int {
	mid := (startIndex + endIndex) / 2
	// Single-element range: nothing to compare.
	if mid == startIndex {
		return startIndex
	}
	left := findMaxIndex(arr, startIndex, mid)
	right := findMaxIndex(arr, mid, endIndex)
	if arr[right] > arr[left] {
		return right
	}
	return left
}
// findMinMaxValue returns (min, max) of arr[startIndex:endIndex) via
// divide and conquer.
func findMinMaxValue(arr []int, startIndex, endIndex int) (int, int) {
	mid := (startIndex + endIndex) / 2
	switch {
	case mid == startIndex:
		// One element: it is both min and max.
		v := arr[startIndex]
		return v, v
	case mid == startIndex+1:
		// Two elements: one comparison suffices.
		lo, hi := arr[startIndex], arr[mid]
		if lo > hi {
			lo, hi = hi, lo
		}
		return lo, hi
	}
	minL, maxL := findMinMaxValue(arr, startIndex, mid)
	minR, maxR := findMinMaxValue(arr, mid, endIndex)
	if minR < minL {
		minL = minR
	}
	if maxR > maxL {
		maxL = maxR
	}
	return minL, maxL
}
// pow2 returns 2**i for i >= 0 (pow2(0) == 1). A left shift replaces the
// original multiply loop. Non-positive i yields 1; the only caller
// (dec2binary) never passes a negative exponent.
func pow2(i int) int {
	if i <= 0 {
		return 1
	}
	return 1 << uint(i)
}
// dec2binary returns the exponents of the set bits of n in descending
// order; a final 0 entry marks bit zero (so dec2binary(10) == [3 1] and
// dec2binary(11) == [3 1 0]).
func dec2binary(n int) []int {
	var bit []int
	for n > 1 {
		// exp = floor(log2(n)), found by repeated halving.
		exp := 0
		for v := n; v > 1; v /= 2 {
			exp++
		}
		// Strip the highest set bit and record its exponent.
		n -= 1 << uint(exp)
		bit = append(bit, exp)
	}
	if n == 1 {
		bit = append(bit, 0)
	}
	return bit
}
// SortInt sorts arr ascending in place using insertion sort.
func SortInt(arr []int) {
	for j := 1; j < len(arr); j++ {
		key := arr[j]
		// Shift larger elements one slot right, then drop key in place.
		i := j
		for i > 0 && arr[i-1] > key {
			arr[i] = arr[i-1]
			i--
		}
		arr[i] = key
	}
}
// LinearSearch scans arr left to right and returns the index of the first
// element equal to v, or -1 when v is absent.
func LinearSearch(arr []int, v int) int {
	for i := 0; i < len(arr); i++ {
		if arr[i] == v {
			return i
		}
	}
	return -1
}
// BinaryAdd adds two equal-length big-endian binary numbers (slices of 0/1
// digits) and returns their sum, one digit longer to hold the final carry.
func BinaryAdd(a []int, b []int) []int {
	c := make([]int, len(a)+1)
	carry := 0
	// Classic schoolbook addition from the least significant digit.
	for i := len(a) - 1; i >= 0; i-- {
		sum := a[i] + b[i] + carry
		c[i+1] = sum % 2
		carry = sum / 2
	}
	c[0] = carry
	return c
}
|
package amqppool
import (
"errors"
"log"
"os"
"testing"
)
// TestShouldCreateANewAmqpPool verifies NewPool opens maxChannels channels
// against the broker named by AMQP_CONNECTION.
func TestShouldCreateANewAmqpPool(t *testing.T) {
	//Arrange
	connectionString := os.Getenv("AMQP_CONNECTION")
	maxChannels := 10
	logger := log.New(os.Stdout, "", log.LstdFlags)
	//Action
	pool, err := NewPool(connectionString, maxChannels, logger)
	//Assert
	if err != nil {
		// BUGFIX: stop before touching pool — the original deferred
		// pool.Close() before this check, panicking on a nil pool.
		t.Fatalf("Occurred a error to create a new pool: %v", err.Error())
	}
	defer pool.Close()
	if pool.maxChannels != maxChannels {
		t.Errorf("The maximum quantity of channels is inconsistent: Expected %v and found %v",
			maxChannels, pool.maxChannels)
	}
	lenChannelsReleased := len(pool.channelsReleased)
	if lenChannelsReleased != maxChannels {
		t.Errorf("The quantity of channels opend is inconsistent: Expected %v and found %v",
			maxChannels, lenChannelsReleased)
	}
}
// TestShouldReturnAErrorWhenCreateANewAmqpPoolWhenTheConnectionFail expects
// NewPool to fail for the bad connection string in AMQP_CONNECTION_WRONG.
func TestShouldReturnAErrorWhenCreateANewAmqpPoolWhenTheConnectionFail(t *testing.T) {
	//Arrange
	connectionString := os.Getenv("AMQP_CONNECTION_WRONG")
	maxChannels := 10
	logger := log.New(os.Stdout, "", log.LstdFlags)
	//Action
	pool, err := NewPool(connectionString, maxChannels, logger)
	//Assert
	// BUGFIX: the original required BOTH pool != nil AND err == nil to
	// fail, letting a half-broken result (non-nil pool with an error, or
	// nil error with nil pool) slip through. A nil error alone means the
	// expected failure did not happen.
	if err == nil {
		t.Error("Don't occurred an error to create a new pool: Was created with a fail connection")
	}
	// Clean up in case a pool was created despite the bad connection.
	if pool != nil {
		pool.Close()
	}
}
// TestShouldCloseAAmqpPool verifies that a freshly created pool closes
// without error.
func TestShouldCloseAAmqpPool(t *testing.T) {
	//Arrange
	connectionString := os.Getenv("AMQP_CONNECTION")
	maxChannels := 10
	logger := log.New(os.Stdout, "", log.LstdFlags)
	// BUGFIX: the original discarded NewPool's error and would nil-panic
	// on pool.Close() when setup failed.
	pool, err := NewPool(connectionString, maxChannels, logger)
	if err != nil {
		t.Fatalf("setup: could not create pool: %v", err)
	}
	//Action
	err = pool.Close()
	//Assert
	if err != nil {
		t.Errorf("Occurred a error to close the pool: %v", err.Error())
	}
}
// TestShouldCloseAReusableChannelByTheYourId closes channel id 1 and
// expects no error.
func TestShouldCloseAReusableChannelByTheYourId(t *testing.T) {
	//Arrange
	connectionString := os.Getenv("AMQP_CONNECTION")
	maxChannels := 10
	logger := log.New(os.Stdout, "", log.LstdFlags)
	// BUGFIX: check the setup error before deferring Close on a
	// possibly-nil pool.
	pool, err := NewPool(connectionString, maxChannels, logger)
	if err != nil {
		t.Fatalf("setup: could not create pool: %v", err)
	}
	defer pool.Close()
	//Action
	err = pool.CloseReusableChannel(1)
	//Assert
	if err != nil {
		t.Errorf("Occurred a error to close a reusable channel: %v", err.Error())
	}
}
// TestShouldGetAReusableChannelWhenThePoolHaveAnyChannelReleased acquires a
// channel from a pool that has free channels and expects it non-released.
func TestShouldGetAReusableChannelWhenThePoolHaveAnyChannelReleased(t *testing.T) {
	//Arrange
	connectionString := os.Getenv("AMQP_CONNECTION")
	maxChannels := 10
	logger := log.New(os.Stdout, "", log.LstdFlags)
	pool, err := NewPool(connectionString, maxChannels, logger)
	if err != nil {
		t.Fatalf("setup: could not create pool: %v", err)
	}
	defer pool.Close()
	//Action
	reusableChannel, err := pool.GetReusableChannel()
	//Assert
	if err != nil {
		// BUGFIX: fail (and return) before the Release defer — the
		// original deferred reusableChannel.Release() ahead of this check
		// and panicked on a nil channel.
		t.Fatalf("Occurred a error to get a reusable channel: %v", err.Error())
	}
	defer reusableChannel.Release()
	if reusableChannel.released == true {
		t.Errorf("The reusable channel was obtained how released")
	}
}
// TestShouldGetAReusableChannelWhenThePoolDontHaveAnyChannelReleasedButIsNotAtTheLimitOfUse
// closes the pool's only channel, then expects GetReusableChannel to open a
// replacement because the pool is under its channel limit.
func TestShouldGetAReusableChannelWhenThePoolDontHaveAnyChannelReleasedButIsNotAtTheLimitOfUse(t *testing.T) {
	//Arrange
	connectionString := os.Getenv("AMQP_CONNECTION")
	maxChannels := 1
	logger := log.New(os.Stdout, "", log.LstdFlags)
	pool, err := NewPool(connectionString, maxChannels, logger)
	if err != nil {
		t.Fatalf("setup: could not create pool: %v", err)
	}
	defer pool.Close()
	_ = pool.CloseReusableChannel(1)
	//Action
	reusableChannel, err := pool.GetReusableChannel()
	//Assert
	if err != nil {
		// BUGFIX: bail out before deferring Release on a nil channel.
		t.Fatalf("Occurred a error to get a reusable channel: %v", err.Error())
	}
	defer reusableChannel.Release()
	if reusableChannel.released == true {
		t.Errorf("The reusable channel was obtained how released")
	}
}
func TestShouldReturnErrorInGetAReusableChannelWhenTheAllChannelAreInUse(t *testing.T) {
//Arrange
connectionString := os.Getenv("AMQP_CONNECTION")
maxChannels := 1
logger := log.New(os.Stdout, "", log.LstdFlags)
pool, _ := NewPool(connectionString, maxChannels, logger)
defer pool.Close()
reusableChannel, err := pool.GetReusableChannel()
defer reusableChannel.Release()
//Action
reusableChannel, err = pool.GetReusableChannel()
//Assert
if err == nil {
t.Error("Don't occurred a error to get a reusable channel when all are in use.")
}
if reusableChannel != nil {
t.Error("The reusable channel was obtained same when all are in use")
}
if !errors.Is(err, ErrAllChannelsInUse){
t.Error("The type of error returned is different of expected")
}
} |
package template
import (
"net/http"
"github.com/gorilla/mux"
"github.com/wincentrtz/gobase/gobase/infrastructures/db"
"github.com/wincentrtz/gobase/gobase/utils"
"github.com/wincentrtz/gobase/models/dto/responses"
)
// TemplateHandler serves template HTTP endpoints, delegating business logic
// to its usecase.
type TemplateHandler struct {
	templateUsecase TemplateUsecase
}

// NewTemplateHandler wires repository -> usecase -> handler against the
// Postgres connection and registers the template routes on r.
func NewTemplateHandler(r *mux.Router) {
	db := db.Postgres()
	ur := NewTemplateRepository(db)
	us := NewTemplateUsecase(ur)
	handler := &TemplateHandler{
		templateUsecase: us,
	}
	r.HandleFunc("/api/template/{id}", handler.FindById).Methods("GET")
}
// FindById handles GET /api/template/{id}.
// NOTE(review): currently a stub — it ignores the {id} path variable and the
// usecase, always returning a fixed 200 response with Data "template".
// (Name kept as FindById — renaming to FindByID would break the route wiring
// elsewhere in the file.)
func (uh *TemplateHandler) FindById(w http.ResponseWriter, r *http.Request) {
	resp := &responses.BaseResponse{
		Message: http.StatusText(http.StatusOK),
		Code:    http.StatusOK,
		Data:    "template",
	}
	utils.WriteResponse(w, resp, http.StatusOK)
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package shell
import (
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/multicloud/aliyun"
"yunion.io/x/onecloud/pkg/util/shellutils"
)
// init registers the Aliyun NAS mount-target shell commands (list, create,
// delete) with shellutils at package load time.
func init() {
	// Options for listing mount targets of a file system.
	type MountTargetListOptions struct {
		ID         string `help:"Id"`
		DomainName string
		PageSize   int `help:"page size"`
		PageNum    int `help:"page num"`
	}
	shellutils.R(&MountTargetListOptions{}, "mount-target-list", "List MountTargets", func(cli *aliyun.SRegion, args *MountTargetListOptions) error {
		mounts, _, err := cli.GetMountTargets(args.ID, args.DomainName, args.PageSize, args.PageNum)
		if err != nil {
			return err
		}
		printList(mounts, 0, 0, 0, []string{})
		return nil
	})
	shellutils.R(&cloudprovider.SMountTargetCreateOptions{}, "mount-target-create", "Create Nas MountTarget", func(cli *aliyun.SRegion, args *cloudprovider.SMountTargetCreateOptions) error {
		mt, err := cli.CreateMountTarget(args)
		if err != nil {
			return err
		}
		printObject(mt)
		return nil
	})
	// NOTE(review): "MOUT_POINT_ID" looks like a typo for MOUNT_POINT_ID,
	// but the field name likely drives the CLI flag name — renaming would
	// change the user-visible flag, so it is left as-is.
	type MountTargetDeleteOptions struct {
		FILE_SYSTEM_ID string
		MOUT_POINT_ID  string
	}
	shellutils.R(&MountTargetDeleteOptions{}, "mount-target-delete", "Delete Nas MountTarget", func(cli *aliyun.SRegion, args *MountTargetDeleteOptions) error {
		return cli.DeleteMountTarget(args.FILE_SYSTEM_ID, args.MOUT_POINT_ID)
	})
}
|
package main
import (
"context"
"fmt"
"strconv"
"errors"
"time"
"github.com/go-redis/redis/v8"
)
// Database abstracts the visit-counting store used by the rate limiter.
// NOTE(review): RedisServer's methods are named Get/Set while this interface
// declares GetKey/SetKey — as written, *RedisServer does NOT satisfy
// Database; confirm which side should be renamed.
type Database interface {
	GetKey(string) (string, string, error) // remaining visits and TTL for a key
	SetKey(string) error                   // initialize a key's counter
	CheckExist(string) bool                // whether a key has a counter
	IncreaseVisit(string) error            // bump a key's counter
	FlushAll() error                       // clear the whole store
	Lock() error                           // acquire the global lock
	Unlock() error                         // release the global lock
}

// RedisServer implements the rate-limiter store on top of a Redis client.
type RedisServer struct {
	timeout time.Duration   // expiry applied to each IP's counter
	client  *redis.Client   // underlying go-redis client
	ctx     context.Context // context used for client calls
	maxIP   int             // allowed visits per IP within timeout
}
// NewRedis connects to Redis at host:port (DB 0), verifies the connection
// with a ping, and returns a RedisServer allowing maxIP visits per key
// within timeout seconds.
func NewRedis(maxIP, timeout int, host, port string) (*RedisServer, error) {
	// Initialize all fields (including ctx) before any client call; the
	// original pinged via client.Context() because rs.ctx was not yet set.
	rs := &RedisServer{
		ctx:     context.Background(),
		timeout: time.Duration(timeout) * time.Second,
		maxIP:   maxIP,
	}
	rs.client = redis.NewClient(&redis.Options{
		Addr: fmt.Sprintf("%v:%v", host, port),
		DB:   0,
	})
	// Fail fast so callers get an immediate connectivity error.
	if err := rs.client.Ping(rs.ctx).Err(); err != nil {
		return nil, err
	}
	return rs, nil
}
// Set stores ip with an initial visit count of "1" that expires after the
// configured timeout.
func (rs *RedisServer) Set(ip string) error {
	return rs.client.Set(rs.ctx, ip, "1", rs.timeout).Err()
}
// CheckExist reports whether ip currently has a counter stored.
// NOTE(review): any error other than redis.Nil (e.g. a network failure) also
// returns true, conflating "exists" with "store unreachable" — confirm this
// fail-open behavior is intended.
func (rs *RedisServer) CheckExist(ip string) bool {
	_, err := rs.client.Get(rs.ctx, ip).Result()
	return err != redis.Nil
}
// Get returns, for ip, the remaining visit allowance (maxIP minus the
// stored counter, as a string) and the key's TTL rendered as a string.
func (rs *RedisServer) Get(ip string) (string, string, error) {
	// Consistency fix: use rs.ctx for every call; the original mixed
	// rs.ctx elsewhere with rs.client.Context() here.
	count, err := rs.client.Get(rs.ctx, ip).Int()
	if err != nil {
		return "", "", err
	}
	ttl := rs.client.TTL(rs.ctx, ip).Val().String()
	remaining := rs.maxIP - count
	return strconv.Itoa(remaining), ttl, nil
}
// IncreaseVisit atomically increments the visit counter stored under ip.
func (rs *RedisServer) IncreaseVisit(ip string) error {
	return rs.client.Incr(rs.ctx, ip).Err()
}
// FlushAll wipes every key in the Redis database.
func (rs *RedisServer) FlushAll() error {
	return rs.client.FlushAll(rs.ctx).Err()
}
// Lock acquires a global distributed lock using the SETNX pattern from
// https://redis.io/commands/setnx: the "lock" key stores the Unix time at
// which the lock expires. If the holder's deadline passes, contenders race
// with GETSET and only the one that reads a stale value wins. Spins with a
// 10ms sleep until acquired; returns an error only on Redis failures.
func (rs *RedisServer) Lock() error {
	for {
		lockTimeout := time.Now().Add(10 * time.Second).Unix()
		ok, err := rs.client.SetNX(rs.ctx, "lock", lockTimeout, 0).Result()
		if err != nil {
			return err
		}
		// successfully get lock
		if ok {
			break
		}
		// the lock is taken; read the holder's deadline
		TTL, err := rs.client.Get(rs.ctx, "lock").Int64()
		if err != nil {
			return err
		}
		// if expired
		curr := time.Now().Unix()
		if TTL <= curr {
			lockTimeout = time.Now().Add(10 * time.Second).Unix()
			// if multiple clients compete to snatch the lock, GETSET makes
			// exactly one of them observe the stale (expired) value.
			response, err := rs.client.GetSet(rs.ctx, "lock", lockTimeout).Int64()
			if err != nil {
				return err
			}
			// response should not timeout if another client get the lock
			if curr > response {
				break
			}
		}
		time.Sleep(10 * time.Millisecond)
	}
	return nil
}
// Unlock releases the distributed lock taken by Lock. If the stored
// deadline has already passed, another client may have reacquired the lock,
// so the key is deliberately left alone. Otherwise the key is deleted and
// an error is returned if nothing was deleted.
// NOTE(review): the read-then-delete is not atomic — between Get and Del a
// competing client could take the lock and have it deleted from under it;
// confirm whether a check-and-delete script is needed.
func (rs *RedisServer) Unlock() error {
	now := time.Now().Unix()
	response, err := rs.client.Get(rs.ctx, "lock").Int64()
	if err != nil {
		return err
	}
	// shouldn't unlock due to successfully lock
	if now > response {
		return nil
	}
	response, err = rs.client.Del(rs.ctx, "lock").Result()
	if err != nil {
		return err
	}
	if response != 1 {
		return errors.New("fail to delete lock")
	}
	return nil
}
package function
import (
"encoding/json"
"errors"
"github.com/hecatoncheir/Storage"
"log"
"os"
)
// Storage is the persistence seam used by Executor: CreateJSON stores an
// encoded document and returns its new UID.
type Storage interface {
	CreateJSON([]byte) (string, error)
}

// Functions is the read-side seam: ReadPageInstructionByID fetches a stored
// page instruction by UID for a given language.
type Functions interface {
	ReadPageInstructionByID(string, string) storage.PageInstruction
}

// Executor coordinates writes through Store and reads through Functions.
type Executor struct {
	Store     Storage
	Functions Functions
}

// ExecutorLogger is the package logger for Executor operations.
var ExecutorLogger = log.New(os.Stdout, "Executor: ", log.Lshortfile)

var (
	// ErrPageInstructionCanNotBeCreated means that the page instruction can't be added to database
	ErrPageInstructionCanNotBeCreated = errors.New("page instruction can't be created")
)
// CreatePageInstruction make PageInstruction and save it to storage:
// it JSON-encodes pageInstruction, stores it, and reads the created record
// back (in the given language) for the caller.
// NOTE(review): both failure paths collapse the underlying error into the
// sentinel ErrPageInstructionCanNotBeCreated, discarding the root cause —
// consider logging err via ExecutorLogger before returning.
func (executor *Executor) CreatePageInstruction(pageInstruction storage.PageInstruction, language string) (storage.PageInstruction, error) {
	encodedPageInstruction, err := json.Marshal(pageInstruction)
	if err != nil {
		return pageInstruction, ErrPageInstructionCanNotBeCreated
	}
	uidOfCreatedPageInstruction, err := executor.Store.CreateJSON(encodedPageInstruction)
	if err != nil {
		return pageInstruction, ErrPageInstructionCanNotBeCreated
	}
	createdPageInstruction := executor.Functions.ReadPageInstructionByID(uidOfCreatedPageInstruction, language)
	return createdPageInstruction, nil
}
|
package backtracking
import (
"reflect"
"testing"
"github.com/NBR41/gosudoku/model"
)
// TestGetMapValues checks that getMapValues(4) yields the set {1,2,3,4}.
func TestGetMapValues(t *testing.T) {
	got := getMapValues(4)
	want := map[int]struct{}{1: {}, 2: {}, 3: {}, 4: {}}
	if !reflect.DeepEqual(want, got) {
		t.Error("unexpected value")
	}
}
// TestGetPossibilities builds a 2x2 grid with "0-0" fixed to 1 and checks
// the candidate assignments returned by getPossibilities for value 2.
// NOTE(review): the per-position assertions chain conditions with && —
// e.g. pos[0] only fails when it is neither "0-1" NOR "1-0" AND has wrong
// values — which looks like it should be || on the value check; confirm the
// intended predicate before tightening.
func TestGetPossibilities(t *testing.T) {
	cells := map[string]model.Celler{
		"0-0": newNode(0, 0),
		"0-1": newNode(0, 1),
		"1-0": newNode(1, 0),
		"1-1": newNode(1, 1),
	}
	v := 1
	cells["0-0"].Set(&v)
	// Wire each cell's row/column peer groups by hand.
	cells["0-0"].(*node).nodes = [][]model.Celler{{cells["0-1"]}, {cells["1-0"]}}
	cells["0-1"].(*node).nodes = [][]model.Celler{{cells["0-0"]}, {cells["1-1"]}}
	cells["1-0"].(*node).nodes = [][]model.Celler{{cells["0-0"]}, {cells["1-1"]}}
	cells["1-1"].(*node).nodes = [][]model.Celler{{cells["0-1"]}, {cells["1-0"]}}
	pos := getPossibilities(cells, 2)
	if len(pos) != 3 {
		t.Error("unexpected value")
	} else {
		if pos[0].Node.Key() != "0-1" && pos[0].Node.Key() != "1-0" && !reflect.DeepEqual(pos[0].Values, []int{2}) {
			t.Error("unexpected value")
		}
		if pos[1].Node.Key() != "0-1" && pos[1].Node.Key() != "1-0" && !reflect.DeepEqual(pos[1].Values, []int{2}) {
			t.Error("unexpected value")
		}
		if pos[2].Node.Key() != "1-1" && pos[2].Node.Key() != "1-0" && !reflect.DeepEqual(pos[2].Values, []int{1, 2}) {
			t.Error("unexpected value")
		}
	}
}
|
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package mirbft
import (
pb "github.com/IBM/mirbft/mirbftpb"
)
// Actions are the responsibility of the library user to fulfill.
// The user receives a set of Actions from a read of *Node.Ready(),
// and it is the user's responsibility to execute all actions, returning
// ActionResults to the *Node.AddResults call.
// TODO add details about concurrency
type Actions struct {
	// Broadcast messages should be sent to every node in the cluster (including yourself).
	Broadcast []*pb.Msg
	// Unicast messages should be sent only to the specified target.
	Unicast []Unicast
	// Preprocess is a set of messages (and their origins) for pre-processing.
	// For each item in the Preprocess list, the caller must AddResult with a PreprocessResult.
	// The source of the proposal is included in case the caller wishes to do more
	// validation on proposals originating from other nodes than proposals originating from
	// itself.
	Preprocess []Proposal
	// Digest is a set of batches which have been proposed by a node in the network.
	// For each item in the Digest list, the caller must AddResult with a DigestResult.
	// The resulting digest is used as an alias for the underlying data, and it should
	// exhibit the properties of a strong hash function.
	Digest []*Entry
	// Validate is a set of batches which have been proposed by a node in the network.
	// For each item in the Validate list, the caller must AddResult with a ValidateResult.
	// The validation should check that every message in the batch is valid according to
	// the application logic. Note, it is expected that all batches are valid, except
	// under byzantine behavior from a node. An invalid batch will result in an epoch
	// change and or state transfer.
	Validate []*Entry
	// Commit is a set of batches which have achieved final order and are ready to commit.
	Commit []*Entry
	// Checkpoint is a set of sequence numbers for which all previous sequence numbers in
	// all buckets have been sent to Commit. It is the responsibility of the user to
	// ensure that all commits up to and including (but not past) this sequence number
	// have been applied. For each Checkpoint in the list, it is the responsibility of
	// the caller to AddResult with a CheckpointResult.
	Checkpoint []uint64
}
// Clear nils out all of the fields.
func (a *Actions) Clear() {
	// Assigning the zero value leaves every slice field nil, exactly as
	// nilling each field individually would.
	*a = Actions{}
}
// IsEmpty returns whether every field is zero in length.
func (a *Actions) IsEmpty() bool {
	switch {
	case len(a.Broadcast) != 0:
		return false
	case len(a.Unicast) != 0:
		return false
	case len(a.Preprocess) != 0:
		return false
	case len(a.Digest) != 0:
		return false
	case len(a.Validate) != 0:
		return false
	case len(a.Commit) != 0:
		return false
	case len(a.Checkpoint) != 0:
		return false
	default:
		return true
	}
}
// Append takes a set of actions and for each field, appends it to
// the corresponding field of itself. The argument o is not modified.
func (a *Actions) Append(o *Actions) {
	a.Broadcast = append(a.Broadcast, o.Broadcast...)
	a.Unicast = append(a.Unicast, o.Unicast...)
	a.Preprocess = append(a.Preprocess, o.Preprocess...)
	a.Digest = append(a.Digest, o.Digest...)
	a.Validate = append(a.Validate, o.Validate...)
	a.Commit = append(a.Commit, o.Commit...)
	a.Checkpoint = append(a.Checkpoint, o.Checkpoint...)
}
// Unicast is an action to send a message to a particular node.
type Unicast struct {
	Target uint64 // node ID of the recipient
	Msg    *pb.Msg
}

// ActionResults should be populated by the caller as a result of
// executing the actions, then returned to the state machine.
type ActionResults struct {
	Digests      []DigestResult
	Validations  []ValidateResult
	Preprocesses []PreprocessResult
	Checkpoints  []*CheckpointResult
}

// CheckpointResult gives the state machine a verifiable checkpoint for the network
// to return to, and allows it to prune previous entries from its state.
type CheckpointResult struct {
	// SeqNo is the sequence number of this checkpoint.
	SeqNo uint64
	// Value is a concise representation of the state of the application when
	// all entries less than or equal to (but not greater than) the sequence
	// have been applied. Typically, this is a hash of the world state, usually
	// computed from a Merkle tree, hash chain, or other structure exhibiting
	// the properties of a strong hash function.
	Value []byte
	// Attestation is non-repudiable evidence that this node agrees with the checkpoint
	// Value for this sequence number. Typically, this is a signature from a private key
	// of a known public/private key pair, but other schemes are possible.
	Attestation []byte
}

// Proposal is data which is proposed to be included in a batch and appended to the log.
type Proposal struct {
	// Source is the node which originated the proposal.
	Source uint64
	// Data is the message of the proposal
	Data []byte
}

// DigestResult gives the state machine a digest by which to refer to a particular entry.
// The digest will be sent in place of the entry's batch, during the Prepare and Commit
// phases and should generally be computed using a strong hashing function.
type DigestResult struct {
	Entry  *Entry
	Digest []byte
}

// ValidateResult gives the state machine information about whether a
// particular entry should be considered valid. Note, that indicating an entry
// is not valid implies that the leader who proposed it is behaving in a
// byzantine way.
type ValidateResult struct {
	Entry *Entry
	Valid bool
}

// PreprocessResult gives the state machine a location which may be used
// to assign a proposal to a bucket for ordering.
type PreprocessResult struct {
	// Cup is a 'smaller bucket', and should ideally be uniformly
	// distributed across the uint64 space. The Cup is used to assign
	// proposals to a particular bucket (which will service all requests
	// assigned to this Cup as well as many others).
	Cup uint64
	// Proposal is the proposal which was processed into this Preprocess result.
	Proposal Proposal
}

// Entry represents a log entry which may be digested, validated, and
// committed (the original comment was truncated mid-sentence; this
// completion is inferred from how Digest/Validate/Commit use *Entry).
type Entry struct {
	Epoch    uint64
	SeqNo    uint64
	BucketID uint64
	Batch    [][]byte
}
|
package LeetCode
import (
"fmt"
)
// Code264 demos LeetCode 264 (Ugly Number II) by printing the 100th ugly
// number.
func Code264() {
	num := nthUglyNumber(100)
	fmt.Println(num)
}
/**
编写一个程序,找出第 n 个丑数。
丑数就是只包含质因数 2, 3, 5 的正整数。
示例:
输入: n = 10
输出: 12
解释: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 是前 10 个丑数。
说明:
1 是丑数。
n 不超过1690。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/ugly-number-ii
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
*/
/**
* 超时解法1 会超时..
*
**/
/*func nthUglyNumber(n int) int {
pool := make([]int, 0)
if n == 1 {
return 1
}
for i := 1; i < math.MaxInt64; i++ {
temp := i
if len(pool) == n {
break
}
for temp%2 == 0 {
temp = temp / 2
}
for temp%3 == 0 {
temp = temp / 3
}
for temp%5 == 0 {
temp = temp / 5
}
if temp == 1 {
pool = append(pool, i)
}
}
return pool[len(pool)-1]
}*/
// nthUglyNumber returns the n-th ugly number (positive integers whose only
// prime factors are 2, 3 and 5), with 1 counted as the first. Three-pointer
// DP: dp[i] is the (i+1)-th ugly number, each pointer tracks the smallest
// dp entry not yet multiplied by its factor.
func nthUglyNumber(n int) int {
	if n == 1 {
		return n
	}
	dp := make([]int, n)
	dp[0] = 1
	int2, int3, int5 := 0, 0, 0
	for i := 1; i < n; i++ {
		// Candidates from each factor pointer; computed once (the
		// original recomputed them inside a separate min helper).
		a, b, c := 2*dp[int2], 3*dp[int3], 5*dp[int5]
		next := a
		if b < next {
			next = b
		}
		if c < next {
			next = c
		}
		dp[i] = next
		// Advance every pointer that produced next, so duplicates like
		// 2*3 and 3*2 are emitted only once.
		if next == a {
			int2++
		}
		if next == b {
			int3++
		}
		if next == c {
			int5++
		}
	}
	// The answer is simply the last dp entry; the original pushed every
	// value onto an auxiliary Stack just to pop the last one back off.
	return dp[n-1]
}
// minThree returns the smallest of a, b and c.
func minThree(a, b, c int) int {
	m := a
	if b < m {
		m = b
	}
	if c < m {
		m = c
	}
	return m
}
|
// Copyright 2020 Dell Inc, or its subsidiaries.
// SPDX-License-Identifier: Apache-2.0
package service
import (
"encoding/json"
"flag"
"fmt"
"os"
"gitlab.eng.vmware.com/dell-iot/iotss-utils/util"
"gitlab.eng.vmware.com/dell-iot/iotss/go-skeleton-project/model"
"gitlab.eng.vmware.com/dell-iot/iotss/go-skeleton-project/shared"
fileutil "gitlab.eng.vmware.com/dell-iot/iotss/go-skeleton-project/util"
)
var appConfig model.AppConfig
var standardLogger = shared.GlobalLogger
const (
configPath = "asset/config/"
configFileExt = "_application.json"
)
// AddArgs adds command line args to shared global struct
func AddArgs() {
secret, stage := initFlag()
if *secret == "" || *stage == "" {
standardLogger.InvalidArg("startup")
os.Exit(-1)
}
shared.GlobalEnv.SaltSecret = *secret
shared.GlobalEnv.Environment = *stage
standardLogger.Info("args added")
}
// AddConfig adds admin password to shared global config
func AddConfig() {
byteValue, err := fileutil.ReadFile(getConfigFile())
if err != nil {
standardLogger.Error(fmt.Sprintf("unable to read config file, %v", err))
}
err = json.Unmarshal(byteValue, &appConfig)
if err != nil {
standardLogger.Error(fmt.Sprintf("unable to decode into struct, %v", err))
}
shared.GlobalConfig = appConfig
adminPass, err := util.DecodeAndDecrypt(shared.GlobalConfig.ServiceConfiguration.ServiceVCenterDetails.Password, shared.GlobalEnv.SaltSecret)
if err != nil {
standardLogger.Error(err.Error())
}
shared.GlobalConfig.ServiceConfiguration.ServiceVCenterDetails.Password = string(adminPass)
standardLogger.Info("config added")
}
// getConfigFile builds the stage-specific config file path, e.g.
// "asset/config/<stage>_application.json".
func getConfigFile() (configFile string) {
	configFile = configPath + shared.GlobalEnv.Environment + configFileExt
	return
}
// initFlag registers the -secret and -stage string flags and parses the
// command line. Note: flag.Parse is invoked here, so any other flags must
// be registered before this function runs.
func initFlag() (*string, *string) {
	secret := flag.String("secret", "", "secret string")
	stage := flag.String("stage", "", "stage string")
	flag.Parse()
	return secret, stage
}
|
/*
* Copyright 2022 Kube Admission Webhook Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package certificate
import (
"context"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo/reporters"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
var (
	// testEnv is the envtest control plane used by the suite.
	testEnv *envtest.Environment
	// cli talks to the envtest API server; initialized in BeforeSuite.
	cli client.Client
	//TODO: Read it from flag or put true if we have a
	// KUBECONFIG env var
	useCluster  = false
	sideEffects = admissionregistrationv1.SideEffectClassNone
	// Expected fixtures the suite creates and asserts against.
	expectedNamespace = corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foowebhook",
		},
	}
	expectedService = corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "foowebhook",
			Name:      "foowebhook-service",
		},
		Spec: corev1.ServiceSpec{
			Ports: []corev1.ServicePort{
				{
					Name: "https",
					Port: 8443,
				},
			},
		},
	}
	// Webhook configuration pointing at the service above.
	expectedMutatingWebhookConfiguration = admissionregistrationv1.MutatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foowebhook",
		},
		Webhooks: []admissionregistrationv1.MutatingWebhook{
			{
				SideEffects:             &sideEffects,
				AdmissionReviewVersions: []string{"v1"},
				Name:                    "foowebhook.qinqon.io",
				ClientConfig: admissionregistrationv1.WebhookClientConfig{
					Service: &admissionregistrationv1.ServiceReference{
						Name:      expectedService.Name,
						Namespace: expectedService.Namespace,
					},
				},
			},
		},
	}
	// TLS secret shares the service's namespace/name.
	expectedSecret = corev1.Secret{
		ObjectMeta: expectedService.ObjectMeta,
	}
	// CA secret is named after the webhook configuration with a "-ca" suffix.
	expectedCASecret = corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: expectedNamespace.Name,
			Name:      expectedMutatingWebhookConfiguration.Name + "-ca",
		},
	}
)
// createResources creates the mutating webhook configuration and service
// fixtures in the cluster. DeepCopy is used so the package-level expected
// objects are never mutated by the API server (e.g. resourceVersion).
// Offset 1 attributes any assertion failure to the caller's line.
func createResources() {
	err := cli.Create(context.TODO(), expectedMutatingWebhookConfiguration.DeepCopy())
	ExpectWithOffset(1, err).ToNot(HaveOccurred(), "should success creating mutatingwebhookconfiguration")
	err = cli.Create(context.TODO(), expectedService.DeepCopy())
	ExpectWithOffset(1, err).ToNot(HaveOccurred(), "should success creating service")
}
// deleteResources best-effort deletes all suite fixtures (delete errors
// are deliberately ignored: a fixture may not exist for every test), then
// waits up to 10s for the CA secret to actually disappear so the next
// test starts from a clean state.
func deleteResources() {
	_ = cli.Delete(context.TODO(), &expectedMutatingWebhookConfiguration)
	_ = cli.Delete(context.TODO(), &expectedService)
	_ = cli.Delete(context.TODO(), &expectedSecret)
	_ = cli.Delete(context.TODO(), &expectedCASecret)
	EventuallyWithOffset(1, func() error {
		secretKey := types.NamespacedName{
			Namespace: expectedCASecret.Namespace,
			Name:      expectedCASecret.Name,
		}
		return cli.Get(context.TODO(), secretKey, &corev1.Secret{})
	}, 10*time.Second, 1*time.Second).ShouldNot(Succeed(), "should eventually fail getting deleted CA secret")
}
// Suite setup: start the envtest API server, build a client against it,
// and create the shared namespace.
var _ = BeforeSuite(func() {
	klog.InitFlags(nil)
	testEnv = &envtest.Environment{
		UseExistingCluster: &useCluster,
	}
	cfg, err := testEnv.Start()
	Expect(err).ToNot(HaveOccurred(), "should success starting testenv")
	cli, err = client.New(cfg, client.Options{})
	Expect(err).ToNot(HaveOccurred(), "should success creating client")
	// Ideally we create/delete the namespace at every test but, envtest
	// cannot delete namespaces [1] so we just create it at the beginning
	// of the test suite.
	//
	// [1] https://book.kubebuilder.io/reference/testing/envtest.html?highlight=envtest#testing-considerations
	By("Create namespace, webhook configuration and service")
	err = cli.Create(context.TODO(), &expectedNamespace)
	Expect(err).ToNot(HaveOccurred(), "should success creating namespace")
})
// Suite teardown: the namespace is only deleted when running against a
// real cluster (envtest cannot delete namespaces — see BeforeSuite),
// then the test environment is stopped.
var _ = AfterSuite(func() {
	if useCluster {
		err := cli.Delete(context.TODO(), &expectedNamespace)
		Expect(err).ToNot(HaveOccurred(), "should success deleting namespace")
	}
	err := testEnv.Stop()
	Expect(err).ToNot(HaveOccurred(), "should success stopping testenv")
})
// TestCertificate is the go test entry point: it wires Gomega failures
// into Ginkgo and runs the suite with an additional JUnit XML reporter.
func TestCertificate(t *testing.T) {
	RegisterFailHandler(Fail)
	junitReporter := reporters.NewJUnitReporter("junit.certificate_suite_test.xml")
	RunSpecsWithDefaultAndCustomReporters(t, "Certificate Test Suite", []Reporter{junitReporter})
}
|
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"os"
)
var (
	// VERSION is the build version, injected via Execute at startup.
	VERSION string
)

// RootCmd is the top-level cobra command for the sweady CLI.
var RootCmd = &cobra.Command{
	Use:   "sweady",
	Short: "First cluster docker swarm ready for production",
}
// Execute records the build version and runs the root command, printing
// any error and exiting with -1 on failure.
func Execute(version string) {
	VERSION = version
	err := RootCmd.Execute()
	if err == nil {
		return
	}
	fmt.Println(err)
	os.Exit(-1)
}
|
package main
//code: https://play.golang.org/p/Lmbyn7bO7e
import (
"context"
"fmt"
"runtime"
"time"
)
// main demonstrates context cancellation: a background goroutine ticks
// every 200ms until the context is cancelled, and the goroutine count is
// printed before and after cancellation to show cleanup.
func main() {
	ctx, cancel := context.WithCancel(context.Background())
	fmt.Println("error check 1:", ctx.Err())
	fmt.Println("num gortins 1:", runtime.NumGoroutine())

	go func() {
		count := 0
		for {
			select {
			case <-ctx.Done():
				// Context cancelled: stop working.
				return
			default:
				count++
				time.Sleep(200 * time.Millisecond)
				fmt.Println("working", count)
			}
		}
	}()

	time.Sleep(2 * time.Second)
	fmt.Println("error check 2:", ctx.Err())
	fmt.Println("num gortins 2:", runtime.NumGoroutine())

	fmt.Println("about to cancel context")
	cancel()
	fmt.Println("cancelled context")

	time.Sleep(2 * time.Second)
	fmt.Println("error check 3:", ctx.Err())
	fmt.Println("num gortins 3:", runtime.NumGoroutine())
}
//Context
//In Go servers, each incoming request is handled in its own goroutine. Request handlers often start additional goroutines to access backends such as databases and RPC services. The set of goroutines working on a request typically needs access to request-specific values such as the identity of the end user, authorization tokens, and the request's deadline. When a request is canceled or times out, all the goroutines working on that request should exit quickly so the system can reclaim any resources they are using. At Google, we developed a context package that makes it easy to pass request-scoped values, cancellation signals, and deadlines across API boundaries to all the goroutines involved in handling a request. The package is publicly available as context. This article describes how to use the package and provides a complete working example.
//further reading:
//https://blog.golang.org/context
//https://medium.com/@matryer/context-has-arrived-per-request-state-in-go-1-7-4d095be83bd8
//https://peter.bourgon.org/blog/2016/07/11/context.html
//code:
//exploring context
//background
//https://play.golang.org/p/cByXyrxXUf
//WithCancel
//throwing away CancelFunc
//https://play.golang.org/p/XOknf0aSpx
//using CancelFunc
//https://play.golang.org/p/UzQxxhn_fm
//Example
//https://play.golang.org/p/Lmbyn7bO7e
//func WithCancel(parent Context) (ctx Context, cancel CancelFunc)
//https://play.golang.org/p/wvGmvMzIMW
//cancelling goroutines with deadline
//func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc)
//https://play.golang.org/p/Q6mVdQqYTt
//with timeout
//func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc)
//https://play.golang.org/p/OuES9sP_yX
//with value
//func WithValue(parent Context, key, val interface{}) Context
//https://play.golang.org/p/8JDCGk1K4P
//video: 163
|
package whoislookup
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"github.com/PuerkitiBio/goquery"
"github.com/davecgh/go-spew/spew"
"io/ioutil"
"log"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"regexp"
"strings"
"sync"
"time"
)
// use for dumping in nicer format
// "github.com/davecgh/go-spew/spew"
// spew.Dump(<anything>)

// Whois source identifiers; 0 is reserved as "pick a random source"
// (see lookup).
const localWhois = 1
const whoisLookupSite = 2
const domainBigDataSite = 3
const markMonitorSite = 4
const youGetSignalSite = 5
const ipAddressOrgSite = 6
const robTexSite = 7
const domainPunchSite = 8
const whoisDomainSearch = 9

// Blocked tracks whether a source has started refusing our requests.
type Blocked struct {
	Source int
	Status bool
	Count  int
}

// Header is a single HTTP header name/value pair.
type Header struct {
	Name  string
	Value string
}

// Query is a single URL query-string key/value pair.
type Query struct {
	Key   string
	Value string
}

// used for get or post request
type Request struct {
	Method  string   // Request Method
	Url     *url.URL // Url requried
	Body    []byte   // Post request body
	Headers []Header
	Params  []Query // Get params ( Post params are build at request time )
}

// domainbigdata response is in the format
// {"d": "whois information"}
type DomainBigDataMessage struct {
	D string // keys need to be in capital when doing json mapping
}

// yougetsignal json response is in below format
type YouGetSignalMessage struct {
	DomainAvailable string
	Message         string
	RemoteAddress   string
	Status          string
	WhoisData       string
}

// from domainpunch response we'll only take the RAW part
type DomainPunchMessage struct {
	RAW string
}

// output structure
type Output struct {
	Query  string
	Method int
	Whois  string
	IP     string
}
// global
// initial default block status & count
// blockers maps each source id to its current block state.
var blockers = make(map[int]Blocked)

// initBlockers seeds the blockers map with an unblocked entry for every
// known source. Must be called before any lookup.
func initBlockers() {
	for _, source := range getAllSources() {
		blockers[source] = Blocked{Source: source, Status: false, Count: 0}
	}
}
// getHostIPAddress returns the first IPv4 address found on an interface
// that is up and not a loopback. It errors if interface enumeration
// fails or no suitable address exists (host likely offline).
func getHostIPAddress() (string, error) {
	interfaces, err := net.Interfaces()
	if err != nil {
		return "", err
	}
	for _, iface := range interfaces {
		if iface.Flags&net.FlagUp == 0 {
			continue // interface down
		}
		if iface.Flags&net.FlagLoopback != 0 {
			continue // loopback interface
		}
		addrs, err := iface.Addrs()
		if err != nil {
			return "", err
		}
		for _, addr := range addrs {
			var ip net.IP
			switch v := addr.(type) {
			case *net.IPNet:
				ip = v.IP
			case *net.IPAddr:
				ip = v.IP
			}
			if ip == nil || ip.IsLoopback() {
				continue
			}
			// To4 returns nil for addresses that are not IPv4.
			ip = ip.To4()
			if ip == nil {
				continue // not an ipv4 address
			}
			return ip.String(), nil
		}
	}
	return "", errors.New("are you connected to the network?")
}
// getAllSources lists every whois source id this package knows about.
func getAllSources() []int {
	all := []int{
		localWhois,
		whoisLookupSite,
		domainBigDataSite,
		markMonitorSite,
		youGetSignalSite,
		ipAddressOrgSite,
		robTexSite,
		domainPunchSite,
		whoisDomainSearch,
	}
	return all
}
// getSources returns the source ids applicable to the given query: IP
// queries skip the domain-only sources (whoislookup, yougetsignal,
// domainpunch); domain queries can use every source.
func getSources(search string) []int {
	sources := make([]int, 0)
	if isIP(search) {
		return append(sources,
			localWhois,
			domainBigDataSite,
			markMonitorSite,
			ipAddressOrgSite,
			robTexSite,
			whoisDomainSearch)
	}
	return append(sources,
		localWhois,
		whoisLookupSite,
		domainBigDataSite,
		markMonitorSite,
		youGetSignalSite,
		ipAddressOrgSite,
		robTexSite,
		domainPunchSite,
		whoisDomainSearch)
}
// areMaximumSitesBlockingUs reports whether at least 80% of the given
// sources are currently flagged as blocking us.
//
// Bug fixed: the original threshold int((float64(len(sources)/80))*100)
// used integer division, so for any realistic source count (< 80) it was
// always 0 and the function returned true unconditionally.
func areMaximumSitesBlockingUs(sources []int) bool {
	blocked := 0
	for _, source := range sources {
		if blockers[source].Status {
			blocked++
		}
	}
	// Compare in floating point so the 80% threshold is not truncated.
	return float64(blocked) >= 0.8*float64(len(sources))
}
// isLocalWhoisBlockingUs reports whether the local whois binary source
// has been flagged as blocked.
func isLocalWhoisBlockingUs() bool {
	return isSourceBlockingUs(localWhois)
}
// isSourceBlockingUs reports whether the given source id is currently
// flagged as blocked in the global blockers map.
func isSourceBlockingUs(source int) bool {
	return blockers[source].Status
}
// randomMethod picks a random, preferably unblocked, source for the
// query. If the chosen source is blocked it retries; once most sources
// are blocked it falls back to local whois, and gives up entirely when
// even local whois is blocked.
//
// Bug fixed: the recursive retry's result was discarded
// (`randomMethod(search)` without `return`), so a blocked source could
// still be returned to the caller.
func randomMethod(search string) int {
	sources := getSources(search)
	// Fisher–Yates shuffle
	// shuffle without allocating any additional slices.
	for i := range sources {
		j := rand.Intn(i + 1)
		sources[i], sources[j] = sources[j], sources[i]
	}
	source := sources[rand.Intn(len(sources))]
	if isSourceBlockingUs(source) {
		if areMaximumSitesBlockingUs(sources) {
			if isLocalWhoisBlockingUs() {
				// Give Up!
				log.Fatal("All sources are blocking us!")
			}
			// exhaust local usage
			return localWhois
		}
		// Retry with a fresh random pick and propagate its result.
		return randomMethod(search)
	}
	return source
}
// randomUserAgent returns one of a fixed pool of browser User-Agent
// strings, chosen uniformly at random, to vary the scraper's fingerprint.
//
// Simplification: the original Fisher–Yates-shuffled the pool and then
// additionally picked a random index — double randomization with the same
// uniform distribution. A single random index pick suffices.
func randomUserAgent() string {
	userAgents := []string{
		"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
		"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.53 Safari/525.19",
		"Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US) AppleWebKit/533.4 (KHTML, like Gecko) Chrome/5.0.375.86 Safari/533.4",
		"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML,like Gecko) Chrome/9.1.0.0 Safari/540.0",
		"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Ubuntu/10.10 Chromium/8.0.552.237 Chrome/8.0.552.237 Safari/534.10",
		"Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16",
		"Mozilla/5.0 (Windows; U; Windows NT 6.1; x64; fr; rv:1.9.2.13) Gecko/20101203 Firebird/3.6.13",
		"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A",
		"Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25",
		"Mozilla/5.0 (Windows NT 5.2; RW; rv:7.0a1) Gecko/20091211 SeaMonkey/9.23a1pre",
		"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1",
		"Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0",
		"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0",
		"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0",
	}
	return userAgents[rand.Intn(len(userAgents))]
}
// replaces the html tags present in the whois string
func replaceHTMLTags(whois string) string {
cleanedWhois := strings.TrimSpace(whois)
reg := regexp.MustCompile(`(?U)<.*>`)
return reg.ReplaceAllString(cleanedWhois, "\n")
}
// extra blank lines are replaced
func replaceBlankLines(whois string) string {
// in case of error, let's send back the original whois string
regex, err := regexp.Compile("\n\n")
if err != nil {
return whois
}
return regex.ReplaceAllString(whois, "\n")
}
// dumpResponse spew-dumps the response body for debugging. Note: it
// consumes response.Body, so the caller cannot read the body afterwards.
func dumpResponse(response *http.Response) {
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		spew.Dump(err)
	}
	spew.Dump(string(body))
}
// bakeRequest converts our Request description into an *http.Request:
// headers are copied over and Params are encoded into the URL query
// string. Panics if the request cannot be constructed.
func bakeRequest(request Request) *http.Request {
	req, err := http.NewRequest(request.Method, request.Url.String(), bytes.NewBuffer(request.Body))
	if err != nil {
		panic(err)
	}
	for _, header := range request.Headers {
		req.Header.Set(header.Name, header.Value)
	}
	if len(request.Params) > 0 {
		query := req.URL.Query()
		for _, Query := range request.Params {
			query.Add(Query.Key, Query.Value)
		}
		req.URL.RawQuery = query.Encode()
	}
	return req
}
// makes http get or post query
func query(request Request) *http.Response {
req := bakeRequest(request)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
panic(err)
}
return resp
}
// checks if the user whois lookup is IP
func isIP(search string) bool {
if net.ParseIP(search) != nil {
return true
}
return false
}
// currently all errors occurred while parsing the url
// string or response error or any other erros are
// replied back as no match found
// noMatchFound builds the canonical "no match" reply used for every
// parse/response failure.
func noMatchFound(search string) string {
	return fmt.Sprintf("No match for %s.", search)
}
// Method: 1
func RunLocalWhois(search string) string {
var out bytes.Buffer
// https://golang.org/src/os/exec/exec.go?s=4289:4334#L119
cmd := exec.Command("sh", "-c", "whois "+search)
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
return noMatchFound(search)
}
return base64.StdEncoding.EncodeToString(out.Bytes())
}
// Method: 2
func ScrapeFromWhoisLookupSite(search string) string {
uri := "http://www.whoislookup.com/whoislookupORIG.php?domain=" + search
doc, err := goquery.NewDocument(uri)
if err != nil {
return noMatchFound(search)
}
return base64.StdEncoding.EncodeToString([]byte(replaceBlankLines(replaceHTMLTags(doc.Find("table.cwhoisform > tbody > tr > td").Text()))))
}
// Method: 3
func ScrapeFromDomainBigDataSite(search string) string {
url, err := url.Parse("http://domainbigdata.com/" + search)
if err != nil {
return noMatchFound(search)
}
body := []byte("")
headers := []Header{
Header{Name: "Host", Value: "domainbigdata.com"},
Header{Name: "Proxy-Connection", Value: "keep-alive"},
Header{Name: "Pragma", Value: "no-cache"},
Header{Name: "Cache-Control", Value: "no-cache"},
Header{Name: "Origin", Value: "http://domainbigdata.com"},
Header{Name: "Referer", Value: "http://domainbigdata.com/"},
Header{Name: "User-Agent", Value: randomUserAgent()},
Header{Name: "Accept-Language", Value: "en-US,en;q=0.8"},
Header{Name: "Upgrade-Insecure-Requests", Value: "1"},
Header{Name: "Accept", Value: "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"},
}
resp := query(Request{Method: "GET", Url: url, Body: body, Headers: headers})
defer resp.Body.Close()
doc, err := goquery.NewDocumentFromResponse(resp)
if err != nil {
return noMatchFound(search)
}
html := ""
if isIP(search) {
html, err = doc.Find("p#plitwhoisip").Html()
} else {
html, err = doc.Find("#whois .pd5").Html() // for domain
}
if err != nil {
return noMatchFound(search)
}
// checking if there are any errors in response
if strings.EqualFold(html, "ERROR") || strings.Contains(html, "ERROR") {
html = "" // blank is detected as error!
}
// checking if the site has blocked us
if strings.Contains(html, "ip logged") {
html = ""
// site has blocked has, no more queries to this site
site := blockers[domainBigDataSite]
site.Status = true
}
return base64.StdEncoding.EncodeToString([]byte(replaceHTMLTags(html)))
}
// Method: 4
func ScrapeFromMarkMonitorSite(search string) string {
url, err := url.Parse("https://www.markmonitor.com/cgi-bin/affsearch.cgi")
if err != nil {
return noMatchFound(search)
}
body := []byte("")
headers := []Header{
Header{Name: "Host", Value: "www.markmonitor.com"},
Header{Name: "Connection", Value: "keep-alive"},
Header{Name: "Pragma", Value: "no-cache"},
Header{Name: "Cache-Control", Value: "no-cache"},
Header{Name: "Upgrade-Insecure-Requests", Value: "1"},
Header{Name: "Accept", Value: "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"},
Header{Name: "User-Agent", Value: randomUserAgent()},
Header{Name: "Accept-Language", Value: "en-US,en;q=0.8"},
}
params := []Query{
Query{Key: "q", Value: search},
Query{Key: "dn", Value: search},
Query{Key: "partner", Value: "yes"},
}
resp := query(Request{Method: "GET", Url: url, Body: body, Headers: headers, Params: params})
defer resp.Body.Close()
doc, err := goquery.NewDocumentFromResponse(resp)
if err != nil {
return noMatchFound(search)
}
html, err := doc.Find("pre").Html()
if err != nil {
return noMatchFound(search)
}
return base64.StdEncoding.EncodeToString([]byte(replaceBlankLines(replaceHTMLTags(html))))
}
// Method: 5
func ScrapeFromYouGetSignalSite(search string) string {
url, err := url.Parse("http://www.yougetsignal.com/tools/whois-lookup/php/get-whois-lookup-json-data.php")
if err != nil {
return noMatchFound(search)
}
body := []byte("remoteAddress=" + search + "&_=")
headers := []Header{
Header{Name: "Host", Value: "www.yougetsignal.com"},
Header{Name: "Connection", Value: "keep-alive"},
Header{Name: "Pragma", Value: "no-cache"},
Header{Name: "Cache-Control", Value: "no-cache"},
Header{Name: "Origin", Value: "http://www.yougetsignal.com"},
Header{Name: "User-Agent", Value: randomUserAgent()},
Header{Name: "Content-Type", Value: "application/x-www-form-urlencoded; charset=UTF-8"},
Header{Name: "Accept", Value: "text/javascript, text/html, application/xml, text/xml, */*"},
Header{Name: "X-Prototype-Version", Value: "1.6.0"},
Header{Name: "X-Requested-With", Value: "XMLHttpRequest"},
Header{Name: "Referer", Value: "http://www.yougetsignal.com/tools/whois-lookup/"},
Header{Name: "Accept-Language", Value: "en-US,en;q=0.8"},
}
resp := query(Request{Method: "POST", Url: url, Body: body, Headers: headers})
defer resp.Body.Close()
message := new(YouGetSignalMessage)
json.NewDecoder(resp.Body).Decode(message)
return base64.StdEncoding.EncodeToString([]byte(message.WhoisData))
}
// Method: 6
func ScrapeFromIPAddressOrgSite(search string) string {
url, err := url.Parse("http://www.ip-address.org/tracer/ip-whois.php")
if err != nil {
return noMatchFound(search)
}
body := []byte("query=" + search + "&Submit=IP Whois Lookup")
headers := []Header{
Header{Name: "Host", Value: "www.ip-address.org"},
Header{Name: "Connection", Value: "keep-alive"},
Header{Name: "Pragma", Value: "no-cache"},
Header{Name: "Cache-Control", Value: "no-cache"},
Header{Name: "Upgrade-Insecure-Requests", Value: "1"},
Header{Name: "Origin", Value: "http://www.ip-address.org"},
Header{Name: "User-Agent", Value: randomUserAgent()},
Header{Name: "Content-Type", Value: "application/x-www-form-urlencoded"},
Header{Name: "Accept", Value: "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"},
Header{Name: "Referer", Value: "http://www.ip-address.org/tracer/ip-whois.php"},
Header{Name: "Accept-Language", Value: "en-US,en;q=0.8"},
}
resp := query(Request{Method: "POST", Url: url, Body: body, Headers: headers})
defer resp.Body.Close()
doc, err := goquery.NewDocumentFromResponse(resp)
if err != nil {
return noMatchFound(search)
}
html, err := doc.Find("pre").Html()
if err != nil {
return noMatchFound(search)
}
return base64.StdEncoding.EncodeToString([]byte(replaceBlankLines(replaceHTMLTags(html))))
}
// Method: 7
func ScrapeFromRobTexSite(search string) string {
url, err := url.Parse(getRobTexSiteUrl(search))
if err != nil {
return noMatchFound(search)
}
body := []byte("")
headers := []Header{
Header{Name: "Pragma", Value: "no-cache"},
Header{Name: "Cache-Control", Value: "no-cache"},
Header{Name: "Upgrade-Insecure-Requests", Value: "1"},
Header{Name: "Accept", Value: "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"},
Header{Name: "User-Agent", Value: randomUserAgent()},
Header{Name: "Accept-Language", Value: "en-US,en;q=0.8"},
}
resp := query(Request{Method: "GET", Url: url, Body: body, Headers: headers})
defer resp.Body.Close()
doc, err := goquery.NewDocumentFromResponse(resp)
if err != nil {
return noMatchFound(search)
}
var whois bytes.Buffer
doc.Find("table.s").Each(func(i int, table *goquery.Selection) {
table.Find("tr").Each(func(i int, tr *goquery.Selection) {
key := tr.Find("td:first-child > b")
value := tr.Find("td:last-child")
if key.Text() != "" {
whois.WriteString(key.Text())
whois.WriteString(value.Text())
whois.WriteString("\n")
}
})
})
return base64.StdEncoding.EncodeToString(whois.Bytes())
}
// getRobTexSiteUrl builds the dnswhois.info lookup URL for a query.
func getRobTexSiteUrl(search string) string {
	return fmt.Sprintf("https://www.dnswhois.info/%s", search)
}
// Method: 8
// Website: Domainpunch
// ScrapeFromDomainPunch posts to domainpunch.com's whois endpoint and
// returns the RAW field of its JSON reply base64-encoded. Decode errors
// are ignored; a failed decode yields an empty result.
func ScrapeFromDomainPunch(search string) string {
	url, err := url.Parse("https://domainpunch.com/whois/whois.php?tld=" + search)
	if err != nil {
		return noMatchFound(search)
	}
	body := []byte("")
	headers := []Header{
		Header{Name: "Host", Value: "domainpunch.com"},
		Header{Name: "User-Agent", Value: randomUserAgent()},
		Header{Name: "Accept", Value: "*/*"},
		Header{Name: "Accept-Language", Value: "en-US,en;q=0.5"},
		Header{Name: "Referer", Value: "https://domainpunch.com/whois/"},
		Header{Name: "X-Requested-With", Value: "XMLHttpRequest"},
	}
	resp := query(Request{Method: "POST", Url: url, Body: body, Headers: headers})
	defer resp.Body.Close()
	message := new(DomainPunchMessage)
	json.NewDecoder(resp.Body).Decode(message)
	return base64.StdEncoding.EncodeToString([]byte(message.RAW))
}
// Method: 9
// ScrapeFromWhoisDomainSearch fetches whoisds.com's lookup page, grabs
// the last column block, and regex-extracts the whois text that follows
// the heading, returning the cleaned text base64-encoded.
func ScrapeFromWhoisDomainSearch(search string) string {
	url, err := url.Parse("https://whoisds.com/whois-lookup/lookup?domain=" + search)
	if err != nil {
		return noMatchFound(search)
	}
	body := []byte("")
	headers := []Header{
		Header{Name: "Host", Value: "whoisds.com"},
		Header{Name: "User-Agent", Value: randomUserAgent()},
		Header{Name: "Accept", Value: "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"},
		Header{Name: "Accept-Language", Value: "en-US,en;q=0.5"},
	}
	resp := query(Request{Method: "GET", Url: url, Body: body, Headers: headers})
	defer resp.Body.Close()
	doc, err := goquery.NewDocumentFromResponse(resp)
	if err != nil {
		return noMatchFound(search)
	}
	// $$('.container .row .row .col-md-12:last-child')
	// getting raw html for applying the regex
	html, err := doc.Find(".container .row .row .col-md-12:last-child").Html()
	if err != nil {
		return noMatchFound(search)
	}
	html = strings.TrimSpace(html)
	// <h4 class="heading-primary">(.*)<\/h4>
	// domainAvailableReg := regexp.MustCompile(`(?i)<h4 class="heading-primary">(.*)<\/h4>`)
	// domainRegistered := false
	// registrationReg := regexp.MustCompile(`(?i)already registered`)
	// if len(domainAvailableReg.FindStringIndex(html)) > 0 {
	// availabilityStr := domainAvailableReg.FindStringSubmatch(html)[1]
	// if len(registrationReg.FindStringIndex(availabilityStr)) > 0 {
	// domainRegistered = true
	// }
	// }
	// <h4 class="heading-primary">.*<\/h4>\s+(.*)
	whoisReg := regexp.MustCompile(`(?is)<h4 class="heading-primary">.*<\/h4>\s+(.*)\s+`)
	whois := ""
	if len(whoisReg.FindStringIndex(html)) > 0 {
		whois = whoisReg.FindStringSubmatch(html)[1]
		if whois != "" {
			whois = replaceBlankLines(replaceHTMLTags(whois)) // clean it
		}
	}
	return base64.StdEncoding.EncodeToString([]byte(whois))
}
// init will be called before the main function
// Its the right place to initialize the seed Value
func init() {
	// note:
	// Each time you set the same seed, you get the same sequence
	// You have to set the seed only once
	// you simply call Intn to get the next random integer
	rand.Seed(time.Now().UTC().UnixNano())
}
// lookup resolves the whois data for query via the given source method
// (0 means pick a random source via randomMethod). The host's outbound
// IPv4 address is included in the Output; failure to determine it aborts
// the process.
func lookup(query string, method int) Output {
	ip, err := getHostIPAddress()
	if err != nil {
		log.Fatal("Failed getting the host IP address.")
	}
	if method == 0 {
		method = randomMethod(query)
	}
	var whois string
	switch method {
	case localWhois:
		whois = RunLocalWhois(query)
	case whoisLookupSite:
		whois = ScrapeFromWhoisLookupSite(query)
	case domainBigDataSite:
		whois = ScrapeFromDomainBigDataSite(query)
	case markMonitorSite:
		whois = ScrapeFromMarkMonitorSite(query)
	case youGetSignalSite:
		whois = ScrapeFromYouGetSignalSite(query)
	case ipAddressOrgSite:
		whois = ScrapeFromIPAddressOrgSite(query)
	case robTexSite:
		whois = ScrapeFromRobTexSite(query)
	case domainPunchSite:
		whois = ScrapeFromDomainPunch(query)
	case whoisDomainSearch:
		whois = ScrapeFromWhoisDomainSearch(query)
	default:
		// Unknown method ids fall back to the local whois binary.
		whois = RunLocalWhois(query)
	}
	return Output{Query: query, Method: method, Whois: whois, IP: ip}
}
// main looks up whois data for the '*'-separated queries in os.Args[1],
// running at most 5 lookups concurrently (semaphore channel), retrying
// empty results twice (random source, then local whois), and printing
// all results as a JSON array.
//
// Bug fixed: the goroutines appended to the shared outputs slice without
// synchronization — a data race that could drop results or corrupt the
// slice. Appends are now guarded by a mutex.
func main() {
	if 2 > len(os.Args) {
		log.Fatal("Usage: whoislookup queries")
	}
	initBlockers()
	queries := strings.Split(os.Args[1], "*")
	throttle := 5
	type empty struct{}
	var wg sync.WaitGroup
	var mu sync.Mutex // guards outputs
	var sem = make(chan empty, throttle)
	outputs := make([]Output, 0)
	for _, query := range queries {
		wg.Add(1)
		sem <- struct{}{}
		go func(value string) {
			defer wg.Done()
			defer func() { <-sem }()
			// Scrapers panic on transport errors; recover so one failed
			// query doesn't kill the whole run.
			defer func() {
				if r := recover(); r != nil {
					fmt.Println("Recovered in defer", r)
				}
			}()
			o := lookup(value, 0)
			if o.Whois == "" {
				o = lookup(value, 0)
			}
			if o.Whois == "" {
				o = lookup(value, localWhois)
			}
			mu.Lock()
			outputs = append(outputs, o)
			mu.Unlock()
		}(query)
	}
	wg.Wait()
	m, err := json.Marshal(outputs)
	if err != nil {
		fmt.Println("Error occurred!")
	}
	fmt.Println(string(m))
}
|
package portal
import (
"fmt"
"net/http"
"time"
"github.com/golang/glog"
"kope.io/auth/pkg/configreader"
"kope.io/auth/pkg/keystore"
"kope.io/auth/pkg/oauth"
"kope.io/auth/pkg/tokenstore"
)
type HTTPServer struct {
config *configreader.ManagedConfiguration
listen string
staticDir string
oauthServer *oauth.Server
tokenStore tokenstore.Interface
}
// NewHTTPServer wires up the portal HTTP server: it obtains the "oauth"
// keyset from the key store, configures the embedded oauth server
// (7-day cookie, no refresh), and installs the server's own mapUser as
// the oauth user mapper.
func NewHTTPServer(config *configreader.ManagedConfiguration, listen string, staticDir string, keyStore keystore.KeyStore, tokenStore tokenstore.Interface) (*HTTPServer, error) {
	keyset, err := keyStore.KeySet("oauth")
	if err != nil {
		return nil, fmt.Errorf("error initializing keyset: %v", err)
	}
	oauthServer := &oauth.Server{
		CookieName:    "_auth_portal",
		CookieExpiry:  time.Duration(168) * time.Hour,
		CookieRefresh: time.Duration(0),
		Keyset:        keyset,
		Config:        config,
		// UserMapper set below
	}
	s := &HTTPServer{
		config:      config,
		listen:      listen,
		staticDir:   staticDir,
		oauthServer: oauthServer,
		tokenStore:  tokenStore,
	}
	// UserMapper needs the constructed server, so it is assigned after s exists.
	s.oauthServer.UserMapper = s.mapUser
	return s, nil
}
// ListenAndServe registers all portal routes (oauth2 endpoints, JSON API,
// portal actions, and a static file server with the index page at "/")
// and blocks serving HTTP on the configured address. Unmatched paths
// under each prefix return 404.
func (s *HTTPServer) ListenAndServe() error {
	mux := http.NewServeMux()
	mux.HandleFunc("/oauth2/start", s.oauthStart)
	mux.HandleFunc("/oauth2/callback", s.oauthCallback)
	mux.HandleFunc("/oauth2/", func(w http.ResponseWriter, r *http.Request) {
		http.NotFound(w, r)
	})
	mux.HandleFunc("/api/whoami", s.apiWhoAmI)
	mux.HandleFunc("/api/tokens", s.apiTokens)
	mux.HandleFunc("/api/kubeconfig", s.apiKubeconfig)
	mux.HandleFunc("/api/", func(w http.ResponseWriter, r *http.Request) {
		http.NotFound(w, r)
	})
	mux.HandleFunc("/portal/actions/login", s.portalActionLogin)
	mux.HandleFunc("/portal/actions/logout", s.portalActionLogout)
	mux.HandleFunc("/portal/actions/kubeconfig", s.portalActionKubeconfig)
	mux.HandleFunc("/portal/", func(w http.ResponseWriter, r *http.Request) {
		http.NotFound(w, r)
	})
	staticServer := http.FileServer(http.Dir(s.staticDir))
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		path := r.URL.Path
		glog.Infof("%s %s", r.Method, path)
		if path == "/" {
			// Root serves the rendered portal index, not the static dir.
			s.portalIndex(w, r)
		} else {
			staticServer.ServeHTTP(w, r)
		}
	})
	server := &http.Server{
		Addr:    s.listen,
		Handler: mux,
	}
	return server.ListenAndServe()
}
|
package mocks
import (
"github.com/oreuta/easytrip/models"
)
// BankUAClientMock is a canned-response test double for the bank client:
// each method returns the pre-configured field values verbatim.
type BankUAClientMock struct {
	Body     []byte
	Unpacked []models.CurrencyBank
	Err      error
}

// Get returns the pre-configured raw body and error.
func (m BankUAClientMock) Get() (body []byte, err error) {
	return m.Body, m.Err
}

// GetCurrBank returns the pre-configured parsed currencies and error.
func (m BankUAClientMock) GetCurrBank() (unpacked []models.CurrencyBank, err error) {
	return m.Unpacked, m.Err
}
|
package cmd_test
import (
"bytes"
"os"
"path/filepath"
"testing"
"github.com/raba-jp/primus/pkg/cli/cmd"
)
// TestExecute runs the root command with various argument sets and
// compares combined stdout/stderr output against golden files under
// testdata/golden.
func TestExecute(t *testing.T) {
	tests := []struct {
		name       string
		args       []string
		goldenFile string
	}{
		{
			name:       "no args",
			args:       []string{},
			goldenFile: "execute_no_args.golden",
		},
		{
			name:       "set logLevel",
			args:       []string{"--logLevel=info"},
			goldenFile: "execute_enable_debug.golden",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Capture both out and err streams in one buffer.
			buf := new(bytes.Buffer)
			rootCmd := cmd.Initialize()
			rootCmd.SetOut(buf)
			rootCmd.SetErr(buf)
			rootCmd.SetArgs(tt.args)
			if err := rootCmd.Execute(); err != nil {
				t.Fatalf("%v", err)
			}
			wd, _ := os.Getwd()
			path := filepath.Join(wd, "testdata", "golden", tt.goldenFile)
			goldenTest(t, path, buf.String())
		})
	}
}
|
package model
import (
"encoding/json"
"fmt"
"time"
"mdstest/helper"
"golang.org/x/crypto/bcrypt"
"github.com/jinzhu/gorm"
"github.com/pkg/errors"
)
// Status codes stored in users.user_status.
const UserStatusActive = "A"
const UserStatusInactive = "I"
const UserStatusDeleted = "D"

// UserStatusMap maps each status code to its display label; membership
// in this map defines the set of valid statuses (see ValidateStatus).
var UserStatusMap = map[string]string{
	"A": "Active",
	"I": "Inactive",
	"D": "Deleted",
}

// User is the gorm model for the users table, with its settings loaded
// via the UserId foreign key.
type User struct {
	UserId       string         `json:"user_id" gorm:"primary_key"`
	UserName     string         `json:"user_name"`
	UserPassword string         `json:"user_password"`
	UserStatus   string         `json:"user_status"`
	LastUpdated  time.Time      `json:"last_updated"`
	UserSettings []*UserSetting `json:"user_setting" gorm:"ForeignKey:UserId;AssociationForeignKey:UserId"`
}
// MarshalJSON is a custom JSON marshaller for the user struct (satisfies
// interface json.Marshaler). LastUpdated is rendered as a string in the
// project's MySQL datetime format (second precision); a zero time becomes
// the empty string.
func (u *User) MarshalJSON() ([]byte, error) {
	// Alias has User's fields but not its methods, avoiding infinite recursion.
	type Alias User
	var lastUpdated string
	if !u.LastUpdated.IsZero() {
		lastUpdated = u.LastUpdated.Format(helper.DatetimeFormat)
	}
	return json.Marshal(&struct {
		LastUpdated string `json:"lastUpdated"`
		*Alias
	}{
		// Shadow the time.Time field with its formatted string form.
		LastUpdated: lastUpdated,
		Alias:       (*Alias)(u),
	})
}
// UnmarshalJSON is a custom JSON unmarshaller for the user struct (satisfies
// interface json.Unmarshaler). LastUpdated is parsed from the project's MySQL
// datetime format and normalized to the default location; an absent or empty
// value yields the zero time.
func (u *User) UnmarshalJSON(data []byte) error {
	// Alias has User's fields but not its methods, avoiding infinite recursion.
	type Alias User
	user := &struct {
		LastUpdated string `json:"lastUpdated"`
		*Alias
	}{
		Alias: (*Alias)(u),
	}
	// user is already a pointer; pass it directly instead of the needless **T.
	if err := json.Unmarshal(data, user); err != nil {
		return err
	}
	var lastUpdated time.Time
	if user.LastUpdated != "" {
		var err error
		lastUpdated, err = time.Parse(helper.DatetimeFormat, user.LastUpdated)
		if err != nil {
			return err
		}
	}
	u.LastUpdated = lastUpdated.In(helper.DefaultLocation)
	return nil
}
// ValidateStatus checks that UserStatus is one of the known status codes,
// returning a ValidationError otherwise.
func (u *User) ValidateStatus() error {
	if _, ok := UserStatusMap[u.UserStatus]; !ok {
		return ValidationError{
			ErrorField: "UserStatus",
			ErrorMsg:   fmt.Sprintf("Invalid status value: %v", u.UserStatus),
		}
	}
	return nil
}
// ValidateAdd validates the model for the new-user case: the user id must
// not already exist and the status must be a known code.
func (u *User) ValidateAdd(db *gorm.DB) error {
	var count int
	// Check the Error of the chained query itself: gorm chain methods return a
	// new *gorm.DB, so inspecting db.Error afterwards would miss query failures.
	if q := db.Table("users").Where("user_id = ?", u.UserId).Count(&count); q.Error != nil {
		return errors.Wrap(q.Error, "Query error")
	}
	if count >= 1 {
		return ValidationError{
			ErrorField: "UserId",
			ErrorMsg:   "User already exists",
		}
	}
	return u.ValidateStatus()
}
// ValidateEdit validates the model for the edit-user case: the user id must
// exist and the status must be a known code.
func (u *User) ValidateEdit(db *gorm.DB) error {
	var count int
	// Check the Error of the chained query itself: gorm chain methods return a
	// new *gorm.DB, so inspecting db.Error afterwards would miss query failures.
	if q := db.Table("users").Where("user_id = ?", u.UserId).Count(&count); q.Error != nil {
		return errors.Wrap(q.Error, "Query error")
	}
	if count == 0 {
		return ValidationError{
			ErrorField: "UserId",
			ErrorMsg:   "User doesn't exist",
		}
	}
	return u.ValidateStatus()
}
// SetPassword hashes plaintext with bcrypt (default cost = 10) and stores
// the resulting hash in UserPassword.
func (u *User) SetPassword(plaintext string) error {
	hash, err := bcrypt.GenerateFromPassword([]byte(plaintext), bcrypt.DefaultCost)
	if err != nil {
		return errors.Wrap(err, "Error generating password hash")
	}
	u.UserPassword = string(hash)
	return nil
}
// TableName tells gorm which table backs the User model.
func (*User) TableName() string {
	return "users"
}
package main
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/gorilla/mux"
"github.com/stretchr/testify/assert"
)
// Test_getBooks issues a GET against the book-list handler and checks the
// recorded status code.
func Test_getBooks(t *testing.T) {
	rec := httptest.NewRecorder()
	cases := []struct {
		name string
		req  *http.Request
		want int
	}{
		{
			name: "test 1",
			req:  httptest.NewRequest("GET", "/api/books", nil),
			want: 200,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			getBooks(rec, tc.req)
			assert.Equal(t, tc.want, rec.Code)
		})
	}
}
// Test_getBook exercises the single-book handler with an unknown id (404)
// and a known id (200).
func Test_getBook(t *testing.T) {
	books = append(books, Book{ID: "1", Isbn: "meh", Title: "B1", Author: &Author{Firstname: "John", Lastname: "Doe"}})
	req := httptest.NewRequest("GET", "/api/book/id", nil)
	req = mux.SetURLVars(req, map[string]string{"id": "1"})
	type args struct {
		w httptest.ResponseRecorder
		r *http.Request
	}
	tests := []struct {
		name string
		args args
		want int
	}{
		{
			// No mux vars set, so the handler cannot find a matching book.
			name: "unknown id",
			args: args{
				w: *httptest.NewRecorder(),
				r: httptest.NewRequest("GET", "/api/book/bad", nil),
			},
			want: http.StatusNotFound,
		},
		{
			// Fix: both cases were named "test 1", which collapses subtest
			// names in output (go test appends #01). Name them distinctly.
			name: "existing id",
			args: args{
				w: *httptest.NewRecorder(),
				r: req,
			},
			want: http.StatusOK,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			getBook(&tt.args.w, tt.args.r)
			assert.Equal(t, tt.want, tt.args.w.Code)
		})
	}
}
// Test_deleteBook is a placeholder table-driven test for the delete handler.
func Test_deleteBook(t *testing.T) {
	type args struct {
		w http.ResponseWriter
		r *http.Request
	}
	var tests []struct {
		name string
		args args
	}
	// TODO: Add test cases.
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			deleteBook(tt.args.w, tt.args.r)
		})
	}
}
// Test_editBook is a placeholder table-driven test for the edit handler.
func Test_editBook(t *testing.T) {
	type args struct {
		w http.ResponseWriter
		r *http.Request
	}
	var tests []struct {
		name string
		args args
	}
	// TODO: Add test cases.
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			editBook(tt.args.w, tt.args.r)
		})
	}
}
// Test_createBook is a placeholder table-driven test for the create handler.
func Test_createBook(t *testing.T) {
	type args struct {
		w http.ResponseWriter
		r *http.Request
	}
	var tests []struct {
		name string
		args args
	}
	// TODO: Add test cases.
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			createBook(tt.args.w, tt.args.r)
		})
	}
}
// Test_main is a placeholder table-driven test for the program entry point.
func Test_main(t *testing.T) {
	type args struct {
		r mux.Router
	}
	var tests []struct {
		name string
		args args
	}
	// TODO: Add test cases.
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			main()
		})
	}
}
|
package cache
import (
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/ayoisaiah/stellar-photos-server/config"
"github.com/ayoisaiah/stellar-photos-server/unsplash"
"github.com/ayoisaiah/stellar-photos-server/utils"
)
// stellarPhotosCollectionID identifies the Unsplash collection that is
// mirrored into the local cache.
const stellarPhotosCollectionID = 998309

// Image widths (pixels) cached for every photo; see downloadPhotos.
const (
	standardRes = 2000 // always cached
	highRes = 4000 // cached in addition when the original is at least this wide
)
// getCollection fetches metadata for the default collection from the
// Unsplash API.
func getCollection() (unsplash.Collection, error) {
	var collection unsplash.Collection
	url := fmt.Sprintf(
		"%s/collections/%d?client_id=%s",
		unsplash.APIBaseURL,
		stellarPhotosCollectionID,
		config.Conf.Unsplash.AccessKey,
	)
	if _, err := utils.SendGETRequest(url, &collection); err != nil {
		return collection, err
	}
	return collection, nil
}
// retrieveAllPhotos pages through the default collection and returns its
// photos keyed by photo ID.
func retrieveAllPhotos() (map[string]unsplash.Photo, error) {
	collection, err := getCollection()
	if err != nil {
		return nil, err
	}
	unsplashAccessKey := config.Conf.Unsplash.AccessKey
	// Length 0, capacity TotalPhotos. The previous make(..., TotalPhotos)
	// created TotalPhotos zero-value photos and then appended after them,
	// doubling the slice and inserting a bogus empty-ID entry into the map.
	allPhotos := make([]unsplash.Photo, 0, collection.TotalPhotos)
	page, perPage := 1, 30
	for {
		var photos []unsplash.Photo
		url := fmt.Sprintf(
			"%s/collections/%d/photos?page=%d&per_page=%d&client_id=%s",
			unsplash.APIBaseURL,
			stellarPhotosCollectionID,
			page,
			perPage,
			unsplashAccessKey,
		)
		_, err := utils.SendGETRequest(url, &photos)
		if err != nil {
			return nil, err
		}
		allPhotos = append(allPhotos, photos...)
		page++
		// An empty page means we have walked past the last photo.
		if len(photos) == 0 {
			break
		}
	}
	m := make(map[string]unsplash.Photo, len(allPhotos))
	for i := range allPhotos {
		m[allPhotos[i].ID] = allPhotos[i]
	}
	return m, nil
}
// downloadPhotos caches each photo under cached_images/<id>/: base64 image
// data per width (<width>.txt) plus the photo's JSON metadata (<id>.json).
// Individual failures are collected and returned; work continues past them.
func downloadPhotos(photos map[string]unsplash.Photo) []error {
	var errs []error
	for k := range photos {
		v := photos[k]
		err := os.MkdirAll(filepath.Join("cached_images", k), os.ModePerm)
		if err != nil {
			errs = append(errs, err)
			continue
		}
		// standardRes is always cached; wide originals additionally get highRes
		// and their native width, narrower ones just their native width.
		widths := []int{standardRes}
		if v.Width >= highRes {
			widths = append(widths, highRes, v.Width)
		} else {
			widths = append(widths, v.Width)
		}
		for _, width := range widths {
			imageURL := fmt.Sprintf("%s&w=%d", v.Urls.Raw, width)
			fileName := fmt.Sprintf("%d.txt", width)
			filePath := filepath.Join("cached_images", k, fileName)
			// Skip files that already exist so repeat runs only fetch new data.
			if _, err = os.Stat(filePath); err == nil ||
				errors.Is(err, os.ErrExist) {
				continue
			}
			var base64 string
			base64, err = utils.GetImageBase64(imageURL, fileName, k)
			if err != nil {
				errs = append(errs, err)
				continue
			}
			err = os.WriteFile(filePath, []byte(base64), os.ModePerm)
			if err != nil {
				errs = append(errs, err)
				continue
			}
		}
		// Metadata sidecar, also written only once.
		fileName := k + ".json"
		filePath := filepath.Join("cached_images", k, fileName)
		if _, err = os.Stat(filePath); err == nil ||
			errors.Is(err, os.ErrExist) {
			continue
		}
		b, err := json.Marshal(v)
		if err != nil {
			errs = append(errs, err)
			continue
		}
		err = os.WriteFile(filePath, b, os.ModePerm)
		if err != nil {
			errs = append(errs, err)
			continue
		}
	}
	return errs
}
// cleanup removes cached directories for photos that are no longer in the
// collection. Directory entries are mapped to a photo ID by stripping the
// extension and taking the text before the first '_'.
func cleanup(photos map[string]unsplash.Photo) {
	files, err := os.ReadDir("cached_images")
	if err != nil {
		utils.Logger().Errorw("Unable to read cached_images directory",
			"tag", "read_cached_images_dir_failure",
			"error", err,
		)
		return
	}
	// Remember IDs already removed so multiple entries for one photo are
	// handled only once.
	cleaned := make(map[string]bool)
	for _, f := range files {
		fileName := f.Name()
		id := strings.Split(fileName[:len(fileName)-len(filepath.Ext(fileName))], "_")[0]
		if _, ok := cleaned[id]; ok {
			continue
		}
		// Present on disk but absent from the live collection: delete it.
		if _, ok := photos[id]; !ok {
			err := os.RemoveAll(filepath.Join("cached_images", id))
			if err != nil {
				utils.Logger().
					Warnw("Unable to clean deleted photo from cached_images directory",
						"tag", "cache_clean_failure",
						"image_id", id,
						"error", err,
					)
				continue
			}
			cleaned[id] = true
			utils.Logger().
				Infow("Photo cleaned from cached_images directory successfully",
					"image_id", id,
				)
		}
	}
}
// Photos caches all Unsplash images in the default collection locally and
// removes cached entries for photos deleted from the collection. Cleanup is
// skipped when any download failed, so nothing is removed on partial runs.
func Photos() {
	logger := utils.Logger()
	logger.Infow("Pre-caching all images in default collection",
		"tag", "pre_caching_start",
	)
	photos, err := retrieveAllPhotos()
	if err != nil {
		logger.Errorw("Unable to retrieve all images in default collection",
			"tag", "retrieve_all_photos_failure",
			"error", err,
		)
		return
	}
	if errs := downloadPhotos(photos); len(errs) != 0 {
		logger.Errorw("Some downloads failed to complete",
			"tag", "download_photos_cache_failure",
			"error", errs,
		)
		return
	}
	cleanup(photos)
	logger.Infow("Cached images updated successfully!",
		"tag", "pre_caching_end",
	)
}
|
package describe
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/spf13/cobra"
kcmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/templates"
"github.com/openshift/oc-mirror/pkg/bundle"
"github.com/openshift/oc-mirror/pkg/cli"
)
// DescribeOptions carries the inputs of the `describe` command.
type DescribeOptions struct {
	*cli.RootOptions
	// From is the path to the imageset archive whose metadata is printed.
	From string
}
// NewDescribeCommand builds the cobra `describe` command, which pretty-prints
// the mirror metadata contained in an imageset archive. The factory f is
// currently unused but kept for signature parity with sibling commands.
func NewDescribeCommand(f kcmdutil.Factory, ro *cli.RootOptions) *cobra.Command {
	o := DescribeOptions{}
	o.RootOptions = ro
	cmd := &cobra.Command{
		Use: "describe <archive path>",
		Short: "Pretty print the contents of mirror metadata",
		Example: templates.Examples(`
			# Output the contents of 'mirror_seq1_00000.tar'
			oc-mirror describe mirror_seq1_00000.tar
		`),
		Args: cobra.ExactArgs(1),
		// Standard Complete -> Validate -> Run pipeline; CheckErr aborts on error.
		Run: func(cmd *cobra.Command, args []string) {
			kcmdutil.CheckErr(o.Complete(args))
			kcmdutil.CheckErr(o.Validate())
			kcmdutil.CheckErr(o.Run(cmd.Context()))
		},
	}
	o.BindFlags(cmd.PersistentFlags())
	return cmd
}
// Complete populates the options from the positional CLI arguments.
func (o *DescribeOptions) Complete(args []string) error {
	if len(args) != 1 {
		return nil
	}
	o.From = args[0]
	return nil
}
// Validate ensures an archive path was supplied.
func (o *DescribeOptions) Validate() error {
	if o.From != "" {
		return nil
	}
	return errors.New("must specify path to imageset archive")
}
// Run reads the imageset metadata from the archive and writes it to the
// configured output stream as indented JSON.
func (o *DescribeOptions) Run(ctx context.Context) error {
	meta, err := bundle.ReadMetadataFromFile(ctx, o.From)
	if err != nil {
		return fmt.Errorf("error retrieving metadata from %q: %v", o.From, err)
	}
	pretty, err := json.MarshalIndent(&meta, "", " ")
	if err != nil {
		return err
	}
	fmt.Fprintln(o.IOStreams.Out, string(pretty))
	return nil
}
|
package main
import (
"fmt"
// "time"
"strconv"
"remoteCacheToGo/cacheClient"
)
// main connects a cache client to a local remoteCache instance (no TLS, no
// credentials) and runs the concurrent write/read smoke tests until one of
// them reports an error on errorStream.
func main() {
	fmt.Println("Client test")
	errorStream := make(chan error)
	client := cacheClient.New()
	go client.ConnectToCache("127.0.0.1", 8000, "", "", errorStream)
	go concurrentWriteTest(client, errorStream)
	// go subscriptionTest(client)
	go concurrentGetTest(client)
	// Block until the first error arrives, then report it and exit.
	err := <-errorStream
	if err != nil {
		fmt.Println(err)
	}
}
// subscriptionTest prints every update pushed by the cache subscription.
func subscriptionTest(client cacheClient.RemoteCache) {
	// A for/select with a single receive case is just a channel receive;
	// ranging is the idiomatic form and additionally terminates instead of
	// spinning on zero values if the subscription channel is ever closed.
	for res := range client.Subscribe() {
		fmt.Println(res.Key + ": " + string(res.Data))
	}
}
// concurrentWriteTest writes an endless sequence of remote<i> keys (starting
// at remote1), reporting the first failure on errorStream and returning.
func concurrentWriteTest(client cacheClient.RemoteCache, errorStream chan error) {
	for i := 1; ; i++ {
		key := "remote" + strconv.Itoa(i)
		if err := client.AddValByKey(key, []byte(key)); err != nil {
			errorStream <- err
			return
		}
		// time.Sleep(1 * time.Millisecond)
	}
}
// concurrentGetTest reads back remote<i> keys (starting at remote1) and
// prints them until a read fails.
func concurrentGetTest(client cacheClient.RemoteCache) {
	for i := 1; ; i++ {
		key := "remote" + strconv.Itoa(i)
		res, err := client.GetValByKey(key)
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println(key + ": " + string(res))
		// time.Sleep(1 * time.Millisecond)
	}
}
|
/*******************************************************************************
* Copyright 2017 Samsung Electronics All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*******************************************************************************/
// Package group provides functionality to handle requests related to groups.
package group
import (
"api/common"
"commons/errors"
"commons/logger"
"commons/results"
URL "commons/url"
"manager/group"
"net/http"
"strings"
)
// HTTP method names used by the dispatcher, aliased from net/http so the
// strings cannot drift from the standard constants.
const (
	GET    string = http.MethodGet
	PUT    string = http.MethodPut
	POST   string = http.MethodPost
	DELETE string = http.MethodDelete
)
// _SDAMGroupApisHandler dispatches incoming group API requests by URL and method.
type _SDAMGroupApisHandler struct{}
// _SDAMGroupApis implements the individual group API endpoints.
type _SDAMGroupApis struct{}
var sdamH _SDAMGroupApisHandler
var sdam _SDAMGroupApis
// sdamGroupController performs the actual group operations behind the API.
var sdamGroupController group.GroupInterface
// init publishes the handler/API singletons (SdamGroupHandle and SdamGroup
// are package-level variables declared elsewhere in this package) and wires
// the default controller implementation.
func init() {
	SdamGroupHandle = sdamH
	SdamGroup = sdam
	sdamGroupController = group.GroupController{}
}
// Handle calls a proper function according to the url and method received
// from remote device. The groups base prefix is stripped and the request is
// dispatched on the number of remaining path segments:
//
//	1: /groups                                        GET list
//	2: /groups/create (POST) or /groups/{id}          GET/DELETE
//	3: /groups/{id}/{deploy|join|leave}  POST, /groups/{id}/apps GET
//	4: /groups/{id}/apps/{appID}                      GET/POST/DELETE
//	5: /groups/{id}/apps/{appID}/{start|stop|update}  POST
//
// Unknown paths answer NotFoundURL; wrong methods answer InvalidMethod.
func (sdamH _SDAMGroupApisHandler) Handle(w http.ResponseWriter, req *http.Request) {
	url := strings.Replace(req.URL.Path, URL.Base()+URL.Groups(), "", -1)
	split := strings.Split(url, "/")
	switch len(split) {
	case 1:
		// /groups
		if req.Method == GET {
			SdamGroup.groups(w, req)
		} else {
			common.WriteError(w, errors.InvalidMethod{req.Method})
		}
	case 2:
		// /groups/create or /groups/{groupID}
		if "/"+split[1] == URL.Create() {
			if req.Method == POST {
				SdamGroup.createGroup(w, req)
			} else {
				common.WriteError(w, errors.InvalidMethod{req.Method})
			}
		} else {
			if req.Method == GET || req.Method == DELETE {
				groupID := split[1]
				SdamGroup.group(w, req, groupID)
			} else {
				common.WriteError(w, errors.InvalidMethod{req.Method})
			}
		}
	case 3:
		// /groups/{groupID}/{deploy|join|leave|apps}
		groupID := split[1]
		switch {
		case "/"+split[2] == URL.Deploy():
			if req.Method == POST {
				SdamGroup.groupDeployApp(w, req, groupID)
			} else {
				common.WriteError(w, errors.InvalidMethod{req.Method})
			}
		case "/"+split[2] == URL.Join():
			if req.Method == POST {
				SdamGroup.groupJoin(w, req, groupID)
			} else {
				common.WriteError(w, errors.InvalidMethod{req.Method})
			}
		case "/"+split[2] == URL.Leave():
			if req.Method == POST {
				SdamGroup.groupLeave(w, req, groupID)
			} else {
				common.WriteError(w, errors.InvalidMethod{req.Method})
			}
		case "/"+split[2] == URL.Apps():
			if req.Method == GET {
				SdamGroup.groupInfoApps(w, req, groupID)
			} else {
				common.WriteError(w, errors.InvalidMethod{req.Method})
			}
		default:
			common.WriteError(w, errors.NotFoundURL{})
		}
	case 4:
		// /groups/{groupID}/apps/{appID}
		if "/"+split[2] == URL.Apps() {
			groupID, appID := split[1], split[3]
			switch req.Method {
			case GET:
				SdamGroup.groupInfoApp(w, req, groupID, appID)
			case POST:
				SdamGroup.groupUpdateAppInfo(w, req, groupID, appID)
			case DELETE:
				SdamGroup.groupDeleteApp(w, req, groupID, appID)
			default:
				common.WriteError(w, errors.InvalidMethod{req.Method})
			}
		} else {
			common.WriteError(w, errors.NotFoundURL{})
		}
	case 5:
		// /groups/{groupID}/apps/{appID}/{start|stop|update}
		if "/"+split[2] == URL.Apps() {
			groupID, appID := split[1], split[3]
			switch {
			case "/"+split[4] == URL.Start() && req.Method == POST:
				SdamGroup.groupStartApp(w, req, groupID, appID)
			case "/"+split[4] == URL.Stop() && req.Method == POST:
				SdamGroup.groupStopApp(w, req, groupID, appID)
			case "/"+split[4] == URL.Update() && req.Method == POST:
				SdamGroup.groupUpdateApp(w, req, groupID, appID)
			default:
				common.WriteError(w, errors.InvalidMethod{req.Method})
			}
		} else {
			common.WriteError(w, errors.NotFoundURL{})
		}
	}
}
// createGroup handles requests used to create a new group.
//
// paths: '/api/v1/groups/create'
// method: POST (Handle routes only POST here; the old comment said GET)
// responses: if successful, 200 status code will be returned.
func (Groupasdam _SDAMGroupApis) createGroup(w http.ResponseWriter, req *http.Request) {
	logger.Logging(logger.DEBUG, "[GROUP] Create SDA Group")
	result, resp, err := sdamGroupController.CreateGroup()
	common.MakeResponse(w, result, common.ChangeToJson(resp), err)
}
// group handles requests used to get or delete the group identified by the
// given groupID.
//
// paths: '/api/v1/groups/{groupID}'
// methods: GET (fetch), DELETE (remove) — both are routed here by Handle
// responses: if successful, 200 status code will be returned.
func (Groupasdam _SDAMGroupApis) group(w http.ResponseWriter, req *http.Request, groupID string) {
	var result int
	var resp map[string]interface{}
	var err error
	switch req.Method {
	case GET:
		logger.Logging(logger.DEBUG, "[GROUP] Get SDA Group")
		result, resp, err = sdamGroupController.GetGroup(groupID)
	case DELETE:
		logger.Logging(logger.DEBUG, "[GROUP] Delete SDA Group")
		result, resp, err = sdamGroupController.DeleteGroup(groupID)
	}
	common.MakeResponse(w, result, common.ChangeToJson(resp), err)
}
// groups handles requests used to list information on all created groups.
//
// paths: '/api/v1/groups'
// method: GET
// responses: if successful, 200 status code will be returned.
func (_SDAMGroupApis) groups(w http.ResponseWriter, req *http.Request) {
	logger.Logging(logger.DEBUG, "[GROUP] Get All SDA Groups")
	code, resp, err := sdamGroupController.GetGroups()
	common.MakeResponse(w, code, common.ChangeToJson(resp), err)
}
// groupJoin adds an agent to the member list of the group identified by
// groupID; the agent descriptor comes from the request body.
//
// paths: '/api/v1/groups/{groupID}/join'
// method: POST
// responses: if successful, 200 status code will be returned.
func (_SDAMGroupApis) groupJoin(w http.ResponseWriter, req *http.Request, groupID string) {
	logger.Logging(logger.DEBUG, "[GROUP] Join SDA Group")
	body, err := common.GetBodyFromReq(req)
	if err != nil {
		common.MakeResponse(w, results.ERROR, nil, err)
		return
	}
	code, resp, err := sdamGroupController.JoinGroup(groupID, body)
	common.MakeResponse(w, code, common.ChangeToJson(resp), err)
}
// groupLeave removes an agent from the member list of the group identified
// by groupID; the agent descriptor comes from the request body.
//
// paths: '/api/v1/groups/{groupID}/leave'
// method: POST
// responses: if successful, 200 status code will be returned.
func (_SDAMGroupApis) groupLeave(w http.ResponseWriter, req *http.Request, groupID string) {
	logger.Logging(logger.DEBUG, "[GROUP] Leave SDA Group")
	body, err := common.GetBodyFromReq(req)
	if err != nil {
		common.MakeResponse(w, results.ERROR, nil, err)
		return
	}
	code, resp, err := sdamGroupController.LeaveGroup(groupID, body)
	common.MakeResponse(w, code, common.ChangeToJson(resp), err)
}
// groupDeployApp deploys a new application (described in the request body)
// to the group identified by groupID.
//
// paths: '/api/v1/groups/{groupID}/apps/deploy'
// method: POST
// responses: if successful, 200 status code will be returned.
func (_SDAMGroupApis) groupDeployApp(w http.ResponseWriter, req *http.Request, groupID string) {
	logger.Logging(logger.DEBUG, "[GROUP] Deploy App")
	body, err := common.GetBodyFromReq(req)
	if err != nil {
		common.MakeResponse(w, results.ERROR, nil, err)
		return
	}
	code, resp, err := sdamGroupController.DeployApp(groupID, body)
	common.MakeResponse(w, code, common.ChangeToJson(resp), err)
}
// groupInfoApps lists information on all applications installed on the group
// identified by groupID.
//
// paths: '/api/v1/groups/{groupID}/apps'
// method: GET
// responses: if successful, 200 status code will be returned.
func (_SDAMGroupApis) groupInfoApps(w http.ResponseWriter, req *http.Request, groupID string) {
	logger.Logging(logger.DEBUG, "[GROUP] Get Info Apps")
	code, resp, err := sdamGroupController.GetApps(groupID)
	common.MakeResponse(w, code, common.ChangeToJson(resp), err)
}
// groupInfoApp returns information on the application identified by appID
// within the group identified by groupID.
//
// paths: '/api/v1/groups/{groupID}/apps/{appID}'
// method: GET
// responses: if successful, 200 status code will be returned.
func (_SDAMGroupApis) groupInfoApp(w http.ResponseWriter, req *http.Request, groupID string, appID string) {
	logger.Logging(logger.DEBUG, "[GROUP] Get Info App")
	code, resp, err := sdamGroupController.GetApp(groupID, appID)
	common.MakeResponse(w, code, common.ChangeToJson(resp), err)
}
// groupUpdateAppInfo updates the application identified by appID on the
// group identified by groupID, using the yaml supplied in the request body.
//
// paths: '/api/v1/groups/{groupID}/apps/{appID}'
// method: POST
// responses: if successful, 200 status code will be returned.
func (_SDAMGroupApis) groupUpdateAppInfo(w http.ResponseWriter, req *http.Request, groupID string, appID string) {
	logger.Logging(logger.DEBUG, "[GROUP] Update App Info")
	body, err := common.GetBodyFromReq(req)
	if err != nil {
		common.MakeResponse(w, results.ERROR, nil, err)
		return
	}
	code, resp, err := sdamGroupController.UpdateAppInfo(groupID, appID, body)
	common.MakeResponse(w, code, common.ChangeToJson(resp), err)
}
// groupDeleteApp deletes the application identified by appID from the group
// identified by groupID.
//
// paths: '/api/v1/groups/{groupID}/apps/{appID}'
// method: DELETE
// responses: if successful, 200 status code will be returned.
func (_SDAMGroupApis) groupDeleteApp(w http.ResponseWriter, req *http.Request, groupID string, appID string) {
	logger.Logging(logger.DEBUG, "[GROUP] Delete App")
	code, resp, err := sdamGroupController.DeleteApp(groupID, appID)
	common.MakeResponse(w, code, common.ChangeToJson(resp), err)
}
// groupStartApp starts the application identified by appID on the group
// identified by groupID.
//
// paths: '/api/v1/groups/{groupID}/apps/{appID}/start'
// method: POST
// responses: if successful, 200 status code will be returned.
func (_SDAMGroupApis) groupStartApp(w http.ResponseWriter, req *http.Request, groupID string, appID string) {
	logger.Logging(logger.DEBUG, "[GROUP] Start App")
	code, resp, err := sdamGroupController.StartApp(groupID, appID)
	common.MakeResponse(w, code, common.ChangeToJson(resp), err)
}
// groupStopApp stops the application identified by appID on the group
// identified by groupID.
//
// paths: '/api/v1/groups/{groupID}/apps/{appID}/stop'
// method: POST
// responses: if successful, 200 status code will be returned.
func (_SDAMGroupApis) groupStopApp(w http.ResponseWriter, req *http.Request, groupID string, appID string) {
	logger.Logging(logger.DEBUG, "[GROUP] Stop App")
	code, resp, err := sdamGroupController.StopApp(groupID, appID)
	common.MakeResponse(w, code, common.ChangeToJson(resp), err)
}
// groupUpdateApp refreshes the application identified by appID on the group
// identified by groupID.
//
// paths: '/api/v1/groups/{groupID}/apps/{appID}/update'
// method: POST
// responses: if successful, 200 status code will be returned.
func (_SDAMGroupApis) groupUpdateApp(w http.ResponseWriter, req *http.Request, groupID string, appID string) {
	logger.Logging(logger.DEBUG, "[GROUP] Update App")
	code, resp, err := sdamGroupController.UpdateApp(groupID, appID)
	common.MakeResponse(w, code, common.ChangeToJson(resp), err)
}
|
package guard
import (
"context"
"github.com/adamluzsi/frameless/internal/consttypes"
)
// Locker is a mutual-exclusion primitive whose lock ownership is carried in
// a context value.
type Locker interface {
	// Lock locks the Locker resource.
	// If the lock is already in use, the caller blocks until the lock becomes available.
	// It returns a context that represents the locked scope and an error that is nil if locking succeeded.
	// The returned context may hold locking-related runtime state,
	// and it might signal cancellation if the ownership of the lock is lost for some reason.
	Lock(ctx context.Context) (_lockCtx context.Context, _ error)
	// Unlock unlocks the Locker resource.
	// It is an error if Locker is not locked on entry to Unlock.
	//
	// It takes the context that Lock returned.
	Unlock(lockCtx context.Context) error
}
// ErrNoLock is returned when an operation requires a held lock but none is held.
const ErrNoLock consttypes.Error = "ErrNoLock"
// LockerFactory yields a Locker scoped to a given key, enabling independent
// locking per key.
type LockerFactory[Key comparable] interface {
	LockerFor(Key) Locker
}
// FactoryFunc adapts a plain function to the LockerFactory interface.
type FactoryFunc[Key comparable] func(Key) Locker
// LockerFor implements LockerFactory by invoking the function itself.
func (fn FactoryFunc[Key]) LockerFor(key Key) Locker { return fn(key) }
|
package main
import (
"bufio"
"context"
"encoding/json"
"flag"
"fmt"
"html/template"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path"
"strings"
"sync"
"time"
"github.com/google/uuid"
log "github.com/sirupsen/logrus"
)
// query is a single request sent to an evaler subprocess as one JSON line.
type query struct {
	Code string `json:"code"` // program text for the evaler to run
	Args map[string]interface{} `json:"args"` // named values exposed to the code; normalized to non-nil by parseQuery
	Gen *int `json:"gen,omitempty"` // presumably a database generation to pin the query to — confirm with evaler protocol
	Readonly bool `json:"readonly,omitempty"` // presumably marks the query as non-mutating — confirm with evaler protocol
}
// String renders the query for log output: Go-quoted code followed by args.
func (q *query) String() string {
	return fmt.Sprintf("%#v (%#v)", q.Code, q.Args)
}
// queryResult is the evaler's answer to one query, augmented by doEval with
// the Warm flag and wall-clock duration.
type queryResult struct {
	Object interface{} `json:"object"` // evaluation result value
	Error interface{} `json:"error,omitempty"` // evaler-reported or synthesized error, nil on success
	Warm bool `json:"warm"` // set by doEval: true when an existing evaler process served the query
	WallTime uint64 `json:"walltime"` // set by doEval: elapsed time in nanoseconds (time.Duration units)
	Gen int `json:"gen"` // resulting generation; -1 is used for timeout/decode failures
	Parent int `json:"parent"` // presumably the parent generation — confirm with evaler protocol
}
// transac pairs a query with its result for the transaction log and the
// /tail event stream.
type transac struct {
	Query query `json:"query"`
	Result queryResult `json:"result"`
}
// evaler owns one running evaler subprocess for a single database. The
// embedded Mutex serializes queries over the process's stdio pipes
// (one JSON line in, one JSON line out — see doEval).
type evaler struct {
	db string // database this process serves
	in io.Writer // subprocess stdin
	out io.Reader // subprocess stdout
	cmd *exec.Cmd
	sync.Mutex
}
// evalers is the pool of live evaler processes, guarded by elock.
var evalers = []*evaler{}
var elock = sync.RWMutex{}
// userDBsPath is the directory containing the user database files;
// presumably assigned during startup (flag parsing not shown here).
var userDBsPath string
// newEvaler launches the evaler subprocess for db (binary chosen by
// dbEvaler) and wires up its stdin/stdout pipes. The process keeps running
// until timeout() kills it; its stderr is passed through to ours.
func newEvaler(db string) (*evaler, error) {
	evalerName, err := dbEvaler(db)
	if err != nil {
		return nil, err
	}
	cmd := exec.Command("./"+evalerName, "-d", path.Join(userDBsPath, db), "-s")
	cmd.Stderr = os.Stderr
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return nil, err
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	// The embedded sync.Mutex's zero value is ready to use, so the explicit
	// `Mutex: sync.Mutex{}` initializer was redundant and has been dropped.
	return &evaler{
		db:  db,
		cmd: cmd,
		in:  stdin,
		out: stdout,
	}, nil
}
// acquireEvaler returns a locked evaler for db, spawning and registering a
// new subprocess if none exists. The bool reports whether the process is
// fresh (cold start). The caller must release() it when done.
//
// NOTE(review): there is a check-then-act window between RUnlock and the
// append under the write lock — two concurrent callers for the same db can
// each spawn a process. Confirm whether duplicate evalers are acceptable.
func acquireEvaler(db string) (*evaler, bool, error) {
	elock.RLock()
	for _, e := range evalers {
		if e.db == db {
			// Drop the registry lock before blocking on the per-evaler lock.
			elock.RUnlock()
			e.Lock()
			return e, false, nil
		}
	}
	elock.RUnlock()
	e, err := newEvaler(db)
	if err != nil {
		return nil, false, err
	}
	elock.Lock()
	evalers = append(evalers, e)
	elock.Unlock()
	e.Lock()
	return e, true, nil
}
// release makes the evaler available to other callers again by dropping the
// per-process lock taken in acquireEvaler.
func (e *evaler) release() {
	e.Unlock()
}
// timeout kills the subprocess, reaps it, and removes the evaler from the
// pool (swap-with-last removal). Panics if the evaler is not registered,
// which would mean the pool bookkeeping is broken.
func (e *evaler) timeout() {
	e.cmd.Process.Kill()
	e.cmd.Wait()
	elock.Lock()
	defer elock.Unlock()
	for i, ev := range evalers {
		if ev.db == e.db {
			evalers[i] = evalers[len(evalers)-1]
			evalers = evalers[:len(evalers)-1]
			return
		}
	}
	panic("oh nos")
}
const MaxRequestSize = 4096
// parseQuery decodes a JSON query from the request body, normalizing a
// missing args object to an empty map. On any failure it writes an HTTP 400
// and returns ok == false.
func parseQuery(w http.ResponseWriter, r *http.Request) (*query, bool) {
	if !strings.Contains(r.Header.Get("Content-Type"), "application/json") {
		http.Error(w, "json only", http.StatusBadRequest)
		return nil, false
	}
	buff, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return nil, false
	}
	var q query
	if err := json.Unmarshal(buff, &q); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return nil, false
	}
	if q.Args == nil {
		q.Args = map[string]interface{}{}
	}
	return &q, true
}
// memgraph renders the memory graph of a database as SVG by piping the
// output of ./memgraph through graphviz (dot by default, neato with
// ?render=neato). ?labels and ?segments toggle the -l and -s flags.
func memgraph(w http.ResponseWriter, r *http.Request) {
	var renderer *exec.Cmd
	if r.URL.Query().Get("render") == "neato" {
		renderer = exec.Command("neato", "-Tsvg")
	} else {
		renderer = exec.Command("dot", "-Tsvg")
	}
	db := r.URL.Query().Get("db")
	if db == "" {
		// No db parameter: answer with an empty 200.
		return
	}
	grapher := exec.Command("./memgraph", "-d", path.Join(userDBsPath, db))
	if r.URL.Query().Get("labels") != "" {
		grapher.Args = append(grapher.Args, "-l")
	}
	if r.URL.Query().Get("segments") != "" {
		grapher.Args = append(grapher.Args, "-s")
	}
	// Pipeline: grapher stdout -> renderer stdin; renderer writes SVG to the client.
	renderer.Stdin, _ = grapher.StdoutPipe()
	renderer.Stdout = w
	w.Header().Set("content-type", "image/svg+xml")
	err := grapher.Start()
	if err != nil {
		log.WithField("db", db).WithError(err).Println("cant start grapher")
		w.WriteHeader(500)
		return
	}
	err = renderer.Start()
	if err != nil {
		log.WithField("db", db).WithError(err).Println("cant start render")
		w.WriteHeader(500)
		return
	}
	err = grapher.Wait()
	if err != nil {
		log.WithField("db", db).WithError(err).Println("cant wait grapher")
		w.WriteHeader(500)
		return
	}
	err = renderer.Wait()
	if err != nil {
		log.WithField("db", db).WithError(err).Println("cant wait render")
		w.WriteHeader(500)
		return
	}
}
// tail streams the transaction log of the target database to the client as
// server-sent events until the client disconnects.
func tail(w http.ResponseWriter, r *http.Request) {
	target := strings.TrimPrefix(r.URL.Path, "/tail/")
	log.WithField("db", target).Println("tailing")
	w.Header().Set("Content-Type", "text/event-stream")
	f := w.(http.Flusher)
	ch := make(chan *transac)
	go func() {
		err := tailDB(target, ch)
		if err != nil {
			log.WithField("db", target).WithError(err).Println("tail error")
			http.Error(w, "oh no", http.StatusBadRequest)
		}
	}()
	defer unTailDB(ch)
	ctx := r.Context()
	for {
		select {
		case <-ctx.Done():
			log.WithField("db", target).Println("tail done")
			return
		case t := <-ch:
			data, _ := json.Marshal(t)
			_, err := w.Write([]byte(
				"event: transac\ndata: " + string(data) + "\n\n",
			))
			if err != nil {
				// BUG FIX: this log line referenced an undefined `db`
				// identifier (compile error); the local is `target`.
				log.WithField("db", target).WithError(err).Println("tail write error")
				return
			}
			f.Flush()
		}
	}
}
// doEval runs q against dbname's evaler with a 20-second wall-clock budget,
// logs the transaction, and returns the result. On timeout the subprocess is
// killed and a synthetic error result (Gen -1) is returned. Panics on
// infrastructure failures (acquire, marshal, transaction logging).
func doEval(dbname string, q *query) *queryResult {
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	e, fresh, err := acquireEvaler(dbname)
	if err != nil {
		log.WithField("db", dbname).WithError(err).Println("unable to acquire")
		panic(err)
	}
	defer e.release()
	startAt := time.Now()
	// Writer goroutine: send the query as a single JSON line on stdin.
	go func() {
		qbuff, err := json.Marshal(q)
		if err != nil {
			panic(err)
		}
		e.in.Write(qbuff)
		e.in.Write([]byte{'\n'})
	}()
	ran := make(chan struct{})
	var buff []byte
	// Reader goroutine: block until one response line arrives on stdout.
	go func() {
		scan := bufio.NewScanner(e.out)
		scan.Scan()
		buff = scan.Bytes()
		close(ran)
	}()
	var result queryResult
	select {
	case <-ctx.Done():
		log.WithField("db", dbname).
			WithField("query", q).
			WithError(err).Println("execution timed out")
		// Killing the process unblocks the reader's Scan; wait for it so buff
		// is not written concurrently afterwards.
		e.timeout()
		<-ran
		result = queryResult{
			Parent: 0,
			Gen:    -1,
			Error:  "execution timed out",
		}
	case <-ran:
		if err := json.Unmarshal(buff, &result); err != nil {
			// Undecodable response: surface the raw bytes in the error field.
			result.Parent = 0
			result.Gen = -1
			log.WithField("db", dbname).
				WithField("query", q).
				WithError(err).
				Printf("unable to unmarshal: %#v", string(buff))
			result.Error = fmt.Sprintf(
				"evaler send a bad response:\n%v\ngot: %#v",
				err,
				string(buff),
			)
			break
		}
	}
	result.Warm = !fresh
	result.WallTime = uint64(time.Since(startAt))
	err = logTransac(dbname, &transac{
		Query:  *q,
		Result: result,
	})
	if err != nil {
		panic(err)
	}
	return &result
}
// eval executes a JSON query against the named database and writes the
// result back as JSON. CORS headers are always set, and HEAD/OPTIONS
// preflight requests short-circuit after them.
func eval(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set(
		"Access-Control-Allow-Methods",
		"POST, GET, OPTIONS, PUT, DELETE",
	)
	if r.Method == "HEAD" || r.Method == "OPTIONS" {
		return
	}
	dbname := strings.TrimPrefix(r.URL.Path, "/eval/")
	if !hasDB(dbname) {
		http.Error(w, "doesn't exist", http.StatusBadRequest)
		return
	}
	// Cap the body size before parsing.
	r.Body = http.MaxBytesReader(w, r.Body, MaxRequestSize)
	q, ok := parseQuery(w, r)
	if !ok {
		return
	}
	log.WithField("db", dbname).WithField("query", q).Println("evaling")
	out, err := json.Marshal(doEval(dbname, q))
	if err != nil {
		panic(err)
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(out)
}
// create provisions a fresh database with a random UUID name for the
// requested evaler language ("luaval" or "duktape") and redirects the
// client to its query page.
func create(w http.ResponseWriter, r *http.Request) {
	err := r.ParseForm()
	if err != nil {
		log.WithError(err).Println("unable to parse create")
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	lang := r.FormValue("lang")
	name := uuid.New().String()
	if hasDB(name) {
		panic("wut?") // a UUID collision should be effectively impossible
	}
	if lang == "" {
		http.Error(w, "not sure about that language", http.StatusBadRequest)
		return
	}
	if lang != "luaval" && lang != "duktape" {
		// Fixed the log-message typo ("unepxected") and dropped WithError(err):
		// err is always nil on this path, so it only added noise.
		log.Println("unexpected language:", lang)
		http.Error(w, "invalid language", http.StatusBadRequest)
		return
	}
	err = createDB(name, lang)
	if err != nil {
		if err == errDBExists {
			log.WithField("db", name).Println("db exists, weird")
			http.Redirect(w, r, "/query/"+name, http.StatusFound)
			return
		}
		log.WithField("db", name).WithError(err).Println("unable to create db")
		http.Error(w, "oh no", http.StatusInternalServerError)
		return
	}
	log.WithField("db", name).WithField("lang", lang).Println("created db")
	http.Redirect(w, r, "/query/"+name, http.StatusFound)
}
// link binds a hostname to an existing database so web requests for that host
// are routed to it, then redirects back to the database's query page.
func link(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		http.Error(w, "unable to parse link", http.StatusBadRequest)
		return
	}
	name := r.FormValue("dbname")
	if !hasDB(name) {
		http.Error(w, "huh that db doesn't exist???", http.StatusBadRequest)
		return
	}
	host := r.FormValue("hostname")
	// Best-effort lookup: an error from dbForLink is treated as "no existing
	// link", matching the original behavior.
	existing, _ := dbForLink(host)
	if existing != "" {
		http.Error(w, "another db is using that hostname", http.StatusBadRequest)
		return
	}
	if err := setLink(name, host); err != nil {
		panic(err)
	}
	log.WithField("db", name).WithField("host", host).Println("linked domain")
	http.Redirect(w, r, "/query/"+name, http.StatusFound)
}
// queryPage renders the interactive query page for an existing database,
// passing its evaluator language and (optional) linked hostname to the
// template.
func queryPage(w http.ResponseWriter, r *http.Request) {
	dbname := strings.TrimPrefix(r.URL.Path, "/query/")
	log.WithField("db", dbname).Println("get query page")
	if !hasDB(dbname) {
		http.Error(w, "not found", http.StatusNotFound)
		return
	}
	lang, err := dbEvaler(dbname)
	if err != nil {
		http.Error(w, "not found", http.StatusInternalServerError)
		return
	}
	// A missing link is fine; the page just renders without one.
	link, err := linkForDB(dbname)
	if err != nil {
		link = ""
	}
	// BUG FIX: template.Must panics, which previously crashed the handler on
	// any parse problem; report a 500 instead. Parsing per request is kept so
	// template edits show up without a restart.
	indexTmpl, err := template.ParseFiles("./client/query.html")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	err = indexTmpl.Execute(w, struct {
		Lang string
		Name string
		Link string
	}{
		Lang: lang,
		Name: dbname,
		Link: link,
	})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}
// webReqHandle serves user-domain HTTP traffic: it resolves the database
// linked to the request's Host, evaluates that db's `http.req(r)` route with
// the request path and method, and translates the script's return value into
// an HTTP response — either a bare string body, or a table with optional
// "status", "headers", and "body" keys.
func webReqHandle(w http.ResponseWriter, r *http.Request) {
	dbname, err := dbForHost(r.Host)
	if err == errDBNotExists {
		w.WriteHeader(http.StatusNotFound)
		w.Write([]byte("404"))
		log.WithField("db", dbname).
			WithField("path", r.URL.Path).
			WithField("host", r.Host).
			Println("web req not found")
		return
	} else if err != nil {
		panic(err)
	}
	log.WithField("db", dbname).
		WithField("path", r.URL.Path).
		WithField("host", r.Host).
		Println("web req")
	// The request is exposed to the user's script as `r` with only path and
	// method; headers are deliberately not forwarded (see commented-out line).
	result := doEval(dbname, &query{
		Code: "return http.req(r)",
		Args: map[string]interface{}{
			"r": map[string]interface{}{
				"path": r.URL.Path,
				"method": r.Method,
				//"headers": r.Header,
			},
		},
	})
	if result.Error != nil {
		w.WriteHeader(http.StatusInternalServerError)
		if asStr, ok := result.Error.(string); ok {
			w.Write([]byte(asStr))
		} else {
			w.Write([]byte("internal error, route return object not a string"))
		}
		return
	}
	// A bare string result becomes a 200 with that body.
	if asStr, ok := result.Object.(string); ok {
		w.Write([]byte(asStr))
	} else if asObj, ok := result.Object.(map[string]interface{}); ok {
		// Table result. JSON numbers decode as float64, hence the float64
		// assertion for "status". Headers must be set before WriteHeader.
		status := http.StatusOK
		if maybeKey, ok := asObj["status"]; ok {
			if maybeInt, ok := maybeKey.(float64); ok {
				status = int(maybeInt)
			}
		}
		if maybeKey, ok := asObj["headers"]; ok {
			if maybeHeaders, ok := maybeKey.(map[string]interface{}); ok {
				for key, val := range maybeHeaders {
					// Non-string header values are silently skipped.
					if maybeStr, ok := val.(string); ok {
						w.Header().Set(key, maybeStr)
					}
				}
			}
		}
		w.WriteHeader(status)
		if maybeKey, ok := asObj["body"]; ok {
			if maybeBody, ok := maybeKey.(string); ok {
				w.Write([]byte(maybeBody))
			}
		}
	} else {
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("internal error, route didn't return a string"))
	}
}
// main wires up two HTTP muxes — the admin UI and the user web routes — and
// serves both on one port, dispatching by the request's Host header.
func main() {
	dataPath := flag.String("path", "./db/", "path to the root folder")
	port := flag.String("port", "5000", "port to listen on")
	flag.Parse()
	openDB(path.Join(*dataPath, "__meta"))
	userDBsPath = path.Join(*dataPath, "dbs")

	// Admin/root-domain routes.
	adminMux := &http.ServeMux{}
	adminMux.HandleFunc("/create", create)
	adminMux.HandleFunc("/link", link)
	adminMux.HandleFunc("/memgraph.svg", memgraph)
	adminMux.HandleFunc("/eval/", eval)
	adminMux.HandleFunc("/tail/", tail)
	adminMux.HandleFunc("/query/", queryPage)
	adminMux.Handle("/", http.FileServer(http.Dir("./client")))

	// User-linked-domain routes.
	userMux := &http.ServeMux{}
	userMux.HandleFunc("/", webReqHandle)

	root := &http.ServeMux{}
	root.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if isRoot(r.Host) {
			adminMux.ServeHTTP(w, r)
		} else {
			userMux.ServeHTTP(w, r)
		}
	})
	log.Println("http on :" + *port)
	log.Fatalln(http.ListenAndServe(":"+*port, root))
}
// dbForHost maps a request Host to a database name by stripping the dev and
// production domain suffixes and looking up the remaining link name.
func dbForHost(host string) (string, error) {
	for _, suffix := range []string{".localhost:5000", ".turb.io"} {
		host = strings.TrimSuffix(host, suffix)
	}
	return dbForLink(host)
}
// isRoot reports whether the request host is the admin/root domain rather
// than a user-linked domain.
func isRoot(host string) bool {
	switch host {
	case "localhost:5000", "evaldb.turb.io":
		return true
	}
	return false
}
|
package hasher
import (
"crypto/sha1"
"fmt"
)
// PasswordHasher turns a plain-text password into a printable hash string.
type PasswordHasher interface {
	Hash(password string) (string, error)
}

// SHA1Hasher implements PasswordHasher with SHA-1.
//
// NOTE(review): the salt is prepended verbatim to the digest output rather
// than mixed into the hashed input, so it does not strengthen the hash, and
// SHA-1 is generally considered unsuitable for password storage. Behavior is
// kept as-is for compatibility with already-stored hashes — confirm before
// changing.
type SHA1Hasher struct {
	salt string
}

// NewSHA1Hasher returns a SHA1Hasher using the given salt.
func NewSHA1Hasher(salt string) *SHA1Hasher {
	return &SHA1Hasher{salt: salt}
}

// Hash returns hex(salt || SHA1(password)). The error result is always nil;
// it is kept to satisfy PasswordHasher.
func (h *SHA1Hasher) Hash(password string) (string, error) {
	digest := sha1.Sum([]byte(password))
	out := append([]byte(h.salt), digest[:]...)
	return fmt.Sprintf("%x", out), nil
}
|
package config
import (
"fmt"
"path/filepath"
"reflect"
"time"
"github.com/dnephin/configtf"
pth "github.com/dnephin/configtf/path"
docker "github.com/fsouza/go-dockerclient"
"github.com/pkg/errors"
)
// ImageConfig An **image** resource provides actions for working with a Docker
// image. If an image is buildable it is considered up-to-date if all files in
// the build context have a modified time older than the created time of the
// image. If using an inline Dockerfile, the **dobi.yaml** file will be
// considered as a part of the build context.
// name: image
// example: An image with build args:
//
// .. code-block:: yaml
//
//     image=project-dev:
//         image: myproject-dev
//         context: .
//         args:
//           version: '3.1.4'
//           url: http://example.com/foo
//
type ImageConfig struct {
	// Image The name of the **image** without a tag. Tags must be specified
	// in the **tags** field. This field supports :doc:`variables`.
	Image string `config:"required,validate"`
	// Dockerfile The path to the ``Dockerfile`` used to build the image. This
	// path is relative to ``context``. Cannot be used with ``steps``.
	Dockerfile string
	// Steps An inline Dockerfile used to build the image. ``steps`` cannot
	// be used with the ``dockerfile`` field.
	Steps string
	// Context The build context used to build the image.
	// default: ``.``
	Context string
	// Args Build args used to build the image. Values in the mapping support
	// :doc:`variables`.
	// type: mapping ``key: value``
	Args map[string]string
	// Target The target stage to build in a multi-stage Dockerfile. Defaults to
	// the last stage.
	Target string
	// PullBaseImageOnBuild If **true** the base image used in the
	// ``Dockerfile`` will be pulled before building the image.
	PullBaseImageOnBuild bool
	// Pull Pull an image instead of building it. The value may be one of:
	// * ``once`` - only pull if the image:tag does not exist
	// * ``always`` - always pull the image
	// * ``never`` - don't pull or build the image. Use one that is already present locally
	// * ``<duration>`` - pull if the image hasn't been pulled in at least
	//   ``duration``. The format of duration is a number followed by a single
	//   character time unit (ex: ``40s``, ``2h``, ``30min``)
	// type: string
	// default: ``always``
	Pull pull
	// Tags The image tags applied to the image.
	// The first tag in the list is used when the image is built.
	// Each item in the list supports :doc:`variables`.
	// default: ``['{unique}']``
	// type: list of tags
	Tags []string `config:"validate"`
	// RemoteTags The image tags applied to the image before pushing/pulling the image
	// to/from a registry. If not provided, regular tags are used for remote operations.
	// Each item in the list supports :doc:`variables`.
	// default: ``tags``
	// type: list of tags
	RemoteTags []string
	// NetworkMode The network mode to use for each step in the Dockerfile.
	NetworkMode string
	// CacheFrom A list of images to use as the cache for a build.
	CacheFrom []string
	Dependent
	Annotations
}
// Validate checks that all fields have acceptable values.
func (c *ImageConfig) Validate(path pth.Path, config *Config) *pth.Error {
	if err := c.validateBuildOrPull(); err != nil {
		// BUG FIX: err.Error() was previously passed as the format string, so
		// a '%' in the message would be misinterpreted (go vet: non-constant
		// format string). Pass it as an argument instead.
		return pth.Errorf(path, "%s", err)
	}
	return nil
}
// validateBuildOrPull applies the context/dockerfile defaults and verifies
// that the config describes either a build or a pull, and not an invalid mix
// of dockerfile and inline steps.
func (c *ImageConfig) validateBuildOrPull() error {
	c.setDefaultContext()
	if c.Context == "" && !c.Pull.IsSet() {
		return errors.New("one of context, or pull is required")
	}
	if c.Dockerfile != "" && c.Steps != "" {
		return errors.New("dockerfile can not be used with steps")
	}
	c.setDefaultDockerfile()
	return nil
}
// setDefaultContext defaults the build context to the current directory when
// only a Dockerfile was given.
func (c *ImageConfig) setDefaultContext() {
	if c.Context != "" || c.Dockerfile == "" {
		return
	}
	c.Context = "."
}
// setDefaultDockerfile defaults the Dockerfile name when a context is set and
// neither inline steps nor an explicit Dockerfile were provided.
func (c *ImageConfig) setDefaultDockerfile() {
	needsDefault := c.Context != "" && c.Steps == "" && c.Dockerfile == ""
	if needsDefault {
		c.Dockerfile = "Dockerfile"
	}
}
// IsBuildable returns true if the config has the minimum required fields to
// build an image: a context plus either inline steps or a Dockerfile.
func (c *ImageConfig) IsBuildable() bool {
	if c.Context == "" {
		return false
	}
	return c.Steps != "" || c.Dockerfile != ""
}
// ValidateImage validates that the image field does not include a tag.
func (c *ImageConfig) ValidateImage() error {
	if _, tag := docker.ParseRepositoryTag(c.Image); tag != "" {
		return errors.Errorf(
			"tag %q must be specified in the `tags` field, not in `image`", tag)
	}
	return nil
}
// ValidateTags ensures the first tag is a basic tag without an image name
// (the first tag is the one used when building).
func (c *ImageConfig) ValidateTags() error {
	if len(c.Tags) == 0 {
		return nil
	}
	if _, tag := docker.ParseRepositoryTag(c.Tags[0]); tag != "" {
		return errors.Errorf("the first tag %q may not include an image name", tag)
	}
	return nil
}
// String describes the image build in human-readable form.
func (c *ImageConfig) String() string {
	return fmt.Sprintf("Build image '%s' from '%s'",
		c.Image, filepath.Join(c.Context, c.Dockerfile))
}
// Resolve resolves variables in the resource. It returns a resolved copy of
// the config; the receiver is never mutated. On error, the partially resolved
// copy is returned alongside the error (matching previous behavior).
func (c *ImageConfig) Resolve(resolver Resolver) (Resource, error) {
	conf := *c
	var err error
	conf.Tags, err = resolver.ResolveSlice(c.Tags)
	if err != nil {
		return &conf, err
	}
	conf.CacheFrom, err = resolver.ResolveSlice(c.CacheFrom)
	if err != nil {
		return &conf, err
	}
	conf.Image, err = resolver.Resolve(c.Image)
	if err != nil {
		return &conf, err
	}
	conf.Steps, err = resolver.Resolve(c.Steps)
	if err != nil {
		return &conf, err
	}
	// BUG FIX: conf is a shallow copy, so conf.Args aliased c.Args and the
	// loop below wrote resolved values back into the ORIGINAL config's map.
	// Resolve into a fresh map instead.
	if c.Args != nil {
		conf.Args = make(map[string]string, len(c.Args))
		for key, value := range c.Args {
			conf.Args[key], err = resolver.Resolve(value)
			if err != nil {
				return &conf, err
			}
		}
	}
	return &conf, nil
}
// NewImageConfig creates a new ImageConfig with zero-value defaults.
func NewImageConfig() *ImageConfig {
	conf := new(ImageConfig)
	return conf
}
// pullAction decides, given the time of the last pull (nil if never pulled),
// whether the image should be pulled again.
type pullAction func(*time.Time) bool

// pull is the config wrapper for the image ``pull`` field; action stays nil
// until TransformConfig parses a value.
type pull struct {
	action pullAction
}
// TransformConfig parses the raw ``pull`` config value — "once", "never",
// "always", or a duration string — into the matching pullAction.
func (p *pull) TransformConfig(raw reflect.Value) error {
	if !raw.IsValid() {
		return fmt.Errorf("must be a string, was undefined")
	}
	value, ok := raw.Interface().(string)
	if !ok {
		return fmt.Errorf("must be a string, not %T", raw.Interface())
	}
	switch value {
	case "once":
		p.action = pullOnce
	case "never":
		p.action = pullNever
	case "always":
		p.action = pullAlways
	default:
		// Anything else must be a duration, e.g. "40s" or "2h".
		duration, err := time.ParseDuration(value)
		if err != nil {
			return fmt.Errorf("invalid pull value %q: %s", value, err)
		}
		p.action = pullAfter{duration: duration}.doPull
	}
	return nil
}
// Required reports whether a pull is needed given the last pull time; an
// unset pull policy always requires pulling.
func (p *pull) Required(lastPull *time.Time) bool {
	if p.IsSet() {
		return p.action(lastPull)
	}
	return true
}
// IsSet reports whether a pull policy has been configured.
func (p *pull) IsSet() bool {
	return p.action != nil
}
// pullAlways always pulls, regardless of pull history.
func pullAlways(_ *time.Time) bool {
	return true
}

// pullNever never pulls.
func pullNever(_ *time.Time) bool {
	return false
}

// pullOnce pulls only if the image has never been pulled before.
func pullOnce(lastPull *time.Time) bool {
	return lastPull == nil
}
type pullAfter struct {
duration time.Duration
}
func (p pullAfter) doPull(lastPull *time.Time) bool {
if lastPull == nil {
return true
}
return lastPull.Before(time.Now().Add(-p.duration))
}
// imageFromConfig builds an image resource from raw config values.
func imageFromConfig(name string, values map[string]interface{}) (Resource, error) {
	image := NewImageConfig()
	err := configtf.Transform(name, values, image)
	return image, err
}
// init registers the "image" resource type with the config loader.
func init() {
	RegisterResource("image", imageFromConfig)
}
|
package release
import (
"log"
"os"
"os/exec"
"github.com/google/go-github/github"
"github.com/goreleaser/goreleaser/clients"
"github.com/goreleaser/goreleaser/context"
"golang.org/x/sync/errgroup"
)
// Pipe for github release.
type Pipe struct{}

// Description of the pipe.
func (Pipe) Description() string {
	return "Releasing to GitHub"
}
// Run the pipe: ensures a GitHub release exists for the current tag, then
// uploads every built archive to it concurrently, returning the first upload
// error (if any).
func (Pipe) Run(ctx *context.Context) error {
	client := clients.GitHub(ctx.Token)
	r, err := getOrCreateRelease(client, ctx)
	if err != nil {
		return err
	}
	var g errgroup.Group
	for _, archive := range ctx.Archives {
		// Capture a per-iteration copy for the goroutine (pre-Go 1.22 loop
		// variable semantics).
		archive := archive
		g.Go(func() error {
			return upload(client, *r.ID, archive, ctx)
		})
	}
	return g.Wait()
}
// getOrCreateRelease fetches the GitHub release for the current tag, creating
// it when the lookup fails, or updating its name/tag/body when it exists.
//
// NOTE(review): any error from GetReleaseByTag — including transient network
// failures — is treated as "release doesn't exist" and triggers a create;
// confirm that is intended.
func getOrCreateRelease(client *github.Client, ctx *context.Context) (*github.RepositoryRelease, error) {
	owner := ctx.ReleaseRepo.Owner
	repo := ctx.ReleaseRepo.Name
	data := &github.RepositoryRelease{
		Name:    github.String(ctx.Git.CurrentTag),
		TagName: github.String(ctx.Git.CurrentTag),
		Body:    github.String(description(ctx.Git.Diff)),
	}
	r, _, err := client.Repositories.GetReleaseByTag(owner, repo, ctx.Git.CurrentTag)
	if err != nil {
		log.Println("Creating release", ctx.Git.CurrentTag, "on", ctx.Config.Release.Repo)
		r, _, err = client.Repositories.CreateRelease(owner, repo, data)
		return r, err
	}
	log.Println("Updating existing release", ctx.Git.CurrentTag, "on", ctx.Config.Release.Repo)
	r, _, err = client.Repositories.EditRelease(owner, repo, *r.ID, data)
	return r, err
}
// description renders the release body: the changelog diff plus a footer,
// with the local `go version` output appended when the toolchain is
// available.
func description(diff string) string {
	result := "## Changelog\n" + diff + "\n\n--\nAutomated with @goreleaser"
	out, err := exec.Command("go", "version").CombinedOutput()
	if err != nil {
		// Best effort: no go toolchain available, skip the build info line.
		return result
	}
	return result + "\nBuilt with " + string(out)
}
// upload attaches one archive from the dist/ directory to the GitHub release.
func upload(client *github.Client, releaseID int, archive string, ctx *context.Context) error {
	name := archive + "." + ctx.Config.Archive.Format
	file, err := os.Open("dist/" + name)
	if err != nil {
		return err
	}
	// Close error intentionally ignored: the file is read-only.
	defer func() { _ = file.Close() }()
	log.Println("Uploading", file.Name())
	_, _, err = client.Repositories.UploadReleaseAsset(
		ctx.ReleaseRepo.Owner,
		ctx.ReleaseRepo.Name,
		releaseID,
		&github.UploadOptions{Name: name},
		file,
	)
	return err
}
|
/*
* KIAB SDK
*
* KIAB SMS Service
*
* OpenAPI spec version:
*
* Generated by: https://github.com/swagger-api/swagger-codegen.git
*/
package swagger
// Body is the token request payload; the field names match an OAuth2-style
// grant request — presumably client-credentials or refresh-token flows
// (confirm against the API spec).
type Body struct {
	// GrantType selects the grant flow.
	GrantType string `json:"grant_type"`
	// RefreshToken is only sent when refreshing an existing token.
	RefreshToken string `json:"refresh_token,omitempty"`
	ClientId string `json:"client_id"`
	ClientSecret string `json:"client_secret"`
}
|
package module
import (
"github.com/ypyf/salmon/runtime"
"regexp"
"sync"
"github.com/sirupsen/logrus"
"github.com/ypyf/salmon/chat/adapter"
"github.com/ypyf/salmon/store"
"github.com/ypyf/salmon/store/redis"
lua "github.com/yuin/gopher-lua"
)
// ChatBot manages a pool of reusable Lua bot states (avail) and dispatches
// incoming chat messages from the adapter to them.
type ChatBot struct {
	sync.Mutex // guards avail
	store store.Store
	avail []*runtime.BotState // idle interpreter states available for reuse
	robot adapter.Robot
	pluginsPath string // directory the Lua script plugins are loaded from
}
// NewChatBot builds a ChatBot for the given chat adapter and plugin
// directory, backed by a redis store.
func NewChatBot(bot adapter.Robot, pluginsPath string) *ChatBot {
	cb := &ChatBot{
		store:       redis.New(),
		robot:       bot,
		pluginsPath: pluginsPath,
	}
	cb.avail = make([]*runtime.BotState, 0, 4)
	return cb
}
// Run registers the message handler and starts the chat adapter's main loop.
func (pl *ChatBot) Run() {
	pl.robot.OnMessage(pl.onMessage)
	pl.robot.Run()
}
// Get pops an idle bot state from the pool, or builds a fresh one — globals
// registered and plugins loaded — when the pool is empty.
func (pl *ChatBot) Get() *runtime.BotState {
	pl.Lock()
	defer pl.Unlock()
	n := len(pl.avail)
	if n == 0 {
		bot := newRobot(pl.robot)
		s := runtime.NewRobotState()
		s.Responds = bot.responds
		s.Hears = bot.hears
		s.RegisterGlobalType(bot)
		s.RegisterGlobalType(new(THttp))
		s.RegisterGlobalType(new(TResponse))
		// Robot.new(Http.new())
		s.NewGlobalObject("Robot", "robot", s.NewObject("Http"))
		// Load the script plugins last, after all globals are registered.
		s.LoadPlugins(pl.pluginsPath)
		return s
	}
	// Pop the last state off the pool.
	x := pl.avail[n-1]
	pl.avail = pl.avail[0 : n-1]
	return x
}
// Put returns a bot state to the pool for reuse.
func (pl *ChatBot) Put(state *runtime.BotState) {
	pl.Lock()
	defer pl.Unlock()
	pl.avail = append(pl.avail, state)
}
// respondPattern matches messages addressed to the bot:
//   @<botname> <text> | <botname> <text> | <botname>:<text> | /<text>
// PERF FIX: hoisted to package level so it is compiled once instead of on
// every incoming message (regexp.MustCompile was previously inside onMessage).
var respondPattern = regexp.MustCompile(`(?i)(/|(@?gobot)[\s:]|<@U0F8EH14Y>)(.*)`)

// onMessage dispatches an incoming chat message: first to every matching
// "hear" handler on the raw text, then — when the message is addressed to the
// bot — to every matching "respond" handler on the text after the address
// prefix.
func (pl *ChatBot) onMessage(msg *adapter.Message) {
	vm := pl.Get()
	defer pl.Put(vm)
	// Call hear handlers
	for pattern, handler := range vm.Hears {
		match := pattern.FindStringSubmatch(msg.Text)
		if match != nil {
			tbl := vm.L.NewTable()
			for i, m := range match {
				// Lua convention: indices start at 1; some of the built-in
				// functions rely on this assumption.
				tbl.Insert(i+1, lua.LString(m))
			}
			// Build the handler argument: initialize the response object.
			resObj := vm.NewObject("Response", tbl)
			ud := resObj.(*lua.LUserData).Value.(*TResponse)
			ud.robot = pl.robot
			ud.envelope = &msg.Envelope
			if err := runtime.CallLuaFunc(vm.L, 0, handler, resObj); err != nil {
				logrus.Warnf("CallLuaFunc: %v\n", err)
				return
			}
		}
	}
	// Call respond handlers
	match := respondPattern.FindStringSubmatch(msg.Text)
	if match == nil {
		return
	}
	content := match[len(match)-1]
	for pattern, handler := range vm.Responds {
		match := pattern.FindStringSubmatch(content)
		if match != nil {
			tbl := vm.L.NewTable()
			for i, m := range match {
				tbl.Insert(i+1, lua.LString(m))
			}
			// Build the handler argument: initialize the response object.
			resObj := vm.NewObject("Response", tbl)
			ud := resObj.(*lua.LUserData).Value.(*TResponse)
			ud.robot = pl.robot
			ud.envelope = &msg.Envelope
			if err := runtime.CallLuaFunc(vm.L, 0, handler, resObj); err != nil {
				logrus.Warnf("CallLuaFunc: %v\n", err)
				return
			}
		}
	}
}
// Shutdown closes every pooled Lua state and the backing store. States
// currently checked out via Get are not closed here.
func (pl *ChatBot) Shutdown() {
	for _, luastate := range pl.avail {
		luastate.L.Close()
	}
	pl.store.Close()
}
|
//go:build rocksdb
package rocksdb
import (
"fmt"
"github.com/iotaledger/grocksdb"
"github.com/iotaledger/hive.go/runtime/ioutils"
)
// RocksDB holds the underlying grocksdb.DB instance and options.
type RocksDB struct {
	db *grocksdb.DB
	ro *grocksdb.ReadOptions
	wo *grocksdb.WriteOptions // nil when opened via OpenDBReadOnly
	fo *grocksdb.FlushOptions // nil when opened via OpenDBReadOnly
}
// CreateDB creates a new RocksDB instance in the given directory, creating
// the directory (and the database) if needed, configured by the supplied
// Options.
func CreateDB(directory string, options ...Option) (*RocksDB, error) {
	if err := ioutils.CreateDirectory(directory, 0700); err != nil {
		return nil, fmt.Errorf("could not create directory: %w", err)
	}
	dbOpts := dbOptions(options)
	opts := grocksdb.NewDefaultOptions()
	opts.SetCreateIfMissing(true)
	opts.SetCompression(grocksdb.NoCompression)
	if dbOpts.compression {
		opts.SetCompression(grocksdb.ZSTDCompression)
	}
	if dbOpts.parallelism > 0 {
		opts.IncreaseParallelism(dbOpts.parallelism)
	}
	// Custom option strings are applied last so they can override the above.
	for _, str := range dbOpts.custom {
		var err error
		opts, err = grocksdb.GetOptionsFromString(opts, str)
		if err != nil {
			return nil, err
		}
	}
	ro := grocksdb.NewDefaultReadOptions()
	ro.SetFillCache(dbOpts.fillCache)
	wo := grocksdb.NewDefaultWriteOptions()
	wo.SetSync(dbOpts.sync)
	wo.DisableWAL(dbOpts.disableWAL)
	fo := grocksdb.NewDefaultFlushOptions()
	db, err := grocksdb.OpenDb(opts, directory)
	if err != nil {
		// BUG FIX: free the cgo-allocated option structs when the open fails
		// instead of leaking them (they are not garbage collected).
		ro.Destroy()
		wo.Destroy()
		fo.Destroy()
		return nil, err
	}
	return &RocksDB{
		db: db,
		ro: ro,
		wo: wo,
		fo: fo,
	}, nil
}
// OpenDBReadOnly opens a new RocksDB instance in read-only mode.
//
// NOTE(review): the returned handle has nil write/flush options, so calling
// Flush (or any write path) on a read-only handle would pass nil options to
// grocksdb — confirm callers never do that.
func OpenDBReadOnly(directory string, options ...Option) (*RocksDB, error) {
	dbOpts := dbOptions(options)
	opts := grocksdb.NewDefaultOptions()
	opts.SetCompression(grocksdb.NoCompression)
	if dbOpts.compression {
		opts.SetCompression(grocksdb.ZSTDCompression)
	}
	// Custom option strings are applied last so they can override the above.
	for _, str := range dbOpts.custom {
		var err error
		opts, err = grocksdb.GetOptionsFromString(opts, str)
		if err != nil {
			return nil, err
		}
	}
	ro := grocksdb.NewDefaultReadOptions()
	ro.SetFillCache(dbOpts.fillCache)
	db, err := grocksdb.OpenDbForReadOnly(opts, directory, true)
	if err != nil {
		// BUG FIX: free the cgo-allocated read options when the open fails
		// instead of leaking them.
		ro.Destroy()
		return nil, err
	}
	return &RocksDB{
		db: db,
		ro: ro,
	}, nil
}
// dbOptions builds the effective Options by applying each supplied Option to
// the defaults (everything off except disableWAL).
func dbOptions(optionalOptions []Option) *Options {
	opts := &Options{
		compression: false,
		fillCache:   false,
		sync:        false,
		disableWAL:  true,
		parallelism: 0,
	}
	for _, apply := range optionalOptions {
		apply(opts)
	}
	return opts
}
// Flush the database, forcing memtable contents to disk. Must not be called
// on a handle opened with OpenDBReadOnly (fo is nil there).
func (r *RocksDB) Flush() error {
	return r.db.Flush(r.fo)
}
// Close the database. Always returns nil; the error result exists for
// interface compatibility.
func (r *RocksDB) Close() error {
	r.db.Close()
	return nil
}
// GetProperty returns the value of a database property.
func (r *RocksDB) GetProperty(name string) string {
	return r.db.GetProperty(name)
}
// GetIntProperty similar to "GetProperty", but only works for a subset of properties whose
// return value is an integer. Return the value by integer.
func (r *RocksDB) GetIntProperty(name string) (uint64, bool) {
	return r.db.GetIntProperty(name)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.