text stringlengths 11 4.05M |
|---|
package main
import "fmt"
// main demonstrates spreading an existing slice into a variadic function.
func main() {
	data := []float64{43, 56, 87, 12, 45, 57} // already a slice, so it can be spread into the variadic call
	result := average(data...)                // the trailing ... expands the slice into separate arguments
	fmt.Println(result)
}
// average returns the arithmetic mean of the given values.
// Robustness fix: with no arguments the original returned NaN (0/0);
// this version returns 0 for an empty argument list.
func average(sf ...float64) float64 {
	if len(sf) == 0 {
		return 0
	}
	total := 0.0
	for _, v := range sf {
		total += v
	}
	return total / float64(len(sf))
}
|
package raycaster
import (
"image"
"image/color"
"math"
)
const (
	//--move speed--//
	// NOTE(review): not referenced in the visible code of this file;
	// presumably passed to Move/Rotate by callers — confirm usage.
	moveSpeed = 0.06
	//--rotate speed--//
	rotSpeed = 0.03
)
// Camera represents a camera in terms of raycasting.
// It holds the camera's position/orientation, the world maps, and the
// per-column output buffers (texture slices, screen rects, tints) that
// raycast() fills in for each render level.
type Camera struct {
	//--camera position, init to start position--//
	pos *Vector2
	//--current facing direction, init to values coresponding to FOV--//
	dir *Vector2
	//--the 2d raycaster version of camera plane, adjust y component to change FOV (ratio between this and dir x resizes FOV)--//
	plane *Vector2
	//--viewport width and height--//
	w int
	h int
	//--world map--//
	mapObj   *Map
	worldMap [][]int // ground-level grid
	upMap    [][]int // upper-level grid (reused for all levels above mid)
	midMap   [][]int // mid-level grid
	//--texture width--//
	texWidth int
	//--slices--//
	s []*image.Rectangle
	//--cam x pre calc--//
	camX []float64
	//--structs that contain rects and tints for each level render--//
	lvls []*Level
}
// Vector2 is a simple 2D vector (struct converted from C#).
type Vector2 struct {
	X float64
	Y float64
}
// NewCamera initializes a Camera with the default start position, facing
// direction, and FOV plane, precomputes the per-column camera-x values,
// loads the world maps, and performs an initial raycast.
func NewCamera(width int, height int, texWid int, slices []*image.Rectangle, levels []*Level) *Camera {
	m := NewMap()
	c := &Camera{
		//--camera position, init to start position--//
		pos: &Vector2{X: 22.5, Y: 11.5},
		//--current facing direction, init to values coresponding to FOV--//
		dir: &Vector2{X: -1.0, Y: 0.0},
		//--2d raycaster camera plane; the Y component controls the FOV--//
		plane:    &Vector2{X: 0.0, Y: 0.66},
		w:        width,
		h:        height,
		texWidth: texWid,
		s:        slices,
		lvls:     levels,
		mapObj:   m,
	}
	c.worldMap = m.getGrid()
	c.upMap = m.getGridUp()
	c.midMap = m.getGridMid()
	//--init cam pre calc array--//
	c.camX = make([]float64, c.w)
	c.preCalcCamX()
	//do an initial raycast
	c.raycast()
	return c
}
// Update recomputes the camera view by performing a full raycast.
func (c *Camera) Update() {
	//--do raycast--//
	c.raycast()
}
// preCalcCamX precomputes, for every screen column, the x coordinate in
// camera space, mapping column 0..w-1 onto the interval [-1, 1).
func (c *Camera) preCalcCamX() {
	for col := range c.camX {
		c.camX[col] = 2.0*float64(col)/float64(c.w) - 1.0
	}
}
// raycast casts one ray per screen column for every render level.
// Level 0 uses the ground map, level 1 the mid map, and all higher
// levels reuse the upper map.
func (c *Camera) raycast() {
	for x := 0; x < c.w; x++ {
		// Bug fix: iterate over len(c.lvls), not cap(c.lvls) — a slice's
		// capacity can exceed its length, and c.lvls[i] would then panic.
		for i := 0; i < len(c.lvls); i++ {
			var rMap [][]int
			if i == 0 {
				rMap = c.worldMap
			} else if i == 1 {
				rMap = c.midMap
			} else {
				rMap = c.upMap //if above lvl2 just keep extending up
			}
			lvl := c.lvls[i]
			c.castLevel(x, rMap, lvl, i) // TODO: calculate more levels simultaneously using go routine (use # cores)
		}
	}
}
// credit : Raycast loop and setting up of vectors for matrix calculations
// courtesy - http://lodev.org/cgtutor/raycasting.html
//
// castLevel casts a single ray for screen column x against grid (the map of
// the given level), then writes the resulting texture slice, screen
// rectangle, and tint into lvl's arrays at index x.
// NOTE(review): the map size 24 is hardcoded in the DDA bounds below —
// confirm it matches the Map type's grid dimensions.
func (c *Camera) castLevel(x int, grid [][]int, lvl *Level, levelNum int) {
	var _cts, _sv []*image.Rectangle
	var _st []*color.RGBA
	_cts = lvl.Cts
	_sv = lvl.Sv
	_st = lvl.St
	//calculate ray position and direction
	cameraX := c.camX[x] //x-coordinate in camera space
	rayDirX := c.dir.X + c.plane.X*cameraX
	rayDirY := c.dir.Y + c.plane.Y*cameraX
	//--rays start at camera position--//
	rayPosX := c.pos.X
	rayPosY := c.pos.Y
	//which box of the map we're in
	mapX := int(rayPosX)
	mapY := int(rayPosY)
	//length of ray from current position to next x or y-side
	var sideDistX float64
	var sideDistY float64
	//length of ray from one x or y-side to next x or y-side
	deltaDistX := math.Abs(1 / rayDirX)
	deltaDistY := math.Abs(1 / rayDirY)
	var perpWallDist float64
	//what direction to step in x or y-direction (either +1 or -1)
	var stepX int
	var stepY int
	hit := 0   //was there a wall hit?
	side := -1 //was a NS or a EW wall hit?
	//calculate step and initial sideDist
	if rayDirX < 0 {
		stepX = -1
		sideDistX = (rayPosX - float64(mapX)) * deltaDistX
	} else {
		stepX = 1
		sideDistX = (float64(mapX) + 1.0 - rayPosX) * deltaDistX
	}
	if rayDirY < 0 {
		stepY = -1
		sideDistY = (rayPosY - float64(mapY)) * deltaDistY
	} else {
		stepY = 1
		sideDistY = (float64(mapY) + 1.0 - rayPosY) * deltaDistY
	}
	//perform DDA: walk the grid cell by cell until a wall or the boundary is hit
	for hit == 0 {
		//jump to next map square, OR in x-direction, OR in y-direction
		if sideDistX < sideDistY {
			sideDistX += deltaDistX
			mapX += stepX
			side = 0
		} else {
			sideDistY += deltaDistY
			mapY += stepY
			side = 1
		}
		//Check if ray has hit a wall
		if mapX < 24 && mapY < 24 && mapX > 0 && mapY > 0 {
			if grid[mapX][mapY] > 0 {
				hit = 1
			}
		} else {
			//hit grid boundary
			hit = 2
			//prevent out of range errors, needs to be improved
			if mapX < 0 {
				mapX = 0
			} else if mapX > 23 {
				mapX = 23
			}
			if mapY < 0 {
				mapY = 0
			} else if mapY > 23 {
				mapY = 23
			}
		}
	}
	//Calculate distance of perpendicular ray (oblique distance will give fisheye effect!)
	if side == 0 {
		perpWallDist = (float64(mapX) - rayPosX + (1.0-float64(stepX))/2.0) / rayDirX
	} else {
		perpWallDist = (float64(mapY) - rayPosY + (1.0-float64(stepY))/2.0) / rayDirY
	}
	//Calculate height of line to draw on screen
	lineHeight := int(float64(c.h) / perpWallDist)
	//calculate lowest and highest pixel to fill in current stripe;
	//higher levels are shifted up by one lineHeight per level
	drawStart := (-lineHeight/2 + c.h/2) - lineHeight*levelNum
	drawEnd := drawStart + lineHeight
	//--due to modern way of drawing using quads this is removed to avoid glitches at the edges--//
	// if drawStart < 0 { drawStart = 0 }
	// if drawEnd >= c.h { drawEnd = c.h - 1 }
	//texturing calculations
	texNum := grid[mapX][mapY] - 1 //1 subtracted from it so that texture 0 can be used
	if texNum < 0 {
		texNum = 0 //boundary hits (hit == 2) land on empty cells; fall back to texture 0
	}
	c.lvls[levelNum].CurrTexNum[x] = texNum
	//calculate value of wallX
	var wallX float64 //where exactly the wall was hit
	if side == 0 {
		wallX = rayPosY + perpWallDist*rayDirY
	} else {
		wallX = rayPosX + perpWallDist*rayDirX
	}
	wallX -= math.Floor(wallX)
	//x coordinate on the texture
	texX := int(wallX * float64(c.texWidth))
	//mirror the texture on walls seen from the "far" side
	if side == 0 && rayDirX > 0 {
		texX = c.texWidth - texX - 1
	}
	if side == 1 && rayDirY < 0 {
		texX = c.texWidth - texX - 1
	}
	//--some supid hacks to make the houses render correctly--//
	// this corrects textures on two sides of house since the textures are not symmetrical
	if side == 0 {
		if texNum == 3 {
			c.lvls[levelNum].CurrTexNum[x] = 4
		} else if texNum == 4 {
			c.lvls[levelNum].CurrTexNum[x] = 3
		}
		if texNum == 1 {
			c.lvls[levelNum].CurrTexNum[x] = 4
		} else if texNum == 2 {
			c.lvls[levelNum].CurrTexNum[x] = 3
		}
	}
	//--set current texture slice to be slice x--//
	_cts[x] = c.s[texX]
	//--set height of slice--//
	_sv[x].Min.Y = drawStart
	//--set draw start of slice--//
	_sv[x].Max.Y = drawEnd
	//--add a bit of tint to differentiate between walls of a corner--//
	_st[x] = &color.RGBA{255, 255, 255, 255}
	if side == 1 {
		wallDiff := 12
		_st[x].R -= byte(wallDiff)
		_st[x].G -= byte(wallDiff)
		_st[x].B -= byte(wallDiff)
	}
	//// LIGHTING ////
	//--simulates torch light, as if player was carrying a radial light--//
	var lightFalloff float64 = -100 //decrease value to make torch dimmer
	//--sun brightness, illuminates whole level--//
	var sunLight float64 = 300 //global illuminaion
	//--distance based dimming of light--//
	var shadowDepth float64
	shadowDepth = math.Sqrt(perpWallDist) * lightFalloff
	_st[x].R = byte(Clamp(int(float64(_st[x].R)+shadowDepth+sunLight), 0, 255))
	_st[x].G = byte(Clamp(int(float64(_st[x].G)+shadowDepth+sunLight), 0, 255))
	_st[x].B = byte(Clamp(int(float64(_st[x].B)+shadowDepth+sunLight), 0, 255))
}
// Move advances the camera along its facing direction by mSpeed, with a
// simple per-axis collision check against the world map (each axis moves
// independently, allowing sliding along walls).
func (c *Camera) Move(mSpeed float64) {
	// the *12 factor probes ahead of the camera so it stops short of walls
	// NOTE(review): a probe position outside the map grid would index out
	// of range here — confirm callers keep the camera in bounds.
	if c.worldMap[int(c.pos.X+c.dir.X*mSpeed*12)][int(c.pos.Y)] <= 0 {
		c.pos.X += (c.dir.X * mSpeed)
	}
	if c.worldMap[int(c.pos.X)][int(c.pos.Y+c.dir.Y*mSpeed*12)] <= 0 {
		c.pos.Y += (c.dir.Y * mSpeed)
	}
}
// Rotate turns the camera by rSpeed radians by applying a 2D rotation
// matrix to both the facing direction and the camera plane.
func (c *Camera) Rotate(rSpeed float64) {
	sin, cos := math.Sin(rSpeed), math.Cos(rSpeed)
	//both camera direction and camera plane must be rotated
	dx, dy := c.dir.X, c.dir.Y
	c.dir.X = dx*cos - dy*sin
	c.dir.Y = dx*sin + dy*cos
	px, py := c.plane.X, c.plane.Y
	c.plane.X = px*cos - py*sin
	c.plane.Y = px*sin + py*cos
}
// Clamp restricts value to the inclusive range [min, max]
// (converted from the C# method MathHelper.Clamp).
func Clamp(value int, min int, max int) int {
	switch {
	case value < min:
		return min
	case value > max:
		return max
	default:
		return value
	}
}
|
package main
// Leetcode 237. (easy)
func deleteNode(node *ListNode) {
node.Val = node.Next.Val
node.Next = node.Next.Next
}
|
package model
import (
"context"
"gamesvr/manager"
"math"
"shared/common"
"shared/csv/entry"
"shared/csv/static"
"shared/global"
"shared/protobuf/pb"
"shared/statistic/logreason"
"shared/utility/coordinate"
"shared/utility/errors"
"shared/utility/servertime"
)
var (
	// objectHandles maps an object type to its interaction handler;
	// populated in init below.
	objectHandles = map[int32]objectHandle{}
)

// objectHandle processes a player's interaction with a world object.
type objectHandle func(ctx context.Context, u *User, object *YggObject) error

func init() {
	// chests are the only type with a dedicated handler so far
	objectHandles[static.YggObjectTypeChest] = HandleChestObject
}
// YggdrasilDailyRefresh 每日刷新
func (u *User) YggdrasilDailyRefresh(refreshTime int64) {
// 每日探险次数清零
u.Yggdrasil.TravelTime = 0
// 刷日常怪物
u.Yggdrasil.DailyMonsters.DailyRefresh(context.Background(), u.Yggdrasil)
// 日常派遣刷新
u.Yggdrasil.Dispatch.YggDispatchDailyRefresh()
}
// YggdrasilGetBlockInfo validates that every requested position is aligned
// to the block grid and returns the block/area info for those positions.
func (u *User) YggdrasilGetBlockInfo(ctx context.Context, positions []coordinate.Position) (*pb.S2CYggdrasilGetBlockInfo, error) {
	blockW, blockH := manager.CSV.Yggdrasil.GetYggBlockLengthAndWidth()
	for _, p := range positions {
		aligned := p.X%blockW == 0 && p.Y%blockH == 0
		if !aligned {
			return nil, errors.WrapTrace(common.ErrParamError)
		}
	}
	resp := &pb.S2CYggdrasilGetBlockInfo{
		BlockAndArea: u.Yggdrasil.VOYggdrasilBlockAndArea(ctx, positions),
	}
	return resp, nil
}
// YggdrasilExploreStart starts a new exploration trip with the given
// character ids: validates the team and the daily quota, consumes AP, and
// initializes the travel info.
func (u *User) YggdrasilExploreStart(characters []int32) error {
	if u.Yggdrasil.Status == YggdrasilStateTravel {
		return errors.WrapTrace(common.ErrYggdrasilInTravel)
	}
	now := servertime.Now().Unix()
	// check that every character can travel (is not still resting)
	for _, characterId := range characters {
		chara, err := u.CharacterPack.Get(characterId)
		if err != nil {
			return errors.WrapTrace(err)
		}
		if chara.CanYggdrasilTime > now {
			return errors.WrapTrace(common.ErrYggdrasilCharacterCannotCarry)
		}
	}
	// team size: at least 1, at most the level-dependent team cap
	if len(characters) == 0 {
		return errors.WrapTrace(common.ErrParamError)
	}
	if int32(len(characters)) > manager.CSV.Yggdrasil.GetYggEditTeamCount(u.Info.Level.Value()) {
		return errors.WrapTrace(common.ErrParamError)
	}
	// check AP
	levelConfig, ok := manager.CSV.TeamLevelCache.GetByLv(u.Info.Level.Value())
	if !ok {
		return errors.Swrapf(common.ErrNotFoundInCSV, entry.CfgTeamLevelConfig, u.Info.Level.Value())
	}
	// check today's remaining exploration count
	if u.Yggdrasil.TravelTime >= manager.CSV.Yggdrasil.GetYggDailyTravelTime() {
		return errors.WrapTrace(common.ErrYggdrasilNoTravelTime)
	}
	// consume the AP cost
	consume := common.NewRewards()
	consume.AddReward(common.NewReward(static.CommonResourceTypeAp, levelConfig.ExploreAp))
	err := u.CheckRewardsEnough(consume)
	if err != nil {
		return errors.WrapTrace(err)
	}
	reason := logreason.NewReason(logreason.YggStartExplore)
	err = u.CostRewards(consume, reason)
	if err != nil {
		return errors.WrapTrace(err)
	}
	u.Guild.AddTaskItem(static.GuildTaskYgg, 1)
	// initialize this trip's travel info
	return u.Yggdrasil.initTravelInfo(characters, levelConfig.ExploreAp)
}
// YggdrasilExploreMove moves the player to position while on a trip,
// returning the path walked and explored-tile count updates.
func (u *User) YggdrasilExploreMove(ctx context.Context, position coordinate.Position) ([]*pb.VOPosition, []*pb.VOExploredPosCountUpdate, error) {
	if u.Yggdrasil.Status != YggdrasilStateQuit {
		return u.Yggdrasil.moveTo(ctx, u, position)
	}
	return nil, nil, errors.WrapTrace(common.ErrYggdrasilNotInTravel)
}
// YggdrasilExploreQuit ends the current trip and returns the updated
// character VOs; only allowed once travel AP has been fully spent.
func (u *User) YggdrasilExploreQuit(ctx context.Context) ([]*pb.VOUserCharacter, error) {
	switch {
	case u.Yggdrasil.TravelInfo.TravelAp != 0:
		// quitting requires the travel AP to be exhausted first
		return nil, errors.WrapTrace(common.ErrParamError)
	case u.Yggdrasil.Status == YggdrasilStateQuit:
		return nil, errors.WrapTrace(common.ErrYggdrasilNotInTravel)
	}
	return u.doYggdrasilExploreQuit(ctx)
}
// doYggdrasilExploreQuit performs the actual quit: puts the carried
// characters into their rest cooldown, sends active monsters back to their
// spawn points, and marks the trip as quit. Returns the updated character VOs.
func (u *User) doYggdrasilExploreQuit(ctx context.Context) ([]*pb.VOUserCharacter, error) {
	now := servertime.Now().Unix()
	// the characters carried on this trip enter the resting state
	var characters []*pb.VOUserCharacter
	for characterId := range u.Yggdrasil.TravelInfo.CharactersHp {
		chara, err := u.CharacterPack.Get(characterId)
		if err != nil {
			return nil, errors.WrapTrace(err)
		}
		chara.CanYggdrasilTime = now + manager.CSV.Yggdrasil.GetYggCharacterRestSec()
		characters = append(characters, chara.VOUserCharacter())
	}
	// active (aggressive) monsters walk back to their origin position
	for _, object := range u.Yggdrasil.Entities.Objects {
		objectType, err := manager.CSV.Yggdrasil.GetYggdrasilObjectType(object.ObjectId, object.State)
		if err != nil {
			return nil, errors.WrapTrace(err)
		}
		if objectType == static.YggObjectTypeInitiativemonster {
			err := u.Yggdrasil.ObjectMove(ctx, u, object, *object.OrgPos)
			if err != nil {
				return nil, errors.WrapTrace(err)
			}
		}
	}
	u.Yggdrasil.Status = YggdrasilStateQuit
	return characters, nil
}
// YggdrasilExploreReturnCity teleports the player into the city whose
// entrance they are currently standing on.
func (u *User) YggdrasilExploreReturnCity(ctx context.Context) error {
	switch {
	case u.Yggdrasil.Status == YggdrasilStateQuit:
		return errors.WrapTrace(common.ErrYggdrasilNotInTravel)
	case u.Yggdrasil.CityId != 0:
		return errors.WrapTrace(common.ErrYggdrasilInCityNow)
	}
	// the current tile must be a city entrance
	cityId := manager.CSV.Yggdrasil.IsCityEntrance(u.Yggdrasil.TravelPos)
	if cityId == 0 {
		return errors.WrapTrace(common.ErrYggdrasilCannotReturnCityThisPos)
	}
	if err := u.Yggdrasil.TransferToCity(ctx, u, cityId); err != nil {
		return errors.WrapTrace(err)
	}
	return nil
}
// YggdrasilExploreLeaveCity moves the player from the current city back to
// that city's exit position on the overworld.
func (u *User) YggdrasilExploreLeaveCity(ctx context.Context) error {
	if u.Yggdrasil.CityId == 0 {
		return errors.WrapTrace(common.ErrYggdrasilNotInCityNow)
	}
	city, err := manager.CSV.Yggdrasil.GetYggCityById(u.Yggdrasil.CityId)
	if err != nil {
		return errors.WrapTrace(err)
	}
	if _, _, err = u.Yggdrasil.ChangePos(ctx, u, *city.CityExitPos); err != nil {
		return errors.WrapTrace(err)
	}
	// clear the city only after the position change succeeded
	u.Yggdrasil.CityId = 0
	return nil
}
// YggdrasilGoodsDiscard drops the given goods from the travel pack onto the
// current tile. When replace is false the tile must be empty.
func (u *User) YggdrasilGoodsDiscard(ctx context.Context, replace bool, goodsId int64) error {
	if u.Yggdrasil.Status == YggdrasilStateQuit {
		return errors.WrapTrace(common.ErrYggdrasilNotInTravel)
	}
	goods, ok := u.Yggdrasil.Pack.Get(goodsId)
	if !ok {
		return errors.WrapTrace(common.ErrYggdrasilPackGoodsNotFound)
	}
	// without an explicit replace, refuse to drop onto an occupied tile
	if !replace && !u.Yggdrasil.Entities.IsBlankPos(*u.Yggdrasil.TravelPos) {
		return errors.WrapTrace(common.ErrYggdrasilOverlapping)
	}
	u.Yggdrasil.DeletePackGoods(goodsId)
	discarded, err := NewYggDiscardGoods(*u.Yggdrasil.TravelPos, goods)
	if err != nil {
		return errors.WrapTrace(err)
	}
	return u.Yggdrasil.AppendDiscardGoods(ctx, u.ID, discarded)
}
// YggdrasilGoodsPickUp picks up the discarded goods lying on the current
// tile. If replaceGoodsId > 0 that pack item is dropped first to make room;
// otherwise the pack must have a free slot.
func (u *User) YggdrasilGoodsPickUp(ctx context.Context, replaceGoodsId int64) error {
	if u.Yggdrasil.Status == YggdrasilStateQuit {
		return errors.WrapTrace(common.ErrYggdrasilNotInTravel)
	}
	discard, ok := u.Yggdrasil.Entities.FindDiscardGoodsByPos(*u.Yggdrasil.TravelPos)
	if !ok {
		return errors.WrapTrace(common.ErrYggdrasilDiscardGoodsNotFound)
	}
	// replacement flow: discard the chosen item onto this tile first
	if replaceGoodsId > 0 {
		err := u.YggdrasilGoodsDiscard(ctx, true, replaceGoodsId)
		if err != nil {
			return errors.WrapTrace(err)
		}
	} else {
		if u.Yggdrasil.Pack.IsFull(u.Info.Level.Value()) {
			return errors.WrapTrace(common.ErrYggdrasilBagIsFull)
		}
	}
	err := u.Yggdrasil.PickUp(discard)
	if err != nil {
		return errors.WrapTrace(err)
	}
	return u.Yggdrasil.RemoveDiscardGoods(ctx, u.ID, discard)
}
// YggdrasilObjectHandle interacts with the handleable object (magic table,
// effect, or chest) on the player's current tile: runs its type-specific
// handler if one is registered, then advances the object to its next state.
func (u *User) YggdrasilObjectHandle(ctx context.Context) error {
	if u.Yggdrasil.Status == YggdrasilStateQuit {
		return errors.WrapTrace(common.ErrYggdrasilNotInTravel)
	}
	object, err := u.Yggdrasil.Entities.FindObjectByPosAndType(*u.Yggdrasil.TravelPos, static.YggObjectTypeMagictable, static.YggObjectTypeEffect, static.YggObjectTypeChest)
	if err != nil {
		return errors.WrapTrace(err)
	}
	state, err := manager.CSV.Yggdrasil.GetObjectState(object.ObjectId, object.State)
	if err != nil {
		return errors.WrapTrace(err)
	}
	if state.SubTaskID > 0 {
		// the object is bound to a subtask, which must be in progress
		if _, _, ok := u.Yggdrasil.Task.YggSubTaskProgressInfoInProcess(state.SubTaskID); !ok {
			return errors.WrapTrace(common.ErrYggdrasilTaskNotInProgress)
		}
	}
	// run the optional per-type handler (e.g. chests grant their drop)
	handle, ok := objectHandles[state.ObjectType]
	if ok {
		err := handle(ctx, u, object)
		if err != nil {
			return errors.WrapTrace(err)
		}
	}
	u.Yggdrasil.ObjectChangeToNextState(ctx, u, object)
	return nil
}
// HandleChestObject is the objectHandle for chest objects: it grants the
// chest's configured drop (state.ObjectParam) to the player.
func HandleChestObject(ctx context.Context, u *User, object *YggObject) error {
	state, err := manager.CSV.Yggdrasil.GetObjectState(object.ObjectId, object.State)
	if err != nil {
		return errors.WrapTrace(err)
	}
	reason := logreason.NewReason(logreason.YggHandleChest)
	if err := u.Yggdrasil.AddRewardsByDropId(ctx, u, state.ObjectParam, 0, reason); err != nil {
		return errors.WrapTrace(err)
	}
	return nil
}
// YggdrasilObjectMove moves a batch of movable objects (monsters/NPCs) to
// the requested positions. All objects are validated before any is moved.
func (u *User) YggdrasilObjectMove(ctx context.Context, posMap map[int64]*coordinate.Position) ([]*pb.VOYggdrasilObject, error) {
	m := map[*YggObject]coordinate.Position{}
	for uid, position := range posMap {
		obj, ok := u.Yggdrasil.Entities.FindObjectByUid(uid)
		if !ok {
			return nil, errors.WrapTrace(common.ErrYggdrasilObjectNotFound)
		}
		_, err := checkObjectState(obj, static.YggObjectTypeInitiativemonster, static.YggObjectTypeNpc,
			static.YggObjectTypeFollownpc, static.YggObjectTypeFollowbattlenpc)
		if err != nil {
			return nil, errors.WrapTrace(err)
		}
		m[obj] = *position
	}
	var voList []*pb.VOYggdrasilObject
	// NOTE(review): map iteration order is random, so objects move in a
	// nondeterministic order — confirm the moves are order-independent.
	for obj, to := range m {
		err := u.Yggdrasil.ObjectMove(ctx, u, obj, to)
		if err != nil {
			return nil, errors.WrapTrace(err)
		}
		voList = append(voList, obj.VOYggdrasilObject())
	}
	return voList, nil
}
// YggdrasilQueryPosition returns the VOs of every live instance of objectId,
// creating the areas the object can spawn in if they do not exist yet.
func (u *User) YggdrasilQueryPosition(ctx context.Context, objectId int32) ([]*pb.VOYggdrasilObject, error) {
	// make sure every area that can contain this object is initialized
	for _, areaId := range manager.CSV.Yggdrasil.GetObjectInitArea(objectId) {
		u.Yggdrasil.Areas.getByCreate(ctx, u.Yggdrasil, areaId)
	}
	objects, err := u.Yggdrasil.FindObjectById(objectId)
	if err != nil {
		return nil, errors.WrapTrace(err)
	}
	var vos []*pb.VOYggdrasilObject
	for _, object := range objects {
		vos = append(vos, object.VOYggdrasilObject())
	}
	return vos, nil
}
// checkYggdrasilBattle validates a monster battle request: the object must
// exist and be a (passive or active) monster whose level matches levelId,
// and every chosen character must still have HP. As a side effect it copies
// each character's current travel HP into the battle lineup.
// npcs is currently unused.
func (u *User) checkYggdrasilBattle(objectUid int64, levelId int32, characters []*pb.VOBattleCharacter, npcs []*pb.VOBattleNPC) error {
	if u.Yggdrasil.Status == YggdrasilStateQuit {
		return errors.WrapTrace(common.ErrYggdrasilNotInTravel)
	}
	object, ok := u.Yggdrasil.Entities.FindObjectByUid(objectUid)
	if !ok {
		return errors.WrapTrace(common.ErrYggdrasilObjectNotFound)
	}
	// todo: active monsters' positions sometimes disagree with the player's,
	// so the position check below is disabled for now
	//objPos := *object.Position
	//travelPos := *u.Yggdrasil.TravelPos
	//if objPos != travelPos {
	//	return errors.WrapTrace(common.ErrYggdrasilObjectNotFound)
	//}
	state, err := checkObjectState(object, static.YggObjectTypePassivermonster, static.YggObjectTypeInitiativemonster)
	if err != nil {
		return errors.WrapTrace(err)
	}
	if state.ObjectParam != levelId {
		return errors.WrapTrace(common.ErrParamError)
	}
	// characters at 0 HP cannot enter the battle
	for _, character := range characters {
		hp, err := u.Yggdrasil.TravelInfo.GetCharacterHp(character.CharacterId)
		if err != nil {
			return errors.WrapTrace(err)
		}
		if hp <= 0 {
			return errors.WrapTrace(errors.Swrapf(common.ErrYggdrasilCharacterHpErr, character.CharacterId, hp))
		}
		// carry the travel HP into the battle
		character.HpPercent = hp
	}
	return nil
}
// onYggdrasilLevelFail handles a lost or abandoned battle against a
// Yggdrasil monster. For passive monsters the post-battle HP is persisted
// (unless the fight was given up) and the give-up flow runs; for active
// monsters the whole team is wiped and returned to the nearest safe spot.
func (u *User) onYggdrasilLevelFail(ctx context.Context, objectUid int64, charactersAfterBattle []*pb.VOBattleCharacter, result int32) error {
	object, ok := u.Yggdrasil.Entities.FindObjectByUid(objectUid)
	if !ok {
		return errors.WrapTrace(common.ErrYggdrasilObjectNotFound)
	}
	state, err := checkObjectState(object, static.YggObjectTypePassivermonster, static.YggObjectTypeInitiativemonster)
	if err != nil {
		return errors.WrapTrace(err)
	}
	switch state.ObjectType {
	case static.YggObjectTypePassivermonster:
		// on an actual defeat (not a give-up) persist the post-battle HP first
		if result != static.BattleEndTypeGiveUp {
			u.SetYggCharacterHp(ctx, charactersAfterBattle)
		}
		return u.YggBattleGiveUp(ctx, objectUid)
	case static.YggObjectTypeInitiativemonster:
		// active monster: team wipes and returns to the nearest safe spot
		return u.SetAllDeadAndReturnSafePos(ctx)
	}
	return nil
}
// onYggdrasilLevelPass handles a won monster battle: persists post-battle
// HP, grants the level's drops, advances the monster's state, and reports
// chapter/monster task progress.
func (u *User) onYggdrasilLevelPass(ctx context.Context, objectUid int64, levelCfg *entry.Level, charactersAfterBattle []*pb.VOBattleCharacter) error {
	object, ok := u.Yggdrasil.Entities.FindObjectByUid(objectUid)
	if !ok {
		return errors.WrapTrace(common.ErrYggdrasilObjectNotFound)
	}
	_, err := checkObjectState(object, static.YggObjectTypePassivermonster, static.YggObjectTypeInitiativemonster)
	if err != nil {
		return errors.WrapTrace(err)
	}
	// persist post-battle HP
	u.SetYggCharacterHp(ctx, charactersAfterBattle)
	reason := logreason.NewReason(logreason.YggLevelPass)
	err = u.Yggdrasil.AddRewardsByDropIds(ctx, u, levelCfg.YggdrasilDrop, 0, reason)
	if err != nil {
		return errors.WrapTrace(err)
	}
	u.Yggdrasil.ObjectChangeToNextState(ctx, u, object)
	u.Yggdrasil.Task.ProcessNum(ctx, u, static.YggdrasilSubTaskTypeTypeChapter, 1, levelCfg.Id)
	u.Yggdrasil.Task.ProcessNum(ctx, u, static.YggdrasilSubTaskTypeTypeMonster, 1, object.ObjectId)
	return nil
}
// checkChallengeAltarBattle validates a challenge-altar battle request.
// Same checks as checkYggdrasilBattle, plus the altar feature must be
// unlocked and the altar must stand on the player's current tile.
// npcs is currently unused.
func (u *User) checkChallengeAltarBattle(objectUid int64, levelId int32, characters []*pb.VOBattleCharacter, npcs []*pb.VOBattleNPC) error {
	err := u.CheckActionUnlock(static.ActionIdTypeChallengealtarunlock)
	if err != nil {
		return errors.WrapTrace(err)
	}
	if u.Yggdrasil.Status == YggdrasilStateQuit {
		return errors.WrapTrace(common.ErrYggdrasilNotInTravel)
	}
	object, ok := u.Yggdrasil.Entities.FindObjectByUid(objectUid)
	if !ok {
		return errors.WrapTrace(common.ErrYggdrasilObjectNotFound)
	}
	// the altar must be on the player's current tile
	objPos := *object.Position
	travelPos := *u.Yggdrasil.TravelPos
	if objPos != travelPos {
		return errors.WrapTrace(common.ErrYggdrasilObjectNotFound)
	}
	state, err := checkObjectState(object, static.YggObjectTypeChallengealtar)
	if err != nil {
		return errors.WrapTrace(err)
	}
	if state.ObjectParam != levelId {
		return errors.WrapTrace(common.ErrParamError)
	}
	// characters at 0 HP cannot enter the battle
	for _, character := range characters {
		hp, err := u.Yggdrasil.TravelInfo.GetCharacterHp(character.CharacterId)
		if err != nil {
			return errors.WrapTrace(err)
		}
		if hp <= 0 {
			return errors.WrapTrace(errors.Swrapf(common.ErrYggdrasilCharacterHpErr, character.CharacterId, hp))
		}
		// carry the travel HP into the battle
		character.HpPercent = hp
	}
	return nil
}
// onChallengeAltarFail handles a lost or abandoned challenge-altar battle.
// On a real defeat the post-battle HP is persisted before the give-up flow;
// on a voluntary give-up the HP is left untouched.
func (u *User) onChallengeAltarFail(ctx context.Context, objectUid int64, charactersAfterBattle []*pb.VOBattleCharacter, result int32) error {
	object, ok := u.Yggdrasil.Entities.FindObjectByUid(objectUid)
	if !ok {
		return errors.WrapTrace(common.ErrYggdrasilObjectNotFound)
	}
	if _, err := checkObjectState(object, static.YggObjectTypeChallengealtar); err != nil {
		return errors.WrapTrace(err)
	}
	if result != static.BattleEndTypeGiveUp {
		// actual defeat: persist the post-battle HP first
		u.SetYggCharacterHp(ctx, charactersAfterBattle)
	}
	return u.YggBattleGiveUp(ctx, objectUid)
}
// onChallengeAltarPass handles a won challenge-altar battle: persists
// post-battle HP, grants the level's drops, advances the altar's state, and
// reports chapter/monster task progress.
func (u *User) onChallengeAltarPass(ctx context.Context, objectUid int64, levelCfg *entry.Level, charactersAfterBattle []*pb.VOBattleCharacter) error {
	object, ok := u.Yggdrasil.Entities.FindObjectByUid(objectUid)
	if !ok {
		return errors.WrapTrace(common.ErrYggdrasilObjectNotFound)
	}
	_, err := checkObjectState(object, static.YggObjectTypeChallengealtar)
	if err != nil {
		return errors.WrapTrace(err)
	}
	// persist post-battle HP
	u.SetYggCharacterHp(ctx, charactersAfterBattle)
	reason := logreason.NewReason(logreason.YggLevelPass)
	err = u.Yggdrasil.AddRewardsByDropIds(ctx, u, levelCfg.YggdrasilDrop, 0, reason)
	if err != nil {
		return errors.WrapTrace(err)
	}
	u.Yggdrasil.ObjectChangeToNextState(ctx, u, object)
	u.Yggdrasil.Task.ProcessNum(ctx, u, static.YggdrasilSubTaskTypeTypeChapter, 1, levelCfg.Id)
	u.Yggdrasil.Task.ProcessNum(ctx, u, static.YggdrasilSubTaskTypeTypeMonster, 1, object.ObjectId)
	return nil
}
// YggdrasilAcceptTask accepts taskId if its unlock conditions are met and
// returns the created task info VO.
func (u *User) YggdrasilAcceptTask(ctx context.Context, taskId int32) (*pb.VOYggdrasilTaskInfo, error) {
	config, err := manager.CSV.Yggdrasil.GetTaskConfig(taskId)
	if err != nil {
		return nil, errors.WrapTrace(err)
	}
	if err := u.CheckUserConditions(config.UnlockCondition); err != nil {
		return nil, errors.WrapTrace(err)
	}
	info, err := u.Yggdrasil.Task.AcceptTask(ctx, u, config)
	if err != nil {
		return nil, errors.WrapTrace(err)
	}
	return info.VOYggdrasilTaskInfo(), nil
}
// YggdrasilSetTrackTask marks taskId as the currently tracked task.
func (u *User) YggdrasilSetTrackTask(taskId int32) error {
	return u.Yggdrasil.Task.SetTrackTask(taskId)
}
// YggdrasilCompleteTask completes taskId and grants its configured drop
// reward, if any.
func (u *User) YggdrasilCompleteTask(ctx context.Context, taskId int32) error {
	config, err := manager.CSV.Yggdrasil.GetTaskConfig(taskId)
	if err != nil {
		return errors.WrapTrace(err)
	}
	// complete the task
	err = u.Yggdrasil.Task.CompleteTask(ctx, u, taskId)
	if err != nil {
		return errors.WrapTrace(err)
	}
	// grant the reward
	if config.DropId > 0 {
		reason := logreason.NewReason(logreason.YggTaskComplete)
		err := u.Yggdrasil.AddRewardsByDropId(ctx, u, config.DropId, 0, reason)
		if err != nil {
			return errors.WrapTrace(err)
		}
	}
	return nil
}
// YggdrasilChooseNext picks the follow-up branch for subTaskId and returns
// the updated task info VO.
func (u *User) YggdrasilChooseNext(ctx context.Context, subTaskId int32) (*pb.VOYggdrasilTaskInfo, error) {
	info, err := u.Yggdrasil.Task.ChooseNext(ctx, u, subTaskId)
	if err != nil {
		return nil, errors.WrapTrace(err)
	}
	return info.VOYggdrasilTaskInfo(), nil
}
//todo: remove complete_event; env can span multiple subtasks (add/remove)
//todo: effect-type entries in env are kept permanently
// Design notes:
// - Each task-bag slot holds one kind of item; quantities accumulate.
// - Goods in the bag do NOT come home when resting — return to a city or use a building.
// - Goods brought back by OTHER players go to the "otherworld mailbox"; your own
//   are awarded directly. When the mailbox is full, matching is skipped
//   (my drops do not enter other players' matches).

// YggdrasilAbandonTask abandons the previously accepted task taskId.
func (u *User) YggdrasilAbandonTask(ctx context.Context, taskId int32) error {
	return u.Yggdrasil.Task.TaskAbandon(ctx, u, taskId)
}
// YggdrasilDeliverTaskGoods hands the given resources in for subTaskId.
func (u *User) YggdrasilDeliverTaskGoods(ctx context.Context, subTaskId int32, resources []*pb.VOResource) error {
	return u.Yggdrasil.Task.DeliverTaskGoods(ctx, u, subTaskId, resources)
}
// YggdrasilAreaProgressReward claims the exploration-progress reward for
// areaId and returns the updated area VO.
func (u *User) YggdrasilAreaProgressReward(ctx context.Context, areaId int32) (*pb.VOYggdrasilArea, error) {
	return u.Yggdrasil.ProgressReward(ctx, u, areaId)
}
// YggdrasilMailGetByPage returns one page of mailbox entries as VOs,
// starting at offset and containing at most num items.
func (u *User) YggdrasilMailGetByPage(offset int64, num int32) []*pb.VOYggdrasilMail {
	page := u.Yggdrasil.MailBox.PagingSearch(offset, int(num))
	result := make([]*pb.VOYggdrasilMail, 0, len(page))
	for _, m := range page {
		result = append(result, m.VOYggdrasilMail())
	}
	return result
}
// YggdrasilMailReceiveOne claims the attachment of mail uid and removes the
// mail from the box.
// NOTE(review): rewards are granted before the delete; if Delete fails the
// mail remains claimable again — confirm Delete cannot fail after a
// successful addRewards, or make the two steps transactional.
func (u *User) YggdrasilMailReceiveOne(ctx context.Context, uid int64) error {
	yggdrasilMail, ok := u.Yggdrasil.MailBox.Get(uid)
	if !ok {
		return errors.WrapTrace(common.ErrYggdrasilMailNotExist)
	}
	reason := logreason.NewReason(logreason.YggMail)
	_, err := u.addRewards(yggdrasilMail.Attachment, reason)
	if err != nil {
		return errors.WrapTrace(err)
	}
	err = u.Yggdrasil.MailBox.Delete(ctx, u.ID, uid)
	if err != nil {
		return errors.WrapTrace(err)
	}
	return nil
}
// YggdrasilMailReceiveAll claims every mail in the box (up to the configured
// max) and returns the uids of the mails that were received.
func (u *User) YggdrasilMailReceiveAll(ctx context.Context) ([]int64, error) {
	mails := u.Yggdrasil.MailBox.PagingSearch(math.MaxInt64, manager.CSV.Yggdrasil.GetMailReceiveMaxCount())
	if len(mails) == 0 {
		return nil, errors.WrapTrace(common.ErrYggdrasilMailNotExist)
	}
	received := make([]int64, 0, len(mails))
	for _, m := range mails {
		if err := u.YggdrasilMailReceiveOne(ctx, m.Uid); err != nil {
			return nil, errors.WrapTrace(err)
		}
		received = append(received, m.Uid)
	}
	return received, nil
}
// YggdrasilMarkCreate places a new map mark of type markId at pos, subject
// to the global mark-count limit, and returns the created mark's VO.
func (u *User) YggdrasilMarkCreate(ctx context.Context, markId int32, pos coordinate.Position) (*pb.VOYggdrasilMark, error) {
	_, err := manager.CSV.Yggdrasil.GetMark(markId)
	if err != nil {
		return nil, errors.WrapTrace(err)
	}
	// enforce the mark-count cap
	if u.Yggdrasil.GetMarkTotalCount() >= manager.CSV.Yggdrasil.GetYggMarkTotalCount() {
		return nil, errors.WrapTrace(common.ErrYggdrasilMarkCountLimit)
	}
	mark, err := NewYggMark(ctx, markId, pos)
	if err != nil {
		return nil, errors.WrapTrace(err)
	}
	u.Yggdrasil.AppendMark(ctx, mark)
	return mark.VOYggdrasilMark(), nil
}
// YggdrasilMarkDestroy removes the map mark with the given uid.
func (u *User) YggdrasilMarkDestroy(markUId int64) error {
	if mark, ok := u.Yggdrasil.Entities.FindMarkByUid(markUId); ok {
		u.Yggdrasil.RemoveMark(mark)
		return nil
	}
	return errors.WrapTrace(common.ErrYggdrasilMarkNotExist)
}
// YggdrasilTrackMark sets (or clears, when pos is nil) the tracked position.
func (u *User) YggdrasilTrackMark(pos *coordinate.Position) {
	u.Yggdrasil.TrackMark = pos
}
// YggdrasilMatch loads the given guild members' match entities from redis
// and matches them into this player's world.
func (u *User) YggdrasilMatch(ctx context.Context, otherMembers []int64) error {
	pool, err := GetYggdrasilInRedis(ctx, otherMembers)
	if err != nil {
		return errors.WrapTrace(err)
	}
	u.Yggdrasil.Entities.Match(ctx, u.Yggdrasil, u.Guild.GuildID, u.GetUserId(), pool)
	return nil
}
// GetYggdrasilInRedis builds a match pool from the cached match entities of
// each given user id.
func GetYggdrasilInRedis(ctx context.Context, userIds []int64) (*MatchPool, error) {
	pool := NewMatchPool()
	for _, id := range userIds {
		entities, err := LoadUserMatchEntities(ctx, id)
		if err != nil {
			return nil, errors.WrapTrace(err)
		}
		(*pool)[id] = entities
	}
	return pool, nil
}
// GetTotalIntimacy returns this user's total intimacy across their guild.
func (u *User) GetTotalIntimacy(ctx context.Context) (int32, error) {
	guildID := u.Guild.GuildID
	return getTotalIntimacy(ctx, u.GetUserId(), guildID)
}
// getTotalIntimacy sums the intimacy userId has with every other member of
// guildID; a user without a guild (guildID == 0) has a total of 0.
func getTotalIntimacy(ctx context.Context, userId, guildID int64) (int32, error) {
	if guildID == 0 {
		return 0, nil
	}
	intimacies, err := manager.Global.GetGuildIntimacyMap(ctx, guildID, userId)
	if err != nil {
		return 0, errors.WrapTrace(err)
	}
	var total int32
	for _, v := range intimacies {
		total += v
	}
	return total, nil
}
// IntimacyChange applies an intimacy change between this user and another
// guild member, then pushes the updated totals to both players (the other
// player only while online).
//
// Bug fix: getTotalIntimacy is declared as (ctx, userId, guildID) — compare
// the correct call in GetTotalIntimacy — but the two calls below previously
// passed (ctx, guildID, userId/other) with the arguments swapped, producing
// wrong totals. Both parameters are int64, so the compiler could not catch it.
func (u *User) IntimacyChange(ctx context.Context, other int64, changeVal int32) error {
	caches, err := manager.Global.GetUserCachesExtension(ctx, []int64{other}, global.UserCacheWithOnline|global.UserCacheWithGuild)
	if err != nil {
		return errors.WrapTrace(err)
	}
	otherCache, ok := caches[other]
	if !ok {
		return nil
	}
	guildID := u.Guild.GuildID
	// not in a guild
	if guildID == 0 {
		return nil
	}
	// not in the same guild
	if guildID != otherCache.GuildID {
		return nil
	}
	intimacy, err := manager.Global.ChangeIntimacy(ctx, guildID, u.ID, other, changeVal)
	if err != nil {
		return errors.WrapTrace(err)
	}
	totalIntimacy, err := getTotalIntimacy(ctx, u.ID, guildID)
	if err != nil {
		return errors.WrapTrace(err)
	}
	u.AddYggPush(&pb.S2CYggdrasilIntimacyChange{
		UserId:        other,
		IntimacyValue: intimacy,
		TotalIntimacy: totalIntimacy,
	})
	// only push to the other player while they are online
	if otherCache.OnlineStatus == 1 {
		othersTotalIntimacy, err := getTotalIntimacy(ctx, other, guildID)
		if err != nil {
			return errors.WrapTrace(err)
		}
		manager.EventQueue.Push(ctx, other, common.NewYggdrasilIntimacyChangeEvent(u.ID, intimacy, othersTotalIntimacy))
	}
	return nil
}
// QuerySimpleInfo returns simple user info for userIds. Every id must have
// been matched into this player's world; otherwise access is denied.
func (u *User) QuerySimpleInfo(ctx context.Context, userIds []int64) ([]*pb.VOUserInfoSimple, error) {
	if len(userIds) == 0 {
		return nil, errors.WrapTrace(common.ErrParamError)
	}
	// only allow querying users this player was matched with
	for _, id := range userIds {
		if _, ok := u.Yggdrasil.MatchUserIds[id]; !ok {
			return nil, errors.WrapTrace(common.ErrNoPermissionError)
		}
	}
	caches, err := manager.Global.GetUserCaches(ctx, userIds)
	if err != nil {
		return nil, errors.WrapTrace(err)
	}
	infos := make([]*pb.VOUserInfoSimple, 0, len(caches))
	for _, c := range caches {
		infos = append(infos, c.VOUserInfoSimple())
	}
	return infos, nil
}
// YggBattleGiveUp backs the player out of a battle with the object uid.
// For passive monsters and challenge altars the player steps back to an
// adjacent tile; for active monsters the whole team is wiped and returned
// to the closest safe position.
func (u *User) YggBattleGiveUp(ctx context.Context, uid int64) error {
	if u.Yggdrasil.Status == YggdrasilStateQuit {
		return errors.WrapTrace(common.ErrYggdrasilNotInTravel)
	}
	object, ok := u.Yggdrasil.Entities.FindObjectByUid(uid)
	if !ok {
		return errors.WrapTrace(common.ErrYggdrasilObjectNotFound)
	}
	// the object must be on the player's current tile
	if *(object.Position) != *(u.Yggdrasil.TravelPos) {
		return errors.WrapTrace(common.ErrYggdrasilObjectNotFound)
	}
	state, err := checkObjectState(object, static.YggObjectTypePassivermonster, static.YggObjectTypeInitiativemonster, static.YggObjectTypeChallengealtar)
	if err != nil {
		return errors.WrapTrace(err)
	}
	if state.ObjectType == static.YggObjectTypePassivermonster || state.ObjectType == static.YggObjectTypeChallengealtar {
		// passive monster / challenge altar: step back to the previous tile
		// if it is adjacent, otherwise pick a tile from the surrounding ring
		pos := *u.Yggdrasil.TravelPos
		if coordinate.CubeDistance(*u.Yggdrasil.TravelPosBefore, *u.Yggdrasil.TravelPos) == 1 {
			pos = *u.Yggdrasil.TravelPosBefore
		} else {
			ring := coordinate.CubeRing(*u.Yggdrasil.TravelPos, 1)
			for _, position := range ring {
				// NOTE(review): this picks the first neighbour for which
				// canMoveTo returns an ERROR — that looks inverted (expected
				// err == nil to choose a walkable tile); confirm intent.
				if _, err := u.Yggdrasil.canMoveTo(*u.Yggdrasil.TravelPos, position); err != nil {
					pos = position
					break
				}
			}
		}
		_, _, err = u.Yggdrasil.ChangePos(ctx, u, pos)
		if err != nil {
			return errors.WrapTrace(err)
		}
	} else if state.ObjectType == static.YggObjectTypeInitiativemonster {
		// active monster: team wipes and returns to the nearest safe spot
		err := u.SetAllDeadAndReturnSafePos(ctx)
		if err != nil {
			return errors.WrapTrace(err)
		}
	}
	return nil
}
// SetAllDeadAndReturnSafePos teleports the player to the closest safe
// position, then marks the whole travel team dead and zeroes the travel AP.
func (u *User) SetAllDeadAndReturnSafePos(ctx context.Context) error {
	safePos, err := manager.CSV.Yggdrasil.GetClosestSafePos(*u.Yggdrasil.TravelPos)
	if err != nil {
		return errors.WrapTrace(err)
	}
	if _, _, err = u.Yggdrasil.ChangePos(ctx, u, *safePos); err != nil {
		return errors.WrapTrace(err)
	}
	u.Yggdrasil.TravelInfo.SetAllDead()
	u.Yggdrasil.TravelInfo.TravelAp = 0
	return nil
}
// YggMonsterInitPos returns the original spawn position of the initiative
// monster identified by objectUid.
// (The travel-status guard was already disabled in the previous revision.)
func (u *User) YggMonsterInitPos(objectUid int64) (*pb.VOPosition, error) {
	obj, found := u.Yggdrasil.Entities.FindObjectByUid(objectUid)
	if !found {
		return nil, errors.WrapTrace(common.ErrYggdrasilObjectNotFound)
	}
	if _, err := checkObjectState(obj, static.YggObjectTypeInitiativemonster); err != nil {
		return nil, errors.WrapTrace(err)
	}
	return obj.OrgPos.VOPosition(), nil
}
// YggMonsterBackInitPos moves an initiative monster back to its original
// spawn position and returns its updated view object.
// (The travel-status guard was already disabled in the previous revision.)
func (u *User) YggMonsterBackInitPos(ctx context.Context, objectUid int64) (*pb.VOYggdrasilObject, error) {
	obj, found := u.Yggdrasil.Entities.FindObjectByUid(objectUid)
	if !found {
		return nil, errors.WrapTrace(common.ErrYggdrasilObjectNotFound)
	}
	if _, err := checkObjectState(obj, static.YggObjectTypeInitiativemonster); err != nil {
		return nil, errors.WrapTrace(err)
	}
	if err := u.Yggdrasil.ObjectMove(ctx, u, obj, *obj.OrgPos); err != nil {
		return nil, errors.WrapTrace(err)
	}
	return obj.VOYggdrasilObject(), nil
}
// SetYggCharacterHp records the post-battle HP of each character and, if
// the whole party has been wiped, zeroes the remaining travel AP.
func (u *User) SetYggCharacterHp(ctx context.Context, charactersAfterBattle []*pb.VOBattleCharacter) {
	for _, ch := range charactersAfterBattle {
		u.Yggdrasil.TravelInfo.SetCharacterHp(ch.CharacterId, ch.HpPercent)
	}
	if !u.Yggdrasil.TravelInfo.AllDead() {
		return
	}
	u.Yggdrasil.TravelInfo.TravelAp = 0
}
|
package controller
import (
"godson/controller/api"
"godson/controller/test"
"net/http"
"github.com/gin-gonic/gin"
swagger "github.com/swaggo/gin-swagger"
"github.com/swaggo/gin-swagger/swaggerFiles"
)
// Route wires up the application's top-level routes ("总路由"): a
// permanent redirect from the root to /web/index, the Swagger UI, and the
// test and api route groups.
func Route(r *gin.Engine) {
	r.GET("/", func(c *gin.Context) {
		// 301 so browsers cache the root → index hop.
		c.Redirect(http.StatusMovedPermanently, "/web/index")
	})
	r.GET("/swagger/*any", swagger.WrapHandler(swaggerFiles.Handler))
	test.Route(r)
	api.Route(r)
}
|
package store
import (
"bytes"
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
"github.com/tilt-dev/tilt/internal/k8s/testyaml"
"github.com/tilt-dev/tilt/internal/store/k8sconv"
"github.com/tilt-dev/tilt/internal/testutils/manifestbuilder"
"github.com/tilt-dev/tilt/internal/testutils/tempdir"
"github.com/tilt-dev/tilt/pkg/model"
)
// TestToJSON verifies that the engine state encoder emits JSON that both
// contains the deployed manifest's YAML and parses back as valid JSON.
func TestToJSON(t *testing.T) {
	f := tempdir.NewTempDirFixture(t)
	// Build a minimal manifest "fe" with a known K8s YAML payload.
	m := manifestbuilder.New(f, "fe").
		WithK8sYAML(testyaml.SanchoYAML).
		Build()
	state := newState([]model.Manifest{m})
	mState, _ := state.ManifestState("fe")
	// Attach a deploy result so the encoder has apply data to serialize.
	mState.MutableBuildStatus(m.K8sTarget().ID()).LastResult = NewK8sDeployResult(
		m.K8sTarget().ID(), &k8sconv.KubernetesApplyFilter{})
	buf := bytes.NewBuffer(nil)
	encoder := CreateEngineStateEncoder(buf)
	err := encoder.Encode(state)
	if err != nil {
		t.Fatal(err)
	}
	assert.Contains(t, buf.String(), "yaml")
	assert.Contains(t, buf.String(), "kind: Deployment")
	// Make sure the data can decode successfully.
	decoder := json.NewDecoder(bytes.NewBufferString(buf.String()))
	var v interface{}
	err = decoder.Decode(&v)
	if err != nil {
		t.Fatalf("Error decoding JSON: %v\nSource:\n%s\n", err, buf.String())
	}
}
|
package engine
// Color is an 8-bit-per-channel RGBA color.
type Color struct {
	// R, G, B, A are the red, green, blue, and alpha components (0-255).
	R, G, B, A uint8
}
|
package udwCryptoEncryptV3
import (
"bytes"
"crypto/cipher"
"crypto/rand"
"errors"
"github.com/tachyon-protocol/udw/AesCtr"
"github.com/tachyon-protocol/udw/udwBytes"
"github.com/tachyon-protocol/udw/udwNet"
"io"
"sync"
)
// ErrMsgDecryptKey is returned when the 4-byte magic check fails after
// decrypting the stream header, i.e. the peer used a different key.
const ErrMsgDecryptKey = "decrypt key error magic buf not match"

// gMagicBuf is a fixed 4-byte marker encrypted into every stream header;
// the reader decrypts and compares it to detect a key mismatch early.
var gMagicBuf = []byte{0xc6, 0x1f, 0x2d, 0xae}
// MustSymmetryConn wraps conn with AES-CTR symmetric encryption keyed by
// key, panicking if the cipher cannot be constructed (hence "Must").
//
// Delegates to NewSymmetryConnWithBlock instead of duplicating the struct
// literal, so the two constructors cannot drift apart.
func MustSymmetryConn(conn io.ReadWriteCloser, key *[32]byte) (outConn io.ReadWriteCloser) {
	block, err := AesCtr.NewCipher((*key)[:])
	if err != nil {
		panic(err)
	}
	return NewSymmetryConnWithBlock(conn, block)
}
// NewSymmetryConnWithBlock wraps conn with symmetric encryption driven by
// an already-constructed block cipher.
func NewSymmetryConnWithBlock(conn io.ReadWriteCloser, block cipher.Block) io.ReadWriteCloser {
	sc := &symmetryConn{}
	sc.rwc = conn
	sc.block = block
	return sc
}
// symmetryConn encrypts and decrypts all traffic over rwc with AES-CTR.
// The first 20 bytes of each direction form a header: a 16-byte random IV
// followed by the 4-byte encrypted magic marker (gMagicBuf).
type symmetryConn struct {
	rwc   io.ReadWriteCloser
	block cipher.Block
	// wBuf is scratch space reused across Write calls.
	wBuf udwBytes.BufWriter
	// wCtr/rCtr are the per-direction CTR streams, created lazily from
	// the IV exchanged in the first Write/Read.
	wCtr cipher.Stream
	rCtr cipher.Stream
	// hasWrite/hasRead record whether that direction's header has been
	// sent/consumed yet.
	hasWrite bool
	hasRead  bool
	readLock  sync.Mutex
	writeLock sync.Mutex
	// rBuf holds the 20-byte incoming header.
	rBuf [20]byte
}
// Write encrypts src and writes it to the underlying connection.
//
// The first call sends a 20-byte header (16-byte random IV + encrypted
// 4-byte magic) prepended to the first payload; the CTR stream derived
// from that IV encrypts all subsequent writes. The returned n counts only
// payload bytes (header bytes are subtracted). Guarded by writeLock; any
// I/O error closes the whole connection.
func (c *symmetryConn) Write(src []byte) (n int, err error) {
	c.writeLock.Lock()
	if !c.hasWrite {
		c.hasWrite = true
		// Header and payload share one buffer: [0:16) IV, [16:20) magic,
		// [20:) encrypted payload.
		buf := c.wBuf.GetHeadBuffer(len(src) + 20)
		_, err = io.ReadFull(rand.Reader, buf[:16])
		if err != nil {
			c.writeLock.Unlock()
			c.Close()
			return 0, err
		}
		ctr := AesCtr.PoolGetAesCtr(c.block, buf[:16])
		ctr.XORKeyStream(buf[16:20], gMagicBuf)
		ctr.XORKeyStream(buf[20:], src)
		c.wCtr = ctr
		n, err := c.rwc.Write(buf)
		// Report only payload bytes; clamp at 0 if even the header did
		// not go out completely.
		n = n - 20
		if n < 0 {
			n = 0
		}
		c.writeLock.Unlock()
		if err != nil {
			c.Close()
		}
		return n, err
	}
	if c.wCtr == nil {
		// Close already released the CTR stream — the conn is unusable.
		c.writeLock.Unlock()
		c.Close()
		return 0, errors.New(udwNet.ErrMsgSocketCloseError + " e25qs67py8")
	}
	buf := c.wBuf.GetHeadBuffer(len(src))
	c.wCtr.XORKeyStream(buf, src)
	n, err = c.rwc.Write(buf)
	if n != len(src) {
		if err == nil {
			err = io.ErrShortWrite
		}
	}
	c.writeLock.Unlock()
	return n, err
}
// Read reads and decrypts data from the underlying connection into dst.
//
// The first call consumes the 20-byte header (16-byte IV + 4-byte
// encrypted magic) and derives the read-direction CTR stream; a magic
// mismatch means the peer used a different key and the conn is closed.
// Guarded by readLock; errors close the connection.
func (c *symmetryConn) Read(dst []byte) (n int, err error) {
	c.readLock.Lock()
	if !c.hasRead {
		c.hasRead = true
		buf := c.rBuf[:]
		_, err := io.ReadFull(c.rwc, buf)
		if err != nil {
			c.readLock.Unlock()
			c.Close()
			return 0, err
		}
		ctr := AesCtr.PoolGetAesCtr(c.block, buf[:16])
		// Decrypt the magic in place and verify the key matches.
		ctr.XORKeyStream(buf[16:20], buf[16:20])
		if !bytes.Equal(buf[16:20], gMagicBuf) {
			c.readLock.Unlock()
			c.Close()
			return 0, errors.New(ErrMsgDecryptKey)
		}
		c.rCtr = ctr
	}
	if c.rCtr == nil {
		// Close already released the CTR stream — the conn is unusable.
		c.readLock.Unlock()
		c.Close()
		return 0, errors.New(udwNet.ErrMsgSocketCloseError + " 4vg8b6g4rn")
	}
	n, err = c.rwc.Read(dst)
	// Decrypt in place; the keystream advances exactly n bytes.
	c.rCtr.XORKeyStream(dst[:n], dst[:n])
	c.readLock.Unlock()
	return n, err
}
// Close closes the underlying connection and returns both CTR streams to
// the AesCtr pool. The nil checks make the stream release idempotent, so
// Close is safe to call from multiple error paths.
func (c *symmetryConn) Close() (err error) {
	err = c.rwc.Close()
	c.writeLock.Lock()
	if c.wCtr != nil {
		AesCtr.PoolPutAesCtr(c.wCtr)
		c.wCtr = nil
	}
	c.writeLock.Unlock()
	c.readLock.Lock()
	if c.rCtr != nil {
		AesCtr.PoolPutAesCtr(c.rCtr)
		c.rCtr = nil
	}
	c.readLock.Unlock()
	return err
}
// gNewSymmetryConnWithBlockPool recycles symmetryConn objects to reduce
// per-connection allocations.
var gNewSymmetryConnWithBlockPool = sync.Pool{}
// PoolGetSymmetryConnWithBlock returns a pooled *symmetryConn re-armed
// with conn and block, or allocates a fresh one when the pool is empty
// (or holds an unexpected type).
func PoolGetSymmetryConnWithBlock(conn io.ReadWriteCloser, block cipher.Block) io.ReadWriteCloser {
	// A nil or foreign pool entry fails the assertion and falls back to a
	// fresh allocation.
	pooled, ok := gNewSymmetryConnWithBlockPool.Get().(*symmetryConn)
	if !ok {
		return NewSymmetryConnWithBlock(conn, block)
	}
	pooled.rwc = conn
	pooled.block = block
	pooled.wBuf.Reset()
	pooled.hasWrite = false
	pooled.hasRead = false
	return pooled
}
// PoolPutSymmetryConnAndClose closes rwc and, when it is a *symmetryConn,
// strips its references and returns it to the pool for reuse.
func PoolPutSymmetryConnAndClose(rwc io.ReadWriteCloser) {
	if rwc == nil {
		return
	}
	rwc.Close()
	sc, isSym := rwc.(*symmetryConn)
	if !isSym {
		return
	}
	// Drop references so the pooled object does not pin the old conn or
	// cipher.
	sc.rwc = nil
	sc.block = nil
	gNewSymmetryConnWithBlockPool.Put(sc)
}
|
package docker
import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"golang.org/x/net/context"
)
// Docker holds a snapshot of the containers reported by the local daemon.
type Docker struct {
	Containers []types.Container
}
// Get returns a fresh Docker snapshot populated with the current
// container list.
func (d *Docker) Get() Docker {
	return Docker{
		Containers: d.List(),
	}
}
// List returns all running containers reported by the local Docker
// daemon. It panics on any client or API error, matching the original
// behavior (callers treat a Docker failure as fatal).
func (d *Docker) List() []types.Container {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Release the client's idle HTTP connections when done; the original
	// leaked one client per call.
	defer cli.Close()
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
	if err != nil {
		panic(err)
	}
	return containers
}
|
package emm
import (
"encoding/xml"
"io/ioutil"
"strings"
"testing"
"github.com/certeu/emmchan/rss"
)
const (
	// cd is a sample EMM channel-directory XML document used as fixture
	// input for the tests below.
	cd = `<directory>
<channel id="P_malekalssite">
<dc:format>rss</dc:format>
<dc:type>webnews</dc:type>
<dc:subject>eucert</dc:subject>
<dc:description>malekals site</dc:description>
<dc:identifier>http://www.malekal.com/</dc:identifier>
<iso:country>US</iso:country>
<region>Global</region>
<category>Specialist</category>
<ranking>1</ranking>
<iso:language>en</iso:language>
<ocs:schedule>
<ocs:updatePeriod>daily</ocs:updatePeriod>
<ocs:updateFrequency>2</ocs:updateFrequency>
</ocs:schedule>
<feed title="malekals site" url="http://www.malekal.com/feed/"/>
</channel>
</directory>`
)
// rssFeed is a minimal pre-parsed RSS feed fixture shared by the tests.
var rssFeed *rss.Feed

// init populates rssFeed.
// NOTE(review): Channel.Link uses the misspelled host "zscalaer" while
// Links[0] uses "zscaler" — presumably intentional test data; confirm.
func init() {
	rssFeed = &rss.Feed{
		XMLName: xml.Name{Space: "", Local: "rss"},
		Channel: &rss.Channel{
			Title:         "Research Blog",
			Link:          "https://www.zscalaer.com/",
			Links:         []string{"https://www.zscaler.com/", "", ""},
			Description:   "",
			Language:      "en",
			PubDate:       "Wed, 04 Oct 2017 03:54:41 -0700",
			LastBuildDate: "Fri, 06 Oct 2017 01:00:11 -0700",
			Items:         []rss.Item(nil),
		},
	}
}
// newDirectory builds a Directory from the given XML string.
func newDirectory(xmlstr string) *Directory {
	return NewDirectory(xmlstr)
}
// TestLoadDump checks that a channel-directory XML document round-trips
// through Load and Dump without error.
func TestLoadDump(t *testing.T) {
	in := strings.NewReader(cd)
	d := &Directory{}
	// Include the underlying error in each failure message so broken
	// round-trips are diagnosable (the original dropped err entirely).
	if err := d.Load(in); err != nil {
		t.Errorf("Could not load channel directory: %v", err)
	}
	if err := d.Dump(ioutil.Discard); err != nil {
		t.Errorf("Could not dump channel directory: %v", err)
	}
}
// TestIndex checks Channels.Index lookups for a present and an absent
// channel identifier.
func TestIndex(t *testing.T) {
	d := newDirectory(cd)
	cases := []struct {
		id   string
		want int
	}{
		{"http://www.malekal.com/", 0},
		{"http://cert.europa.eu/", -1},
	}
	for _, tc := range cases {
		if got := d.Channels.Index(tc.id); got != tc.want {
			t.Errorf("Channels.Index(%q) = %d; want %d", tc.id, got, tc.want)
		}
	}
}
// TestNewChannel verifies that NewChannel derives the channel ID and
// identifier from the RSS feed fixture.
func TestNewChannel(t *testing.T) {
	c := NewChannel(rssFeed, "Public")
	// Use || so the test fails when EITHER field is wrong; the original
	// && only failed when both were wrong, masking single-field
	// regressions.
	if c.ID != "ResearchBlog" || c.Identifier != "https://www.zscalaer.com/" {
		t.Errorf("NewChannel() = %s; want ResearchBlog", c.ID)
	}
}
// TestAdd verifies that Add appends channels to the directory. The same
// channel is added twice on purpose: both insertions are expected to
// count, i.e. Add performs no deduplication.
func TestAdd(t *testing.T) {
	d := newDirectory(cd)
	c := NewChannel(rssFeed, d.Instance)
	d.Add(c)
	d.Add(c)
	if len(d.Channels) != 2 {
		t.Errorf("Channel wasn't properly added.")
	}
}
|
package dbtoapi
import "github.com/spf13/viper"
// Config holds the settings loaded from config.yml by loadConfig:
// database connection parameters and the HTTP server listen port.
type Config struct {
	DBType         string // database.type
	DBServerIP     string // database.server-ip
	DBServerPort   string // database.server-port
	DBName         string // database.name
	DBUsername     string // database.username
	DBPassword     string // database.password
	HttpServerPort string // server.port
}
// conf is the package-wide configuration, populated by loadConfig.
var conf *Config

// Automatic loading at init time is currently disabled; loadConfig must
// be called explicitly.
/*func init() {
	loadConfig()
}*/
// loadConfig reads config.yml via viper and fills the package-level conf.
// Read failures are passed to checkErr (defined elsewhere in this
// package) for handling.
func loadConfig() {
	// Read the configuration file with viper (translated from the
	// original Chinese comment).
	viper.SetConfigFile("config.yml")
	viper.SetConfigType("yaml")
	err := viper.ReadInConfig()
	checkErr(err)
	conf = &Config{
		DBType:         viper.GetString("database.type"),
		DBServerIP:     viper.GetString("database.server-ip"),
		DBServerPort:   viper.GetString("database.server-port"),
		DBName:         viper.GetString("database.name"),
		DBUsername:     viper.GetString("database.username"),
		DBPassword:     viper.GetString("database.password"),
		HttpServerPort: viper.GetString("server.port"),
	}
}
|
// Copyright 2018 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ipcache
import (
"reflect"
"testing"
. "gopkg.in/check.v1"
identityPkg "github.com/cilium/cilium/pkg/identity"
)
// Hook up gocheck into the "go test" runner.
type IPCacheTestSuite struct{}

// Register the suite instance with gocheck.
var _ = Suite(&IPCacheTestSuite{})

// Test is the single go-test entry point that delegates to gocheck.
func Test(t *testing.T) {
	TestingT(t)
}
// TestIPCache exercises the ip→identity and identity→ip mappings of
// IPIdentityCache: upsert, duplicate upsert, delete, identity
// replacement, and many-IPs-to-one-identity mappings.
func (s *IPCacheTestSuite) TestIPCache(c *C) {
	endpointIP := "10.0.0.15"
	identity := (identityPkg.NumericIdentity(68))
	// Assure sane state at start.
	c.Assert(len(IPIdentityCache.ipToIdentityCache), Equals, 0)
	c.Assert(len(IPIdentityCache.identityToIPCache), Equals, 0)
	// Deletion of key that doesn't exist doesn't cause panic.
	IPIdentityCache.delete(endpointIP)
	IPIdentityCache.upsert(endpointIP, identity)
	// Assure both caches are updated..
	c.Assert(len(IPIdentityCache.ipToIdentityCache), Equals, 1)
	c.Assert(len(IPIdentityCache.identityToIPCache), Equals, 1)
	cachedIdentity, exists := IPIdentityCache.LookupByIP(endpointIP)
	c.Assert(cachedIdentity, Equals, identity)
	c.Assert(exists, Equals, true)
	IPIdentityCache.upsert(endpointIP, identity)
	// No duplicates.
	c.Assert(len(IPIdentityCache.ipToIdentityCache), Equals, 1)
	c.Assert(len(IPIdentityCache.identityToIPCache), Equals, 1)
	IPIdentityCache.delete(endpointIP)
	// Assure deletion occurs across both mappings.
	c.Assert(len(IPIdentityCache.ipToIdentityCache), Equals, 0)
	c.Assert(len(IPIdentityCache.identityToIPCache), Equals, 0)
	_, exists = IPIdentityCache.LookupByIP(endpointIP)
	c.Assert(exists, Equals, false)
	IPIdentityCache.upsert(endpointIP, identity)
	newIdentity := identityPkg.NumericIdentity(69)
	IPIdentityCache.upsert(endpointIP, newIdentity)
	// Ensure that update of cache with new identity doesn't keep old identity-to-ip
	// mapping around.
	_, exists = IPIdentityCache.LookupByIdentity(identity)
	c.Assert(exists, Equals, false)
	cachedIPSet, exists := IPIdentityCache.LookupByIdentity(newIdentity)
	c.Assert(exists, Equals, true)
	for cachedIP := range cachedIPSet {
		c.Assert(cachedIP, Equals, endpointIP)
	}
	IPIdentityCache.delete(endpointIP)
	// Assure deletion occurs across both mappings.
	c.Assert(len(IPIdentityCache.ipToIdentityCache), Equals, 0)
	c.Assert(len(IPIdentityCache.identityToIPCache), Equals, 0)
	// Test mapping of multiple IPs to same identity.
	endpointIPs := []string{"192.168.0.1", "20.3.75.3", "27.2.2.2", "127.0.0.1", "127.0.0.1"}
	identities := []identityPkg.NumericIdentity{5, 67, 29, 29, 29}
	for index := range endpointIPs {
		IPIdentityCache.upsert(endpointIPs[index], identities[index])
		cachedIdentity, _ := IPIdentityCache.LookupByIP(endpointIPs[index])
		c.Assert(cachedIdentity, Equals, identities[index])
	}
	expectedIPList := map[string]struct{}{
		"27.2.2.2":  {},
		"127.0.0.1": {},
	}
	cachedEndpointIPs, _ := IPIdentityCache.LookupByIdentity(29)
	c.Assert(reflect.DeepEqual(cachedEndpointIPs, expectedIPList), Equals, true)
	IPIdentityCache.delete("27.2.2.2")
	expectedIPList = map[string]struct{}{
		"127.0.0.1": {},
	}
	cachedEndpointIPs, _ = IPIdentityCache.LookupByIdentity(29)
	c.Assert(reflect.DeepEqual(cachedEndpointIPs, expectedIPList), Equals, true)
	// NOTE(review): exists is assigned here but never asserted before it
	// is reassigned below — consider asserting it is true.
	cachedIdentity, exists = IPIdentityCache.LookupByIP("127.0.0.1")
	c.Assert(cachedIdentity, Equals, identityPkg.NumericIdentity(29))
	IPIdentityCache.delete("127.0.0.1")
	_, exists = IPIdentityCache.LookupByIdentity(29)
	c.Assert(exists, Equals, false)
	// Clean up.
	for index := range endpointIPs {
		IPIdentityCache.delete(endpointIPs[index])
		_, exists = IPIdentityCache.LookupByIP(endpointIPs[index])
		c.Assert(exists, Equals, false)
		_, exists = IPIdentityCache.LookupByIdentity(identities[index])
		c.Assert(exists, Equals, false)
	}
	c.Assert(len(IPIdentityCache.ipToIdentityCache), Equals, 0)
	c.Assert(len(IPIdentityCache.identityToIPCache), Equals, 0)
}
|
package models
import "testing"
// TestConektaError_Error verifies the formatted message produced by
// ConektaError.Error.
func TestConektaError_Error(t *testing.T) {
	c := &ConektaError{
		Object:  "Some Object",
		Type:    "Some Type",
		LogId:   "",
		Details: []Detail{},
	}
	want := "Conekta error. Object: Some Object\nType: Some Type \nDetails:[]"
	t.Run("OK", func(t *testing.T) {
		if got := c.Error(); got != want {
			t.Errorf("ConektaError.Error() = %v, want %v", got, want)
		}
	})
}
|
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package currency
import (
"github.com/bitmark-inc/bitmarkd/fault"
)
// GetFee - returns the fee for a specific currency, expressed in that
// currency's smallest unit (satoshis for Bitcoin/Litecoin). Unknown
// currencies yield fault.InvalidCurrency.
func (currency Currency) GetFee() (uint64, error) {
	switch currency {
	case Nothing:
		return 0, nil
	case Bitcoin:
		return 10000, nil
	case Litecoin:
		return 100000, nil // as of 2017-07-28 Litecoin penalises any Vout < 100,000 Satoshi
	default:
		return 0, fault.InvalidCurrency
	}
}
|
package main
import (
"encoding/json"
"fmt"
"html/template"
"net/http"
)
// P is a demo user record serialized to JSON by getUsers.
type P struct {
	Name string
	Age  int
}
func table(wr http.ResponseWriter, re *http.Request) {
t, _ := template.ParseFiles("view/table.html")
t.Execute(wr, nil)
}
// getUsers writes the fixed demo user list as a JSON array.
func getUsers(wr http.ResponseWriter, re *http.Request) {
	users := []P{
		{"lee", 20},
		{"Ying", 19},
	}
	wr.Header().Set("Content-Type", "application/json;charset=utf-8")
	// Encoder.Encode emits the JSON followed by a newline — identical
	// bytes to the old Marshal+Fprintln — but surfaces the error the
	// original silently discarded.
	if err := json.NewEncoder(wr).Encode(users); err != nil {
		fmt.Println(err)
	}
}
// main serves static assets, the table page, and the user-list JSON
// endpoint on port 8899.
func main() {
	server := http.Server{Addr: ":8899"}
	// server.Handler is nil, so these registrations on the default mux
	// are what the server actually dispatches to.
	http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
	http.HandleFunc("/table", table)
	http.HandleFunc("/getUsers", getUsers)
	err := server.ListenAndServe()
	if err != nil {
		fmt.Println(err)
	}
}
|
/* {{{ Copyright (c) 2017, Paul R. Tagliamonte <paultag@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE. }}} */
package policy
import (
"io"
"crypto/x509"
)
// Preparer prepares an x509 certificate template for signing according
// to CA policy.
type Preparer interface {
	// This takes a template x509 Certificate, and prepares it for signing
	// according to CA policy.
	//
	// Bare minimum things this ought to do:
	//
	//  - Set the Serial Number to something secure
	//  - Set the NotBefore and NotAfter
	//  - Set any Key Usage bits
	//  - Set IsCA and friends
	//
	// NOTE(review): the io.Reader presumably supplies randomness (e.g.
	// for the serial number) — confirm against implementations.
	Prepare(io.Reader, *x509.Certificate) error
}
// Translator turns a certificate signing request into a Certificate
// template suitable for a Preparer.
type Translator interface {
	// This defines the process by which a CSR is turned into a Certificate
	// template.
	CSRToCertificate(*x509.CertificateRequest) (*x509.Certificate, error)
}
// vim: foldmethod=marker
|
package game
import (
"encoding/json"
"fmt"
)
// getMsg extracts the request name and payload from a ClientMessage.
//
// name is the first argument of NetWork.Request("webcenter", data) — the
// requested module and method — and data is the payload accompanying the
// request. (Translated from the original Chinese comments.)
//
// Missing or wrongly-typed fields yield zero values instead of panicking
// as the original unchecked type assertions did, and the fields are
// looked up directly instead of scanning every map key.
func (cm ClientMessage) getMsg() (string, map[string]interface{}) {
	name := ""
	data := make(map[string]interface{})
	if raw, ok := cm.Data["msg"]; ok {
		if m, ok := raw.(map[string]interface{}); ok {
			data = m
		}
	}
	if raw, ok := cm.Data["name"]; ok {
		if s, ok := raw.(string); ok {
			name = s
		}
	}
	return name, data
}
// response is the unified way for a module to answer a client request
// (translated from the original Chinese comment). It wraps msg in a
// ResponseMessage named "S_response", copies the caller's requestID out
// of the original message, and writes the JSON over the websocket.
// NOTE(review): the type assertions on cm.Msg and tem["requestID"] panic
// on malformed input — confirm upstream guarantees the shape.
func (cm *ClientMessage) response(msg ResponseData) {
	var res ResponseMessage
	res.Name = "S_response"
	tem := cm.Msg.(map[string]interface{})
	msg.RequestID = tem["requestID"].(float64)
	res.Data = msg
	raw, e := json.Marshal(res)
	if e != nil {
		fmt.Println(e)
		return
	}
	e = cm.Socket.wsWrite(1, raw)
	if e != nil {
		fmt.Println(e)
	}
}
// PushMessage proactively pushes a named message to the client over this
// websocket connection (translated from the original Chinese comment).
func (cm *WsConnection) PushMessage(name string, data interface{}) {
	res := ResponseMessage{Name: name, Data: data}
	raw, err := json.Marshal(res)
	if err != nil {
		fmt.Println(err)
		return
	}
	if err := cm.wsWrite(1, raw); err != nil {
		fmt.Println("推送消息失败")
		fmt.Println(err)
	}
}
// error is the unified error reply for a client request (translated from
// the original Chinese comment). It sends a ResponseMessage named
// "S_error" carrying info and the caller's requestID.
// NOTE(review): like response, the unchecked type assertions panic on
// malformed input — confirm upstream guarantees the shape.
func (cm *ClientMessage) error(info string) {
	msg := ResponseData{}
	var res ResponseMessage
	res.Name = "S_error"
	tem := cm.Msg.(map[string]interface{})
	msg.RequestID = tem["requestID"].(float64)
	msg.Msg = info
	res.Data = msg
	raw, e := json.Marshal(res)
	if e != nil {
		fmt.Println(e)
		return
	}
	e = cm.Socket.wsWrite(1, raw)
	if e != nil {
		fmt.Println(e)
		return
	}
}
|
// Copyright (C) 2015-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under
// the terms of the under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package credhub_tests
import (
"encoding/json"
"log"
"strings"
"code.cloudfoundry.org/credhub-cli/credhub"
"code.cloudfoundry.org/credhub-cli/credhub/credentials"
"code.cloudfoundry.org/credhub-cli/credhub/credentials/values"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/pborman/uuid"
"github.com/pivotal-cf/on-demand-service-broker/boshdirector"
"github.com/pivotal-cf/on-demand-service-broker/broker"
odbcredhub "github.com/pivotal-cf/on-demand-service-broker/credhub"
)
var _ = Describe("Credential store", func() {
var (
subject *odbcredhub.Store
credhubClient *credhub.CredHub
logBuffer *gbytes.Buffer
logger *log.Logger
)
BeforeEach(func() {
subject = getCredhubStore()
credhubClient = underlyingCredhubClient()
logBuffer = gbytes.NewBuffer()
logger = log.New(logBuffer, "contract-tests", log.LstdFlags)
})
Describe("Set (and delete)", func() {
It("sets and deletes a key-value map credential", func() {
keyPath := makeKeyPath("new-name")
err := subject.Set(keyPath, map[string]interface{}{"hi": "there"})
Expect(err).NotTo(HaveOccurred())
err = subject.Delete(keyPath)
Expect(err).NotTo(HaveOccurred())
})
It("can store plain string values", func() {
keyPath := makeKeyPath("stringy-cred")
err := subject.Set(keyPath, "I JUST LOVE CREDENTIALS.")
Expect(err).NotTo(HaveOccurred())
})
It("can store JSON values", func() {
keyPath := makeKeyPath("JSON-cred")
err := subject.Set(keyPath, map[string]interface{}{"jsonKey": "jsonValue"})
Expect(err).NotTo(HaveOccurred())
})
It("produces error when storing other types", func() {
keyPath := makeKeyPath("esoteric-cred")
err := subject.Set(keyPath, []interface{}{"asdf"})
Expect(err).To(MatchError("Unknown credential type"))
})
It("overwrites existing values", func() {
path := makeKeyPath("secret")
err := subject.Set(path, map[string]interface{}{"hi": "there"})
Expect(err).NotTo(HaveOccurred())
defer func() {
credhubClient.Delete(path)
}()
cred1, err := credhubClient.GetLatestJSON(path)
Expect(err).NotTo(HaveOccurred(), path)
Expect(cred1.Value).To(Equal(values.JSON{"hi": "there"}))
err = subject.Set(path, map[string]interface{}{"hello": "again"})
Expect(err).NotTo(HaveOccurred())
updatedCred, err := credhubClient.GetLatestJSON(path)
Expect(err).NotTo(HaveOccurred(), path)
Expect(updatedCred.Value).To(Equal(values.JSON{"hello": "again"}))
})
})
Describe("BulkSet", func() {
It("sets multiple values", func() {
path1 := makeKeyPath("secret-1")
path2 := makeKeyPath("secret-2")
err := subject.BulkSet([]broker.ManifestSecret{
{Name: "secret-1", Path: path1, Value: map[string]interface{}{"hi": "there"}},
{Name: "secret-2", Path: path2, Value: "value2"},
})
Expect(err).NotTo(HaveOccurred())
defer func() {
credhubClient.Delete(path1)
credhubClient.Delete(path2)
}()
cred1, err := credhubClient.GetLatestJSON(path1)
Expect(err).NotTo(HaveOccurred(), path1)
cred2, err := credhubClient.GetLatestValue(path2)
Expect(err).NotTo(HaveOccurred(), path2)
Expect(cred1.Value).To(Equal(values.JSON{"hi": "there"}))
Expect(cred2.Value).To(Equal(values.Value("value2")))
})
})
Describe("Add permission", func() {
It("can add permissions", func() {
keyPath := makeKeyPath("new-name-" + uuid.New()[:8])
err := subject.Set(keyPath, map[string]interface{}{"hi": "there"})
Expect(err).NotTo(HaveOccurred())
_, err = subject.AddPermission(keyPath, "alice", []string{"read"})
Expect(err).NotTo(HaveOccurred())
Expect(subject.Delete(keyPath)).To(Succeed())
})
})
Describe("Build", func() {
It("can't be constructed with a bad URI", func() {
_, err := odbcredhub.Build("💩://hi.there#you", credhub.SkipTLSValidation(true))
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("cannot contain colon"))
})
})
Describe("BulkGet", func() {
var (
jsonSecret credentials.JSON
passwordSecret credentials.Password
certSecret credentials.Certificate
valueSecret credentials.Value
rsaSecret credentials.RSA
sshSecret credentials.SSH
userSecret credentials.User
)
BeforeEach(func() {
var err error
valueSecret, err = credhubClient.SetValue("value-name", values.Value("value-secret"))
Expect(err).NotTo(HaveOccurred())
passwordSecret, err = credhubClient.SetPassword("password-name", "password")
Expect(err).NotTo(HaveOccurred())
jsonSecret, err = credhubClient.SetJSON("jsonsecret", values.JSON{"value": "foo"})
Expect(err).NotTo(HaveOccurred())
val := values.Certificate{
Ca: "-----BEGIN CERTIFICATE-----\nMIIDSjCCAjKgAwIBAgIUIwnRYqjEnzeMzNYuoctat+bi818wDQYJKoZIhvcNAQEL\nBQAwGTEXMBUGA1UEAxMOdG9tLmRpY2suaGFycnkwHhcNMTgwNzE2MTU0MzQwWhcN\nMTkwNzE2MTU0MzQwWjAZMRcwFQYDVQQDEw50b20uZGljay5oYXJyeTCCASIwDQYJ\nKoZIhvcNAQEBBQADggEPADCCAQoCggEBALzyeXfpTM0ek6FVzTuOjpBYGLk2Kdl3\nAJ2gKx1FDqyeXS2Hn9nEEWAWYAQ4xvZzI1gnYm/2EXmZ1t4fY4fL6XXwjirNtOyF\n+R5UvG6uVdyfQU+FNnqnE2TQ37wNr8oWCfpoVr0T1Z9n7fPnZZg0+DRXv6x/1bzG\nqfl029bxxJMl64psR8Ew8UfrZ7zT+/URE7ex1XznwWM68rfllGaB7myPjXG6Io6I\nn7fptsCFqI7/EwofjNARIqoRwmbdpOOVz53kR0WeppfiafPsKEC0KT4hvJqgdVr7\nt4YDD4JDdCNTX/NL4BOl3pp9iBpCnz2Rk9E3tEd8JUkcjTc86KsQLYUCAwEAAaOB\niTCBhjAdBgNVHQ4EFgQU8RxuIlg9XT6/S+HDOWfUayaOvWUwVAYDVR0jBE0wS4AU\n8RxuIlg9XT6/S+HDOWfUayaOvWWhHaQbMBkxFzAVBgNVBAMTDnRvbS5kaWNrLmhh\ncnJ5ghQjCdFiqMSfN4zM1i6hy1q35uLzXzAPBgNVHRMBAf8EBTADAQH/MA0GCSqG\nSIb3DQEBCwUAA4IBAQCu50sl64yo8n8/JRDEVibFwjmJj8h+ajcFGcFK9/iBq1Do\n4q8wibMH35sP9kDTGPJqu0IPxKUBaxkzZgIFjf7ujmyv5zEVQIqj9TdJiZs1QwkA\nKUaSBsFLSH9pweZhLVOgYab/ywc3xaKiQCuLAFovFKgqhfW5K6z3XpTEwknfP2Sj\n3An9KN9ZTp+x0f85oCuB8MXHyRTBF+js1pAMdfBGD6VnAfxn3QFx72x3x7YgG2zh\nyGNByRONHukFlzraQQ986237DXdhcAedkMA+OIZl+drLbEXDuPJT/dWp255FasZ4\n+pjdblNisoHZhV3W36NWxoQycjES2siEm8xHO43f\n-----END CERTIFICATE-----\n",
Certificate: "-----BEGIN CERTIFICATE-----\nMIIDSjCCAjKgAwIBAgIUIwnRYqjEnzeMzNYuoctat+bi818wDQYJKoZIhvcNAQEL\nBQAwGTEXMBUGA1UEAxMOdG9tLmRpY2suaGFycnkwHhcNMTgwNzE2MTU0MzQwWhcN\nMTkwNzE2MTU0MzQwWjAZMRcwFQYDVQQDEw50b20uZGljay5oYXJyeTCCASIwDQYJ\nKoZIhvcNAQEBBQADggEPADCCAQoCggEBALzyeXfpTM0ek6FVzTuOjpBYGLk2Kdl3\nAJ2gKx1FDqyeXS2Hn9nEEWAWYAQ4xvZzI1gnYm/2EXmZ1t4fY4fL6XXwjirNtOyF\n+R5UvG6uVdyfQU+FNnqnE2TQ37wNr8oWCfpoVr0T1Z9n7fPnZZg0+DRXv6x/1bzG\nqfl029bxxJMl64psR8Ew8UfrZ7zT+/URE7ex1XznwWM68rfllGaB7myPjXG6Io6I\nn7fptsCFqI7/EwofjNARIqoRwmbdpOOVz53kR0WeppfiafPsKEC0KT4hvJqgdVr7\nt4YDD4JDdCNTX/NL4BOl3pp9iBpCnz2Rk9E3tEd8JUkcjTc86KsQLYUCAwEAAaOB\niTCBhjAdBgNVHQ4EFgQU8RxuIlg9XT6/S+HDOWfUayaOvWUwVAYDVR0jBE0wS4AU\n8RxuIlg9XT6/S+HDOWfUayaOvWWhHaQbMBkxFzAVBgNVBAMTDnRvbS5kaWNrLmhh\ncnJ5ghQjCdFiqMSfN4zM1i6hy1q35uLzXzAPBgNVHRMBAf8EBTADAQH/MA0GCSqG\nSIb3DQEBCwUAA4IBAQCu50sl64yo8n8/JRDEVibFwjmJj8h+ajcFGcFK9/iBq1Do\n4q8wibMH35sP9kDTGPJqu0IPxKUBaxkzZgIFjf7ujmyv5zEVQIqj9TdJiZs1QwkA\nKUaSBsFLSH9pweZhLVOgYab/ywc3xaKiQCuLAFovFKgqhfW5K6z3XpTEwknfP2Sj\n3An9KN9ZTp+x0f85oCuB8MXHyRTBF+js1pAMdfBGD6VnAfxn3QFx72x3x7YgG2zh\nyGNByRONHukFlzraQQ986237DXdhcAedkMA+OIZl+drLbEXDuPJT/dWp255FasZ4\n+pjdblNisoHZhV3W36NWxoQycjES2siEm8xHO43f\n-----END CERTIFICATE-----\n",
PrivateKey: "-----BEGIN RSA PRIVATE KEY-----\nMIIEogIBAAKCAQEAvPJ5d+lMzR6ToVXNO46OkFgYuTYp2XcAnaArHUUOrJ5dLYef\n2cQRYBZgBDjG9nMjWCdib/YReZnW3h9jh8vpdfCOKs207IX5HlS8bq5V3J9BT4U2\neqcTZNDfvA2vyhYJ+mhWvRPVn2ft8+dlmDT4NFe/rH/VvMap+XTb1vHEkyXrimxH\nwTDxR+tnvNP79RETt7HVfOfBYzryt+WUZoHubI+Ncboijoift+m2wIWojv8TCh+M\n0BEiqhHCZt2k45XPneRHRZ6ml+Jp8+woQLQpPiG8mqB1Wvu3hgMPgkN0I1Nf80vg\nE6Xemn2IGkKfPZGT0Te0R3wlSRyNNzzoqxAthQIDAQABAoIBAFjfjHb0i6VnnnUi\nkJhU44XNikOD0IdzTBzYO69WziIvkxBZXLznVmzl2V/i/OLrIVLTo5+aFHon/EMa\nbIxxQ2ywK47Clzkxgw3bOY6t/cD6P5QRyqBCegLPpI0luuvJFgRsk2/4JmEGV4yD\n6OuA7sZgB84xiu1yXHzzlHwz2AyF2JL8dXe82DM33DnlERdT93pvoOgd4G65fnlw\nUVj4qMXaLlCRX3kDVyLInNfUHfTBNLAd31K2pRbNfgh6/A+hszO2lOU4jY3C6dGl\nJvcjMl/MP1flwCd8sN5OqWaSw8vvDpKy3V0T/nbvVmkxBmIRWFNUGip0tzB739m0\noMHL1/kCgYEA42d3LzYp7Kq6bDCe4DNfuEN3KfFAgCV56mjXm3IG82G+qkwE5HX5\nlzsVI6CFzgLHIC0y5k36q3PN9YV3bVBzyumBLsGqfmYpc3n0RNsBdCSYFBWx8Skm\nMO6a2MBb+DO7VAFbNj66k8zSgUSxtnNETvVmdQ8DLfvk1Ygs5DORwR8CgYEA1LUC\n8b3y+JadEHX9cTmew8Hm5eEzna8UjQsEHdmsPwDkayNzoqEQc7dyZmAvxgLmPDtt\nT6co/Js2MLgzGwjlK9/Wxl4BhWdAJltIY4T43pCnpTI5gder5lYJXDwIDU/SSp08\nrxSr0KaFfrdXeku1I//wbUpR/J+O2PBzGuLJCNsCgYB+YRQFsu5dzwxH8EV7iFGc\nEDJ7ps4X6bv1oEqi4x4lyJ6z+geGCGKrv3QiFqYGNdkAct4kzBWRj4xY9NHIeLvB\ne0AGAi+Ei7ZhrNcqJSSLrYKvNtdrlVjaPODlsRHrwKRNLWvJm9cJKP2cRdcV9L1z\nvEIysCMuPR2R5lo8gMRyNQKBgHnqIfzi7W9UDEQSDKin6Pq0mZ4qvMXlQrcwmDRv\nvc0Cuuk5kZ6mCGL6w0QwX1Fz+fiN6zJbUh+u6pl0Cj61k3zZOCXMXbzTmC4j5dK8\ntVQDv0LtDY8BSZKkv4qxEcBnftWrV8vV4kCeISem+CmtWO6AVJKfpWxRG7P15VOE\npss/AoGASRnijgkQE8cOuzoUSkYcNaKhRxo3m6OC7j2h6/Y3kLq1R9HgziEfoBpk\nkc1zdGLK02jHXLndbq07PHxNX6UctZllS/UjKNNgPgEjrGpmCy5K3CCxVR74plwo\nbbOUktEp2PuBY28iHugtbFWKqsqEx1O0r2/1tRxkEKUdKumnnYU=\n-----END RSA PRIVATE KEY-----\n",
}
certSecret, err = credhubClient.SetCertificate("certsecret", val)
Expect(err).NotTo(HaveOccurred())
rsaSecret, err = credhubClient.SetRSA("rsa-name", values.RSA{
PublicKey: "-----BEGIN RSA PUBLIC KEY-----\nMIIEogIBAAKCAQEAvPJ5d+lMzR6ToVXNO46OkFgYuTYp2XcAnaArHUUOrJ5dLYef\n2cQRYBZgBDjG9nMjWCdib/YReZnW3h9jh8vpdfCOKs207IX5HlS8bq5V3J9BT4U2\neqcTZNDfvA2vyhYJ+mhWvRPVn2ft8+dlmDT4NFe/rH/VvMap+XTb1vHEkyXrimxH\nwTDxR+tnvNP79RETt7HVfOfBYzryt+WUZoHubI+Ncboijoift+m2wIWojv8TCh+M\n0BEiqhHCZt2k45XPneRHRZ6ml+Jp8+woQLQpPiG8mqB1Wvu3hgMPgkN0I1Nf80vg\nE6Xemn2IGkKfPZGT0Te0R3wlSRyNNzzoqxAthQIDAQABAoIBAFjfjHb0i6VnnnUi\nkJhU44XNikOD0IdzTBzYO69WziIvkxBZXLznVmzl2V/i/OLrIVLTo5+aFHon/EMa\nbIxxQ2ywK47Clzkxgw3bOY6t/cD6P5QRyqBCegLPpI0luuvJFgRsk2/4JmEGV4yD\n6OuA7sZgB84xiu1yXHzzlHwz2AyF2JL8dXe82DM33DnlERdT93pvoOgd4G65fnlw\nUVj4qMXaLlCRX3kDVyLInNfUHfTBNLAd31K2pRbNfgh6/A+hszO2lOU4jY3C6dGl\nJvcjMl/MP1flwCd8sN5OqWaSw8vvDpKy3V0T/nbvVmkxBmIRWFNUGip0tzB739m0\noMHL1/kCgYEA42d3LzYp7Kq6bDCe4DNfuEN3KfFAgCV56mjXm3IG82G+qkwE5HX5\nlzsVI6CFzgLHIC0y5k36q3PN9YV3bVBzyumBLsGqfmYpc3n0RNsBdCSYFBWx8Skm\nMO6a2MBb+DO7VAFbNj66k8zSgUSxtnNETvVmdQ8DLfvk1Ygs5DORwR8CgYEA1LUC\n8b3y+JadEHX9cTmew8Hm5eEzna8UjQsEHdmsPwDkayNzoqEQc7dyZmAvxgLmPDtt\nT6co/Js2MLgzGwjlK9/Wxl4BhWdAJltIY4T43pCnpTI5gder5lYJXDwIDU/SSp08\nrxSr0KaFfrdXeku1I//wbUpR/J+O2PBzGuLJCNsCgYB+YRQFsu5dzwxH8EV7iFGc\nEDJ7ps4X6bv1oEqi4x4lyJ6z+geGCGKrv3QiFqYGNdkAct4kzBWRj4xY9NHIeLvB\ne0AGAi+Ei7ZhrNcqJSSLrYKvNtdrlVjaPODlsRHrwKRNLWvJm9cJKP2cRdcV9L1z\nvEIysCMuPR2R5lo8gMRyNQKBgHnqIfzi7W9UDEQSDKin6Pq0mZ4qvMXlQrcwmDRv\nvc0Cuuk5kZ6mCGL6w0QwX1Fz+fiN6zJbUh+u6pl0Cj61k3zZOCXMXbzTmC4j5dK8\ntVQDv0LtDY8BSZKkv4qxEcBnftWrV8vV4kCeISem+CmtWO6AVJKfpWxRG7P15VOE\npss/AoGASRnijgkQE8cOuzoUSkYcNaKhRxo3m6OC7j2h6/Y3kLq1R9HgziEfoBpk\nkc1zdGLK02jHXLndbq07PHxNX6UctZllS/UjKNNgPgEjrGpmCy5K3CCxVR74plwo\nbbOUktEp2PuBY28iHugtbFWKqsqEx1O0r2/1tRxkEKUdKumnnYU=\n-----END RSA PRIVATE KEY-----\n",
PrivateKey: "-----BEGIN RSA PRIVATE KEY-----\nMIIEogIBAAKCAQEAvPJ5d+lMzR6ToVXNO46OkFgYuTYp2XcAnaArHUUOrJ5dLYef\n2cQRYBZgBDjG9nMjWCdib/YReZnW3h9jh8vpdfCOKs207IX5HlS8bq5V3J9BT4U2\neqcTZNDfvA2vyhYJ+mhWvRPVn2ft8+dlmDT4NFe/rH/VvMap+XTb1vHEkyXrimxH\nwTDxR+tnvNP79RETt7HVfOfBYzryt+WUZoHubI+Ncboijoift+m2wIWojv8TCh+M\n0BEiqhHCZt2k45XPneRHRZ6ml+Jp8+woQLQpPiG8mqB1Wvu3hgMPgkN0I1Nf80vg\nE6Xemn2IGkKfPZGT0Te0R3wlSRyNNzzoqxAthQIDAQABAoIBAFjfjHb0i6VnnnUi\nkJhU44XNikOD0IdzTBzYO69WziIvkxBZXLznVmzl2V/i/OLrIVLTo5+aFHon/EMa\nbIxxQ2ywK47Clzkxgw3bOY6t/cD6P5QRyqBCegLPpI0luuvJFgRsk2/4JmEGV4yD\n6OuA7sZgB84xiu1yXHzzlHwz2AyF2JL8dXe82DM33DnlERdT93pvoOgd4G65fnlw\nUVj4qMXaLlCRX3kDVyLInNfUHfTBNLAd31K2pRbNfgh6/A+hszO2lOU4jY3C6dGl\nJvcjMl/MP1flwCd8sN5OqWaSw8vvDpKy3V0T/nbvVmkxBmIRWFNUGip0tzB739m0\noMHL1/kCgYEA42d3LzYp7Kq6bDCe4DNfuEN3KfFAgCV56mjXm3IG82G+qkwE5HX5\nlzsVI6CFzgLHIC0y5k36q3PN9YV3bVBzyumBLsGqfmYpc3n0RNsBdCSYFBWx8Skm\nMO6a2MBb+DO7VAFbNj66k8zSgUSxtnNETvVmdQ8DLfvk1Ygs5DORwR8CgYEA1LUC\n8b3y+JadEHX9cTmew8Hm5eEzna8UjQsEHdmsPwDkayNzoqEQc7dyZmAvxgLmPDtt\nT6co/Js2MLgzGwjlK9/Wxl4BhWdAJltIY4T43pCnpTI5gder5lYJXDwIDU/SSp08\nrxSr0KaFfrdXeku1I//wbUpR/J+O2PBzGuLJCNsCgYB+YRQFsu5dzwxH8EV7iFGc\nEDJ7ps4X6bv1oEqi4x4lyJ6z+geGCGKrv3QiFqYGNdkAct4kzBWRj4xY9NHIeLvB\ne0AGAi+Ei7ZhrNcqJSSLrYKvNtdrlVjaPODlsRHrwKRNLWvJm9cJKP2cRdcV9L1z\nvEIysCMuPR2R5lo8gMRyNQKBgHnqIfzi7W9UDEQSDKin6Pq0mZ4qvMXlQrcwmDRv\nvc0Cuuk5kZ6mCGL6w0QwX1Fz+fiN6zJbUh+u6pl0Cj61k3zZOCXMXbzTmC4j5dK8\ntVQDv0LtDY8BSZKkv4qxEcBnftWrV8vV4kCeISem+CmtWO6AVJKfpWxRG7P15VOE\npss/AoGASRnijgkQE8cOuzoUSkYcNaKhRxo3m6OC7j2h6/Y3kLq1R9HgziEfoBpk\nkc1zdGLK02jHXLndbq07PHxNX6UctZllS/UjKNNgPgEjrGpmCy5K3CCxVR74plwo\nbbOUktEp2PuBY28iHugtbFWKqsqEx1O0r2/1tRxkEKUdKumnnYU=\n-----END RSA PRIVATE KEY-----\n",
})
Expect(err).NotTo(HaveOccurred())
sshSecret, err = credhubClient.SetSSH("ssh-name", values.SSH{
PublicKey: "-----BEGIN RSA PUBLIC KEY-----\nMIIEogIBAAKCAQEAvPJ5d+lMzR6ToVXNO46OkFgYuTYp2XcAnaArHUUOrJ5dLYef\n2cQRYBZgBDjG9nMjWCdib/YReZnW3h9jh8vpdfCOKs207IX5HlS8bq5V3J9BT4U2\neqcTZNDfvA2vyhYJ+mhWvRPVn2ft8+dlmDT4NFe/rH/VvMap+XTb1vHEkyXrimxH\nwTDxR+tnvNP79RETt7HVfOfBYzryt+WUZoHubI+Ncboijoift+m2wIWojv8TCh+M\n0BEiqhHCZt2k45XPneRHRZ6ml+Jp8+woQLQpPiG8mqB1Wvu3hgMPgkN0I1Nf80vg\nE6Xemn2IGkKfPZGT0Te0R3wlSRyNNzzoqxAthQIDAQABAoIBAFjfjHb0i6VnnnUi\nkJhU44XNikOD0IdzTBzYO69WziIvkxBZXLznVmzl2V/i/OLrIVLTo5+aFHon/EMa\nbIxxQ2ywK47Clzkxgw3bOY6t/cD6P5QRyqBCegLPpI0luuvJFgRsk2/4JmEGV4yD\n6OuA7sZgB84xiu1yXHzzlHwz2AyF2JL8dXe82DM33DnlERdT93pvoOgd4G65fnlw\nUVj4qMXaLlCRX3kDVyLInNfUHfTBNLAd31K2pRbNfgh6/A+hszO2lOU4jY3C6dGl\nJvcjMl/MP1flwCd8sN5OqWaSw8vvDpKy3V0T/nbvVmkxBmIRWFNUGip0tzB739m0\noMHL1/kCgYEA42d3LzYp7Kq6bDCe4DNfuEN3KfFAgCV56mjXm3IG82G+qkwE5HX5\nlzsVI6CFzgLHIC0y5k36q3PN9YV3bVBzyumBLsGqfmYpc3n0RNsBdCSYFBWx8Skm\nMO6a2MBb+DO7VAFbNj66k8zSgUSxtnNETvVmdQ8DLfvk1Ygs5DORwR8CgYEA1LUC\n8b3y+JadEHX9cTmew8Hm5eEzna8UjQsEHdmsPwDkayNzoqEQc7dyZmAvxgLmPDtt\nT6co/Js2MLgzGwjlK9/Wxl4BhWdAJltIY4T43pCnpTI5gder5lYJXDwIDU/SSp08\nrxSr0KaFfrdXeku1I//wbUpR/J+O2PBzGuLJCNsCgYB+YRQFsu5dzwxH8EV7iFGc\nEDJ7ps4X6bv1oEqi4x4lyJ6z+geGCGKrv3QiFqYGNdkAct4kzBWRj4xY9NHIeLvB\ne0AGAi+Ei7ZhrNcqJSSLrYKvNtdrlVjaPODlsRHrwKRNLWvJm9cJKP2cRdcV9L1z\nvEIysCMuPR2R5lo8gMRyNQKBgHnqIfzi7W9UDEQSDKin6Pq0mZ4qvMXlQrcwmDRv\nvc0Cuuk5kZ6mCGL6w0QwX1Fz+fiN6zJbUh+u6pl0Cj61k3zZOCXMXbzTmC4j5dK8\ntVQDv0LtDY8BSZKkv4qxEcBnftWrV8vV4kCeISem+CmtWO6AVJKfpWxRG7P15VOE\npss/AoGASRnijgkQE8cOuzoUSkYcNaKhRxo3m6OC7j2h6/Y3kLq1R9HgziEfoBpk\nkc1zdGLK02jHXLndbq07PHxNX6UctZllS/UjKNNgPgEjrGpmCy5K3CCxVR74plwo\nbbOUktEp2PuBY28iHugtbFWKqsqEx1O0r2/1tRxkEKUdKumnnYU=\n-----END RSA PRIVATE KEY-----\n",
PrivateKey: "-----BEGIN RSA PRIVATE KEY-----\nMIIEogIBAAKCAQEAvPJ5d+lMzR6ToVXNO46OkFgYuTYp2XcAnaArHUUOrJ5dLYef\n2cQRYBZgBDjG9nMjWCdib/YReZnW3h9jh8vpdfCOKs207IX5HlS8bq5V3J9BT4U2\neqcTZNDfvA2vyhYJ+mhWvRPVn2ft8+dlmDT4NFe/rH/VvMap+XTb1vHEkyXrimxH\nwTDxR+tnvNP79RETt7HVfOfBYzryt+WUZoHubI+Ncboijoift+m2wIWojv8TCh+M\n0BEiqhHCZt2k45XPneRHRZ6ml+Jp8+woQLQpPiG8mqB1Wvu3hgMPgkN0I1Nf80vg\nE6Xemn2IGkKfPZGT0Te0R3wlSRyNNzzoqxAthQIDAQABAoIBAFjfjHb0i6VnnnUi\nkJhU44XNikOD0IdzTBzYO69WziIvkxBZXLznVmzl2V/i/OLrIVLTo5+aFHon/EMa\nbIxxQ2ywK47Clzkxgw3bOY6t/cD6P5QRyqBCegLPpI0luuvJFgRsk2/4JmEGV4yD\n6OuA7sZgB84xiu1yXHzzlHwz2AyF2JL8dXe82DM33DnlERdT93pvoOgd4G65fnlw\nUVj4qMXaLlCRX3kDVyLInNfUHfTBNLAd31K2pRbNfgh6/A+hszO2lOU4jY3C6dGl\nJvcjMl/MP1flwCd8sN5OqWaSw8vvDpKy3V0T/nbvVmkxBmIRWFNUGip0tzB739m0\noMHL1/kCgYEA42d3LzYp7Kq6bDCe4DNfuEN3KfFAgCV56mjXm3IG82G+qkwE5HX5\nlzsVI6CFzgLHIC0y5k36q3PN9YV3bVBzyumBLsGqfmYpc3n0RNsBdCSYFBWx8Skm\nMO6a2MBb+DO7VAFbNj66k8zSgUSxtnNETvVmdQ8DLfvk1Ygs5DORwR8CgYEA1LUC\n8b3y+JadEHX9cTmew8Hm5eEzna8UjQsEHdmsPwDkayNzoqEQc7dyZmAvxgLmPDtt\nT6co/Js2MLgzGwjlK9/Wxl4BhWdAJltIY4T43pCnpTI5gder5lYJXDwIDU/SSp08\nrxSr0KaFfrdXeku1I//wbUpR/J+O2PBzGuLJCNsCgYB+YRQFsu5dzwxH8EV7iFGc\nEDJ7ps4X6bv1oEqi4x4lyJ6z+geGCGKrv3QiFqYGNdkAct4kzBWRj4xY9NHIeLvB\ne0AGAi+Ei7ZhrNcqJSSLrYKvNtdrlVjaPODlsRHrwKRNLWvJm9cJKP2cRdcV9L1z\nvEIysCMuPR2R5lo8gMRyNQKBgHnqIfzi7W9UDEQSDKin6Pq0mZ4qvMXlQrcwmDRv\nvc0Cuuk5kZ6mCGL6w0QwX1Fz+fiN6zJbUh+u6pl0Cj61k3zZOCXMXbzTmC4j5dK8\ntVQDv0LtDY8BSZKkv4qxEcBnftWrV8vV4kCeISem+CmtWO6AVJKfpWxRG7P15VOE\npss/AoGASRnijgkQE8cOuzoUSkYcNaKhRxo3m6OC7j2h6/Y3kLq1R9HgziEfoBpk\nkc1zdGLK02jHXLndbq07PHxNX6UctZllS/UjKNNgPgEjrGpmCy5K3CCxVR74plwo\nbbOUktEp2PuBY28iHugtbFWKqsqEx1O0r2/1tRxkEKUdKumnnYU=\n-----END RSA PRIVATE KEY-----\n",
})
Expect(err).NotTo(HaveOccurred())
userSecret, err = credhubClient.SetUser("user-name", values.User{
Username: "bob",
Password: "pass",
})
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
Expect(credhubClient.Delete(passwordSecret.Name)).To(Succeed())
Expect(credhubClient.Delete(jsonSecret.Name)).To(Succeed())
Expect(credhubClient.Delete(certSecret.Name)).To(Succeed())
Expect(credhubClient.Delete(sshSecret.Name)).To(Succeed())
Expect(credhubClient.Delete(rsaSecret.Name)).To(Succeed())
Expect(credhubClient.Delete(userSecret.Name)).To(Succeed())
Expect(credhubClient.Delete(valueSecret.Name)).To(Succeed())
})
Describe("types returned by credhub cli library", func() {
It("returns a password type as a string, and not a values.Password", func() {
secret, err := credhubClient.GetLatestVersion(passwordSecret.Name)
Expect(err).NotTo(HaveOccurred())
secretValue := secret.Value
_, ok := secretValue.(string)
Expect(ok).To(BeTrue(), "secret is not a string")
_, ok = secretValue.(values.Password)
Expect(ok).To(BeFalse(), "secret is actually a values.Password")
})
It("returns a json type as a map[string]interface{}, and not a values.JSON", func() {
secret, err := credhubClient.GetLatestVersion(jsonSecret.Name)
Expect(err).NotTo(HaveOccurred())
secretValue := secret.Value
_, ok := secretValue.(map[string]interface{})
Expect(ok).To(BeTrue(), "secret is not a map[string]interface{}")
_, ok = secretValue.(values.JSON)
Expect(ok).To(BeFalse(), "secret is actually a values.JSON")
})
It("returns a certificate type as a map[string]interface{}, and not a values.Certificate", func() {
secret, err := credhubClient.GetLatestVersion(certSecret.Name)
Expect(err).NotTo(HaveOccurred())
secretValue := secret.Value
secretValueMap, ok := secretValue.(map[string]interface{})
Expect(ok).To(BeTrue(), "secret is not a map[string]interface{}")
_, ok = secretValue.(values.Certificate)
Expect(ok).To(BeFalse(), "secret is actually a values.Certificate")
_, ok = secretValueMap["private_key"]
Expect(ok).To(BeTrue(), "secret doesn't have a private_key key")
})
It("returns a value type as a string, and not a values.Value", func() {
secret, err := credhubClient.GetLatestVersion(valueSecret.Name)
Expect(err).NotTo(HaveOccurred())
secretValue := secret.Value
_, ok := secretValue.(string)
Expect(ok).To(BeTrue(), "secret is not a string")
_, ok = secretValue.(values.Value)
Expect(ok).To(BeFalse(), "secret is actually a values.Value")
})
It("returns an SSH type as a map[string]interface{}, and not a values.SSH", func() {
secret, err := credhubClient.GetLatestVersion(sshSecret.Name)
Expect(err).NotTo(HaveOccurred())
secretValue := secret.Value
_, ok := secretValue.(map[string]interface{})
Expect(ok).To(BeTrue(), "secret is not a map[string]interface{}")
_, ok = secretValue.(values.SSH)
Expect(ok).To(BeFalse(), "secret is actually a values.SSH")
})
It("returns an RSA type as a map[string]interface{}, and not a values.RSA", func() {
secret, err := credhubClient.GetLatestVersion(rsaSecret.Name)
Expect(err).NotTo(HaveOccurred())
secretValue := secret.Value
_, ok := secretValue.(map[string]interface{})
Expect(ok).To(BeTrue(), "secret is not a map[string]interface{}")
_, ok = secretValue.(values.RSA)
Expect(ok).To(BeFalse(), "secret is actually a values.RSA")
})
It("returns a user type as a map[string]interface{}, and not a values.User", func() {
secret, err := credhubClient.GetLatestVersion(userSecret.Name)
Expect(err).NotTo(HaveOccurred())
secretValue := secret.Value
secretValueMap, ok := secretValue.(map[string]interface{})
Expect(ok).To(BeTrue(), "secret is not a map[string]interface{}")
_, ok = secretValue.(values.User)
Expect(ok).To(BeFalse(), "secret is actually a values.User")
_, ok = secretValueMap["username"]
Expect(ok).To(BeTrue(), "secret does not have a 'username' key")
})
})
It("can fetch secrets from credhub", func() {
secretWithSubkeyName := strings.Join([]string{certSecret.Name, "private_key"}, ".")
secretsToFetch := map[string]boshdirector.Variable{
passwordSecret.Name: {Path: passwordSecret.Name},
jsonSecret.Name: {Path: jsonSecret.Name, ID: jsonSecret.Id},
certSecret.Name: {Path: certSecret.Name},
secretWithSubkeyName: {Path: certSecret.Name},
}
jsonSecretValue, err := json.Marshal(jsonSecret.Value)
Expect(err).NotTo(HaveOccurred())
certSecretValue, err := json.Marshal(certSecret.Value)
Expect(err).NotTo(HaveOccurred())
expectedSecrets := map[string]string{
passwordSecret.Name: string(passwordSecret.Value),
jsonSecret.Name: string(jsonSecretValue),
certSecret.Name: string(certSecretValue),
secretWithSubkeyName: string(certSecret.Value.PrivateKey),
}
actualSecrets, err := subject.BulkGet(secretsToFetch, logger)
Expect(err).NotTo(HaveOccurred())
Expect(actualSecrets).To(Equal(expectedSecrets))
})
It("should use ID when present", func() {
By("creating two versions of the same secret")
newPasswordSecret, err := credhubClient.SetPassword(passwordSecret.Name, "newthepass")
Expect(err).NotTo(HaveOccurred())
By("fetching the secret by ID when present")
secretsToFetch := map[string]boshdirector.Variable{
passwordSecret.Name: {Path: "foo", ID: passwordSecret.Id},
}
expectedSecrets := map[string]string{
passwordSecret.Name: string(passwordSecret.Value),
}
actualSecrets, err := subject.BulkGet(secretsToFetch, logger)
Expect(err).NotTo(HaveOccurred())
Expect(actualSecrets).To(Equal(expectedSecrets))
By("fetching the secret by Path when Id isn't present")
secretsToFetch = map[string]boshdirector.Variable{
passwordSecret.Name: {Path: passwordSecret.Name},
}
expectedSecrets = map[string]string{
passwordSecret.Name: string(newPasswordSecret.Value),
}
actualSecrets, err = subject.BulkGet(secretsToFetch, logger)
Expect(err).NotTo(HaveOccurred())
Expect(actualSecrets).To(Equal(expectedSecrets))
})
It("logs when the credential doesn't exist", func() {
secretsToFetch := map[string]boshdirector.Variable{
"blah": {Path: "blah"},
}
_, err := subject.BulkGet(secretsToFetch, logger)
Expect(err).ToNot(HaveOccurred())
Expect(logBuffer).To(gbytes.Say("Could not resolve blah"))
})
})
Describe("FindNameLike", func() {
var (
randomGuid string
paths []string
)
BeforeEach(func() {
randomGuid = uuid.New()[:7]
paths = []string{
"/odb/path/" + randomGuid + "/instance/secret",
"/pizza/" + randomGuid + "/pie",
}
for p := range paths {
_, err := credhubClient.SetValue(paths[p], values.Value("someValue"))
Expect(err).NotTo(HaveOccurred())
}
})
AfterEach(func() {
for p := range paths {
err := credhubClient.Delete(paths[p])
Expect(err).NotTo(HaveOccurred())
}
})
It("can find all secrets containing a portion of a path in their path", func() {
actualPaths, err := subject.FindNameLike(randomGuid, nil)
Expect(err).NotTo(HaveOccurred())
Expect(len(actualPaths)).To(Equal(2))
Expect(actualPaths).To(ConsistOf(paths))
})
})
Describe("BulkDelete", func() {
var (
randomGuid string
paths []string
)
BeforeEach(func() {
randomGuid = uuid.New()[:7]
paths = []string{
"/odb/path/" + randomGuid + "/instance/secret",
"/pizza/" + randomGuid + "/pie",
}
for p := range paths {
_, err := credhubClient.SetValue(paths[p], values.Value("someValue"))
Expect(err).NotTo(HaveOccurred())
}
})
AfterEach(func() {
for p := range paths {
credhubClient.Delete(paths[p])
}
})
It("can delete all secrets", func() {
err := subject.BulkDelete(paths, nil)
Expect(err).NotTo(HaveOccurred())
for p := range paths {
_, err := credhubClient.GetLatestValue(paths[p])
Expect(err).To(MatchError(ContainSubstring("credential does not exist")))
}
})
})
})
|
package main
import (
"fmt"
)
// repeatedSubstringPattern reports whether s can be built by repeating a
// proper substring of itself one or more times (e.g. "abab" -> "ab").
//
// It computes the KMP failure function f, where f[i] is the end index of
// the longest proper border (prefix that is also a suffix) of s[:i+1], or
// -1 when none exists. The string is periodic exactly when a border exists
// and the shortest period len(s)-f[len(s)-1]-1 divides len(s).
func repeatedSubstringPattern(s string) bool {
	// Bug fix: the original indexed f[0] unconditionally, which panics on
	// the empty string. An empty string has no repeating pattern.
	if len(s) == 0 {
		return false
	}
	f := make([]int, len(s))
	f[0] = -1
	for i, j := 1, -1; i < len(s); i++ {
		// Fall back through shorter borders until one can be extended.
		for j >= 0 && s[j+1] != s[i] {
			j = f[j]
		}
		if s[j+1] == s[i] {
			j++
		}
		f[i] = j
	}
	// period >= 1 always holds since f[len(s)-1] <= len(s)-2.
	period := len(s) - f[len(s)-1] - 1
	return f[len(s)-1] != -1 && len(s)%period == 0
}
// main demonstrates repeatedSubstringPattern on a sample repeating input.
func main() {
	isRepeated := repeatedSubstringPattern("abcabcabc")
	fmt.Println(isRepeated)
}
|
package main
import (
"github.com/bitwurx/jrpc2"
)
// main wires the service together: it initializes the database, then
// serves the priority-queue API over JSON-RPC 2.0 on :8080 at /rpc.
func main() {
	InitDatabase()
	// nil uses the server's default headers/options.
	s := jrpc2.NewServer(":8080", "/rpc", nil)
	// Register the v1 API handlers backed by the priority-queue model.
	NewApiV1(&PriorityQueueModel{}, s)
	s.Start() // NOTE(review): assuming jrpc2.Server.Start blocks serving requests — confirm
}
|
package utils
// Column describes a single SQL table column: its name, its data type,
// and an optional constraint clause.
type Column struct {
	name       string
	typeData   string
	constraint string
}

// columnBuilder is the concrete, mutable implementation behind ColumnBuilder.
type columnBuilder struct {
	name       string
	typeData   string
	constraint string
}

// ColumnBuilder builds Column values through a fluent interface.
type ColumnBuilder interface {
	Name(string) ColumnBuilder
	Primary() ColumnBuilder
	TypeData(string) ColumnBuilder
	NotNull() ColumnBuilder
	Build() Column
}

// NewColumn returns a fresh builder; every field starts as the empty string.
func NewColumn() ColumnBuilder {
	return &columnBuilder{}
}

// Name sets the column name.
func (b *columnBuilder) Name(name string) ColumnBuilder {
	b.name = name
	return b
}

// TypeData sets the column's SQL data type verbatim.
func (b *columnBuilder) TypeData(typeData string) ColumnBuilder {
	b.typeData = typeData
	return b
}

// Primary marks the column as an auto-incrementing primary key.
// Note that this overwrites any data type set via TypeData.
func (b *columnBuilder) Primary() ColumnBuilder {
	b.typeData = "serial PRIMARY KEY"
	return b
}

// NotNull adds a NOT NULL constraint to the column.
func (b *columnBuilder) NotNull() ColumnBuilder {
	b.constraint = "NOT NULL"
	return b
}

// Build snapshots the builder's current state into an immutable Column.
func (b *columnBuilder) Build() Column {
	col := Column{
		name:       b.name,
		typeData:   b.typeData,
		constraint: b.constraint,
	}
	return col
}
|
package bauxebotdiscord
import (
	"log"
	"os"
	"os/signal"
	"strings"
	"syscall"

	"github.com/Chris-SG/BauxeBot_Go/discord/commands"
	"github.com/bwmarrin/discordgo"
)
// Session for discord bot
var (
	discord *discordgo.Session // active bot session, created in init
	err     error              // package-wide last setup error; NOTE(review): shared mutable error state is fragile
	prefix  string             // command prefix, set by StartBotDiscord
	cmdList *cmd.Commands      // registered commands, populated in init
	bot     *discordgo.User    // the bot's own user, resolved in onReady
)
// onMessageCreate handles every incoming message: it logs the message,
// drops the bot's own messages to avoid feedback loops, then dispatches
// the first matching dummy/color/debug command, or any matching
// moderation command, each in its own goroutine.
func onMessageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {
	// Bug fix: State.Channel can fail (channel not yet cached); the
	// original ignored the error and dereferenced a possibly-nil channel.
	channel, chanErr := discord.State.Channel(m.ChannelID)
	if chanErr != nil {
		log.Printf("chan %s msg %s by id %s", m.ChannelID, m.Content, m.Author.ID)
	} else {
		log.Printf("chan %s msg %s by id %s", channel.Name, m.Content, m.Author.ID)
	}
	// We don't want bot-loop sort things, maybe add support for disabling commands for all bots.
	// Guard bot != nil: onReady may not have populated it yet.
	if bot != nil && m.Author.ID == bot.ID {
		return
	}
	// Check all dummy commands
	for _, dummyCmd := range cmdList.DummyCommands {
		if strings.HasPrefix(m.Content, prefix+dummyCmd.Common.Caller) {
			go dummyCmd.Execute(s, m)
			return
		}
	}
	// Check all color commands
	for _, colorCmd := range cmdList.ColorCommands {
		if strings.HasPrefix(m.Content, prefix+colorCmd.Common.Caller) {
			go colorCmd.Execute(s, m)
			return
		}
	}
	// Check all debug commands
	for _, debugCmd := range cmdList.DebugCommands {
		if strings.HasPrefix(m.Content, prefix+debugCmd.Common.Caller) {
			go debugCmd.Execute(s, m)
			return
		}
	}
	// Check all moderation commands.
	// NOTE(review): unlike the loops above this one does not return after a
	// match, so multiple moderation commands can fire — confirm intent.
	for _, moderationCmd := range cmdList.ModerationCommands {
		if strings.HasPrefix(m.Content, prefix+moderationCmd.Common.Caller) {
			go moderationCmd.Execute(s, m)
		}
	}
}
// onReady runs once the gateway reports readiness: it sets the bot's
// status line and caches the bot's own user for the self-message check
// in onMessageCreate.
func onReady(s *discordgo.Session, event *discordgo.Ready) {
	log.Print("Ready!")
	// Bug fix: both calls below had their errors silently discarded; a
	// failed User("@me") left bot nil and onMessageCreate would panic.
	if statusErr := discord.UpdateStatus(0, "Trying to make this work..."); statusErr != nil {
		log.Printf("Failed to update status: %s", statusErr)
	}
	var userErr error
	bot, userErr = discord.User("@me")
	if userErr != nil {
		log.Printf("Failed to look up bot user: %s", userErr)
	}
}
// init creates the Discord session and registers the built-in commands.
// NOTE(review): init-time side effects make startup ordering hard to
// reason about; consider explicit setup functions.
func init() {
	// Security fix: never log the bot token — it is a credential and the
	// original wrote it verbatim into the logs. Only report presence.
	if os.Getenv("DISCORD_BOT_TOKEN") == "" {
		log.Print("DISCORD_BOT_TOKEN is not set; authentication will fail")
	} else {
		log.Print("Initializing session with token from env DISCORD_BOT_TOKEN")
	}
	discord, err = discordgo.New()
	if err != nil {
		log.Printf("Failed to create session: %s", err)
	}
	discord.Token = "Bot " + os.Getenv("DISCORD_BOT_TOKEN")
	// Test commands, will make more elegant in time
	cmdList = cmd.CmdList
	var c cmd.Command
	c = cmd.CommandColor{Common: cmd.CommandCommon{Caller: "color", Response: "Setting {HL_NAME}'s color to #{ARG1}.", Description: "Sets user's color", Structure: "!setcolor <color> (hex)", Action: "setcolor", Channels: []string{}, RequiredPermissions: 0, RequiredUsers: []string{}}}
	cmdList.ColorCommands = append(cmdList.ColorCommands, c.(cmd.CommandColor))
	c = cmd.CommandColor{Common: cmd.CommandCommon{Caller: "removecolor", Response: "Removing color from {HL_NAME}", Description: "Remove user's color", Structure: "!removecolor", Action: "removecolor", Channels: []string{}, RequiredPermissions: 0, RequiredUsers: []string{}}}
	cmdList.ColorCommands = append(cmdList.ColorCommands, c.(cmd.CommandColor))
	c = cmd.CommandDummy{Common: cmd.CommandCommon{Caller: "helo", Response: "helo", Description: "helo", Structure: "!helo", Channels: []string{}, RequiredPermissions: 0, RequiredUsers: []string{}}}
	cmdList.DummyCommands = append(cmdList.DummyCommands, c.(cmd.CommandDummy))
	// RequiredPermissions 2146958591 is discordgo's "all permissions" mask — debug is admin-only.
	c = cmd.CommandDebug{Common: cmd.CommandCommon{Caller: "debug", Response: "", Description: "debug", Structure: "!debug <param>", Action: "debug", Channels: []string{}, RequiredPermissions: 2146958591, RequiredUsers: []string{}}}
	cmdList.DebugCommands = append(cmdList.DebugCommands, c.(cmd.CommandDebug))
	// RequiredPermissions 8192 corresponds to the Manage Messages permission bit.
	c = cmd.CommandModeration{Common: cmd.CommandCommon{Caller: "delete", Response: "", Description: "Deletes specified message amount", Structure: "!delete <count>", Action: "deletebulk", Channels: []string{}, RequiredPermissions: 8192, RequiredUsers: []string{}}}
	cmdList.ModerationCommands = append(cmdList.ModerationCommands, c.(cmd.CommandModeration))
}
// StartBotDiscord starts the Discord bot: it resolves the bot user,
// registers the event handlers, opens the gateway session, and blocks
// until an interrupt or termination signal arrives, then closes the
// session cleanly.
func StartBotDiscord(cmdPrefix string) {
	log.Print("Starting session...")
	prefix = cmdPrefix
	discord.State.User, err = discord.User("@me")
	if err != nil {
		log.Printf("Error: %s", err)
	}
	// Add discord handlers
	discord.AddHandler(onReady)
	discord.AddHandler(onMessageCreate)
	// Open discord session
	log.Print("Opening session...")
	err = discord.Open()
	if err != nil {
		log.Printf("Failed to open session: %s", err)
	}
	// Wait for a signal to quit. Bug fix: the original notified on os.Kill
	// (SIGKILL), which can never be caught or ignored, so that entry was a
	// no-op; listen for SIGTERM instead so graceful shutdown works.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	<-c
	// Close the gateway connection before returning.
	if closeErr := discord.Close(); closeErr != nil {
		log.Printf("Failed to close session: %s", closeErr)
	}
}
|
package compute_test
import (
"errors"
"github.com/genevieve/leftovers/gcp/compute"
"github.com/genevieve/leftovers/gcp/compute/fakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
gcpcompute "google.golang.org/api/compute/v1"
)
// Instances exercises compute.Instances.List: listing VMs per zone,
// filtering them by name (or by the name of an attached network), and
// prompting the user before anything is marked for deletion.
var _ = Describe("Instances", func() {
	var (
		client    *fakes.InstancesClient // stubbed GCP instances API
		logger    *fakes.Logger          // records prompts shown to the user
		zones     map[string]string      // zone URL -> short zone name
		instances compute.Instances
	)
	BeforeEach(func() {
		client = &fakes.InstancesClient{}
		logger = &fakes.Logger{}
		zones = map[string]string{"https://zone-1": "zone-1"}
		instances = compute.NewInstances(client, logger, zones)
	})
	Describe("List", func() {
		var filter string
		BeforeEach(func() {
			// Default scenario: one matching instance and a confirming user.
			logger.PromptWithDetailsCall.Returns.Proceed = true
			client.ListInstancesCall.Returns.InstanceSlice = []*gcpcompute.Instance{{
				Name: "banana-instance",
				Zone: "https://zone-1",
			}}
			filter = "banana"
		})
		It("lists, filters, and prompts for instances to delete", func() {
			list, err := instances.List(filter, false)
			Expect(err).NotTo(HaveOccurred())
			// The zone URL must be translated to its short name for the API call.
			Expect(client.ListInstancesCall.CallCount).To(Equal(1))
			Expect(client.ListInstancesCall.Receives.Zone).To(Equal("zone-1"))
			Expect(logger.PromptWithDetailsCall.CallCount).To(Equal(1))
			Expect(logger.PromptWithDetailsCall.Receives.ResourceType).To(Equal("Compute Instance"))
			Expect(logger.PromptWithDetailsCall.Receives.ResourceName).To(Equal("banana-instance"))
			Expect(list).To(HaveLen(1))
		})
		Context("when the vm name does not contain the filter, but the network does", func() {
			BeforeEach(func() {
				client.ListInstancesCall.Returns.InstanceSlice = []*gcpcompute.Instance{{
					Name: "banana-instance",
					Zone: "https://zone-1",
					NetworkInterfaces: []*gcpcompute.NetworkInterface{{Network: "global/networks/kiwi"}},
				}}
				// The resolved network name, not the instance name, matches the filter.
				client.GetNetworkNameCall.Returns.Name = "kiwi-network"
				filter = "kiwi"
			})
			It("will add it to the list to delete", func() {
				list, err := instances.List(filter, false)
				Expect(err).NotTo(HaveOccurred())
				Expect(list).To(HaveLen(1))
			})
		})
		Context("when the client fails to list instances", func() {
			BeforeEach(func() {
				client.ListInstancesCall.Returns.Error = errors.New("some error")
			})
			It("returns the error", func() {
				_, err := instances.List(filter, false)
				Expect(err).To(MatchError("List Instances for zone zone-1: some error"))
			})
		})
		Context("when the clearer name for the instance group does not contain the filter", func() {
			It("does not add it to the list", func() {
				list, err := instances.List("grape", false)
				Expect(err).NotTo(HaveOccurred())
				// Filtered-out instances must never trigger a prompt.
				Expect(logger.PromptWithDetailsCall.CallCount).To(Equal(0))
				Expect(list).To(HaveLen(0))
			})
		})
		Context("when the user says no to the prompt", func() {
			BeforeEach(func() {
				logger.PromptWithDetailsCall.Returns.Proceed = false
			})
			It("does not add it to the list", func() {
				list, err := instances.List(filter, false)
				Expect(err).NotTo(HaveOccurred())
				Expect(list).To(HaveLen(0))
			})
		})
	})
})
|
// Copyright 2020 Insolar Network Ltd.
// All rights reserved.
// This material is licensed under the Insolar License version 1.0,
// available at https://github.com/insolar/block-explorer/blob/master/LICENSE.md.
// +build heavy_mock_integration
package api
import (
"fmt"
"math"
"strings"
"testing"
"github.com/insolar/block-explorer/instrumentation/converter"
"github.com/insolar/block-explorer/test/heavymock"
"github.com/insolar/block-explorer/test/integration"
"github.com/insolar/block-explorer/testutils"
"github.com/insolar/insolar/insolar"
"github.com/insolar/insolar/insolar/gen"
"github.com/insolar/spec-insolar-block-explorer-api/v1/client"
"github.com/stretchr/testify/require"
)
// Search result "type" discriminators returned by the explorer search API;
// compared against SearchResponse200.Type in the tests below.
const (
	typePulse = "pulse"
	typeJetDrop = "jet-drop"
	typeRef = "lifeline" // object references resolve to a lifeline
	typeRecord = "record"
)
// TestSearchApi checks the block explorer search endpoint. It imports a
// generated object lifeline, then searches by pulse number, jet drop id,
// object reference and record reference (existing and nonexistent), and
// finally runs a table of invalid inputs that must all be rejected.
func TestSearchApi(t *testing.T) {
	ts := integration.NewBlockExplorerTestSetup(t).WithHTTPServer(t)
	defer ts.Stop(t)
	// Build a lifeline of pulsesCount pulses with recordsCount records each.
	pulsesCount, recordsCount := 3, 2
	lifeline := testutils.GenerateObjectLifeline(pulsesCount, recordsCount)
	records := lifeline.GetAllRecords()
	ts.BE.PulseClient.SetNextFinalizedPulseFunc(ts.ConMngr.Importer)
	ts.StartBE(t)
	defer ts.StopBE(t)
	require.NoError(t, heavymock.ImportRecords(ts.ConMngr.ImporterClient, records))
	ts.WaitRecordsCount(t, len(records), 5000)
	c := GetHTTPClient()
	record := lifeline.GetStateRecords()[0]
	pn := record.Record.ID.Pulse()
	// randomJetID generates a jet id of at least 20 characters so it cannot
	// accidentally match a real (shorter) jet prefix.
	randomJetID := func() string {
		for {
			jd := converter.JetIDToString(gen.JetID())
			if len(jd) < 20 {
				continue
			}
			return jd
		}
	}
	jetID := converter.JetIDToString(record.Record.JetID)
	jetDropID := fmt.Sprintf("%v:%v", jetID, pn.String())
	objRef := insolar.NewReference(lifeline.ObjID).String()
	t.Run("get pulse", func(t *testing.T) {
		t.Log("C5157 Search by existing pulse_number")
		response := c.Search(t, pn.String())
		exp := client.SearchResponse200{
			Type: typePulse,
			Meta: client.SearchResponse200Meta{
				PulseNumber: int64(pn.AsUint32()),
			},
		}
		require.Equal(t, exp, response)
	})
	t.Run("get random pulse", func(t *testing.T) {
		t.Log("C5163 Search by nonexistent pulse_number")
		wrongPulse := lifeline.GetStateRecords()[0].Record.ID.Pulse() + 1000
		response := c.Search(t, wrongPulse.String())
		exp := client.SearchResponse200{
			Type: typePulse,
			Meta: client.SearchResponse200Meta{
				PulseNumber: int64(wrongPulse.AsUint32()),
			},
		}
		require.Equal(t, exp, response)
	})
	t.Run("get jetDrop", func(t *testing.T) {
		t.Log("C5159 Search by existing jetdrop_id")
		response := c.Search(t, jetDropID)
		exp := client.SearchResponse200{
			Type: typeJetDrop,
			Meta: client.SearchResponse200Meta{
				JetDropId: jetDropID,
			},
		}
		require.Equal(t, exp, response)
	})
	t.Run("get nonexisting jetDrop", func(t *testing.T) {
		t.Log("C5165 Search by nonexisting jetdrop_id")
		// A valid jet prefix paired with the pulse of a different record.
		jetDrop := fmt.Sprintf("%v:%v",
			strings.Split(converter.JetIDToString(records[0].Record.JetID), ":")[0],
			records[2].Record.ID.Pulse())
		response := c.Search(t, jetDrop)
		exp := client.SearchResponse200{
			Type: typeJetDrop,
			Meta: client.SearchResponse200Meta{
				JetDropId: jetDrop,
			},
		}
		require.Equal(t, exp, response)
	})
	t.Run("get object ref", func(t *testing.T) {
		t.Log("C5160 Search by existing object_reference")
		response := c.Search(t, objRef)
		exp := client.SearchResponse200{
			Type: typeRef,
			Meta: client.SearchResponse200Meta{
				ObjectReference: objRef,
			},
		}
		require.Equal(t, exp, response)
	})
	t.Run("get random object ref", func(t *testing.T) {
		t.Log("C5166 Search by nonexisting object_reference")
		randomRef := gen.Reference().String()
		response := c.Search(t, randomRef)
		exp := client.SearchResponse200{
			Type: typeRef,
			Meta: client.SearchResponse200Meta{
				ObjectReference: randomRef,
			},
		}
		require.Equal(t, exp, response)
	})
	t.Run("get record", func(t *testing.T) {
		t.Log("C5158 Search by existing record_ref")
		objRef := insolar.NewReference(lifeline.ObjID).String()
		r := lifeline.GetStateRecords()[0]
		id := r.Record.ID.Bytes()
		ref := insolar.NewRecordReference(*insolar.NewIDFromBytes(id)).String()
		pn := r.Record.ID.Pulse()
		response := c.Search(t, ref)
		exp := client.SearchResponse200{
			Type: typeRecord,
			Meta: client.SearchResponse200Meta{
				ObjectReference: objRef,
				Index: fmt.Sprintf("%v:%v", pn, "0"),
			},
		}
		require.Equal(t, exp, response)
	})
	// Negative cases: each value below must be rejected with HTTP 400.
	id := lifeline.ObjID
	invalidValue := "0qwerty123:!@:#$%^"
	jetDropWithBigLengthPrefix := fmt.Sprintf("%v:%v",
		strings.Repeat(jetDropID, 20),
		records[0].Record.ID.Pulse())
	// Bug fix: the original used string(math.MaxInt64), which is an
	// integer-to-rune conversion (flagged by go vet's stringintconv check)
	// and produced "\uFFFD" rather than a decimal number. Format the value
	// in decimal and append "1" so it exceeds MaxInt64 as intended.
	jetDropWithBigLengthPulse := fmt.Sprintf("%v:%v1",
		strings.Split(converter.JetIDToString(records[0].Record.JetID), ":")[0],
		int64(math.MaxInt64))
	randomNumbers := fmt.Sprintf("%v:%v",
		testutils.RandNumberOverRange(1, math.MaxInt32),
		testutils.RandNumberOverRange(1, math.MaxInt32))
	randomRecordRef := gen.RecordReference().String()
	tcs := []testCases{
		{"C5286 Search by zero value", "0", badRequest400, "zero value"},
		{"C5287 Search by empty value", "", badRequest400, "empty"},
		{"C5288 Search by random reference", id.String(), badRequest400, "reference"},
		{"C5161 Search by existing jet_Id, get error", randomJetID(), badRequest400, "jetID"},
		{"C5162 Search by invalid value", invalidValue, badRequest400, "invalid value"},
		{"C5168 Search by value with 1k chars", jetDropWithBigLengthPrefix, badRequest400, "big length jd pref"},
		{"C5289 Search by invalid jetdrop_id with very big pulse number, get error", jetDropWithBigLengthPulse, badRequest400, "big length jd pulse"},
		{"C5290 Search by very big number, get error", randomNumbers, badRequest400, "big number"},
		{"C5164 Search by nonexisting record_ref, get error", randomRecordRef, badRequest400, "random record ref"},
	}
	for _, tc := range tcs {
		t.Run(tc.testName, func(t *testing.T) {
			t.Log(tc.trTestCaseName)
			c.SearchWithError(t, tc.value, tc.expResult)
		})
	}
}
|
package ListOffsets
// Response is a ListOffsets response: per-topic, per-partition offsets.
//
// NOTE(review): the package name ListOffsets is not lowercase, contrary
// to Go convention — confirm before renaming, since it is imported elsewhere.
type Response struct {
	ThrottleTimeMs int32
	Responses      []TopicResponse
}

// TopicResponse groups the partition results for one topic.
type TopicResponse struct {
	Topic      string
	Partitions []PartitionResponse
}

// PartitionResponse carries the offset lookup result for one partition.
type PartitionResponse struct {
	Partition int32
	ErrorCode int16
	Timestamp int64
	Offset    int64
}

// Offset returns the offset recorded for the given topic/partition
// pair, or -1 when the pair is not present in the response.
func (r *Response) Offset(topic string, partition int32) int64 {
	for _, topicResp := range r.Responses {
		if topicResp.Topic != topic {
			continue
		}
		for _, partResp := range topicResp.Partitions {
			if partResp.Partition == partition {
				return partResp.Offset
			}
		}
	}
	return -1
}
|
package main
import (
"fmt"
"runtime/debug"
"sync"
"time"
)
// init disables automatic garbage collection so GC pauses do not
// distort the channel throughput benchmarks below.
func init() {
	debug.SetGCPercent(-1)
}
// count is the total number of messages pushed through the channels in
// each benchmark run.
const count = 10000000
// dispatchBenchmark measures throughput when c messages are sharded
// across a small pool of channels: each sender and receiver is pinned
// to one of balanceNum shards by index modulo.
//
// c is the total message count, buf the per-channel buffer size, and
// sender/receiver the goroutine counts on each side.
func dispatchBenchmark(c, buf, sender, receiver int) {
	start := time.Now()
	const balanceNum = 5
	chpool := make([]chan bool, 0, balanceNum)
	for i := 0; i < balanceNum; i++ {
		chpool = append(chpool, make(chan bool, buf))
	}
	// Receivers: each drains its shard until the shard is closed.
	for index := 0; index < receiver; index++ {
		idx := index
		go func() {
			for range chpool[idx%balanceNum] { // exits on close
			}
		}()
	}
	var wg sync.WaitGroup
	step := c / sender // remainder c%sender is dropped, as before
	for i := 0; i < sender; i++ {
		idx := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			ch := chpool[idx%balanceNum]
			for j := 0; j < step; j++ {
				ch <- true
			}
		}()
	}
	wg.Wait()
	for _, q := range chpool {
		close(q)
	}
	cost := time.Since(start)
	// BUG FIX: qps previously divided the package const `count` by cost
	// even when the caller passed a different c, and recomputed
	// time.Since(start) instead of reusing cost. Also fixed the
	// "recevier" typo in the output.
	fmt.Printf("dispatch count: %d, chan buf: %d, sender: %d, receiver: %d, time cost: %s, qps: %.2f \n",
		c, buf, sender, receiver, cost, float64(c)/cost.Seconds(),
	)
}
// directBenchmark measures throughput when all senders and receivers
// share a single channel.
//
// c is the total message count, buf the channel buffer size, and
// sender/receiver the goroutine counts on each side.
func directBenchmark(c, buf, sender, receiver int) {
	start := time.Now()
	ch := make(chan bool, buf)
	// Receivers: drain the shared channel until it is closed.
	for index := 0; index < receiver; index++ {
		go func() {
			for range ch { // exits on close
			}
		}()
	}
	var wg sync.WaitGroup
	step := c / sender // remainder c%sender is dropped, as before
	for i := 0; i < sender; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < step; j++ {
				ch <- true
			}
		}()
	}
	wg.Wait()
	// BUG FIX: ch was never closed, so every receiver goroutine leaked
	// (blocked forever on receive) after each benchmark run.
	close(ch)
	cost := time.Since(start)
	// BUG FIX: qps previously used the package const `count` instead of
	// c, and recomputed time.Since(start) instead of reusing cost.
	// Also fixed the "recevier" typo.
	fmt.Printf("direct count: %d, chan buf: %d, sender: %d, receiver: %d, time cost: %s, qps: %.2f \n",
		c, buf, sender, receiver, cost, float64(c)/cost.Seconds(),
	)
}
// main runs the full benchmark matrix: for several buffer sizes and
// worker counts, compare direct (single channel) against dispatch
// (sharded channels). The call sequence is identical to the original
// hand-unrolled version; it is just expressed as tables.
func main() {
	type cfg struct {
		buf     int // channel buffer size
		workers int // sender and receiver goroutine count
	}
	groups := [][]cfg{
		{{0, 10}, {50, 10}, {100, 10}, {1000, 10}},
		{{0, 50}, {100, 50}, {1000, 50}},
		{{0, 100}, {100, 100}, {1000, 100}},
		{{0, 200}, {100, 200}, {1000, 200}},
		{{0, 300}, {100, 300}, {1000, 300}},
	}
	for _, group := range groups {
		for _, c := range group {
			directBenchmark(count, c.buf, c.workers, c.workers)
			dispatchBenchmark(count, c.buf, c.workers, c.workers)
		}
		time.Sleep(1 * time.Second)
		fmt.Println()
	}
}
|
//go:build wireinject
// +build wireinject

package server
import (
"io"
"github.com/google/wire"
"github.com/spf13/pflag"
"github.com/suse/carrier/shim/app"
"github.com/suse/carrier/shim/app/configuration"
)
// BuildApp declares the application object graph for google/wire.
//
// This is a wire injector: the body is a placeholder that is never
// executed. Running `wire` generates the real implementation, which
// constructs the logger, configuration, and shim server and assembles
// them into an *app.App. The returned cleanup func tears resources down.
func BuildApp(log io.Writer, flags *pflag.FlagSet) (*app.App, func(), error) {
	wire.Build(
		wire.Struct(new(app.App), "*"),
		app.NewLogger,
		configuration.NewConfig,
		ShimServer,
	)
	// Placeholder return values; wire replaces this whole body.
	return &app.App{}, func() {}, nil
}
|
/*
* Copyright 2021 American Express
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package postprocess
import (
"strconv"
"strings"
)
//ValidSSN checks if a SSN meets standard
// ValidSSN reports whether ssn is a plausible US Social Security Number
// in AAA-GG-SSSS form (after trimming quotes/whitespace). Per SSA
// rules: area 000, 666 and 900-999 are never issued; group 00 and
// serial 0000 are invalid.
func ValidSSN(ssn string) bool {
	ssn = strings.Trim(ssn, "\"'\n ")
	groups := strings.Split(ssn, "-")
	if len(groups) != 3 {
		return false
	}
	// BUG FIX: enforce exact group widths. The old check accepted
	// malformed inputs such as "1-2-3" because it only range-checked
	// the parsed integers.
	if len(groups[0]) != 3 || len(groups[1]) != 2 || len(groups[2]) != 4 {
		return false
	}
	// Atoi errors are now checked explicitly instead of relying on the
	// zero value happening to fail the range checks.
	area, err := strconv.Atoi(groups[0])
	if err != nil || area == 0 || area == 666 || area >= 900 {
		return false // BUG FIX: 900-999 areas were previously accepted
	}
	group, err := strconv.Atoi(groups[1])
	if err != nil || group == 0 {
		return false
	}
	serial, err := strconv.Atoi(groups[2])
	if err != nil || serial == 0 {
		return false
	}
	return true
}
|
package models
// Author identifies a book author in JSON API payloads.
type Author struct {
	Firstname string `json:"firstname"` // given name
	Lastname  string `json:"lastname"`  // family name
}
package print
import (
"bytes"
"context"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tilt-dev/tilt/internal/tiltfile/starkit"
"github.com/tilt-dev/tilt/pkg/logger"
)
// TestWarn verifies that warn() output lands in the captured print log.
func TestWarn(t *testing.T) {
	fixture := newFixture(t)
	fixture.File("Tiltfile", "warn('problem 1')")
	_, err := fixture.ExecFile("Tiltfile")
	require.NoError(t, err)
	assert.Contains(t, fixture.PrintOutput(), "problem 1")
}
// TestFail verifies that fail() aborts execution with the given message.
func TestFail(t *testing.T) {
	fixture := newFixture(t)
	fixture.File("Tiltfile", "fail('problem 1')")
	_, err := fixture.ExecFile("Tiltfile")
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "problem 1")
	}
}
// TestExitArgTypes checks exit() with arguments of different types: the
// argument (if any) is printed, and execution halts before the fail().
func TestExitArgTypes(t *testing.T) {
	type tc struct {
		name        string
		exitArg     string // literal Starlark argument text for exit()
		expectedLog string // substring expected in print output; "" means no output
	}
	tcs := []tc{
		{"Omitted", ``, ""},
		{"String", `"goodbye"`, "goodbye"},
		{"StringNamed", `code='ciao'`, "ciao"},
		{"Int", `123`, "123"},
		{"Dict", `dict(foo='bar', baz=123)`, `{"foo": "bar", "baz": 123}`},
		{"None", `None`, ""},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			f := newFixture(t)
			f.File(
				"Tiltfile", fmt.Sprintf(`
exit(%s)
fail("this can't happen!")
`, tc.exitArg))
			_, err := f.ExecFile("Tiltfile")
			// exit() is a clean stop, not an error.
			require.NoError(t, err)
			out := f.PrintOutput()
			if tc.expectedLog == "" {
				assert.Empty(t, out)
			} else {
				assert.Contains(t, out, tc.expectedLog)
				// exit() must halt the script before fail() runs.
				assert.NotContains(t, out, "this can't happen!")
			}
		})
	}
}
// TestExitLoadedTiltfile verifies that exit() inside a load()ed
// Tiltfile halts the root Tiltfile as well.
func TestExitLoadedTiltfile(t *testing.T) {
	f := newFixture(t)
	f.File("exit.tiltfile", `exit("later alligator")`)
	// loaded Tiltfile can force the root Tiltfile to halt execution
	// i.e. it's more like `sys.exit(0)` than `return`
	f.File(
		"Tiltfile", `
load("./exit.tiltfile", "this_symbol_does_not_exist")
fail("this can't happen!")
`)
	_, err := f.ExecFile("Tiltfile")
	require.NoError(t, err)
	out := f.PrintOutput()
	assert.Contains(t, out, "later alligator")
	assert.NotContains(t, out, "this can't happen!")
}
// newFixture builds a starkit fixture whose print output and logger
// both write into one in-memory buffer, so tests can assert on output.
func newFixture(tb testing.TB) *starkit.Fixture {
	fixture := starkit.NewFixture(tb, NewPlugin())
	buf := bytes.NewBuffer(nil)
	fixture.SetOutput(buf)
	l := logger.NewLogger(logger.VerboseLvl, buf)
	fixture.SetContext(logger.WithLogger(context.Background(), l))
	return fixture
}
|
package viewmodel
// JourneyPlanVM ...
// JourneyPlanVM is the admin-facing view model for a journey plan:
// identity, schedule, assigned auditors, sites, questionnaires and an
// activity audit trail.
//
// FIX: the struct tags previously carried a stray leading space
// (` json:"…"`). reflect's tag parser skips leading spaces, so behavior
// is unchanged, but the tags are now consistent with the other VM
// types in this package.
type JourneyPlanVM struct {
	ID              uint     `json:"id"`
	Code            string   `json:"code"`
	JourneyName     string   `json:"journeyName"`
	AssignedAuditor string   `json:"assignedAuditor"`
	Auditors        []string `json:"auditors"`
	DepartmentKey   string   `json:"departmentID"`
	Type            string   `json:"type"`
	// Sites []SitesVM `json:"sites"`
	Sites []string `json:"sites"`
	// Questionnaires []QuestionnairesVM `json:"questionnaires"`
	Questionnaires   []string `json:"questionnaires"`
	Signatures       string   `json:"signatures"`
	RequireSelfie    bool     `json:"requireSelfie"`
	SelfieSignature  []string `json:"selfieSignature"`
	Person           string   `json:"person"`
	EmailTargets     []string `json:"emailTargets"`
	StartTimeJourney string   `json:"startTimeJourney"`
	EndTimeJourney   string   `json:"endTimeJourney"`
	CreatedAt        string   `json:"createdAt"`
	CreatedBy        string   `json:"createdBy"`
	UpdatedAt        string   `json:"updatedAt"`
	UpdatedBy        string   `json:"updatedBy"`
	JourneySchedule  int      `json:"journeySchedule"`
	DateCustom       []int    `json:"dateCustom"`
	DaysOfWeek       []int    `json:"daysOfWeek"`
	DateOfMonth      []int    `json:"datesOfMonth"`
	// AssignedAuditor []AssignedAuditorVM `json:"assignedAuditor"`
	Activity []ActivityVM `json:"activity"`
}
// SitesVM identifies a site assigned to a journey plan.
type SitesVM struct {
	SiteID string `json:"siteID"`
}

// QuestionnairesVM identifies a questionnaire assigned to a journey
// plan. The commented-out fields record a previously richer payload
// shape kept here for reference.
type QuestionnairesVM struct {
	QuestionnairesID string `json:"questionnairesID"`
	// EmailTargets []string `json:"emailTargets"`
	// EndTime *string `json:"endTime"`
	// HasDeadline bool `json:"hasDeadline"`
	// IsDoneToday bool `json:"isDoneToday"`
	// IsQuestionnaireExists bool `json:"isQuestionnaireExists"`
	// IsScheduledToday bool `json:"isScheduledToday"`
	// Key string `json:"key"`
	// QuestionnaireDetails QuestionnairesDetailsVM `json:"questionnaireDetails"`
	// QuestionnaireTitle string `json:"questionnaireTitle"`
	// ScheduledDates map[string]interface{} `json:"scheduledDates"`
	// // ScheduledDates ScheduledDatesVM `json:"scheduledDates"`
	// SelfieSignatures []string `json:"selfieSignatures"`
	// Signatures int `json:"signatures"`
	// StartTime *string `json:"startTime"`
	// TitleLowercase string `json:"titleLowercase"`
	// TotalCompleted int `json:"totalCompleted"`
	// TotalScheduled int `json:"totalScheduled"`
	// UnfinishedDates []string `json:"unfinishedDates"`
}

// QuestionnairesDetailsVM pairs a questionnaire key with its detail value.
type QuestionnairesDetailsVM struct {
	Key   string                       `json:"key"`
	Value QuestionnairesDetailsValueVM `json:"value"`
}

// ScheduledDatesVM marks one scheduled date and whether it was completed.
type ScheduledDatesVM struct {
	Date       string `json:"date"`
	IsComplete bool   `json:"isComplete"`
}
// QuestionnairesDetailsValueVM is the detail payload of a questionnaire:
// metadata plus its question list.
type QuestionnairesDetailsValueVM struct {
	Disabled        bool                 `json:"disabled"`
	Latest          string               `json:"latest"`
	OrganizationKey string               `json:"organizationKey"`
	Questionnaire   QuestionnaireArrayVM `json:"questionnaire"`
	Tags            string               `json:"tags"`
	Title           string               `json:"title"`
	Versions        string               `json:"versions"`
}

// QuestionnaireArrayVM is one questionnaire version with its questions
// and lifecycle metadata.
type QuestionnaireArrayVM struct {
	DateCreated        string `json:"dateCreated"`
	DateUpdated        string `json:"dateUpdated"`
	Disabled           bool   `json:"disabled"`
	ModifiedBy         string `json:"modifiedBy"`
	QuestionnaireIndex string `json:"questionnaireIndex"`
	// Questions []string `json:"questions"`
	Questions []QuestionVM `json:"questions"`
	Status    string       `json:"status"`
	Tags      string       `json:"tags"`
	Title     string       `json:"title"`
	Type      string       `json:"type"`
}

// QuestionVM is a single question: its content, answer constraints and
// scoring/attachment limits.
type QuestionVM struct {
	Answer         string                 `json:"answer"`
	AnswerRequired bool                   `json:"answerRequired"`
	Category       string                 `json:"category"`
	Comment        string                 `json:"comment"`
	Content        string                 `json:"content"`
	FlagLabel      map[string]interface{} `json:"flagLabel"`
	PhotoLimit     int                    `json:"photoLimit"`
	PhotoMinimum   int                    `json:"photoMinimum"`
	Reference      string                 `json:"reference"`
	Remedy         string                 `json:"remedy"`
	Score          int                    `json:"score"`
	ScoreWeight    int                    `json:"scoreWeight"`
	Sku            string                 `json:"sku"`
	Tags           map[string]interface{} `json:"tags"`
	Type           string                 `json:"type"`
	VideoLimit     int                    `json:"videoLimit"`
	VideoMinimum   int                    `json:"videoMinimum"`
}

// AssignedAuditorVM identifies an auditor assigned to a journey plan.
type AssignedAuditorVM struct {
	UserID string `json:"userID"`
}

// EmailTargetsVM is a single report email recipient.
type EmailTargetsVM struct {
	Email string `json:"email"`
}

// ActivityVM is one audit-trail entry: who acted and when.
type ActivityVM struct {
	// UserID string `json:"userID"`
	Username string `json:"username"`
	Datetime string `json:"datetime"`
}
// JourneyPlanMobileVM is the mobile-facing journey plan payload.
//
// NOTE(review): Code is serialized under the JSON key "id" (not
// "code") — looks intentional for the mobile API, but confirm with the
// client. The struct tags carry a stray leading space (` json:"…"`);
// reflect's tag parser skips leading spaces so they still work, but
// they are inconsistent with other VMs in this package.
type JourneyPlanMobileVM struct {
	// ID uint ` json:"id"`
	Code                  string             ` json:"id"`
	Name                  string             ` json:"name"`
	StartTime             string             ` json:"startTime"`
	EndTime               string             ` json:"endTime"`
	Type                  string             ` json:"type"`
	Schedule              int                ` json:"schedule"`
	Priority              bool               ` json:"priority"`
	Language              string             ` json:"language"`
	Signatures            string             ` json:"signatures"`
	SelfieSignature       bool               ` json:"selfieSignature"`
	Person                string             ` json:"person"`
	Questionnaires        []QuestionnairesVM ` json:"questionnaires"`
	Sites                 []SitesVM          ` json:"site"`
	IsDueToday            bool               ` json:"isDueToday"`
	IsDraft               bool               ` json:"isDraft"`
	IsMakeUp              bool               ` json:"isMakeUp"`
	TodayCompletedCount   int                ` json:"todayCompletedCount"`
	CompletedCount        int                ` json:"completedCount"`
	TodayScheduleCount    int                ` json:"todayScheduleCount"`
	IsCompletedToday      bool               ` json:"isCompletedToday"`
	IsCompletedThisPeriod bool               ` json:"isCompletedThisPeriod"`
	ScheduleCount         int                ` json:"scheduleCount"`
	IsScheduleThisPeriod  bool               ` json:"isScheduleThisPeriod"`
	// CreatedAt string ` json:"createdAt"`
	// CreatedBy string ` json:"createdBy"`
	// UpdatedAt string ` json:"updatedAt"`
	// UpdatedBy string ` json:"updatedBy"`
}
// ReportJourneyPlanVM is the reporting view of a journey plan:
// schedule, assignments, generated reports and GPS tracking samples.
// (Tags keep the package's stray-leading-space style; see note on
// JourneyPlanMobileVM.)
type ReportJourneyPlanVM struct {
	ID              uint                ` json:"id"`
	Code            string              ` json:"code"`
	JourneyName     string              ` json:"journeyName"`
	DepartmentKey   string              ` json:"departmentID"`
	JourneySchedule int                 ` json:"journeySchedule"`
	DateCustom      []int               ` json:"dateCustom"`
	DaysOfWeek      []int               ` json:"daysOfWeek"`
	DateOfMonth     []int               ` json:"dateOfMonth"`
	AssignedAuditor []AssignedAuditorVM ` json:"assignedAuditor"`
	Sites           []SitesVM           ` json:"sites"`
	Questionnaires  []QuestionnairesVM  ` json:"questionnaires"`
	// EmailTargets []EmailTargetsVM ` json:"emailTargets"`
	// Activity []ActivityVM ` json:"activity"`
	// Reports []ReportsVM ` json:"reports"`
	Reports         []ReportsVM         ` json:"reports"`
	TrackingTimeGPS []TrackingTimeGPSVM ` json:"trackingTimeGPS"`
	Signatures      string              ` json:"signatures"`
	StartJourney    string              ` json:"startJourney"`
	FinishJourney   string              ` json:"finishJourney"`
	CreatedAt       string              ` json:"createdAt"`
}

// TrackingTimeGPSVM is a single GPS sample taken during a journey.
type TrackingTimeGPSVM struct {
	TrackingTime string        `json:"trackingTime"`
	Coordinates  CoordinatesVM `json:"coordinates"`
}

// CoordinatesVM is a latitude/longitude pair, stored as strings.
type CoordinatesVM struct {
	Lat  string `json:"lat"`
	Long string `json:"long"`
}
// GetAllJourneyPlanMobileVM is the trimmed list-item payload for the
// mobile "all journey plans" endpoint. As with JourneyPlanMobileVM,
// Code is serialized under the JSON key "id".
type GetAllJourneyPlanMobileVM struct {
	Code     string ` json:"id"`
	Name     string ` json:"name"`
	Type     string ` json:"type"`
	Schedule int    ` json:"schedule"`
	Priority bool   ` json:"priority"`
	Language string ` json:"language"`
	// IsDueToday bool ` json:"isDueToday"`
	// IsDraft bool ` json:"isDraft"`
	// IsMakeUp bool ` json:"isMakeUp"`
	TodayCompletedCount int ` json:"todayCompletedCount"`
	CompletedCount      int ` json:"completedCount"`
	// TodayScheduleCount int ` json:"todayScheduleCount"`
	// IsCompletedToday bool ` json:"isCompletedToday"`
	// IsCompletedThisPeriod bool ` json:"isCompletedThisPeriod"`
	// ScheduleCount int ` json:"scheduleCount"`
	// IsScheduleThisPeriod bool ` json:"isScheduleThisPeriod"`
}

// ReportsVM describes one generated report: who produced it, for which
// journey, where it is stored and the covered time range.
type ReportsVM struct {
	ReportID   string `json:"reportID"`
	UserID     string `json:"userID"`
	JourneyID  string `json:"journeyID"`
	URL        string `json:"url"`
	Start      string `json:"start"`
	End        string `json:"end"`
	ReportDate string `json:"reportDate"`
}

// GetIntervalVM carries the GPS tracking interval, in seconds.
type GetIntervalVM struct {
	TimePerSecond int ` json:"timePerSecond"`
}
|
package timekey
import (
"os"
"testing"
"time"
)
var (
	// testFile lists repo-local files used as fixtures. The tests below
	// read them from the working directory, so they must exist on disk
	// when `go test` runs in this package.
	testFile = []string{"mime_data.go", "fid.go"}
)
// TestKey checks that a freshly generated fid embeds a timestamp close
// to the current time (within one minute).
func TestKey(t *testing.T) {
	started := time.Now()
	t.Log("now:", started)
	fid, err := NewFid("2", testFile[1])
	if err != nil {
		t.Fatal(err)
	}
	t.Log("fid.Time()", fid.Time())
	if started.Sub(fid.Time()).Minutes() > 1.0 {
		t.Fatal("time not match")
	}
}
// TestCookie exercises fid metadata extraction (mime type and size) and
// logs the on-disk size for manual comparison.
//
// NOTE(review): despite the name, nothing here involves cookies —
// consider renaming to something like TestFidMetadata. The test also
// only logs the two sizes without asserting they match; confirm whether
// that is intentional.
func TestCookie(t *testing.T) {
	fid, err := NewFid("2", testFile[0])
	if err != nil {
		t.Fatal(err)
	}
	t.Log("fid.MimeType():", fid.MimeType())
	t.Log("fid.Size():", fid.Size(), "KB")
	info, err := os.Stat(testFile[0])
	if err != nil {
		t.Fatal(err)
	}
	t.Log("real file size:", info.Size()/1024)
}
|
package retry
import (
"fmt"
"sync"
"sync/atomic"
"testing"
)
// count is the shared attempt counter used by the retry tests.
var count int64

// get atomically reads the counter.
//
// BUG FIX: the previous plain read of count raced with the
// atomic.AddInt64 in incCAS under the Go memory model (flagged by
// `go test -race`).
func get() int64 {
	return atomic.LoadInt64(&count)
}

// incCAS atomically increments the counter. (Despite the name it uses
// Add, not compare-and-swap.)
func incCAS() {
	atomic.AddInt64(&count, 1)
}
// TestDo hammers Do from 2*limit concurrent workers and expects exactly
// `limit` successful increments and exactly `limit` workers to error out.
func TestDo(t *testing.T) {
	const limit int64 = 5
	var errCnt int64 // BUG FIX: updated atomically; the old errCnt++ was a data race across worker goroutines
	var wg sync.WaitGroup
	worker := func() {
		defer wg.Done()
		err := Do(func() error {
			// NOTE(review): get()+incCAS is not a single atomic step, so
			// two workers could in principle both pass the limit check;
			// the assertions below rely on Do/Condition serializing
			// enough in practice — confirm against the retry package.
			if get() < limit {
				incCAS()
				return nil
			}
			return fmt.Errorf("exceed limit: %d", limit)
		}, Condition(func() (bool, error) {
			if get() >= limit {
				return false, fmt.Errorf("exit retry")
			}
			return true, nil
		}), Delay(10))
		if err != nil {
			atomic.AddInt64(&errCnt, 1)
		}
	}
	workers := int(limit * 2)
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go worker()
	}
	wg.Wait()
	if get() != limit {
		t.Errorf("expected %d, got %d", limit, get())
	}
	if got := atomic.LoadInt64(&errCnt); got != limit {
		t.Errorf("expected %d, got %d", limit, got)
	}
}
|
package format
import (
"regexp"
"strings"
"github.com/Karitham/handlergen/gen"
)
// Oapi is the subset of an OpenAPI document this generator consumes:
// path -> HTTP method -> route description.
type Oapi struct {
	Paths map[string]map[string]Route `yaml:"paths"`
}

// Route describes a single operation (one HTTP method on one path).
type Route struct {
	// OperationID must be exported for the yaml decoder to populate it.
	// BUG FIX: it was previously the unexported field `operationID`,
	// which yaml can never set, so the spec's operationId was silently
	// ignored and every handler name fell back to the path-derived form.
	OperationID string      `yaml:"operationId"`
	Parameters  []Parameter `yaml:"parameters"`
}

// Parameter is one operation parameter (query, path, header or body).
type Parameter struct {
	Schema Schema `yaml:"schema"`
	In     string `yaml:"in"`
	Name   string `yaml:"name"`
}

// Schema carries just the parameter's declared type.
type Schema struct {
	Type string `yaml:"type"`
}

// mapOapi converts a parsed OpenAPI document into the generator's
// template model, collecting the extra imports each parameter type
// needs. Routes with neither parameters nor a body are skipped.
func mapOapi(f *Oapi, pkg string) gen.Template {
	t := gen.Template{
		Imports: []gen.Import{
			{Path: "net/http"},
		},
		PkgName: pkg,
	}
	for p, path := range f.Paths {
		for k, route := range path {
			gf := gen.Function{
				Name: formatOAPIName(k, p, route.OperationID),
			}
			for _, param := range route.Parameters {
				switch param.In {
				case "query":
					gf.QueryParams = append(gf.QueryParams, gen.QueryParam{
						Name: param.Name,
						Type: parseTypesQuery(&t, param.Schema.Type),
					})
				case "path":
					gf.QueryParams = append(gf.QueryParams, gen.QueryParam{
						Name: param.Name,
						Type: parseTypesPath(&t, param.Schema.Type),
					})
				case "header":
					gf.QueryParams = append(gf.QueryParams, gen.QueryParam{
						Name: param.Name,
						Type: parseTypesHeader(&t, param.Schema.Type),
					})
				case "body":
					gf.Body = gen.Body{
						Type: param.Schema.Type,
					}
					t.Imports = appendOnceImports(t.Imports, gen.Import{Path: "encoding/json"})
					if gf.Body.Type != "" {
						t.Imports = appendOnceImports(t.Imports, gen.Import{Path: gf.Body.Type})
					}
				}
			}
			gf.HasBody = gf.Body.Type != ""
			gf.HasQueryParams = len(gf.QueryParams) > 0
			if !gf.HasBody && !gf.HasQueryParams {
				continue
			}
			t.Functions = append(t.Functions, gf)
		}
	}
	return t
}
// sanitizeRegex rewrites path parameters like {id} into "By/id" so they
// read naturally in generated handler names (e.g. GetUsersById).
var sanitizeRegex = regexp.MustCompile(`\{([\w\d]+)\}`)

// formatOAPIName derives a Go-style handler name for an operation. When
// the spec supplies an operationId it wins; otherwise the name is built
// from the HTTP method followed by the title-cased path segments.
func formatOAPIName(op, path, name string) string {
	if name != "" {
		return strings.Title(name)
	}
	path = sanitizeRegex.ReplaceAllString(path, "By/$1")
	segments := strings.Split(strings.Title(path), "/")
	// FIX: the builder was previously named `new`, shadowing the
	// builtin; renamed for clarity.
	var b strings.Builder
	b.WriteString(strings.Title(op))
	for _, segment := range segments {
		b.WriteString(strings.Title(segment))
	}
	return b.String()
}
// parseTypesQuery maps an OpenAPI query-parameter type to the Go type
// used in the generated handler, registering any import the conversion
// code needs (strconv for ints, strings for arrays). Unknown types are
// passed through unchanged.
func parseTypesQuery(t *gen.Template, typ string) string {
	switch typ {
	case "int", "integer":
		t.Imports = appendOnceImports(t.Imports, gen.Import{Path: "strconv"})
		return "int"
	case "array":
		t.Imports = appendOnceImports(t.Imports, gen.Import{Path: "strings"})
		return "[]string"
	case "string":
		return "string"
	}
	return typ
}

// parseTypesPath maps an OpenAPI path-parameter type to the generator's
// marker type, always registering the chi router import used to read
// path params.
//
// NOTE(review): the returned markers carry a "_query" suffix even
// though this is the *path* parser — looks like a copy-paste of
// parseTypesQuery ("int_path"/"string_path" would be expected). Confirm
// against the templates in the gen package before changing.
func parseTypesPath(t *gen.Template, typ string) string {
	t.Imports = appendOnceImports(t.Imports, gen.Import{Path: "github.com/go-chi/chi/v5"})
	switch typ {
	case "int", "integer":
		t.Imports = appendOnceImports(t.Imports, gen.Import{Path: "strconv"})
		return "int_query"
	case "array":
		t.Imports = appendOnceImports(t.Imports, gen.Import{Path: "strings"})
		return "[]string_query"
	case "string":
		return "string_query"
	}
	return typ
}

// parseTypesHeader maps an OpenAPI header-parameter type to the
// generator's "_header" marker type, registering needed imports.
func parseTypesHeader(t *gen.Template, typ string) string {
	switch typ {
	case "int", "integer":
		t.Imports = appendOnceImports(t.Imports, gen.Import{Path: "strconv"})
		return "int_header"
	case "array":
		t.Imports = appendOnceImports(t.Imports, gen.Import{Path: "strings"})
		return "[]string_header"
	case "string":
		return "string_header"
	}
	return typ
}
// appendOnceImports appends each candidate import to imps unless an
// equal entry is already present, preserving first-appearance order.
//
// BUG FIX: the previous version iterated the existing imports in the
// outer loop, so (a) nothing was ever appended when imps was empty and
// (b) each candidate was appended once per non-matching existing entry,
// producing duplicates instead of preventing them.
func appendOnceImports(imps []gen.Import, candidates ...gen.Import) []gen.Import {
	for _, candidate := range candidates {
		exists := false
		for _, existing := range imps {
			if existing == candidate {
				exists = true
				break
			}
		}
		if !exists {
			imps = append(imps, candidate)
		}
	}
	return imps
}
|
package main
/*
struct CredentialSpec{
char *Name;
char *Get;
char *From;
};
*/
import "C"
import (
"fmt"
"reflect"
"unsafe"
"github.com/cyberark/secretless-broker/internal/plugin"
"github.com/cyberark/secretless-broker/internal/plugin/connectors/tcp/mysql/protocol"
pluginv1 "github.com/cyberark/secretless-broker/internal/plugin/v1"
"github.com/cyberark/secretless-broker/internal/providers"
configv2 "github.com/cyberark/secretless-broker/pkg/secretless/config/v2"
)
// ZeroizeByteSlice overwrites every byte of bs with zero so secret
// material does not linger in memory. Safe to call with a nil slice.
func ZeroizeByteSlice(bs []byte) {
	for i := range bs {
		bs[i] = 0
	}
}
// ByteBoundString returns a string backed by the given []byte.
//
// The string shares b's backing memory (no copy is made), so zeroizing
// b also wipes the string's bytes — useful for secret handling — but
// the string must not be used after b is modified or freed.
//
// NOTE(review): reflect.SliceHeader/StringHeader are deprecated since
// Go 1.20 in favor of unsafe.String/unsafe.SliceData; confirm the
// module's minimum Go version before migrating.
func ByteBoundString(b []byte) string {
	header := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bytesHeader := &reflect.StringHeader{
		Data: header.Data,
		Len:  header.Len,
	}
	return *(*string)(unsafe.Pointer(bytesHeader))
}
// NewCredential creates a Credential from the given C struct, copying
// each C string into Go-managed memory.
func NewCredential(ref C.struct_CredentialSpec) *configv2.Credential {
	return &configv2.Credential{
		Name: C.GoString(ref.Name),
		// BUG FIX: the C struct in the cgo preamble declares the fields
		// Name, Get and From; the previous ref.ID / ref.Provider do not
		// exist and fail to compile.
		Get:  C.GoString(ref.Get),
		From: C.GoString(ref.From),
	}
}
// GetCredentialValues returns credential values. Specifically, a map whose keys are the
// credential IDs requested, and whose values are the values of those credentials.
func GetCredentialValues(credentialSpecs []*configv2.Credential) (map[string][]byte, error) {
	// Load all internal Providers into a fresh factory map; the resolver
	// takes ownership of this copy.
	providerFactories := make(map[string]func(pluginv1.ProviderOptions) (pluginv1.Provider, error))
	for providerID, providerFactory := range providers.ProviderFactories {
		providerFactories[providerID] = providerFactory
	}
	resolver := plugin.NewResolver(providerFactories, nil, nil)
	return resolver.Resolve(credentialSpecs)
}
// GetCredentialValue returns a C *char with the given credential's
// value. The caller owns (and must free) the returned C string.
//
// BUG FIX: the cgo export directive must be written exactly "//export"
// with no space after "//"; the previous "// export" was an ordinary
// comment, so the symbol was never exported to C.
//
//export GetCredentialValue
func GetCredentialValue(cRef C.struct_CredentialSpec) *C.char {
	return C.CString(GetCredentialValueByteString(cRef))
}
// GetCredentialValueByteString returns the credential value for the
// given CredentialSpec ref, as a string that shares the resolved
// credential's backing bytes (see ByteBoundString).
//
// NOTE(review): the resolver error is only printed, not propagated —
// callers cannot distinguish "no such credential" from an empty value.
func GetCredentialValueByteString(cRef C.struct_CredentialSpec) string {
	ref := NewCredential(cRef)
	credentials, err := GetCredentialValues([]*configv2.Credential{ref})
	if err != nil {
		fmt.Println("Error fetching credential")
		return ByteBoundString(nil)
	}
	return ByteBoundString(credentials[ref.Name])
}
// NativePassword returns the given CredentialSpec value hashed with
// MySQL's native-password scheme as a C *char; the caller must free it.
// Intermediate secret buffers are zeroized before returning.
//
// BUG FIX: the cgo export directive must be written exactly "//export"
// (no space); the previous "// export" was an ordinary comment and the
// symbol was never exported to C.
//
//export NativePassword
func NativePassword(cRef C.struct_CredentialSpec, salt *C.char) *C.char {
	passwordBytes := []byte(GetCredentialValueByteString(cRef))
	defer ZeroizeByteSlice(passwordBytes)

	// The MySQL handshake salt is fixed at 8 bytes here.
	saltBytes := C.GoBytes(unsafe.Pointer(salt), C.int(8))
	defer ZeroizeByteSlice(saltBytes)

	// nativePassword = passwordSHA1 ^ randomSHA1
	// NOTE(review): the error from protocol.NativePassword is discarded;
	// confirm it cannot fail for 8-byte salts.
	nativePassword, _ := protocol.NativePassword(passwordBytes, saltBytes)
	defer ZeroizeByteSlice(nativePassword)

	// C.CString copies the bytes before the deferred zeroization runs.
	return C.CString(ByteBoundString(nativePassword))
}
// main is never run; it exists only because building a cgo c-shared /
// c-archive library requires package main with a main function.
func main() {}
|
package utils
// Object is an empty interface that matches any value.
//
// NOTE(review): since Go 1.18 the builtin `any` expresses this
// directly; consider migrating callers and retiring this type.
type Object interface{}
|
package client
import (
"fmt"
"strings"
"time"
)
// handleSYST replies to SYST; we always claim a UNIX (L8) system type,
// which is what most FTP clients expect.
func (c *Handler) handleSYST() {
	c.WriteMessage(StatusSystemType, "UNIX Type: L8")
}

// handleSTAT dispatches STAT: with no argument it reports server
// status, with a file/dir argument it reports that path's status.
func (c *Handler) handleSTAT() {
	if c.param == "" { // Without a file, it's the server stat.
		c.handleSTATServer()
	} else { // With a file/dir it's the file or the dir's files stat.
		c.handleSTATFile()
	}
}

// handleSTATServer writes a multi-line 213 response describing the
// connection: peer address, connection duration and logged-in user.
func (c *Handler) handleSTATServer() {
	c.writeLine("213- FTP server status:")
	duration := time.Now().UTC().Sub(c.connectedAt).Round(time.Second)
	c.writeLine(fmt.Sprintf(
		"Connected to %s:%d from %s for %s",
		c.serverSetting.ListenHost, c.serverSetting.ListenPort,
		c.remoteAddr,
		duration,
	))
	c.writeLine(fmt.Sprintf("Logged in as %s", c.loginUser))
	c.writeLine("ftpserver - golang FTP server")
	c.WriteMessage(StatusFileStatus, "End")
}
// handleOPTS processes OPTS; the only recognized option is UTF8, which
// is acknowledged as always-on. Anything else gets a syntax error.
func (c *Handler) handleOPTS() {
	args := strings.SplitN(c.param, " ", 2)
	if strings.ToUpper(args[0]) != "UTF8" {
		c.WriteMessage(StatusSyntaxErrorNotRecognised, "Don't know this option")
		return
	}
	c.WriteMessage(StatusOK, "I'm in UTF8 only anyway")
}
// handleNOOP acknowledges keep-alive NOOP commands with 200 OK.
func (c *Handler) handleNOOP() {
	c.WriteMessage(StatusOK, "OK")
}
// handleFEAT advertises the server's supported FEAT extensions as a
// multi-line 211 response terminated by "End".
func (c *Handler) handleFEAT() {
	c.writeLine("211- These are my features")
	for _, feature := range []string{"UTF8", "SIZE", "MDTM", "REST STREAM"} {
		c.writeLine(" " + feature)
	}
	c.WriteMessage(StatusSystemStatus, "End")
}
// handleTYPE sets the transfer type; only binary ("I") and ASCII ("A")
// are recognized, everything else is rejected.
func (c *Handler) handleTYPE() {
	switch c.param {
	case "I":
		c.WriteMessage(StatusOK, "Type set to binary")
	case "A":
		c.WriteMessage(StatusOK, "Type set to ASCII")
	default:
		c.WriteMessage(StatusSyntaxErrorNotRecognised, "Not understood")
	}
}

// handleQUIT says goodbye and tears down the control connection.
func (c *Handler) handleQUIT() {
	c.WriteMessage(StatusClosingControlConn, "Goodbye")
	c.disconnect()
}

// handleABOR aborts any in-flight transfer: cancel the running command,
// close the data connection, then wait for the command goroutine to
// finish before acknowledging.
func (c *Handler) handleABOR() {
	c.commandAbortCancelFn()   // abort command
	c.TransferClose()          // close transfer connection
	c.commandRunningWg.Wait()  // wait for command abort
	c.WriteMessage(StatusClosingDataConn, "abort command was successfully processed")
}
|
package wire
import (
"errors"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
// mockFIBeneficiary creates a FIBeneficiary with all six FIToFI lines
// populated; the tests below use it as a known-good baseline and then
// corrupt individual fields.
func mockFIBeneficiary() *FIBeneficiary {
	fib := NewFIBeneficiary()
	fib.FIToFI.LineOne = "Line One"
	fib.FIToFI.LineTwo = "Line Two"
	fib.FIToFI.LineThree = "Line Three"
	fib.FIToFI.LineFour = "Line Four"
	fib.FIToFI.LineFive = "Line Five"
	fib.FIToFI.LineSix = "Line Six"
	return fib
}

// TestMockFIBeneficiary validates mockFIBeneficiary so the remaining
// tests can rely on it being a valid record.
func TestMockFIBeneficiary(t *testing.T) {
	fib := mockFIBeneficiary()
	require.NoError(t, fib.Validate(), "mockFIBeneficiary does not validate and will break other tests")
}
// TestFIBeneficiaryLineOneAlphaNumeric validates FIBeneficiary LineOne is alphanumeric.
// "®" is used throughout these tests as a probe character outside the
// allowed alphanumeric set.
func TestFIBeneficiaryLineOneAlphaNumeric(t *testing.T) {
	fib := mockFIBeneficiary()
	fib.FIToFI.LineOne = "®"
	err := fib.Validate()
	require.EqualError(t, err, fieldError("LineOne", ErrNonAlphanumeric, fib.FIToFI.LineOne).Error())
}

// TestFIBeneficiaryLineTwoAlphaNumeric validates FIBeneficiary LineTwo is alphanumeric.
func TestFIBeneficiaryLineTwoAlphaNumeric(t *testing.T) {
	fib := mockFIBeneficiary()
	fib.FIToFI.LineTwo = "®"
	err := fib.Validate()
	require.EqualError(t, err, fieldError("LineTwo", ErrNonAlphanumeric, fib.FIToFI.LineTwo).Error())
}

// TestFIBeneficiaryLineThreeAlphaNumeric validates FIBeneficiary LineThree is alphanumeric.
func TestFIBeneficiaryLineThreeAlphaNumeric(t *testing.T) {
	fib := mockFIBeneficiary()
	fib.FIToFI.LineThree = "®"
	err := fib.Validate()
	require.EqualError(t, err, fieldError("LineThree", ErrNonAlphanumeric, fib.FIToFI.LineThree).Error())
}

// TestFIBeneficiaryLineFourAlphaNumeric validates FIBeneficiary LineFour is alphanumeric.
func TestFIBeneficiaryLineFourAlphaNumeric(t *testing.T) {
	fib := mockFIBeneficiary()
	fib.FIToFI.LineFour = "®"
	err := fib.Validate()
	require.EqualError(t, err, fieldError("LineFour", ErrNonAlphanumeric, fib.FIToFI.LineFour).Error())
}

// TestFIBeneficiaryLineFiveAlphaNumeric validates FIBeneficiary LineFive is alphanumeric.
func TestFIBeneficiaryLineFiveAlphaNumeric(t *testing.T) {
	fib := mockFIBeneficiary()
	fib.FIToFI.LineFive = "®"
	err := fib.Validate()
	require.EqualError(t, err, fieldError("LineFive", ErrNonAlphanumeric, fib.FIToFI.LineFive).Error())
}

// TestFIBeneficiaryLineSixAlphaNumeric validates FIBeneficiary LineSix is alphanumeric.
func TestFIBeneficiaryLineSixAlphaNumeric(t *testing.T) {
	fib := mockFIBeneficiary()
	fib.FIToFI.LineSix = "®"
	err := fib.Validate()
	require.EqualError(t, err, fieldError("LineSix", ErrNonAlphanumeric, fib.FIToFI.LineSix).Error())
}
// TestParseFIBeneficiaryWrongLength parses a FIBeneficiary record whose
// wire line has the wrong length and expects a LineSix length error.
func TestParseFIBeneficiaryWrongLength(t *testing.T) {
	var line = "{6400}Line Six "
	r := NewReader(strings.NewReader(line))
	r.line = line
	err := r.parseFIBeneficiary()
	require.EqualError(t, err, r.parseError(fieldError("LineSix", ErrValidLength)).Error())
}

// TestParseFIBeneficiaryReaderParseError checks that a non-alphanumeric
// character surfaces the same parse error from both the low-level
// parser and the Reader.Read path.
func TestParseFIBeneficiaryReaderParseError(t *testing.T) {
	var line = "{6400}Line Si® "
	r := NewReader(strings.NewReader(line))
	r.line = line
	err := r.parseFIBeneficiary()
	expected := r.parseError(fieldError("LineOne", ErrNonAlphanumeric, "Line Si®")).Error()
	require.EqualError(t, err, expected)
	_, err = r.Read()
	expected = r.parseError(fieldError("LineOne", ErrNonAlphanumeric, "Line Si®")).Error()
	require.EqualError(t, err, expected)
}

// TestFIBeneficiaryTagError validates that a wrong tag value fails
// Validate with ErrValidTagForType.
func TestFIBeneficiaryTagError(t *testing.T) {
	fib := mockFIBeneficiary()
	fib.tag = "{9999}"
	err := fib.Validate()
	require.EqualError(t, err, fieldError("tag", ErrValidTagForType, fib.tag).Error())
}
// TestStringFIBeneficiaryVariableLength parses records using the
// variable-length ("*"-delimited) wire format: an empty record and a
// single delimiter parse cleanly, while over-long or over-delimited
// input fails with a tag max-length error.
func TestStringFIBeneficiaryVariableLength(t *testing.T) {
	var line = "{6400}"
	r := NewReader(strings.NewReader(line))
	r.line = line
	err := r.parseFIBeneficiary()
	require.Nil(t, err)
	line = "{6400} NNN"
	r = NewReader(strings.NewReader(line))
	r.line = line
	err = r.parseFIBeneficiary()
	require.ErrorContains(t, err, r.parseError(NewTagMaxLengthErr(errors.New(""))).Error())
	line = "{6400}********"
	r = NewReader(strings.NewReader(line))
	r.line = line
	err = r.parseFIBeneficiary()
	require.ErrorContains(t, err, r.parseError(NewTagMaxLengthErr(errors.New(""))).Error())
	line = "{6400}*"
	r = NewReader(strings.NewReader(line))
	r.line = line
	err = r.parseFIBeneficiary()
	require.Equal(t, err, nil)
}

// TestStringFIBeneficiaryOptions validates Format() output with and
// without VariableLengthFields: String() pads to fixed width, the
// variable-length form keeps the "*" delimiter, and Format with
// VariableLengthFields=false matches String().
func TestStringFIBeneficiaryOptions(t *testing.T) {
	var line = "{6400}*"
	r := NewReader(strings.NewReader(line))
	r.line = line
	err := r.parseFIBeneficiary()
	require.Equal(t, err, nil)
	record := r.currentFEDWireMessage.FIBeneficiary
	require.Equal(t, record.String(), "{6400} ")
	require.Equal(t, record.Format(FormatOptions{VariableLengthFields: true}), "{6400}*")
	require.Equal(t, record.String(), record.Format(FormatOptions{VariableLengthFields: false}))
}
|
package routing_rules
import (
"go_chaos/http_util"
"go_chaos/util"
"testing"
)
// MockHttpRequest is a minimal http_util.HttpRequest stub for routing
// rule tests.
type MockHttpRequest struct {
	path    string            // value returned by Path()
	headers map[string]string // value returned by Headers(); may be nil
}

// NewMockHttpRequest builds a stub request with the given path and
// headers.
func NewMockHttpRequest(path string, headers map[string]string) http_util.HttpRequest {
	return &MockHttpRequest{
		path:    path,
		headers: headers,
	}
}

// Path returns the stubbed request path.
func (t MockHttpRequest) Path() string {
	return t.path
}

// Headers returns the stubbed header map.
func (t MockHttpRequest) Headers() map[string]string {
	return t.headers
}
// TestNewPathEqualRuleApplies checks that an Equal path rule matches
// only the exact path, not prefixes or near-misses.
func TestNewPathEqualRuleApplies(t *testing.T) {
	// setup
	pathRule := NewPathRule(util.Equal, "/test/athena", Route{
		Host: "localhost",
		Port: 9000,
	})
	// execute
	prefixOnly := pathRule.Applies(NewMockHttpRequest("/test/", nil))
	exact := pathRule.Applies(NewMockHttpRequest("/test/athena", nil))
	differentPrefix := pathRule.Applies(NewMockHttpRequest("/tester/athena", nil))
	// verify
	// FIX: bare t.Fail() gave no indication of which case broke;
	// report each failure with a message instead.
	if prefixOnly {
		t.Error("rule should not apply to prefix-only path /test/")
	}
	if !exact {
		t.Error("rule should apply to the exact path /test/athena")
	}
	if differentPrefix {
		t.Error("rule should not apply to /tester/athena")
	}
}
// TestNewHeaderEqualRuleApplies checks that an Equal header rule
// matches only the exact header value.
func TestNewHeaderEqualRuleApplies(t *testing.T) {
	// setup
	headerRule := NewHeaderRule("Connection", "keep", util.Equal, Route{
		Host: "localhost",
		Port: 8000,
	})
	superstring := headerRule.Applies(NewMockHttpRequest("/no", map[string]string{
		"Connection": "keep-alive",
	}))
	exact := headerRule.Applies(NewMockHttpRequest("/no", map[string]string{
		"Connection": "keep",
	}))
	different := headerRule.Applies(NewMockHttpRequest("/no", map[string]string{
		"Connection": "sleep",
	}))
	// FIX: bare t.Fail() gave no indication of which case broke;
	// report each failure with a message instead.
	if superstring {
		t.Error("rule should not apply when the value is only a prefix of the header")
	}
	if !exact {
		t.Error("rule should apply to the exact header value")
	}
	if different {
		t.Error("rule should not apply to a different header value")
	}
}
|
package main
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
)
// TestHandler checks the happy path: a valid adult user gets a 200 and
// the celebratory message.
func TestHandler(t *testing.T) {
	body := strings.NewReader(`{"name":"Jesús","age":26}`)
	req := httptest.NewRequest(
		http.MethodPost,
		"http://localhost:8080/",
		body,
	)
	rec := httptest.NewRecorder()
	Handler(rec, req)
	if rec.Code != http.StatusOK {
		// BUG FIX: the old message read "Unexpected status 200, got %d"
		// — 200 is the *expected* status, not the unexpected one.
		t.Errorf("expected status %d, got %d", http.StatusOK, rec.Code)
	}
	if !strings.Contains(rec.Body.String(), "Jesús, you can drink beer 🍺") {
		t.Errorf("Unexpected body: %s", rec.Body.String())
	}
}
// TestHandlerFail checks that a negative age is rejected with a 400 and
// a descriptive error message.
func TestHandlerFail(t *testing.T) {
	body := strings.NewReader(`{"name":"Jesús","age":-1}`)
	req := httptest.NewRequest(
		http.MethodPost,
		"http://localhost:8080/",
		body,
	)
	rec := httptest.NewRecorder()
	Handler(rec, req)
	if rec.Code != http.StatusBadRequest {
		// BUG FIX: the old message read "Unexpected status 400" even
		// though 400 is the expected status here.
		t.Errorf("expected status %d, got %d", http.StatusBadRequest, rec.Code)
	}
	if !strings.Contains(rec.Body.String(), "Error: age lower than zero 💥") {
		t.Errorf("Unexpected body: %s", rec.Body.String())
	}
}
// BenchmarkHandler measures one full request/response cycle through
// Handler for a valid payload.
func BenchmarkHandler(b *testing.B) {
	// Original loop used i <= b.N, running b.N+1 iterations and skewing results.
	for i := 0; i < b.N; i++ {
		body := strings.NewReader(`{"name":"Jesús","age":26}`)
		req := httptest.NewRequest(
			http.MethodPost,
			"http://localhost:8080/",
			body,
		)
		rec := httptest.NewRecorder()
		Handler(rec, req)
		if rec.Code != http.StatusOK {
			b.Errorf("expected status %d, got %d", http.StatusOK, rec.Code)
		}
		if !strings.Contains(rec.Body.String(), "Jesús, you can drink beer 🍺") {
			b.Errorf("Unexpected body: %s", rec.Body.String())
		}
	}
}
|
package main
import (
"fmt"
"os"
)
func main() {
fmt.Println("Error Handling: To check a file exists or not\n")
f, err := os.Open("/test.txt")
if err != nil {
fmt.Println("Error: File not found")
return
}
fmt.Println(f.Name(), "Opened Successfully")
}
// - Sumeet Ranjan Parida (Batch - 9A)
//Output:
//Error Handling: To check a file exists or not
//Error: File not found
|
package excel
import (
"errors"
"fmt"
"path/filepath"
"strings"
"github.com/360EntSecGroup-Skylar/excelize"
"github.com/alexizzarevalo/grades_management/src/msg"
)
type Cells struct {
Grade string
Carne string
}
type ExcelOptions struct {
File string
Cells Cells
}
// getNameWithExt joins a base file name and an extension (the extension
// is expected to carry its leading dot).
func getNameWithExt(fileName, ext string) string {
	return fmt.Sprintf("%s%s", fileName, ext)
}
// extractGrades prints one CSV line ("Carne,Nota") per sheet in the
// workbook, reading the student id (carne) and the grade from the two
// given cell addresses. Sheets missing either value are collected and
// reported once via msg.Warning.
func extractGrades(xlsx *excelize.File, carneCell, gradeCell string) {
	fmt.Println("Carne,Nota")
	var omitted []string
	for _, sheetName := range xlsx.GetSheetMap() {
		carne := xlsx.GetCellValue(sheetName, carneCell)
		grade := xlsx.GetCellValue(sheetName, gradeCell)
		// Plain != "" replaces the original strings.Compare(x, "") != 0.
		if carne != "" && grade != "" {
			fmt.Printf("%v,%v\n", carne, grade)
		} else {
			omitted = append(omitted, sheetName)
		}
	}
	if len(omitted) > 0 {
		// Same message format as before: "Se omitio: a, b, porque ..."
		msg.Warning("Se omitio: " + strings.Join(omitted, ", ") + ", porque no se encontro carnet o nota.")
	}
}
// GetGrades opens the workbook named in opt.File and dumps the grades
// found at the configured cells.
func GetGrades(opt ExcelOptions) {
	ext := filepath.Ext(opt.File)
	// TrimSuffix only strips the trailing extension; the original used
	// strings.Replace(..., 1), which corrupts names where the extension
	// string also occurs mid-name (e.g. "a.xlsx.b.xlsx").
	fileName := strings.TrimSuffix(opt.File, ext)
	original := getNameWithExt(fileName, ext)
	xlsx, err := excelize.OpenFile(original)
	if err != nil {
		msg.Error(errors.New("no se pudo abrir el archivo " + original))
		// msg.Error may not terminate the process; bail out so we never
		// dereference a nil *excelize.File below.
		return
	}
	extractGrades(xlsx, opt.Cells.Carne, opt.Cells.Grade)
}
|
package e4
import (
"fmt"
)
type (
	// any is a local alias for the empty interface (predates the
	// built-in alias introduced in Go 1.18).
	any = interface{}
)
var (
	// pt is a package-local shorthand for fmt.Printf.
	pt = fmt.Printf
)
|
package cryptoballot
import (
"bytes"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/sha512"
"encoding/hex"
"errors"
)
// SignatureRequest is a voter's request to have a (generally blinded)
// ballot hash signed. The embedded PublicKey identifies the voter and
// the embedded Signature (optional until signed) covers the first four
// fields serialized with double-linebreak separators (see String).
type SignatureRequest struct {
	ElectionID string
	RequestID []byte // SHA512 (hex) of base64 encoded public-key
	PublicKey // base64 encoded PEM formatted public-key
	BallotHash []byte // SHA512 (hex-encoded) of the ballot. This would generally be blinded.
	Signature // Voter signature for the ballot request
}
// Given a raw Signature Request string (as a []byte -- see documentation for format), return a new SignatureRequest object.
// Generally the Signature Request string is coming from a voter in a POST body.
// This will also verify the signature on the SignatureRequest and return an error if the request does not pass crypto verification
func NewSignatureRequest(rawSignatureRequest []byte) (*SignatureRequest, error) {
	var (
		err error
		hasSign bool
		electionID string
		requestID []byte
		publicKey PublicKey
		ballotHash []byte
		signature Signature
	)
	// The SignatureRequest is composed of individual components seperated by double linebreaks
	parts := bytes.Split(rawSignatureRequest, []byte("\n\n"))
	numParts := len(parts)
	switch {
	case numParts == 4:
		// Four parts: election-id, request-id, public-key, ballot-hash (not yet signed).
		hasSign = false
	case numParts == 5:
		// A fifth part is the voter's signature over the first four.
		hasSign = true
	default:
		return &SignatureRequest{}, errors.New("Cannot read Signature Request. Invalid format")
	}
	electionID = string(parts[0])
	// Parse the public key first: the request ID is defined in terms of it.
	publicKey, err = NewPublicKey(parts[2])
	if err != nil {
		return &SignatureRequest{}, err
	}
	requestID = parts[1]
	if !bytes.Equal(requestID, publicKey.GetSHA512()) {
		return &SignatureRequest{}, errors.New("Invalid Request ID. A Request ID must be the (hex encoded) SHA512 of the voters public key.")
	}
	ballotHash = parts[3]
	// Decode into a throwaway buffer purely to validate the hex encoding
	// and the decoded length; the stored hash stays hex-encoded.
	n, err := hex.Decode(make([]byte, hex.DecodedLen(len(ballotHash))), ballotHash)
	if err != nil {
		return &SignatureRequest{}, errors.New("Ballot hash must be hex encoded.")
	}
	if n != sha512.Size {
		return &SignatureRequest{}, errors.New("You must provide exactly 512 bits for the blinded SHA512 ballot hash")
	}
	if hasSign {
		signature, err = NewSignature(parts[4])
		if err != nil {
			return &SignatureRequest{}, err
		}
	} else {
		// nil signature means "not yet signed"; see HasSignature.
		signature = nil
	}
	sigReq := SignatureRequest{
		electionID,
		requestID,
		publicKey,
		ballotHash,
		signature,
	}
	// Verify the signature if it has been signed
	if sigReq.HasSignature() {
		if err = sigReq.VerifySignature(); err != nil {
			return &SignatureRequest{}, errors.New("Invalid signature. The signature provided does not cryptographically sign this Signature Request or does not match the public-key provided. " + err.Error())
		}
	}
	// All checks pass
	return &sigReq, nil
}
// VerifySignature checks the voter's signature attached to this
// SignatureRequest against the embedded public key. It fails when no
// signature is present.
func (sigReq *SignatureRequest) VerifySignature() error {
	if !sigReq.HasSignature() {
		return errors.New("Could not verify signature: Signature does not exist")
	}
	// The signed message is the request serialized without its trailing
	// signature block (same layout as String, minus the signature).
	message := sigReq.ElectionID + "\n\n" + string(sigReq.RequestID) + "\n\n" + sigReq.PublicKey.String() + "\n\n" + string(sigReq.BallotHash)
	return sigReq.Signature.VerifySignature(sigReq.PublicKey, []byte(message))
}
// String implements fmt.Stringer. It produces exactly the text
// representation a voter POSTs in a Signature Request — the same format
// NewSignatureRequest parses — with the signature appended when present.
func (sigReq *SignatureRequest) String() string {
	var buf bytes.Buffer
	buf.WriteString(sigReq.ElectionID)
	buf.WriteString("\n\n")
	buf.Write(sigReq.RequestID)
	buf.WriteString("\n\n")
	buf.WriteString(sigReq.PublicKey.String())
	buf.WriteString("\n\n")
	buf.Write(sigReq.BallotHash)
	if sigReq.HasSignature() {
		buf.WriteString("\n\n")
		buf.WriteString(sigReq.Signature.String())
	}
	return buf.String()
}
// Sign the blinded ballot hash attached to the Signature Request. It is the hex-encoded blinded SHA512 hash of the ballot.
func (sigReq *SignatureRequest) SignBallot(key *rsa.PrivateKey) (Signature, error) {
	// Decode the stored hex back to the raw 64-byte hash before signing.
	rawBytes := make([]byte, hex.DecodedLen(len(sigReq.BallotHash))) //@@TODO: Make this a straight 64 bytes (512 bits)
	_, err := hex.Decode(rawBytes, sigReq.BallotHash)
	if err != nil {
		return Signature{}, err
	}
	// PKCS#1 v1.5 signature over the pre-hashed value (crypto.SHA512).
	rawSignature, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA512, rawBytes)
	if err != nil {
		return Signature{}, err
	}
	signature := Signature(rawSignature)
	return signature, nil
}
// Signatures are generally required, but are sometimes optional (for example, for working with the SignatureRequest before it is signed by the voter)
// This function checks to see if the SignatureRequest has been signed by the voter.
// A nil Signature (as set by NewSignatureRequest for 4-part requests) means "unsigned".
func (sigReq *SignatureRequest) HasSignature() bool {
	return sigReq.Signature != nil
}
|
package logic
import (
"jkt/gateway/global"
"jkt/gateway/hotel"
"jkt/gateway/lanuage"
"jkt/gateway/websocket"
"jkt/jktgo/log"
"jkt/jktgo/message"
"jkt/jktgo/redis"
)
// UnknownCodeCallBack is invoked when a message with an unrecognized code
// arrives; it notifies the client and forcibly logs the session out.
func UnknownCodeCallBack(session *websocket.Session, m map[string]interface{}) {
	log.Debug("客户端 发送了未知的消息")
	session.SendMessage(lanuage.UnKnownCodeContent)
	session.Logout()
}
// PingCallBack answers a client ping so the peer can confirm the
// connection is still alive.
func PingCallBack(session *websocket.Session) {
	log.Debug("ping 回调")
	session.SendMessage(lanuage.PingContent)
}
// OpenConnectCallBack is invoked when a new connection is established.
func OpenConnectCallBack(session *websocket.Session) {
	log.Debug("有新的连接到来")
}
// CloseConnectCallBack is invoked when a connection closes. If the
// session carries attach data, it removes the session from the hotel
// registry and from the shared redis session set (under the set's lock).
func CloseConnectCallBack(session *websocket.Session) {
	if session.AttachData == nil {
		return
	}
	hotel.GetInstance().DeleteSession(session.AttachData.HotelID, session.AttachData.UID)
	// Hoist the repeated global lookups; the gateway name cannot change here.
	gatewayName := global.GetInstance().GatewayName
	lockKey := message.GetRedisSessionSetLock(gatewayName)
	redis.Lock(lockKey)
	redis.HDel(message.GetRedisSessionSetKey(gatewayName), session.AttachData.UID)
	redis.UnLock(lockKey)
}
// MaxConnectArrivedCallBack is invoked when the server hits its maximum
// connection count; it informs the client before the connection is refused.
func MaxConnectArrivedCallBack(session *websocket.Session) {
	session.SendMessage(lanuage.MaxConnectArrivedContent)
	log.Debug("达到了最大的连接")
}
|
package models
// Link maps a shortcut token to the real URL it stands for.
type Link struct {
	RealURL string `json:"real_url"`
	Shortcut string `json:"shortcut,omitempty"`
}
|
package core
import (
"encoding/json"
"strings"
)
// MarshalJSON implements json.Marshaler. Invalid values encode as JSON
// null; valid values encode as a plain number.
func (this *Int32) MarshalJSON() ([]byte, error) {
	// !x is idiomatic; the original compared against the literal false.
	if !this.Valid {
		return []byte("null"), nil
	}
	return json.Marshal(this.int32)
}
// UnmarshalJSON implements json.Unmarshaler.
// Empty input, a blank (possibly quoted) string, and the literal null all
// mark the value as invalid without error; anything else must parse as an
// int32 (quoted numbers are accepted because surrounding quotes are stripped).
func (this *Int32) UnmarshalJSON(data []byte) error {
	// len() of a nil slice is 0, so this also covers nil input
	// (the original's separate data == nil check was redundant).
	if len(data) == 0 {
		this.Valid = false
		return nil
	}
	s := strings.Trim(string(data), "\"")
	if len(s) == 0 || s == "null" {
		this.Valid = false
		return nil
	}
	var v int32
	if err := json.Unmarshal([]byte(s), &v); err != nil {
		return err
	}
	this.int32 = v
	this.Valid = true
	return nil
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package cellular
import (
"context"
"time"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/local/cellular"
"chromiumos/tast/local/modemmanager"
"chromiumos/tast/testing"
)
// init registers the stress test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: HostCellularStressEnableDisable,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc: "Verifies that host has network connectivity via cellular interface",
		Contacts: []string{"madhavadas@google.com", "chromeos-cellular-team@google.com"},
		Attr: []string{"group:cellular", "cellular_unstable", "cellular_sim_active"},
		Fixture: "cellular",
		// Budget covers multiple enable/disable cycles plus connectivity checks.
		Timeout: 4 * time.Minute,
	})
}
// HostCellularStressEnableDisable repeatedly disables and re-enables the
// modem and verifies that IP connectivity over the cellular interface
// recovers after every cycle.
func HostCellularStressEnableDisable(ctx context.Context, s *testing.State) {
	if _, err := modemmanager.NewModemWithSim(ctx); err != nil {
		s.Fatal("Could not find MM dbus object with a valid sim: ", err)
	}
	helper, err := cellular.NewHelper(ctx)
	if err != nil {
		s.Fatal("Failed to create cellular.Helper: ", err)
	}
	stressTestHostIPConnectivity := func(ctx context.Context) error {
		// Four enable/disable cycles.
		for i := 1; i < 5; i++ {
			s.Logf("Test loop: %d", i)
			s.Log("Disable")
			if _, err := helper.Disable(ctx); err != nil {
				return errors.Wrap(err, "failed to disable modem")
			}
			s.Log("Enable")
			// Enable and get service to set autoconnect based on test parameters.
			if _, err := helper.Enable(ctx); err != nil {
				return errors.Wrap(err, "failed to enable modem")
			}
			ipv4, ipv6, err := helper.GetNetworkProvisionedCellularIPTypes(ctx)
			if err != nil {
				// Propagate like every other failure in this closure instead
				// of aborting via s.Fatal from inside the callback.
				return errors.Wrap(err, "failed to read APN info")
			}
			s.Log("ipv4: ", ipv4, " ipv6: ", ipv6)
			if err := cellular.VerifyIPConnectivity(ctx, testexec.CommandContext, ipv4, ipv6, "/bin"); err != nil {
				return errors.Wrap(err, "failed connectivity test")
			}
		}
		return nil
	}
	if err := helper.RunTestOnCellularInterface(ctx, stressTestHostIPConnectivity); err != nil {
		s.Fatal("Failed to run test on cellular interface: ", err)
	}
}
|
package health
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
)
// TestRegisterDependency table-tests dependency registration: a failing
// hard dependency makes the service unhealthy, a failing soft one does
// not, and a dependency with no name is rejected with ErrNoDependency.
func TestRegisterDependency(t *testing.T) {
	tests := []struct {
		dependency Dependency
		expectedErr error
		expectedHealth bool
	}{
		// Passing - healthy
		{
			dependency: Dependency{
				Name: "healthy service",
				Level: LevelHard,
				check: func() bool { return true },
			},
			expectedErr: nil,
			expectedHealth: true,
		},
		// Passing - unhealthy
		{
			dependency: Dependency{
				Name: "unhealthy service",
				Level: LevelHard,
				check: func() bool { return false },
			},
			expectedErr: nil,
			expectedHealth: false,
		},
		// Passing - unhealthy soft
		{
			dependency: Dependency{
				Name: "unhealthy service",
				Level: LevelSoft,
				check: func() bool { return false },
			},
			expectedErr: nil,
			expectedHealth: true,
		},
		// Failing - no name
		{Dependency{}, ErrNoDependency, true},
	}
	for i, test := range tests {
		// Fresh ServiceCheck per case so registrations don't leak across cases.
		check, err := InitialiseServiceCheck("test", 50*time.Millisecond)
		if err != nil {
			t.Errorf("expected nil got %v", err)
		}
		err = check.RegisterDependency(test.dependency.Name, test.dependency.Level, test.dependency.check)
		if err != test.expectedErr {
			t.Errorf("expected %v got %v", test.expectedErr, err)
		}
		// Force a synchronous evaluation instead of waiting for the interval.
		check.updateStatus()
		if check.IsHealthy() != test.expectedHealth {
			t.Errorf("expected %v got %v on test case #%d", test.expectedHealth, check.Healthy, i)
		}
	}
}
// TestInitialiseServiceCheck verifies that an empty service name is
// rejected: no check is returned and an error is produced.
func TestInitialiseServiceCheck(t *testing.T) {
	check, err := InitialiseServiceCheck("", 50*time.Millisecond)
	if err == nil {
		t.Errorf("expecting %v got %v", ErrNoServiceNameSupplied, err)
	}
	if check != nil {
		t.Errorf("expected nil got %v", check)
	}
}
// TestDependency verifies that a registered dependency can be retrieved
// by name with its level intact.
func TestDependency(t *testing.T) {
	tests := []struct {
		dependency *Dependency
		expectedErr error
	}{
		// Passing
		{
			dependency: &Dependency{
				Name: "test",
				Level: LevelHard,
				check: func() bool { return false },
			},
			expectedErr: nil,
		},
	}
	for _, test := range tests {
		// A zero-value ServiceCheck is enough here; no interval loop needed.
		check := &ServiceCheck{}
		if err := check.RegisterDependency(test.dependency.Name,
			test.dependency.Level, test.dependency.check); err != nil {
			t.Errorf("expected nil got %v", err)
		}
		dep, err := check.Dependency(test.dependency.Name)
		if err != nil {
			t.Errorf("expected %v got %v", test.expectedErr, err)
		}
		if dep.Name != test.dependency.Name {
			t.Errorf("expected %v got %v", test.dependency.Name, dep.Name)
		}
		if dep.Level != test.dependency.Level {
			t.Errorf("expected %v got %v", test.dependency.Level, dep.Level)
		}
	}
}
// TestEndpointHelper verifies Check200Helper maps a 200 response to true
// and a 5xx response to false, with and without a custom client.
func TestEndpointHelper(t *testing.T) {
	var (
		testHTTPClient = &http.Client{
			Timeout: 1 * time.Second,
			Transport: http.DefaultTransport,
		}
	)
	tests := []struct {
		status int
		expected bool
		expectedErr error
	}{
		// Passing
		{http.StatusOK, true, nil},
		{http.StatusInternalServerError, false, nil},
	}
	for _, test := range tests {
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(test.status)
		}))
		resp, err := Check200Helper(server.URL)
		if err != test.expectedErr {
			t.Errorf("expected %v got %v", test.expectedErr, err)
		}
		if resp != test.expected {
			t.Errorf("expected %v got %v", test.expected, resp)
		}
		resp, err = Check200Helper(server.URL, testHTTPClient)
		if err != test.expectedErr {
			t.Errorf("expected %v got %v", test.expectedErr, err)
		}
		if resp != test.expected {
			t.Errorf("expected %v got %v", test.expected, resp)
		}
		// Shut the server down each iteration; the original leaked them.
		server.Close()
	}
}
// TestHTTPHandler verifies the HTTP handler returns 200 while the check
// is healthy and 503 once it becomes unhealthy.
func TestHTTPHandler(t *testing.T) {
	healthCheck := &ServiceCheck{
		Name: "test",
		Healthy: true,
		duration: 0,
	}
	ts := httptest.NewServer(http.HandlerFunc(healthCheck.HTTPHandler))
	defer ts.Close()
	res, err := http.Get(ts.URL)
	if err != nil {
		// Fatal, not Error: res is nil here and would panic below.
		t.Fatal(err)
	}
	res.Body.Close()
	if res.StatusCode != 200 {
		t.Errorf("Unexpected status code returned, expected %d, found %d", 200, res.StatusCode)
	}
	healthCheck.Healthy = false
	res, err = http.Get(ts.URL)
	if err != nil {
		t.Fatal(err)
	}
	res.Body.Close()
	if res.StatusCode != 503 {
		t.Errorf("Unexpected status code returned, expected %d, found %d", 503, res.StatusCode)
	}
}
// TestGet verifies Get reflects the Healthy flag reported by a remote
// health endpoint, with and without a custom client.
func TestGet(t *testing.T) {
	var (
		testHTTPClient = &http.Client{
			Timeout: 1 * time.Second,
			Transport: http.DefaultTransport,
		}
	)
	tests := []struct {
		healthy bool
		expected bool
	}{
		{true, true},
		{false, false},
	}
	for _, test := range tests {
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			json.NewEncoder(w).Encode(&ServiceCheck{
				Name: "test",
				Healthy: test.healthy,
			})
		}))
		healthy, err := Get(server.URL)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if healthy != test.expected {
			t.Errorf("expected %v, got %v", test.expected, healthy)
		}
		healthy, err = Get(server.URL, testHTTPClient)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if healthy != test.expected {
			t.Errorf("expected %v, got %v", test.expected, healthy)
		}
		// Shut the server down each iteration; the original leaked them.
		server.Close()
	}
}
// TestWaitForDependencies covers both outcomes of WaitForDependencies:
// it returns promptly when every dependency passes, and it gives up at
// its deadline when a hard dependency keeps failing.
func TestWaitForDependencies(t *testing.T) {
	t.Run("healthy", func(t *testing.T) {
		healthCheck := &ServiceCheck{
			Name: "test",
			Healthy: true,
			duration: 0,
		}
		returned := make(chan struct{})
		// NOTE(review): RegisterDependency's error is ignored here; the name
		// is non-empty so it presumably cannot fail — verify against the impl.
		healthCheck.RegisterDependency("redis", LevelHard, func() bool {
			return true
		})
		go func() {
			healthCheck.WaitForDependencies(10 * time.Second)
			returned <- struct{}{}
		}()
		select {
		case <-returned:
			break
		case <-time.After(time.Second * 2):
			t.Error("Timeout even though the health check passes")
		}
	})
	t.Run("unhealthy", func(t *testing.T) {
		healthCheck := &ServiceCheck{
			Name: "test",
			Healthy: true,
			duration: 0,
		}
		returned := make(chan struct{})
		healthCheck.RegisterDependency("redis", LevelHard, func() bool {
			return false
		})
		go func() {
			// 2s deadline must fire before the 3s guard below.
			healthCheck.WaitForDependencies(2 * time.Second)
			returned <- struct{}{}
		}()
		select {
		case <-returned:
			break
		case <-time.After(time.Second * 3):
			t.Error("Context wasn't cancelled")
		}
	})
}
// TestGetHealth verifies that the Healthy flag set on a ServiceCheck is
// readable back unchanged.
func TestGetHealth(t *testing.T) {
	check := &ServiceCheck{
		Name: "test",
		Healthy: true,
		duration: 0,
	}
	if !check.Healthy {
		t.Error("expected to be healthy")
	}
}
/*
* Benchmarks
*/
// BenchmarkGet measures Get against healthy, unhealthy and unavailable
// upstream health endpoints.
func BenchmarkGet(b *testing.B) {
	// newServer builds a stub health endpoint with the given status/flag.
	newServer := func(status int, healthy bool) *httptest.Server {
		return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(status)
			json.NewEncoder(w).Encode(&ServiceCheck{
				Name: "test",
				Healthy: healthy,
			})
		}))
	}
	success := newServer(200, true)
	defer success.Close()
	failure := newServer(200, false)
	defer failure.Close()
	unavailable := newServer(503, false) // original spelled it "unavalible"
	defer unavailable.Close()
	optionalClient := &http.Client{
		Timeout: 500 * time.Millisecond,
		Transport: http.DefaultTransport,
	}
	for i := 0; i < b.N; i++ {
		if health, _ := Get(success.URL, optionalClient); !health {
			b.Logf("Expecting %t got %t", true, health)
		}
	}
	for i := 0; i < b.N; i++ {
		if health, _ := Get(failure.URL, optionalClient); health {
			b.Logf("Expecting %t got %t", false, health)
		}
	}
	for i := 0; i < b.N; i++ {
		if health, _ := Get(unavailable.URL, optionalClient); health {
			b.Logf("Expecting %t got %t", false, health)
		}
	}
}
// BenchmarkCheck200Helper measures Check200Helper against a 200 and a
// 503 endpoint using a short-timeout client.
// NOTE(review): the servers are never closed and b.Log(fmt.Sprintf(...))
// could be b.Logf; the trailing `continue` is a no-op as the last loop
// statement.
func BenchmarkCheck200Helper(b *testing.B) {
	success := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(200)
		json.NewEncoder(w).Encode(&ServiceCheck{
			Name: "test",
			Healthy: true,
		})
	}))
	failure := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(503)
		json.NewEncoder(w).Encode(&ServiceCheck{
			Name: "test",
			Healthy: false,
		})
	}))
	optionalClient := &http.Client{
		Timeout: 500 * time.Millisecond,
		Transport: http.DefaultTransport,
	}
	for i := 0; i < b.N; i++ {
		health, _ := Check200Helper(success.URL, optionalClient)
		if !health {
			b.Log(fmt.Sprintf("Expecting %t got %t", true, health))
		}
	}
	for i := 0; i < b.N; i++ {
		health, _ := Check200Helper(failure.URL, optionalClient)
		if health {
			b.Log(fmt.Sprintf("Expecting %t got %t", false, health))
			continue
		}
	}
}
|
// Copyright © 2020. All rights reserved.
// Author: Ilya Stroy.
// Contacts: qioalice@gmail.com, https://github.com/qioalice
// License: https://opensource.org/licenses/MIT
package ekatime
import (
"time"
)
// TillNext returns how much ns (as time.Duration) must be passed until next time
// 'range_' will end for the current Timestamp 'ts'.
//
// In most cases you don't need to use this method, but use any of predefined
// instead: TillNextMinute(), TillNextHour(), etc.
//
// Using this function you may get how much time.Duration must be passed,
// until next requested time is passed since now. Range must be passed in seconds.
//
// Examples:
// d := NewDate(2012, MONTH_JANUARY, 12).WithTime(13, 14, 15) // -> 12 Jan 2012 13:14:15
// d.TillNext(2 * SECONDS_IN_HOUR) // -> 45m45s (till 14:00:00, range of 2h)
// d.TillNext(3 * SECONDS_IN_HOUR) // -> 1h45m45s (till 15:00:00, range of 3h)
// d.TillNext(30 * SECONDS_IN_MINUTE) // -> 15m45s (till 13:30:00, range of 30m).
func (ts Timestamp) TillNext(range_ Timestamp) time.Duration {
	// Seconds until the next multiple of range_. The original expression
	// "ts + (range_ - ts%range_) - ts" reduces algebraically to this.
	return time.Duration(range_-ts%range_) * time.Second
}
// TillNextMinute returns how much ns (as time.Duration) must be passed until
// the next minute (for the current Timestamp 'ts') will come.
func (ts Timestamp) TillNextMinute() time.Duration {
	return ts.TillNext(SECONDS_IN_MINUTE)
}
// TillNextHour returns how much ns (as time.Duration) must be passed until
// the next hour (for the current Timestamp 'ts') will come.
func (ts Timestamp) TillNextHour() time.Duration {
	return ts.TillNext(SECONDS_IN_HOUR)
}
// TillNext12h returns how much ns (as time.Duration) must be passed until
// the next half day (12h) (for the current Timestamp 'ts') will come.
func (ts Timestamp) TillNext12h() time.Duration {
	return ts.TillNext(SECONDS_IN_12H)
}
// TillNextNoon returns how much ns (as time.Duration) must be passed until
// the next noon (12.00 PM) (for the current Timestamp 'ts') will come.
func (ts Timestamp) TillNextNoon() time.Duration {
	// Time to next midnight, shifted forward 12h; if that overshoots a
	// full day the nearest noon is actually within the next 12h.
	d := ts.TillNext(SECONDS_IN_DAY) + 12 * time.Hour
	if d >= 24 * time.Hour {
		d -= 24 * time.Hour
	}
	return d
}
// TillNextMidnight returns how much ns (as time.Duration) must be passed until
// the next midnight (12.00 AM) (for the current Timestamp 'ts') will come.
func (ts Timestamp) TillNextMidnight() time.Duration {
	// Midnight and "next day" are the same instant.
	return ts.TillNextDay()
}
// TillNextDay returns how much ns (as time.Duration) must be passed until
// the next day (for the current Timestamp 'ts') will come.
func (ts Timestamp) TillNextDay() time.Duration {
	return ts.TillNext(SECONDS_IN_DAY)
}
// TillNextMonth returns how much ns (as time.Duration) must be passed until
// the next month (for the current Timestamp 'ts') will come.
// NOTE(review): TillNext aligns to multiples of the range counted from the
// Unix epoch; month lengths vary, so confirm this yields the intended
// calendar boundary rather than an epoch-aligned interval.
func (ts Timestamp) TillNextMonth() time.Duration {
	y, m, _ := dateFromUnix(ts)
	return ts.TillNext(InMonth(y, m))
}
// TillNextYear returns how much ns (as time.Duration) must be passed until
// the next year (for the current Timestamp 'ts') will come.
// NOTE(review): same epoch-alignment caveat as TillNextMonth.
func (ts Timestamp) TillNextYear() time.Duration {
	return ts.TillNext(InYear(ts.Year()))
}
// TillNextMinute is the same as Timestamp.TillNextMinute() but for current time.
func TillNextMinute() time.Duration {
	return Now().TillNextMinute()
}
// TillNextHour is the same as Timestamp.TillNextHour() but for current time.
func TillNextHour() time.Duration {
	return Now().TillNextHour()
}
// TillNext12h is the same as Timestamp.TillNext12h() but for current time.
func TillNext12h() time.Duration {
	return Now().TillNext12h()
}
// TillNextNoon is the same as Timestamp.TillNextNoon() but for current time.
func TillNextNoon() time.Duration {
	return Now().TillNextNoon()
}
// TillNextMidnight is the same as Timestamp.TillNextMidnight() but for current time.
func TillNextMidnight() time.Duration {
	return Now().TillNextMidnight()
}
// TillNextDay is the same as Timestamp.TillNextDay() but for current time.
func TillNextDay() time.Duration {
	return Now().TillNextDay()
}
// TillNextMonth is the same as Timestamp.TillNextMonth() but for current time.
func TillNextMonth() time.Duration {
	return Now().TillNextMonth()
}
// TillNextYear is the same as Timestamp.TillNextYear() but for current time.
func TillNextYear() time.Duration {
	return Now().TillNextYear()
}
|
package main
import (
"github.com/cw35/eventsource"
"log"
"net/http"
"time"
)
func getSubscribeKey(req *http.Request) string {
return req.Header.Get("Authorization")
}
func getSessionKey(req *http.Request) string {
return req.Header.Get("Authorization")
}
// consumerStatusListener logs every consumer status change reported by
// the eventsource server.
func consumerStatusListener(subscribeKey, sessionKey string, status int) {
	log.Println("consumerStatusListener", subscribeKey, sessionKey, status)
}
// messageSentListener logs every message delivery reported by the
// eventsource server.
func messageSentListener(messageId, subscribeKey, sessionKey string) {
	log.Println("messageSentListener", messageId, subscribeKey, sessionKey)
}
// customHeaders returns the extra raw header lines emitted on every SSE
// response; the request parameter is accepted but not consulted.
func customHeaders(req *http.Request) [][]byte {
	headers := make([][]byte, 0, 2)
	headers = append(headers, []byte("Cache-Control: no-cache"))
	headers = append(headers, []byte("Connection: keep-alive"))
	return headers
}
// main wires up an eventsource server that serves ./public, accepts SSE
// consumers on /events, and broadcasts "hello" to all of them every 2s.
func main() {
	es := eventsource.New(nil, customHeaders, getSessionKey, getSubscribeKey)
	es.AddMessageSentListener([]func(string, string, string){messageSentListener})
	es.AddConsumerStatusListener([]func(string, string, int){consumerStatusListener})
	defer es.Close()
	http.Handle("/", http.FileServer(http.Dir("./public")))
	http.Handle("/events", es)
	// Broadcast loop; runs for the lifetime of the process.
	go func() {
		for {
			es.BroadcastEventMessage("hello", "", "")
			log.Printf("Hello has been sent (consumers: %d)", es.ConsumersCount())
			time.Sleep(2 * time.Second)
		}
	}()
	log.Print("Open URL http://localhost:8080/ in your browser.")
	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		log.Fatal(err)
	}
}
|
// SPDX-License-Identifier: ISC
// Copyright (c) 2014-2020 Bitmark Inc.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockheader
import (
"encoding/binary"
"sync"
"github.com/bitmark-inc/bitmarkd/blockdigest"
"github.com/bitmark-inc/bitmarkd/blockrecord"
"github.com/bitmark-inc/bitmarkd/fault"
"github.com/bitmark-inc/bitmarkd/genesis"
"github.com/bitmark-inc/bitmarkd/mode"
"github.com/bitmark-inc/bitmarkd/storage"
)
const (
	// cacheSize is the number of most-recent block digests kept in memory.
	cacheSize = 10
)
// cachedBlockDigest pairs a block number with its computed digest.
type cachedBlockDigest struct {
	blockNumber uint64
	digest blockdigest.Digest
}
// cached is a small ring buffer of recent digests; cacheIndex is the next
// slot to overwrite. Both are guarded by cacheLock.
var cached [cacheSize]cachedBlockDigest
var cacheIndex int
var cacheLock sync.RWMutex
// DigestForBlock - return the digest for a specific block number
func DigestForBlock(number uint64) (blockdigest.Digest, error) {
	// valid block number — at or below genesis the digest is a fixed
	// per-network constant
	if number <= genesis.BlockNumber {
		if mode.IsTesting() {
			return genesis.TestGenesisDigest, nil
		}
		return genesis.LiveGenesisDigest, nil
	}
	// fast path: recently used digests live in a small ring buffer
	digest := digestFromCache(number)
	if !digest.IsEmpty() {
		return digest, nil
	}
	// fetch block and compute digest
	n := make([]byte, 8)
	binary.BigEndian.PutUint64(n, number)
	// first try the pre-computed header-hash pool
	digest = blockrecord.DigestFromHashPool(storage.Pool.BlockHeaderHash, n)
	if !digest.IsEmpty() {
		addToCache(number, digest)
		return digest, nil
	}
	// fall back to hashing the stored block header itself
	digest, err := genDigestFromPool(storage.Pool.Blocks, n)
	if nil != err {
		return blockdigest.Digest{}, err
	}
	addToCache(number, digest)
	return digest, err
}
// ClearCache drops every cached digest (the write index is left as-is;
// stale slots will simply be overwritten).
func ClearCache() {
	cacheLock.Lock()
	defer cacheLock.Unlock()
	// Zero-value composite literal replaces the original
	// "*new([cacheSize]cachedBlockDigest)" indirection.
	cached = [cacheSize]cachedBlockDigest{}
}
// digestFromCache returns the cached digest for blockNumber, or the
// zero digest when the block is not cached.
func digestFromCache(blockNumber uint64) blockdigest.Digest {
	cacheLock.RLock()
	defer cacheLock.RUnlock()
	for i := range cached {
		if cached[i].blockNumber == blockNumber {
			return cached[i].digest
		}
	}
	return blockdigest.Digest{}
}
// addToCache stores a freshly computed digest in the ring buffer,
// overwriting the oldest entry.
func addToCache(blockNumber uint64, digest blockdigest.Digest) {
	cacheLock.Lock()
	defer cacheLock.Unlock()
	cached[cacheIndex] = cachedBlockDigest{
		blockNumber: blockNumber,
		digest: digest,
	}
	// Advance the write position, wrapping at the end of the buffer
	// (replaces the original explicit if/else wrap logic).
	cacheIndex = (cacheIndex + 1) % cacheSize
}
// genDigestFromPool fetches the packed block for the (big-endian encoded)
// block number from the pool and computes its header digest.
func genDigestFromPool(pool storage.Handle, blockNumber []byte) (blockdigest.Digest, error) {
	packed := pool.Get(blockNumber)
	if nil == packed {
		return blockdigest.Digest{}, fault.BlockNotFound
	}
	br := blockrecord.Get()
	// Only the digest is needed; the other header fields are discarded.
	_, digest, _, err := br.ExtractHeader(packed, 0, false)
	return digest, err
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package wmp
import (
"context"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/bundles/cros/wmp/wmputils"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/cryptohome"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
)
// init registers the full-screen recording test, parameterized for both
// clamshell and tablet mode.
func init() {
	testing.AddTest(&testing.Test{
		Func:         RecordFullScreen,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Checks that full screen video record works correctly",
		Contacts: []string{
			"afakhry@chromium.org",
			"chromeos-wmp@google.com",
			"chromeos-sw-engprod@google.com",
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Fixture:      "chromeLoggedIn",
		SearchFlags: []*testing.StringPair{
			{
				Key:   "feature_id",
				Value: "screenplay-936ea36a-b93f-4127-9260-9975e69365fa",
			},
		},
		// Val selects tablet mode (true) vs clamshell (false) in the test body.
		Params: []testing.Param{
			{
				Name: "clamshell_mode",
				Val:  false,
			},
			{
				Name: "tablet_mode",
				Val:  true,
			},
		},
	})
}
// RecordFullScreen starts a full-screen recording through the capture
// mode UI, stops it, and verifies that a recording lands in Downloads.
func RecordFullScreen(ctx context.Context, s *testing.State) {
	tabletMode := s.Param().(bool)
	cr := s.FixtValue().(*chrome.Chrome)
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to create Test API connection: ", err)
	}
	// Reserve time for cleanup actions that must run even on failure.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 15*time.Second)
	defer cancel()
	cleanup, err := ash.EnsureTabletModeEnabled(ctx, tconn, tabletMode)
	if err != nil {
		s.Fatal("Failed to ensure clamshell/tablet mode: ", err)
	}
	defer cleanup(cleanupCtx)
	ac := uiauto.New(tconn)
	kb, err := input.Keyboard(ctx)
	if err != nil {
		s.Fatal("Failed to create a keyboard: ", err)
	}
	// Release the virtual keyboard device; the original leaked it.
	defer kb.Close()
	// Starts full screen recording via UI.
	screenRecordToggleButton := nodewith.HasClass("IconButton").Name("Screen record")
	recordFullscreenToggleButton := nodewith.HasClass("IconButton").Name("Record full screen")
	stopRecordButton := nodewith.HasClass("TrayBackgroundView").Name("Stop screen recording")
	recordTakenLabel := nodewith.HasClass("Label").Name("Screen recording taken")
	// Enter screen capture mode.
	if err := wmputils.EnsureCaptureModeActivated(tconn, true)(ctx); err != nil {
		s.Fatal("Failed to enable recording: ", err)
	}
	// Ensure case exit screen capture mode.
	defer wmputils.EnsureCaptureModeActivated(tconn, false)(cleanupCtx)
	defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "ui_dump")
	if err := uiauto.Combine(
		"record full screen",
		ac.LeftClick(screenRecordToggleButton),
		ac.LeftClick(recordFullscreenToggleButton),
		kb.AccelAction("Enter"),
		// Records full screen for about 30 seconds.
		uiauto.Sleep(30*time.Second),
		ac.LeftClick(stopRecordButton),
		// Checks if the screen record is taken.
		ac.WaitUntilExists(recordTakenLabel),
	)(ctx); err != nil {
		s.Fatal("Failed to record full screen: ", err)
	}
	// Checks there is a screen record video file stored in Downloads folder.
	downloadsPath, err := cryptohome.DownloadsPath(ctx, cr.NormalizedUser())
	if err != nil {
		s.Fatal("Failed to get user's Download path: ", err)
	}
	has, err := wmputils.HasScreenRecord(ctx, downloadsPath)
	if err != nil {
		s.Fatal("Failed to check whether screen record is present: ", err)
	}
	if !has {
		s.Fatal("No screen record is stored in Downloads folder")
	}
}
|
// Copyright (c) 2020 - for information on the respective copyright owner
// see the NOTICE file and/or the repository at
// https://github.com/hyperledger-labs/perun-node
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"time"
"github.com/abiosoft/ishell"
grpclib "google.golang.org/grpc"
"github.com/hyperledger-labs/perun-node/api/grpc/pb"
)
var (
	nodeCmdUsage = "Usage: node [sub-command]"
	// nodeCmd is the root of the "node" command tree; sub-commands are
	// attached in init below.
	nodeCmd = &ishell.Cmd{
		Name: "node",
		Help: "Use the command to access the node related functionalities." + nodeCmdUsage,
		Func: nodeFn,
	}
	nodeConnectCmdUsage = "Usage: node connect [url]"
	nodeConnectCmd = &ishell.Cmd{
		Name: "connect",
		Help: "Connect to a running perun node instance. Use tab completion to cycle through default values." +
			nodeConnectCmdUsage,
		Completer: func([]string) []string {
			return []string{":50001"} // Provide default values as autocompletion.
		},
		Func: nodeConnectFn,
	}
	nodeTimeCmdUsage = "Usage: node time"
	nodeTimeCmd = &ishell.Cmd{
		Name: "time",
		Help: "Print node time." + nodeTimeCmdUsage,
		Func: nodeTimeFn,
	}
	nodeConfigCmdUsage = "Usage: node config"
	nodeConfigCmd = &ishell.Cmd{
		Name: "config",
		Help: "Print node config." + nodeConfigCmdUsage,
		Func: nodeConfigFn,
	}
)
// init wires the sub-commands into the parent "node" command.
func init() {
	nodeCmd.AddCmd(nodeConnectCmd)
	nodeCmd.AddCmd(nodeTimeCmd)
	nodeCmd.AddCmd(nodeConfigCmd)
}
// nodeFn prints the help text when "node" is invoked without a sub-command.
func nodeFn(c *ishell.Context) {
	c.Println(c.Cmd.HelpText())
}
// nodeConnectFn connects the CLI to a perun node at the given address and
// verifies the connection by fetching the node time.
func nodeConnectFn(c *ishell.Context) {
	countReqArgs := 1
	if len(c.Args) != countReqArgs {
		printArgCountError(c, countReqArgs)
		return
	}
	nodeAddr := c.Args[0]
	conn, err := grpclib.Dial(nodeAddr, grpclib.WithInsecure())
	if err != nil {
		// Report via the command context like every other message here
		// (the original wrote to the package-global shell) and stop:
		// without a connection there is nothing to build a client from.
		c.Printf("%s\n\n", redf("Error connecting to perun node at %s: %v", nodeAddr, err))
		return
	}
	client = pb.NewPayment_APIClient(conn)
	t, err := getNodeTime()
	if err != nil {
		c.Printf("%s\n\n", redf("Error connecting to perun node: %v", err))
		return
	}
	c.Printf("%s\n\n", greenf("Connected to perun node at %s. Node time is %v", nodeAddr, time.Unix(t, 0)))
}
// nodeTimeFn handles "node time": it queries the connected node for
// its current time and prints it in green.
func nodeTimeFn(c *ishell.Context) {
	if client == nil {
		printNodeNotConnectedError(c)
		return
	}
	// This sub-command takes no arguments.
	const countReqArgs = 0
	if len(c.Args) != countReqArgs {
		printArgCountError(c, countReqArgs)
		return
	}
	nodeTime, err := getNodeTime()
	if err != nil {
		printCommandSendingError(c, err)
		return
	}
	c.Printf("%s\n\n", greenf("Perun node time: %s", time.Unix(nodeTime, 0)))
}
func getNodeTime() (int64, error) {
timeReq := pb.TimeReq{}
timeResp, err := client.Time(context.Background(), &timeReq)
if err != nil {
return 0, err
}
return timeResp.Time, err
}
// nodeConfigFn handles "node config": it fetches the connected node's
// configuration and pretty-prints it.
func nodeConfigFn(c *ishell.Context) {
	if client == nil {
		printNodeNotConnectedError(c)
		return
	}
	// This sub-command takes no arguments.
	const countReqArgs = 0
	if len(c.Args) != countReqArgs {
		printArgCountError(c, countReqArgs)
		return
	}
	req := pb.GetConfigReq{}
	resp, err := client.GetConfig(context.Background(), &req)
	if err != nil {
		printCommandSendingError(c, err)
		return
	}
	c.Printf("%s\n\n", greenf("Perun node config:\n%v", prettify(resp)))
}
|
package handlers
import (
"net/http"
"github.com/dgrijalva/jwt-go"
"github.com/wu-xing/wood-serve/domain"
"github.com/labstack/echo"
)
// PostArticleBox returns a handler that creates an article box for the
// authenticated user. The user id comes from the JWT "id" claim; the
// box name comes from the JSON body's "name" field. Responds 201 on
// success.
func PostArticleBox() echo.HandlerFunc {
	return func(c echo.Context) error {
		user := c.Get("user").(*jwt.Token)
		claims := user.Claims.(jwt.MapClaims)
		userID := claims["id"].(string)
		request := new(struct {
			Name string `json:"name"`
		})
		// Bind into the struct pointer itself. The original passed
		// &request (a **struct) and discarded the binding error.
		if err := c.Bind(request); err != nil {
			return err
		}
		if err := domain.AddArticleBox(userID, request.Name); err != nil {
			return err
		}
		return c.NoContent(http.StatusCreated)
	}
}
// GetArticleBoxes returns a handler that lists the authenticated
// user's article boxes as JSON (user id taken from the JWT "id" claim).
func GetArticleBoxes() echo.HandlerFunc {
	return func(c echo.Context) error {
		token := c.Get("user").(*jwt.Token)
		claims := token.Claims.(jwt.MapClaims)
		userID := claims["id"].(string)
		boxes := domain.GetArticleBoxes(userID)
		return c.JSON(http.StatusOK, boxes)
	}
}
|
package controllers
import (
// "fmt"
"github.com/astaxie/beego"
"openvpn/models"
)
// UserController handles user create/update requests; it embeds
// beego.Controller for request/response plumbing.
type UserController struct {
	beego.Controller
}
// Post creates a new user when no uid is supplied, otherwise updates
// the existing user, then redirects back to the index page. Requires
// an authenticated session.
func (c *UserController) Post() {
	// Not logged in: send the visitor to the login page.
	if !checkAccount(c.Ctx) {
		c.Redirect("/login", 302)
		return
	}
	uid := c.Input().Get("uid")
	beego.Debug(uid)
	userName := c.Input().Get("username")
	userPwd := c.Input().Get("password")
	var err error
	if len(uid) == 0 {
		err = models.AddUser(userName, userPwd)
	} else {
		err = models.ModifyUser(uid, userName, userPwd)
	}
	if err != nil {
		// Persisting failed; log it and fall through to the redirect.
		beego.Error(err)
	}
	c.Redirect("/", 301)
}
// Modify renders the user-edit form for the user identified by the
// "uid" query parameter. Requires an authenticated session; unknown
// users bounce back to the index page.
func (c *UserController) Modify() {
	if !checkAccount(c.Ctx) {
		c.Redirect("/login", 302)
		return
	}
	c.TplName = "user_modify.html"
	uid := c.Input().Get("uid")
	beego.Debug(uid)
	user, err := models.GetUser(uid)
	if err != nil {
		beego.Error(err)
		c.Redirect("/", 302)
		return
	}
	// Expose the user record and id to the template.
	c.Data["User"] = user
	c.Data["Uid"] = uid
}
|
package main
import (
"sync"
"time"
"github.com/nsf/termbox-go"
)
// mu serializes screen drawing in show.
var mu sync.Mutex

// environ holds the whole game state: board dimensions, the cell grid
// (field[y][x], true = alive), the editing cursor, the pause flag, and
// the tick interval in milliseconds.
type environ struct {
	sizeX    int
	sizeY    int
	field    [][]bool
	cursorX  int
	cursorY  int
	pause    bool
	duration int
}
// drawLine renders str starting at cell (x, y), one rune per cell,
// using the default colors.
func drawLine(x, y int, str string) {
	for i, r := range []rune(str) {
		termbox.SetCell(x+i, y, r,
			termbox.ColorDefault, termbox.ColorDefault)
	}
}
// show redraws the entire screen under mu: a '#' border, the field
// (live cells drawn as '+', each cell two columns wide), the cursor
// highlight while paused, and the help text below the board.
func (env *environ) show(pause bool) {
	mu.Lock()
	// Deferred so the mutex is released even if a draw call panics
	// (the original unlocked manually at the end).
	defer mu.Unlock()
	// Top and bottom border rows.
	for x := 0; x < env.sizeX+2; x++ {
		termbox.SetCell(x*2, 0, '#',
			termbox.ColorDefault, termbox.ColorDefault)
		termbox.SetCell(x*2, env.sizeY+1, '#',
			termbox.ColorDefault, termbox.ColorDefault)
	}
	for y := 0; y < env.sizeY; y++ {
		// Left and right border columns.
		termbox.SetCell(0, y+1, '#',
			termbox.ColorDefault, termbox.ColorDefault)
		termbox.SetCell(env.sizeX*2+2, y+1, '#',
			termbox.ColorDefault, termbox.ColorDefault)
		for x := 0; x < env.sizeX; x++ {
			fgColor := termbox.ColorDefault
			bgColor := termbox.ColorDefault
			if pause && x == env.cursorX && y == env.cursorY {
				// Highlight the cell under the cursor while editing.
				fgColor = termbox.ColorWhite
				bgColor = termbox.ColorMagenta
			}
			char := ' '
			if env.field[y][x] {
				char = '+'
			}
			termbox.SetCell(x*2+2, y+1, char, fgColor, bgColor)
		}
	}
	message1 := "Move Cursor: [←][↓][↑][→] (or [h][j][k][l]), Flip Cell State: [SPACE]"
	message2 := "Pause/Run: [ESC], Quit: [Ctrl]+[C]"
	drawLine(1, env.sizeY+2, message1)
	drawLine(1, env.sizeY+3, message2)
	termbox.Flush()
}
// moveCursor shifts the editing cursor by (dx, dy) and clamps it to
// the field boundaries.
func (env *environ) moveCursor(dx, dy int) {
	x, y := env.cursorX+dx, env.cursorY+dy
	if x < 0 {
		x = 0
	} else if x >= env.sizeX {
		x = env.sizeX - 1
	}
	if y < 0 {
		y = 0
	} else if y >= env.sizeY {
		y = env.sizeY - 1
	}
	env.cursorX, env.cursorY = x, y
}
// neighbors counts live cells among the up-to-eight cells adjacent to
// (x, y); everything outside the field counts as dead.
func (env *environ) neighbors(x, y int) int {
	count := 0
	for ny := y - 1; ny <= y+1; ny++ {
		for nx := x - 1; nx <= x+1; nx++ {
			if nx == x && ny == y {
				continue // the cell itself
			}
			if nx < 0 || nx >= env.sizeX || ny < 0 || ny >= env.sizeY {
				continue // off the board
			}
			if env.field[ny][nx] {
				count++
			}
		}
	}
	return count
}
// evolve advances the board one generation under Conway's rules: a
// cell is alive next turn iff it has exactly three live neighbors, or
// is currently alive with exactly two.
func (env *environ) evolve() {
	next := make([][]bool, env.sizeY)
	for y := range next {
		next[y] = make([]bool, env.sizeX)
		for x := 0; x < env.sizeX; x++ {
			n := env.neighbors(x, y)
			next[y][x] = n == 3 || (env.field[y][x] && n == 2)
		}
	}
	env.field = next
}
// newEnviron builds the initial game state: a 38x20 empty board,
// cursor at the origin, paused, ticking every 100ms.
func newEnviron() *environ {
	env := &environ{
		sizeX:    38,
		sizeY:    20,
		pause:    true,
		duration: 100,
	}
	env.field = make([][]bool, env.sizeY)
	for y := range env.field {
		env.field[y] = make([]bool, env.sizeX)
	}
	return env
}
// getKey blocks until the next keyboard event and returns its key code
// and character; non-key events (e.g. resizes) are skipped.
func getKey() (termbox.Key, rune) {
	for {
		ev := termbox.PollEvent()
		if ev.Type == termbox.EventKey {
			return ev.Key, ev.Ch
		}
	}
}
// evolve drives the simulation from a background goroutine: every
// env.duration milliseconds it advances one generation and redraws,
// unless paused. Pause-state updates arrive on ch.
// NOTE(review): env.pause is also read/written by the main loop in
// play without synchronization — looks like a data race; confirm and
// guard if it matters.
func evolve(env *environ, ch <-chan bool) {
	tick := time.Tick(time.Duration(env.duration) * time.Millisecond)
	for {
		select {
		case <-tick:
			if !env.pause {
				env.evolve()
				env.show(env.pause)
			}
		case pause := <-ch:
			env.pause = pause
		}
	}
}
// play owns the terminal lifecycle: it initializes termbox, starts the
// background evolve goroutine, then loops handling keyboard input
// until Ctrl+C.
func play() {
	env := newEnviron()
	err := termbox.Init()
	if err != nil {
		panic(err)
	}
	defer termbox.Close()
	termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
	pauseCh := make(chan bool)
	go evolve(env, pauseCh)
	for {
		env.show(env.pause)
		key, ch := getKey()
		switch {
		case key == termbox.KeyEsc:
			// Toggle pause and notify the evolve goroutine.
			env.pause = !env.pause
			pauseCh <- env.pause
		case key == termbox.KeyCtrlC:
			return
		}
		// Cursor movement and cell editing only work while paused.
		if !env.pause {
			continue
		}
		switch {
		case key == termbox.KeyArrowLeft || ch == 'h': // Left
			env.moveCursor(-1, 0)
		case key == termbox.KeyArrowDown || ch == 'j': // Down
			env.moveCursor(0, 1)
		case key == termbox.KeyArrowUp || ch == 'k': // Up
			env.moveCursor(0, -1)
		case key == termbox.KeyArrowRight || ch == 'l': // Right
			env.moveCursor(1, 0)
		case key == termbox.KeySpace:
			// Flip the cell under the cursor.
			env.field[env.cursorY][env.cursorX] =
				!env.field[env.cursorY][env.cursorX]
		}
	}
}
// main hands control to the interactive game loop.
func main() {
	play()
}
|
package main
import (
"fmt"
"log"
"net/http"
"os"
"strings"
"time"
)
var port string
var host string
var redirectPath bool
func createRedirectURL(r *http.Request) string {
var redirectURL = "https://"
if host == "" {
host = r.Host
}
redirectURL = redirectURL + host
if redirectPath {
redirectURL = redirectURL + r.URL.Path
}
return redirectURL
}
// redirect answers every request with a 301 to the https:// equivalent
// and writes an access-log line to stdout.
func redirect(w http.ResponseWriter, r *http.Request) {
	target := createRedirectURL(r)
	http.Redirect(w, r, target, 301)
	// Log: timestamp, requested host/path, destination, client UA.
	fmt.Println(time.Now().Format(time.RFC3339), r.Host, r.URL.Path, "->", target, r.UserAgent())
}
// readEnv loads PORT (default "80"), HOST, and the REDIRECTPATH flag
// ("true", case-insensitive) from the environment into the package
// variables.
func readEnv() {
	port = os.Getenv("PORT")
	if port == "" {
		port = "80" // default
	}
	host = os.Getenv("HOST")
	redirectPath = strings.EqualFold(os.Getenv("REDIRECTPATH"), "true")
}
// main reads configuration from the environment, installs the
// catch-all redirect handler, and serves plain HTTP on the configured
// port until ListenAndServe fails.
func main() {
	readEnv()
	http.HandleFunc("/", redirect)
	err := http.ListenAndServe(":"+port, nil)
	if err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
|
package logic
// Platform identifies a platform as an integer code.
// NOTE(review): no constant values are declared in this chunk —
// confirm the enum constants defined elsewhere in the package.
type Platform int
|
package benchmark
import "bytes"
import "strings"
/*
The standard testing package has built-in benchmark support.
- How to run:
go test -bench .
- Example output:
testing: warning: no tests to run
BenchmarkCat3-4 10000000 192 ns/op 54 B/op 3 allocs/op
BenchmarkBuf3-4 10000000 231 ns/op 163 B/op 3 allocs/op
BenchmarkJoin3-4 10000000 164 ns/op 54 B/op 3 allocs/op
BenchmarkCat100-4 200000 6542 ns/op 7392 B/op 100 allocs/op
BenchmarkBuf100-4 1000000 2294 ns/op 2032 B/op 4 allocs/op
BenchmarkJoin100-4 1000000 1640 ns/op 1888 B/op 3 allocs/op
BenchmarkCat10000-4 100 10611378 ns/op 53168049 B/op 10000 allocs/op
BenchmarkBuf10000-4 5000 210755 ns/op 208608 B/op 11 allocs/op
BenchmarkJoin1000-4 10000 157947 ns/op 184832 B/op 3 allocs/op
PASS
*/
// cat concatenates strings with the += operator. Deliberately naive —
// each += builds a new string, so this is the quadratic baseline the
// benchmarks above compare against; do not "optimize" it.
func cat(ss ...string) string {
	var r string
	for _, s := range ss {
		r += s
	}
	return r
}
// buf concatenates strings using bytes.Buffer.
func buf(ss ...string) string {
	var b bytes.Buffer
	for _, s := range ss {
		b.WriteString(s)
	}
	return b.String()
}
// join concatenates strings using strings.Join.
func join(ss ...string) string {
	return strings.Join(ss, "")
}
|
package random
import (
"regexp"
"testing"
"github.com/stretchr/testify/assert"
)
// TestNumeric checks the default String length and that Numeric output
// contains digits only.
func TestNumeric(t *testing.T) {
	assert.Len(t, String(32), 32)
	r := New()
	// Anchor at both ends: the original "[0-9]+$" only constrained the
	// tail, so an invalid prefix would slip through.
	assert.Regexp(t, regexp.MustCompile("^[0-9]+$"), r.String(8, Numeric))
}
// TestLowercaseString checks the default String length and that
// Lowercase output is all lowercase letters.
func TestLowercaseString(t *testing.T) {
	assert.Len(t, String(32), 32)
	r := New()
	// Anchored at both ends so any non-lowercase character fails.
	assert.Regexp(t, regexp.MustCompile("^[a-z]+$"), r.String(8, Lowercase))
}
// TestAlphabeticString checks the default String length and that
// Alphabetic output contains letters only.
func TestAlphabeticString(t *testing.T) {
	assert.Len(t, String(32), 32)
	r := New()
	// Anchored at both ends so any non-letter character fails.
	assert.Regexp(t, regexp.MustCompile("^[A-Za-z]+$"), r.String(8, Alphabetic))
}
// TestAlphaNumericString checks the default String length and that
// Alphanumeric output contains letters and digits only.
func TestAlphaNumericString(t *testing.T) {
	assert.Len(t, String(32), 32)
	r := New()
	// Anchored at both ends so any other character fails.
	assert.Regexp(t, regexp.MustCompile("^[0-9A-Za-z]+$"), r.String(8, Alphanumeric))
}
|
package main
import "fmt"
import "strings"
// Variadic functions — earlier worked examples are kept below as commented-out code.
// func main() {
// var avg = calculate(2,3,4,5,6,2,4,6,3,5)
// var msg = fmt.Sprintf("Rata-rata : %.2f", avg)
// fmt.Println(msg)
// }
// func calculate(numbers ...int) float64 {
// var total int = 0
// for _, number := range numbers {
// total += number
// }
// var avg = float64(total) / float64(len(numbers))
// return avg
// }
// func main() {
// var numbers = []int{2,3,4,5,6,4,2,4,5,6,78}
// var avg = calculate(numbers ...)
// var msg = fmt.Sprintf("Rata-rata : %.2f", avg)
// fmt.Println(msg)
// }
// func calculate(numbers ...int) float64 {
// var total = 0
// for _, number := range numbers {
// total += number
// }
// var avg = float64(total) / float64(len(numbers))
// return avg
// }
// A function combining a regular parameter with a variadic parameter.
// func main() {
// var hobbies = []string{"football", "reading", "badminton"}
// var name = "Razzi"
// yourHobbies(name, hobbies...)
// }
// func yourHobbies(name string, hobbies ...string) {
// var hobbiesString = strings.Join(hobbies, ", ")
// fmt.Printf("Hello, my name is : %s \n", name)
// fmt.Printf("My hobbies are : %s \n", hobbiesString)
// }
// main demonstrates expanding a slice into a variadic parameter with
// the ... suffix.
func main() {
	categories := []string{"xiaomi", "flagship", "mid-range"}
	name := "Xiaomi Redmi 9A"
	handphone(name, categories...)
}
// handphone prints the phone name followed by its comma-separated
// categories.
func handphone(name string, categories ...string) {
	joined := strings.Join(categories, ", ")
	fmt.Printf("Phone name : %s \n", name)
	fmt.Printf("Categories : %s", joined)
}
|
package server
import "time"
// ServerConfig collects the configuration for a server instance:
// listen address, read/write timeouts, TLS material, a memory limit,
// and websocket-specific variants of those settings.
// NOTE(review): exact field semantics (e.g. whether Cert/Key are file
// paths or inline PEM, units of Memory) are not visible in this chunk
// — confirm against the server implementation.
type ServerConfig struct {
	Addr string
	ReadTimeout time.Duration
	WriteTimeout time.Duration
	Cert string
	Key string
	Memory int64
	Timeout time.Duration
	Group string
	Websocket bool
	WebsocketGroup string
	WebsocketPath string
	WebsocketOnly bool
	WebsocketReadTimeout time.Duration
	WebsocketWriteTimeout time.Duration
}
|
package main
// ListNode is a singly linked list node.
type ListNode struct {
	Val  int
	Next *ListNode
}

// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// build converts a sorted slice into a height-balanced BST by rooting
// each subtree at the middle element and recursing on both halves.
func build(a []int) *TreeNode {
	if len(a) == 0 {
		return nil
	}
	m := len(a) / 2
	node := &TreeNode{Val: a[m]}
	node.Left = build(a[:m])
	node.Right = build(a[m+1:])
	return node
}

// sortedListToBST copies the sorted list into a slice first, giving
// build O(1) access to each subrange's middle element.
func sortedListToBST(head *ListNode) *TreeNode {
	var vals []int
	for n := head; n != nil; n = n.Next {
		vals = append(vals, n.Val)
	}
	return build(vals)
}

func main() {}
|
package skyobject
import (
"errors"
"fmt"
)
// commiter is implemented by Ref, Refs and Dynamic kept in the
// updateStack; commit flushes a reference's unsaved changes.
type commiter interface {
	commit() (err error)
}

// updateStack tracks references with unsaved changes. contains mirrors
// stack membership for O(1) duplicate detection on Push.
type updateStack struct {
	stack    []commiter
	contains map[commiter]struct{}
}
// init allocates the membership set; required before the first Push.
func (u *updateStack) init() {
	u.contains = make(map[commiter]struct{})
}
// Push adds a *Ref, *Refs or *Dynamic to the tracking stack. The
// reference must be initialized; any other type is rejected. Pushing a
// reference that is already tracked is a no-op.
func (u *updateStack) Push(ref interface{}) (err error) {
	switch tt := ref.(type) {
	case *Ref:
		if tt.isInitialized() == false {
			return errors.New("Push uninitialized Ref")
		}
	case *Refs:
		if tt.isInitialized() == false {
			return errors.New("Push uninitialized Refs")
		}
	case *Dynamic:
		if tt.isInitialized() == false {
			return errors.New("Push uninitialized Dynamic")
		}
	default:
		return fmt.Errorf("push invalid type of reference %T", ref)
	}
	// All three accepted types are expected to implement commiter;
	// anything else here is a programming error.
	cm, ok := ref.(commiter)
	if !ok {
		panic(fmt.Errorf("%T does not implements commiter interface", ref))
	}
	if _, ok := u.contains[cm]; ok {
		return // already tracked
	}
	u.stack = append(u.stack, cm)
	u.contains[cm] = struct{}{}
	return
}
// Push registers a pointer to Ref, Refs or Dynamic so its changes are
// tracked and committed later. It fails for view-only trees.
func (p *Pack) Push(ref interface{}) (err error) {
	if p.flags&ViewOnly != 0 {
		return ErrViewOnlyTree
	}
	return p.updateStack.Push(ref)
}
// Pop removes and returns the most recently pushed element, or nil if
// the stack is empty, undoing its Push so its changes are no longer
// tracked.
//
// Fixes two defects in the original: it used a value receiver, so the
// mutated slice header was discarded, and it resliced with
// u.stack[len-1:], which kept only the last element instead of
// dropping it.
func (u *updateStack) Pop() (last interface{}) {
	if len(u.stack) == 0 {
		return
	}
	top := len(u.stack) - 1
	last = u.stack[top]
	u.stack[top] = nil // release the slot for the golang GC
	u.stack = u.stack[:top]
	delete(u.contains, last.(commiter))
	return
}
// Pop removes the last pushed element, undoing its Push (i.e. changes
// of that element will no longer be tracked). It is a no-op returning
// nil for view-only trees.
func (p *Pack) Pop() (last interface{}) {
	if p.flags&ViewOnly != 0 {
		return
	}
	return p.updateStack.Pop()
}
// Commit commits the tracked references from the top of the stack
// down, clearing the stack as it goes. It stops and returns on the
// first error, leaving the stack truncated just below the erroneous
// reference.
//
// The original kept looping after truncating on error and then indexed
// u.stack[i] on the shortened slice — an index-out-of-range panic.
func (u *updateStack) Commit() (err error) {
	for i := len(u.stack) - 1; i >= 0; i-- {
		if err = u.stack[i].commit(); err != nil {
			u.stack = u.stack[:i]
			return
		}
		delete(u.contains, u.stack[i])
		u.stack[i] = nil // golang GC
	}
	u.stack = u.stack[:0]
	return
}
// Commit commits all unsaved changes and clears the stack. It stops on
// the first error; on error the stack is cleared down to the erroneous
// reference.
func (p *Pack) Commit() (err error) {
	if p.flags&ViewOnly != 0 {
		return ErrViewOnlyTree
	}
	return p.updateStack.Commit()
}
// ClearStack drops every tracked reference without committing and
// resets the membership set.
func (u *updateStack) ClearStack() {
	u.contains = make(map[commiter]struct{})
	for i := range u.stack {
		u.stack[i] = nil // release for the GC
	}
	u.stack = u.stack[:0]
}
// ClearStack clears the stack without committing anything. It is a
// no-op for view-only trees.
func (p *Pack) ClearStack() {
	if p.flags&ViewOnly != 0 {
		return
	}
	p.updateStack.ClearStack()
}
|
package sortedSet_test
import (
"math/rand"
"testing"
"github.com/lleo/go-functional-collections/key"
"github.com/lleo/go-functional-collections/sortedSet"
)
// buildKeys generates numKeys+numKeysXtra distinct integer keys,
// shuffles them, and returns the first numKeys as the fixture keys and
// the remainder as the "extra" (non-member) keys.
func buildKeys(numKeys, numKeysXtra int) ([]key.Sort, []key.Sort) {
	total := numKeys + numKeysXtra
	keys := make([]key.Sort, total)
	for i := range keys {
		keys[i] = key.Int(i)
	}
	// Fisher-Yates shuffle.
	for i := total - 1; i > 0; i-- {
		j := rand.Intn(i + 1)
		keys[i], keys[j] = keys[j], keys[i]
	}
	return keys[:numKeys], keys[numKeys:]
}
// buildSet inserts every key into a fresh persistent set; Set returns
// a new version each time, so keep the latest. (The loop variable is
// named k to avoid shadowing the imported key package.)
func buildSet(keys []key.Sort) *sortedSet.Set {
	set := sortedSet.New()
	for _, k := range keys {
		set = set.Set(k)
	}
	return set
}
// Fixture sizes. The suffixes read as Roman numerals: M is one
// thousand and MM is one million (so NumKeys1M == 1000 and
// NumKeys1MM == 1,000,000). Each size has a matching pool of "extra"
// keys that are not members of the pre-built set.
const NumKeys10 = 1 * 10
const NumKeys100 = 1 * 100
const NumKeys1M = 1 * 1000
const NumKeys10M = 10 * 1000
const NumKeys100M = 100 * 1000
const NumKeys1MM = 1 * 1000 * 1000
const NumKeys10MM = 10 * 1000 * 1000
const NumKeysExtra10 = 2 * (NumKeys10 / 10)
const NumKeysExtra100 = 2 * (NumKeys100 / 10)
const NumKeysExtra1M = 2 * (NumKeys1M / 10)
const NumKeysExtra10M = 2 * (NumKeys10M / 10)
const NumKeysExtra100M = 20 * (NumKeys100M / 10)
const NumKeysExtra1MM = 20 * (NumKeys1MM / 10)
const NumKeysExtra10MM = 20 * (NumKeys10MM / 10)
// Package-level caches so each benchmark builds its fixture set and
// extra-key pool only once per `go test` process.
var SSet10 *sortedSet.Set
var SSet100 *sortedSet.Set
var SSet1M *sortedSet.Set
var SSet10M *sortedSet.Set
var SSet100M *sortedSet.Set
var SSet1MM *sortedSet.Set
var SSet10MM *sortedSet.Set
var XtraKeys10 []key.Sort
var XtraKeys100 []key.Sort
var XtraKeys1M []key.Sort
var XtraKeys10M []key.Sort
var XtraKeys100M []key.Sort
var XtraKeys1MM []key.Sort
var XtraKeys10MM []key.Sort
func BenchmarkSetOne10(b *testing.B) {
//log.Printf("BenchmarkSetOne10: called b.N=%d\n", b.N)
var keys, XtraKeys10 []key.Sort
if SSet10 == nil || XtraKeys10 == nil {
//log.Println("Generating Sset10 & XtraKeys10")
keys, XtraKeys10 = buildKeys(NumKeys10, NumKeysExtra10)
SSet10 = buildSet(keys)
}
var s = SSet10
var xtraKeys = XtraKeys10
b.ResetTimer()
for i := 0; i < b.N; i++ {
var i = rand.Int() % NumKeysExtra10
_ = s.Set(xtraKeys[i])
}
}
func BenchmarkSetOne100(b *testing.B) {
//log.Printf("BenchmarkSetOne100: called b.N=%d\n", b.N)
var keys, XtraKeys100 []key.Sort
if SSet100 == nil || XtraKeys100 == nil {
//log.Println("Generating Sset100 & XtraKeys100")
keys, XtraKeys100 = buildKeys(NumKeys100, NumKeysExtra100)
SSet100 = buildSet(keys)
}
var s = SSet100
var xtraKeys = XtraKeys100
b.ResetTimer()
for i := 0; i < b.N; i++ {
var i = rand.Int() % NumKeysExtra100
_ = s.Set(xtraKeys[i])
}
}
func BenchmarkSetOne1M(b *testing.B) {
//log.Printf("BenchmarkSetOne1M: called b.N=%d\n", b.N)
var keys, XtraKeys1M []key.Sort
if SSet1M == nil || XtraKeys1M == nil {
//log.Println("Generating Sset1M & XtraKeys1M")
keys, XtraKeys1M = buildKeys(NumKeys1M, NumKeysExtra1M)
SSet1M = buildSet(keys)
}
var s = SSet1M
var xtraKeys = XtraKeys1M
b.ResetTimer()
for i := 0; i < b.N; i++ {
var i = rand.Int() % NumKeysExtra1M
_ = s.Set(xtraKeys[i])
}
}
func BenchmarkSetOne10M(b *testing.B) {
//log.Printf("BenchmarkSetOne10M: called b.N=%d\n", b.N)
var keys, XtraKeys10M []key.Sort
if SSet10M == nil || XtraKeys10M == nil {
//log.Println("Generating Sset10M & XtraKeys10M")
keys, XtraKeys10M = buildKeys(NumKeys10M, NumKeysExtra10M)
SSet10M = buildSet(keys)
}
var s = SSet10M
var xtraKeys = XtraKeys10M
b.ResetTimer()
for i := 0; i < b.N; i++ {
var i = rand.Int() % NumKeysExtra10M
_ = s.Set(xtraKeys[i])
}
}
func BenchmarkSetOne100M(b *testing.B) {
//log.Printf("BenchmarkSetOne100M: called b.N=%d\n", b.N)
var keys, XtraKeys100M []key.Sort
if SSet100M == nil || XtraKeys100M == nil {
//log.Println("Generating Sset100M & XtraKeys100M")
keys, XtraKeys100M = buildKeys(NumKeys100M, NumKeysExtra100M)
SSet100M = buildSet(keys)
}
var s = SSet100M
var xtraKeys = XtraKeys100M
b.ResetTimer()
for i := 0; i < b.N; i++ {
var i = rand.Int() % NumKeysExtra100M
_ = s.Set(xtraKeys[i])
}
}
func BenchmarkSetOne1MM(b *testing.B) {
//log.Printf("BenchmarkSetOne1MM: called b.N=%d\n", b.N)
var keys, XtraKeys1MM []key.Sort
if SSet1MM == nil || XtraKeys1MM == nil {
//log.Println("Generating Sset1MM & XtraKeys1MM")
keys, XtraKeys1MM = buildKeys(NumKeys1MM, NumKeysExtra1MM)
SSet1MM = buildSet(keys)
}
var s = SSet1MM
var xtraKeys = XtraKeys1MM
b.ResetTimer()
for i := 0; i < b.N; i++ {
var i = rand.Int() % NumKeysExtra1MM
_ = s.Set(xtraKeys[i])
}
}
func BenchmarkSetOne10MM(b *testing.B) {
//log.Printf("BenchmarkSetOne10MM: called b.N=%d\n", b.N)
var keys, XtraKeys10MM []key.Sort
if SSet10MM == nil || XtraKeys10MM == nil {
//log.Println("Generating Sset10MM & XtraKeys10MM")
keys, XtraKeys10MM = buildKeys(NumKeys10MM, NumKeysExtra10MM)
SSet10MM = buildSet(keys)
}
var s = SSet10MM
var xtraKeys = XtraKeys10MM
b.ResetTimer()
for i := 0; i < b.N; i++ {
//START HERE
var i = rand.Int() % NumKeysExtra10MM
_ = s.Set(xtraKeys[i])
}
}
|
package faker
// Hackier is the interface of hacker-jargon word generators.
// NOTE(review): "Hackier" looks like a misspelling of "Hacker", but
// the name is exported, so renaming would break callers.
type Hackier interface {
	Abbreviation() string
	Adjective() string
	Noun() string
	Verb() string
	Ingverb() string
	Phrase() string
}
// Hacker generates hacker-jargon words; it embeds *Fake for the data
// picking machinery.
type Hacker struct {
	*Fake
}
// Abbreviation returns an abbreviation drawn from the hacker
// abbreviation data set.
func (h *Hacker) Abbreviation() string {
	return h.pick(hackerPrefix + "/abbreviation")
}
// Adjective returns an adjective drawn from the hacker adjective data
// set.
func (h *Hacker) Adjective() string {
	return h.pick(hackerPrefix + "/adjective")
}
// Noun returns a noun drawn from the hacker noun data set.
func (h *Hacker) Noun() string {
	return h.pick(hackerPrefix + "/noun")
}
// Verb returns a verb drawn from the hacker verb data set.
func (h *Hacker) Verb() string {
	return h.pick(hackerPrefix + "/verb")
}
// Ingverb returns an "-ing" verb drawn from the hacker ingverb data
// set.
func (h *Hacker) Ingverb() string {
	return h.pick(hackerPrefix + "/ingverb")
}
// Phrase returns a hacker phrase built by substituting freshly picked
// hacker words into a phrase template via mustache-style placeholders.
func (h *Hacker) Phrase() string {
	// One candidate word per placeholder kind the templates may use.
	hackerData := map[string]string{
		"abbreviation": h.Hacker().Abbreviation(),
		"adjective":    h.Hacker().Adjective(),
		"ingverb":      h.Hacker().Ingverb(),
		"noun":         h.Hacker().Noun(),
		"verb":         h.Hacker().Verb(),
	}
	phrase := h.pick(hackerPrefix + "/phrase")
	return mustache(phrase, hackerData)
}
|
package presigner
import (
"context"
"sync"
"time"
"github.com/Cloud-Foundations/golib/pkg/log"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/arn"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/sts"
)
const (
	// RefreshOnDemand regenerates a presigned URL only when requested
	// and the cached one has expired (the default policy).
	RefreshOnDemand = iota
	// RefreshAutomatically keeps the presigned URL refreshed in the
	// background.
	RefreshAutomatically
)
// Params configures New. Every field is optional; zero values select
// defaults (in particular RefreshPolicy defaults to RefreshOnDemand).
type Params struct {
	// Optional parameters.
	AwsConfig *aws.Config
	Logger log.DebugLogger
	RefreshPolicy uint // Default is RefreshOnDemand.
	StsClient *sts.Client
	StsPresignClient *sts.PresignClient
}
// Presigner exposes the caller's normalized IAM ARN and produces
// presigned GetCallerIdentity requests usable as identity tokens.
type Presigner interface {
	GetCallerARN() arn.ARN
	PresignGetCallerIdentity(ctx context.Context) (
		*v4.PresignedHTTPRequest, error)
}
// presignerT is the concrete Presigner; it caches the last presigned
// request until it expires.
type presignerT struct {
	callerArn arn.ARN
	params Params
	mutex sync.Mutex // Protect everything below.
	presignedExpiration time.Time
	presignedRequest *v4.PresignedHTTPRequest
}

// Interface checks.
var _ Presigner = (*presignerT)(nil)
// New will create a presigner client which caches presigned URLs until
// they expire (~15 minutes).
func New(params Params) (*presignerT, error) {
	return newPresigner(params)
}
// GetCallerARN will get the normalised ARN of the caller. The ARN will
// have the form: arn:aws:iam::$AccountId:role/$RoleName
func (p *presignerT) GetCallerARN() arn.ARN { return p.callerArn }
// PresignGetCallerIdentity will generate a presigned URL (token) which
// may be used to verify the AWS IAM identity of the token bearer.
func (p *presignerT) PresignGetCallerIdentity(ctx context.Context) (
	*v4.PresignedHTTPRequest, error) {
	return p.presignGetCallerIdentity(ctx)
}
|
package repo
import (
"errors"
"fmt"
"os"
"path"
"strings"
"github.com/ghodss/yaml"
"github.com/Clever/catapult/gen-go/models"
)
// DiscoverApplications finds any launch config files in the specified
// directory and returns a map with the application name as the key and
// the corresponding launch config file as the value. DB launch configs
// (files ending in "-db.yml") are ignored.
func DiscoverApplications(dir string) (map[string]*models.LaunchConfig, error) {
	fe, err := os.ReadDir(dir)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return nil, fmt.Errorf("directory %s not found: %w", dir, err)
		}
		// Wrap with %w (the original used %v here and below, which
		// broke errors.Is/As on the returned chain).
		return nil, fmt.Errorf("failed to read launch directory: %w", err)
	}
	m := map[string]*models.LaunchConfig{}
	for _, f := range fe {
		if f.IsDir() {
			continue
		}
		name := f.Name()
		// DB launch configs are deliberately skipped.
		if strings.HasSuffix(name, "-db.yml") {
			continue
		}
		if path.Ext(name) != ".yml" {
			continue
		}
		bs, err := os.ReadFile(path.Join(dir, name))
		if err != nil {
			return nil, fmt.Errorf("failed to read %s: %w", name, err)
		}
		lc := models.LaunchConfig{}
		if err := yaml.Unmarshal(bs, &lc); err != nil {
			return nil, fmt.Errorf("failed to unmarshal yaml in %s: %w", name, err)
		}
		m[strings.TrimSuffix(name, ".yml")] = &lc
	}
	return m, nil
}
// Dockerfile returns the dockerfile name specified in the launch
// config if any is present, otherwise it returns an empty string.
func Dockerfile(lc *models.LaunchConfig) string {
	if lc.Build != nil && lc.Build.Docker != nil {
		return lc.Build.Docker.File
	}
	return ""
}
// IsDockerRunType reports whether the launch config runs as docker.
// For legacy support, both an empty run type and a missing run object
// count as docker.
func IsDockerRunType(lc *models.LaunchConfig) bool {
	r := lc.Run
	if r == nil {
		// No run object also counts as docker.
		return true
	}
	return r.Type == models.RunTypeDocker || r.Type == ""
}
// IsLambdaRunType reports whether the launch config explicitly
// specifies a run type of lambda; a missing run object is not lambda.
func IsLambdaRunType(lc *models.LaunchConfig) bool {
	if r := lc.Run; r != nil {
		return r.Type == models.RunTypeLambda
	}
	return false
}
// ArtifactName returns the artifact name for the application: the app
// name by default, or the launch config's explicit override. The
// override exists so several applications (e.g. sso and non-sso
// variants of the same code) can share one artifact.
func ArtifactName(appName string, lc *models.LaunchConfig) string {
	if b := lc.Build; b != nil && b.Artifact != nil && b.Artifact.Name != "" {
		return b.Artifact.Name
	}
	return appName
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package storage
import (
"bytes"
"context"
"fmt"
"io"
"math/rand"
"strconv"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/uint128"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/datadriven"
"github.com/cockroachdb/errors"
)
// TestMVCCHistories verifies that sequences of MVCC reads and writes
// perform properly.
//
// The input files use the following DSL:
//
// txn_begin t=<name> [ts=<int>[,<int>]] [globalUncertaintyLimit=<int>[,<int>]]
// txn_remove t=<name>
// txn_restart t=<name>
// txn_update t=<name> t2=<name>
// txn_step t=<name> [n=<int>]
// txn_advance t=<name> ts=<int>[,<int>]
// txn_status t=<name> status=<txnstatus>
//
// resolve_intent t=<name> k=<key> [status=<txnstatus>]
// check_intent k=<key> [none]
//
// cput [t=<name>] [ts=<int>[,<int>]] [resolve [status=<txnstatus>]] k=<key> v=<string> [raw] [cond=<string>]
// del [t=<name>] [ts=<int>[,<int>]] [resolve [status=<txnstatus>]] k=<key>
// del_range [t=<name>] [ts=<int>[,<int>]] [resolve [status=<txnstatus>]] k=<key> [end=<key>] [max=<max>] [returnKeys]
// get [t=<name>] [ts=<int>[,<int>]] [resolve [status=<txnstatus>]] k=<key> [inconsistent] [tombstones] [failOnMoreRecent] [localUncertaintyLimit=<int>[,<int>]]
// increment [t=<name>] [ts=<int>[,<int>]] [resolve [status=<txnstatus>]] k=<key> [inc=<val>]
// put [t=<name>] [ts=<int>[,<int>]] [resolve [status=<txnstatus>]] k=<key> v=<string> [raw]
// scan [t=<name>] [ts=<int>[,<int>]] [resolve [status=<txnstatus>]] k=<key> [end=<key>] [inconsistent] [tombstones] [reverse] [failOnMoreRecent] [localUncertaintyLimit=<int>[,<int>]] [max=<max>] [targetbytes=<target>]
//
// merge [ts=<int>[,<int>]] k=<key> v=<string> [raw]
//
// clear_range k=<key> end=<key>
//
// Where `<key>` can be a simple string, or a string
// prefixed by the following characters:
//
// - `=foo` means exactly key `foo`
// - `+foo` means `Key(foo).Next()`
// - `-foo` means `Key(foo).PrefixEnd()`
// - `%foo` means `append(LocalRangePrefix, "foo")`
//
// Additionally, the pseudo-command `with` enables sharing
// a group of arguments between multiple commands, for example:
// with t=A
// txn_begin
// with k=a
// put v=b
// resolve_intent
// Really means:
// txn_begin t=A
// put v=b k=a t=A
// resolve_intent k=a t=A
//
func TestMVCCHistories(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
// Everything reads/writes under the same prefix.
span := roachpb.Span{Key: keys.LocalMax, EndKey: roachpb.KeyMax}
datadriven.Walk(t, "testdata/mvcc_histories", func(t *testing.T, path string) {
// Default to random behavior wrt cluster version and separated
// intents.
oldClusterVersion := rand.Intn(2) == 0
enabledSeparated := rand.Intn(2) == 0
overridden := false
if strings.Contains(path, "_disallow_separated") {
oldClusterVersion = true
enabledSeparated = false
overridden = true
}
if strings.Contains(path, "_allow_separated") {
oldClusterVersion = false
enabledSeparated = false
overridden = true
}
if strings.Contains(path, "_enable_separated") {
oldClusterVersion = false
enabledSeparated = true
overridden = true
}
if !overridden {
log.Infof(context.Background(),
"randomly setting oldClusterVersion: %t, enableSeparated: %t",
oldClusterVersion, enabledSeparated)
}
settings := makeSettingsForSeparatedIntents(oldClusterVersion, enabledSeparated)
// We start from a clean slate in every test file.
engine := createTestPebbleEngineWithSettings(settings)
defer engine.Close()
reportDataEntries := func(buf *bytes.Buffer) error {
hasData := false
err := engine.MVCCIterate(span.Key, span.EndKey, MVCCKeyAndIntentsIterKind, func(r MVCCKeyValue) error {
hasData = true
if r.Key.Timestamp.IsEmpty() {
// Meta is at timestamp zero.
meta := enginepb.MVCCMetadata{}
if err := protoutil.Unmarshal(r.Value, &meta); err != nil {
fmt.Fprintf(buf, "meta: %v -> error decoding proto from %v: %v\n", r.Key, r.Value, err)
} else {
fmt.Fprintf(buf, "meta: %v -> %+v\n", r.Key, &meta)
}
} else {
fmt.Fprintf(buf, "data: %v -> %s\n", r.Key, roachpb.Value{RawBytes: r.Value}.PrettyPrint())
}
return nil
})
if !hasData {
buf.WriteString("<no data>\n")
}
return err
}
e := newEvalCtx(ctx, engine)
datadriven.RunTest(t, path, func(t *testing.T, d *datadriven.TestData) string {
// We'll be overriding cmd/cmdargs below, because the
// datadriven reader does not know about sub-commands.
defer func(pos, cmd string, cmdArgs []datadriven.CmdArg) {
d.Pos = pos
d.Cmd = cmd
d.CmdArgs = cmdArgs
}(d.Pos, d.Cmd, d.CmdArgs)
// The various evalCtx helpers want access to the current test
// and testdata structs.
e.t = t
e.td = d
switch d.Cmd {
case "skip":
if len(d.CmdArgs) == 0 {
skip.IgnoreLint(e.t, "skipped")
}
return d.Expected
case "run":
// Syntax: run [trace] [error]
// (other words - in particular "ok" - are accepted but ignored)
//
// "run" executes a script of zero or more operations from
// the commands library defined below.
// It stops upon the first error encountered, if any.
//
// Options:
// "trace" means detail each operation in the output.
// "error" means expect an error to occur. The specific error type/
// message to expect is spelled out in the expected output.
//
trace := false
if e.hasArg("trace") {
trace = true
}
expectError := false
if e.hasArg("error") {
expectError = true
}
// buf will accumulate the actual output, which the
// datadriven driver will use to compare to the expected
// output.
var buf bytes.Buffer
e.results.buf = &buf
e.results.traceIntentWrites = trace
// foundErr remembers which error was last encountered while
// executing the script under "run".
var foundErr error
// pos is the original <file>:<lineno> prefix computed by
// datadriven. It points to the top "run" command itself.
// We editing d.Pos in-place below by extending `pos` upon
// each new line of the script.
pos := d.Pos
// dataChange indicates whether some command in the script
// has modified the stored data. When this becomes true, the
// current content of storage is printed in the results
// buffer at the end.
dataChange := false
// txnChange indicates whether some command has modified
// a transaction object. When set, the last modified txn
// object is reported in the result buffer at the end.
txnChange := false
reportResults := func(printTxn, printData bool) {
if printTxn && e.results.txn != nil {
fmt.Fprintf(&buf, "txn: %v\n", e.results.txn)
}
if printData {
err := reportDataEntries(&buf)
if err != nil {
if foundErr == nil {
// Handle the error below.
foundErr = err
} else {
fmt.Fprintf(&buf, "error reading data: (%T:) %v\n", err, err)
}
}
}
}
// sharedCmdArgs is updated by "with" pseudo-commands,
// to pre-populate common arguments for the following
// indented commands.
var sharedCmdArgs []datadriven.CmdArg
// The lines of the script under "run".
lines := strings.Split(d.Input, "\n")
for i, line := range lines {
if short := strings.TrimSpace(line); short == "" || strings.HasPrefix(short, "#") {
// Comment or empty line. Do nothing.
continue
}
// Compute a line prefix, to clarify error message. We
// prefix a newline character because some text editor do
// not know how to jump to the location of an error if
// there are multiple file:line prefixes on the same line.
d.Pos = fmt.Sprintf("\n%s: (+%d)", pos, i+1)
// Trace the execution in testing.T, to clarify where we
// are in case an error occurs.
log.Infof(context.Background(), "TestMVCCHistories:\n\t%s: %s", d.Pos, line)
// Decompose the current script line.
var err error
d.Cmd, d.CmdArgs, err = datadriven.ParseLine(line)
if err != nil {
e.t.Fatalf("%s: %v", d.Pos, err)
}
// Expand "with" commands:
// with t=A
// txn_begin
// resolve_intent k=a
// is equivalent to:
// txn_begin t=A
// resolve_intent k=a t=A
isIndented := strings.TrimLeft(line, " \t") != line
if d.Cmd == "with" {
if !isIndented {
// Reset shared args.
sharedCmdArgs = d.CmdArgs
} else {
// Prefix shared args. We use prefix so that the
// innermost "with" can override/shadow the outermost
// "with".
sharedCmdArgs = append(d.CmdArgs, sharedCmdArgs...)
}
continue
} else if isIndented {
// line is indented. Inherit arguments.
if len(sharedCmdArgs) == 0 {
// sanity check.
e.Fatalf("indented command without prior 'with': %s", line)
}
// We prepend the args that are provided on the command
// itself so it's possible to override those provided
// via "with".
d.CmdArgs = append(d.CmdArgs, sharedCmdArgs...)
} else {
// line is not indented. Clear shared arguments.
sharedCmdArgs = nil
}
cmd := e.getCmd()
txnChange = txnChange || cmd.typ == typTxnUpdate
dataChange = dataChange || cmd.typ == typDataUpdate
if trace {
// If tracing is also requested by the datadriven input,
// we'll trace the statement in the actual results too.
fmt.Fprintf(&buf, ">> %s", d.Cmd)
for i := range d.CmdArgs {
fmt.Fprintf(&buf, " %s", &d.CmdArgs[i])
}
buf.WriteByte('\n')
}
// Run the command.
foundErr = cmd.fn(e)
if trace {
// If tracing is enabled, we report the intermediate results
// after each individual step in the script.
// This may modify foundErr too.
reportResults(cmd.typ == typTxnUpdate, cmd.typ == typDataUpdate)
}
if foundErr != nil {
// An error occurred. Stop the script prematurely.
break
}
}
// End of script.
if !trace {
// If we were not tracing, no results were printed yet. Do it now.
if txnChange || dataChange {
buf.WriteString(">> at end:\n")
}
reportResults(txnChange, dataChange)
}
signalError := e.t.Errorf
if txnChange || dataChange {
// We can't recover from an error and continue
// to proceed further tests, because the state
// may have changed from what the test may be expecting.
signalError = e.t.Fatalf
}
// Check for errors.
if foundErr == nil && expectError {
signalError("%s: expected error, got success", d.Pos)
return d.Expected
} else if foundErr != nil {
if expectError {
fmt.Fprintf(&buf, "error: (%T:) %v\n", foundErr, foundErr)
} else /* !expectError */ {
signalError("%s: expected success, found: (%T:) %v", d.Pos, foundErr, foundErr)
return d.Expected
}
}
// We're done. Report the actual results and errors to the
// datadriven executor.
return buf.String()
default:
e.t.Errorf("%s: unknown command: %s", d.Pos, d.Cmd)
return d.Expected
}
})
})
}
// getCmd retrieves the cmd entry for the current script line.
// The test is aborted if the command name is not registered in commands.
func (e *evalCtx) getCmd() cmd {
	e.t.Helper()
	c, ok := commands[e.td.Cmd]
	if !ok {
		e.Fatalf("unknown command: %s", e.td.Cmd)
	}
	return c
}
// cmd represents one supported script command: its classification
// (read-only / txn-mutating / data-mutating) and its implementation.
type cmd struct {
	typ cmdType
	fn  func(e *evalCtx) error
}

// cmdType classifies a command by the kind of state it mutates; it
// drives whether the txn and/or data are reported at the end of a run.
type cmdType int

const (
	typReadOnly cmdType = iota
	typTxnUpdate
	typDataUpdate
)
// commands is the list of all supported script commands, keyed by the
// command name as it appears in the datadriven input.
var commands = map[string]cmd{
	"txn_advance":     {typTxnUpdate, cmdTxnAdvance},
	"txn_begin":       {typTxnUpdate, cmdTxnBegin},
	"txn_ignore_seqs": {typTxnUpdate, cmdTxnIgnoreSeqs},
	"txn_remove":      {typTxnUpdate, cmdTxnRemove},
	"txn_restart":     {typTxnUpdate, cmdTxnRestart},
	"txn_status":      {typTxnUpdate, cmdTxnSetStatus},
	"txn_step":        {typTxnUpdate, cmdTxnStep},
	"txn_update":      {typTxnUpdate, cmdTxnUpdate},

	"resolve_intent": {typDataUpdate, cmdResolveIntent},
	// TODO(nvanbenschoten): test "resolve_intent_range".
	"check_intent": {typReadOnly, cmdCheckIntent},

	"clear_range": {typDataUpdate, cmdClearRange},
	"cput":        {typDataUpdate, cmdCPut},
	"del":         {typDataUpdate, cmdDelete},
	"del_range":   {typDataUpdate, cmdDeleteRange},
	"get":         {typReadOnly, cmdGet},
	"increment":   {typDataUpdate, cmdIncrement},
	"merge":       {typDataUpdate, cmdMerge},
	"put":         {typDataUpdate, cmdPut},
	"scan":        {typReadOnly, cmdScan},
}
// cmdTxnAdvance moves the current txn's write timestamp forward to the
// "ts" argument. Moving it backward (before ReadTimestamp) is rejected.
func cmdTxnAdvance(e *evalCtx) error {
	txn := e.getTxn(mandatory)
	ts := e.getTs(txn)
	if ts.Less(txn.ReadTimestamp) {
		e.Fatalf("cannot advance txn to earlier (%s) than its ReadTimestamp (%s)",
			ts, txn.ReadTimestamp)
	}
	txn.WriteTimestamp = ts
	e.results.txn = txn
	return nil
}
// cmdTxnBegin opens a new named transaction ("t") at timestamp "ts",
// optionally anchored at key "k" (defaults to KeyMin).
func cmdTxnBegin(e *evalCtx) error {
	var txnName string
	e.scanArg("t", &txnName)
	ts := e.getTs(nil)
	globalUncertaintyLimit := e.getTsWithName(nil, "globalUncertaintyLimit")
	key := roachpb.KeyMin
	if e.hasArg("k") {
		key = e.getKey()
	}
	txn, err := e.newTxn(txnName, ts, globalUncertaintyLimit, key)
	e.results.txn = txn
	return err
}
// cmdTxnIgnoreSeqs replaces the txn's ignored sequence-number ranges
// with the list parsed from the "seqs" argument ("a-b" entries).
func cmdTxnIgnoreSeqs(e *evalCtx) error {
	txn := e.getTxn(mandatory)
	ranges := []enginepb.IgnoredSeqNumRange{}
	for _, spec := range e.getList("seqs") {
		parts := strings.Split(spec, "-")
		if len(parts) != 2 {
			e.Fatalf("syntax error: expected 'a-b', got: '%s'", spec)
		}
		start, err := strconv.ParseInt(parts[0], 10, 32)
		if err != nil {
			e.Fatalf("%v", err)
		}
		end, err := strconv.ParseInt(parts[1], 10, 32)
		if err != nil {
			e.Fatalf("%v", err)
		}
		ranges = append(ranges, enginepb.IgnoredSeqNumRange{
			Start: enginepb.TxnSeq(start),
			End:   enginepb.TxnSeq(end),
		})
	}
	txn.IgnoredSeqNums = ranges
	e.results.txn = txn
	return nil
}
// cmdTxnRemove forgets the named transaction and clears the reported txn.
func cmdTxnRemove(e *evalCtx) error {
	txn := e.getTxn(mandatory)
	e.results.txn = nil
	delete(e.txns, txn.Name)
	return nil
}
// cmdTxnRestart restarts the txn at timestamp "ts" with default user
// priority and minimal txn priority.
func cmdTxnRestart(e *evalCtx) error {
	txn := e.getTxn(mandatory)
	ts := e.getTs(txn)
	up := roachpb.NormalUserPriority
	tp := enginepb.MinTxnPriority
	txn.Restart(up, tp, ts)
	e.results.txn = txn
	return nil
}
// cmdTxnSetStatus sets the txn status from the "status" argument
// (defaults to COMMITTED).
func cmdTxnSetStatus(e *evalCtx) error {
	txn := e.getTxn(mandatory)
	txn.Status = e.getTxnStatus()
	e.results.txn = txn
	return nil
}
// cmdTxnStep adjusts the txn sequence number: "seq" sets it to an
// absolute value, otherwise it is incremented by "n" (default 1).
func cmdTxnStep(e *evalCtx) error {
	txn := e.getTxn(mandatory)
	n := 1
	if e.hasArg("seq") {
		e.scanArg("seq", &n)
		txn.Sequence = enginepb.TxnSeq(n)
	} else {
		if e.hasArg("n") {
			e.scanArg("n", &n)
		}
		txn.Sequence += enginepb.TxnSeq(n)
	}
	e.results.txn = txn
	return nil
}
// cmdTxnUpdate merges the state of txn "t2" into the current txn "t"
// via Transaction.Update.
func cmdTxnUpdate(e *evalCtx) error {
	txn := e.getTxn(mandatory)
	var otherName string
	e.scanArg("t2", &otherName)
	other, err := e.lookupTxn(otherName)
	if err != nil {
		e.Fatalf("%v", err)
	}
	txn.Update(other)
	e.results.txn = txn
	return nil
}
// intentPrintingReadWriter wraps a ReadWriter and logs every intent
// write/clear call to buf before delegating to the wrapped writer.
type intentPrintingReadWriter struct {
	ReadWriter
	buf io.Writer
}

// PutIntent traces the call to buf, then delegates to the wrapped
// ReadWriter. The value bytes are elided from the trace ("_").
func (rw intentPrintingReadWriter) PutIntent(
	ctx context.Context,
	key roachpb.Key,
	value []byte,
	state PrecedingIntentState,
	txnDidNotUpdateMeta bool,
	txnUUID uuid.UUID,
) (int, error) {
	fmt.Fprintf(rw.buf, "called PutIntent(%v, _, %v, TDNUM(%t), %v)\n",
		key, state, txnDidNotUpdateMeta, txnUUID)
	return rw.ReadWriter.PutIntent(ctx, key, value, state, txnDidNotUpdateMeta, txnUUID)
}
// ClearIntent traces the call to buf, then delegates to the wrapped
// ReadWriter.
func (rw intentPrintingReadWriter) ClearIntent(
	key roachpb.Key, state PrecedingIntentState, txnDidNotUpdateMeta bool, txnUUID uuid.UUID,
) (int, error) {
	fmt.Fprintf(rw.buf, "called ClearIntent(%v, %v, TDNUM(%t), %v)\n",
		key, state, txnDidNotUpdateMeta, txnUUID)
	return rw.ReadWriter.ClearIntent(key, state, txnDidNotUpdateMeta, txnUUID)
}
// tryWrapForIntentPrinting wraps rw so that intent writes are traced
// into the results buffer, but only when tracing was requested.
func (e *evalCtx) tryWrapForIntentPrinting(rw ReadWriter) ReadWriter {
	if !e.results.traceIntentWrites {
		return rw
	}
	return intentPrintingReadWriter{ReadWriter: rw, buf: e.results.buf}
}
// cmdResolveIntent resolves the intent at key "k" held by txn "t" with
// the status given by "status" (default COMMITTED).
func cmdResolveIntent(e *evalCtx) error {
	txn := e.getTxn(mandatory)
	key := e.getKey()
	status := e.getTxnStatus()
	return e.resolveIntent(e.tryWrapForIntentPrinting(e.engine), key, txn, status)
}

// resolveIntent resolves the single-key intent at key owned by txn,
// marking it with resolveStatus.
func (e *evalCtx) resolveIntent(
	rw ReadWriter, key roachpb.Key, txn *roachpb.Transaction, resolveStatus roachpb.TransactionStatus,
) error {
	intent := roachpb.MakeLockUpdate(txn, roachpb.Span{Key: key})
	intent.Status = resolveStatus
	_, err := MVCCResolveWriteIntent(e.ctx, rw, nil, intent)
	return err
}
// cmdCheckIntent verifies the presence (default) or absence ("none") of
// an intent at key "k", printing the MVCC metadata when one is found.
func cmdCheckIntent(e *evalCtx) error {
	key := e.getKey()
	wantIntent := true
	if e.hasArg("none") {
		wantIntent = false
	}
	metaKey := mvccKey(key)
	var meta enginepb.MVCCMetadata
	ok, _, _, err := e.engine.MVCCGetProto(metaKey, &meta)
	if err != nil {
		return err
	}
	if !ok && wantIntent {
		return errors.Newf("meta: %v -> expected intent, found none", key)
	}
	if ok {
		// Print the metadata before failing so the expectation mismatch
		// is visible in the output.
		fmt.Fprintf(e.results.buf, "meta: %v -> %+v\n", key, &meta)
		if !wantIntent {
			return errors.Newf("meta: %v -> expected no intent, found one", key)
		}
	}
	return nil
}
// cmdClearRange wipes all MVCC data and intents in [k, end).
func cmdClearRange(e *evalCtx) error {
	key, endKey := e.getKeyRange()
	return e.engine.ClearMVCCRangeAndIntents(key, endKey)
}
// cmdCPut implements the "cput" command: a conditional put of "k"/"v"
// at the command timestamp, optionally transactional. "cond" supplies
// the expected existing value, "allow_missing" tolerates a missing key,
// and "resolve" resolves the resulting intent immediately.
func cmdCPut(e *evalCtx) error {
	txn := e.getTxn(optional)
	ts := e.getTs(txn)
	key := e.getKey()
	val := e.getVal()
	// Condition val is optional.
	var expVal []byte
	if e.hasArg("cond") {
		rexpVal := e.getValInternal("cond")
		expVal = rexpVal.TagAndDataBytes()
	}
	behavior := CPutFailIfMissing
	if e.hasArg("allow_missing") {
		behavior = CPutAllowIfMissing
	}
	resolve, resolveStatus := e.getResolve()

	return e.withWriter("cput", func(rw ReadWriter) error {
		if err := MVCCConditionalPut(e.ctx, rw, nil, key, ts, val, expVal, behavior, txn); err != nil {
			return err
		}
		if resolve {
			return e.resolveIntent(rw, key, txn, resolveStatus)
		}
		return nil
	})
}
// cmdDelete implements the "del" command: an MVCC deletion of key "k"
// at the command timestamp, optionally transactional; "resolve"
// resolves the resulting intent immediately.
func cmdDelete(e *evalCtx) error {
	txn := e.getTxn(optional)
	key := e.getKey()
	ts := e.getTs(txn)
	resolve, resolveStatus := e.getResolve()
	return e.withWriter("del", func(rw ReadWriter) error {
		if err := MVCCDelete(e.ctx, rw, nil, key, ts, txn); err != nil {
			return err
		}
		if resolve {
			return e.resolveIntent(rw, key, txn, resolveStatus)
		}
		return nil
	})
}
// cmdDeleteRange implements the "del_range" command: deletes keys in
// [k, end), bounded by "max", optionally returning the deleted keys and
// any resume span. "resolve" resolves the intent at the start key only.
func cmdDeleteRange(e *evalCtx) error {
	txn := e.getTxn(optional)
	key, endKey := e.getKeyRange()
	ts := e.getTs(txn)
	returnKeys := e.hasArg("returnKeys")
	max := 0
	if e.hasArg("max") {
		e.scanArg("max", &max)
	}

	resolve, resolveStatus := e.getResolve()
	return e.withWriter("del_range", func(rw ReadWriter) error {
		deleted, resumeSpan, num, err := MVCCDeleteRange(e.ctx, rw, nil, key, endKey, int64(max), ts, txn, returnKeys)
		if err != nil {
			return err
		}
		fmt.Fprintf(e.results.buf, "del_range: %v-%v -> deleted %d key(s)\n", key, endKey, num)
		// NB: this inner "key" shadows the outer start key for the
		// duration of the loop only.
		for _, key := range deleted {
			fmt.Fprintf(e.results.buf, "del_range: returned %v\n", key)
		}
		if resumeSpan != nil {
			fmt.Fprintf(e.results.buf, "del_range: resume span [%s,%s)\n", resumeSpan.Key, resumeSpan.EndKey)
		}

		if resolve {
			return e.resolveIntent(rw, key, txn, resolveStatus)
		}
		return nil
	})
}
// cmdGet implements the "get" command: an MVCC read of key "k" at the
// command timestamp, honoring the "inconsistent", "tombstones",
// "failOnMoreRecent" and "localUncertaintyLimit" options.
func cmdGet(e *evalCtx) error {
	txn := e.getTxn(optional)
	key := e.getKey()
	ts := e.getTs(txn)
	opts := MVCCGetOptions{Txn: txn}
	if e.hasArg("inconsistent") {
		opts.Inconsistent = true
		opts.Txn = nil
	}
	if e.hasArg("tombstones") {
		opts.Tombstones = true
	}
	if e.hasArg("failOnMoreRecent") {
		opts.FailOnMoreRecent = true
	}
	if e.hasArg("localUncertaintyLimit") {
		opts.LocalUncertaintyLimit = e.getTsWithName(nil, "localUncertaintyLimit")
	}
	val, intent, err := MVCCGet(e.ctx, e.engine, key, ts, opts)
	// NB: the error is returned below. This ensures the test can
	// ascertain no result is populated in the intent when an error
	// occurs.
	if intent != nil {
		fmt.Fprintf(e.results.buf, "get: %v -> intent {%s}\n", key, intent.Txn)
	}
	if val != nil {
		fmt.Fprintf(e.results.buf, "get: %v -> %v @%v\n", key, val.PrettyPrint(), val.Timestamp)
	} else {
		fmt.Fprintf(e.results.buf, "get: %v -> <no data>\n", key)
	}
	return err
}
// cmdIncrement implements the "increment" command: atomically adds
// "inc" (default 1) to the value at key "k" and reports the new value.
func cmdIncrement(e *evalCtx) error {
	txn := e.getTxn(optional)
	ts := e.getTs(txn)
	key := e.getKey()
	inc := int64(1)
	if e.hasArg("inc") {
		var incI int
		e.scanArg("inc", &incI)
		inc = int64(incI)
	}

	resolve, resolveStatus := e.getResolve()

	return e.withWriter("increment", func(rw ReadWriter) error {
		curVal, err := MVCCIncrement(e.ctx, rw, nil, key, ts, txn, inc)
		if err != nil {
			return err
		}
		fmt.Fprintf(e.results.buf, "inc: current value = %d\n", curVal)
		if resolve {
			return e.resolveIntent(rw, key, txn, resolveStatus)
		}
		return nil
	})
}
// cmdMerge implements the "merge" command: an MVCC merge of value "v"
// (or raw bytes with "raw") into key "k" at timestamp "ts".
func cmdMerge(e *evalCtx) error {
	key := e.getKey()
	// Reuse the shared "v"/"raw" value parser instead of duplicating
	// its logic inline; the semantics are identical.
	val := e.getValInternal("v")
	ts := e.getTs(nil)
	return e.withWriter("merge", func(rw ReadWriter) error {
		return MVCCMerge(e.ctx, rw, nil, key, ts, val)
	})
}
// cmdPut implements the "put" command: an MVCC write of "k"/"v" at the
// command timestamp, optionally transactional; "resolve" resolves the
// resulting intent immediately.
func cmdPut(e *evalCtx) error {
	txn := e.getTxn(optional)
	ts := e.getTs(txn)
	key := e.getKey()
	val := e.getVal()

	resolve, resolveStatus := e.getResolve()

	return e.withWriter("put", func(rw ReadWriter) error {
		if err := MVCCPut(e.ctx, rw, nil, key, ts, val, txn); err != nil {
			return err
		}
		if resolve {
			return e.resolveIntent(rw, key, txn, resolveStatus)
		}
		return nil
	})
}
// cmdScan implements the "scan" command: an MVCC scan over [k, end) at
// the command timestamp, honoring the "inconsistent", "tombstones",
// "reverse", "failOnMoreRecent", "localUncertaintyLimit", "max" and
// "targetbytes" options.
func cmdScan(e *evalCtx) error {
	txn := e.getTxn(optional)
	key, endKey := e.getKeyRange()
	ts := e.getTs(txn)
	opts := MVCCScanOptions{Txn: txn}
	if e.hasArg("inconsistent") {
		opts.Inconsistent = true
		opts.Txn = nil
	}
	if e.hasArg("tombstones") {
		opts.Tombstones = true
	}
	if e.hasArg("reverse") {
		opts.Reverse = true
	}
	if e.hasArg("failOnMoreRecent") {
		opts.FailOnMoreRecent = true
	}
	if e.hasArg("localUncertaintyLimit") {
		opts.LocalUncertaintyLimit = e.getTsWithName(nil, "localUncertaintyLimit")
	}
	if e.hasArg("max") {
		var n int
		e.scanArg("max", &n)
		opts.MaxKeys = int64(n)
	}
	// NB: previously this initializer was `key := "targetbytes"`, which
	// shadowed the roachpb.Key variable above with a string; renamed to
	// avoid the confusing shadow.
	if arg := "targetbytes"; e.hasArg(arg) {
		var tb int
		e.scanArg(arg, &tb)
		opts.TargetBytes = int64(tb)
	}
	res, err := MVCCScan(e.ctx, e.engine, key, endKey, ts, opts)
	// NB: the error is returned below. This ensures the test can
	// ascertain no result is populated in the intents when an error
	// occurs.
	for _, intent := range res.Intents {
		fmt.Fprintf(e.results.buf, "scan: %v -> intent {%s}\n", key, intent.Txn)
	}
	for _, val := range res.KVs {
		fmt.Fprintf(e.results.buf, "scan: %v -> %v @%v\n", val.Key, val.Value.PrettyPrint(), val.Value.Timestamp)
	}
	if res.ResumeSpan != nil {
		fmt.Fprintf(e.results.buf, "scan: resume span [%s,%s)\n", res.ResumeSpan.Key, res.ResumeSpan.EndKey)
	}
	if opts.TargetBytes > 0 {
		fmt.Fprintf(e.results.buf, "scan: %d bytes (target %d)\n", res.NumBytes, opts.TargetBytes)
	}
	if len(res.KVs) == 0 {
		fmt.Fprintf(e.results.buf, "scan: %v-%v -> <no data>\n", key, endKey)
	}
	return err
}
// evalCtx stores the current state of the environment of a running
// script.
type evalCtx struct {
	// results collects the output of the script commands.
	results struct {
		buf               io.Writer
		txn               *roachpb.Transaction
		traceIntentWrites bool
	}
	ctx        context.Context
	engine     Engine
	t          *testing.T
	td         *datadriven.TestData
	txns       map[string]*roachpb.Transaction
	txnCounter uint128.Uint128
}
// newEvalCtx returns a fresh evaluation context over the given engine,
// with no open transactions and the txn ID counter at 1.
func newEvalCtx(ctx context.Context, engine Engine) *evalCtx {
	e := &evalCtx{
		ctx:    ctx,
		engine: engine,
	}
	e.txns = make(map[string]*roachpb.Transaction)
	e.txnCounter = uint128.FromInts(0, 1)
	return e
}
// getTxnStatus parses the optional "status" argument into a
// TransactionStatus, defaulting to COMMITTED.
func (e *evalCtx) getTxnStatus() roachpb.TransactionStatus {
	status := roachpb.COMMITTED
	if e.hasArg("status") {
		var sn string
		e.scanArg("status", &sn)
		s, ok := roachpb.TransactionStatus_value[sn]
		if !ok {
			e.Fatalf("invalid status: %s", sn)
		}
		status = roachpb.TransactionStatus(s)
	}
	return status
}
// scanArg scans the named argument of the current command into dests,
// failing the test if it is missing or malformed.
func (e *evalCtx) scanArg(key string, dests ...interface{}) {
	e.t.Helper()
	e.td.ScanArgs(e.t, key, dests...)
}
// hasArg reports whether the current command carries an argument with
// the given key.
func (e *evalCtx) hasArg(key string) bool {
	for i := range e.td.CmdArgs {
		if e.td.CmdArgs[i].Key == key {
			return true
		}
	}
	return false
}
// Fatalf aborts the test with a message prefixed by the current script
// position.
func (e *evalCtx) Fatalf(format string, args ...interface{}) {
	e.t.Helper()
	e.td.Fatalf(e.t, format, args...)
}
// getResolve reports whether the "resolve" flag is present and, if so,
// the status to resolve with (from "status", default COMMITTED).
func (e *evalCtx) getResolve() (bool, roachpb.TransactionStatus) {
	e.t.Helper()
	if e.hasArg("resolve") {
		return true, e.getTxnStatus()
	}
	return false, roachpb.PENDING
}
// getTs resolves the command timestamp from the "ts" argument, falling
// back to the txn's read timestamp when absent.
func (e *evalCtx) getTs(txn *roachpb.Transaction) hlc.Timestamp {
	return e.getTsWithName(txn, "ts")
}

// getTsWithName resolves a timestamp from the named argument; when the
// argument is absent, the txn's read timestamp (or zero) is returned.
func (e *evalCtx) getTsWithName(txn *roachpb.Transaction, name string) hlc.Timestamp {
	var ts hlc.Timestamp
	if txn != nil {
		ts = txn.ReadTimestamp
	}
	if !e.hasArg(name) {
		return ts
	}
	var tsS string
	e.scanArg(name, &tsS)
	ts, err := hlc.ParseTimestamp(tsS)
	if err != nil {
		e.Fatalf("%v", err)
	}
	return ts
}
// optArg indicates whether a script argument (e.g. the txn name "t")
// must be present.
type optArg int

const (
	optional optArg = iota
	mandatory
)
// getList returns the list of values attached to the named argument,
// failing the test when the argument is absent.
func (e *evalCtx) getList(argName string) []string {
	for i := range e.td.CmdArgs {
		if arg := e.td.CmdArgs[i]; arg.Key == argName {
			return arg.Vals
		}
	}
	e.Fatalf("missing argument: %s", argName)
	return nil
}
// getTxn looks up the transaction named by the "t" argument. With
// opt == optional, a missing "t" (or an explicit "notxn") yields nil.
func (e *evalCtx) getTxn(opt optArg) *roachpb.Transaction {
	e.t.Helper()
	if opt == optional && (e.hasArg("notxn") || !e.hasArg("t")) {
		return nil
	}
	var txnName string
	e.scanArg("t", &txnName)
	txn, err := e.lookupTxn(txnName)
	if err != nil {
		e.Fatalf("%v", err)
	}
	return txn
}
// withWriter runs fn against the engine, or against a batch when the
// "batched" flag is present (committing the batch on success). The
// writer is wrapped for intent tracing when tracing is enabled, and the
// batch's empty/non-empty state is reported after the write.
func (e *evalCtx) withWriter(cmd string, fn func(_ ReadWriter) error) error {
	var rw ReadWriter
	rw = e.engine
	var batch Batch
	if e.hasArg("batched") {
		batch = e.engine.NewBatch()
		defer batch.Close()
		rw = batch
	}
	rw = e.tryWrapForIntentPrinting(rw)
	origErr := fn(rw)
	if batch != nil {
		batchStatus := "non-empty"
		if batch.Empty() {
			batchStatus = "empty"
		}
		fmt.Fprintf(e.results.buf, "%s: batch after write is %s\n", cmd, batchStatus)
	}
	if origErr != nil {
		return origErr
	}
	if batch != nil {
		return batch.Commit(true)
	}
	return nil
}
// getVal returns the roachpb.Value parsed from the "v" argument.
func (e *evalCtx) getVal() roachpb.Value { return e.getValInternal("v") }

// getValInternal parses the named argument into a roachpb.Value. With
// the "raw" flag the bytes are used verbatim (no value header);
// otherwise the value is encoded as a string.
func (e *evalCtx) getValInternal(argName string) roachpb.Value {
	var value string
	e.scanArg(argName, &value)
	var val roachpb.Value
	if e.hasArg("raw") {
		val.RawBytes = []byte(value)
	} else {
		val.SetString(value)
	}
	return val
}
// getKey parses the mandatory "k" argument into a roachpb.Key.
func (e *evalCtx) getKey() roachpb.Key {
	e.t.Helper()
	var keyS string
	e.scanArg("k", &keyS)
	return toKey(keyS)
}

// getKeyRange parses "k" (and optionally "end") into a [sk, ek) span.
// Without "end", the span covers just the single key k.
func (e *evalCtx) getKeyRange() (sk, ek roachpb.Key) {
	e.t.Helper()
	var keyS string
	e.scanArg("k", &keyS)
	sk = toKey(keyS)
	ek = sk.Next()
	if e.hasArg("end") {
		var endKeyS string
		e.scanArg("end", &endKeyS)
		ek = toKey(endKeyS)
	}
	return sk, ek
}
// newTxn creates and registers a new PENDING transaction with a unique
// ID derived from the monotonically increasing txnCounter. Opening a
// name that is already in use fails the test.
func (e *evalCtx) newTxn(
	txnName string, ts, globalUncertaintyLimit hlc.Timestamp, key roachpb.Key,
) (*roachpb.Transaction, error) {
	if _, ok := e.txns[txnName]; ok {
		e.Fatalf("txn %s already open", txnName)
	}
	txn := &roachpb.Transaction{
		TxnMeta: enginepb.TxnMeta{
			ID:             uuid.FromUint128(e.txnCounter),
			Key:            []byte(key),
			WriteTimestamp: ts,
			Sequence:       0,
		},
		Name:                   txnName,
		ReadTimestamp:          ts,
		GlobalUncertaintyLimit: globalUncertaintyLimit,
		Status:                 roachpb.PENDING,
	}
	e.txnCounter = e.txnCounter.Add(1)
	e.txns[txnName] = txn
	return txn, nil
}
// lookupTxn returns the open transaction registered under txnName,
// failing the test when no such transaction exists.
func (e *evalCtx) lookupTxn(txnName string) (*roachpb.Transaction, error) {
	if txn, ok := e.txns[txnName]; ok {
		return txn, nil
	}
	e.Fatalf("txn %s not open", txnName)
	return nil, nil
}
// toKey converts a script key spec into a roachpb.Key. A leading marker
// selects a transformation: '+' yields Next() of the remainder, '='
// takes the remainder verbatim, '-' yields PrefixEnd(), and '%'
// prefixes the remainder with keys.LocalRangePrefix. Anything else is
// used as-is.
func toKey(s string) roachpb.Key {
	if len(s) == 0 {
		return roachpb.Key(s)
	}
	switch s[0] {
	case '+':
		return roachpb.Key(s[1:]).Next()
	case '=':
		return roachpb.Key(s[1:])
	case '-':
		return roachpb.Key(s[1:]).PrefixEnd()
	case '%':
		return append(keys.LocalRangePrefix, s[1:]...)
	default:
		return roachpb.Key(s)
	}
}
|
package os
import (
"fmt"
"os"
)
//CreateDirectory creates dir with target name
//If you want to full access dir,pass os.ModePerm as FileMode parameter
func CreateDirectory(name string, permissionBits os.FileMode) error {
if _, err := os.Stat(name); os.IsNotExist(err) {
return os.Mkdir(name, permissionBits)
}
return nil
}
//CreateFile try to open a target file,if error,create one
//If you want to full access file,pass os.ModePerm as FileMode parameter
func CreateFile(fullPath string, mode os.FileMode) (*os.File, error) {
f, err := os.OpenFile(fullPath, os.O_APPEND|os.O_CREATE|os.O_RDWR, mode)
defer f.Close()
if err != nil {
f, err = os.Create(fullPath)
if err != nil {
fmt.Printf("Create file error: %v \n", err)
return nil, err
}
}
return f, nil
}
|
/*
* Wodby API Client
*
* Wodby Developer Documentation https://wodby.com/docs/dev
*
* API version: 3.0.18
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package client
// Stack is the Wodby API representation of a stack: a titled,
// versioned collection of services belonging to an organization.
// NB: this type is Swagger-generated; regenerate rather than hand-edit.
type Stack struct {
	Created        int32          `json:"created"`
	Id             string         `json:"id"`
	NewVersion     string         `json:"new_version,omitempty"`
	OrgId          string         `json:"org_id"`
	RevisionNumber int32          `json:"revision_number,omitempty"`
	Services       []StackService `json:"services"`
	Title          string         `json:"title"`
	Updated        int32          `json:"updated"`
	Version        string         `json:"version,omitempty"`
}
|
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package worksheets
import (
"fmt"
)
// expression is a computable AST node: it can report the worksheet
// field names it depends on, and compute its value against a worksheet.
type expression interface {
	Args() []string
	Compute(ws *Worksheet) (Value, error)
}

// Assert that all expressions implement the expression interface
var _ = []expression{
	&Undefined{},
	&Number{},
	&Text{},
	&Bool{},
	&tExternal{},
	&ePlugin{},
	&tVar{},
	&tUnop{},
	&tBinop{},
	&tReturn{},
}
// Args panics: a tExternal is a plugin placeholder that must be
// resolved to an ePlugin before evaluation.
// (Previously this used fmt.Sprintf with no format verbs — an
// unnecessary call flagged by go vet / staticcheck S1039.)
func (e *tExternal) Args() []string {
	panic("unresolved plugin in worksheet")
}
// Compute panics: a tExternal is a plugin placeholder that must be
// resolved before evaluation; the worksheet name aids debugging.
func (e *tExternal) Compute(ws *Worksheet) (Value, error) {
	panic(fmt.Sprintf("unresolved plugin in worksheet(%s)", ws.def.name))
}

// Undefined is a literal: it has no dependencies and computes to itself.
func (e *Undefined) Args() []string {
	return nil
}

func (e *Undefined) Compute(ws *Worksheet) (Value, error) {
	return e, nil
}
// Number, Text, and Bool are literals: they have no dependencies and
// compute to themselves.
func (e *Number) Args() []string {
	return nil
}

func (e *Number) Compute(ws *Worksheet) (Value, error) {
	return e, nil
}

func (e *Text) Args() []string {
	return nil
}

func (e *Text) Compute(ws *Worksheet) (Value, error) {
	return e, nil
}

func (e *Bool) Args() []string {
	return nil
}

func (e *Bool) Compute(ws *Worksheet) (Value, error) {
	return e, nil
}
// tVar references a worksheet field: it depends on exactly that field
// and computes to the field's current value.
func (e *tVar) Args() []string {
	return []string{e.name}
}

func (e *tVar) Compute(ws *Worksheet) (Value, error) {
	return ws.Get(e.name)
}
// tUnop delegates its dependencies to its operand.
func (e *tUnop) Args() []string {
	return e.expr.Args()
}

// Compute evaluates the operand and applies the unary operator.
// Undefined operands propagate unchanged.
func (e *tUnop) Compute(ws *Worksheet) (Value, error) {
	result, err := e.expr.Compute(ws)
	if err != nil {
		return nil, err
	}
	if _, ok := result.(*Undefined); ok {
		return result, nil
	}
	switch e.op {
	case opNot:
		bResult, ok := result.(*Bool)
		if !ok {
			return nil, fmt.Errorf("! on non-bool")
		}
		return &Bool{!bResult.value}, nil
	default:
		panic(fmt.Sprintf("not implemented for %s", e.op))
	}
}
// Args returns the union (with duplicates) of both operands'
// dependencies. A fresh slice is built so neither operand's backing
// array is handed to the caller.
func (e *tBinop) Args() []string {
	var args []string
	args = append(args, e.left.Args()...)
	args = append(args, e.right.Args()...)
	return args
}
// Compute evaluates a binary operation. Boolean ops short-circuit and
// propagate Undefined; equality compares any two values; numeric ops
// propagate Undefined operands and otherwise require two Numbers,
// honoring an optional rounding mode (mandatory for division).
//
// BUG FIX: the numeric path previously checked for *Undefined *after*
// asserting the operand was a *Number, making the Undefined checks
// unreachable (an Undefined operand produced "op on non-number" instead
// of propagating, unlike the bool path). The checks now run first.
func (e *tBinop) Compute(ws *Worksheet) (Value, error) {
	left, err := e.left.Compute(ws)
	if err != nil {
		return nil, err
	}

	// bool operations: short-circuit before computing the right side.
	if e.op == opAnd || e.op == opOr {
		if _, ok := left.(*Undefined); ok {
			return left, nil
		}
		bLeft, ok := left.(*Bool)
		if !ok {
			return nil, fmt.Errorf("op on non-bool")
		}
		if (e.op == opAnd && !bLeft.value) || (e.op == opOr && bLeft.value) {
			return bLeft, nil
		}
		right, err := e.right.Compute(ws)
		if err != nil {
			return nil, err
		}
		if _, ok := right.(*Undefined); ok {
			return right, nil
		}
		bRight, ok := right.(*Bool)
		if !ok {
			return nil, fmt.Errorf("op on non-bool")
		}
		return bRight, nil
	}

	right, err := e.right.Compute(ws)
	if err != nil {
		return nil, err
	}

	// equality
	if e.op == opEqual {
		return &Bool{left.Equal(right)}, nil
	}
	if e.op == opNotEqual {
		return &Bool{!left.Equal(right)}, nil
	}

	// numerical operations: propagate Undefined before type-asserting.
	if _, ok := left.(*Undefined); ok {
		return left, nil
	}
	nLeft, ok := left.(*Number)
	if !ok {
		return nil, fmt.Errorf("op on non-number")
	}
	if _, ok := right.(*Undefined); ok {
		return right, nil
	}
	nRight, ok := right.(*Number)
	if !ok {
		return nil, fmt.Errorf("op on non-number")
	}

	var result *Number
	switch e.op {
	case opPlus:
		result = nLeft.Plus(nRight)
	case opMinus:
		result = nLeft.Minus(nRight)
	case opMult:
		result = nLeft.Mult(nRight)
	case opDiv:
		if e.round == nil {
			return nil, fmt.Errorf("division without rounding mode")
		}
		return nLeft.Div(nRight, e.round.mode, e.round.scale), nil
	default:
		panic(fmt.Sprintf("not implemented for %s", e.op))
	}
	if e.round != nil {
		result = result.Round(e.round.mode, e.round.scale)
	}
	return result, nil
}
// tReturn is a transparent wrapper: it delegates both dependency
// reporting and computation to the wrapped expression.
func (e *tReturn) Args() []string {
	return e.expr.Args()
}

func (e *tReturn) Compute(ws *Worksheet) (Value, error) {
	return e.expr.Compute(ws)
}

// ePlugin adapts an externally supplied ComputedBy implementation to
// the expression interface.
type ePlugin struct {
	computedBy ComputedBy
}
// Args returns the field names the plugin declares as its inputs.
func (e *ePlugin) Args() []string {
	return e.computedBy.Args()
}

// Compute gathers the plugin's declared inputs from the worksheet (via
// MustGet, which panics on a missing field) and hands them to the
// plugin. Redundant explicit capacity in make() removed (S1019).
func (e *ePlugin) Compute(ws *Worksheet) (Value, error) {
	args := e.computedBy.Args()
	values := make([]Value, len(args))
	for i, arg := range args {
		values[i] = ws.MustGet(arg)
	}
	return e.computedBy.Compute(values...), nil
}
|
package model
// NOTE: Just use *time.Time in a struct...
// type NullableTime struct {
// time.Time `json:",omitempty"`
// }
//
// func (n NullableTime) MarshalJSON() ([]byte, error) {
// if n.Time.IsZero() {
// // Optional: return []byte("null"), but it will not be omitted
// return []byte(`""`), nil
// }
//
// return n.Time.MarshalJSON()
// }
//
// // Scan must be a pointer.
// func (n *NullableTime) Scan(value interface{}) error {
// if value == nil {
// return nil
// }
// var ok bool
// n.Time, ok = value.(time.Time)
// if !ok {
// return errors.New("failed to scan model.NullableTime")
// }
// return nil
// }
//
// func (n NullableTime) Value() (driver.Value, error) {
// return n.Time.Format(time.RFC3339Nano), nil
// }
|
// request performs an authenticated GET against url using the client's
// bearer token. On success the caller is responsible for closing the
// response body.
//
// Previously a non-nil empty &http.Response{} was returned alongside
// the request-construction error; a nil response is returned instead,
// as the value is meaningless when err != nil.
func (client *client) request(url string) (*http.Response, error) {
	request, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, fmt.Errorf("[ERR] :%s", err)
	}
	request.Header.Set("Authorization", "Bearer "+client.token)
	return client.do(request)
}
// parallelRequest performs an authenticated GET against url, decodes
// the response into a pageDetailItem, and sends it on pageDetailItemCh.
func (client *client) parallelRequest(pageDetailItemCh chan pageDetailItem, url string) error {
	request, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return fmt.Errorf("[ERR] :%s", err)
	}
	request.Header.Set("Authorization", "Bearer "+client.token)
	res, err := client.do(request)
	if err != nil {
		return err
	}
	// Renamed from "pageDetailItem", which shadowed the identically
	// named type.
	var item pageDetailItem
	// NOTE(review): any error from decodeBody is ignored here, and the
	// response body is assumed to be closed inside decodeBody — confirm.
	decodeBody(res, &item)
	pageDetailItemCh <- item
	return nil
}
|
package license
// cactl.go file from license-ca team
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"time"
)
// Default master/slave license-CA endpoints used when the caller
// supplies none.
const defaultMasterCAUrl = "https://private.ca.sensetime.com:8443"
const defaultSlaveCAUrl = "https://slave.private.ca.sensetime.com:8443"
const defaultMasterCACert = `
-----BEGIN CERTIFICATE-----
MIIDozCCAougAwIBAgIJAJgOlpYuWlSUMA0GCSqGSIb3DQEBCwUAMGgxCzAJBgNV
BAYTAkNOMRAwDgYDVQQIDAdCRUlKSU5HMRAwDgYDVQQHDAdCRUlKSU5HMRIwEAYD
VQQKDAlTRU5TRVRJTUUxITAfBgNVBAMMGHByaXZhdGUuY2Euc2Vuc2V0aW1lLmNv
bTAeFw0xNzEyMDYwOTM0MzdaFw0yNzEyMDQwOTM0MzdaMGgxCzAJBgNVBAYTAkNO
MRAwDgYDVQQIDAdCRUlKSU5HMRAwDgYDVQQHDAdCRUlKSU5HMRIwEAYDVQQKDAlT
RU5TRVRJTUUxITAfBgNVBAMMGHByaXZhdGUuY2Euc2Vuc2V0aW1lLmNvbTCCASIw
DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALPbG4PxtqX9TEk720hkxqlY07WB
KWg3MD51jzZzVEDe0LnsD0kmdSt0lA+WvIGwXNXh0TNX9B7zcNwJ+dhj6oEujA+Z
zmd3FpulpJElU0nE/R68LzTa/4bXCIwMmpkKvMbuLdwSNimbSKiO9IGrloCNFTfP
Fskmmp3NbcXkNFQCRseGFUGGJDfsNdSp5qGsTIolpqoBRlHyxsHxqzk3PVkvRZ0u
7ytQKQENbb4w60ukqh45hLX6J0irQfqSY8Bw51gos3OfQ3ur8z3HdFMp+/PxMh4n
rAMvqBLe4d6fBj+oj2Ej27gQZ8aDvV1jWh92rN5A9RKTM3XV90PRGHzMvn0CAwEA
AaNQME4wHQYDVR0OBBYEFBc2fH74sxyPX/N+TbATRDVmcM1+MB8GA1UdIwQYMBaA
FBc2fH74sxyPX/N+TbATRDVmcM1+MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEL
BQADggEBADScq9hKnAFlGw5gWJoNuTx6FPD2MJ6Zm0/VoD7xNS32nIaVVI0Tt6VH
eZe0JD7Cer4LIPUb5oJTmcR2mUYgBhVLtZKoLRwgH7daRqaI/LOdV8XQR+qRqyj6
iBtYOZmumXqvsW2NsrxV/fAWbXeVZl3bE7YVfbvktBhdFNT05DVEJDu+0QmoClHN
e39TYZbLuUgfBIVZUVItKJfp1NVVX6M5U+/KEzwxShAVOez/S3Jsn+dROKBf6WQn
mLmCh5WMppaIbSjWatz2hBcqarh12gGQgNwyd+zyWbqtCddEdaxNW8WLj1Y8JLxH
rO2hAGzKct7qiBd6mDCBJfSWIVxKU0Q=
-----END CERTIFICATE-----
`
const defaultSlaveCACert = `
-----BEGIN CERTIFICATE-----
MIIDrzCCApegAwIBAgIJAOI2xfBCEdAmMA0GCSqGSIb3DQEBCwUAMG4xCzAJBgNV
BAYTAkNOMRAwDgYDVQQIDAdCRUlKSU5HMRAwDgYDVQQHDAdCRUlKSU5HMRIwEAYD
VQQKDAlTRU5TRVRJTUUxJzAlBgNVBAMMHnNsYXZlLnByaXZhdGUuY2Euc2Vuc2V0
aW1lLmNvbTAeFw0xNzEyMTIxMTA3MjNaFw0yNzEyMTAxMTA3MjNaMG4xCzAJBgNV
BAYTAkNOMRAwDgYDVQQIDAdCRUlKSU5HMRAwDgYDVQQHDAdCRUlKSU5HMRIwEAYD
VQQKDAlTRU5TRVRJTUUxJzAlBgNVBAMMHnNsYXZlLnByaXZhdGUuY2Euc2Vuc2V0
aW1lLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALPbG4PxtqX9
TEk720hkxqlY07WBKWg3MD51jzZzVEDe0LnsD0kmdSt0lA+WvIGwXNXh0TNX9B7z
cNwJ+dhj6oEujA+Zzmd3FpulpJElU0nE/R68LzTa/4bXCIwMmpkKvMbuLdwSNimb
SKiO9IGrloCNFTfPFskmmp3NbcXkNFQCRseGFUGGJDfsNdSp5qGsTIolpqoBRlHy
xsHxqzk3PVkvRZ0u7ytQKQENbb4w60ukqh45hLX6J0irQfqSY8Bw51gos3OfQ3ur
8z3HdFMp+/PxMh4nrAMvqBLe4d6fBj+oj2Ej27gQZ8aDvV1jWh92rN5A9RKTM3XV
90PRGHzMvn0CAwEAAaNQME4wHQYDVR0OBBYEFBc2fH74sxyPX/N+TbATRDVmcM1+
MB8GA1UdIwQYMBaAFBc2fH74sxyPX/N+TbATRDVmcM1+MAwGA1UdEwQFMAMBAf8w
DQYJKoZIhvcNAQELBQADggEBAG8vG7uYYFpgwU6ZG1tVxjhMhMFnI7iIasX6kFrd
7yi8N5T3PnYQfHY2ryCkZK6lkdqOhYjX7QuIptRhKeZtIKzkJZIzC2ImnQImf+ah
WIkhN5pmuaA9rb43NRxnfCwLKbMxnheZnBUnFg/Ty83yYTcDEs2zAjNmiGJKLERn
xIUnoWEiXb/tGTatTPNwmNtWbrfy3AeFP39iRD82FPXtsMve45+EnGpt2WAXjx/q
LSbFMBojo7wGfFUu8rw7RDt9b8XgOgjQNYLUlct4MtIsCFMZJU17gCBJ5DFRTnHC
MFD+L3DkGdtm5sbsgdsVB9F3vhsnFWO8y9E2uusM4G8rnT8=
-----END CERTIFICATE-----
`
const defaultConsoleKey = `
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAxV8OzmENKTgpshVjko98tT8aLeD61g+ujdVj9aviOKKm5Edh
04jPwzr3n41ZMDf+B/xPqe7HWTxpv5Lu4Kqa1JBi0qqZZFShBqxcLQuAjVzNIGZe
sDhIrgm93ubLP8ZwvK+vdZmPbOnx1VKfjeOglZKWS+VrwXG61IM+0iYPTXaOJX/J
QDucSvcXeyfUxnybC4lgQdCQeTfF+nTWunO0a7A9vrbx79uzN5yUy+c5RBNuJhXd
MqXDfWvI46hLqPJS2zTDnG5CVKBWm1N0G3HGtXBa4SiwgAn2g3My8TluIQU85ThQ
Sy0umI//yMy8kXY5lLqxA2n72zfezS7PrHZ8zwIDAQABAoIBAAE7U6NUFbnxIMl8
uq9ad+PFrgslQUt+s48tCr+ov/OsiDAahfDFBM7qGkuDnU/guZQhLfoYhGP5LYvF
hfoe9nJnKEa6S9TFdm/NOZIKZVX8g0c1fFfLMiDr7KRsek4+lcuHqSepuqxqVVkI
d/hxuDnWvVth5idB53GWFBlJpYTNOshNfllkN0+Gwyo5QZRt2aWTnyp8+g0wPq8t
cVI1U5YB6tKABpl7qA25OhZhdHZF0tmwN51rsJ1YoML/PRpu1p3DH5FhJ81IDe0A
U5lpBGkMuSORNftgba/8LfRwyqTn/YYn579aZl6579C4K1ANxWJJnO64zZahDCNV
YrFz4WkCgYEA/drKBwlx+DPCSnX8f0thXNkrq9yk1l1m5+EIcd1oiHPl3iFHRJoo
iHXh5P8RPvVxkzB9LPnnyEx9aMJ6tEONDYkf13K88YcR4JwMcVDdKLSGrpHK0mDc
5xlkd/ueYwfiEKqnCwGOXw4BkPKopL5fyYaHLEn8GwyI1Mcgjw41KTMCgYEAxwoR
XELzw4w0/nnzwBVwUfV76OdFrXylwktq1H7AMMQmfWXTIrBMLkY/f9mkALydZZeq
pm8BjuKKKUicUN98Zh5LK7EQV2ogK3ps0OH6wQNVNnSz5oRRUsEkrM0Uaw15R613
qyZ6Dg3OkDmr5ZyUmVROS45oDPoZlz4PfM63tfUCgYBk8DlCwQuzQIlx6CZFS2jk
bWoDBVH59tuzOfSMqhglocf2Ik9fRNj3IcB3uMBXw2qstywe1SPHrjpzjFkUEoQk
rLCfj3z3oNiH8iS0bg3yYI3pHgmCy4cq0Rr05nUdNYY7UE/pfW3p9/zBcOuDzjry
O+7FuolnC/3gdWlJ2MFkpwKBgDALCxO1CXfbAPOn5iEoS5tM4OLf6B6vJqeWYqv2
CFf9ELlV+be2zDyjMjKfCwoufOOHz2YrBzpBDk5Wu3x95V4U09ow/BvNfwRfoaJt
2YP7VPc3BjGPIL4T5tFbEyGf9/VINsl2GSIJTSHc+dQLjobQJbHxJsZzG/g4v65F
i2x9AoGBANYmI4o2ZGoa1ywxwVoOvQtl3JmVEwy8Wwfp9qv/9bmijjlkKBQFQNWC
udksam8veH8inhBWok1G84T7e46IKcxSVz+26KaG3ViBIyVgrlJkSZ5JVBkZFRaG
JyYxeOfzwryGz1Z6plgVStXq6O9RPuGUeVkT1K5CmeMM0PQlmaG0
-----END RSA PRIVATE KEY-----
`
const defaultConsoleCert = `
-----BEGIN CERTIFICATE-----
MIIDdDCCAlwCCQD6c+kzCWZJDTANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJD
TjEQMA4GA1UECAwHQmVpamluZzEQMA4GA1UEBwwHQmVpamluZzESMBAGA1UECgwJ
U2Vuc2V0aW1lMRIwEAYDVQQLDAlTZW5zZXRpbWUxITAfBgNVBAMMGHByaXZhdGUu
Y2Euc2Vuc2V0aW1lLmNvbTAeFw0xODA3MTcwODU3MTdaFw0yODA3MTQwODU3MTda
MHwxCzAJBgNVBAYTAkNOMRAwDgYDVQQIDAdCZWlqaW5nMRAwDgYDVQQHDAdCZWlq
aW5nMRIwEAYDVQQKDAlTZW5zZXRpbWUxEjAQBgNVBAsMCVNlbnNldGltZTEhMB8G
A1UEAwwYcHJpdmF0ZS5jYS5zZW5zZXRpbWUuY29tMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAxV8OzmENKTgpshVjko98tT8aLeD61g+ujdVj9aviOKKm
5Edh04jPwzr3n41ZMDf+B/xPqe7HWTxpv5Lu4Kqa1JBi0qqZZFShBqxcLQuAjVzN
IGZesDhIrgm93ubLP8ZwvK+vdZmPbOnx1VKfjeOglZKWS+VrwXG61IM+0iYPTXaO
JX/JQDucSvcXeyfUxnybC4lgQdCQeTfF+nTWunO0a7A9vrbx79uzN5yUy+c5RBNu
JhXdMqXDfWvI46hLqPJS2zTDnG5CVKBWm1N0G3HGtXBa4SiwgAn2g3My8TluIQU8
5ThQSy0umI//yMy8kXY5lLqxA2n72zfezS7PrHZ8zwIDAQABMA0GCSqGSIb3DQEB
CwUAA4IBAQDFPS+zoXkDOAy6Y7dI0kwjpQlqZhKjPni1LAuCbNbebpGve9RlZQPr
p0fu1zD8vwgnAwqOCJSPtdw2SphRQCmwOjkEazfHZezve3eJ3hIaMXO89Kwn14ye
SGg9l/1/cwCun61kiQzvW2tMK8KxUtvO62TLmKZMmu6iMj1Koi98TsBnDHhMfpv1
c1UTgXKLq+W7vJrMAwlQqbGI6xjxGG4AHpEkrK23qDlGqkJ1uNXtNf5+dQe14+j4
MopZ2DjS2c2+Z0GfWW6D9IxoZVqq0eBdfLskc6THEh/JwZWbpIYL7k/zQELiB6HO
UUuPtAJTIpTsOPk8nMmG93jLTQYERIa6
-----END CERTIFICATE-----
`
const (
	// Master identifies the master CA.
	Master ServerType = iota
	// Slave identifies the slave CA.
	Slave
)

// ServerType selects between the master and slave CA endpoints.
type ServerType uint
// CACtl is license-ca control interface.
// It bundles the mutual-TLS material and one pre-built HTTPS client per
// server (master/slave) used to call the license-ca admin API.
type CACtl struct {
	masterCACert string                      // master ca cert, signed from same key as slave ca cert
	slaveCACert  string                      // slave ca cert, signed from same key as master ca cert
	clientCert   string                      // client cert, signed by client key
	clientKey    string                      // client key
	caURL        map[ServerType]string       // ca base URLs, keyed by Master/Slave
	httpClient   map[ServerType]*http.Client // https clients, keyed by Master/Slave
}
// CAStatus carries CA status from CA admin api.
// It is the decoded, caller-facing form of caStatusResponse (including the
// nested ca_json payload).
type CAStatus struct {
	Mode        string                 // default is voucher mode, if not, you should to change code(rock add)
	Server      uint                   // 0 is master, 1 is slave
	Disable     bool                   // is ca disabled, by soft start/stop ca, if disabled, ca can't supply normal service
	IsActive    bool                   // master or standby ca
	ActiveLimit int32                  // cluster total active limit
	AloneTime   int32                  // ca alone time, unit: seconds, 0 means forever
	DongleTime  int64                  // dongle timestamp
	Status      string                 // ca status, "alive" or "dead", means whether ca is in alive
	AuthID      string                 // cluster license sn
	Product     string                 // product name
	DongleID    string                 // dongle id
	ExpiredAt   string                 // expire time
	Company     string                 // company name
	FeatureIds  []uint64               // feature ids
	Quotas      map[string]quotaLimit  // cluster quotas, used and total
	Consts      map[string]interface{} // cluster consts, value type will be int32 or string
	Devices     []caDeviceInfo         // the quotas that devices have taken
}
// CAStatusRequest is ca request message to license-ca (currently empty).
type CAStatusRequest struct {
}

// ServiceControlRequest is request message to license-ca.
// Disable=true asks the CA to soft-stop; false asks it to start.
type ServiceControlRequest struct {
	Disable bool `json:"disable,omitempty"`
}

// ServiceControlResponse is response from license-ca.
type ServiceControlResponse struct {
	Success bool `json:"success,omitempty"`
}

// ActiveResponse is status from ca after an activation attempt.
type ActiveResponse struct {
	StatusCode    string `json:"status_code,omitempty"`
	StatusMessage string `json:"status_message,omitempty"`
}

// HardwareInfoResponse contains finger_print, dongle c2v.
type HardwareInfoResponse struct {
	FingerPrint string `json:"finger_print,omitempty"`
	C2V         string `json:"c2v,omitempty"`
}

// ClientLicResponse carries client license strings returned by /clics.
type ClientLicResponse struct {
	Licenses []string `json:"licenses,omitempty"`
}

// onlineActiveRequest is the body of POST /online.
type onlineActiveRequest struct {
	Action string `json:"action,omitempty"`
}

// hardwareInfoRequest selects which hardware info to fetch.
type hardwareInfoRequest struct {
	Type int32 `json:"type,omitempty"`
}

// offlineActiveRequest is the body of POST /offline; V2C is the vendor-to-customer blob.
type offlineActiveRequest struct {
	V2C string `json:"v2c,omitempty"`
}

// clientLicRequest is the (empty) request for GET /clics.
type clientLicRequest struct{}
// caStatusResponse is ca response message from license-ca, UpdateAt is different from rpc.
// Numeric fields DongleTime and FeatureIds arrive as strings and are parsed
// by GetCAStatus; CaJson is a nested JSON document decoded via caValues.
type caStatusResponse struct {
	Disable    bool     `json:"disable,omitempty"`
	IsActive   bool     `json:"is_active,omitempty"`
	AloneTime  int32    `json:"alone_time,omitempty"`
	Status     string   `json:"status,omitempty"`
	Cluster    string   `json:"cluster,omitempty"`
	UpdatedAt  string   `json:"updated_at,omitempty"`
	CaJson     string   `json:"ca_json,omitempty"` // mode-specific payload, see dongleCAValues/voucherCAValues
	Product    string   `json:"product,omitempty"`
	DongleId   string   `json:"dongle_id,omitempty"`
	ExpiredAt  string   `json:"expired_at,omitempty"`
	DongleTime string   `json:"dongle_time,omitempty"` // unix timestamp as decimal string
	Company    string   `json:"company,omitempty"`
	Slaves     []string `json:"slaves,omitempty"`
	FeatureIds []string `json:"feature_ids,omitempty"` // decimal strings
}
type caValues interface {
getConsts() map[string]interface{}
getQuotas() map[string][2]int32
getDevices() []caDeviceInfo
getLimit() int32
getActive() int32
}
type caDeviceInfo struct {
UdID string `json:"udid,omitempty"`
QuotaUsage map[string]int32 `json:"quota_usage,omitempty"`
}
type dongleCAValues struct {
Limit int32 `json:"limit,omitempty"`
Active int32 `json:"active,omitempty"`
Quotas map[string][2]int32 `json:"quotas,omitempty"`
Consts map[string]interface{} `json:"consts,omitempty"`
Devices []caDeviceInfo `json:"devices",omitempty`
}
func (d *dongleCAValues) getConsts() map[string]interface{} {
return d.Consts
}
func (d *dongleCAValues) getQuotas() map[string][2]int32 {
return d.Quotas
}
func (d *dongleCAValues) getDevices() []caDeviceInfo {
return d.Devices
}
func (d *dongleCAValues) getLimit() int32 {
return d.Limit
}
func (d *dongleCAValues) getActive() int32 {
return d.Active
}
// voucherCAValues is the ca_json payload layout when the CA runs in voucher
// mode; quotas/consts use the "ext_" key prefix, unlike dongle mode.
type voucherCAValues struct {
	Limit   int32                  `json:"limit,omitempty"`      // cluster total active limit
	Active  int32                  `json:"active,omitempty"`     // currently active count
	Quotas  map[string][2]int32    `json:"ext_quotas,omitempty"` // quota name -> [used, total]
	Consts  map[string]interface{} `json:"ext_consts,omitempty"`
	Devices []caDeviceInfo         `json:"devices,omitempty"`
}

func (d *voucherCAValues) getConsts() map[string]interface{} {
	return d.Consts
}
func (d *voucherCAValues) getQuotas() map[string][2]int32 {
	return d.Quotas
}
func (d *voucherCAValues) getDevices() []caDeviceInfo {
	return d.Devices
}
func (d *voucherCAValues) getLimit() int32 {
	return d.Limit
}
func (d *voucherCAValues) getActive() int32 {
	return d.Active
}
// quotaLimit pairs consumed and total counts for one named quota.
type quotaLimit struct {
	Used  int32 // used quotas
	Total int32 // total quotas
}
// NewServiceCtl creates a CACtl wired to the given master and slave CA URLs,
// using the package's built-in default certificates and keys. It returns an
// error when either HTTPS client cannot be constructed from that material.
func NewServiceCtl(masterCAUrl, slaveCAUrl string) (*CACtl, error) {
	ctl := &CACtl{
		masterCACert: defaultMasterCACert,
		slaveCACert:  defaultSlaveCACert,
		clientCert:   defaultConsoleCert,
		clientKey:    defaultConsoleKey,
		caURL: map[ServerType]string{
			Master: masterCAUrl, // e.g. "https://10.151.5.136:8443"
			Slave:  slaveCAUrl,  // e.g. "https://10.151.5.137:8443"
		},
		httpClient: make(map[ServerType]*http.Client, 2),
	}
	var err error
	if ctl.httpClient[Master], err = createHTTPSClient(ctl.masterCACert, ctl.clientKey, ctl.clientCert); err != nil {
		return nil, err
	}
	if ctl.httpClient[Slave], err = createHTTPSClient(ctl.slaveCACert, ctl.clientKey, ctl.clientCert); err != nil {
		return nil, err
	}
	return ctl, nil
}
// newCAValues returns the caValues implementation matching the given auth
// mode ("dongle" or "voucher"), or nil for an unknown mode.
func newCAValues(authAddr string) caValues {
	if authAddr == "dongle" {
		return &dongleCAValues{}
	}
	if authAddr == "voucher" {
		return &voucherCAValues{}
	}
	return nil
}
// GetCAStatus get CA status from license-ca.
// serverType: Master, get status from master license-ca and Slave gets from slave license-ca.
// authAddr selects how the nested ca_json payload is decoded ("dongle" or
// "voucher"); any other value is rejected.
func (ctl *CACtl) GetCAStatus(serverType ServerType, authAddr string) (*CAStatus, error) {
	req, err := http.NewRequest("GET", ctl.caURL[serverType]+"/status", nil)
	if err != nil {
		return nil, err
	}
	resp, err := ctl.httpClient[serverType].Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("status code is %d", resp.StatusCode)
	}
	caRet, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	caResp := new(caStatusResponse)
	if err = json.Unmarshal(caRet, caResp); err != nil {
		return nil, err
	}
	caVal := newCAValues(authAddr)
	if caVal == nil {
		// BUG FIX: the original fmt.Errorf had arguments but no format verb
		// (go vet printf), so authAddr never appeared in the message.
		return nil, fmt.Errorf("not support auth addr: %s", authAddr)
	}
	// ca_json is a JSON document embedded as a string; decode it with the
	// mode-specific layout.
	if err = json.Unmarshal([]byte(caResp.CaJson), caVal); err != nil {
		return nil, err
	}
	quotas := make(map[string]quotaLimit, len(caVal.getQuotas()))
	for k, v := range caVal.getQuotas() {
		quotas[k] = quotaLimit{Total: v[1], Used: v[0]}
	}
	// Parse failures below deliberately fall back to zero values, matching
	// the server sending empty/absent fields.
	dongleTime, _ := strconv.ParseInt(caResp.DongleTime, 10, 64)
	featureIDs := make([]uint64, 0, len(caResp.FeatureIds))
	for _, featureID := range caResp.FeatureIds {
		id, _ := strconv.ParseUint(featureID, 10, 64)
		featureIDs = append(featureIDs, id)
	}
	status := &CAStatus{
		Mode:        authAddr,
		Server:      uint(serverType),
		Status:      caResp.Status,
		Disable:     caResp.Disable,
		IsActive:    caResp.IsActive,
		AuthID:      caResp.Cluster,
		Product:     caResp.Product,
		DongleID:    caResp.DongleId,
		ExpiredAt:   caResp.ExpiredAt,
		DongleTime:  dongleTime,
		AloneTime:   caResp.AloneTime,
		Company:     caResp.Company,
		FeatureIds:  featureIDs,
		ActiveLimit: caVal.getLimit(),
		Quotas:      quotas,
		Consts:      caVal.getConsts(),
		Devices:     caVal.getDevices(),
	}
	return status, nil
}
// CAControl sends start or stop cmd to license-ca, change ca service status.
// serverType: Master, control master license-ca and Slave control slave license-ca.
// disable=true means stop license-ca and false means start server.
func (ctl *CACtl) CAControl(serverType ServerType, disable bool) (*ServiceControlResponse, error) {
	scReq := &ServiceControlRequest{Disable: disable}
	reqBody, err := json.Marshal(scReq)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", ctl.caURL[serverType]+"/control", bytes.NewReader(reqBody))
	if err != nil {
		return nil, err
	}
	req.Header.Add("Content-Type", "application/json")
	resp, err := ctl.httpClient[serverType].Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Only a plain 200 is treated as success.
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("status code is %d", resp.StatusCode)
	}
	caRet, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	caResp := new(ServiceControlResponse)
	err = json.Unmarshal(caRet, caResp)
	if err != nil {
		return nil, err
	}
	return caResp, nil
}
// OnlineActivate sends an action command to ca to activate the dongle online.
// The returned ActiveResponse carries the CA's status code and message.
func (ctl *CACtl) OnlineActivate(serverType ServerType, action string) (*ActiveResponse, error) {
	olReq := &onlineActiveRequest{Action: action}
	reqBody, err := json.Marshal(olReq)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", ctl.caURL[serverType]+"/online", bytes.NewReader(reqBody))
	if err != nil {
		return nil, err
	}
	req.Header.Add("Content-Type", "application/json")
	resp, err := ctl.httpClient[serverType].Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("status code is %d", resp.StatusCode)
	}
	caRet, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	caResp := new(ActiveResponse)
	err = json.Unmarshal(caRet, caResp)
	if err != nil {
		return nil, err
	}
	return caResp, nil
}
// HardwareInfo gets fingerprint or c2v from ca.
// Type selects which piece of hardware info /hdinfo/<type> should return.
func (ctl *CACtl) HardwareInfo(serverType ServerType, Type int32) (*HardwareInfoResponse, error) {
	req, err := http.NewRequest("GET", fmt.Sprintf(ctl.caURL[serverType]+"/hdinfo/%d", Type), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Content-Type", "application/json")
	resp, err := ctl.httpClient[serverType].Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("status code is %d, %s", resp.StatusCode, resp.Status)
	}
	caRet, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	caResp := new(HardwareInfoResponse)
	err = json.Unmarshal(caRet, caResp)
	if err != nil {
		return nil, err
	}
	return caResp, nil
}
// OfflineActivate sends a v2c blob to ca to activate the dongle offline.
func (ctl *CACtl) OfflineActivate(serverType ServerType, v2c string) (*ActiveResponse, error) {
	offReq := &offlineActiveRequest{V2C: v2c}
	reqBody, err := json.Marshal(offReq)
	if err != nil {
		return nil, err
	}
	//fmt.Println(string(reqBody))
	req, err := http.NewRequest("POST", ctl.caURL[serverType]+"/offline", bytes.NewReader(reqBody))
	if err != nil {
		return nil, err
	}
	req.Header.Add("Content-Type", "application/json")
	resp, err := ctl.httpClient[serverType].Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("status code is %d", resp.StatusCode)
	}
	caRet, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	caResp := new(ActiveResponse)
	err = json.Unmarshal(caRet, caResp)
	if err != nil {
		return nil, err
	}
	return caResp, nil
}
// GetClientLics get client licenses from license-ca.
// serverType: Master, get status from master license-ca and Slave gets from slave license-ca.
func (ctl *CACtl) GetClientLics(serverType ServerType) (*ClientLicResponse, error) {
	req, err := http.NewRequest("GET", ctl.caURL[serverType]+"/clics", nil)
	if err != nil {
		return nil, err
	}
	resp, err := ctl.httpClient[serverType].Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("status code is %d", resp.StatusCode)
	}
	caRet, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	caResp := new(ClientLicResponse)
	err = json.Unmarshal(caRet, caResp)
	if err != nil {
		return nil, err
	}
	return caResp, nil
}
// createHTTPSClient builds an *http.Client configured for mutual TLS: it
// presents clientCert/clientKey and trusts serverCert (all PEM strings).
func createHTTPSClient(serverCert, clientKey, clientCert string) (*http.Client, error) {
	// add client key and cert
	cert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientKey))
	if err != nil {
		return nil, err
	}
	// add server cert
	serverCertPool := x509.NewCertPool()
	if ok := serverCertPool.AppendCertsFromPEM([]byte(serverCert)); !ok {
		return nil, errors.New("load ca cert error")
	}
	tlsConfig := &tls.Config{
		Certificates: []tls.Certificate{cert},
		RootCAs:      serverCertPool,
		// InsecureSkipVerify controls whether the client verifies the
		// server's certificate chain and host name. With it set to true, TLS
		// accepts ANY certificate and host name the server presents, making
		// the connection vulnerable to man-in-the-middle attacks.
		// NOTE(review): this should only be enabled for testing — consider
		// removing it so RootCAs above is actually enforced.
		InsecureSkipVerify: true,
	}
	// NOTE(review): BuildNameToCertificate is deprecated since Go 1.14; the
	// TLS library now selects certificates automatically.
	tlsConfig.BuildNameToCertificate()
	transport := &http.Transport{TLSClientConfig: tlsConfig}
	httpClient := &http.Client{Transport: transport, Timeout: time.Second * 10} // http request timeout: 10 seconds
	return httpClient, nil
}
|
package functions
import (
"fmt"
"strings"
"github.com/miekg/dns"
)
// ReadRR reads a CoreDNS resource record and returns its string representation.
// From: coredns/plugin/test/helpers.go
// Record types not listed in the switch yield the empty string. RT is a
// project helper applied to some name fields — presumably trailing-dot/case
// normalization; verify against its definition.
func ReadRR(val dns.RR) string {
	var res string
	switch x := val.(type) {
	case *dns.SRV:
		res = fmt.Sprintf("%d|%d|%d|%s", x.Priority, x.Weight, x.Port, x.Target)
	case *dns.RRSIG:
		res = fmt.Sprintf("%d|%d|%s", x.TypeCovered, x.Labels, x.SignerName)
	case *dns.NSEC:
		res = x.NextDomain
	case *dns.A:
		res = RT(x.A.String())
	case *dns.AAAA:
		res = RT(x.AAAA.String())
	case *dns.TXT:
		res = strings.Join(x.Txt, "|")
	case *dns.HINFO:
		res = fmt.Sprintf("%s|%s", x.Cpu, x.Os)
	case *dns.SOA:
		res = x.Ns
	case *dns.PTR:
		res = RT(x.Ptr)
	case *dns.CNAME:
		res = RT(x.Target)
	case *dns.MX:
		res = fmt.Sprintf("%s|%d", x.Mx, x.Preference)
	case *dns.NS:
		res = x.Ns
	case *dns.OPT:
		res = fmt.Sprintf("%d|%t", x.UDPSize(), x.Do())
	}
	return res
}
|
package tchart
import (
"errors"
"github.com/nsf/termbox-go"
ui "github.com/s-westphal/termui/v3"
)
// GetDefaultChartColors returns the default color cycle used by chart widgets.
func GetDefaultChartColors() []ui.Color {
	return []ui.Color{ui.ColorRed, ui.ColorGreen, ui.ColorYellow, ui.ColorBlue, ui.ColorCyan}
}
// App is the top-level tchart application: a vertical container holding
// panels and free-standing widget rows.
type App struct {
	*vContainer
	panels  []panel  // data panels added via AddPanel
	widgets []Widget // widgets added via AddWidgetRow
}

// NewApp returns an empty App with an initialized root container.
func NewApp() *App {
	return &App{
		vContainer: newVContainer(),
		panels:     []panel{},
		widgets:    []Widget{},
	}
}
// AddPanel creates a widget of widgetType backed by storages, wraps it in a
// panel, and appends it to the app's root container. The widget creation
// error, if any, is returned to the caller.
func (app *App) AddPanel(widgetType string, storages []*Storage) error {
	widget, err := CreateWidget(widgetType, "", storages)
	if err != nil {
		// BUG FIX: the original panicked here even though the function
		// declares an error return; propagate the error instead.
		return err
	}
	var panel = newPanel(storages, widget)
	app.vContainer.putContainers(panel.container)
	app.panels = append(app.panels, *panel)
	return nil
}
// AddInstructions appends a fixed-height (3 rows) instructions widget to the
// bottom of the app's root container.
func (app *App) AddInstructions() {
	instructionsWidget := newInstructionsWidget("")
	instructionsContainer := newHContainer(instructionsWidget)
	instructionsContainer.setHeight(3)
	app.vContainer.putContainers(instructionsContainer)
}
// AddWidgetRow lays the given widgets out side by side in one horizontal row
// and appends the row to the app. height of 0 leaves the row's height
// unconstrained.
func (app *App) AddWidgetRow(widgets []Widget, height int) {
	rowContainer := newHContainer()
	for _, w := range widgets {
		chartContainer := newVContainer(w)
		rowContainer.putContainers(chartContainer)
	}
	app.widgets = append(app.widgets, widgets...)
	if height != 0 {
		rowContainer.setHeight(height)
	}
	app.vContainer.putContainers(rowContainer)
}
// Update refreshes all panels and widgets from their storages, then re-renders
// the whole layout at the current terminal size and flushes it to screen.
func (app *App) Update() {
	for _, panel := range app.panels {
		panel.update()
	}
	for _, widget := range app.widgets {
		widget.update()
	}
	w, h := termbox.Size()
	app.vContainer.render(0, 0, w, h)
	termbox.Flush()
}
// Render clears the terminal and draws the current layout without updating
// any widget data (contrast with Update).
func (app *App) Render() {
	w, h := termbox.Size()
	termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
	app.vContainer.render(0, 0, w, h)
	termbox.Flush()
}
// CreateWidget constructs a widget of the given single-letter type backed by
// storages: "L" line chart, "S" scatter plot (exactly 2 columns), "P" pie
// chart, "G" gauge (exactly 1 column), "B" bar chart (exactly 1 column).
// An empty title gets a type-specific default where one exists.
func CreateWidget(widgetType string, title string, storages []*Storage) (Widget, error) {
	var widget Widget
	switch widgetType {
	case "L":
		if title == "" {
			title = "Line Chart"
		}
		widget = NewLineChartWidget(title, storages, GetDefaultChartColors())
	case "S":
		if len(storages) != 2 {
			return nil, errors.New("scatter plot needs 2 columns")
		}
		if title == "" {
			title = "Scatter Plot"
		}
		widget = NewScatterPlotWidget(title, storages)
	case "P":
		if title == "" {
			title = "Pie Chart"
		}
		widget = NewPieChartWidget(title, storages, GetDefaultChartColors())
	case "G":
		if len(storages) != 1 {
			return nil, errors.New("gauge needs 1 column")
		}
		widget = NewGaugeWidget(title, storages[0], GetDefaultChartColors())
	case "B":
		if len(storages) != 1 {
			return nil, errors.New("barchart needs 1 column")
		}
		if title == "" {
			title = "Bar Chart"
		}
		widget = NewBarChartWidget(title, storages[0], 10)
	default:
		// BUG FIX: an unrecognized type previously returned (nil, nil),
		// handing callers a nil widget without any signal.
		return nil, errors.New("unknown widget type: " + widgetType)
	}
	return widget, nil
}
|
package csv
import (
"encoding/csv"
"io"
"reflect"
"fmt"
//"log"
)
type Writer struct {
*csv.Writer
}
func NewWriter(w io.Writer) *Writer {
return &Writer{
csv.NewWriter(w),
}
}
func (w *Writer) WriteAllCsv(data interface{}) (err error) {
refl := reflect.ValueOf(data)
err = w.WriteCsvHeader(refl.Index(0).Interface())
if err != nil {
return err
}
for i:=0;i<refl.Len();i++ {
val := refl.Index(i)
err = w.WriteCsv(val.Interface())
if err != nil {
return err
}
}
w.Flush()
return
}
func (w *Writer) WriteCsv(data interface{}) error {
rv := reflect.ValueOf(data)
reflected := reflect.TypeOf(data)
item := make([]string, reflected.NumField())
for j:=0;j<reflected.NumField();j++{
item[j] = fmt.Sprintf("%v", rv.Field(j))
}
return w.Write(item)
}
func (w *Writer) WriteCsvHeader(data interface{}) error {
reflected := reflect.TypeOf(data)
item := make([]string, reflected.NumField())
for s:=0;s<reflected.NumField();s++{
item[s] = fmt.Sprintf("%v", reflected.Field(s).Name)
}
return w.Write(item)
} |
package controllers
import (
"errors"
"regexp"
"strings"
"crypto-telegram-notifyer/coingecko"
"github.com/astaxie/beego"
)
// CoinController serves coin price lookups over beego.
type CoinController struct {
	beego.Controller
}

// CoinResponse is the per-coin payload returned to API clients.
// Definition of a response with data.
type CoinResponse struct {
	Name     string `json:"symbol"`
	UsdPrice string `json:"usd_price"`
	BtcPrice string `json:"btc_price"`
}

// ErrorMsg is the JSON error body sent on failed requests.
type ErrorMsg struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}
// _extract_coin_symbols splits a comma-separated list of coin symbols
// (e.g. "btc,lsk") into a slice, validating that it consists only of
// lowercase-letter symbols separated by single commas.
func _extract_coin_symbols(allCoinsCommaSeparated string) ([]string, error) {
	if len(allCoinsCommaSeparated) == 0 {
		return nil, errors.New("Coin symbol list must not be empty")
	}
	// BUG FIX: the original pattern "([a-z]+)(,*)" was unanchored, so any
	// input containing a single lowercase letter passed (e.g. "btc,123!").
	// Anchor the pattern and require comma-separated lowercase symbols.
	match, err := regexp.MatchString(`^[a-z]+(,[a-z]+)*$`, allCoinsCommaSeparated)
	if err != nil || !match {
		return nil, errors.New("Coin list must be separated by commas. Not other symbols or numbers allowed")
	}
	// Only return the coin list when it is well formed.
	return strings.Split(allCoinsCommaSeparated, ","), nil
}
// _obtain_coin_list_prices retrieves a CoinResponse for each symbol in
// coinList via the Coingecko client.
func _obtain_coin_list_prices(coinList []string) (*[]CoinResponse, error) {
	coinPricesList, err := coingecko.GetCoinsPrices(coinList)
	if err != nil {
		// Include the underlying error so failures are diagnosable
		// (the original discarded it).
		return nil, errors.New("Error while calling to Coingecko's API: " + err.Error())
	}
	var coinPricesListToReturn = make([]CoinResponse, len(*coinPricesList))
	for i, coinPriceResponse := range *coinPricesList {
		// Conversion between identically-shaped struct types.
		coinPricesListToReturn[i] = CoinResponse(coinPriceResponse)
	}
	return &coinPricesListToReturn, nil
}
// Get handles GET:/coins?symbols=btc,lsk — it validates the symbol list,
// fetches prices, and serves them as JSON. A malformed list yields a 403,
// an upstream API failure a 500 (helpers defined elsewhere in the package).
func (this *CoinController) Get() {
	coinSymbols := this.GetString("symbols")
	coinSymbolList, err := _extract_coin_symbols(coinSymbols)
	// If list of coins is not well formed send a 403 error
	if err != nil {
		_send_403_error_response(this.Ctx.ResponseWriter, err.Error())
	} else {
		coinPricesList, err2 := _obtain_coin_list_prices(coinSymbolList)
		if err2 != nil {
			_send_500_error_response(this.Ctx.ResponseWriter, err2.Error())
		} else {
			this.Data["json"] = *coinPricesList
			this.ServeJSON()
		}
	}
}
|
package validpalendrome
// isPalindrome reports whether s reads the same forwards and backwards when
// only its alphanumeric characters are considered, case-insensitively.
// An empty (or all-punctuation) string counts as a palindrome.
func isPalindrome(s string) bool {
	// Normalize: keep lowercased alphanumerics only. Capacity is
	// over-allocated up front to avoid regrowth.
	norm := make([]rune, 0, len(s))
	for _, r := range s {
		if c, keep := fix(r); keep {
			norm = append(norm, c)
		}
	}
	// Walk mirrored positions toward the middle.
	lo, hi := 0, len(norm)-1
	for lo < hi {
		if norm[lo] != norm[hi] {
			return false
		}
		lo++
		hi--
	}
	return true
}

// fix reports whether r is ASCII alphanumeric, returning its lowercase form
// when it is and the NUL rune otherwise.
func fix(r rune) (okrune rune, ok bool) {
	switch {
	case '0' <= r && r <= '9', 'a' <= r && r <= 'z':
		return r, true
	case 'A' <= r && r <= 'Z':
		return r + ('a' - 'A'), true
	default:
		return '\u0000', false
	}
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package gtest contains helpers for running google-test tests from Go.
package gtest
import (
"fmt"
"os/exec"
"strings"
)
var (
	// listTestFlag is the flag that will list tests in gtest binaries.
	listTestFlag = "--gtest_list_tests"

	// filterTestFlag is the flag that will filter tests in gtest binaries.
	filterTestFlag = "--gtest_filter"

	// listBenchmarkFlag is the flag that will list benchmarks in gtest binaries.
	listBenchmarkFlag = "--benchmark_list_tests"

	// filterBenchmarkFlag is the flag that will run specified benchmarks.
	filterBenchmarkFlag = "--benchmark_filter"
)
// TestCase identifies one runnable gtest test or benchmark.
type TestCase struct {
	// Suite is the suite for this test.
	Suite string

	// Name is the name of this individual test.
	Name string

	// all marks a catch-all case that runs the binary with no filter
	// flags at all. It takes precedence over benchmark below.
	all bool

	// benchmark marks a benchmark entry: Suite is empty and the
	// benchmark filter flags are used instead of the test filter.
	benchmark bool
}

// FullName returns "<suite>.<name>", the form accepted by -gtest_filter.
func (tc TestCase) FullName() string {
	return tc.Suite + "." + tc.Name
}
// Args returns the command-line flags that select exactly this test case
// when invoking the gtest binary.
func (tc TestCase) Args() []string {
	switch {
	case tc.all:
		// A catch-all case runs the binary unfiltered.
		return []string{}
	case tc.benchmark:
		// Benchmarks: select by anchored regexp and disable all
		// regular tests via an empty test filter.
		return []string{
			filterBenchmarkFlag + "=^" + tc.Name + "$",
			filterTestFlag + "=",
		}
	default:
		return []string{filterTestFlag + "=" + tc.FullName()}
	}
}
// ParseTestCases calls a gtest test binary to list its test and returns a
// slice with the name and suite of each test.
//
// If benchmarks is true, then benchmarks will be included in the list of test
// cases provided. Note that this requires the binary to support the
// benchmarks_list_tests flag.
func ParseTestCases(testBin string, benchmarks bool, extraArgs ...string) ([]TestCase, error) {
	// Run to extract test cases.
	args := append([]string{listTestFlag}, extraArgs...)
	cmd := exec.Command(testBin, args...)
	out, err := cmd.Output()
	if err != nil {
		// We failed to list tests with the given flags. Just
		// return something that will run the binary with no
		// flags, which should execute all tests.
		return []TestCase{
			{
				Suite: "Default",
				Name:  "All",
				all:   true,
			},
		}, nil
	}

	// Parse test output: suite headers are unindented and end with ".";
	// individual tests are indented beneath their suite.
	var t []TestCase
	var suite string
	for _, line := range strings.Split(string(out), "\n") {
		// Strip comments.
		line = strings.Split(line, "#")[0]

		// New suite?
		if !strings.HasPrefix(line, " ") {
			suite = strings.TrimSuffix(strings.TrimSpace(line), ".")
			continue
		}

		// Individual test.
		name := strings.TrimSpace(line)

		// Do we have a suite yet?
		if suite == "" {
			return nil, fmt.Errorf("test without a suite: %v", name)
		}

		// Add this individual test.
		t = append(t, TestCase{
			Suite: suite,
			Name:  name,
		})
	}

	// Finished?
	if !benchmarks {
		return t, nil
	}

	// Run again to extract benchmarks.
	tb, err := ParseBenchmarks(testBin, extraArgs...)
	if err != nil {
		return nil, err
	}
	t = append(t, tb...)
	return t, nil
}
// ParseBenchmarks returns each benchmark in the binary's list as a single test case.
// Each entry is placed under the synthetic "Benchmarks" suite with its
// benchmark flag set.
func ParseBenchmarks(binary string, extraArgs ...string) ([]TestCase, error) {
	var t []TestCase
	args := append([]string{listBenchmarkFlag}, extraArgs...)
	cmd := exec.Command(binary, args...)
	out, err := cmd.Output()
	if err != nil {
		// We were able to enumerate tests above, but not benchmarks?
		// We requested them, so we return an error in this case.
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			return nil, fmt.Errorf("could not enumerate benchmarks: %v", err)
		}
		return nil, fmt.Errorf("could not enumerate benchmarks: %v\nstderr\n%s", err, exitErr.Stderr)
	}

	benches := strings.Trim(string(out), "\n")
	if len(benches) == 0 {
		return t, nil
	}

	// Parse benchmark output: one benchmark per line.
	for _, line := range strings.Split(benches, "\n") {
		// Strip comments.
		line = strings.Split(line, "#")[0]

		// Single benchmark.
		name := strings.TrimSpace(line)

		// Add the single benchmark.
		t = append(t, TestCase{
			Suite:     "Benchmarks",
			Name:      name,
			benchmark: true,
		})
	}
	return t, nil
}
// BuildTestArgs builds arguments to be passed to the test binary to execute
// only the test cases selected by indices (indexes into testCases). A nil
// result means "run everything".
func BuildTestArgs(indices []int, testCases []TestCase) []string {
	var tests, benches []string
	for _, tci := range indices {
		tc := testCases[tci]
		switch {
		case tc.all:
			// A catch-all case means every test runs: no flags at all.
			return nil
		case tc.benchmark:
			// Benchmarks are selected by "|"-joined anchored regexps.
			benches = append(benches, "^"+tc.Name+"$")
		default:
			// Regular tests are selected by ":"-joined full names.
			tests = append(tests, tc.FullName())
		}
	}
	var args []string
	if len(tests) > 0 {
		args = append(args, filterTestFlag+"="+strings.Join(tests, ":"))
	}
	if len(benches) > 0 {
		args = append(args, filterBenchmarkFlag+"="+strings.Join(benches, "|"))
	}
	return args
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package cryptohome
import (
"context"
"os"
"time"
"chromiumos/tast/local/bundles/cros/cryptohome/cleanup"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/disk"
"chromiumos/tast/testing"
)
// init registers the low-disk-space notification test with the tast runner.
func init() {
	testing.AddTest(&testing.Test{
		Func:         ShowLowDiskSpaceNotification,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Test showing the low disk space notification",
		Contacts: []string{
			"vsavu@google.com",     // Test author
			"gwendal@chromium.com", // Lead for ChromeOS Storage
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Pre:          chrome.LoggedIn(),
	})
}
// ShowLowDiskSpaceNotification fills the user's home directory until free
// space drops below the minimal threshold, then waits for Chrome's "low_disk"
// notification to appear. The fill file is removed on exit.
func ShowLowDiskSpaceNotification(ctx context.Context, s *testing.State) {
	cr := s.PreValue().(*chrome.Chrome)

	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to get test API connection: ", err)
	}

	fillFile, err := disk.FillUntil(cleanup.UserHome, cleanup.MinimalFreeSpace)
	if err != nil {
		s.Fatal("Failed to fill disk space: ", err)
	}
	defer func() {
		if err := os.Remove(fillFile); err != nil {
			s.Errorf("Failed to remove fill file %s: %v", fillFile, err)
		}
	}()

	const notificationWaitTime = 150 * time.Second // Timeout for checking for low disk space notification.
	const notificationID = "low_disk"              // Hardcoded in Chrome.

	s.Logf("Waiting %d seconds for %v notification", notificationWaitTime/time.Second, notificationID)
	if _, err := ash.WaitForNotification(ctx, tconn, notificationWaitTime, ash.WaitIDContains(notificationID)); err != nil {
		// Check if too much space was made available (e.g. by automatic
		// cleanup), which would legitimately suppress the notification.
		freeSpace, fErr := disk.FreeSpace(cleanup.UserHome)
		if fErr != nil {
			// NOTE(review): message lacks the usual ": " separator before the error.
			s.Fatal("Failed to read the amount of free space", fErr)
		}

		if freeSpace >= cleanup.NotificationThreshold {
			s.Errorf("Space was cleaned without notification: got %d; want < %d", freeSpace, cleanup.NotificationThreshold)
		}

		s.Error("Notification not shown: ", err)
	}
}
|
package loads
import (
"jean/instructions/base"
"jean/instructions/factory"
"jean/rtda/jvmstack"
)
// ILOAD loads an int local variable onto the operand stack; the variable
// index comes from the instruction's single-byte operand.
type ILOAD struct {
	base.Index8Instruction
}

// Execute pushes the int local at i.Index onto the frame's operand stack.
func (i *ILOAD) Execute(frame *jvmstack.Frame) {
	_iload(frame, i.Index)
}

// _iload copies the int local variable at index onto the operand stack.
func _iload(frame *jvmstack.Frame, index uint) {
	val := frame.LocalVars().GetInt(index)
	frame.OperandStack().PushInt(val)
}
// ILOAD_<idx>: idx denotes the fixed index into the local variable table.
// These operand-less variants mirror the JVM's iload_0..iload_3 opcodes.
type ILOAD_0 struct {
	base.NoOperandsInstruction
}

func (i *ILOAD_0) Execute(frame *jvmstack.Frame) {
	_iload(frame, 0)
}

type ILOAD_1 struct {
	base.NoOperandsInstruction
}

func (i *ILOAD_1) Execute(frame *jvmstack.Frame) {
	_iload(frame, 1)
}

type ILOAD_2 struct {
	base.NoOperandsInstruction
}

func (i *ILOAD_2) Execute(frame *jvmstack.Frame) {
	_iload(frame, 2)
}

type ILOAD_3 struct {
	base.NoOperandsInstruction
}

func (i *ILOAD_3) Execute(frame *jvmstack.Frame) {
	_iload(frame, 3)
}
// init registers the iload opcodes with the instruction factory:
// 0x15 (iload, fresh instance per decode since it carries an index operand)
// and 0x1a-0x1d (iload_0..iload_3, stateless singletons shared across decodes).
func init() {
	factory.Factory.AddInstruction(0x15, func() base.Instruction {
		return &ILOAD{}
	})

	iload_0 := &ILOAD_0{}
	iload_1 := &ILOAD_1{}
	iload_2 := &ILOAD_2{}
	iload_3 := &ILOAD_3{}

	factory.Factory.AddInstruction(0x1a, func() base.Instruction {
		return iload_0
	})
	factory.Factory.AddInstruction(0x1b, func() base.Instruction {
		return iload_1
	})
	factory.Factory.AddInstruction(0x1c, func() base.Instruction {
		return iload_2
	})
	factory.Factory.AddInstruction(0x1d, func() base.Instruction {
		return iload_3
	})
}
|
package fixture
import (
	"bytes"
	"encoding/base64"
	"fmt"
	"log"
	"net/mail"
	"net/smtp"
	"os"
	"os/exec"
	"runtime"
	"strconv"
	"strings"
	"time"
)
// EmailUser holds the SMTP account and server used to deliver alert emails.
type EmailUser struct {
	Username    string // SMTP account name
	Password    string // SMTP account password
	EmailServer string // SMTP server host
	Port        int    // SMTP submission port
}
// sendEmail delivers a plain-text, base64-encoded email to send_to via
// Gmail's SMTP submission port. Delivery failures are logged, not returned.
//
// SECURITY NOTE(review): credentials are hard-coded below; they should be
// supplied via configuration or environment instead of living in source.
func sendEmail(send_to string, subj string, content string) {
	// FIX: use keyed fields in composite literals (flagged by go vet's
	// composites check, and robust against field reordering).
	emailUser := &EmailUser{
		Username:    "fixture.plumber",
		Password:    "plumber!",
		EmailServer: "smtp.gmail.com",
		Port:        587,
	}
	auth := smtp.PlainAuth("",
		emailUser.Username,
		emailUser.Password,
		emailUser.EmailServer)

	from := mail.Address{Name: "Fixture Plumber", Address: "fixture.plumber.com"}
	to := mail.Address{Name: "Test", Address: send_to}

	// RFC 822-style headers, then a blank line, then the base64 body.
	header := make(map[string]string)
	header["From"] = from.String()
	header["To"] = to.String()
	header["Subject"] = subj
	header["MIME-Version"] = "1.0"
	header["Content-Type"] = "text/plain; charset=\"utf-8\""
	header["Content-Transfer-Encoding"] = "base64"

	message := ""
	for k, v := range header {
		message += fmt.Sprintf("%s: %s\r\n", k, v)
	}
	message += "\r\n" + base64.StdEncoding.EncodeToString([]byte(content))

	err := smtp.SendMail(
		emailUser.EmailServer+":"+strconv.Itoa(emailUser.Port),
		auth,
		from.Address,
		[]string{to.Address},
		[]byte(message),
	)
	if err != nil {
		log.Print("ERROR: attempting to send a mail ", err)
	}
}
// Notifier emails alerts when process resource counts exceed thresholds,
// rate-limiting each alert type via its last-sent timestamp.
type Notifier struct {
	fd_threshold         int        // file-descriptor count that triggers an alert
	routine_threshold    int        // goroutine count that triggers an alert
	email                string     // recipient address
	last_message_fd      *time.Time // last fd alert sent (nil = never)
	last_message_routine *time.Time // last goroutine alert sent (nil = never)
}
// checkRoutineAlert compares the current goroutine count against the
// threshold and raises a routine-type alert (alert_type 1) when reached.
func (n *Notifier) checkRoutineAlert() {
	num_routines := runtime.NumGoroutine()
	fmt.Println("Number of go routines: ", num_routines)
	if num_routines >= n.routine_threshold {
		subject := "ALERT: Routine Threshold Exceeded"
		message := fmt.Sprintf("Number of current Go Routines is %d", num_routines)
		n.AlertUser(subject, message, 1)
	}
}
// checkFileAlert counts this process's open file descriptors (via
// `lsof -p <pid> | wc -l`) and raises an fd-type alert (alert_type 0) when
// the count reaches the threshold. Requires lsof and wc on PATH.
func (n *Notifier) checkFileAlert() {
	var fd_count bytes.Buffer
	process_id := os.Getpid()
	c1 := exec.Command("lsof", "-p", strconv.Itoa(process_id))
	c2 := exec.Command("wc", "-l")
	c2.Stdin, _ = c1.StdoutPipe()
	c2.Stdout = &fd_count
	// Best-effort pipeline; on failure the buffer stays empty and we bail
	// out below.
	_ = c2.Start()
	_ = c1.Run()
	_ = c2.Wait()
	// BUG FIX: wc's output ends with a newline (and may carry leading
	// spaces), so the original strconv.Atoi always failed, leaving count at
	// 0 — the alert could never fire. Trim whitespace before parsing.
	count, err := strconv.Atoi(strings.TrimSpace(fd_count.String()))
	if err != nil {
		return
	}
	if count >= n.fd_threshold {
		subject := "ALERT: File Descriptor Threshold Exceeded"
		message := fmt.Sprintf("Number of current file descriptors %d", count)
		n.AlertUser(subject, message, 0)
	}
}
// AlertUser emails subject/body to n.email, rate-limited to at most one
// message per hour per alert type (0 = file descriptors, anything else =
// goroutines).
func (n *Notifier) AlertUser(subject string, body string, alert_type int) {
	// Select which last-sent timestamp gates this alert type.
	last := &n.last_message_routine
	if alert_type == 0 {
		last = &n.last_message_fd
	}
	now := time.Now().UTC()
	if *last == nil {
		// First alert of this type: always send and start the clock.
		sendEmail(n.email, subject, body)
		t := now
		*last = &t
		return
	}
	diff := now.Sub(**last)
	// BUG FIX: the original used fmt.Println with printf-style verbs for its
	// debug output, and never refreshed the timestamp after a re-send — so
	// once an hour had passed it emailed on every subsequent check. Refresh
	// the timestamp to restore the one-per-hour throttle.
	if diff.Hours() >= 1 {
		sendEmail(n.email, subject, body)
		**last = now
	}
}
func RunAnalysis(fd_thresh int, r_thresh int, freq_sec int, email string) {
if fd_thresh < -1 || r_thresh < -1 {
fmt.Errorf("Argument less than -1")
return
}
if freq_sec <= 0 {
fmt.Errorf("Frequency too low: %d", freq_sec)
return
}
n := &Notifier{fd_thresh, r_thresh, email, nil, nil}
for {
if n.fd_threshold != -1 {
n.checkFileAlert()
}
if n.routine_threshold != -1 {
n.checkRoutineAlert()
}
time.Sleep(time.Duration(freq_sec) * time.Second)
}
}
|
package controller
// TODO(huangsz): re-enable / re-write the tests
//import (
// "reflect"
// "testing"
//
// "github.com/multivactech/MultiVAC/configs/config"
// "github.com/multivactech/MultiVAC/model/shard"
// "github.com/multivactech/MultiVAC/processor/shared/message"
// "github.com/multivactech/MultiVAC/rpc/btcjson"
// "github.com/stretchr/testify/assert"
//)
//
//func TestController_HandleRpcReq_NonSharded(t *testing.T) {
// _, _ = config.LoadConfig()
// tests := []struct {
// desc string
// req *message.RPCReq
// expected *message.RPCResp
// resultEqual func(a, o interface{}) bool
// }{
// {
// desc: "RPCGetOutState",
// req: message.NewNonShardedRPCReq(message.RPCGetAllShardsInfo),
// expected: message.NewSuccessRPCResp(&btcjson.ShardsInfo{
// NumShards: len(shardListForTest),
// Shards: []btcjson.ShardInfo{
// {
// Index: shard.IDToShardIndex(shardListForTest[0]),
// Enabled: true,
// },
// {
// Index: shard.IDToShardIndex(shardListForTest[1]),
// Enabled: true,
// },
// {
// Index: shard.IDToShardIndex(shardListForTest[2]),
// Enabled: true,
// },
// {
// Index: shard.IDToShardIndex(shardListForTest[3]),
// Enabled: true,
// },
// },
// }),
// resultEqual: func(a, o interface{}) bool {
// return reflect.DeepEqual(a, o)
// },
// },
// }
//
// ctrl := creatFakeStorageController(&config.Config{
// StorageNode: true,
// IsOneOfFirstMiners: false,
// IsSharded: false,
// Shards: shardListForTest,
// })
// a := assert.New(t)
// for _, test := range tests {
// r := ctrl.HandleRPCReq(test.req)
//
// a.Equalf(r.Status, test.expected.Status, "HandleRPCReq %v", test.desc)
// a.Condition(
// func() bool { return test.resultEqual(test.expected.Result, r.Result) },
// "HandleRPCReq %v, Result expected: %v, got: %v",
// test.desc, test.expected.Result, r.Result)
// }
//}
|
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
. "strings"
"unicode"
"github.com/dave/jennifer/jen"
"github.com/samply/golang-fhir-models/fhir-models-gen/fhir"
"github.com/spf13/cobra"
)
// Resource captures the handful of top-level FHIR resource fields this
// generator needs in order to index definitions by type, name, URL and
// version.
type Resource struct {
	ResourceType string
	Url          *string
	Version      *string
	Name         *string
}

// UnmarshalResource decodes b into a Resource. On invalid JSON the zero
// Resource is returned together with the decoding error.
func UnmarshalResource(b []byte) (Resource, error) {
	var r Resource
	err := json.Unmarshal(b, &r)
	return r, err
}
// ResourceMap indexes raw resource JSON first by resource type
// ("StructureDefinition", "ValueSet", "CodeSystem") and then by name or
// canonical URL.
type ResourceMap = map[string]map[string][]byte

// licenseComment holds the Apache-2.0 header, one slice element per line,
// which is prepended to every generated file. Split/Trim come from the
// dot-imported "strings" package.
var licenseComment = Split(Trim(`
Copyright 2019 - 2022 The Samply Community
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
`, "\n"), "\n")

// namePattern accepts names usable as Go identifiers: an upper-case letter
// followed by up to 254 letters, digits or underscores.
var namePattern = regexp.MustCompile("^[A-Z]([A-Za-z0-9_]){0,254}$")
// genResourcesCmd represents the genResources command. It walks the
// directory given as the first positional argument, indexes every FHIR
// JSON resource it finds (recursing into Bundles), then generates Go
// source files for all resource StructureDefinitions plus every type and
// required-binding ValueSet enum they reference.
var genResourcesCmd = &cobra.Command{
	Use:   "gen-resources",
	Short: "Generates Go structs from FHIR resource structure definitions.",
	// The original indexed args[0] unconditionally and panicked with an
	// index-out-of-range when the directory argument was missing; let
	// cobra validate the arity and print a proper usage error instead.
	Args: cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		dir := args[0]
		resources := make(ResourceMap)
		resources["StructureDefinition"] = make(map[string][]byte)
		resources["ValueSet"] = make(map[string][]byte)
		resources["CodeSystem"] = make(map[string][]byte)
		// First pass: index every .json file under dir by resource type
		// and name/URL. Bundles are unpacked entry by entry.
		err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if info.IsDir() {
				return nil
			}
			if !HasSuffix(info.Name(), ".json") {
				return nil
			}
			bytes, err := ioutil.ReadFile(path)
			if err != nil {
				return err
			}
			fmt.Printf("Generate Go sources from file: %s\n", path)
			resource, err := UnmarshalResource(bytes)
			if err != nil {
				return err
			}
			if resource.ResourceType == "Bundle" {
				bundle, err := fhir.UnmarshalBundle(bytes)
				if err != nil {
					return err
				}
				for _, entry := range bundle.Entry {
					entryResource, err := UnmarshalResource(entry.Resource)
					if err != nil {
						return err
					}
					switch entryResource.ResourceType {
					case "StructureDefinition":
						if entryResource.Name != nil {
							resources[entryResource.ResourceType][*entryResource.Name] = entry.Resource
						}
					case "ValueSet":
						fallthrough
					case "CodeSystem":
						// Index both the versioned and unversioned canonical
						// URL so either form of reference resolves.
						if entryResource.Url != nil {
							if entryResource.Version != nil {
								resources[entryResource.ResourceType][*entryResource.Url+"|"+*entryResource.Version] = entry.Resource
								resources[entryResource.ResourceType][*entryResource.Url] = entry.Resource
							} else {
								resources[entryResource.ResourceType][*entryResource.Url] = entry.Resource
							}
						}
					}
				}
			}
			switch resource.ResourceType {
			case "StructureDefinition":
				if resource.Name != nil {
					resources[resource.ResourceType][*resource.Name] = bytes
				}
			case "ValueSet":
				fallthrough
			case "CodeSystem":
				if resource.Url != nil {
					if resource.Version != nil {
						resources[resource.ResourceType][*resource.Url+"|"+*resource.Version] = bytes
						resources[resource.ResourceType][*resource.Url] = bytes
					} else {
						resources[resource.ResourceType][*resource.Url] = bytes
					}
				}
			}
			return nil
		})
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		// Second pass: generate one .go file per resource
		// StructureDefinition, collecting the complex types and required
		// ValueSet bindings each one depends on.
		requiredTypes := make(map[string]bool, 0)
		requiredValueSetBindings := make(map[string]bool, 0)
		for _, bytes := range resources["StructureDefinition"] {
			structureDefinition, err := fhir.UnmarshalStructureDefinition(bytes)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			if (structureDefinition.Kind == fhir.StructureDefinitionKindResource) &&
				structureDefinition.Name != "Element" &&
				structureDefinition.Name != "BackboneElement" {
				goFile, err := generateResourceOrType(resources, requiredTypes, requiredValueSetBindings, structureDefinition)
				if err != nil {
					fmt.Println(err)
					os.Exit(1)
				}
				err = goFile.Save(FirstLower(structureDefinition.Name) + ".go")
				if err != nil {
					fmt.Println(err)
					os.Exit(1)
				}
			}
		}
		// Generate the transitively required complex types.
		err = generateTypes(resources, make(map[string]bool, 0), requiredTypes, requiredValueSetBindings)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		// Finally, generate enums for the required ValueSet bindings.
		for url := range requiredValueSetBindings {
			bytes := resources["ValueSet"][url]
			if bytes == nil {
				fmt.Printf("Missing ValueSet `%s`.\n", url)
				os.Exit(1)
			}
			valueSet, err := fhir.UnmarshalValueSet(bytes)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			if valueSet.Name == nil {
				fmt.Println("Skip ValueSet without name.")
				continue
			}
			if !namePattern.MatchString(*valueSet.Name) {
				fmt.Printf("Skip ValueSet with non-conforming name `%s`.\n", *valueSet.Name)
				continue
			}
			goFile, err := generateValueSet(resources, valueSet)
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			err = goFile.Save(FirstLower(*valueSet.Name) + ".go")
			if err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
		}
	},
}
// FirstLower returns s with its first character lower-cased; the empty
// string is returned unchanged. (The original sliced s[:1] without a
// guard and panicked on "".) ToLower comes from the dot-imported
// "strings" package.
func FirstLower(s string) string {
	if s == "" {
		return s
	}
	return ToLower(s[:1]) + s[1:]
}
// generateTypes writes a Go source file for every StructureDefinition
// named in types, then recurses on any newly discovered dependencies
// until none remain. alreadyGeneratedTypes accumulates the names emitted
// so far so each type is generated at most once; ValueSet URLs seen along
// the way are added to requiredValueSetBindings.
func generateTypes(resources ResourceMap, alreadyGeneratedTypes map[string]bool, types map[string]bool, requiredValueSetBindings map[string]bool) error {
	pending := make(map[string]bool)
	for name := range types {
		raw := resources["StructureDefinition"][name]
		if raw == nil {
			return fmt.Errorf("missing StructureDefinition with name `%s`", name)
		}
		definition, err := fhir.UnmarshalStructureDefinition(raw)
		if err != nil {
			return err
		}
		generated, err := generateResourceOrType(resources, pending, requiredValueSetBindings, definition)
		if err != nil {
			return err
		}
		if err := generated.Save(FirstLower(definition.Name) + ".go"); err != nil {
			return err
		}
		alreadyGeneratedTypes[name] = true
	}
	// Drop everything already emitted, then recurse on whatever is left.
	for name := range alreadyGeneratedTypes {
		delete(pending, name)
	}
	if len(pending) == 0 {
		return nil
	}
	return generateTypes(resources, alreadyGeneratedTypes, pending, requiredValueSetBindings)
}
// generateResourceOrType renders one Go source file for the given
// StructureDefinition: the struct itself and, for resources, a
// MarshalJSON method (which injects the "resourceType" discriminator via
// an alias type to avoid recursion) plus an UnmarshalXxx helper. Complex
// types and required ValueSet bindings encountered in the fields are
// recorded in requiredTypes / requiredValueSetBindings for later
// generation.
func generateResourceOrType(resources ResourceMap, requiredTypes map[string]bool, requiredValueSetBindings map[string]bool, definition fhir.StructureDefinition) (*jen.File, error) {
	elementDefinitions := definition.Snapshot.Element
	if len(elementDefinitions) == 0 {
		return nil, fmt.Errorf("missing element definitions in structure definition `%s`", definition.Name)
	}
	fmt.Printf("Generate Go sources for StructureDefinition: %s\n", definition.Name)
	file := jen.NewFile("fhir")
	appendLicenseComment(file)
	appendGeneratorComment(file)
	// generate structs
	file.Commentf("%s is documented here %s", definition.Name, definition.Url)
	// err is assigned inside the closure below and checked after
	// StructFunc returns (the closure runs synchronously).
	var err error
	file.Type().Id(definition.Name).StructFunc(func(rootStruct *jen.Group) {
		// Start at element 1: element 0 is the root element itself.
		_, err = appendFields(resources, requiredTypes, requiredValueSetBindings, file, rootStruct, definition.Name, elementDefinitions, 1, 1)
	})
	if err != nil {
		return nil, err
	}
	// generate marshal
	if definition.Kind == fhir.StructureDefinitionKindResource {
		// OtherXxx is an alias with identical fields but no MarshalJSON
		// method, so the generated MarshalJSON below does not recurse.
		file.Type().Id("Other" + definition.Name).Id(definition.Name)
		file.Commentf("MarshalJSON marshals the given %s as JSON into a byte slice", definition.Name)
		file.Func().Params(jen.Id("r").Id(definition.Name)).Id("MarshalJSON").Params().
			Params(jen.Op("[]").Byte(), jen.Error()).Block(
			jen.Return().Qual("encoding/json", "Marshal").Call(jen.Struct(
				jen.Id("Other"+definition.Name),
				jen.Id("ResourceType").String().Tag(map[string]string{"json": "resourceType"}),
			).Values(jen.Dict{
				jen.Id("Other" + definition.Name): jen.Id("Other" + definition.Name).Call(jen.Id("r")),
				jen.Id("ResourceType"):            jen.Lit(definition.Name),
			})),
		)
	}
	// generate unmarshal
	if definition.Kind == fhir.StructureDefinitionKindResource {
		file.Commentf("Unmarshal%s unmarshals a %s.", definition.Name, definition.Name)
		file.Func().Id("Unmarshal"+definition.Name).
			Params(jen.Id("b").Op("[]").Byte()).
			Params(jen.Id(definition.Name), jen.Error()).
			Block(
				jen.Var().Id(FirstLower(definition.Name)).Id(definition.Name),
				jen.If(
					jen.Err().Op(":=").Qual("encoding/json", "Unmarshal").Call(
						jen.Id("b"),
						jen.Op("&").Id(FirstLower(definition.Name)),
					),
					jen.Err().Op("!=").Nil(),
				).Block(
					jen.Return(jen.Id(FirstLower(definition.Name)), jen.Err()),
				),
				jen.Return(jen.Id(FirstLower(definition.Name)), jen.Nil()),
			)
	}
	return file, nil
}
// appendLicenseComment prepends the Apache-2.0 license header to the
// generated file, one HeaderComment call per stored line.
func appendLicenseComment(file *jen.File) {
	for i := range licenseComment {
		file.HeaderComment(licenseComment[i])
	}
}
// appendGeneratorComment marks the file as machine-generated so readers
// and tooling know not to edit it by hand.
func appendGeneratorComment(file *jen.File) {
	notice := "// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models\n// PLEASE DO NOT EDIT BY HAND\n"
	file.Comment(notice)
}
// appendFields emits struct fields for the direct children of parentName,
// scanning elementDefinitions from index start at path depth `level`
// (path segments are dot-separated). It returns the index of the first
// element that is no longer a child of parentName (the next parent's
// sibling), or 0 when the slice was exhausted. Split/Title/Replace come
// from the dot-imported "strings" package.
func appendFields(resources ResourceMap, requiredTypes map[string]bool, requiredValueSetBindings map[string]bool,
	file *jen.File, fields *jen.Group, parentName string, elementDefinitions []fhir.ElementDefinition, start,
	level int) (int, error) {
	//fmt.Printf("appendFields parentName=%s, start=%d, level=%d\n", parentName, start, level)
	for i := start; i < len(elementDefinitions); i++ {
		element := elementDefinitions[i]
		pathParts := Split(element.Path, ".")
		if len(pathParts) == level+1 {
			// direct childs
			name := Title(pathParts[level])
			// support contained resources later
			if name != "Contained" {
				switch len(element.Type) {
				case 0:
					// Typeless element: it refers to another element's type
					// through a "#<path>" content reference (used for
					// recursive structures).
					if element.ContentReference != nil && (*element.ContentReference)[:1] == "#" {
						statement := fields.Id(name)
						// Cardinality: max "*" becomes a slice, min 0 a pointer.
						if *element.Max == "*" {
							statement.Op("[]")
						} else if *element.Min == 0 {
							statement.Op("*")
						}
						// Build the referenced type identifier by title-casing
						// each path segment of the content reference.
						typeIdentifier := ""
						for _, pathPart := range Split((*element.ContentReference)[1:], ".") {
							typeIdentifier = typeIdentifier + Title(pathPart)
						}
						statement.Id(typeIdentifier).Tag(map[string]string{"json": pathParts[level] + ",omitempty", "bson": pathParts[level] + ",omitempty"})
					}
				case 1:
					// Single-typed element: emit exactly one field. Note that
					// addFieldStatement may advance i past any nested
					// BackboneElement children it consumed.
					var err error
					i, err = addFieldStatement(resources, requiredTypes, requiredValueSetBindings, file, fields,
						pathParts[level], parentName, elementDefinitions, i, level, element.Type[0])
					if err != nil {
						return 0, err
					}
				default: //polymorphic type
					// Choice element ("value[x]"): emit one field per allowed
					// type, suffixing the stripped base name with the type code.
					name = Replace(pathParts[level], "[x]", "", -1)
					for _, eleType := range element.Type {
						name := name + Title(eleType.Code)
						var err error
						i, err = addFieldStatement(resources, requiredTypes, requiredValueSetBindings, file, fields,
							name, parentName, elementDefinitions, i, level, eleType)
						if err != nil {
							return 0, err
						}
					}
				}
			}
		} else {
			// index of the next parent sibling
			return i, nil
		}
	}
	return 0, nil
}
// addFieldStatement emits one struct field for the element at
// elementIndex using the given elementType. For "code" elements bound to
// a required ValueSet it emits the ValueSet's enum type (recording the
// binding); for inline Element/BackboneElement types it generates a new
// nested struct named parentName+fieldName and recurses into its
// children. It returns the (possibly advanced) element index so the
// caller can skip any consumed child elements.
func addFieldStatement(
	resources ResourceMap,
	requiredTypes map[string]bool,
	requiredValueSetBindings map[string]bool,
	file *jen.File,
	fields *jen.Group,
	name string,
	parentName string,
	elementDefinitions []fhir.ElementDefinition,
	elementIndex, level int,
	elementType fhir.ElementDefinitionType,
) (idx int, err error) {
	fieldName := Title(name)
	element := elementDefinitions[elementIndex]
	statement := fields.Id(fieldName)
	switch elementType.Code {
	case "code":
		// Cardinality: max "*" becomes a slice, min 0 a pointer.
		if *element.Max == "*" {
			statement.Op("[]")
		} else if *element.Min == 0 {
			statement.Op("*")
		}
		// A required ValueSet binding gets a generated enum type where
		// possible; every fallback below degrades to a plain string.
		if url := requiredValueSetBinding(element); url != nil {
			if bytes := resources["ValueSet"][*url]; bytes != nil {
				valueSet, err := fhir.UnmarshalValueSet(bytes)
				if err != nil {
					return 0, err
				}
				if name := valueSet.Name; name != nil {
					if !namePattern.MatchString(*name) {
						fmt.Printf("Skip generating an enum for a ValueSet binding to `%s` because the ValueSet has a non-conforming name.\n", *name)
						statement.Id("string")
					} else if len(valueSet.Compose.Include) > 1 {
						fmt.Printf("Skip generating an enum for a ValueSet binding to `%s` because the ValueSet includes more than one CodeSystem.\n", *valueSet.Name)
						statement.Id("string")
					} else if codeSystemUrl := canonical(valueSet.Compose.Include[0]); resources["CodeSystem"][codeSystemUrl] == nil {
						fmt.Printf("Skip generating an enum for a ValueSet binding to `%s` because the ValueSet includes the non-existing CodeSystem with canonical URL `%s`.\n", *valueSet.Name, codeSystemUrl)
						statement.Id("string")
					} else {
						requiredValueSetBindings[*url] = true
						statement.Id(*name)
					}
				} else {
					return 0, fmt.Errorf("missing name in ValueSet with canonical URL `%s`", *url)
				}
			} else {
				statement.Id("string")
			}
		} else {
			statement.Id("string")
		}
	case "Resource":
		// Embedded arbitrary resources stay raw JSON; decoding is deferred
		// to the consumer.
		statement.Qual("encoding/json", "RawMessage")
	default:
		if *element.Max == "*" {
			statement.Op("[]")
		} else if *element.Min == 0 {
			statement.Op("*")
		}
		var typeIdentifier string
		// Element.Id and Extension.Url are special-cased to plain strings.
		if parentName == "Element" && fieldName == "Id" ||
			parentName == "Extension" && fieldName == "Url" {
			typeIdentifier = "string"
		} else {
			typeIdentifier = typeCodeToTypeIdentifier(elementType.Code)
		}
		if typeIdentifier == "Element" || typeIdentifier == "BackboneElement" {
			// Inline anonymous type: generate a dedicated struct named
			// parentName+fieldName and recurse over its children. err is
			// assigned inside the closure (which runs synchronously) and
			// checked right after.
			backboneElementName := parentName + fieldName
			statement.Id(backboneElementName)
			var err error
			file.Type().Id(backboneElementName).StructFunc(func(childFields *jen.Group) {
				//var err error
				elementIndex, err = appendFields(resources, requiredTypes, requiredValueSetBindings, file, childFields,
					backboneElementName, elementDefinitions, elementIndex+1, level+1)
			})
			if err != nil {
				return 0, err
			}
			// appendFields returned the next sibling's index; step back one
			// because the caller's loop will increment again.
			elementIndex--
		} else if typeIdentifier == "decimal" {
			// decimals stay json.Number to avoid float64 precision loss.
			statement.Qual("encoding/json", "Number")
		} else {
			// An upper-case identifier names a complex type that must be
			// generated too.
			if unicode.IsUpper(rune(typeIdentifier[0])) {
				requiredTypes[typeIdentifier] = true
			}
			statement.Id(typeIdentifier)
		}
	}
	// Optional fields get ",omitempty" in both JSON and BSON tags.
	if *element.Min == 0 {
		statement.Tag(map[string]string{"json": name + ",omitempty", "bson": name + ",omitempty"})
	} else {
		statement.Tag(map[string]string{"json": name, "bson": name})
	}
	return elementIndex, err
}
// requiredValueSetBinding returns the canonical ValueSet URL of the
// element's binding when that binding has strength "required"; otherwise
// it returns nil.
func requiredValueSetBinding(elementDefinition fhir.ElementDefinition) *string {
	binding := elementDefinition.Binding
	if binding == nil || binding.Strength != fhir.BindingStrengthRequired {
		return nil
	}
	return binding.ValueSet
}
// typeCodeToTypeIdentifier maps a FHIR primitive type code to the Go type
// used in generated structs. Codes that are not recognized primitives are
// assumed to name a complex type and are returned unchanged.
func typeCodeToTypeIdentifier(typeCode string) string {
	switch typeCode {
	case "base64Binary", "canonical", "code", "date", "dateTime", "id",
		"instant", "markdown", "oid", "string", "time", "uri", "url",
		"uuid", "xhtml", "http://hl7.org/fhirpath/System.String":
		return "string"
	case "boolean":
		return "bool"
	case "integer", "positiveInt", "unsignedInt":
		return "int"
	default:
		return typeCode
	}
}
// init registers the gen-resources sub-command with the root command.
func init() {
	rootCmd.AddCommand(genResourcesCmd)
}
|
package main
import (
"bufio"
"fmt"
"net"
"github.com/cyberark/secretless-broker/pkg/secretless/log"
"github.com/cyberark/secretless-broker/pkg/secretless/plugin/connector"
)
// SingleUseConnector creates an authenticated connection to a target TCP service.
// It holds only a logger; every Connect call dials a fresh connection to the
// target, hence "single use".
type SingleUseConnector struct {
	logger log.Logger
}
// Connect is the function that implements the tcp.Connector func signature in this
// example plugin. It has access to the client connection and the credentials (as a map),
// and is expected to return the target service connection.
//
// This example connector works as follows:
// 1. Waits for the initial message from the client
// 2. Connect to a target service whose address is the value of the credential identified
//    by the key "address"
// 3. Inject credentials from a credential identified by the key "auth"
// 4. Write the initial message from the client with some modification
//
// Unlike the original, the dialed connection is closed when a write to it
// fails, so error paths no longer leak the target connection.
func (connector *SingleUseConnector) Connect(
	clientConn net.Conn,
	credentialValuesByID connector.CredentialValuesByID,
) (net.Conn, error) {
	connector.logger.Debugln("Waiting for initial write from client")
	clientInitMsg, _, err := bufio.NewReader(clientConn).ReadLine()
	if err != nil {
		return nil, err
	}

	connector.logger.Debugln("Dialing target service")
	conn, err := net.Dial("tcp", string(credentialValuesByID["address"]))
	if err != nil {
		return nil, err
	}

	connector.logger.Debugln("Sending packet with injected credentials to target service")
	credInjectionPacket := []byte(
		fmt.Sprintf(
			"credential injection: %s\n",
			string(credentialValuesByID["auth"]),
		),
	)
	if _, err = conn.Write(credInjectionPacket); err != nil {
		// Close the half-established target connection; the original
		// returned here without closing it and leaked the socket.
		conn.Close()
		return nil, err
	}

	connector.logger.Debugln("Sending modified client initial packet to target service")
	initMsgPacket := []byte(
		fmt.Sprintf(
			"initial message from client: %s\n",
			string(clientInitMsg),
		),
	)
	if _, err = conn.Write(initMsgPacket); err != nil {
		conn.Close()
		return nil, err
	}

	connector.logger.Debugln("Successfully connected to target service")
	return conn, nil
}
|
package main
import (
"bufio"
"encoding/csv"
"flag"
"fmt"
"io"
"os"
"strings"
"time"
)
/**
Problem: Have to close the goroutines from the previous questions, not just leave them hanging!
- Pretty sure the done goroutine is not firing
*/
// main parses the command-line flags and runs the quiz from the given
// CSV file.
func main() {
	var (
		fileName = flag.String("test-file", "problems.csv", "The name of the test file")
		timer    = flag.Int("question-timer", 10, "How long to allow for each question")
	)
	flag.Parse()

	// get file name from flags
	reader, err := openTestFile(*fileName)
	if err != nil {
		fmt.Printf("error opening file %s: %v", *fileName, err)
		return
	}
	runQuiz(reader, *timer)
}
func runQuiz(csv *csv.Reader, timer int) {
var totalCorrect, totalQuestions int
// if we receive on the timer channel, cancel
timerChan := make(chan struct{})
answerChan := make(chan string)
done := make(chan struct{})
// skip 0th line
csv.Read()
for {
question, answer, err := readLine(csv)
if err != nil {
if err == io.EOF {
break
} else {
// skip badly formed questions
fmt.Println(err)
continue
}
}
totalQuestions ++
fmt.Printf("Q%d: %s\n", totalQuestions, question)
go countdown(timer, timerChan, done)
go getInput(answerChan, done)
// select between the timer countdown ending or the user entering input
select {
case <- timerChan:
continue
case input := <- answerChan:
isCorrect := compareAnswers(answer, input)
showAnswer(isCorrect)
if isCorrect {
totalCorrect ++
}
}
}
fmt.Printf("\n\nQuiz Finished\n")
fmt.Printf("Your score: %d/%d\n", totalCorrect, totalQuestions)
}
// countdown ticks once per second for `timer`+1 ticks, then signals
// timerChan to indicate the question timed out. Closing doneChan cancels
// the countdown at any point — including the final send: the original
// performed `timerChan <- struct{}{}` unconditionally, so when the
// question had already been answered (nobody receiving) the goroutine
// blocked forever.
func countdown(timer int, timerChan chan struct{}, doneChan chan struct{}) {
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()
	for i := timer; i >= 0; i-- {
		select {
		case <-ticker.C:
			continue
		case <-doneChan:
			return
		}
	}
	// Either deliver the timeout or give up if the question is done.
	select {
	case timerChan <- struct{}{}:
	case <-doneChan:
	}
}
func readLine(csv *csv.Reader) (string, string, error) {
line, err := csv.Read()
if err != nil {
return "", "", err
}
return line[0], line[1], nil
}
// showAnswer prints feedback for the user's answer.
func showAnswer(isCorrect bool) {
	feedback := "Incorrect"
	if isCorrect {
		feedback = "Correct"
	}
	fmt.Println(feedback)
}
// read in csv file from default problems.csv
func openTestFile(filename string) (*csv.Reader, error) {
file, err := os.Open(filename)
if err != nil {
return nil, err
}
reader := csv.NewReader(bufio.NewReader(file))
return reader, nil
}
// getInput repeatedly reads lines from stdin and forwards them on
// answerChan until doneChan is closed.
//
// Fixes over the original: the bufio.Reader is created once — building a
// new reader every iteration could silently drop input the previous
// reader had already buffered — and the send on answerChan also selects
// on doneChan so the goroutine cannot block forever once the quiz is
// over. Note a goroutine parked inside ReadString still lingers until
// the next line arrives or the process exits; stdin reads cannot be
// interrupted.
func getInput(answerChan chan string, doneChan chan struct{}) {
	reader := bufio.NewReader(os.Stdin)
	for {
		select {
		case <-doneChan:
			return
		default:
			input, _ := reader.ReadString('\n')
			select {
			case answerChan <- input:
			case <-doneChan:
				return
			}
		}
	}
}
// compareAnswers reports whether input and answer match once surrounding
// whitespace is trimmed and both are lower-cased.
func compareAnswers(input string, answer string) bool {
	normalize := func(s string) string {
		return strings.ToLower(strings.TrimSpace(s))
	}
	return normalize(input) == normalize(answer)
}
package k8pool
import (
"context"
"fmt"
"reflect"
api_v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
)
// PeerInfo describes one peer pod discovered through the watched
// Kubernetes Endpoints resource; a slice of these is handed to the
// configured update callback.
type PeerInfo struct {
	// (Optional) The name of the data center this peer is in. Leave blank if not using multi data center support.
	DataCenter string
	// (Required) The ip address of the peer
	IPAddress string
	// (Optional) The http address:port of the peer
	HTTPAddress string
	// (Optional) The grpc address:port of the peer
	GRPCAddress string
	// (Optional) Is true if PeerInfo is for this instance of app
	IsOwner bool
}
// UpdateFunc receives the complete, regenerated peer list whenever the
// watched endpoints change.
type UpdateFunc func([]PeerInfo)

// Pool watches a Kubernetes Endpoints resource and reports the current
// set of peer pods to the configured OnUpdate callback.
type Pool struct {
	// informer caches and watches the selected Endpoints objects.
	informer cache.SharedIndexInformer
	// client is the in-cluster Kubernetes API client.
	client *kubernetes.Clientset
	log    logger
	conf   Config
	// done stops the informer goroutine when closed (see Close).
	done chan struct{}
}

// Config carries the settings needed to watch for peers.
type Config struct {
	Logger logger
	// OnUpdate is invoked with the full peer list after endpoint changes.
	OnUpdate UpdateFunc
	// Namespace and Selector choose which Endpoints objects to watch.
	Namespace string
	Selector  string
	// PodIP identifies this instance so its PeerInfo gets IsOwner set.
	PodIP string
	// PodPort is used to build each peer's HTTP/GRPC address.
	PodPort string
}
// New builds a Pool from the in-cluster Kubernetes configuration and
// immediately starts watching endpoints. It returns an error when the
// cluster config cannot be loaded, the client cannot be constructed, or
// the initial watch fails to sync.
func New(conf Config) (*Pool, error) {
	restConfig, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	// creates the client
	clientSet, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		return nil, err
	}
	p := &Pool{
		done:   make(chan struct{}),
		log:    conf.Logger,
		client: clientSet,
		conf:   conf,
	}
	return p, p.start()
}
// start creates and runs a SharedIndexInformer watching the Endpoints
// objects selected by conf.Selector in conf.Namespace, then blocks until
// the informer cache has synced (returning an error if it cannot).
func (e *Pool) start() error {
	e.informer = cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) {
				options.LabelSelector = e.conf.Selector
				return e.client.CoreV1().Endpoints(e.conf.Namespace).List(context.TODO(), options)
			},
			WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) {
				options.LabelSelector = e.conf.Selector
				return e.client.CoreV1().Endpoints(e.conf.Namespace).Watch(context.TODO(), options)
			},
		},
		&api_v1.Endpoints{},
		0, //Skip resync
		cache.Indexers{},
	)
	e.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			key, err := cache.MetaNamespaceKeyFunc(obj)
			e.log.Debugf("Queue (Add) '%s' - %s", key, err)
			if err != nil {
				e.log.Errorf("while calling MetaNamespaceKeyFunc(): %s", err)
				return
			}
			// The original refreshed peers only on Update/Delete, so peers
			// arriving through an Add event were not reported until some
			// later, unrelated change.
			e.updatePeers()
		},
		UpdateFunc: func(obj, new interface{}) {
			key, err := cache.MetaNamespaceKeyFunc(obj)
			e.log.Debugf("Queue (Update) '%s' - %s", key, err)
			if err != nil {
				e.log.Errorf("while calling MetaNamespaceKeyFunc(): %s", err)
				return
			}
			e.updatePeers()
		},
		DeleteFunc: func(obj interface{}) {
			key, err := cache.MetaNamespaceKeyFunc(obj)
			e.log.Debugf("Queue (Delete) '%s' - %s", key, err)
			if err != nil {
				e.log.Errorf("while calling MetaNamespaceKeyFunc(): %s", err)
				return
			}
			e.updatePeers()
		},
	})
	go e.informer.Run(e.done)
	if !cache.WaitForCacheSync(e.done, e.informer.HasSynced) {
		close(e.done)
		return fmt.Errorf("timed out waiting for caches to sync")
	}
	return nil
}
// updatePeers rebuilds the peer list from the informer's endpoint cache
// and hands the result to the OnUpdate callback.
func (e *Pool) updatePeers() {
	e.log.Debug("Fetching peer list from endpoints API")
	var peers []PeerInfo
	for _, obj := range e.informer.GetStore().List() {
		endpoint, ok := obj.(*api_v1.Endpoints)
		if !ok {
			e.log.Errorf("expected type v1.Endpoints got '%s' instead", reflect.TypeOf(obj).String())
			// The original fell through here and then ranged over the nil
			// endpoint's Subsets, panicking; skip the malformed entry.
			continue
		}
		for _, s := range endpoint.Subsets {
			for _, addr := range s.Addresses {
				// TODO(thrawn01): Might consider using the `namespace` as the `DataCenter`. We should
				// do what ever k8s convention is for identifying a k8s cluster within a federated multi-data
				// center setup.
				peer := PeerInfo{
					IPAddress:   addr.IP, // already a string; Sprintf("%s", ...) was redundant
					HTTPAddress: fmt.Sprintf("http://%s:%s", addr.IP, e.conf.PodPort),
					GRPCAddress: fmt.Sprintf("%s:%s", addr.IP, e.conf.PodPort),
				}
				if addr.IP == e.conf.PodIP {
					peer.IsOwner = true
				}
				peers = append(peers, peer)
				e.log.Debugf("Peer: %+v\n", peer)
			}
		}
	}
	e.conf.OnUpdate(peers)
}
// Close stops the informer goroutine started by start(). It must be
// called at most once; closing an already-closed channel panics.
func (e *Pool) Close() {
	close(e.done)
}
// isPodReady reports whether the pod carries a PodReady condition whose
// status is True.
func isPodReady(pod *api_v1.Pod) bool {
	for i := range pod.Status.Conditions {
		c := &pod.Status.Conditions[i]
		if c.Type == api_v1.PodReady && c.Status == api_v1.ConditionTrue {
			return true
		}
	}
	return false
}
|
package cluster
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/cnrancher/autok3s/pkg/common"
"github.com/cnrancher/autok3s/pkg/hosts"
"github.com/cnrancher/autok3s/pkg/providers"
"github.com/cnrancher/autok3s/pkg/types"
"github.com/cnrancher/autok3s/pkg/utils"
"github.com/rancher/k3s/pkg/agent/templates"
"github.com/sirupsen/logrus"
yamlv3 "gopkg.in/yaml.v3"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var (
	// initCommand installs the first k3s server: script URL, env prefix,
	// token, server args, external IP, extra args, install env.
	initCommand = "curl -sLS %s | %s K3S_TOKEN='%s' INSTALL_K3S_EXEC='server %s --node-external-ip %s %s' %s sh -"
	// joinCommand joins a node to an existing server at the given IP.
	joinCommand = "curl -sLS %s | %s K3S_URL='https://%s:6443' K3S_TOKEN='%s' INSTALL_K3S_EXEC='%s' %s sh -"
	// getTokenCommand reads the cluster join token from the first server.
	getTokenCommand = "sudo cat /var/lib/rancher/k3s/server/node-token"
	// catCfgCommand dumps the kubeconfig generated by k3s.
	catCfgCommand = "sudo cat /etc/rancher/k3s/k3s.yaml"
	// dockerCommand installs docker (idempotently) from the given mirror.
	dockerCommand = "if ! type docker; then curl -sSL %s | sh - %s; fi"
	// deployUICommand writes a base64-encoded manifest into the k3s
	// manifests directory so k3s auto-applies it.
	deployUICommand = "echo \"%s\" | base64 -d | sudo tee \"%s/ui.yaml\""
	// masterUninstallCommand / workerUninstallCommand run the uninstall
	// scripts k3s places on server and agent nodes respectively.
	masterUninstallCommand = "sh /usr/local/bin/k3s-uninstall.sh"
	workerUninstallCommand = "sh /usr/local/bin/k3s-agent-uninstall.sh"
	// registryPath is where k3s expects its registries configuration.
	registryPath = "/etc/rancher/k3s"
)
// InitK3sCluster provisions a complete k3s cluster on the nodes described
// by cluster: it installs the first master, any additional masters
// (sequentially), then all workers (concurrently), fetches the generated
// kubeconfig, optionally deploys the dashboard UI manifest, merges the
// kubeconfig locally, and persists the cluster state (except for the
// "native" provider).
func (p *ProviderBase) InitK3sCluster(cluster *types.Cluster) error {
	p.Logger.Infof("[%s] executing init k3s cluster logic...", p.Provider)
	provider, err := providers.GetProvider(p.Provider)
	if err != nil {
		return err
	}
	k3sScript := cluster.InstallScript
	k3sMirror := cluster.Mirror
	dockerMirror := cluster.DockerMirror
	// Generate a random join token if the caller did not supply one.
	if cluster.Token == "" {
		token, err := utils.RandomToken(16)
		if err != nil {
			return err
		}
		cluster.Token = token
	}
	if len(cluster.MasterNodes) <= 0 || len(cluster.MasterNodes[0].InternalIPAddress) <= 0 {
		return errors.New("[cluster] master node internal ip address can not be empty")
	}
	// Without an explicit cluster IP, the first master's internal address
	// becomes the cluster IP and its public address the advertised IP.
	publicIP := cluster.IP
	if cluster.IP == "" {
		cluster.IP = cluster.MasterNodes[0].InternalIPAddress[0]
		publicIP = cluster.MasterNodes[0].PublicIPAddress[0]
	}
	// append tls-sans to k3s install script:
	// 1. appends from --tls-sans flags.
	// 2. appends all master nodes' first public address.
	var tlsSans string
	p.TLSSans = append(p.TLSSans, publicIP)
	for _, master := range cluster.MasterNodes {
		if master.PublicIPAddress[0] != "" && master.PublicIPAddress[0] != publicIP {
			p.TLSSans = append(p.TLSSans, master.PublicIPAddress[0])
		}
	}
	for _, tlsSan := range p.TLSSans {
		tlsSans = tlsSans + fmt.Sprintf(" --tls-san %s", tlsSan)
	}
	// save p.TlsSans to db.
	cluster.TLSSans = p.TLSSans
	masterExtraArgs := cluster.MasterExtraArgs
	workerExtraArgs := cluster.WorkerExtraArgs
	// An external datastore replaces embedded etcd, so clustered
	// (multi-server etcd) mode is turned off.
	if cluster.DataStore != "" {
		cluster.Cluster = false
		masterExtraArgs += " --datastore-endpoint " + cluster.DataStore
	}
	if cluster.Network != "" {
		masterExtraArgs += fmt.Sprintf(" --flannel-backend=%s", cluster.Network)
	}
	if cluster.ClusterCidr != "" {
		masterExtraArgs += " --cluster-cidr " + cluster.ClusterCidr
	}
	p.Logger.Infof("[%s] creating k3s master-%d...", p.Provider, 1)
	master0ExtraArgs := masterExtraArgs
	providerExtraArgs := provider.GenerateMasterExtraArgs(cluster, cluster.MasterNodes[0])
	if providerExtraArgs != "" {
		master0ExtraArgs += providerExtraArgs
	}
	// First master bootstraps the embedded etcd cluster when clustered.
	if cluster.Cluster {
		master0ExtraArgs += " --cluster-init"
	}
	if err := p.initMaster(k3sScript, k3sMirror, dockerMirror, tlsSans, publicIP, master0ExtraArgs, cluster, cluster.MasterNodes[0]); err != nil {
		return err
	}
	p.Logger.Infof("[%s] successfully created k3s master-%d", p.Provider, 1)
	// Remaining masters join the first one sequentially.
	for i, master := range cluster.MasterNodes {
		// skip first master nodes.
		if i == 0 {
			continue
		}
		p.Logger.Infof("[%s] creating k3s master-%d...", p.Provider, i+1)
		masterNExtraArgs := masterExtraArgs
		providerExtraArgs := provider.GenerateMasterExtraArgs(cluster, master)
		if providerExtraArgs != "" {
			masterNExtraArgs += providerExtraArgs
		}
		if err := p.initAdditionalMaster(k3sScript, k3sMirror, dockerMirror, tlsSans, publicIP, masterNExtraArgs, cluster, master); err != nil {
			return err
		}
		p.Logger.Infof("[%s] successfully created k3s master-%d", p.Provider, i+1)
	}
	// Workers are provisioned concurrently; the first error short-circuits.
	// NOTE(review): workerErrChan is unbuffered and only one error is ever
	// received, so additional failing workers block on send — confirm
	// initWorker tolerates this before relying on full teardown.
	workerErrChan := make(chan error)
	workerWaitGroupDone := make(chan bool)
	workerWaitGroup := &sync.WaitGroup{}
	workerWaitGroup.Add(len(cluster.WorkerNodes))
	for i, worker := range cluster.WorkerNodes {
		go func(i int, worker types.Node) {
			p.Logger.Infof("[%s] creating k3s worker-%d...", p.Provider, i+1)
			extraArgs := workerExtraArgs
			providerExtraArgs := provider.GenerateWorkerExtraArgs(cluster, worker)
			if providerExtraArgs != "" {
				extraArgs += providerExtraArgs
			}
			p.initWorker(workerWaitGroup, workerErrChan, k3sScript, k3sMirror, dockerMirror, extraArgs, cluster, worker)
			p.Logger.Infof("[%s] successfully created k3s worker-%d", p.Provider, i+1)
		}(i, worker)
	}
	go func() {
		workerWaitGroup.Wait()
		close(workerWaitGroupDone)
	}()
	select {
	case <-workerWaitGroupDone:
		break
	case err := <-workerErrChan:
		return err
	}
	// get k3s cluster config.
	cfg, err := p.execute(&cluster.MasterNodes[0], []string{catCfgCommand})
	if err != nil {
		return err
	}
	p.Logger.Infof("[%s] deploying additional manifests", p.Provider)
	// deploy additional UI manifests.
	if cluster.UI {
		if _, err := p.execute(&cluster.MasterNodes[0], []string{fmt.Sprintf(deployUICommand,
			base64.StdEncoding.EncodeToString([]byte(dashboardTmpl)), common.K3sManifestsDir)}); err != nil {
			return err
		}
	}
	p.Logger.Infof("[%s] successfully deployed additional manifests", p.Provider)
	// merge current cluster to kube config.
	if err := SaveCfg(cfg, publicIP, cluster.ContextName); err != nil {
		return err
	}
	_ = os.Setenv(clientcmd.RecommendedConfigPathEnvVar, fmt.Sprintf("%s/%s", common.CfgPath, common.KubeCfgFile))
	cluster.Status.Status = common.StatusRunning
	// write current cluster to state file.
	// native provider no need to operate .state file.
	if p.Provider != "native" {
		if err := common.DefaultDB.SaveCluster(cluster); err != nil {
			return err
		}
	}
	p.Logger.Infof("[%s] successfully executed init k3s cluster logic", p.Provider)
	return nil
}
// Join executes the join-node logic for an existing cluster: it resolves
// the server IP and token, joins the added master nodes serially, joins
// the added worker nodes concurrently, then persists the updated state.
// merged is the full desired cluster; added holds only the new nodes.
func (p *ProviderBase) Join(merged, added *types.Cluster) error {
	p.Logger.Infof("[%s] executing join k3s node logic", merged.Provider)
	provider, err := providers.GetProvider(merged.Provider)
	if err != nil {
		return err
	}
	k3sScript := merged.InstallScript
	k3sMirror := merged.Mirror
	dockerMirror := merged.DockerMirror
	// Fall back to the first master's first internal address when no
	// explicit --ip was given.
	if merged.IP == "" {
		if len(merged.MasterNodes) <= 0 || len(merged.MasterNodes[0].InternalIPAddress) <= 0 {
			return errors.New("[cluster] master node internal ip address can not be empty")
		}
		merged.IP = merged.MasterNodes[0].InternalIPAddress[0]
	}
	// get cluster token from `--ip` address.
	if merged.Token == "" {
		serverNode := types.Node{}
		if len(added.MasterNodes) > 0 {
			serverNode = added.MasterNodes[0]
		} else {
			serverNode = added.WorkerNodes[0]
		}
		serverNode.PublicIPAddress = []string{merged.IP}
		token, err := p.execute(&serverNode, []string{getTokenCommand})
		if err != nil {
			return err
		}
		merged.Token = strings.TrimSpace(token)
	}
	if merged.Token == "" {
		return errors.New("[cluster] k3s token can not be empty")
	}
	// append tls-sans to k3s install script:
	// 1. appends from --tls-sans flags.
	// 2. appends all master nodes' first public address.
	var tlsSans string
	for _, master := range added.MasterNodes {
		// Guard the index: masters without a public address must not
		// panic the join flow.
		if len(master.PublicIPAddress) > 0 && master.PublicIPAddress[0] != "" {
			merged.TLSSans = append(merged.TLSSans, master.PublicIPAddress[0])
		}
	}
	for _, tlsSan := range merged.TLSSans {
		tlsSans = tlsSans + fmt.Sprintf(" --tls-san %s", tlsSan)
	}
	errChan := make(chan error)
	waitGroupDone := make(chan bool)
	waitGroup := &sync.WaitGroup{}
	// Join masters one at a time: k3s server joins are order-sensitive.
	for i := 0; i < len(added.Status.MasterNodes); i++ {
		for _, full := range merged.MasterNodes {
			extraArgs := merged.MasterExtraArgs
			if added.Status.MasterNodes[i].InstanceID == full.InstanceID {
				p.Logger.Infof("[%s] joining k3s master-%d...", merged.Provider, i+1)
				additionalExtraArgs := provider.GenerateMasterExtraArgs(added, full)
				if additionalExtraArgs != "" {
					extraArgs += additionalExtraArgs
				}
				if err := p.joinMaster(k3sScript, k3sMirror, dockerMirror, extraArgs, tlsSans, merged, full); err != nil {
					return err
				}
				p.Logger.Infof("[%s] successfully joined k3s master-%d", merged.Provider, i+1)
				break
			}
		}
	}
	// Join workers concurrently. Add(1) exactly once per spawned
	// goroutine: the previous Add(len(added.WorkerNodes)) could exceed
	// the number of goroutines actually launched (spawning is gated on
	// an InstanceID match), leaving Wait() blocked forever.
	for i := 0; i < len(added.Status.WorkerNodes); i++ {
		for _, full := range merged.WorkerNodes {
			extraArgs := merged.WorkerExtraArgs
			if added.Status.WorkerNodes[i].InstanceID == full.InstanceID {
				waitGroup.Add(1)
				go func(i int, full types.Node) {
					p.Logger.Infof("[%s] joining k3s worker-%d...", merged.Provider, i+1)
					additionalExtraArgs := provider.GenerateWorkerExtraArgs(added, full)
					if additionalExtraArgs != "" {
						extraArgs += additionalExtraArgs
					}
					p.joinWorker(waitGroup, errChan, k3sScript, k3sMirror, dockerMirror, extraArgs, merged, full)
					p.Logger.Infof("[%s] successfully joined k3s worker-%d", merged.Provider, i+1)
				}(i, full)
				break
			}
		}
	}
	go func() {
		waitGroup.Wait()
		close(waitGroupDone)
	}()
	// Return on the first worker error, or proceed once all workers are done.
	select {
	case <-waitGroupDone:
		break
	case err := <-errChan:
		return err
	}
	// sync master & worker numbers.
	merged.Master = strconv.Itoa(len(merged.MasterNodes))
	merged.Worker = strconv.Itoa(len(merged.WorkerNodes))
	merged.Status.Status = common.StatusRunning
	// write current cluster to state file.
	// native provider no need to operate .state file.
	if p.Provider != "native" {
		if err = common.DefaultDB.SaveCluster(merged); err != nil {
			// Deliberately non-fatal: the cluster itself is up; only the
			// local state file failed to save.
			p.Logger.Errorf("failed to save cluster state: %v", err)
			return nil
		}
	}
	p.Logger.Infof("[%s] successfully executed join k3s node logic", merged.Provider)
	return nil
}
// SSHK3sNode opens an interactive SSH terminal to the cluster node whose
// public IP or instance ID equals ip. CLI-provided ssh settings override
// the node's stored SSH config; port defaults to 22.
func SSHK3sNode(ip string, cluster *types.Cluster, ssh *types.SSH) error {
	var node types.Node
	// Match by public IP or instance ID. The index into PublicIPAddress
	// is guarded: nodes without a public address previously caused an
	// index-out-of-range panic here.
	for _, n := range cluster.Status.MasterNodes {
		if (len(n.PublicIPAddress) > 0 && n.PublicIPAddress[0] == ip) || n.InstanceID == ip {
			node = n
			break
		}
	}
	// A worker match intentionally overrides a master match (same as the
	// original lookup order).
	for _, n := range cluster.Status.WorkerNodes {
		if (len(n.PublicIPAddress) > 0 && n.PublicIPAddress[0] == ip) || n.InstanceID == ip {
			node = n
			break
		}
	}
	// Apply explicit CLI overrides on top of the stored node config.
	if ssh.SSHUser != "" {
		node.SSH.SSHUser = ssh.SSHUser
	}
	if ssh.SSHPort != "" {
		node.SSH.SSHPort = ssh.SSHPort
	}
	if ssh.SSHPassword != "" {
		node.SSH.SSHPassword = ssh.SSHPassword
	}
	if ssh.SSHKeyPath != "" {
		node.SSH.SSHKeyPath = ssh.SSHKeyPath
	}
	if ssh.SSHCert != "" {
		node.SSH.SSHCert = ssh.SSHCert
	}
	if ssh.SSHCertPath != "" {
		node.SSH.SSHCertPath = ssh.SSHCertPath
	}
	if ssh.SSHKeyPassphrase != "" {
		node.SSH.SSHKeyPassphrase = ssh.SSHKeyPassphrase
	}
	if ssh.SSHAgentAuth {
		node.SSH.SSHAgentAuth = ssh.SSHAgentAuth
	}
	// If no node matched, fall back to dialing the given address directly.
	if node.PublicIPAddress == nil {
		node.PublicIPAddress = []string{ip}
	}
	if node.SSH.SSHPort == "" {
		node.SSH.SSHPort = "22"
	}
	// preCheck ssh config
	if node.SSH.SSHUser == "" || (node.SSH.SSHPassword == "" && node.SSH.SSHKeyPath == "") {
		return fmt.Errorf("couldn't ssh to chosen node with current ssh config: --ssh-user %s --ssh-port %s --ssh-password %s --ssh-key-path %s", node.SSH.SSHUser, node.SSH.SSHPort, node.SSH.SSHPassword, node.SSH.SSHKeyPath)
	}
	return terminal(&node)
}
// UninstallK3sNodes runs the appropriate k3s uninstall script on each
// given node and returns one warning message per node whose uninstall
// command failed; it never aborts early.
func (p *ProviderBase) UninstallK3sNodes(nodes []types.Node) (warnMsg []string) {
	for _, node := range nodes {
		// Masters and workers ship different uninstall scripts.
		cmd, role := workerUninstallCommand, "worker"
		if node.Master {
			cmd, role = masterUninstallCommand, "master"
		}
		if _, err := p.execute(&node, []string{cmd}); err != nil {
			warnMsg = append(warnMsg, fmt.Sprintf("failed to uninstall k3s on %s node %s: %s", role, node.InstanceID, err.Error()))
		}
	}
	return
}
// SaveCfg rewrites a raw kubeconfig so it points at the given public ip
// and context name, writes it to a temp file under <CfgPath>/.kube, and
// merges it into the local kubeconfig via mergeCfg.
func SaveCfg(cfg, ip, context string) error {
	// NOTE(review): plain substring replacement — any occurrence of
	// "default" (including inside other words) is rewritten to the
	// context name; relies on k3s kubeconfigs using these exact tokens.
	replacer := strings.NewReplacer(
		"127.0.0.1", ip,
		"localhost", ip,
		"default", context,
	)
	result := replacer.Replace(cfg)
	tempPath := fmt.Sprintf("%s/.kube", common.CfgPath)
	if err := utils.EnsureFolderExist(tempPath); err != nil {
		return fmt.Errorf("[cluster] generate kubecfg temp folder error, msg: %s", err)
	}
	temp, err := ioutil.TempFile(tempPath, common.KubeCfgTempName)
	if err != nil {
		return fmt.Errorf("[cluster] generate kubecfg temp file error, msg: %s", err)
	}
	defer func() {
		_ = temp.Close()
	}()
	absPath, _ := filepath.Abs(temp.Name())
	// 0600: kubeconfig contains credentials.
	if err = ioutil.WriteFile(absPath, []byte(result), 0600); err != nil {
		return fmt.Errorf("[cluster] write content to kubecfg temp file error: %v", err)
	}
	// mergeCfg removes the temp file when it is done.
	return mergeCfg(context, temp.Name())
}
// OverwriteCfg removes the named context from the local kubeconfig file
// and points KUBECONFIG at that file.
func OverwriteCfg(context string) error {
	cfgFile := fmt.Sprintf("%s/%s", common.CfgPath, common.KubeCfgFile)
	_ = os.Setenv(clientcmd.RecommendedConfigPathEnvVar, cfgFile)
	mgr := &common.ConfigFileManager{}
	return mgr.OverwriteCfg(cfgFile, context, mgr.RemoveCfg)
}
// DeployExtraManifest runs the given shell commands on the cluster's
// first master node, typically to install additional manifests.
func (p *ProviderBase) DeployExtraManifest(cluster *types.Cluster, cmds []string) error {
	_, err := p.execute(&cluster.MasterNodes[0], cmds)
	return err
}
// initMaster bootstraps k3s on the very first master node of a new
// cluster: it optionally installs docker, pushes the private-registry
// config, then runs the k3s install command via SSH.
func (p *ProviderBase) initMaster(k3sScript, k3sMirror, dockerMirror, tlsSans, ip, extraArgs string, cluster *types.Cluster, master types.Node) error {
	// Install docker first when the k3s extra args select the docker runtime.
	if strings.Contains(extraArgs, "--docker") {
		p.Logger.Infof("[cluster] install docker command %s", fmt.Sprintf(dockerCommand, cluster.DockerScript, dockerMirror))
		if _, err := p.execute(&master, []string{fmt.Sprintf(dockerCommand, cluster.DockerScript, dockerMirror)}); err != nil {
			return err
		}
	}
	// Push registries.yaml when a private registry is configured.
	if cluster.Registry != "" || cluster.RegistryContent != "" {
		if err := p.handleRegistry(&master, cluster); err != nil {
			return err
		}
	}
	// Argument order must match the initCommand format string exactly;
	// the log line below mirrors the executed command for debugging.
	p.Logger.Infof("[cluster] k3s master command: %s", fmt.Sprintf(initCommand, k3sScript, k3sMirror, cluster.Token,
		tlsSans, ip, strings.TrimSpace(extraArgs), genK3sVersion(cluster.K3sVersion, cluster.K3sChannel)))
	if _, err := p.execute(&master, []string{fmt.Sprintf(initCommand, k3sScript, k3sMirror,
		cluster.Token, tlsSans, ip, strings.TrimSpace(extraArgs), genK3sVersion(cluster.K3sVersion, cluster.K3sChannel))}); err != nil {
		return err
	}
	return nil
}
// initAdditionalMaster joins an extra master (server) node to an already
// initialized cluster by pointing it at the first server's API endpoint.
func (p *ProviderBase) initAdditionalMaster(k3sScript, k3sMirror, dockerMirror, tlsSans, ip, extraArgs string, cluster *types.Cluster, master types.Node) error {
	sortedExtraArgs := ""
	// Install docker first when the k3s extra args select the docker runtime.
	if strings.Contains(extraArgs, "--docker") {
		if _, err := p.execute(&master, []string{fmt.Sprintf(dockerCommand, cluster.DockerScript, dockerMirror)}); err != nil {
			return err
		}
	}
	// Push registries.yaml when a private registry is configured.
	if cluster.Registry != "" || cluster.RegistryContent != "" {
		if err := p.handleRegistry(&master, cluster); err != nil {
			return err
		}
	}
	// Prepend "server --server https://<ip>:6443 ..." unless the caller
	// already supplied it.
	// NOTE(review): master.PublicIPAddress[0] assumes every master has a
	// public address — confirm upstream providers guarantee this.
	if !strings.Contains(extraArgs, "server --server") {
		sortedExtraArgs += fmt.Sprintf(" server --server %s %s --node-external-ip %s", fmt.Sprintf("https://%s:6443", ip), tlsSans, master.PublicIPAddress[0])
	}
	sortedExtraArgs += " " + extraArgs
	p.Logger.Infof("[cluster] k3s additional master command: %s", fmt.Sprintf(joinCommand, k3sScript, k3sMirror,
		ip, cluster.Token, strings.TrimSpace(sortedExtraArgs), genK3sVersion(cluster.K3sVersion, cluster.K3sChannel)))
	if _, err := p.execute(&master, []string{fmt.Sprintf(joinCommand, k3sScript, k3sMirror, ip,
		cluster.Token, strings.TrimSpace(sortedExtraArgs), genK3sVersion(cluster.K3sVersion, cluster.K3sChannel))}); err != nil {
		return err
	}
	return nil
}
// initWorker installs k3s agent on a worker node during cluster
// creation. It is run from a goroutine: failures are reported on errChan
// and wg is always released via defer.
func (p *ProviderBase) initWorker(wg *sync.WaitGroup, errChan chan error, k3sScript, k3sMirror, dockerMirror, extraArgs string,
	cluster *types.Cluster, worker types.Node) {
	// Deferred so every exit path (including the new early returns)
	// releases the wait group exactly once.
	defer wg.Done()
	sortedExtraArgs := ""
	if strings.Contains(extraArgs, "--docker") {
		if _, err := p.execute(&worker, []string{fmt.Sprintf(dockerCommand, cluster.DockerScript, dockerMirror)}); err != nil {
			// Previously execution fell through and still ran the join
			// command after a failed docker install; stop here instead.
			// NOTE: errChan is unbuffered — if the receiver has already
			// returned on an earlier error, this send blocks (pre-existing).
			errChan <- err
			return
		}
	}
	if cluster.Registry != "" || cluster.RegistryContent != "" {
		if err := p.handleRegistry(&worker, cluster); err != nil {
			errChan <- err
			return
		}
	}
	sortedExtraArgs += fmt.Sprintf(" --node-external-ip %s", worker.PublicIPAddress[0])
	sortedExtraArgs += " " + extraArgs
	p.Logger.Infof("[cluster] k3s worker command: %s", fmt.Sprintf(joinCommand, k3sScript, k3sMirror, cluster.IP,
		cluster.Token, strings.TrimSpace(sortedExtraArgs), genK3sVersion(cluster.K3sVersion, cluster.K3sChannel)))
	if _, err := p.execute(&worker, []string{fmt.Sprintf(joinCommand, k3sScript, k3sMirror, cluster.IP,
		cluster.Token, strings.TrimSpace(sortedExtraArgs), genK3sVersion(cluster.K3sVersion, cluster.K3sChannel))}); err != nil {
		errChan <- err
	}
}
// joinMaster joins an additional k3s server node to an existing cluster,
// wiring it to the current server endpoint, datastore and cluster CIDR.
func (p *ProviderBase) joinMaster(k3sScript, k3sMirror, dockerMirror,
	extraArgs, tlsSans string, merged *types.Cluster, full types.Node) error {
	sortedExtraArgs := ""
	// Prepend "server --server ..." unless the caller already supplied it.
	// NOTE(review): full.PublicIPAddress[0] assumes the node has a public
	// address — confirm upstream providers guarantee this.
	if !strings.Contains(extraArgs, "server --server") {
		sortedExtraArgs += fmt.Sprintf(" server --server %s %s --node-external-ip %s", fmt.Sprintf("https://%s:6443", merged.IP), tlsSans, full.PublicIPAddress[0])
	}
	if merged.DataStore != "" {
		sortedExtraArgs += " --datastore-endpoint " + merged.DataStore
	}
	if merged.ClusterCidr != "" {
		sortedExtraArgs += " --cluster-cidr " + merged.ClusterCidr
	}
	// Install docker first when the extra args select the docker runtime.
	if strings.Contains(extraArgs, "--docker") {
		if _, err := p.execute(&full, []string{fmt.Sprintf(dockerCommand, merged.DockerScript, dockerMirror)}); err != nil {
			return err
		}
	}
	// Push registries.yaml when a private registry is configured.
	if merged.Registry != "" || merged.RegistryContent != "" {
		if err := p.handleRegistry(&full, merged); err != nil {
			return err
		}
	}
	sortedExtraArgs += " " + extraArgs
	p.Logger.Infof("[cluster] k3s master command: %s", fmt.Sprintf(joinCommand, k3sScript, k3sMirror, merged.IP,
		merged.Token, strings.TrimSpace(sortedExtraArgs), genK3sVersion(merged.K3sVersion, merged.K3sChannel)))
	// for now, use the workerCommand to join the additional master server node.
	if _, err := p.execute(&full, []string{fmt.Sprintf(joinCommand, k3sScript, k3sMirror, merged.IP,
		merged.Token, strings.TrimSpace(sortedExtraArgs), genK3sVersion(merged.K3sVersion, merged.K3sChannel))}); err != nil {
		return err
	}
	return nil
}
// joinWorker joins a worker node to an existing cluster. It is run from
// a goroutine: failures are reported on errChan and wg is always
// released via defer.
func (p *ProviderBase) joinWorker(wg *sync.WaitGroup, errChan chan error, k3sScript, k3sMirror, dockerMirror, extraArgs string,
	merged *types.Cluster, full types.Node) {
	// Deferred so every exit path (including the new early returns)
	// releases the wait group exactly once.
	defer wg.Done()
	sortedExtraArgs := ""
	if strings.Contains(extraArgs, "--docker") {
		if _, err := p.execute(&full, []string{fmt.Sprintf(dockerCommand, merged.DockerScript, dockerMirror)}); err != nil {
			// Previously execution fell through and still ran the join
			// command after a failed docker install; stop here instead.
			// NOTE: errChan is unbuffered — if the receiver has already
			// returned on an earlier error, this send blocks (pre-existing).
			errChan <- err
			return
		}
	}
	if merged.Registry != "" || merged.RegistryContent != "" {
		if err := p.handleRegistry(&full, merged); err != nil {
			errChan <- err
			return
		}
	}
	sortedExtraArgs += fmt.Sprintf(" --node-external-ip %s", full.PublicIPAddress[0])
	sortedExtraArgs += " " + extraArgs
	p.Logger.Infof("[cluster] k3s worker command: %s", fmt.Sprintf(joinCommand, k3sScript, k3sMirror, merged.IP,
		merged.Token, strings.TrimSpace(sortedExtraArgs), genK3sVersion(merged.K3sVersion, merged.K3sChannel)))
	if _, err := p.execute(&full, []string{fmt.Sprintf(joinCommand, k3sScript, k3sMirror, merged.IP,
		merged.Token, strings.TrimSpace(sortedExtraArgs), genK3sVersion(merged.K3sVersion, merged.K3sChannel))}); err != nil {
		errChan <- err
	}
}
// execute dials the node over SSH, runs every command in cmds, and
// returns the combined stdout. On failure the error wraps the SSH error
// and appends the captured stderr.
func (p *ProviderBase) execute(n *types.Node, cmds []string) (string, error) {
	if len(cmds) == 0 {
		return "", nil
	}
	dialer, err := hosts.NewSSHDialer(n, true)
	if err != nil {
		return "", err
	}
	defer func() {
		_ = dialer.Close()
	}()
	var stdout, stderr bytes.Buffer
	dialer.SetStdio(&stdout, &stderr, nil).SetWriter(p.Logger.Out)
	for _, c := range cmds {
		dialer.Cmd(c)
	}
	if err = dialer.Run(); err != nil {
		return "", fmt.Errorf("%w: %s", err, stderr.String())
	}
	return stdout.String(), nil
}
// terminal attaches the local stdio to an interactive SSH session on the
// given node and blocks until that session ends.
func terminal(n *types.Node) error {
	dialer, err := hosts.NewSSHDialer(n, true)
	if err != nil {
		return err
	}
	defer func() {
		_ = dialer.Close()
	}()
	return dialer.SetStdio(os.Stdout, os.Stderr, os.Stdin).Terminal()
}
// mergeCfg merges the kubeconfig stored in tempFile into the local
// kubeconfig file, replacing any existing entry for the given context.
// The temp file is always removed, and KUBECONFIG is restored to the
// final kubeconfig path on exit.
func mergeCfg(context, tempFile string) error {
	defer func() {
		if err := os.Remove(tempFile); err != nil {
			logrus.Errorf("[cluster] remove kubecfg temp file error, msg: %s", err)
		}
		// Restore KUBECONFIG to the single merged file.
		_ = os.Setenv(clientcmd.RecommendedConfigPathEnvVar, fmt.Sprintf("%s/%s", common.CfgPath, common.KubeCfgFile))
	}()
	kubeConfigPath := fmt.Sprintf("%s/%s", common.CfgPath, common.KubeCfgFile)
	_ = os.Setenv(clientcmd.RecommendedConfigPathEnvVar, kubeConfigPath)
	fMgr := &common.ConfigFileManager{}
	// First drop the stale context, then merge the new one in. The
	// KUBECONFIG ordering below (existing file first, temp second) is
	// what clientcmd uses to combine both files — do not reorder.
	_ = fMgr.OverwriteCfg(kubeConfigPath, context, fMgr.RemoveCfg)
	mergeKubeConfigENV := fmt.Sprintf("%s:%s", kubeConfigPath, tempFile)
	_ = os.Setenv(clientcmd.RecommendedConfigPathEnvVar, mergeKubeConfigENV)
	return fMgr.OverwriteCfg(fmt.Sprintf("%s/%s", common.CfgPath, common.KubeCfgFile), context, fMgr.MergeCfg)
}
// genK3sVersion builds the environment assignment selecting which k3s
// release the install script should fetch: an explicit version wins,
// otherwise the channel is used.
func genK3sVersion(version, channel string) string {
	if version == "" {
		return fmt.Sprintf("INSTALL_K3S_CHANNEL='%s'", channel)
	}
	return fmt.Sprintf("INSTALL_K3S_VERSION='%s'", version)
}
// handleRegistry renders the cluster's private-registry configuration
// (from a file path or inline content), uploads any referenced TLS
// material, and writes /etc/rancher/k3s/registries.yaml on the node.
// It is a no-op when no registry is configured.
func (p *ProviderBase) handleRegistry(n *types.Node, c *types.Cluster) (err error) {
	if c.Registry == "" && c.RegistryContent == "" {
		return nil
	}
	cmd := make([]string, 0)
	cmd = append(cmd, fmt.Sprintf("sudo mkdir -p %s", registryPath))
	var registry *templates.Registry
	// --registry (file path) takes precedence over inline content.
	if c.Registry != "" {
		registry, err = unmarshalRegistryFile(c.Registry)
		if err != nil {
			return err
		}
	} else if c.RegistryContent != "" {
		registry = &templates.Registry{}
		err = yamlv3.Unmarshal([]byte(c.RegistryContent), registry)
		if err != nil {
			return err
		}
	}
	tls, err := registryTLSMap(registry)
	if err != nil {
		return err
	}
	// len() on a nil map is 0, so the previous `tls != nil &&` guard was
	// redundant (staticcheck S1009).
	if len(tls) > 0 {
		registry, cmd, err = saveRegistryTLS(registry, tls)
		if err != nil {
			return err
		}
	}
	registryContent, err := registryToString(registry)
	if err != nil {
		return err
	}
	// base64-encode to survive shell quoting; decoded server-side by tee.
	cmd = append(cmd, fmt.Sprintf("echo \"%s\" | base64 -d | sudo tee \"/etc/rancher/k3s/registries.yaml\"",
		base64.StdEncoding.EncodeToString([]byte(registryContent))))
	_, err = p.execute(n, cmd)
	return err
}
// unmarshalRegistryFile parses a registries.yaml file into a Registry.
// A missing file yields an empty (non-nil) Registry without error; an
// existing-but-empty file is reported as an error.
func unmarshalRegistryFile(file string) (*templates.Registry, error) {
	registry := &templates.Registry{}
	b, err := ioutil.ReadFile(file)
	switch {
	case os.IsNotExist(err):
		// Absent file is treated as "no registry config".
		return registry, nil
	case err != nil:
		return nil, err
	case len(b) == 0:
		return nil, fmt.Errorf("registry file %s is empty", file)
	}
	if err := yamlv3.Unmarshal(b, registry); err != nil {
		return nil, err
	}
	return registry, nil
}
// registryTLSMap reads the cert/key/ca files referenced by each registry
// config and returns their raw contents keyed by registry name and file
// kind ("cert", "key", "ca"). Registries without TLS config get no entry.
func registryTLSMap(registry *templates.Registry) (m map[string]map[string][]byte, err error) {
	m = make(map[string]map[string][]byte)
	if registry == nil {
		err = fmt.Errorf("registry is nil")
		return
	}
	for r, c := range registry.Configs {
		if c.TLS == nil {
			continue
		}
		// Create the per-registry entry only once TLS config is known to
		// exist: the previous unconditional insert left empty entries
		// that made callers create spurious cert directories on nodes.
		if _, ok := m[r]; !ok {
			m[r] = map[string][]byte{}
		}
		if c.TLS.CertFile != "" {
			b, err := ioutil.ReadFile(c.TLS.CertFile)
			if err != nil {
				return m, err
			}
			m[r]["cert"] = b
		}
		if c.TLS.KeyFile != "" {
			b, err := ioutil.ReadFile(c.TLS.KeyFile)
			if err != nil {
				return m, err
			}
			m[r]["key"] = b
		}
		if c.TLS.CAFile != "" {
			b, err := ioutil.ReadFile(c.TLS.CAFile)
			if err != nil {
				return m, err
			}
			m[r]["ca"] = b
		}
	}
	return
}
// saveRegistryTLS builds the shell commands that upload the given TLS
// material to /etc/rancher/k3s/<registry>/ on the node and rewrites the
// registry config to point at those remote paths.
// NOTE: it mutates the passed-in registry in place; iteration order over
// m (a map) is nondeterministic, so the emitted command order can vary
// between runs.
func saveRegistryTLS(registry *templates.Registry, m map[string]map[string][]byte) (*templates.Registry, []string, error) {
	cmd := make([]string, 0)
	for r, c := range m {
		if r != "" {
			if _, ok := registry.Configs[r]; !ok {
				return nil, cmd, fmt.Errorf("registry map is not match the struct: %s", r)
			}
			// i.e /etc/rancher/k3s/mycustomreg:5000/.
			path := fmt.Sprintf("/etc/rancher/k3s/%s", r)
			cmd = append(cmd, fmt.Sprintf("sudo mkdir -p %s", path))
			for f, b := range c {
				// i.e /etc/rancher/k3s/mycustomreg:5000/{ca,key,cert}.
				file := fmt.Sprintf("%s/%s", path, f)
				// Contents are base64-encoded to survive shell quoting.
				cmd = append(cmd, fmt.Sprintf("echo \"%s\" | base64 -d | sudo tee \"%s\"", base64.StdEncoding.EncodeToString(b), file))
				// NOTE(review): 755 makes the private key world-readable;
				// consider 600 for the "key" file — confirm nothing on the
				// node needs broader access.
				cmd = append(cmd, fmt.Sprintf("sudo chmod 755 %s", file))
				switch f {
				case "cert":
					registry.Configs[r].TLS.CertFile = file
				case "key":
					registry.Configs[r].TLS.KeyFile = file
				case "ca":
					registry.Configs[r].TLS.CAFile = file
				}
			}
		}
	}
	return registry, cmd, nil
}
// registryToString serializes a Registry back to YAML text; a nil
// registry is rejected with an error.
func registryToString(registry *templates.Registry) (string, error) {
	if registry == nil {
		return "", fmt.Errorf("can't save registry file: registry is nil")
	}
	out, err := yamlv3.Marshal(registry)
	if err != nil {
		return "", err
	}
	return string(out), nil
}
// buildConfigFromFlags loads a rest.Config from the given kubeconfig
// file, overriding the current context with the supplied one.
func buildConfigFromFlags(context, kubeconfigPath string) (*rest.Config, error) {
	rules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}
	overrides := &clientcmd.ConfigOverrides{CurrentContext: context}
	return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig()
}
// GetClusterConfig builds a Kubernetes clientset for the named context
// of the given kubeconfig, with a 15-second client timeout.
func GetClusterConfig(name, kubeconfig string) (*kubernetes.Clientset, error) {
	cfg, err := buildConfigFromFlags(name, kubeconfig)
	if err != nil {
		return nil, err
	}
	cfg.Timeout = 15 * time.Second
	return kubernetes.NewForConfig(cfg)
}
// GetClusterStatus probes the apiserver's /readyz endpoint and maps the
// result to a running/stopped status string.
func GetClusterStatus(c *kubernetes.Clientset) string {
	if _, err := c.RESTClient().Get().Timeout(15 * time.Second).RequestURI("/readyz").DoRaw(context.TODO()); err != nil {
		return types.ClusterStatusStopped
	}
	return types.ClusterStatusRunning
}
// GetClusterVersion returns the apiserver's git version, or the unknown
// status string when the server cannot be reached.
func GetClusterVersion(c *kubernetes.Clientset) string {
	info, err := c.DiscoveryClient.ServerVersion()
	if err != nil {
		return types.ClusterStatusUnknown
	}
	return info.GitVersion
}
// DescribeClusterNodes lists the Kubernetes nodes and enriches the given
// instance nodes (matched by internal IP or instance ID == node name)
// with hostname, versions, roles, and readiness status. The enriched
// slice is returned; unmatched instance entries are left untouched.
func DescribeClusterNodes(client *kubernetes.Clientset, instanceNodes []types.ClusterNode) ([]types.ClusterNode, error) {
	// list cluster nodes.
	// ListOptions.TimeoutSeconds is expressed in SECONDS; the previous
	// int64(5 * time.Second) passed 5e9 (nanoseconds), effectively
	// disabling the server-side timeout.
	timeout := int64(5)
	nodeList, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{TimeoutSeconds: &timeout})
	if err != nil || nodeList == nil {
		return nil, err
	}
	for _, node := range nodeList.Items {
		// Extract the internal IP and hostname advertised by the kubelet.
		var internalIP, hostName string
		addressList := node.Status.Addresses
		for _, address := range addressList {
			switch address.Type {
			case v1.NodeInternalIP:
				internalIP = address.Address
			case v1.NodeHostName:
				hostName = address.Address
			default:
				continue
			}
		}
		for index, n := range instanceNodes {
			// Match by internal IP first, then fall back to instance ID.
			isCurrentInstance := false
			for _, address := range n.InternalIP {
				if address == internalIP {
					isCurrentInstance = true
					break
				}
			}
			if !isCurrentInstance {
				if n.InstanceID == node.Name {
					isCurrentInstance = true
				}
			}
			if isCurrentInstance {
				n.HostName = hostName
				n.Version = node.Status.NodeInfo.KubeletVersion
				n.ContainerRuntimeVersion = node.Status.NodeInfo.ContainerRuntimeVersion
				// get roles from node-role.kubernetes.io/* labels.
				labels := node.Labels
				roles := make([]string, 0)
				for role := range labels {
					if strings.HasPrefix(role, "node-role.kubernetes.io") {
						roleArray := strings.Split(role, "/")
						if len(roleArray) > 1 {
							roles = append(roles, roleArray[1])
						}
					}
				}
				if len(roles) == 0 {
					roles = append(roles, "<none>")
				}
				// Sorted for deterministic output (label map order is random).
				sort.Strings(roles)
				n.Roles = strings.Join(roles, ",")
				// get status from the Ready condition.
				conditions := node.Status.Conditions
				for _, c := range conditions {
					if c.Type == v1.NodeReady {
						if c.Status == v1.ConditionTrue {
							n.Status = "Ready"
						} else {
							n.Status = "NotReady"
						}
						break
					}
				}
				instanceNodes[index] = n
				break
			}
		}
	}
	return instanceNodes, nil
}
|
package main
import (
"flag"
"net"
"google.golang.org/grpc"
"google.golang.org/grpc/grpclog"
"fmt"
"github.com/lintflow/core/inspector"
pb "github.com/lintflow/core/proto"
"net/http"
_ "net/http/pprof"
)
// Command-line flags: the address this gRPC service listens on, and the
// address of the lookupd service it connects to.
var (
	addr    = flag.String(`addr`, `localhost:4568`, `address for listen service`)
	lookupd = flag.String(`lookupd`, `localhost:4567`, `address for listen lookupd`)
)
// main starts the inspector gRPC service: it listens on --addr, dials
// the lookupd service at --lookupd, exposes pprof on :36663, and serves
// until the listener fails.
func main() {
	flag.Parse()
	lis, err := net.Listen("tcp", *addr)
	if err != nil {
		grpclog.Fatalf("failed to listen: %v", err)
	}
	// Set up a connection to the lookupd services
	conn, err := grpc.Dial(*lookupd, grpc.WithInsecure())
	if err != nil {
		grpclog.Fatalf("failed to listen lookupd: %v", err)
	}
	defer conn.Close()
	grpcServer := grpc.NewServer()
	defer grpcServer.Stop()
	// pprof endpoint; report a bind failure instead of silently dropping it.
	go func() {
		if err := http.ListenAndServe(fmt.Sprintf(":%d", 36663), nil); err != nil {
			grpclog.Printf("pprof server stopped: %v", err)
		}
	}()
	pb.RegisterInspectorServiceServer(grpcServer, inspector.New(pb.NewLookupdServiceClient(conn)))
	// Surface Serve errors: previously a failed accept loop exited main
	// with status 0 and no diagnostics.
	if err := grpcServer.Serve(lis); err != nil {
		grpclog.Fatalf("failed to serve: %v", err)
	}
}
|
package main
import (
"fmt"
"log"
"os"
"os/signal"
"syscall"
"time"
"github.com/CharlesHolbrow/gm"
"github.com/CharlesHolbrow/m"
"github.com/rakyll/portmidi"
)
// main composes two long MIDI sequences ("main" and "final") from chord,
// arpeggio, bass and kick building blocks, then streams both to a
// portmidi output. Ctrl-C triggers an all-notes-off sweep before exit.
// Sequence construction is strictly order-dependent: every CopyFrom
// writes at the current cursor and AdvanceCursor moves it.
func main() {
	if err := portmidi.Initialize(); err != nil {
		panic("Error initializing portmidi: " + err.Error())
	}
	out, err := portmidi.NewOutputStream(2, 1024, 0)
	if err != nil {
		log.Fatal(err)
	}
	// Reason doesn't obey cc123. Send a note off message to every note.
	// BUG(charles) output stream should be locked with a mutex
	go func() {
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGINT)
		// Block until a signal is received.
		<-c
		fmt.Println("\nSending note-off event on each midi note")
		// Sweep all 16 channels x 128 notes so no note hangs after exit.
		for ch := uint8(0); ch <= 15; ch++ {
			for n := uint8(0); n <= 127; n++ {
				out.WriteShort(gm.Note{Note: n, Ch: ch}.Midi())
			}
		}
		time.Sleep(10 * time.Millisecond)
		portmidi.Terminate()
		os.Exit(0)
	}()
	// Chord voicings used for the "stab" sections.
	chords := []m.NoteGroup{
		// m.Sus4Triad(m.F4),h\
		m.MajorChord(m.G4),
		m.MinorChord(m.A4),
		m.MajorChord(m.F4),
		m.MajorChord(m.C4),
	}
	// Full harmonic progression for the first half of the piece.
	progression := []m.NoteGroup{
		// m.Sus4Triad(m.F4),h\
		m.MajorChord(m.G4),
		m.MinorChord(m.A4),
		m.MajorChord(m.F4),
		m.Sus4Chord(m.A4),
		// Sus4s
		m.Sus4Chord(m.A4).AllOctaves().Over(m.G3 + 1).Under(m.G5),
		m.Sus4Chord(m.A4).AllOctaves().Over(m.A3 + 1).Under(m.G5),
		m.Sus4Chord(m.A4).AllOctaves().Over(m.F3 + 1).Under(m.G5),
		m.Sus4Chord(m.A4).AllOctaves().Over(m.G3 + 1).Under(m.G5),
		// final
		m.DominantSeventh(m.A4),
	}
	// Bass sequences indexed in parallel with the loops below; note it
	// has 9 entries while `progression` has 9 — the range loops over
	// `progression` rely on this matching length.
	bassProg := []*m.Sequence{
		// 4
		Bass(m.G3),
		Bass(m.A3),
		Bass(m.F3),
		Bass(m.G3),
		// 4
		Bass(m.G3),
		Bass(m.A3),
		Bass(m.F3),
		Bass(m.G3),
		// 4
		Bass(m.D4),
	}
	// NOTE: the local `main` sequence shadows func main — legal Go, but
	// easy to misread.
	main := m.NewSequence()
	// Bass sequence
	minor := progression[1]
	root := minor[0]
	sus4 := m.Sus4Triad(root)
	d7 := m.DominantSeventh(root)
	bass := Bass(root - 12)
	oneKick := Kick(1, 1)
	twoKick := Kick(2, 1)
	// Begin Copying Sequences to Main
	// Progression
	for _, chord := range progression[:4] {
		main.CopyFrom(ChordProgression([]m.NoteGroup{chord}, 1))
		main.CopyFrom(twoKick)
		main.AdvanceCursor(1)
		main.CopyFrom(twoKick)
		main.AdvanceCursor(1)
	}
	//
	main.Add(0, "Increasing Groups of Four A")
	for i := 0; i < 4; i++ {
		subdivisions := i + 1
		main.Add(0, fmt.Sprintf("Groups of four: %v", subdivisions))
		chips := ChipArp(m.Sus4Triad(root), subdivisions, 4)
		bass := bassProg[i]
		main.CopyFrom(bass, oneKick, chips)
		main.AdvanceCursor(1)
	}
	// Measure
	main.CopyFrom(bass)
	main.CopyFrom(ChipArp(sus4, 6, 5))
	main.CopyFrom(ChipArp(sus4.Transpose(-24), 6, 5))
	main.AdvanceCursor(1)
	// Measure
	main.CopyFrom(Bass(d7[0] - 12 + 4))
	main.CopyFrom(ChipArp(d7, 4, 4))
	main.AdvanceCursor(1)
	// Measure - Stabs
	main.CopyFrom(oneKick)
	main.CopyFrom(ChordProgression(chords[0:3], 0.75))
	main.AdvanceCursor(0.75)
	main.Add(0, "One")
	main.CopyFrom(bass, twoKick)
	main.CopyFrom(ChordProgression(chords[1:4], 0.75))
	main.AdvanceCursor(0.75)
	main.Add(0, "Two")
	main.CopyFrom(bass, twoKick)
	main.CopyFrom(ChordProgression(chords[1:4], 0.75))
	main.AdvanceCursor(0.75)
	main.Add(0, "Three")
	main.CopyFrom(bass, twoKick)
	main.CopyFrom(ChordProgression(chords, 0.75))
	main.AdvanceCursor(0.75)
	main.Add(0, "Increasing Groups of Four B")
	for i := 0; i < 4; i++ {
		// NOTE(review): `i + i` (0,2,4,6) differs from the `i + 1` used
		// in section A — possibly an intentional variation, possibly a
		// typo; confirm against the intended rhythm.
		subdivisions := i + i
		c := ChipArp(m.Sus4Triad(root), subdivisions, 4)
		b := bassProg[i]
		main.CopyFrom(c, b).AdvanceCursor(1)
	}
	main.Add(0, "Increasing groups of four Interpolated")
	for i := 1; i < 2; i++ {
		c := IntArp(m.Sus4Triad(root), i, 7).CopyFrom(bass)
		main.CopyFrom(c).AdvanceCursor(1)
	}
	// Measure 7
	main.CopyFrom(oneKick)
	main.CopyFrom(ChordProgression(chords[0:3], 0.75))
	main.AdvanceCursor(0.75)
	// Measure 8
	main.CopyFrom(bass, twoKick)
	main.CopyFrom(ChordProgression(chords[1:4], 0.75))
	main.AdvanceCursor(0.75)
	main.Add(0, "Stage 5")
	main.CopyFrom(ThinArp(m.MinorTriad(root).AllOctaves().Under(root+36).Over(root), 24, 1.))
	main.CopyFrom(bass)
	main.AdvanceCursor(1)
	for i, chord := range progression {
		// bass := Bass(chord[0] - 12)
		bass := bassProg[i]
		main.CopyFrom(ChordProgression([]m.NoteGroup{chord}, 1))
		main.CopyFrom(bass, twoKick)
		main.AdvanceCursor(1)
		main.CopyFrom(bass, twoKick)
		main.AdvanceCursor(1)
	}
	// Progression
	for _, chord := range progression[:4] {
		main.CopyFrom(ChordProgression([]m.NoteGroup{chord}, 1))
		main.CopyFrom(ChipArp(chord, 3, 4))
		main.CopyFrom(twoKick)
		main.AdvanceCursor(1)
		main.CopyFrom(twoKick)
		main.AdvanceCursor(1)
	}
	//
	main.Add(0, "Increasing Groups of Four A")
	for i := 0; i < 4; i++ {
		subdivisions := i + 1
		// Ingredients
		chord := m.Sus4Triad(root)
		chips := ChipArp(chord, subdivisions, 4)
		pad := ChordProgression([]m.NoteGroup{chord}, 1)
		bass := bassProg[i]
		//
		main.CopyFrom(bass, twoKick, chips, pad)
		main.Add(0, fmt.Sprintf("Groups of four: %v", subdivisions))
		main.AdvanceCursor(1)
	}
	// Measure
	main.CopyFrom(bass, oneKick)
	main.CopyFrom(ChipArp(sus4, 6, 5))
	main.CopyFrom(ChordProgression([]m.NoteGroup{sus4}, 0.5))
	main.CopyFrom(ChipArp(sus4.Transpose(-24), 6, 5))
	main.AdvanceCursor(1)
	// Measure
	main.CopyFrom(oneKick)
	main.CopyFrom(Bass(d7[0] - 12 + 4))
	main.CopyFrom(ChordProgression([]m.NoteGroup{d7}, 0.5))
	main.CopyFrom(ChipArp(d7, 4, 4))
	main.AdvanceCursor(1)
	// Measure - Stabs
	main.Add(0, "Zero")
	main.CopyFrom(oneKick)
	main.CopyFrom(ChordProgression(chords[0:3], 0.75))
	main.AdvanceCursor(0.75)
	main.Add(0, "One")
	main.CopyFrom(bass, twoKick)
	main.CopyFrom(ChordProgression(chords[1:4], 0.75))
	main.AdvanceCursor(0.75)
	main.Add(0, "Two")
	main.CopyFrom(bass, twoKick)
	main.CopyFrom(ChordProgression(chords[1:4], 0.75))
	main.AdvanceCursor(0.75)
	main.Add(0, "Three")
	main.CopyFrom(bass, twoKick)
	main.CopyFrom(ChordProgression(chords, 0.75))
	main.AdvanceCursor(0.75)
	main.Add(0, "Increasing Groups of Four")
	for i := 0; i < 4; i++ {
		// NOTE(review): `i + i` again — see note in section B above.
		subdivisions := i + i
		chord := m.Sus4Triad(root)
		c := ChipArp(chord, subdivisions, 4)
		p := ChordProgression([]m.NoteGroup{chord}, 1./.8*0.75)
		b := bassProg[i]
		main.CopyFrom(c, b, p, oneKick).AdvanceCursor(1)
	}
	main.Add(0, "Increasing groups of four Interpolated B")
	for i := 1; i < 2; i++ {
		c := IntArp(m.Sus4Triad(root), i, 7).CopyFrom(bass)
		main.CopyFrom(c).AdvanceCursor(1)
	}
	// Measure 7
	main.CopyFrom(oneKick)
	main.CopyFrom(ChordProgression(chords[0:3], 0.75))
	main.AdvanceCursor(0.75)
	// Measure 8
	main.CopyFrom(bass, twoKick)
	main.CopyFrom(ChordProgression(chords[1:4], 0.75))
	main.AdvanceCursor(0.75)
	main.Add(0, "Stage 5")
	r := ThinArp(m.MinorTriad(root).AllOctaves().Under(root+36).Over(root), 24, 1.)
	main.CopyFrom(r, bass)
	main.AdvanceCursor(1)
	for i, chord := range progression {
		bass := bassProg[i]
		main.CopyFrom(ChordProgression([]m.NoteGroup{chord}, 1))
		c := ThinArp(chord.AllOctaves().Over(chord[0]).Under(chord[0]+24), 24, 0.2)
		main.CopyFrom(c)
		main.CopyFrom(bass, twoKick)
		main.AdvanceCursor(1)
		main.CopyFrom(bass, twoKick)
		main.CopyFrom(ThinArp(chord.AllOctaves().Over(chord[0]-12).Under(chord[0]+24), 24, .3))
		main.AdvanceCursor(1)
	}
	////////////////////////////////////////////////////
	// repeat the whole thing with different chord group
	////////////////////////////////////////////////////
	chords = []m.NoteGroup{
		// m.Sus4Triad(m.F4),h\
		m.MajorChord(m.G4),
		m.MinorChord(m.A4),
		m.MajorChord(m.F4),
		m.MajorChord(m.C4),
	}
	progression = []m.NoteGroup{
		// m.Sus4Triad(m.F4),h\
		m.MajorChord(m.G4),
		m.MinorChord(m.A4),
		m.MajorChord(m.F4),
		m.MajorChord(m.C5),
		// Sus4s
		m.Sus4Chord(m.A4).AllOctaves().Over(m.G3 + 1).Under(m.G5),
		m.Sus4Chord(m.A4).AllOctaves().Over(m.A3 + 1).Under(m.G5),
		m.Sus4Chord(m.A4).AllOctaves().Over(m.F3 + 1).Under(m.G5),
		m.MajorChord(m.A4).AllOctaves().Over(m.G3 + 1).Under(m.G5),
		// final
		m.MajorChord(m.C5),
	}
	bassProg = []*m.Sequence{
		// 4
		Bass(m.G3),
		Bass(m.A3),
		Bass(m.F3),
		Bass(m.C3),
		// 4
		Bass(m.G3),
		Bass(m.A3),
		Bass(m.F3),
		Bass(m.C3),
		// 4
		Bass(m.C3),
	}
	final := m.NewSequence()
	// Bass sequence
	minor = progression[1]
	root = minor[0]
	sus4 = m.Sus4Triad(root)
	d7 = m.DominantSeventh(root)
	bass = Bass(root - 12)
	oneKick = Kick(1, 1)
	twoKick = Kick(2, 1)
	// Begin Copying Sequences to final
	// Progression
	for _, chord := range progression[:4] {
		final.CopyFrom(ChordProgression([]m.NoteGroup{chord}, 1))
		final.CopyFrom(ChipArp(chord, 3, 4))
		final.CopyFrom(twoKick)
		final.AdvanceCursor(1)
		final.CopyFrom(twoKick)
		final.AdvanceCursor(1)
	}
	//
	final.Add(0, "Increasing Groups of Four A")
	for i := 0; i < 4; i++ {
		subdivisions := i + 1
		// Ingredients
		chord := m.Sus4Triad(root)
		chips := ChipArp(chord, subdivisions, 4)
		pad := ChordProgression([]m.NoteGroup{chord}, 1)
		bass := bassProg[i]
		//
		final.CopyFrom(bass, twoKick, chips, pad)
		final.Add(0, fmt.Sprintf("Groups of four: %v", subdivisions))
		final.AdvanceCursor(1)
	}
	// Measure
	final.CopyFrom(bass, oneKick)
	final.CopyFrom(ChipArp(sus4, 6, 5))
	final.CopyFrom(ChordProgression([]m.NoteGroup{sus4}, 0.5))
	final.CopyFrom(ChipArp(sus4.Transpose(-24), 6, 5))
	final.AdvanceCursor(1)
	// Measure
	final.CopyFrom(oneKick)
	final.CopyFrom(Bass(d7[0] - 12 + 4))
	final.CopyFrom(ChordProgression([]m.NoteGroup{d7}, 0.5))
	final.CopyFrom(ChipArp(d7, 4, 4))
	final.AdvanceCursor(1)
	// Measure - Stabs
	final.Add(0, "Zero")
	final.CopyFrom(oneKick)
	final.CopyFrom(ChordProgression(chords[0:3], 0.75))
	final.AdvanceCursor(0.75)
	final.Add(0, "One")
	final.CopyFrom(bass, twoKick)
	final.CopyFrom(ChordProgression(chords[1:4], 0.75))
	final.AdvanceCursor(0.75)
	final.Add(0, "Two")
	final.CopyFrom(bass, twoKick)
	final.CopyFrom(ChordProgression(chords[1:4], 0.75))
	final.AdvanceCursor(0.75)
	final.Add(0, "Three")
	final.CopyFrom(bass, twoKick)
	final.CopyFrom(ChordProgression(chords, 0.75))
	final.AdvanceCursor(0.75)
	final.Add(0, "Increasing Groups of Four")
	for i := 0; i < 4; i++ {
		// NOTE(review): `i + i` again — see note above.
		subdivisions := i + i
		chord := m.Sus4Triad(root)
		c := ChipArp(chord, subdivisions, 4)
		p := ChordProgression([]m.NoteGroup{chord}, 1./.8*0.75)
		b := bassProg[i]
		final.CopyFrom(c, b, p, oneKick).AdvanceCursor(1)
	}
	final.Add(0, "Increasing groups of four Interpolated B")
	for i := 1; i < 2; i++ {
		c := IntArp(m.Sus4Triad(root), i, 7).CopyFrom(bass)
		final.CopyFrom(c).AdvanceCursor(1)
	}
	// Measure 7
	final.CopyFrom(oneKick)
	final.CopyFrom(ChordProgression(chords[0:3], 0.75))
	final.AdvanceCursor(0.75)
	// Measure 8
	final.CopyFrom(bass, twoKick)
	final.CopyFrom(ChordProgression(chords[1:4], 0.75))
	final.AdvanceCursor(0.75)
	final.Add(0, "Stage 5")
	r = ThinArp(m.MinorTriad(root).AllOctaves().Under(root+36).Over(root), 24, 1.)
	final.CopyFrom(r, bass)
	final.AdvanceCursor(1)
	for i, chord := range progression {
		bass := bassProg[i]
		final.CopyFrom(ChordProgression([]m.NoteGroup{chord}, 1))
		c := ThinArp(chord.AllOctaves().Over(chord[0]).Under(chord[0]+24), 24, 0.2)
		final.CopyFrom(c)
		final.CopyFrom(bass, twoKick)
		final.AdvanceCursor(1)
		final.CopyFrom(bass, twoKick)
		final.CopyFrom(ThinArp(chord.AllOctaves().Over(chord[0]-24).Under(chord[0]+24), 24, .3))
		final.AdvanceCursor(1)
	}
	// Copy Paste from Beginning!
	// Playback: stream both sequences; string events are section labels
	// printed to stdout, notes and CCs go to the MIDI output.
	for event := range main.Play(time.Millisecond * 1400) {
		switch e := event.(type) {
		case gm.Note:
			// fmt.Println(e.Midi())
			out.WriteShort(e.Midi())
		case gm.CC:
			out.WriteShort(e.Midi())
		case string:
			fmt.Println(e)
		}
	}
	for event := range final.Play(time.Millisecond * 1200) {
		switch e := event.(type) {
		case gm.Note:
			// fmt.Println(e.Midi())
			out.WriteShort(e.Midi())
		case gm.CC:
			out.WriteShort(e.Midi())
		case string:
			fmt.Println(e)
		}
	}
	// Give the MIDI driver a moment to flush the last events.
	time.Sleep(10 * time.Millisecond)
}
|
package main
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/niwek/niwek-swagger/controller"
"github.com/niwek/niwek-swagger/env"
)
// main wires up the gin router: a /ping liveness endpoint and the v1
// user API, then serves on the configured port.
func main() {
	router := gin.Default()
	// Ping function
	router.GET("/ping", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{
			"message": "pong",
		})
	})
	v1 := router.Group("/v1")
	{
		v1.POST("/user", controller.CreateUser)
		v1.GET("/user/:id", controller.GetUserByID)
	}
	// Run blocks; surface startup failures (e.g. port already in use)
	// instead of silently exiting with status 0.
	if err := router.Run(":" + env.EnvConfig.Port); err != nil {
		panic(err)
	}
}
|
package bytes
// ToUint16 combines a high byte and a low byte into a single big-endian
// uint16 value.
func ToUint16(h byte, l byte) uint16 {
	result := uint16(h)
	result <<= 8
	return result | uint16(l)
}
package main
import (
"fmt"
"math"
)
// A Unit is a measure or weight unit including a slice of
// equivalent labels (the first member is the default label),
// the type of measure ('volume' or 'mass')
// and the type equivalency for the unit. Type equivalency
// is the amount of reference units in this unit. Reference
// units are milliliters for volume and grams for mass.
type Unit struct {
	Labels []string `json:"labels"` // equivalent names; Labels[0] is the default display label
	MeasureType string `json:"measureType,omitempty"` // "volume" or "mass"
	TypeEquivalent float64 `json:"typeEquivalent"` // reference units (ml or g) per one of this unit
}
// A FoodUnit is a description of colloquial measures for a particular
// food and an equivalent weight in grams. The embedded Unit carries
// the labels and gram equivalency; FoodId links it to the food record.
type FoodUnit struct {
	Unit
	FoodId string `json:"foodID"` // ID of the food this measure applies to
}
// A Measure is a measurement amount with an assigned
// unit, e.g. "2.00 cup". The embedded Unit supplies the label and
// the conversion factor used by Convert.
type Measure struct {
	Unit `json:"unit"` // the unit Amount is expressed in
	Amount float64 `json:"amount"` // quantity in the embedded Unit
}
// USDAFoodWeight represents a single record of the USDA
// Weight File as described in the document below. Some fields are omitted.
// http://www.ars.usda.gov/SP2UserFiles/Place/80400525/Data/SR/SR28/sr28_doc.pdf
type USDAFoodWeight struct {
	FoodID string `json:"foodID"` // key into the food description file
	Amount int32 `json:"amount"` // number of measures this record describes
	Measure string `json:"measureLabel"` //label or description
	MassEq float64 `json:"massEq"` //mass of the described measure, in grams
}
// USDAFoodDesc represents a single record of the USDA
// Food Description file. Some fields are omitted.
type USDAFoodDesc struct {
	FoodID string `json:"foodID"` // primary key shared across the USDA files
	LongDesc string `json:"longDesc"` // full food description
	ShortDesc string `json:"shortDesc"` // abbreviated description
	Aliases string `json:"aliases"` // other common names for the food
	Manufacturer string `json:"manuf"` // manufacturer name, when applicable
}
// USDANutrientDef represents a single record of the USDA
// Nutrient Definition file. Some fields are omitted.
type USDANutrientDef struct {
	NutID string `json:"nutID"` // nutrient identifier, joined against USDANutrientData
	Unit string `json:"unit"` // unit the nutrient amount is expressed in
	Tag string `json:"tag"` // INFOODS-style tag name
	Desc string `json:"desc"` // human-readable nutrient name
}
// USDANutrientData represents a single record of the USDA
// Nutrient Data file, linking a food to a nutrient amount.
// Some fields are omitted.
type USDANutrientData struct {
	FoodID string `json:"foodID"` // food this measurement belongs to
	NutID string `json:"nutID"` // nutrient being measured
	Amount float64 `json:"amount"` // nutrient amount, in the unit from USDANutrientDef
}
// UnitReference is the built-in table of supported units. Volume units
// are expressed in milliliters and mass units in grams (TypeEquivalent).
// Feed these to RegisterUnit to make them resolvable by label.
var UnitReference = []Unit{
	{Labels: []string{"tsp","teaspoon"}, MeasureType: "volume", TypeEquivalent: 4.92892},
	{Labels: []string{"tbsp","tablespoon"}, MeasureType: "volume", TypeEquivalent: 14.7868},
	{Labels: []string{"fl oz","fluid ounce"}, MeasureType: "volume", TypeEquivalent: 29.5735},
	{Labels: []string{"cup","c"}, MeasureType: "volume", TypeEquivalent: 236.5882365},
	{Labels: []string{"pint","pt","p"}, MeasureType: "volume", TypeEquivalent: 473.176},
	{Labels: []string{"l","litre"}, MeasureType: "volume", TypeEquivalent: 1000},
	{Labels: []string{"ml","mils","millilitre"}, MeasureType: "volume", TypeEquivalent: 1},
	{Labels: []string{"oz","ounce"}, MeasureType: "mass", TypeEquivalent: 28.3495},
	{Labels: []string{"lb","pound"}, MeasureType: "mass", TypeEquivalent: 453.592},
	{Labels: []string{"stone"}, MeasureType: "mass", TypeEquivalent: 6350.29},
	{Labels: []string{"kg","kilogram"}, MeasureType: "mass", TypeEquivalent: 1000},
	{Labels: []string{"g","gram"}, MeasureType: "mass", TypeEquivalent: 1},
}
var UnitMap = make(map[string]Unit)
// RegisterUnit records u in UnitMap under every one of its labels,
// making the unit resolvable by any of its equivalent names.
func RegisterUnit(u Unit) {
	for _, label := range u.Labels {
		UnitMap[label] = u
	}
}
// ValidUnit reports whether label describes a registered Unit.
func ValidUnit(label string) bool {
	// Returning the lookup's ok directly replaces the redundant
	// if-true/else-false construct (staticcheck S1008).
	_, ok := UnitMap[label]
	return ok
}
// String implements fmt.Stringer for Measure, rendering the amount to
// two decimal places followed by the unit's default label,
// e.g. "2.00 cup".
func (m Measure) String() string {
	return fmt.Sprintf("%.2f %s", m.Amount, m.Labels[0])
}
// Convert a Measure to a target Unit and return a new Measure
// of the converted amount and unit. The converted value is rounded
// to two decimal places. ok is false when target is not a registered
// unit or its measure type (mass vs. volume) differs from m's.
func (m Measure) Convert(target string) (converted Measure, ok bool) {
	// Named result renamed from `new`, which shadowed the builtin.
	if !ValidUnit(target) {
		return converted, false
	}
	newUnit := UnitMap[target]
	if m.MeasureType != newUnit.MeasureType {
		// Cannot convert between volume and mass without density info.
		return converted, false
	}
	// Convert through the reference unit (ml or g), THEN apply the
	// half-up rounding offset. The original added .5 before dividing
	// by the target equivalency, so the offset was scaled down and
	// the result was effectively floored rather than rounded.
	amount := m.Amount * m.TypeEquivalent / newUnit.TypeEquivalent
	converted.Amount = math.Floor(amount*100+.5) / 100
	converted.Unit = newUnit
	return converted, true
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// +build linux
package sysutil
import (
"os"
"github.com/cockroachdb/errors"
"golang.org/x/sys/unix"
)
// CreateLargeFile creates a large file at the given path with bytes size. On
// Linux, it uses the fallocate syscall to efficiently create a file of the
// given size. On other platforms, it naively writes the specified number of
// bytes, which can take a long time.
func CreateLargeFile(path string, bytes int64) error {
	// O_EXCL: refuse to clobber an existing file.
	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
	if err != nil {
		return err
	}
	defer file.Close()
	// Reserve the full size up front instead of writing zero bytes.
	if err := unix.Fallocate(int(file.Fd()), 0, 0, bytes); err != nil {
		return errors.Wrap(err, "fallocate")
	}
	// Flush to stable storage before reporting success.
	return errors.Wrap(file.Sync(), "fsync")
}
|
package hello
type Servlet struct{}
|
package main
import "fmt"
// main reads natural numbers from stdin until a terminating 0 and
// prints how many elements equal the maximum of the sequence.
func main() {
	var e, max, count int
	for {
		// Stop on the 0 sentinel AND on read errors. The original
		// ignored fmt.Scan's error, so on EOF or malformed input e
		// kept its last nonzero value and the loop spun forever.
		if _, err := fmt.Scan(&e); err != nil || e == 0 {
			break
		}
		switch {
		case e == max:
			count++
		case e > max:
			// New maximum: restart the occurrence count.
			max = e
			count = 1
		}
	}
	fmt.Print(count)
}
// The sequence consists of natural numbers and is terminated by 0.
// Determine how many elements of the sequence are equal to its largest element.
|
package network
import (
"sort"
corev1 "k8s.io/api/core/v1"
)
// NetworkCache is an in-memory index of cluster networking state:
// node IPs, per-node pod CIDRs and pod IPs, and service cluster IPs.
type NetworkCache struct {
	nodeNetworks map[string]*NodeNetwork // keyed by node name
	podNetworks map[string]*PodNetwork // keyed by node name
	serviceNetworks map[string]*ServiceNetwork // keyed by "<namespace>/<name>" (see genServiceKey)
}
// newNetworkCache returns a NetworkCache with every lookup map
// initialized, so callers can insert into them immediately.
func newNetworkCache() *NetworkCache {
	cache := &NetworkCache{}
	cache.nodeNetworks = make(map[string]*NodeNetwork)
	cache.podNetworks = make(map[string]*PodNetwork)
	cache.serviceNetworks = make(map[string]*ServiceNetwork)
	return cache
}
// GetNodeNetworks returns all cached node networks, sorted via the
// NodeNetworks sort.Interface implementation (defined elsewhere).
// An empty cache yields a nil slice, as before.
func (nc *NetworkCache) GetNodeNetworks() NodeNetworks {
	var nodeNetworks NodeNetworks
	if len(nc.nodeNetworks) > 0 {
		// Pre-size to the map length to avoid repeated append growth.
		nodeNetworks = make(NodeNetworks, 0, len(nc.nodeNetworks))
		for _, nodeNetwork := range nc.nodeNetworks {
			nodeNetworks = append(nodeNetworks, nodeNetwork)
		}
		sort.Sort(nodeNetworks)
	}
	return nodeNetworks
}
// GetPodNetworks returns all cached pod networks, sorted via the
// PodNetworks sort.Interface implementation (defined elsewhere).
// An empty cache yields a nil slice, as before.
func (nc *NetworkCache) GetPodNetworks() PodNetworks {
	var podNetworks PodNetworks
	if len(nc.podNetworks) > 0 {
		// Pre-size to the map length to avoid repeated append growth.
		podNetworks = make(PodNetworks, 0, len(nc.podNetworks))
		for _, podNetwork := range nc.podNetworks {
			podNetworks = append(podNetworks, podNetwork)
		}
		sort.Sort(podNetworks)
	}
	return podNetworks
}
// GetServiceNetworks returns all cached service networks, sorted via the
// ServiceNetworks sort.Interface implementation (defined elsewhere).
// An empty cache yields a nil slice, as before.
func (nc *NetworkCache) GetServiceNetworks() ServiceNetworks {
	var serviceNetworks ServiceNetworks
	if len(nc.serviceNetworks) > 0 {
		// Pre-size to the map length to avoid repeated append growth.
		serviceNetworks = make(ServiceNetworks, 0, len(nc.serviceNetworks))
		for _, serviceNetwork := range nc.serviceNetworks {
			serviceNetworks = append(serviceNetworks, serviceNetwork)
		}
		sort.Sort(serviceNetworks)
	}
	return serviceNetworks
}
// OnNewNode caches networking info for a newly observed node: its first
// internal/external IP and, when a pod CIDR is already assigned, the
// node's pod network. Re-adding an already-known node is a no-op.
func (nc *NetworkCache) OnNewNode(k8snode *corev1.Node) {
	if _, ok := nc.nodeNetworks[k8snode.Name]; ok {
		return
	}
	// Take the first internal or external address in reported order.
	// The original kept scanning after the first match; break instead.
	var ip string
	for _, addr := range k8snode.Status.Addresses {
		if addr.Type == corev1.NodeInternalIP || addr.Type == corev1.NodeExternalIP {
			ip = addr.Address
			break
		}
	}
	nn := &NodeNetwork{
		Name: k8snode.Name,
		IP:   ip,
	}
	nn.SetID(GenUUID())
	nc.nodeNetworks[k8snode.Name] = nn
	if k8snode.Spec.PodCIDR != "" {
		nc.newPodNetworks(k8snode)
	}
}
// newPodNetworks creates and caches a PodNetwork for the node from its
// assigned pod CIDR, with an initially empty pod-IP list. Callers are
// expected to check that Spec.PodCIDR is set before calling.
func (nc *NetworkCache) newPodNetworks(k8snode *corev1.Node) {
	pn := &PodNetwork{
		NodeName: k8snode.Name,
		PodCIDR:  k8snode.Spec.PodCIDR,
		PodIPs:   make([]PodIP, 0),
	}
	pn.SetID(GenUUID())
	nc.podNetworks[k8snode.Name] = pn
}
// OnNewPod records the pod's IP under its node's pod network. Pods with
// no assigned IP, pods not in the Running phase, pods on nodes we have
// no pod network for, and host-network pods (whose IP is the node's,
// not a pod IP) are ignored.
func (nc *NetworkCache) OnNewPod(k8spod *corev1.Pod) {
	if k8spod.Status.PodIP == "" || k8spod.Status.Phase != corev1.PodRunning {
		return
	}
	// `!ok` / `!cond` replace the `== false` comparisons (staticcheck S1002).
	podNetwork, ok := nc.podNetworks[k8spod.Spec.NodeName]
	if !ok {
		return
	}
	if k8spod.Spec.HostNetwork {
		return
	}
	podNetwork.PodIPs = append(podNetwork.PodIPs, PodIP{
		Namespace: k8spod.Namespace,
		Name:      k8spod.Name,
		IP:        k8spod.Status.PodIP,
	})
}
// OnNewService caches (or overwrites) the service's cluster-IP record,
// keyed by "<namespace>/<name>".
func (nc *NetworkCache) OnNewService(k8ssvc *corev1.Service) {
	record := &ServiceNetwork{
		Namespace: k8ssvc.Namespace,
		Name:      k8ssvc.Name,
		IP:        k8ssvc.Spec.ClusterIP,
	}
	record.SetID(GenUUID())
	nc.serviceNetworks[genServiceKey(k8ssvc)] = record
}
// genServiceKey builds the serviceNetworks cache key for a service:
// "<namespace>/<name>".
func genServiceKey(k8ssvc *corev1.Service) string {
	return k8ssvc.Namespace + "/" + k8ssvc.Name
}
// OnDeleteNode drops both the node's network record and its pod network
// (including all cached pod IPs on that node). Deleting an unknown node
// is a no-op.
func (nc *NetworkCache) OnDeleteNode(k8snode *corev1.Node) {
	delete(nc.nodeNetworks, k8snode.Name)
	delete(nc.podNetworks, k8snode.Name)
}
// OnDeletePod removes the pod's IP entry from its node's pod network.
// Unknown nodes and pods with no cached entry are no-ops.
func (nc *NetworkCache) OnDeletePod(k8spod *corev1.Pod) {
	podNetwork, ok := nc.podNetworks[k8spod.Spec.NodeName]
	if !ok {
		return
	}
	for idx := range podNetwork.PodIPs {
		entry := podNetwork.PodIPs[idx]
		if entry.Namespace == k8spod.Namespace && entry.Name == k8spod.Name {
			podNetwork.PodIPs = append(podNetwork.PodIPs[:idx], podNetwork.PodIPs[idx+1:]...)
			return
		}
	}
}
// OnDeleteService evicts the service's cached network record.
// Deleting an unknown service is a no-op.
func (nc *NetworkCache) OnDeleteService(k8ssvc *corev1.Service) {
	delete(nc.serviceNetworks, genServiceKey(k8ssvc))
}
// OnUpdateNode refreshes the node's pod CIDR. Nodes we never cached are
// ignored; a node with a cached pod network gets its CIDR updated in
// place, otherwise a pod network is created from the node's CIDR.
func (nc *NetworkCache) OnUpdateNode(k8snode *corev1.Node) {
	// `!ok` replaces the `ok == false` comparison (staticcheck S1002).
	if _, ok := nc.nodeNetworks[k8snode.Name]; !ok {
		return
	}
	if pn, ok := nc.podNetworks[k8snode.Name]; ok {
		pn.PodCIDR = k8snode.Spec.PodCIDR
		return
	}
	nc.newPodNetworks(k8snode)
}
// OnUpdateService re-caches the service; OnNewService overwrites the
// existing record under the same key, so an update is just a re-add.
func (nc *NetworkCache) OnUpdateService(k8ssvc *corev1.Service) {
	nc.OnNewService(k8ssvc)
}
// OnUpdatePod reconciles a pod's cached IP after an update. Terminal
// pods (Succeeded/Failed) are evicted. Otherwise, when the IP changed,
// the existing entry is replaced in place, or a new entry appended if
// the pod was not cached yet.
func (nc *NetworkCache) OnUpdatePod(k8spodOld, k8spodNew *corev1.Pod) {
	if k8spodNew.Status.Phase == corev1.PodSucceeded || k8spodNew.Status.Phase == corev1.PodFailed {
		nc.OnDeletePod(k8spodNew)
		return
	}
	if k8spodOld.Status.PodIP == k8spodNew.Status.PodIP {
		// IP unchanged — nothing to reconcile.
		return
	}
	// `!ok` replaces the `ok == false` comparison (staticcheck S1002).
	podNetwork, ok := nc.podNetworks[k8spodNew.Spec.NodeName]
	if !ok {
		return
	}
	podIP := PodIP{
		Namespace: k8spodNew.Namespace,
		Name:      k8spodNew.Name,
		IP:        k8spodNew.Status.PodIP,
	}
	// Replace an existing entry for this pod in place.
	for i, p := range podNetwork.PodIPs {
		if p.Namespace == k8spodNew.Namespace && p.Name == k8spodNew.Name {
			podNetwork.PodIPs[i] = podIP
			return
		}
	}
	podNetwork.PodIPs = append(podNetwork.PodIPs, podIP)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.