text
stringlengths
11
4.05M
package controllers import ( "fmt" "net/http" "traceip/internal/helpers" "traceip/internal/models" "traceip/internal/services" "github.com/go-redis/redis/v8" "github.com/labstack/echo" ) //IPController IP endpoints handlers type IPController struct { RedisConn *redis.Client StatisticsService *services.StatisticsService } //Trace handler func (ipc *IPController) Trace(c echo.Context) error { params := make(map[string]interface{}) if err := c.Bind(&params); err != nil { return c.JSON(http.StatusBadRequest, helpers.ErrorResponse(err.Error())) } ipAddr := fmt.Sprintf("%v", params["ip"]) ipService := services.IPService{ RedisConn: ipc.RedisConn, } countryInfo, err := ipService.Trace(ipAddr) if err != nil { return c.JSON(http.StatusBadRequest, helpers.ErrorResponse(err.Error())) } currenciesService := services.CurrenciesService{ RedisConn: ipc.RedisConn, } currencies, err := currenciesService.Get() if err != nil { return c.JSON(http.StatusBadRequest, helpers.ErrorResponse(err.Error())) } distance := helpers.DistanceToBuenosAires(countryInfo.LatLng[0], countryInfo.LatLng[1]) / 1000.0 response := traceResponse(countryInfo, currencies, distance, "USD") go ipc.StatisticsService.AddEvent(countryInfo.Alpha2Code, distance) return c.JSON(http.StatusOK, response) } func traceResponse(countryInfo *models.CountryInfo, currencies *models.Currencies, distance float64, currencyBase string) (response map[string]interface{}) { response = make(map[string]interface{}) //Monedas currenciesRate := make(map[string]string) var ref float64 = 1.0 if currencies.Base != currencyBase { ref = currencies.Rates[currencyBase] } for _, currency := range countryInfo.Currencies { if currencyCode, ok := currency["code"]; ok { currency := ref / currencies.Rates[currencyCode] conversionEntry := fmt.Sprintf("%f %s", currency, currencyBase) currenciesRate[currencyCode] = conversionEntry } } //Horarios timezoneHour := make(map[string]string) for _, timezone := range countryInfo.TimeZones { 
timezoneHour[timezone] = helpers.GetTime(timezone) } //Idiomas idiomas := []map[string]string{} for _, language := range countryInfo.Languages { idiomas = append(idiomas, map[string]string{ "nombre": language["name"], "nombreNativo": language["nativeName"], "isoCode": language["iso639_1"], }) } response["monedas"] = currenciesRate response["idiomas"] = idiomas response["distanciaEstimada"] = distance response["pais"] = countryInfo.Name response["isoCode"] = countryInfo.Alpha2Code response["horarios"] = timezoneHour return response }
package main import ( "github.com/astaxie/beego" _ "test_proj/email/routers" //"test_proj/email/controllers" ) func main() { beego.Run() }
package entry import ( "math" "sort" "sync" "shared/common" "shared/utility/errors" "shared/utility/transfer" ) const ( CfgWorldItemData = "cfg_world_item_data" CfgWorldItemLevelUp = "cfg_world_item_level_up" CfgWorldItemAdvance = "cfg_world_item_advance" CfgWorldItemRandAttributes = "cfg_world_item_rand_attributes" CfgWorldItemRandAttributesValue = "cfg_world_item_rand_attributes_value" ) type WorldItemEntry struct { sync.RWMutex worldItems map[int32]WorldItemData levelEXP map[int32]int32 // 等级经验 key: 等级*10+稀有度(makeLevelEXPKey), val: 经验值 strengthenDiscount []WorldItemMaterialEXPDiscount // 强化折扣 advance map[int32]WorldItemAdvance // 进阶 key: 阶级*10+稀有度(makeAdvanceKey) maxLevel map[int8]int32 // 最高等级 } type WorldItemAdvance struct { LevelLimit int32 // 等级限制 GoldCost int32 // 金币消耗 // ItemCost int32 // 素材消耗 Stage int32 // 最高阶级 } type WorldItemRandAttr struct { Probs []int32 // 权重 ValueIDs []int32 // 数值ID } type WorldItemRandAttrValue struct { Probs []int32 // 概率 Ranges [][]int32 // 范围 } type WorldItemData struct { WID int32 `src:"Id"` Rarity int32 // 品质 // UseType int32 // 使用类型 1可使用 2可出售 3可使用且可出售 4不可使用不可出售 EXP int32 `src:"UseParam"` // 用作材料的强化经验 // SellPrice string // 出售价格 // Part int32 // 装备部位 // Careers []int32 `src:"WorldItemLmt"` // 穿戴职业限制 GoldPerExp float64 `src:"GoldperExp"` // 强化1经验消耗的金币 // IsLocked bool // // PassiveID string // 被动技能和升级规则 // AttributeID string // 被动属性和升级规则 // CanProduced bool // 是否能在建筑产出 AdvanceWID map[int32]bool `ignore:"true"` // 可以用作突破的世界级道具ID } type WorldItemMaterialEXPDiscount struct { Level int32 `json:"level"` Discount float64 `json:"discount"` } func NewWorldItemEntry() *WorldItemEntry { return &WorldItemEntry{ worldItems: map[int32]WorldItemData{}, levelEXP: map[int32]int32{}, strengthenDiscount: []WorldItemMaterialEXPDiscount{}, advance: map[int32]WorldItemAdvance{}, // randAttrs: map[int32]WorldItemRandAttr{}, // randAttrsValue: map[int32]WorldItemRandAttrValue{}, } } func (e *WorldItemEntry) Check(config *Config) error { // err := 
e.Reload(config, global) // if err != nil { // return err // } // // // 检查是否有0经验的等级 // for _, exp := range e.levelEXP { // // 缺少exp // if exp == 0 { // // todo: 定义err // return err // } // } return nil } func (e *WorldItemEntry) Reload(config *Config) error { e.Lock() defer e.Unlock() // ------------------------------------------------------------------------------------ worldItems := map[int32]WorldItemData{} for _, breakItems := range config.CfgWorldItemDataConfig.GetAllData() { worldItem := WorldItemData{ AdvanceWID: map[int32]bool{}, } err := transfer.Transfer(breakItems, &worldItem) if err != nil { return errors.WrapTrace(err) } for _, v := range breakItems.BreakItem { worldItem.AdvanceWID[v] = true } worldItems[worldItem.WID] = worldItem } // ------------------------------------------------------------------------------------ levelEXP := map[int32]int32{} for _, levelUp := range config.CfgWorldItemLevelUpDataConfig.GetAllData() { levelEXP[e.makeLevelEXPKey(levelUp.Level, int8(levelUp.Rarity))] = levelUp.Exp } // ------------------------------------------------------------------------------------ advance := map[int32]WorldItemAdvance{} for rarity, adv := range config.CfgWorldItemAdvanceConfig.GetAllData() { if len(adv.GoldCost) != int(adv.Stage) || // len(adv.ItemCost) != int(adv.Stage) || len(adv.LevelLimit) != int(adv.Stage) { return errors.Swrapf(common.ErrCSVFormatInvalid, "CfgWorldItemRandAttributes", rarity) } for i := int32(0); i < adv.Stage; i++ { advance[e.makeAdvanceKey(i+1, int8(rarity))] = WorldItemAdvance{ LevelLimit: adv.LevelLimit[i], GoldCost: adv.GoldCost[i], // ItemCost: adv.ItemCost[i], Stage: adv.Stage, } } } // ------------------------------------------------------------------------------------ e.worldItems = worldItems e.levelEXP = levelEXP e.advance = advance e.strengthenDiscount = config.WorldItemMaterialEXPDiscount sort.Slice(e.strengthenDiscount, func(i, j int) bool { return e.strengthenDiscount[i].Level < 
e.strengthenDiscount[i].Level }) e.maxLevel = config.WorldItemMaxLevel return nil } func (e *WorldItemEntry) makeLevelEXPKey(level int32, rarity int8) int32 { return level*10 + int32(rarity) } func (e *WorldItemEntry) makeAdvanceKey(stage int32, rarity int8) int32 { return stage*10 + int32(rarity) } // 执行下面依赖wid的查询需要先检查wid是否存在,下面使用wid的函数不包含error的必须调用 func (e *WorldItemEntry) CheckWIDExist(wid int32) error { e.RLock() defer e.RUnlock() _, ok := e.worldItems[wid] if !ok { return errors.Swrapf(common.ErrNotFoundInCSV, CfgWorldItemData, wid) } return nil } // 检查职业是否符合 // func (e *WorldItemEntry) CheckCareer(wid, career int32) error { // e.RLock() // defer e.RUnlock() // // worldItem, ok := e.worldItems[wid] // if !ok { // return errors.Swrapf(common.ErrNotFoundInCSV, CfgWorldItemData, wid) // } // // for _, v := range worldItem.Careers { // if career == v { // return nil // } // } // // return errors.Swrapf(common.ErrWorldItemNotMatchCareer, wid, career) // } // 检查进阶素材 func (e *WorldItemEntry) CheckAdvanceMaterials(target *common.WorldItem, materials []*common.WorldItem, itemID int32) error { e.RLock() defer e.RUnlock() data, ok := e.worldItems[target.WID] if !ok { return errors.Swrapf(common.ErrNotFoundInCSV, CfgWorldItemData, target.WID) } // 检查世界级道具 for _, worldItem := range materials { // 消耗世界级道具只能用本体 if target.WID != worldItem.WID { return errors.Swrapf(common.ErrWorldItemAdvanceMaterialNotMatch, worldItem, worldItem.WID) } } // 检查升星道具 if itemID != 0 && !data.AdvanceWID[itemID] { return errors.Swrapf(common.ErrWorldItemAdvanceMaterialNotMatch, target.WID, itemID) } // 下一阶级 nextStage := target.Stage.Value() + 1 // 检查材料数量,世界级道具和升星道具有一样就可以了 if int32(len(materials)) < 1 && itemID == 0 { return errors.Swrapf(common.ErrWorldItemAdvanceMaterialNoEnough, target.WID, target.Rarity, nextStage, materials) } return nil } // 检查是否满足进阶条件 func (e *WorldItemEntry) CheckAdvanceLevel(worldItem *common.WorldItem) error { e.RLock() defer e.RUnlock() needLevel := 
e.advance[e.makeAdvanceKey(worldItem.Stage.Value(), worldItem.Rarity)].LevelLimit if !worldItem.Level.Enough(needLevel) { return errors.Swrapf(common.ErrWorldItemAdvanceLevelNotEnough, worldItem.WID, worldItem.Level, needLevel) } return nil } // 检查是否满足进阶条件 func (e *WorldItemEntry) CheckStageUpToLimit(worldItem *common.WorldItem) error { e.RLock() defer e.RUnlock() key := e.makeAdvanceKey(1, worldItem.Rarity) advance, ok := e.advance[key] if !ok { return errors.Swrapf(common.ErrNotFoundInCSV, CfgEquipmentAdvance, key) } if worldItem.Stage.Value() >= advance.Stage { return common.ErrWorldItemStageUpToLimit } return nil } // 检查是否满足进阶条件 func (e *WorldItemEntry) AdvanceCostGold(worldItem *common.WorldItem) (int32, error) { e.RLock() defer e.RUnlock() key := e.makeAdvanceKey(worldItem.Stage.Value()+1, worldItem.Rarity) advance, ok := e.advance[key] if !ok { return 0, errors.Swrapf(common.ErrNotFoundInCSV, CfgWorldItemAdvance, key) } return advance.GoldCost, nil } // 初始化装备表格相关数据 func (e *WorldItemEntry) NewWorldItem(id int64, wid int32) (*common.WorldItem, error) { e.RLock() defer e.RUnlock() worldItem := common.NewWorldItem(id, wid) data, ok := e.worldItems[worldItem.WID] if !ok { return nil, errors.Swrapf(common.ErrNotFoundInCSV, CfgWorldItemData, worldItem.WID) } // 随机阵营 // worldItem.Camp = int8(rand.SinglePerm(data.CampProbs)) // 稀有度 worldItem.Rarity = int8(data.Rarity) return worldItem, nil } // 同步等级并解锁随机属性 func (e *WorldItemEntry) SyncLevelAndUnlockAttr(worldItem *common.WorldItem) error { e.RLock() defer e.RUnlock() maxLevel := e.maxLevel[worldItem.Rarity] // 当装备等级小于最大等级,循环判断经验是否足够 for worldItem.Level.Value() < maxLevel { // 获取下一个等级升级经验 nextLevelEXPKey := e.makeLevelEXPKey(worldItem.Level.Value()+1, worldItem.Rarity) nextLevelEXP, ok := e.levelEXP[nextLevelEXPKey] if !ok { return errors.Swrapf(common.ErrNotFoundInCSV, CfgWorldItemLevelUp, nextLevelEXPKey) } // 升级经验不足,跳出 if !worldItem.EXP.Enough(nextLevelEXP) { break } // 升级了! 
worldItem.Level.Plus(1) } return nil } // 计算装备强化经验 func (e *WorldItemEntry) StrengthenEXP(target *common.WorldItem, itemEXP int32, materials []*common.WorldItem, maxLevelByTeam int32) (int32, error) { e.RLock() defer e.RUnlock() // 计算材料的强化经验值 exps, err := e.strengthenMaterialsEXP(materials) if err != nil { return 0, errors.WrapTrace(err) } // 世界级道具等级最大值上限,经验溢出不保留,世界级道具最大等级 = 阶级加成 + 队伍等级加成 // maxLevel := e.maxLevel[target.Rarity] maxLevel := e.advance[e.makeAdvanceKey(target.Stage.Value(), target.Rarity)].LevelLimit + maxLevelByTeam expNow := target.EXP.Value() var addEXP int32 = itemEXP // 强化实际增加的经验 levelEXPKey := e.makeLevelEXPKey(maxLevel, target.Rarity) // 装备最大经验,不可溢出 maxEXP, ok := e.levelEXP[levelEXPKey] if !ok { return 0, errors.Swrapf(common.ErrNotFoundInCSV, CfgWorldItemLevelUp, levelEXPKey) } // 装备等级已经可以升到上限 for _, exp := range exps { if expNow+exp > maxEXP { // 检查经验溢出了还有后续装备的话就报错 // if i != len(exps)-1 { // return 0, errors.Swrapf(common.ErrWorldItemEXPUpToLimit, i) // } // 计算溢出后真正加上的经验 addEXP += maxEXP - expNow break } else { addEXP += exp } expNow += exp } // 注释是因为当初装备强化溢出经验也要保留,现在不要了 // // 装备等级上限受到账号等级制约 // limitEXP, err := manager.CSV.WorldItem.LevelEXP(worldItem.WID, limitLevel) // if err != nil { // return 0, errors.WrapTrace(err) // } // // for i, exp := range exps { // if expNow+exp > limitEXP { // // 检查经验溢出了还有后续装备的话就报错 // if i != len(exps)-1 { // return 0, errors.WrapTrace(common.ErrWorldItemEXPUpToLimit) // } // // // 同时超过账号限制和最大等级限制的情况 // if expNow+exp > maxEXP { // // 计算溢出后真正加上的经验 // addEXP += maxEXP - expNow // break // } // // // 可以溢出 // } // // addEXP += exp // expNow += exp // } return addEXP, nil } // 计算装备强化经验 func (e *WorldItemEntry) strengthenMaterialsEXP(materials []*common.WorldItem) ([]int32, error) { exps := make([]int32, 0, len(materials)) for _, worldItem := range materials { data, ok := e.worldItems[worldItem.WID] if !ok { return nil, errors.Swrapf(common.ErrNotFoundInCSV, CfgWorldItemData, worldItem.WID) } level := 
worldItem.Level.Value() // 装备等级 var sum float64 // 这个装备强化经验折扣后的和\ var lastEXP float64 // 上一级强化等级经验 // 计算强化经验的折扣 for _, val := range e.strengthenDiscount { if level >= val.Level { // 全额折扣 levelEXPKey := e.makeLevelEXPKey(val.Level, worldItem.Rarity) exp, ok := e.levelEXP[levelEXPKey] // 当前等级经验 if !ok { return nil, errors.Swrapf(common.ErrNotFoundInCSV, CfgWorldItemLevelUp, levelEXPKey) } sum += (float64(exp) - lastEXP) * (val.Discount / 10000) lastEXP = float64(exp) } else { // 部分折扣 sum += (float64(worldItem.EXP.Value()) - lastEXP) * (val.Discount / 10000) break } } // 基本经验 + 强化经验 exps = append(exps, data.EXP+int32(math.Trunc(sum))) } return exps, nil } // func (e *WorldItemEntry) StrengthenGoldCost(worldItem *common.WorldItem, addEXP int32) (int32, error) { // e.RLock() // defer e.RUnlock() // // data, ok := e.worldItems[worldItem.WID] // if !ok { // return 0, errors.Swrapf(common.ErrNotFoundInCSV, CfgWorldItemData, worldItem.WID) // } // // return data.GoldPerExp * addEXP, nil // } // 装备强化金币只计算装备固定值,不计算折扣金币 func (e *WorldItemEntry) StrengthenGoldCost(worldItem *common.WorldItem, itemEXP int32, materials []*common.WorldItem) (int32, error) { e.RLock() defer e.RUnlock() data, ok := e.worldItems[worldItem.WID] if !ok { return 0, errors.Swrapf(common.ErrNotFoundInCSV, CfgWorldItemData, worldItem.WID) } costGolds := data.GoldPerExp * float64(itemEXP) for _, material := range materials { md, ok := e.worldItems[material.WID] if !ok { return 0, errors.Swrapf(common.ErrNotFoundInCSV, CfgWorldItemData, material.WID) } costGolds += data.GoldPerExp * float64(md.EXP) } return int32(math.Floor(costGolds)), nil } // 计算强化的阶级 func (e *WorldItemEntry) CalAddStage(worldItem *common.WorldItem, materials []*common.WorldItem, itemID int32) (int32, error) { e.RLock() defer e.RUnlock() key := e.makeAdvanceKey(1, worldItem.Rarity) advance, ok := e.advance[key] if !ok { return 0, errors.Swrapf(common.ErrNotFoundInCSV, CfgWorldItemAdvance, key) } var allMaterialStage int32 = 0 for _, v := 
range materials { allMaterialStage += v.Stage.Value() + 1 } // 升星道具 if itemID != 0 { allMaterialStage++ } var retStage = worldItem.Stage.Value() + allMaterialStage // 溢出 if retStage > advance.Stage { retStage = advance.Stage } return retStage - worldItem.Stage.Value(), nil }
package client import ( "fmt" "log" "strings" "time" "github.com/Jeffail/gabs/v2" "github.com/go-resty/resty/v2" ) var uniswapPools map[string]bool var quickswapPools map[string]bool var balancerPools map[string]bool func init() { uniswapPools = map[string]bool{} quickswapPools = map[string]bool{} balancerPools = map[string]bool{} } type PoolResponse struct { ID string Name string TVL string DailyVolume string APY float32 Platform string Network string } func GetUniSwapPools(client *resty.Client) *PoolResponse { // get the tops pools from the subgraph payload := map[string]string{ "operationName": "topPools", "query": "query topPools {\n pools(first: 20, orderBy: createdAtTimestamp, orderDirection: desc) {\n id\n __typename\n createdAtTimestamp\n }\n}\n", } res, err := client.R().SetBody(payload).Post("https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v3") if err != nil { log.Println("Failed to query subgraph", err.Error()) return nil } jsonParsed, err := gabs.ParseJSON(res.Body()) if err != nil { log.Println("Failed to unmarshal json fromm subgraph", err.Error()) return nil } newpools := []string{} for _, child := range jsonParsed.Path("data.pools").Children() { pool := child.Path("id").Data().(string) if _, ok := uniswapPools[pool]; !ok { newpools = append(newpools, pool) } } if len(newpools) == 0 { log.Println("No new pools found") return nil } poolArrStr := "" for index, pool := range newpools { if index == 3 { break } poolArrStr += "\"" + pool + "\"," } poolArrStr = strings.TrimRight(poolArrStr, ",") payload = map[string]string{ "operationName": "pools", "query": "query pools {\n pools(\n where: {id_in: [" + poolArrStr + "]}\n orderBy: totalValueLockedUSD\n orderDirection: desc\n ) {\n id\n feeTier\n liquidity\n sqrtPrice\n tick\n token0 {\n id\n symbol\n name\n decimals\n derivedETH\n __typename\n }\n token1 {\n id\n symbol\n name\n decimals\n derivedETH\n __typename\n }\n token0Price\n token1Price\n volumeUSD\n txCount\n totalValueLockedToken0\n 
totalValueLockedToken1\n totalValueLockedUSD\n __typename\n }\n}\n", } res, err = client.R().SetBody(payload).Post("https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v3") if err != nil { log.Println("Failed to query subgraph", err.Error()) return nil } poolsDataJson, err := gabs.ParseJSON(res.Body()) if err != nil { log.Println("failed to parse pools response body") return nil } pool := poolsDataJson.Path("data.pools").Children()[0] poolResponse := PoolResponse{ ID: pool.Path("id").Data().(string), Name: pool.Path("token0.symbol").Data().(string) + "/" + pool.Path("token1.symbol").Data().(string), TVL: pool.Path("totalValueLockedUSD").Data().(string), Platform: "uniswap", Network: "Ethereum mainnet", } uniswapPools[pool.Path("id").Data().(string)] = true return &poolResponse } func GetQuickSwapPools(client *resty.Client) *PoolResponse { // get the tops pools from the subgraph payload := map[string]string{ "query": "{\n pairs(first: 5, orderBy: createdAtTimestamp, orderDirection: desc) {\n id,\n token0 {\n symbol,\n name\n },\n token1{\n symbol,\n name\n },\n volumeUSD\n totalSupply\n }\n}", } res, err := client.R().SetBody(payload).Post("https://api.thegraph.com/subgraphs/name/henrydapp/quickswap") if err != nil { log.Println("Failed to query subgraph", err.Error()) return nil } jsonParsed, err := gabs.ParseJSON(res.Body()) if err != nil { log.Println("Failed to unmarshal json fromm subgraph", err.Error()) return nil } // newpools := []string{} var pool *gabs.Container for _, child := range jsonParsed.Path("data.pairs").Children() { poolid := child.Path("id").Data().(string) if _, ok := quickswapPools[poolid]; !ok { pool = child break } } if pool == nil { log.Println("No new pools found") return nil } poolResponse := PoolResponse{ ID: pool.Path("id").Data().(string), Name: pool.Path("token0.symbol").Data().(string) + "/" + pool.Path("token1.symbol").Data().(string), TVL: pool.Path("volumeUSD").Data().(string), Platform: "QuickSwap", Network: "polygon 
mainnet", } quickswapPools[pool.Path("id").Data().(string)] = true return &poolResponse } func GetBalancerPools(client *resty.Client) *PoolResponse { // get the tops pools from the subgraph payload := map[string]string{ "query": "query { pools (first: 10, orderBy: \"createTime\", orderDirection: \"desc\", where: {totalShares_gt: 0.01, id_not_in: [\"\"], poolType_not: \"Element\", tokensList_contains: []}, skip: 0) { id poolType swapFee tokensList totalLiquidity totalSwapVolume totalSwapFee createTime totalShares owner factory amp tokens { symbol address balance weight } } }", } res, err := client.R().SetBody(payload).Post("https://api.thegraph.com/subgraphs/name/balancer-labs/balancer-polygon-v2") if err != nil { log.Println("Failed to query subgraph", err.Error()) return nil } jsonParsed, err := gabs.ParseJSON(res.Body()) if err != nil { log.Println("Failed to unmarshal json fromm subgraph", err.Error()) return nil } // newpools := []string{} var pool *gabs.Container for _, child := range jsonParsed.Path("data.pools").Children() { poolid := child.Path("id").Data().(string) if _, ok := balancerPools[poolid]; !ok { pool = child break } } if pool == nil { log.Println("No new pools found") return nil } tokenName := "" for _, child := range pool.Path("tokens").Children() { tokenName += "/" + child.Path("symbol").Data().(string) } tokenName = strings.Trim(tokenName, "/") poolResponse := PoolResponse{ ID: pool.Path("id").Data().(string), Name: tokenName, TVL: pool.Path("totalLiquidity").Data().(string), Platform: "Balancer", Network: "Polygon mainnet", } balancerPools[pool.Path("id").Data().(string)] = true return &poolResponse } func SendNotification(client *resty.Client, pool *PoolResponse) { payload := map[string]string{ "title": "New farm alert!", "message": "Found new farm " + pool.Name + " on " + pool.Platform + ", " + pool.Network + " at " + fmt.Sprint(time.Now().Format(time.RFC850)) + ".", "notificationType": "1", } // send notification _, err := 
client.R().SetBody(payload).Post("https://floating-hollows-80327.herokuapp.com/notification") if err != nil { log.Println("Failed to query subgraph", err.Error()) } } func GetNewFarms(client *resty.Client) { log.Println("Fetching new farms") uniswapPool := GetUniSwapPools(client) if uniswapPool != nil { log.Println("uniswap pool: ", uniswapPool) SendNotification(client, uniswapPool) } quickSwapPool := GetQuickSwapPools(client) if quickSwapPool != nil { log.Println("qcpool : ", quickSwapPool) SendNotification(client, quickSwapPool) } balancerPool := GetBalancerPools(client) if balancerPool != nil { log.Println("balancer pool : ", balancerPool) } }
package client import ( "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "net/url" "strconv" "strings" ) type Params struct { Url string ApiKey string } type client struct { url string apiKey string } type requestParam struct { name string value string } func Create(params Params) *client { return &client{ url: params.Url, apiKey: params.ApiKey, } } func (c *client) GetCountries() ([]Country, error) { var countries []Country resp, err := c.request("get_countries", []requestParam{}) if err != nil { return nil, fmt.Errorf("getting the countries: %s", err) } if err = json.Unmarshal(resp, &countries); err != nil { return nil, fmt.Errorf("unmarshaling json: %s", err) } return countries, nil } func (c *client) GetLeagues(countryId int) ([]League, error) { var leagues []League var params []requestParam if countryId != 0 { params = append(params, requestParam{ name: "country_id", value: strconv.Itoa(countryId), }) } resp, err := c.request("get_leagues", params) if err != nil { return nil, fmt.Errorf("getting the leagues: %s", err) } if err = json.Unmarshal(resp, &leagues); err != nil { return nil, fmt.Errorf("unmarshaling json: %s", err) } return leagues, nil } func (c *client) GetStandings(leagueId int) ([]Standing, error) { var standings []Standing var params []requestParam params = append(params, requestParam{ name: "league_id", value: strconv.Itoa(leagueId), }) resp, err := c.request("get_standings", params) if err != nil { return nil, fmt.Errorf("getting the standings: %s", err) } if err = json.Unmarshal(resp, &standings); err != nil { return nil, fmt.Errorf("unmarshling json: %s", err) } return standings, nil } func (c *client) GetEvents(from, to string, countryId, leagueId, matchId int) ([]Event, error) { var events []Event var params []requestParam params = []requestParam{ requestParam{ name: "from", value: from, }, requestParam{ name: "to", value: to, }, } if countryId != 0 { params = append(params, requestParam{ name: "country_id", value: 
strconv.Itoa(countryId), }) } if leagueId != 0 { params = append(params, requestParam{ name: "league_id", value: strconv.Itoa(leagueId), }) } if matchId != 0 { params = append(params, requestParam{ name: "match_id", value: strconv.Itoa(matchId), }) } resp, err := c.request("get_events", params) if err != nil { return nil, fmt.Errorf("getting events: %s", err) } if err = json.Unmarshal(resp, &events); err != nil { return nil, fmt.Errorf("unmarshaling json: %s", err) } return events, nil } func (c *client) GetOdds(from, to string, matchId int) ([]Odds, error) { var odds []Odds var params []requestParam params = []requestParam{ requestParam{ name: "from", value: from, }, requestParam{ name: "to", value: to, }, } if matchId != 0 { params = append(params, requestParam{ name: "match_id", value: strconv.Itoa(matchId), }) } resp, err := c.request("get_odds", params) if err != nil { return nil, fmt.Errorf("getting odds: %s", err) } if err = json.Unmarshal(resp, &odds); err != nil { return nil, fmt.Errorf("unmarshaling json: %s", err) } return odds, nil } func (c *client) request(action string, params []requestParam) ([]byte, error) { httpClient := &http.Client{} u, err := url.Parse(c.url) if err != nil { return nil, err } query := u.Query() query.Add("action", action) query.Add("APIkey", c.apiKey) for _, param := range params { query.Add(param.name, param.value) } u.RawQuery = query.Encode() resp, err := httpClient.Get(u.String()) defer resp.Body.Close() if err != nil { return nil, err } if resp.StatusCode != http.StatusOK { return nil, errors.New("response status: " + resp.Status) } b, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } if strings.HasPrefix(string(b), "{\"error\":") { var errorResponse ErrorResponse if err = json.Unmarshal(b, &errorResponse); err != nil { return nil, err } return nil, errors.New("error response with message: " + errorResponse.Message) } return b, nil }
package health import ( "io/ioutil" "net" "net/url" "regexp" "github.com/cerana/cerana/acomm" "github.com/cerana/cerana/pkg/errors" "github.com/cerana/cerana/pkg/logrusx" ) // TCPResponseArgs are arguments for TCPResponse health checks. type TCPResponseArgs struct { Address string `json:"address"` Body []byte `json:"body"` Regexp string `json:"regexp"` } // TCPResponse makes a TCP request to the specified address and checks the // response for a match to a specified regex. func (h *Health) TCPResponse(req *acomm.Request) (interface{}, *url.URL, error) { var args TCPResponseArgs if err := req.UnmarshalArgs(&args); err != nil { return nil, nil, err } if args.Address == "" { return nil, nil, errors.Newv("missing arg: address", map[string]interface{}{"args": args, "missing": "address"}) } if args.Regexp == "" { return nil, nil, errors.Newv("missing arg: regexp", map[string]interface{}{"args": args, "missing": "regexp"}) } re, err := regexp.Compile(args.Regexp) if err != nil { return nil, nil, errors.Wrapv(err, map[string]interface{}{"regexp": args.Regexp}) } conn, err := net.DialTimeout("tcp", args.Address, h.config.RequestTimeout()) if err != nil { return nil, nil, errors.Wrapv(err, map[string]interface{}{"type": "tcp", "addr": args.Address, "timeout": h.config.RequestTimeout()}) } defer logrusx.LogReturnedErr(conn.Close, nil, "failed to close tcp conn") if len(args.Body) > 0 { if _, err = conn.Write(args.Body); err != nil { return nil, nil, errors.Wrapv(err, map[string]interface{}{"addr": args.Address, "body": string(args.Body)}) } if err = conn.(*net.TCPConn).CloseWrite(); err != nil { return nil, nil, errors.Wrapv(err, map[string]interface{}{"addr": args.Address}) } } tcpResp, err := ioutil.ReadAll(conn) if err != nil { return nil, nil, errors.Wrap(err) } if !re.Match(tcpResp) { return nil, nil, errors.Newv("response did not match", map[string]interface{}{"regexp": args.Regexp, "tcpResponse": string(tcpResp)}) } return nil, nil, nil }
// Given a sorted array of distinct integers and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order. // //   // Example 1: // Input: nums = [1,3,5,6], target = 5 // Output: 2 // Example 2: // Input: nums = [1,3,5,6], target = 2 // Output: 1 // Example 3: // Input: nums = [1,3,5,6], target = 7 // Output: 4 // Example 4: // Input: nums = [1,3,5,6], target = 0 // Output: 0 // Example 5: // Input: nums = [1], target = 0 // Output: 0 // //   // Constraints: // // // 1 <= nums.length <= 104 // -104 <= nums[i] <= 104 // nums contains distinct values sorted in ascending order. // -104 <= target <= 104 // // func searchInsert(nums []int, target int) int { if len(nums) == 0 { return 0 } for i, v := range nums { if v >= target { return i } } return len(nums) }
package examples import ( "io" "math/rand" "os" "github.com/go-echarts/go-echarts/v2/charts" "github.com/go-echarts/go-echarts/v2/components" "github.com/go-echarts/go-echarts/v2/opts" ) var ( itemCntLine = 6 fruits = []string{"Apple", "Banana", "Peach ", "Lemon", "Pear", "Cherry"} ) func generateLineItems() []opts.LineData { items := make([]opts.LineData, 0) for i := 0; i < itemCntLine; i++ { items = append(items, opts.LineData{Value: rand.Intn(300)}) } return items } func generateLineData(data []float32) []opts.LineData { items := make([]opts.LineData, 0) for i := 0; i < len(data); i++ { items = append(items, opts.LineData{Value: data[i]}) } return items } func lineBase() *charts.Line { line := charts.NewLine() line.SetGlobalOptions( charts.WithTitleOpts(opts.Title{Title: "basic line example", Subtitle: "This is the subtitle."}), ) line.SetXAxis(fruits). AddSeries("Category A", generateLineItems()) return line } func lineShowLabel() *charts.Line { line := charts.NewLine() line.SetGlobalOptions( charts.WithTitleOpts(opts.Title{ Title: "title and label options", Subtitle: "go-echarts is an awesome chart library written in Golang", Link: "https://github.com/go-echarts/go-echarts", }), ) line.SetXAxis(fruits). AddSeries("Category A", generateLineItems()). SetSeriesOptions( charts.WithLabelOpts(opts.Label{ Show: true, }), ) return line } func lineMarkPoint() *charts.Line { line := charts.NewLine() line.SetGlobalOptions( charts.WithTitleOpts(opts.Title{ Title: "markpoint options", }), ) line.SetXAxis(fruits).AddSeries("Category A", generateLineItems()). 
SetSeriesOptions( charts.WithMarkPointNameTypeItemOpts( opts.MarkPointNameTypeItem{Name: "Maximum", Type: "max"}, opts.MarkPointNameTypeItem{Name: "Average", Type: "average"}, opts.MarkPointNameTypeItem{Name: "Minimum", Type: "min"}, ), charts.WithMarkPointStyleOpts( opts.MarkPointStyle{Label: &opts.Label{Show: true}}), ) return line } func lineSplitLine() *charts.Line { line := charts.NewLine() line.SetGlobalOptions( charts.WithTitleOpts(opts.Title{ Title: "splitline options", }), charts.WithYAxisOpts(opts.YAxis{ SplitLine: &opts.SplitLine{ Show: true, }, }), ) line.SetXAxis(fruits).AddSeries("Category A", generateLineItems(), charts.WithLabelOpts( opts.Label{Show: true}, )) return line } func lineStep() *charts.Line { line := charts.NewLine() line.SetGlobalOptions( charts.WithTitleOpts(opts.Title{ Title: "step style", }), ) line.SetXAxis(fruits).AddSeries("Category A", generateLineItems()). SetSeriesOptions(charts.WithLineChartOpts( opts.LineChart{ Step: true, }), ) return line } func lineSmooth() *charts.Line { line := charts.NewLine() line.SetGlobalOptions( charts.WithTitleOpts(opts.Title{ Title: "smooth style", }), ) line.SetXAxis(fruits).AddSeries("Category A", generateLineItems()). SetSeriesOptions(charts.WithLineChartOpts( opts.LineChart{ Smooth: true, }), ) return line } func lineArea() *charts.Line { line := charts.NewLine() line.SetGlobalOptions( charts.WithTitleOpts(opts.Title{ Title: "area options", }), ) line.SetXAxis(fruits).AddSeries("Category A", generateLineItems()). SetSeriesOptions( charts.WithLabelOpts( opts.Label{ Show: true, }), charts.WithAreaStyleOpts( opts.AreaStyle{ Opacity: 0.2, }), ) return line } func lineSmoothArea() *charts.Line { line := charts.NewLine() line.SetGlobalOptions( charts.WithTitleOpts(opts.Title{Title: "smooth area"}), ) line.SetXAxis(fruits).AddSeries("Category A", generateLineItems()). 
SetSeriesOptions( charts.WithLabelOpts(opts.Label{ Show: true, }), charts.WithAreaStyleOpts(opts.AreaStyle{ Opacity: 0.2, }), charts.WithLineChartOpts(opts.LineChart{ Smooth: true, }), ) return line } func lineOverlap() *charts.Line { line := charts.NewLine() line.SetGlobalOptions( charts.WithTitleOpts(opts.Title{Title: "overlap rect-charts"}), ) line.SetXAxis(fruits). AddSeries("Category A", generateLineItems()) line.Overlap(esEffectStyle()) line.Overlap(scatterBase()) return line } func lineMulti() *charts.Line { line := charts.NewLine() line.SetGlobalOptions( charts.WithTitleOpts(opts.Title{ Title: "multi lines", }), charts.WithInitializationOpts(opts.Initialization{ Theme: "shine", }), ) line.SetXAxis(fruits). AddSeries("Category A", generateLineItems()). AddSeries("Category B", generateLineItems()). AddSeries("Category C", generateLineItems()). AddSeries("Category D", generateLineItems()) return line } func lineDemo() *charts.Line { line := charts.NewLine() line.SetGlobalOptions( charts.WithTitleOpts(opts.Title{ Title: "Search Time: Hash table vs Binary search", }), charts.WithYAxisOpts(opts.YAxis{ Name: "Cost time(ns)", SplitLine: &opts.SplitLine{ Show: false, }, }), charts.WithXAxisOpts(opts.XAxis{ Name: "Elements", }), ) line.SetXAxis([]string{"10e1", "10e2", "10e3", "10e4", "10e5", "10e6", "10e7"}). AddSeries("map", generateLineItems(), charts.WithLabelOpts(opts.Label{Show: true, Position: "bottom"})). AddSeries("slice", generateLineData([]float32{24.9, 34.9, 48.1, 58.3, 69.7, 123, 131}), charts.WithLabelOpts(opts.Label{Show: true, Position: "top"})). 
SetSeriesOptions( charts.WithMarkLineNameTypeItemOpts(opts.MarkLineNameTypeItem{ Name: "Average", Type: "average", }), charts.WithLineChartOpts(opts.LineChart{ Smooth: true, }), charts.WithMarkPointStyleOpts(opts.MarkPointStyle{ Label: &opts.Label{ Show: true, Formatter: "{a}: {b}", }, }), ) return line } type LineExamples struct{} func (LineExamples) Examples() { page := components.NewPage() page.AddCharts( lineBase(), lineShowLabel(), lineMarkPoint(), lineSplitLine(), lineStep(), lineSmooth(), lineArea(), lineSmoothArea(), lineOverlap(), lineMulti(), lineDemo(), ) f, err := os.Create("examples/html/line.html") if err != nil { panic(err) } page.Render(io.MultiWriter(f)) }
// http://mschoebel.info/2014/03/09/snippet-golang-webapp-login-logout.html // simple example for session management using secure cookies // Serve two pages - an index page providing a login form and // an internal page that is only accessible to authenticated users (= users that have used the login form). // The internal page provides a possibility to log out. // This has to be implemented using only the Golang standard packages and the Gorilla toolkit. package main import ( "fmt" "github.com/gorilla/mux" "github.com/gorilla/securecookie" "net/http" ) const ( USERNAME = "prezi" PASSWORD = "prezi" ) // A secure cookie handler is initialized. // The required parameters (hashKey and blockKey) are generated randomly. var cookieHandler = securecookie.New( securecookie.GenerateRandomKey(64), securecookie.GenerateRandomKey(32)) var router = mux.NewRouter() func main() { router.HandleFunc("/", indexPageHandler) router.HandleFunc("/internal", internalPageHandler) router.HandleFunc("/login", loginHandler).Methods("POST") router.HandleFunc("/logout", logoutHandler).Methods("POST") http.Handle("/", router) http.ListenAndServe(":8080", nil) } const indexPage = ` <h1>Login</h1> <form method="post" action="/login"> <label for="name">User name</label> <input type="text" id="name" name="name"> <label for="password">Password</label> <input type="password" id="password" name="password"> <button type="submit">Login</button> </form> ` func indexPageHandler(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, indexPage) } const internalPage = ` <h1>Internal</h1> <hr> <small>User: %s</small> <form method="post" action="/logout"> <button type="submit">Logout</button> </form> ` func internalPageHandler(w http.ResponseWriter, r *http.Request) { userName := getUserName(r) if userName != "" { fmt.Fprintf(w, internalPage, userName) } else { http.Redirect(w, r, "/", 302) } } func loginHandler(w http.ResponseWriter, r *http.Request) { name := r.FormValue("name") password := 
r.FormValue("password") redirectTarget := "/" if name != "" && password != "" { // check credentials if name == USERNAME && password == PASSWORD { setSession(name, w) redirectTarget = "/internal" } } http.Redirect(w, r, redirectTarget, 302) } func logoutHandler(w http.ResponseWriter, r *http.Request) { clearSession(w) http.Redirect(w, r, "/", 302) } func setSession(userName string, response http.ResponseWriter) { value := map[string]string{ "name": userName, } if encoded, err := cookieHandler.Encode("session", value); err == nil { cookie := &http.Cookie{ Name: "session", Value: encoded, Path: "/", } http.SetCookie(response, cookie) } } func getUserName(request *http.Request) (userName string) { if cookie, err := request.Cookie("session"); err == nil { cookieValue := make(map[string]string) if err = cookieHandler.Decode("session", cookie.Value, &cookieValue); err == nil { userName = cookieValue["name"] } } return userName } func clearSession(response http.ResponseWriter) { cookie := &http.Cookie{ Name: "session", Value: "", Path: "/", MaxAge: -1, } http.SetCookie(response, cookie) }
// Tests for passwd-file based user lookup. Each test writes a temporary
// passwd-format fixture (including a deliberately malformed line to check
// the parser tolerates garbage) and points the package-level passwdFilePath
// override at it.
package passwduser

import (
	"io/ioutil"
	"os"
	"os/user"
	"reflect"
	"testing"
)

// TestLookup verifies Lookup by username against a fixture passwd file.
func TestLookup(t *testing.T) {
	const passwdContent = `
root:x:0:0:root user:/root:/bin/bash
adm:x:42:43:adm:/var/adm:/bin/false
111:x:222:333::/home/111:/bin/false
this is just some garbage data
`
	tests := []struct {
		testDescription string
		username        string
		expected        User
	}{
		{
			testDescription: "RootUser",
			username:        "root",
			expected: User{
				UID:      "0",
				GID:      "0",
				Username: "root",
				Name:     "root",
				HomeDir:  "/root",
			},
		},
		{
			testDescription: "NonRootUser",
			username:        "adm",
			expected: User{
				UID:      "42",
				GID:      "43",
				Username: "adm",
				Name:     "adm",
				HomeDir:  "/var/adm",
			},
		},
		{
			testDescription: "NumericUsernames",
			username:        "111",
			expected: User{
				UID:      "222",
				GID:      "333",
				Username: "111",
				Name:     "111",
				HomeDir:  "/home/111",
			},
		},
	}

	passwdFile, err := writePasswdFile(passwdContent)
	if err != nil {
		t.Logf("got unexpected error: %s", err.Error())
		t.Fatal()
	}
	// passwdFilePath is a package-level override (declared in the package
	// under test) that redirects lookups away from /etc/passwd.
	passwdFilePath = passwdFile.Name()

	// The "Group" wrapper ensures all parallel subtests finish before the
	// temp file is removed below.
	t.Run("Group", func(t *testing.T) {
		for _, test := range tests {
			test := test // capture range variable for the parallel closure
			t.Run(test.testDescription, func(t *testing.T) {
				t.Parallel()
				actual, err := Lookup(test.username)
				if err != nil {
					t.Logf(
						"got unexpected error when looking up user '%s': %s",
						test.username,
						err.Error(),
					)
					t.Fail()
					return
				}
				if !reflect.DeepEqual(test.expected, *actual) {
					t.Logf("username: %v", test.username)
					t.Logf("got: %#v", actual)
					t.Logf("expected: %#v", test.expected)
					t.Fail()
				}
			})
		}
	})

	if err := os.Remove(passwdFile.Name()); err != nil {
		t.Logf("got unexpected error: %s", err.Error())
		t.Fatal()
	}
}

// TestLookupID verifies LookupID by numeric uid against the same fixture.
func TestLookupID(t *testing.T) {
	const passwdContent = `
root:x:0:0:root user:/root:/bin/bash
adm:x:42:43:adm:/var/adm:/bin/false
111:x:222:333::/home/111:/bin/false
this is just some garbage data
`
	tests := []struct {
		testDescription string
		uid             string
		expected        User
	}{
		{
			testDescription: "RootUser",
			uid:             "0",
			expected: User{
				UID:      "0",
				GID:      "0",
				Username: "root",
				Name:     "root",
				HomeDir:  "/root",
			},
		},
		{
			testDescription: "NonRootUser",
			uid:             "42",
			expected: User{
				UID:      "42",
				GID:      "43",
				Username: "adm",
				Name:     "adm",
				HomeDir:  "/var/adm",
			},
		},
		{
			testDescription: "NumericUsernames",
			uid:             "222",
			expected: User{
				UID:      "222",
				GID:      "333",
				Username: "111",
				Name:     "111",
				HomeDir:  "/home/111",
			},
		},
	}

	passwdFile, err := writePasswdFile(passwdContent)
	if err != nil {
		t.Logf("got unexpected error: %s", err.Error())
		t.Fatal()
	}
	passwdFilePath = passwdFile.Name()

	t.Run("Group", func(t *testing.T) {
		for _, test := range tests {
			test := test
			t.Run(test.testDescription, func(t *testing.T) {
				t.Parallel()
				actual, err := LookupID(test.uid)
				if err != nil {
					t.Logf(
						"got unexpected error when looking up user id '%s': %s",
						test.uid,
						err.Error(),
					)
					t.Fail()
					return
				}
				if !reflect.DeepEqual(test.expected, *actual) {
					t.Logf("uid: %v", test.uid)
					t.Logf("got: %#v", actual)
					t.Logf("expected: %#v", test.expected)
					t.Fail()
				}
			})
		}
	})

	if err := os.Remove(passwdFile.Name()); err != nil {
		t.Logf("got unexpected error: %s", err.Error())
		t.Fatal()
	}
}

// TestLookupErrors checks that Lookup reports os/user-compatible errors
// for unknown usernames (including numeric strings absent from the file).
func TestLookupErrors(t *testing.T) {
	const passwdContent = `
root:x:0:0:root user:/root:/bin/bash
adm:x:42:43:adm:/var/adm:/bin/false
111:x:222:333::/home/111:/bin/false
this is just some garbage data
`
	tests := []struct {
		testDescription string
		username        string
		expectedError   string
	}{
		{
			"NonExistingUsername",
			"test",
			user.UnknownUserError("test").Error(),
		},
		{
			// "222" exists as a uid but not as a username.
			"NonExistingNumbericUsername",
			"222",
			user.UnknownUserError("222").Error(),
		},
	}

	passwdFile, err := writePasswdFile(passwdContent)
	if err != nil {
		t.Logf("got unexpected error: %s", err.Error())
		t.Fatal()
	}
	passwdFilePath = passwdFile.Name()

	t.Run("Group", func(t *testing.T) {
		for _, test := range tests {
			test := test
			t.Run(test.testDescription, func(t *testing.T) {
				t.Parallel()
				_, err := Lookup(test.username)
				if err == nil {
					t.Logf("expected error, got nil when looking up username '%s'",
						test.username,
					)
					t.Fail()
					return
				}
				actualError := err.Error()
				if actualError != test.expectedError {
					t.Logf("username: %v", test.username)
					t.Logf("got error: %v", actualError)
					t.Logf("expected error: %v", test.expectedError)
					t.Fail()
				}
			})
		}
	})

	if err := os.Remove(passwdFile.Name()); err != nil {
		t.Logf("got unexpected error: %s", err.Error())
		t.Fatal()
	}
}

// TestLookupIDErrors checks that LookupID reports os/user-compatible
// errors for unknown uids (including "111", which exists as a username
// but not as a uid).
func TestLookupIDErrors(t *testing.T) {
	const passwdContent = `
root:x:0:0:root user:/root:/bin/bash
adm:x:42:43:adm:/var/adm:/bin/false
111:x:222:333::/home/111:/bin/false
this is just some garbage data
`
	tests := []struct {
		testDescription string
		uid             string
		expectedError   string
	}{
		{
			"NonExistingUsername",
			"-20",
			user.UnknownUserIdError(-20).Error(),
		},
		{
			"NonExistingNumbericUsername",
			"111",
			user.UnknownUserIdError(111).Error(),
		},
	}

	passwdFile, err := writePasswdFile(passwdContent)
	if err != nil {
		t.Logf("got unexpected error: %s", err.Error())
		t.Fatal()
	}
	passwdFilePath = passwdFile.Name()

	t.Run("Group", func(t *testing.T) {
		for _, test := range tests {
			test := test
			t.Run(test.testDescription, func(t *testing.T) {
				t.Parallel()
				_, err := LookupID(test.uid)
				if err == nil {
					t.Logf("expected error, got nil when looking up user id '%s'",
						test.uid,
					)
					t.Fail()
					return
				}
				actualError := err.Error()
				if actualError != test.expectedError {
					t.Logf("uid: %v", test.uid)
					t.Logf("got error: %v", actualError)
					t.Logf("expected error: %v", test.expectedError)
					t.Fail()
				}
			})
		}
	})

	if err := os.Remove(passwdFile.Name()); err != nil {
		t.Logf("got unexpected error: %s", err.Error())
		t.Fatal()
	}
}

// writePasswdFile writes passwdContent to a fresh temp file and returns
// the closed handle; callers use .Name() and remove the file afterwards.
func writePasswdFile(passwdContent string) (*os.File, error) {
	passwdFile, err := ioutil.TempFile("", "")
	if err != nil {
		return nil, err
	}
	_, err = passwdFile.WriteString(passwdContent)
	if err != nil {
		return nil, err
	}
	if err = passwdFile.Close(); err != nil {
		return nil, err
	}
	return passwdFile, nil
}
package main

import (
	"context"
	"encoding/binary"
	"fmt"
	"io"

	logging "github.com/ipfs/go-log/v2"
	pool "github.com/libp2p/go-buffer-pool"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
)

var log = logging.Logger("perf")

const (
	// ID is the protocol ID of the perf protocol.
	ID = "/perf/1.0.0"
	// blockSize is the chunk size used when streaming bytes.
	blockSize = 64 << 10 // 64 KiB
)

// PerfService serves and runs the perf measurement protocol on a host.
type PerfService struct {
	Host host.Host
}

// NewPerfService registers the perf stream handler on h and returns the service.
func NewPerfService(h host.Host) *PerfService {
	ps := &PerfService{h}
	h.SetStreamHandler(ID, ps.PerfHandler)
	return ps
}

// PerfHandler serves one perf stream: it reads the requested download size
// (big-endian uint64), drains whatever the client uploads, then sends the
// requested number of bytes back and closes the write side.
func (ps *PerfService) PerfHandler(s network.Stream) {
	u64Buf := make([]byte, 8)
	if _, err := io.ReadFull(s, u64Buf); err != nil {
		// FIX: Errorw takes a message followed by key/value pairs; the
		// original passed err as a dangling key.
		log.Errorw("reading size to send", "err", err)
		s.Reset()
		return
	}
	bytesToSend := binary.BigEndian.Uint64(u64Buf)

	if _, err := drainStream(s); err != nil {
		log.Errorw("draining incoming bytes", "err", err)
		s.Reset()
		return
	}

	if err := sendBytes(s, bytesToSend); err != nil {
		log.Errorw("sending bytes", "err", err)
		s.Reset()
		return
	}
	s.CloseWrite()
}

// RunPerf uploads bytesToSend bytes to peer p and asks it to send
// bytesToRecv bytes back, returning an error if the received byte count
// does not match.
func (ps *PerfService) RunPerf(ctx context.Context, p peer.ID, bytesToSend uint64, bytesToRecv uint64) error {
	s, err := ps.Host.NewStream(ctx, p, ID)
	if err != nil {
		return err
	}

	// Tell the server how many bytes we expect back.
	sizeBuf := make([]byte, 8)
	binary.BigEndian.PutUint64(sizeBuf, bytesToRecv)
	if _, err := s.Write(sizeBuf); err != nil {
		s.Reset() // FIX: don't leak the stream on failure
		return err
	}

	if err := sendBytes(s, bytesToSend); err != nil {
		s.Reset()
		return err
	}
	s.CloseWrite()

	recvd, err := drainStream(s)
	if err != nil {
		s.Reset()
		return err
	}
	if recvd != bytesToRecv {
		return fmt.Errorf("expected to recv %d bytes, got %d", bytesToRecv, recvd)
	}
	return nil
}

// sendBytes writes bytesToSend bytes to s in blockSize chunks using a
// pooled scratch buffer (contents are whatever the pool hands out — only
// the byte count matters for perf).
func sendBytes(s io.Writer, bytesToSend uint64) error {
	buf := pool.Get(blockSize)
	defer pool.Put(buf)

	for bytesToSend > 0 {
		toSend := buf
		if bytesToSend < blockSize {
			toSend = buf[:bytesToSend]
		}
		n, err := s.Write(toSend)
		if err != nil {
			return err
		}
		bytesToSend -= uint64(n)
	}
	return nil
}

// drainStream discards everything readable from s until EOF and reports
// how many bytes were read. io.Copy returns nil on EOF, so the original's
// io.EOF special-case was dead code.
func drainStream(s io.Reader) (uint64, error) {
	n, err := io.Copy(io.Discard, s)
	return uint64(n), err
}
/*
 * Npcf_SMPolicyControl API
 *
 * Session Management Policy Control Service © 2019, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TSDSI, TTA, TTC). All rights reserved.
 *
 * API version: 1.0.4
 * Generated by: OpenAPI Generator (https://openapi-generator.tech)
 */

package openapi

// UpPathChgEvent describes a subscription to user-plane path change
// notifications sent by the SMF (part of the generated
// Npcf_SMPolicyControl API model).
type UpPathChgEvent struct {
	// NotificationUri is the URI the SMF sends the notification to.
	NotificationUri string `json:"notificationUri"`
	// It is used to set the value of Notification Correlation ID in the notification sent by the SMF.
	NotifCorreId string `json:"notifCorreId"`
	// DnaiChgType indicates which type of DNAI change triggers a notification.
	DnaiChgType DnaiChangeType `json:"dnaiChgType"`
}
package main

import (
	"golang.org/x/net/context"
	"google.golang.org/appengine"
	"google.golang.org/appengine/aetest"
	"google.golang.org/appengine/user"
	"testing"
)

// TestUser exercises FindOrCreateUser both without and with a logged-in
// aetest user on a single dev-appserver instance.
func TestUser(t *testing.T) {
	inst, err := aetest.NewInstance(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer inst.Close()

	req, err := inst.NewRequest("GET", "/", nil)
	if err != nil {
		t.Fatalf("Failed to create req: %v", err)
	}
	testFindOrCreateWithoutAuth(t, appengine.NewContext(req))

	u := &user.User{Email: "email@test.com"}
	aetest.Login(u, req)
	testFindOrCreateWithAuth(t, appengine.NewContext(req))
}

// testFindOrCreateWithoutAuth asserts that an unauthenticated context
// yields no user and no error.
func testFindOrCreateWithoutAuth(t *testing.T, ctx context.Context) {
	t.Helper() // report failures at the caller's line
	u, err := FindOrCreateUser(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if u != nil {
		t.Fatal("Expected nil user")
	}
}

// testFindOrCreateWithAuth asserts that an authenticated context yields a
// user whose email and datastore parent match the logged-in account.
func testFindOrCreateWithAuth(t *testing.T, ctx context.Context) {
	t.Helper()
	got, err := FindOrCreateUser(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if got == nil {
		t.Fatal("User should not be nil")
	}
	// FIX: the original local variable `user` shadowed the imported
	// `user` package within this function.
	current := user.Current(ctx)
	if got.Email != current.Email {
		t.Fatal("Email doesn't match")
	}
	if got.Key.Parent().StringID() != current.ID {
		t.Fatal("Parent ID doesn't match")
	}
}
package persistence

import "testing"

// TestGetCategory looks up a single category and logs its id.
func TestGetCategory(t *testing.T) {
	c, err := GetCategory("BIRDS")
	if err != nil {
		// FIX: t.Fatal instead of t.Error — the original continued and
		// dereferenced c, panicking on a nil result.
		t.Fatal(err)
	}
	t.Log(c, "categoryId:", c.CategoryId)
}

// TestGetCategoryList fetches the category list and logs an entry.
func TestGetCategoryList(t *testing.T) {
	r, err := GetCategoryList()
	if err != nil {
		t.Fatal(err)
	}
	// FIX: guard the index — r[1] panicked when fewer than 2 rows came back.
	if len(r) < 2 {
		t.Fatalf("expected at least 2 categories, got %d", len(r))
	}
	t.Log(r[1])
}
// Package-internal JSON shapes for the remote API's responses. Pointer
// fields distinguish "absent" from the zero value when decoding.
package elevengo

// _UserInfo holds the signed-in user's identifier.
type _UserInfo struct {
	UserId string
}

// _OfflineToken carries the sign/time pair and quota for offline-task calls.
type _OfflineToken struct {
	Sign        string
	Time        int64
	QuotaTotal  int
	QuotaRemain int
}

// _BasicResult is the common success/error envelope of API responses.
type _BasicResult struct {
	State       bool    `json:"state"`
	Error       *string `json:"error"`
	ErrorType   *string `json:"errtype"`
	MessageCode int     `json:"msg_code"`
	Message     *string `json:"msg"`
}

// _FileRetrieveResult extends _BasicResult for read endpoints, whose
// error number is numeric.
type _FileRetrieveResult struct {
	_BasicResult
	ErrorNo int `json:"errNo"`
}

// _FileOperateResult extends _BasicResult for write endpoints; note the
// server returns "errno" as a string here.
type _FileOperateResult struct {
	_BasicResult
	ErrorNo string `json:"errno"`
}

// _InnerFileListData is one file or directory entry in a listing.
type _InnerFileListData struct {
	ParentId   *string `json:"pid"`
	CategoryId string  `json:"cid"`
	FileId     *string `json:"fid"`
	Name       string  `json:"n"`
	Size       int64   `json:"s"`
	PickCode   string  `json:"pc"`
	Sha1       *string `json:"sha"`
	CreateTime string  `json:"tp"`
	UpdateTime string  `json:"te"`
}

// _FileListResult is a paginated directory listing.
type _FileListResult struct {
	_FileRetrieveResult
	TotalCount int                   `json:"count"`
	SysCount   int                   `json:"sys_count"`
	Offset     int                   `json:"offset"`
	Limit      int                   `json:"limit"`
	PageSize   int                   `json:"page_size"`
	Data       []*_InnerFileListData `json:"data"`
}

// _FileSearchResult is a paginated search result.
type _FileSearchResult struct {
	_FileRetrieveResult
	TotalCount int                   `json:"count"`
	Offset     int                   `json:"offset"`
	PageSize   int                   `json:"page_size"`
	Data       []*_InnerFileListData `json:"data"`
}

// _FileAddResult is returned when creating a directory/file entry.
type _FileAddResult struct {
	_FileOperateResult
	AreaId       NumberString `json:"aid"`
	CategoryId   NumberString `json:"cid"`
	CategoryName string       `json:"cname"`
	FileId       string       `json:"file_id"`
	FileName     string       `json:"file_name"`
}

// _FileDownloadResult carries the download URL for one file.
type _FileDownloadResult struct {
	_FileRetrieveResult
	FileId   string `json:"file_id"`
	FileName string `json:"file_name"`
	FileSize string `json:"file_size"`
	Pickcode string `json:"pickcode"`
	FileUrl  string `json:"file_url"`
}

// _FileUploadInitResult holds the OSS upload ticket (host, policy,
// signature, callback) handed out before an upload.
type _FileUploadInitResult struct {
	AccessKeyId string `json:"accessid"`
	Callback    string `json:"callback"`
	Expire      int    `json:"expire"`
	UploadUrl   string `json:"host"`
	ObjectKey   string `json:"object"`
	Policy      string `json:"policy"`
	Signature   string `json:"signature"`
}

// _InnerFileUploadData describes the file created by a finished upload.
type _InnerFileUploadData struct {
	CategoryId string `json:"cid"`
	FileId     string `json:"file_id"`
	FileName   string `json:"file_name"`
	// NOTE(review): "FizeSize" is a typo for FileSize; it is referenced
	// elsewhere in the package, so renaming must be done package-wide.
	FizeSize string `json:"file_size"`
	PickCode string `json:"pick_code"`
	Sha1     string `json:"sha1"`
}

// _FileUploadResult is the upload-callback envelope (its own shape, not
// _BasicResult).
type _FileUploadResult struct {
	State   bool                  `json:"state"`
	Code    int                   `json:"code"`
	Message string                `json:"message"`
	Data    *_InnerFileUploadData `json:"data"`
}

// _OfflineSpaceResult reports offline-download quota plus the sign/time
// pair needed by subsequent offline calls.
type _OfflineSpaceResult struct {
	State bool    `json:"state"`
	Data  float64 `json:"data"`
	Size  string  `json:"size"`
	Url   string  `json:"url"`
	BtUrl string  `json:"bt_url"`
	Limit int64   `json:"limit"`
	Sign  string  `json:"sign"`
	Time  int64   `json:"time"`
}

// _OfflineBasicResult is the error envelope of offline-task endpoints.
type _OfflineBasicResult struct {
	State        bool    `json:"state"`
	ErrorNo      int     `json:"errno"`
	ErrorCode    int     `json:"errcode"`
	ErrorType    string  `json:"errtype"`
	ErrorMessage *string `json:"error_msg"`
}

// _OfflineListResult is a paginated listing of offline tasks.
type _OfflineListResult struct {
	_OfflineBasicResult
	Page       int            `json:"page"`
	PageCount  int            `json:"page_count"`
	PageRow    int            `json:"page_row"`
	Count      int            `json:"count"`
	Quota      int            `json:"quota"`
	QuotaTotal int            `json:"total"`
	Tasks      []*OfflineTask `json:"tasks"`
}

// _OfflineAddResult is returned when a new offline task is queued.
type _OfflineAddResult struct {
	_OfflineBasicResult
	InfoHash string `json:"info_hash"`
	Name     string `json:"name"`
}

// _OfflineGetDirResult resolves the target directory of offline downloads.
type _OfflineGetDirResult struct {
	CategoryId string `json:"cid"`
}

// _TorrentFile is one file entry inside a torrent.
type _TorrentFile struct {
	Path string `json:"path"`
	Size int64  `json:"size"`
}

// _OfflineTorrentInfoResult describes a parsed torrent and its file list.
type _OfflineTorrentInfoResult struct {
	_OfflineBasicResult
	TorrentName string          `json:"torrent_name"`
	InfoHash    string          `json:"info_hash"`
	FileSize    int64           `json:"file_size"`
	FileCount   int             `json:"file_count"`
	FileList    []*_TorrentFile `json:"torrent_filelist_web"`
}

// _CaptchaSignResult carries the sign token for captcha verification.
type _CaptchaSignResult struct {
	State bool   `json:"state"`
	Sign  string `json:"sign"`
}
// Package migration implements a minimal SQL-file-based migration runner
// backed by a "schema_migrations" version table in PostgreSQL.
package migration

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"sort"
	"strconv"
	"strings"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // nolint
)

// upFiles search for migration up files and return
// a sorted array with the path of all found files
func upFiles(dir string) (files []string, err error) {
	// filepath.Glob returns its matches already sorted lexically.
	files, err = filepath.Glob(filepath.Join(dir, "*.up.sql"))
	return
}

// downFiles search for migration down files and return
// a sorted array with the path of all found files
func downFiles(dir string, n int) (files []string, err error) {
	// NOTE(review): the Glob error is not checked before slicing below;
	// if Glob failed (files == nil) and n > 0 the slice expression panics.
	files, err = filepath.Glob(filepath.Join(dir, "*.down.sql"))
	// Reverse lexical order so the newest migration comes first.
	sort.Sort(sort.Reverse(sort.StringSlice(files)))
	// Keep the trailing n entries of the reversed list — presumably the
	// files matching the n currently-applied migrations; verify against
	// the file naming scheme.
	files = files[len(files)-n:]
	return
}

// up applies up-migrations from source, starting after migration `start`
// (the current schema version); n bounds the slice end in execUp.
func up(source string, start, n int, db *sqlx.DB) (err error) {
	files, err := upFiles(source)
	if err != nil {
		return
	}
	err = execUp(files, start, n, db)
	return
}

// down rolls back migrations; the current schema version (migrationMax)
// bounds how many down files are considered relevant.
func down(source string, start, n int, db *sqlx.DB) (err error) {
	nfiles, err := migrationMax(db)
	if err != nil {
		return
	}
	files, err := downFiles(source, nfiles)
	if err != nil {
		return
	}
	err = execDown(files, start, n, db)
	return
}

// execDown executes the SQL of files[start:n] and deletes the matching
// schema_migrations rows, counting versions down from len(files).
func execDown(files []string, start, n int, db *sqlx.DB) (err error) {
	// Highest applied version is assumed equal to the number of files.
	i := len(files)
	if i == 0 {
		return
	}
	for _, f := range files[start:n] {
		var b []byte
		b, err = ioutil.ReadFile(f) // nolint
		if err != nil {
			return
		}
		_, err = db.Exec(string(b))
		if err != nil {
			return
		}
		err = deleteMigrations(i, db)
		if err != nil {
			return
		}
		i--
	}
	return
}

// execUp executes the SQL of files[start:n] and records each applied
// version. n == 0 means "run everything remaining".
// NOTE(review): when the caller passes a user-supplied count n with
// 0 < n < start, files[start:n] panics — confirm intended semantics.
func execUp(files []string, start, n int, db *sqlx.DB) (err error) {
	if n == 0 {
		n = len(files)
	}
	i := start
	for _, f := range files[start:n] {
		var b []byte
		b, err = ioutil.ReadFile(f) // nolint
		if err != nil {
			return
		}
		_, err = db.Exec(string(b))
		if err != nil {
			return
		}
		i++
		err = insertMigrations(i, db)
		if err != nil {
			return
		}
	}
	return
}

// parsePar parses the optional numeric argument of a migration command
// ("up 2" -> 2); a missing argument yields 0.
func parsePar(m []string) (n int, err error) {
	if len(m) > 1 {
		n, err = strconv.Atoi(m[1])
		if err != nil {
			// The original Atoi error is replaced by a generic message.
			err = fmt.Errorf("invalid syntax")
			return
		}
	}
	return
}

// Run parse and performs the required migration
func Run(source, database, migrate string) (err error) {
	var start, n int
	//"postgres://postgres@localhost:5432/cesar?sslmode=disable")
	db, err := open(database)
	if err != nil {
		return
	}
	// migrate is e.g. "up", "up 2", "down", "down 3".
	m := strings.Split(migrate, " ")
	if len(m) > 2 {
		err = fmt.Errorf("the number of migration parameters is incorrect")
		return
	}
	switch m[0] {
	case "up":
		n, err = parsePar(m)
		if err != nil {
			return
		}
		err = initSchemaMigrations(db)
		if err != nil {
			return
		}
		// Resume from the current schema version.
		start, err = migrationMax(db)
		if err != nil {
			return
		}
		err = up(source, start, n, db)
	case "down":
		n, err = parsePar(m)
		if err != nil {
			return
		}
		// Bare "down" rolls back exactly one migration.
		if n == 0 {
			n++
		}
		err = down(source, 0, n, db)
	default:
		err = fmt.Errorf("unknown migration command")
	}
	return
}

// open connects to PostgreSQL and verifies the connection with a ping.
func open(database string) (db *sqlx.DB, err error) {
	db, err = sqlx.Open("postgres", database)
	if err != nil {
		err = fmt.Errorf("error open db: %v", err)
		return
	}
	err = db.Ping()
	if err != nil {
		err = fmt.Errorf("error ping db: %v", err)
	}
	return
}

// insertMigrations records version n as applied.
func insertMigrations(n int, db *sqlx.DB) (err error) {
	sql := `INSERT INTO schema_migrations ("version") VALUES ($1)`
	_, err = db.Exec(sql, n)
	return
}

// deleteMigrations removes version n from the applied set.
func deleteMigrations(n int, db *sqlx.DB) (err error) {
	sql := `DELETE FROM schema_migrations WHERE "version"=$1`
	_, err = db.Exec(sql, n)
	return
}

// schemaMigrationsExists reports whether the version table exists
// (to_regclass returns NULL for unknown relations).
func schemaMigrationsExists(db *sqlx.DB) (b bool, err error) {
	s := struct {
		Select interface{} `db:"s"`
	}{}
	err = db.Get(&s, "SELECT to_regclass('schema_migrations') AS s")
	b = s.Select != nil
	return
}

// createMigrationTable creates the schema_migrations version table.
func createMigrationTable(db *sqlx.DB) (err error) {
	sql := `CREATE TABLE schema_migrations (version bigint NOT NULL, CONSTRAINT schema_migrations_pkey PRIMARY KEY (version))`
	_, err = db.Exec(sql)
	return
}

// migrationMax returns the highest applied version (0 when none).
func migrationMax(db *sqlx.DB) (m int, err error) {
	s := struct {
		Max int `db:"m"`
	}{}
	err = db.Get(&s, `SELECT coalesce(max("version"),0) AS m FROM schema_migrations`)
	m = s.Max
	return
}

// initSchemaMigrations creates the version table if it does not exist yet.
func initSchemaMigrations(db *sqlx.DB) (err error) {
	var b bool
	b, err = schemaMigrationsExists(db)
	if err != nil {
		return
	}
	if !b {
		err = createMigrationTable(db)
	}
	return
}
package environment

import (
	"bytes"
	"log"
	"os"
	"strconv"
	"strings"
	"time"
)

// Note: a struct tag like `json:"id"` binds the JSON field name to the Go
// field, e.g. X uint64 `json:"x"` binds JSON key "x" to field X.

// Point define a 2-axis coordinate type
type Point struct {
	Row    int `json:"row"`
	Column int `json:"column"`
}

// Ground is where the characters go by in the map. It has a fixed cost to pass through it
type Ground struct {
	Name string `json:"name"`
	ID   string `json:"id"`
	Cost int    `json:"cost"`
}

// Temple is a special place in the map with a fixed cost to pass through;
// the cost is a little higher than plain ground because the temples keep
// the Gold Knights.
type Temple struct {
	Name       string  `json:"name"`
	Difficulty float64 `json:"difficulty"`
	Position   Point   `json:"position"`
}

// Saint is a character with a power rating and a number of lives.
type Saint struct {
	Name  string  `json:"name"`
	Power float64 `json:"power"`
	Lives int     `json:"lives"`
}

// Environment of the game
type Environment struct {
	AvailableTime float64    `json:"availableTime"`
	Start         Point      `json:"start"`
	End           Point      `json:"end"`
	Grounds       []Ground   `json:"grounds"`
	Temples       []Temple   `json:"temples"`
	Saints        []Saint    `json:"saints"`
	Map           [][]string `json:"map"`
	printed       bool // whether Print has already drawn a frame
}

// String renders the map as text: a column-number header line, then one
// line per map row prefixed with its row number, translating each cell
// code into a display symbol.
func (m Environment) String() string {
	var buffer bytes.Buffer
	buffer.WriteString(" ")
	// Header: each cell is 2 characters wide; every 10th column gets its index.
	for i := int(0); i < 2*int(len(m.Map)); i = i + 1 {
		if (i % 20) == 0 {
			buffer.WriteString(strconv.FormatInt(int64(i/2), 10))
		} else {
			buffer.WriteString(" ")
		}
	}
	buffer.WriteString("\r\n")
	// aux right-aligns row numbers so single digits line up under two-digit rows.
	aux := func(numlines int) string {
		if numlines >= 10 { // two digits: print as-is
			return strconv.FormatInt(int64(numlines), 10)
		}
		// one digit: pad with a leading space
		return " " + strconv.FormatInt(int64(numlines), 10)
	}
	for numlines, lines := range m.Map {
		buffer.WriteString(aux(numlines) + ": ")
		for _, c := range lines { // print each symbol of the map row
			buffer.WriteString(" ")
			// Cell code -> display symbol. NOTE(review): "_" renders as "T"
			// (temple?) while "S"/"E" pass through unchanged — confirm the
			// intended legend against the map files.
			switch c {
			case "M":
				buffer.WriteString(" ")
			case "P":
				buffer.WriteString("~")
			case "R":
				buffer.WriteString("=")
			case "_":
				buffer.WriteString("T")
			case "S":
				buffer.WriteString("S")
			case "E":
				buffer.WriteString("E")
			default:
				// "Invalid character" — aborts the whole program on unknown codes.
				log.Fatalln("Caracter Inválido: ", c)
			}
		}
		buffer.WriteString("\r\n")
	}
	return buffer.String()
}

// clear erases the previous rendering from the terminal: "\033[A\033[2K"
// is the ANSI sequence "cursor up one line" + "erase entire line",
// repeated `amount` times.
func clear(amount int) {
	for i := int(0); i < amount; i++ {
		os.Stdout.WriteString("\033[A\033[2K")
	}
	// NOTE(review): Seek/Truncate only make sense when stdout is redirected
	// to a regular file; on a terminal they fail silently.
	os.Stdout.Seek(0, 0)
	os.Stdout.Truncate(0)
	os.Stdout.Sync()
}

// Print draws the map to stdout, clearing the previous frame first (if
// any), then sleeps 2<<31 nanoseconds (~4.3s) — presumably a frame delay;
// TODO confirm the intended duration.
func (m *Environment) Print() {
	dat := []byte(m.String())
	i := int(0)
	// Work-around to count the rendered lines (abuses strings.Map as a foreach).
	strings.Map(func(r rune) rune {
		if r == '\n' {
			i = i + 1
		}
		return r
	}, m.String())
	if m.printed {
		clear(int(i))
	} else {
		m.printed = true
	}
	os.Stdout.Write(dat)
	os.Stdout.Sync()
	time.Sleep(2 << 31)
}
package exec

import (
	"fmt"

	"github.com/cloudposse/atmos/pkg/schema"
	u "github.com/cloudposse/atmos/pkg/utils"
)

// processHelp prints usage help for `atmos <componentType>` (terraform or
// helmfile): for the component type as a whole when command is empty, or
// for one specific native command otherwise, then delegates to the native
// CLI's own `--help`.
func processHelp(componentType string, command string) error {
	cliConfig := schema.CliConfiguration{}
	cliConfig.Logs.Level = u.LogLevelTrace

	if len(command) == 0 {
		u.PrintMessage(fmt.Sprintf("'atmos' supports all native '%s' commands.\n", componentType))
		u.PrintMessage("In addition, the 'component' argument and 'stack' flag are required to generate the variables and backend config for the component in the stack.\n")
		u.PrintMessage(fmt.Sprintf("atmos %s <command> <component> -s <stack> [options]", componentType))
		u.PrintMessage(fmt.Sprintf("atmos %s <command> <component> --stack <stack> [options]", componentType))

		if componentType == "terraform" {
			u.PrintMessage("\nAdditions and differences from native terraform:")
			u.PrintMessage(" - before executing other 'terraform' commands, 'atmos' runs 'terraform init'")
			u.PrintMessage(" - you can skip over atmos calling 'terraform init' if you know your project is already in a good working state by using " +
				"the '--skip-init' flag like so 'atmos terraform <command> <component> -s <stack> --skip-init")
			u.PrintMessage(" - 'atmos terraform deploy' command executes 'terraform apply -auto-approve' (sets the '-auto-approve' flag when running 'terraform apply')")
			u.PrintMessage(" - 'atmos terraform deploy' command supports '--deploy-run-init=true/false' flag to enable/disable running 'terraform init' " +
				"before executing the command")
			u.PrintMessage(" - 'atmos terraform apply' and 'atmos terraform deploy' commands support '--from-plan' flag. If the flag is specified, " +
				"the commands will use the planfile previously generated by 'atmos terraform plan' command instead of generating a new planfile")
			// FIX: removed the duplicated word in "commands commands support".
			u.PrintMessage(" - 'atmos terraform apply' and 'atmos terraform deploy' commands support '--planfile' flag to specify the path " +
				"to a planfile. The '--planfile' flag should be used instead of the planfile argument in the native 'terraform apply <planfile>' command")
			u.PrintMessage(" - 'atmos terraform clean' command deletes the '.terraform' folder, '.terraform.lock.hcl' lock file, " +
				"and the previously generated 'planfile' and 'varfile' for the specified component and stack. Use --skip-lock-file flag to skip deleting the lock file.")
			u.PrintMessage(" - 'atmos terraform workspace' command first runs 'terraform init -reconfigure', then 'terraform workspace select', " +
				"and if the workspace was not created before, it then runs 'terraform workspace new'")
			u.PrintMessage(" - 'atmos terraform import' command searches for 'region' in the variables for the specified component and stack, " +
				"and if it finds it, sets 'AWS_REGION=<region>' ENV var before executing the command")
			u.PrintMessage(" - 'atmos terraform generate backend' command generates a backend config file for an 'atmos' component in a stack")
			u.PrintMessage(" - 'atmos terraform generate backends' command generates backend config files for all 'atmos' components in all stacks")
			u.PrintMessage(" - 'atmos terraform generate varfile' command generates a varfile for an 'atmos' component in a stack")
			u.PrintMessage(" - 'atmos terraform generate varfiles' command generates varfiles for all 'atmos' components in all stacks")
			u.PrintMessage(" - 'atmos terraform shell' command configures an environment for an 'atmos' component in a stack and starts a new shell " +
				"allowing executing all native terraform commands inside the shell without using atmos-specific arguments and flags")
		}

		if componentType == "helmfile" {
			u.PrintMessage("\nAdditions and differences from native helmfile:")
			u.PrintMessage(" - 'atmos helmfile generate varfile' command generates a varfile for the component in the stack")
			u.PrintMessage(" - 'atmos helmfile' commands support '[global options]' using the command-line flag '--global-options'. " +
				"Usage: atmos helmfile <command> <component> -s <stack> [command options] [arguments] --global-options=\"--no-color --namespace=test\"")
			u.PrintMessage(" - before executing the 'helmfile' commands, 'atmos' runs 'aws eks update-kubeconfig' to read kubeconfig from " +
				"the EKS cluster and use it to authenticate with the cluster. This can be disabled in 'atmos.yaml' CLI config " +
				"by setting 'components.helmfile.use_eks' to 'false'")
		}

		// Fall through to the native CLI's own top-level help.
		err := ExecuteShellCommand(cliConfig, componentType, []string{"--help"}, "", nil, false, "")
		if err != nil {
			return err
		}
	} else {
		u.PrintMessage(fmt.Sprintf("'atmos' supports native '%s %s' command with all the options, arguments and flags.\n", componentType, command))
		u.PrintMessage("In addition, 'component' and 'stack' are required in order to generate variables for the component in the stack.\n")
		u.PrintMessage(fmt.Sprintf("atmos %s %s <component> -s <stack> [options]", componentType, command))
		u.PrintMessage(fmt.Sprintf("atmos %s %s <component> --stack <stack> [options]", componentType, command))

		// Show the native CLI's help for this specific subcommand.
		err := ExecuteShellCommand(cliConfig, componentType, []string{command, "--help"}, "", nil, false, "")
		if err != nil {
			return err
		}
	}

	return nil
}
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fhir

import "encoding/json"

// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND
// NOTE(review): any change here should be made in the generator, not in
// this file; only comments were added during review.

// InsurancePlan is documented here http://hl7.org/fhir/StructureDefinition/InsurancePlan
// Fields mirror the FHIR resource; pointer/slice fields are optional
// (omitted from BSON/JSON when empty), value-typed fields are required
// by the FHIR specification.
type InsurancePlan struct {
	Id                *string                 `bson:"id,omitempty" json:"id,omitempty"`
	Meta              *Meta                   `bson:"meta,omitempty" json:"meta,omitempty"`
	ImplicitRules     *string                 `bson:"implicitRules,omitempty" json:"implicitRules,omitempty"`
	Language          *string                 `bson:"language,omitempty" json:"language,omitempty"`
	Text              *Narrative              `bson:"text,omitempty" json:"text,omitempty"`
	Extension         []Extension             `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension             `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Identifier        []Identifier            `bson:"identifier,omitempty" json:"identifier,omitempty"`
	Status            *PublicationStatus      `bson:"status,omitempty" json:"status,omitempty"`
	Type              []CodeableConcept       `bson:"type,omitempty" json:"type,omitempty"`
	Name              *string                 `bson:"name,omitempty" json:"name,omitempty"`
	Alias             []string                `bson:"alias,omitempty" json:"alias,omitempty"`
	Period            *Period                 `bson:"period,omitempty" json:"period,omitempty"`
	OwnedBy           *Reference              `bson:"ownedBy,omitempty" json:"ownedBy,omitempty"`
	AdministeredBy    *Reference              `bson:"administeredBy,omitempty" json:"administeredBy,omitempty"`
	CoverageArea      []Reference             `bson:"coverageArea,omitempty" json:"coverageArea,omitempty"`
	Contact           []InsurancePlanContact  `bson:"contact,omitempty" json:"contact,omitempty"`
	Endpoint          []Reference             `bson:"endpoint,omitempty" json:"endpoint,omitempty"`
	Network           []Reference             `bson:"network,omitempty" json:"network,omitempty"`
	Coverage          []InsurancePlanCoverage `bson:"coverage,omitempty" json:"coverage,omitempty"`
	Plan              []InsurancePlanPlan     `bson:"plan,omitempty" json:"plan,omitempty"`
}

// InsurancePlanContact is a contact party (e.g. for customer service)
// of the plan.
type InsurancePlanContact struct {
	Id                *string          `bson:"id,omitempty" json:"id,omitempty"`
	Extension         []Extension      `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension      `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Purpose           *CodeableConcept `bson:"purpose,omitempty" json:"purpose,omitempty"`
	Name              *HumanName       `bson:"name,omitempty" json:"name,omitempty"`
	Telecom           []ContactPoint   `bson:"telecom,omitempty" json:"telecom,omitempty"`
	Address           *Address         `bson:"address,omitempty" json:"address,omitempty"`
}

// InsurancePlanCoverage describes a coverage type and its benefits;
// Type and Benefit carry no omitempty because they are mandatory.
type InsurancePlanCoverage struct {
	Id                *string                        `bson:"id,omitempty" json:"id,omitempty"`
	Extension         []Extension                    `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension                    `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Type              CodeableConcept                `bson:"type" json:"type"`
	Network           []Reference                    `bson:"network,omitempty" json:"network,omitempty"`
	Benefit           []InsurancePlanCoverageBenefit `bson:"benefit" json:"benefit"`
}

// InsurancePlanCoverageBenefit lists a specific benefit provided under
// a coverage.
type InsurancePlanCoverageBenefit struct {
	Id                *string                             `bson:"id,omitempty" json:"id,omitempty"`
	Extension         []Extension                         `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension                         `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Type              CodeableConcept                     `bson:"type" json:"type"`
	Requirement       *string                             `bson:"requirement,omitempty" json:"requirement,omitempty"`
	Limit             []InsurancePlanCoverageBenefitLimit `bson:"limit,omitempty" json:"limit,omitempty"`
}

// InsurancePlanCoverageBenefitLimit expresses a limit on a benefit
// (e.g. maximum visits per year).
type InsurancePlanCoverageBenefitLimit struct {
	Id                *string          `bson:"id,omitempty" json:"id,omitempty"`
	Extension         []Extension      `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension      `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Value             *Quantity        `bson:"value,omitempty" json:"value,omitempty"`
	Code              *CodeableConcept `bson:"code,omitempty" json:"code,omitempty"`
}

// InsurancePlanPlan describes a specific product/plan offered under
// this insurance plan.
type InsurancePlanPlan struct {
	Id                *string                         `bson:"id,omitempty" json:"id,omitempty"`
	Extension         []Extension                     `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension                     `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Identifier        []Identifier                    `bson:"identifier,omitempty" json:"identifier,omitempty"`
	Type              *CodeableConcept                `bson:"type,omitempty" json:"type,omitempty"`
	CoverageArea      []Reference                     `bson:"coverageArea,omitempty" json:"coverageArea,omitempty"`
	Network           []Reference                     `bson:"network,omitempty" json:"network,omitempty"`
	GeneralCost       []InsurancePlanPlanGeneralCost  `bson:"generalCost,omitempty" json:"generalCost,omitempty"`
	SpecificCost      []InsurancePlanPlanSpecificCost `bson:"specificCost,omitempty" json:"specificCost,omitempty"`
}

// InsurancePlanPlanGeneralCost captures overall costs associated with
// a plan.
type InsurancePlanPlanGeneralCost struct {
	Id                *string          `bson:"id,omitempty" json:"id,omitempty"`
	Extension         []Extension      `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension      `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Type              *CodeableConcept `bson:"type,omitempty" json:"type,omitempty"`
	GroupSize         *int             `bson:"groupSize,omitempty" json:"groupSize,omitempty"`
	Cost              *Money           `bson:"cost,omitempty" json:"cost,omitempty"`
	Comment           *string          `bson:"comment,omitempty" json:"comment,omitempty"`
}

// InsurancePlanPlanSpecificCost captures costs for a specific benefit
// category; Category is mandatory.
type InsurancePlanPlanSpecificCost struct {
	Id                *string                                `bson:"id,omitempty" json:"id,omitempty"`
	Extension         []Extension                            `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension                            `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Category          CodeableConcept                        `bson:"category" json:"category"`
	Benefit           []InsurancePlanPlanSpecificCostBenefit `bson:"benefit,omitempty" json:"benefit,omitempty"`
}

// InsurancePlanPlanSpecificCostBenefit lists a benefit within a
// specific-cost category.
type InsurancePlanPlanSpecificCostBenefit struct {
	Id                *string                                    `bson:"id,omitempty" json:"id,omitempty"`
	Extension         []Extension                                `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension                                `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Type              CodeableConcept                            `bson:"type" json:"type"`
	Cost              []InsurancePlanPlanSpecificCostBenefitCost `bson:"cost,omitempty" json:"cost,omitempty"`
}

// InsurancePlanPlanSpecificCostBenefitCost lists a cost associated
// with a specific-cost benefit.
type InsurancePlanPlanSpecificCostBenefitCost struct {
	Id                *string           `bson:"id,omitempty" json:"id,omitempty"`
	Extension         []Extension       `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension       `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Type              CodeableConcept   `bson:"type" json:"type"`
	Applicability     *CodeableConcept  `bson:"applicability,omitempty" json:"applicability,omitempty"`
	Qualifiers        []CodeableConcept `bson:"qualifiers,omitempty" json:"qualifiers,omitempty"`
	Value             *Quantity         `bson:"value,omitempty" json:"value,omitempty"`
}

// OtherInsurancePlan is an alias without the MarshalJSON method,
// used below so the custom marshaller can delegate to the default
// struct encoding without infinite recursion.
type OtherInsurancePlan InsurancePlan

// MarshalJSON marshals the given InsurancePlan as JSON into a byte slice
func (r InsurancePlan) MarshalJSON() ([]byte, error) {
	// Wrap the plan so the required "resourceType" discriminator is
	// always emitted alongside the resource's own fields.
	return json.Marshal(struct {
		OtherInsurancePlan
		ResourceType string `json:"resourceType"`
	}{
		OtherInsurancePlan: OtherInsurancePlan(r),
		ResourceType:       "InsurancePlan",
	})
}

// UnmarshalInsurancePlan unmarshals a InsurancePlan.
func UnmarshalInsurancePlan(b []byte) (InsurancePlan, error) {
	var insurancePlan InsurancePlan
	if err := json.Unmarshal(b, &insurancePlan); err != nil {
		return insurancePlan, err
	}
	return insurancePlan, nil
}
/*
Copyright 2019 Baidu, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package reporter

import (
	"testing"

	"github.com/stretchr/testify/assert"
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/baidu/ote-stack/pkg/clustermessage"
)

// newDaemonset builds a minimal DaemonSet fixture using the shared
// test constants `name` and `namespace` (declared elsewhere in this
// package's test fixtures).
func newDaemonset() *appsv1.DaemonSet {
	return &appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
	}
}

// newDaemonsetReporter wires a DaemonsetReporter to a fresh report
// context from the fixture, sharing the context's SyncChan.
func (f *fixture) newDaemonsetReporter() *DaemonsetReporter {
	ctx := f.newReportContext()
	daemonsetReporter := &DaemonsetReporter{
		ctx:      ctx,
		SyncChan: ctx.SyncChan,
	}
	return daemonsetReporter
}

// TestStartDaemonsetReporter checks that the reporter starts with a
// valid context and rejects an empty (invalid) one.
func TestStartDaemonsetReporter(t *testing.T) {
	f := newFixture(t)
	ctx := f.newReportContext()
	err := startDaemonsetReporter(ctx)
	assert.Nil(t, err)

	// An empty context is invalid and must be rejected.
	ctx = &ReporterContext{}
	err = startDaemonsetReporter(ctx)
	assert.NotNil(t, err)
}

// TestHandleDaemonset verifies that handling a DaemonSet emits an
// EdgeReport message tagged with the fixture cluster name.
func TestHandleDaemonset(t *testing.T) {
	f := newFixture(t)
	daemonset := newDaemonset()
	daemonsetReporter := f.newDaemonsetReporter()
	daemonsetReporter.handleDaemonset(daemonset)
	data := <-daemonsetReporter.SyncChan
	assert.NotNil(t, data)
	assert.Equal(t, clustermessage.CommandType_EdgeReport, data.Head.Command)
	assert.Equal(t, clusterName, data.Head.ClusterName)
	close(daemonsetReporter.SyncChan)
}

// TestDeleteDaemonset verifies that deleting a DaemonSet also emits an
// EdgeReport message tagged with the fixture cluster name.
func TestDeleteDaemonset(t *testing.T) {
	f := newFixture(t)
	daemonset := newDaemonset()
	daemonsetReporter := f.newDaemonsetReporter()
	daemonsetReporter.deleteDaemonset(daemonset)
	data := <-daemonsetReporter.SyncChan
	assert.NotNil(t, data)
	assert.Equal(t, clustermessage.CommandType_EdgeReport, data.Head.Command)
	assert.Equal(t, clusterName, data.Head.ClusterName)
	close(daemonsetReporter.SyncChan)
}

// TestDsSendToSyncChan verifies that a resource-status update pushed
// through sendToSyncChan arrives as a well-formed EdgeReport message.
func TestDsSendToSyncChan(t *testing.T) {
	f := newFixture(t)
	daemonset := newDaemonset()
	daemonsetReporter := f.newDaemonsetReporter()
	drs := &DaemonsetResourceStatus{
		UpdateMap: map[string]*appsv1.DaemonSet{
			name: daemonset,
		},
	}
	daemonsetReporter.sendToSyncChan(drs)
	data := <-daemonsetReporter.SyncChan
	assert.NotNil(t, data)
	assert.Equal(t, clustermessage.CommandType_EdgeReport, data.Head.Command)
	assert.Equal(t, clusterName, data.Head.ClusterName)
	close(daemonsetReporter.SyncChan)
}
package main import ( "fmt" "os" "strings" "rand" "time" "bytes" "strconv" ) const ( MAX_PHONES = 20 DICTIONARY_FILENAME = "/usr/share/dict/web2" DICTIONARY_SIZE = 500000 // words (strings) BUFFER_SIZE = 100 // bytes ) var soundexDict []soundex type soundex struct { word string // plain word code string // soundex code of word } // values for soundex conversion var soundexValue = map[byte] byte { 'b': '1', 'f': '1', 'p': '1', 'v': '1', 'c': '2', 'g': '2', 'j': '2', 'k': '2', 'q': '2', 's': '2', 'x': '2', 'z': '2', 'd': '3', 't': '3', 'l': '4', 'm': '5', 'n': '5', 'r': '6', } func hamming(word string) string { return hammingRecursion(word, 0) } func hammingRecursion(word string, recDepth int) string { if recDepth > 1 { // limit recursion return word } codeByte := stringToByte(soundexCode(word)) if recDepth > 0 { // mutate a soundex code a little y := rand.Intn(6) // new soundex digit p := rand.Intn(3) // new soundex digit position for i := 0; i < 4; i++ { if i == p { codeByte[i] = strconv.Itoa(y)[0] } } } table := make([]string, len(soundexDict)/2) // reserve space for tmp table counter := 0 for i := 0; i < len(soundexDict); i++ { if bytes.Compare(stringToByte(soundexDict[i].code), codeByte) == 0 && len(soundexDict[i].word) == len(word) { table[counter] = soundexDict[i].word counter++ } } table = table[0:counter] x := rand.Intn(counter) if counter < 2 { // recursively search for a new word with slightly less strict rules return hammingRecursion(word, recDepth+1) } return table[x] // return random matching word } func sort(s []soundex) { quicksort(s, 0, len(s)-1) } func quicksort(s []soundex, left int, right int) { if right>left { // choose pivot point from the middle pivot := partition(s, left, right, (left+right)/2) quicksort(s, left, pivot-1) quicksort(s, pivot+1, right) } } // partition for quicksort func partition(s []soundex, left int, right int, pivot int) int { pivotVal := stringToByte(s[pivot].code) s[pivot], s[right] = s[right], s[pivot] store := left for i 
:= left; i< right; i++ { soundexVal := stringToByte(s[i].code) if bytes.Compare(soundexVal, pivotVal) <=0 { s[i], s[store] = s[store], s[i] store++ } } s[store], s[right] = s[right], s[store] return store } // calculate soundex code for the given string func soundexCode(word string) string { w := stringToByte(strings.ToLower(word)) for i := 1; i < len(w); i++ { // replace consonants with their soundex code tmp, ok := soundexValue[w[i]] if ok { w[i] = tmp } else { w[i] = '0' } } w2 := make([]byte, 50) counter := 0 var prevChar uint8 = '*' for i := 0; i < len(w); i++ { // strip repeating digits (not 0's) if w[i] != '0' && w[i] == prevChar { // skip character } else { w2[counter] = w[i] prevChar = w[i] counter++ } } w2 = w2[0:counter] // trim w2 to correct size w = w2 w2 = make([]byte, 50) counter = 0 for i := 0; i < len(w); i++ { // strip all 0's if w[i] == '0' { // skip character } else { w2[counter] = w[i] counter++ } } w2 = w2[0:counter] // trim w2 to correct size counter = 0 for i := 0; i < len(w2); i++ { // copy w2->w, don't copy vowels if w2[i] != '*' { w[counter] = w2[i] counter++ } } w = w[0:counter] // trim w to correct size for ;len(w) < 4; { // extend the size to 4 digits if necessary w = w[0:len(w)+1] w[len(w)-1] = '0' } w = w[0:4] // limit the soundex code to 4 characters return string(w) } // converts the given string into a byte array func stringToByte(s string) []byte { // borrowed from golan.org cap := len(s) + 5 b := make([]byte, len(s), cap) for i := 0; i < len(s); i++ { b[i] = s[i] } return b } // locates a word from the dictionary func search(word string) int { low := 0 up := len(soundexDict)-1 var mid int for low < up { mid = (up + low) / 2 bWord := stringToByte(strings.ToLower(soundexCode(word))) bDictWord := stringToByte(strings.ToLower(soundexDict[mid].code)) comp := bytes.Compare(bWord, bDictWord) if comp < 0 { up = mid } else if comp > 0 { low = mid + 1 } else { return mid } } return -1 // word not found from dictionary } func phone(input 
chan []string, output chan []string, phoneNumber int) { message := <-input fmt.Printf ("phone %d received a message: %v\n", phoneNumber, message) mutateMessage(message) output <- message } func createMessage() []string { tmp := "The quick brown fox jumps over the lazy dog" return strings.Split(strings.ToLower(tmp), " ", 0) } func mutateMessage(message []string) { i := rand.Intn(len(message)) // choose a word to be mutated message[i] = hamming(message[i]) // get new word using soundex && hamming code } func readDictionaryFile () { soundexDict = make([]soundex, DICTIONARY_SIZE) SEPARATOR := "\n" dictionarySize := len(soundexDict) counter := 0 name := DICTIONARY_FILENAME permissions := 0666 file, err := os.Open(name, os.O_RDONLY, permissions) defer file.Close() if err == nil { buffer := make([]byte, BUFFER_SIZE*2) n, err := file.Read(buffer[0:BUFFER_SIZE]) startIndex := 0 j := 0 for err == nil && counter<dictionarySize { array := strings.Split(string(buffer[0:startIndex+n]), SEPARATOR, 0) wordLength := 0 for i := 0; i < len(array)-1; i++ { if counter < dictionarySize { wordLength += len(array[i])+1 word := strings.ToLower(array[i]) soundexDict[counter] = soundex{word, soundexCode(word)} if counter>dictionarySize-1 { break } } counter++ } if wordLength<n+j { tmp := j for j = 0; j < n-wordLength+tmp; j++ { // copy leftover to the beginning buffer[j] = buffer[wordLength+j] } startIndex = j // don't fill buffer from beginning } else { j = 0 startIndex = 0 // fill buffer from beginning } n, err = file.Read(buffer[startIndex:startIndex+BUFFER_SIZE-j]) } } fmt.Printf("dictionary contains %d words\n", counter) soundexDict = soundexDict[0:counter-1] // shrink soundexDict to actual size sort(soundexDict) } func main() { rand.Seed(time.Nanoseconds()) readDictionaryFile() message := createMessage() var firstInputChannel chan []string var lastOutputChannel chan []string var previousOutputChannel chan []string for i := 0; i<MAX_PHONES; i++ { var inputChannel chan []string 
outputChannel := make(chan []string) if i==0 { inputChannel = make(chan []string) firstInputChannel = inputChannel previousOutputChannel = outputChannel } else if i==MAX_PHONES-1 { lastOutputChannel = outputChannel } if i>0 { inputChannel = previousOutputChannel previousOutputChannel = outputChannel } // start a phone go phone(inputChannel, outputChannel, i) } fmt.Printf ("sending message to the first phone: %v\n", message) firstInputChannel <- message reply := <-lastOutputChannel fmt.Printf ("reply: %v\n", reply) }
package worker import ( "bytes" "encoding/json" "errors" "fmt" "net/http" "time" ) const footerMsg = ` Показаны %d нод(ы) из %d, остальные ищи в логах. Ошибки могут быть из-за того, что не забутстраплены беты/сенды из списка, на этих нодах высокое значение Load Average либо остановлен redis@shared.service. Для более подробной информации включи DEBUG в джобе. ` type slackMessage struct { Attachments []attachmentStruct `json:"attachments"` } type attachmentStruct struct { Color string `json:"color"` Title string `json:"title"` TitleLink string `json:"title_link"` Text string `json:"text"` Fields []fieldsStruct `json:"fields"` Footer string `json:"footer"` } type fieldsStruct struct { Title string `json:"title"` Value string `json:"value"` Short bool `json:"short"` } func (w *workerStruct) appendErrorHost(name string) { w.mt.Lock() w.errorHosts = append(w.errorHosts, fieldsStruct{ Title: "Node", Value: name, Short: true, }) w.mt.Unlock() } func (w *workerStruct) sendSlackMessage() error { var lastFourNode []fieldsStruct if len(w.errorHosts) > 4 { lastFourNode = w.errorHosts[0:4] } else { lastFourNode = w.errorHosts } msg := &slackMessage{ Attachments: []attachmentStruct{ { Color: "warning", Title: "Ошибка копирования справочников", TitleLink: w.config.BuildUrl, Text: "На следующие ноды не удалось скопировать справочники (refs:*)\nиз продового редиса:", Fields: lastFourNode, Footer: fmt.Sprintf(footerMsg, len(lastFourNode), len(w.errorHosts)), }, }, } msgBody, err := json.Marshal(&msg) if err != nil { return err } req, err := http.NewRequest(http.MethodPost, w.config.SlackHookUrl, bytes.NewBuffer(msgBody)) if err != nil { return err } req.Header.Add("Content-Type", "application/json") client := &http.Client{Timeout: 5 * time.Second} resp, err := client.Do(req) if err != nil { return err } buf := new(bytes.Buffer) buf.ReadFrom(resp.Body) if buf.String() != "ok" { return errors.New("response from slack not ok") } return nil }
// toLowerCase returns str with every ASCII letter 'A'-'Z' shifted to
// its lower-case form. All other characters — digits, punctuation and
// non-ASCII letters — pass through unchanged.
func toLowerCase(str string) string {
	// Working on raw bytes is safe here: the bytes of 'A'-'Z' never
	// occur inside a multi-byte UTF-8 sequence.
	b := []byte(str)
	for i, c := range b {
		if 'A' <= c && c <= 'Z' {
			b[i] = c + ('a' - 'A')
		}
	}
	return string(b)
}
package main

import "fmt"

// main demonstrates how the same integer/rune values render under
// different fmt verbs: binary, octal (with prefix), decimal, hex
// (with prefix) and as a one-rune string, plus the UTF-8 byte
// representation of several runes.
func main() {
	// BUG FIX: string(i) on an int is flagged by go vet (Go 1.15+);
	// string(rune(i)) is the required spelling for "one-rune string".
	i := 38
	fmt.Printf("%7b %#3o %d %#x %s \n", i, i, i, i, string(rune(i)))
	i = 40
	fmt.Printf("%7b %#3o %d %#x %s \n", i, i, i, i, string(rune(i)))
	ch := 'e'
	fmt.Printf("\n%T\n", ch) // rune is an alias for int32
	fmt.Printf("%7b %#3o %d %#x %s \n", ch, ch, ch, ch, string(ch))
	ch = 'E'
	fmt.Printf("\n%T\n", ch)
	fmt.Printf("%7b %#3o %d %#x %s \n", ch, ch, ch, ch, string(ch))
	i = 958 // U+03BE, Greek small letter xi
	fmt.Printf("%7b %#3o %d %#x %s \n", i, i, i, i, string(rune(i)))
	ch = 'P'
	fmt.Printf("\n%T\n", ch)
	fmt.Printf("%7b %#3o %d %#x %s \n", ch, ch, ch, ch, string(ch))
	// Show each rune next to its UTF-8 encoding: ASCII runes take one
	// byte, the CJK/kana runes below take three.
	ch = 'A'
	fmt.Printf("%v \t %v \t\t\t %b\n", string(ch), []byte(string(ch)), ch)
	ch = '$'
	fmt.Printf("%v \t %v \t\t\t %b\n", string(ch), []byte(string(ch)), ch)
	ch = 'こ'
	fmt.Printf("%v \t %v \t\t\t %b\n", string(ch), []byte(string(ch)), ch)
	ch = '世'
	fmt.Printf("%v \t %v \t\t\t %b\n", string(ch), []byte(string(ch)), ch)
}
package main import ( "archive/tar" "archive/zip" "bytes" "compress/gzip" "context" "errors" "fmt" "io" "io/ioutil" "net/http" "os" "os/exec" "path/filepath" "strings" "github.com/AlexanderEkdahl/rope/version" ) // ParseSdistFilename returns a Sdist package from a given filename func ParseSdistFilename(filename, suffix string) (*Sdist, error) { sep := strings.LastIndex(filename, "-") if sep < 0 { return nil, fmt.Errorf("expected sdist filename to be <name>-<version>%s, got: %s", suffix, filename) } versionString := strings.TrimSuffix(filename, suffix)[sep+1:] v, valid := version.Parse(versionString) if !valid { return nil, fmt.Errorf("invalid version: '%s'", versionString) } return &Sdist{ name: NormalizePackageName(filename[:sep]), version: v, filename: filename, suffix: suffix, }, nil } // Sdist is an abstraction over an "sdist" source distribution. // This format is deprecated in favour of the Wheel format for // distributing binary packages. // // Installing sdist packages requires invoking the Python // interpreter which may in turn execute arbitary code. type Sdist struct { name string // Canonical name version version.Version filename string suffix string // url is only set when the package was found in a remote package repository. url string // Wheel built from source distribituion wheel *Wheel } // Name returns the canonical name of the source distribution package. func (s *Sdist) Name() string { return s.name } // Version returns the canonical version of the source distribution package. func (s *Sdist) Version() version.Version { return s.version } // Dependencies returns the transitive dependencies of this package. func (s *Sdist) Dependencies() []Dependency { return nil } func (s *Sdist) extractDependencies(ctx context.Context) error { // if s.wheel == nil { // if err := s.convert(ctx); err != nil { // return fmt.Errorf("converting sdist to wheel: %w", err) // } // } return nil } // Shim to wrap setup.py invocation with setuptools. 
This allows rope // to install legacy packages. This is the same method as used by pip. // // https://github.com/pypa/pip/blob/9cbe8fbdd0a1bd1bd4e483c9c0a556e9910ef8bb/src/pip/_internal/utils/setuptools_build.py#L14-L20 const setuptoolsShim = `import sys, setuptools, tokenize; sys.argv[0] = 'setup.py'; __file__='setup.py';f=getattr(tokenize, 'open', open)(__file__);code=f.read().replace('\\r\\n', '\\n');f.close();exec(compile(code, __file__, 'exec'))` // convert uses `setuptools` to build a binary distribution from // a source distribution. func (s *Sdist) convert(ctx context.Context) error { fmt.Println("converting sdist:", s.filename) body, err := s.fetch(ctx) if err != nil { return err } defer body.Close() tmp, err := ioutil.TempDir("", fmt.Sprintf("%s-%s-*", s.name, s.version)) if err != nil { return err } defer os.RemoveAll(tmp) switch s.suffix { case ".tar.gz", ".tgz": if err := s.untar(body, tmp); err != nil { return err } case ".zip": if err := s.unzip(body, tmp); err != nil { return err } } root := filepath.Join(tmp, strings.TrimSuffix(s.filename, s.suffix)) if _, err := os.Stat(root); errors.Is(err, os.ErrNotExist) { return fmt.Errorf("invalid source distribution: expected %s to exist after extraction", root) } wheelPath := filepath.Join(tmp, "wheel") installCmd := exec.CommandContext( ctx, "python", "-c", setuptoolsShim, "bdist_wheel", "-d", wheelPath, ) installCmd.Dir = root // Ensure command is not inherenting PYTHONPATH which may inadvertendely // use a version of system dependencies that is too old due to a minimal // version selected by this program... 
installCmd.Env = append(os.Environ(), "PYTHONPATH=") output, err := installCmd.CombinedOutput() if err != nil { fmt.Println(string(output)) return err } matches, err := filepath.Glob(filepath.Join(wheelPath, "*.whl")) if err != nil { return err } if len(matches) != 1 { return fmt.Errorf("expected a single .whl file to be in: %s", wheelPath) } filename := filepath.Base(matches[0]) whl, err := ParseWheelFilename(filename) if err != nil { return err } if !whl.Compatible(env) { return fmt.Errorf("built source distribution is incompatible with the current environment: '%s'", filename) } whl.Path = matches[0] if err := whl.extractDependencies(ctx); err != nil { return fmt.Errorf("failed extracting dependencies from built wheel: %w", err) } // Cache resulting wheel cachedPath, err := cache.AddWheel(whl, matches[0]) if err != nil { return err } whl.Path = cachedPath s.wheel = whl return nil } func (s *Sdist) untar(body io.Reader, tmp string) error { gzipReader, err := gzip.NewReader(body) if err != nil { return err } defer gzipReader.Close() tr := tar.NewReader(gzipReader) for { hdr, err := tr.Next() if err == io.EOF { break } else if err != nil { return fmt.Errorf("reading tar header: %w", err) } switch hdr.Typeflag { case tar.TypeDir: // Some tar files are somehow built without directory entries so // these can not be relied upon. case tar.TypeReg: // TODO: Final directory should be created with 0500 if err := os.MkdirAll(filepath.Dir(filepath.Join(tmp, hdr.Name)), 0777); err != nil { return err } out, err := os.Create(filepath.Join(tmp, hdr.Name)) if err != nil { return err } if _, err := io.Copy(out, tr); err != nil { out.Close() return err } if err := out.Close(); err != nil { return err } } } return nil } func (s *Sdist) unzip(body io.Reader, tmp string) error { // Risks using too much memory by using a memory backed buffer as intermediate storage. 
var buf bytes.Buffer if _, err := io.Copy(&buf, body); err != nil { return err } r, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len())) if err != nil { return err } for _, file := range r.File { f, err := file.Open() if err != nil { return err } if file.FileInfo().IsDir() { continue } target := filepath.Join(tmp, file.Name) if err := os.MkdirAll(filepath.Dir(target), 0777); err != nil { return err } dst, err := os.Create(target) if err != nil { return err } if _, err := io.Copy(dst, f); err != nil { return err } if err := f.Close(); err != nil { return err } } return nil } // Install extracts the source distribution and invokes the Python interpreter to // run a shim around setuptools to create a Python wheel package. If successful // the wheel is then installed. func (s *Sdist) Install(ctx context.Context) (string, error) { if s.wheel == nil { if err := s.convert(ctx); err != nil { return "", fmt.Errorf("converting sdist to wheel: %w", err) } } return s.wheel.Install(ctx) } func (s *Sdist) fetch(ctx context.Context) (io.ReadCloser, error) { r, err := http.NewRequestWithContext(ctx, http.MethodGet, s.url, nil) if err != nil { return nil, err } res, err := http.DefaultClient.Do(r) if err != nil { return nil, err } if res.StatusCode != http.StatusOK { return nil, fmt.Errorf("failed HTTP request: %s", res.Status) } // TODO: Verify checksum return res.Body, nil }
package port

import (
	"errors"

	"github.com/threez/intm/internal/model"
)

// ErrInvalidContent reports malformed interval content.
// NOTE(review): the message text "invalid interval" does not match the
// identifier ErrInvalidContent — confirm which of the two is intended.
var ErrInvalidContent = errors.New("invalid interval")

// IntervalReader is implemented by sources that yield model.Interval
// values one at a time.
type IntervalReader interface {
	// Read reads an interval at a time and returns
	// nil, io.EOF when there are no more intervals
	Read() (*model.Interval, error)
}
package repository import ( "context" "github.com/caos/zitadel/internal/user/model" ) type UserRepository interface { UserByID(ctx context.Context, id string) (*model.UserView, error) CreateUser(ctx context.Context, user *model.User) (*model.User, error) RegisterUser(ctx context.Context, user *model.User, resourceOwner string) (*model.User, error) DeactivateUser(ctx context.Context, id string) (*model.User, error) ReactivateUser(ctx context.Context, id string) (*model.User, error) LockUser(ctx context.Context, id string) (*model.User, error) UnlockUser(ctx context.Context, id string) (*model.User, error) SearchUsers(ctx context.Context, request *model.UserSearchRequest) (*model.UserSearchResponse, error) UserChanges(ctx context.Context, id string, lastSequence uint64, limit uint64, sortAscending bool) (*model.UserChanges, error) GetGlobalUserByEmail(ctx context.Context, email string) (*model.UserView, error) IsUserUnique(ctx context.Context, userName, email string) (bool, error) UserMfas(ctx context.Context, userID string) ([]*model.MultiFactor, error) SetOneTimePassword(ctx context.Context, password *model.Password) (*model.Password, error) RequestSetPassword(ctx context.Context, id string, notifyType model.NotificationType) error ProfileByID(ctx context.Context, userID string) (*model.Profile, error) ChangeProfile(ctx context.Context, profile *model.Profile) (*model.Profile, error) EmailByID(ctx context.Context, userID string) (*model.Email, error) ChangeEmail(ctx context.Context, email *model.Email) (*model.Email, error) CreateEmailVerificationCode(ctx context.Context, userID string) error PhoneByID(ctx context.Context, userID string) (*model.Phone, error) ChangePhone(ctx context.Context, email *model.Phone) (*model.Phone, error) RemovePhone(ctx context.Context, userID string) error CreatePhoneVerificationCode(ctx context.Context, userID string) error AddressByID(ctx context.Context, userID string) (*model.Address, error) ChangeAddress(ctx context.Context, 
address *model.Address) (*model.Address, error) }
package endure import ( "encoding/json" "encoding/xml" "io" yaml "gopkg.in/yaml.v2" ) // NewJSONFileStorage creates a new value of the FileStorage type with // the json.Marshal and json.Unmarshal funcs. func NewJSONFileStorage(filename string) Storage { return &FileStorage{ Filename: filename, Marshal: json.Marshal, Unmarshal: json.Unmarshal, } } // NewXMLFileStorage creates a new value of the FileStorage type with // the xml.Marshal and xml.Unmarshal funcs. func NewXMLFileStorage(filename string) Storage { return &FileStorage{ Filename: filename, Marshal: xml.Marshal, Unmarshal: xml.Unmarshal, } } // NewYAMLFileStorage creates a new value of the FileStorage type with // the yaml.Marshal and yaml.Unmarshal funcs. func NewYAMLFileStorage(filename string) Storage { return &FileStorage{ Filename: filename, Marshal: yaml.Marshal, Unmarshal: yaml.Unmarshal, } } // NewJSONReadWriterStorage creates a new value of the ReadWriterStorage type // with the json.Marshal and json.Unmarshal funcs. func NewJSONReadWriterStorage(rw io.ReadWriter) Storage { return &ReadWriterStorage{ RW: rw, Marshal: json.Marshal, Unmarshal: json.Unmarshal, } } // NewXMLReadWriterStorage creates a new value of the ReadWriterStorage type // with the xml.Marshal and xml.Unmarshal funcs. func NewXMLReadWriterStorage(rw io.ReadWriter) Storage { return &ReadWriterStorage{ RW: rw, Marshal: xml.Marshal, Unmarshal: xml.Unmarshal, } } // NewYAMLReadWriterStorage creates a new value of the ReadWriterStorage type // with the yaml.Marshal and yaml.Unmarshal funcs. func NewYAMLReadWriterStorage(rw io.ReadWriter) Storage { return &ReadWriterStorage{ RW: rw, Marshal: yaml.Marshal, Unmarshal: yaml.Unmarshal, } }
package layout_test

import (
	"testing"

	"github.com/waybeams/assert"
	"github.com/waybeams/waybeams/pkg/ctrl"
	surface "github.com/waybeams/waybeams/pkg/env/fake"
	"github.com/waybeams/waybeams/pkg/fakes"
	"github.com/waybeams/waybeams/pkg/layout"
	"github.com/waybeams/waybeams/pkg/opts"
	"github.com/waybeams/waybeams/pkg/spec"
)

// createStubApp builds a small, fixed three-row application spec
// (header with logo+content, body, footer) used as a fixture by the tests.
func createStubApp() *spec.Spec {
	root := ctrl.VBox(
		opts.Key("root"),
		opts.Width(800),
		opts.Height(600),
		opts.Child(ctrl.HBox(
			opts.Key("header"),
			opts.Padding(5),
			opts.FlexWidth(1),
			opts.Height(80),
			opts.Child(ctrl.Box(opts.Key("logo"), opts.Width(50), opts.Height(50))),
			opts.Child(ctrl.Box(opts.Key("content"), opts.FlexWidth(1), opts.FlexHeight(1))),
		)),
		opts.Child(ctrl.Box(opts.Key("body"), opts.Padding(5), opts.FlexWidth(1), opts.FlexHeight(1))),
		opts.Child(ctrl.Box(opts.Key("footer"), opts.FlexWidth(1), opts.Height(60))),
	)
	return root
}

// TestLayout exercises the layout engine: flex sizing, remainder spreading,
// stack growth, gutters, padding, alignment and min/max constraints.
func TestLayout(t *testing.T) {
	// Each subtest gets a fresh fake rendering surface.
	var fakeSurface = func() spec.Surface {
		return surface.NewSurface()
	}

	t.Run("createStubApp works as expected", func(t *testing.T) {
		root := createStubApp()
		assert.Equal(root.Key(), "root")
		assert.Equal(root.ChildCount(), 3)
	})

	// 152px over three flex(1) children cannot divide evenly; the layout is
	// expected to spread the 2px remainder over the leading children (51/51/50).
	t.Run("Spread remainder", func(t *testing.T) {
		root := layout.Layout(fakes.Fake(
			opts.Width(152),
			opts.LayoutType(spec.HorizontalFlowLayoutType),
			opts.Child(fakes.Fake(opts.FlexWidth(1))),
			opts.Child(fakes.Fake(opts.FlexWidth(1))),
			opts.Child(fakes.Fake(opts.FlexWidth(1))),
		), fakeSurface())

		assert.Equal(root.ChildAt(0).Width(), 51)
		assert.Equal(root.ChildAt(1).Width(), 51)
		assert.Equal(root.ChildAt(2).Width(), 50)
	})

	// A parent with explicit dimensions smaller than its children should be
	// expanded to contain them after layout.
	t.Run("Stack parent dimensions grow to encapsulate children", func(t *testing.T) {
		root := ctrl.Box(
			opts.Key("root"),
			opts.Width(40),
			opts.Height(45),
			opts.Child(ctrl.Box(
				opts.Key("one"),
				opts.Width(50),
				opts.Height(55),
				opts.Child(ctrl.Box(
					opts.Key("two"),
					opts.Width(60),
					opts.Height(65),
				)),
			)),
		)
		layout.Layout(root, fakeSurface())
		one := spec.FirstByKey(root, "one")
		assert.Equal(one.Width(), 60, "one.W")
		assert.Equal(one.Height(), 65, "one.H")
		assert.Equal(root.Width(), 60, "root.W")
		assert.Equal(root.Height(), 65, "root.H")
	})

	t.Run("Oversized flex values should not break layouts", func(t *testing.T) {
		root := ctrl.VBox(
			opts.Width(100),
			opts.Height(120),
			opts.Child(fakes.Fake(
				opts.Key("one"),
				opts.FlexHeight(3),
				opts.FlexWidth(1),
			)),
			opts.Child(fakes.Fake(
				opts.Key("two"),
				opts.Height(20),
				opts.FlexWidth(1),
			)),
		)
		layout.Layout(root, fakeSurface())
		// Prior to a bug fix where we added math.Floor to flowGetUnitSize, we were getting
		// oversizing containers because of floating point remainders.
		assert.Equal(root.Height(), 120)
		assert.Equal(root.ChildAt(0).Height(), 100)
		assert.Equal(root.ChildAt(1).Height(), 20)
	})

	t.Run("GetFlexibleChildren", func(t *testing.T) {
		// Padding on the parent and on one child must be subtracted before the
		// remaining space is divided between the two flex children.
		t.Run("Scales flex children", func(t *testing.T) {
			root := ctrl.HBox(
				opts.Key("root"),
				opts.Padding(5),
				opts.Width(100),
				opts.Height(110),
				opts.Child(ctrl.Box(
					opts.Key("one"),
					opts.Padding(10),
					opts.FlexWidth(1),
					opts.FlexHeight(1),
				)),
				opts.Child(ctrl.Box(
					opts.Key("two"),
					opts.FlexWidth(1),
					opts.FlexHeight(1),
				)),
			)
			layout.Layout(root, fakeSurface())
			one := spec.FirstByKey(root, "one")
			two := spec.FirstByKey(root, "two")
			assert.Equal(one.Width(), 45, "one width")
			assert.Equal(two.Width(), 45, "two width")
			assert.Equal(one.Height(), 100, "one height")
			assert.Equal(two.Height(), 100, "two height")
		})
	})

	// Same remainder-spreading expectation as above, but through the concrete
	// ctrl.HBox/ctrl.Box controls instead of fakes.
	t.Run("Spread remainder", func(t *testing.T) {
		root := ctrl.HBox(
			opts.Width(152),
			opts.Child(ctrl.Box(
				opts.Key("one"),
				opts.FlexWidth(1),
				opts.FlexHeight(1),
			)),
			opts.Child(ctrl.Box(
				opts.Key("two"),
				opts.FlexWidth(1),
				opts.FlexHeight(1),
			)),
			opts.Child(ctrl.Box(
				opts.Key("three"),
				opts.FlexWidth(1),
				opts.FlexHeight(1),
			)),
		)
		layout.Layout(root, fakeSurface())
		one := spec.FirstByKey(root, "one")
		two := spec.FirstByKey(root, "two")
		three := spec.FirstByKey(root, "three")
		assert.Equal(one.Width(), 51)
		assert.Equal(two.Width(), 51)
		assert.Equal(three.Width(), 50)
	})

	// Fixed header (100) + fixed footer (80) leaves 120 of the 300px root
	// height for the flexible content row.
	t.Run("Basic, nested layout", func(t *testing.T) {
		root := ctrl.VBox(
			opts.Key("root"),
			opts.Width(100),
			opts.Height(300),
			opts.Child(ctrl.HBox(
				opts.Key("header"),
				opts.FlexWidth(1),
				opts.Height(100),
				opts.Child(ctrl.Box(
					opts.Key("logo"),
					opts.Width(200),
					opts.Height(100),
				)),
			)),
			opts.Child(ctrl.Box(
				opts.Key("content"),
				opts.FlexHeight(1),
				opts.FlexWidth(1),
			)),
			opts.Child(ctrl.Box(
				opts.Key("footer"),
				opts.Height(80),
				opts.FlexWidth(1),
			)),
		)
		layout.Layout(root, fakeSurface())
		header := spec.FirstByKey(root, "header")
		footer := spec.FirstByKey(root, "footer")
		content := spec.FirstByKey(root, "content")
		assert.Equal(header.Height(), 100)
		assert.Equal(footer.Height(), 80)
		assert.Equal(content.Height(), 120)
	})

	t.Run("Nested, flexible controls should expand", func(t *testing.T) {
		root := ctrl.Box(
			opts.Key("root"),
			opts.Width(100),
			opts.Child(ctrl.Box(
				opts.Key("one"),
				opts.FlexWidth(1),
				opts.Child(ctrl.Box(
					opts.Key("two"),
					opts.FlexWidth(1),
				)),
			)),
		)
		layout.Layout(root, fakeSurface())
		one := spec.FirstByKey(root, "one")
		two := spec.FirstByKey(root, "two")
		assert.Equal(one.Width(), 100)
		assert.Equal(two.Width(), 100)
	})

	// Children start after the 5px padding and are separated by the 10px
	// gutter: y = 5, 5+20+10 = 35, 35+20+10 = 65.
	t.Run("Gutter is supported", func(t *testing.T) {
		root := ctrl.VBox(
			opts.Padding(5),
			opts.Gutter(10),
			opts.Child(ctrl.Box(opts.Width(100), opts.Height(20))),
			opts.Child(ctrl.Box(opts.Width(100), opts.Height(20))),
			opts.Child(ctrl.Box(opts.Width(100), opts.Height(20))),
		)
		layout.Layout(root, fakeSurface())
		kids := root.Children()
		one := kids[0]
		two := kids[1]
		three := kids[2]
		assert.Equal(one.Y(), 5)
		assert.Equal(two.Y(), 35)
		assert.Equal(three.Y(), 65)
	})

	t.Run("Layouts with larger children", func(t *testing.T) {
		// NOTE(review): this subtest never calls layout.Layout — it only
		// checks the declared dimensions; confirm that is intentional.
		t.Run("Does not shrink larger parent", func(t *testing.T) {
			root := ctrl.Box(
				opts.Width(50),
				opts.Height(50),
				opts.Child(ctrl.Box(
					opts.Width(10),
					opts.Height(10),
				)),
			)
			assert.Equal(root.Height(), 50)
			assert.Equal(root.Width(), 50)
		})

		// Two 20px children + 5px gutter + 2*5px padding = 55 on the flow axis,
		// 20 + 2*5 = 30 on the cross axis.
		t.Run("Vertical", func(t *testing.T) {
			root := ctrl.VBox(
				opts.Gutter(5),
				opts.Padding(5),
				opts.Child(ctrl.Box(opts.Width(20), opts.Height(20))),
				opts.Child(ctrl.Box(opts.Width(20), opts.Height(20))),
			)
			layout.Layout(root, fakeSurface())
			assert.Equal(root.Height(), 55)
			assert.Equal(root.Width(), 30)
		})

		t.Run("Horizontal", func(t *testing.T) {
			root := ctrl.HBox(
				opts.Gutter(5),
				opts.Padding(5),
				opts.Child(ctrl.Box(opts.Width(20), opts.Height(20))),
				opts.Child(ctrl.Box(opts.Width(20), opts.Height(20))),
			)
			layout.Layout(root, fakeSurface())
			assert.Equal(root.Height(), 30)
			assert.Equal(root.Width(), 55)
		})
	})

	t.Run("Align center", func(t *testing.T) {
		root := ctrl.Box(
			opts.HAlign(spec.AlignCenter),
			opts.VAlign(spec.AlignCenter),
			opts.Padding(5),
			opts.Width(60),
			opts.Height(60),
			// This should be positioned in the center even though three blew out the size.
			opts.Child(ctrl.Box(opts.Key("one"), opts.Width(75), opts.Height(75))),
			opts.Child(ctrl.Box(opts.Key("two"), opts.Width(50), opts.Height(50))),
			// Three will blow out the assigned parent dimensions.
			opts.Child(ctrl.Box(opts.Key("three"), opts.Width(25), opts.Height(25))),
		)
		layout.Layout(root, fakeSurface())
		one := spec.FirstByKey(root, "one")
		two := spec.FirstByKey(root, "two")
		three := spec.FirstByKey(root, "three")
		assert.Equal(root.Width(), 85)
		assert.Equal(root.Height(), 85)
		assert.Equal(one.X(), 5)
		assert.Equal(one.Y(), 5)
		assert.Equal(two.X(), 17.5)
		assert.Equal(two.Y(), 17.5)
		assert.Equal(three.X(), 30)
		assert.Equal(three.Y(), 30)
	})

	t.Run("Align last", func(t *testing.T) {
		root := ctrl.Box(
			opts.HAlign(spec.AlignRight),
			opts.VAlign(spec.AlignBottom),
			opts.Padding(5),
			opts.Width(60),
			opts.Height(60),
			// This should be positioned in the center even though three blew out.
			opts.Child(ctrl.Box(
				opts.Key("one"),
				opts.Width(75),
				opts.Height(75),
			)),
			opts.Child(ctrl.Box(
				opts.Key("two"),
				opts.Width(50),
				opts.Height(50),
			)),
			// Three will blow out the assigned parent dimensions.
			opts.Child(ctrl.Box(
				opts.Key("three"),
				opts.Width(25),
				opts.Height(25),
			)),
		)
		layout.Layout(root, fakeSurface())
		one := spec.FirstByKey(root, "one")
		two := spec.FirstByKey(root, "two")
		three := spec.FirstByKey(root, "three")
		assert.Equal(root.Width(), 85)
		assert.Equal(root.Height(), 85)
		assert.Equal(one.X(), 5)
		assert.Equal(one.Y(), 5)
		assert.Equal(two.X(), 30)
		assert.Equal(two.Y(), 30)
		assert.Equal(three.X(), 55)
		assert.Equal(three.Y(), 55)
	})

	// Space freed by MaxHeight-capped children is redistributed to the
	// remaining flexible child (100 - 20 - 30 = 50).
	t.Run("Distribute space after limit", func(t *testing.T) {
		root := ctrl.VBox(
			opts.Key("root"),
			opts.Width(100),
			opts.Height(100),
			opts.Child(ctrl.Box(
				opts.Key("one"),
				opts.Width(100),
				opts.FlexHeight(1),
				opts.MaxHeight(20),
			)),
			opts.Child(ctrl.Box(
				opts.Key("two"),
				opts.Width(100),
				opts.FlexHeight(1),
				opts.MaxHeight(30),
			)),
			opts.Child(ctrl.Box(
				opts.Key("three"),
				opts.Width(100),
				opts.FlexHeight(1),
			)),
		)
		layout.Layout(root, fakeSurface())
		one := spec.FirstByKey(root, "one")
		two := spec.FirstByKey(root, "two")
		three := spec.FirstByKey(root, "three")
		assert.Equal(one.Height(), 20)
		assert.Equal(two.Height(), 30)
		assert.Equal(three.Height(), 50)
	})

	t.Run("Todo Item Height", func(t *testing.T) {
		root := ctrl.VBox(
			opts.Key("Todo Items"),
			opts.MinHeight(300),
			opts.FlexWidth(1),
			opts.Child(ctrl.HBox(
				opts.Key("item-0"),
				opts.FlexWidth(1),
				opts.Child(ctrl.Button(
					opts.Key("btn"),
					opts.Text("Some Label Value"),
				)),
			)),
			opts.Child(ctrl.HBox(
				opts.Key("item-1"),
				opts.FlexWidth(1),
				opts.Child(ctrl.Button(
					opts.Key("btn"),
					opts.Text("Other Label Value"),
				)),
			)),
		)
		layout.Layout(root, fakeSurface())
		assert.Equal(root.Height(), 300)
		child := spec.FirstByKey(root, "item-0")
		assert.Equal(child.Width(), 172)
		assert.Equal(child.Height(), 34)
		child = spec.FirstByKey(root, "item-1")
		assert.Equal(child.Y(), 34)
		// NOTE(review): item-0 expects width 172 while item-1 expects 182 for
		// sibling rows with different label text — confirm the measured-text
		// width difference is the intended source of this asymmetry.
		assert.Equal(child.Width(), 182)
		assert.Equal(child.Height(), 34)
	})
}
package kojiapi_test

import (
	"encoding/json"
	"net/http/httptest"
	"testing"

	"github.com/osbuild/osbuild-composer/internal/jobqueue/testjobqueue"
	"github.com/osbuild/osbuild-composer/internal/kojiapi"
	"github.com/osbuild/osbuild-composer/internal/kojiapi/api"
	distro_mock "github.com/osbuild/osbuild-composer/internal/mocks/distro"
	rpmmd_mock "github.com/osbuild/osbuild-composer/internal/mocks/rpmmd"
	"github.com/osbuild/osbuild-composer/internal/upload/koji"
	"github.com/osbuild/osbuild-composer/internal/worker"
	"github.com/stretchr/testify/require"
)

// newTestKojiServer builds a kojiapi.Server wired to mock RPM metadata, a
// default mock distro registry, an in-memory test job queue, and no real
// Koji credentials. It fails the test immediately if any dependency cannot
// be constructed.
func newTestKojiServer(t *testing.T) *kojiapi.Server {
	// Go naming: MixedCaps for locals (was rpm_fixture).
	rpmFixture := rpmmd_mock.BaseFixture()
	rpm := rpmmd_mock.NewRPMMDMock(rpmFixture)
	require.NotNil(t, rpm)

	distros, err := distro_mock.NewDefaultRegistry()
	require.NoError(t, err)
	require.NotNil(t, distros)

	workers := worker.NewServer(nil, testjobqueue.New(), "")
	require.NotNil(t, workers)

	server := kojiapi.NewServer(nil, workers, rpm, distros, map[string]koji.GSSAPICredentials{})
	require.NotNil(t, server)

	return server
}

// TestStatus verifies that GET /status responds 200 with {"status":"OK"}.
func TestStatus(t *testing.T) {
	server := newTestKojiServer(t)
	handler := server.Handler("/api/composer-koji/v1")

	req := httptest.NewRequest("GET", "/api/composer-koji/v1/status", nil)
	req.Header.Set("Content-Type", "application/json")
	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, req)

	resp := rec.Result()
	require.Equal(t, 200, resp.StatusCode)

	var status api.Status
	err := json.NewDecoder(resp.Body).Decode(&status)
	require.NoError(t, err)
	require.Equal(t, "OK", status.Status)
}
package backend

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	gocontext "context"

	dockertypes "github.com/docker/docker/api/types"
	dockercontainer "github.com/docker/docker/api/types/container"
	docker "github.com/docker/docker/client"
	"github.com/docker/go-connections/tlsconfig"
	humanize "github.com/dustin/go-humanize"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/travis-ci/worker/config"
	"github.com/travis-ci/worker/context"
	"github.com/travis-ci/worker/image"
	"github.com/travis-ci/worker/metrics"
	"github.com/travis-ci/worker/ssh"
)

const (
	defaultDockerImageSelectorType = "tag"

	// DockerMinSupportedAPIVersion of 1.24 means the client library in use here
	// can only support docker-engine 1.12 and above
	// (https://docs.docker.com/release-notes/docker-engine/#1120-2016-07-28).
	DockerMinSupportedAPIVersion = "1.24"
)

var (
	defaultDockerNumCPUer       dockerNumCPUer = &stdlibNumCPUer{}
	defaultDockerSSHDialTimeout                = 5 * time.Second
	defaultInspectInterval                     = 500 * time.Millisecond
	defaultExecCmd                             = "bash /home/travis/build.sh"
	defaultTmpfsMap                            = map[string]string{"/run": "rw,nosuid,nodev,exec,noatime,size=65536k"}
	// dockerHelp documents every provider config key surfaced to operators.
	dockerHelp = map[string]string{
		"ENDPOINT / HOST":     "[REQUIRED] tcp or unix address for connecting to Docker",
		"CERT_PATH":           "directory where ca.pem, cert.pem, and key.pem are located (default \"\")",
		"CMD":                 "command (CMD) to run when creating containers (default \"/sbin/init\")",
		"EXEC_CMD":            fmt.Sprintf("command to run via exec/ssh (default %q)", defaultExecCmd),
		"INSPECT_INTERVAL":    fmt.Sprintf("time to wait between container inspections as duration (default %q)", defaultInspectInterval),
		"TMPFS_MAP":           fmt.Sprintf("\"+\"-delimited key:value map of tmpfs mounts (example \"/run:rw,exec+/run/lock:rw,exec\", default %q)", defaultTmpfsMap),
		"MEMORY":              "memory to allocate to each container (0 disables allocation, default \"4G\")",
		"SHM":                 "/dev/shm to allocate to each container (0 disables allocation, default \"64MiB\")",
		"CONTAINER_LABELS":    "comma- or space-delimited key:value pairs of labels to apply to each container (default \"\")",
		"CPUS":                "cpu count to allocate to each container (0 disables allocation, default 2)",
		"CPU_SET_SIZE":        "size of available cpu set (default detected locally via runtime.NumCPU)",
		"NATIVE":              "upload and run build script via docker API instead of over ssh (default false)",
		"PRIVILEGED":          "run containers in privileged mode (default false)",
		"SSH_DIAL_TIMEOUT":    fmt.Sprintf("connection timeout for ssh connections (default %v)", defaultDockerSSHDialTimeout),
		"IMAGE_SELECTOR_TYPE": fmt.Sprintf("image selector type (\"tag\", \"api\", or \"env\", default %q)", defaultDockerImageSelectorType),
		"IMAGE_SELECTOR_URL":  "URL for image selector API, used only when image selector is \"api\"",
		"BINDS":               "Bind mount a volume (example: \"/var/run/docker.sock:/var/run/docker.sock\", default \"\")",
		"SECURITY_OPT":        "Security configuration (example: \"seccomp=unconfined\") to turn off seccomp confinement for the container",
	}
)

func init() {
	Register("docker", "Docker", dockerHelp, newDockerProvider)
}

// dockerNumCPUer abstracts CPU counting so tests can fake the host CPU count.
type dockerNumCPUer interface {
	NumCPU() int
}

// stdlibNumCPUer implements dockerNumCPUer via runtime.NumCPU.
type stdlibNumCPUer struct{}

func (nc *stdlibNumCPUer) NumCPU() int {
	return runtime.NumCPU()
}

// dockerProvider creates and manages one Docker container per build job.
type dockerProvider struct {
	client         docker.CommonAPIClient
	sshDialer      ssh.Dialer
	sshDialTimeout time.Duration

	runPrivileged   bool
	runCmd          []string
	runBinds        []string
	setSecurityOpt  []string
	runMemory       uint64
	runShm          uint64
	runCPUs         uint
	runNative       bool
	execCmd         []string
	inspectInterval time.Duration
	tmpFs           map[string]string
	imageSelector   image.Selector
	containerLabels map[string]string

	httpProxy, httpsProxy, ftpProxy, noProxy string

	// cpuSets tracks which host CPU indices are checked out by running
	// containers; guarded by cpuSetsMutex.
	cpuSetsMutex sync.Mutex
	cpuSets      []bool
}

// dockerInstance is a single running build container.
type dockerInstance struct {
	client       docker.CommonAPIClient
	provider     *dockerProvider
	container    *dockertypes.ContainerJSON
	startBooting time.Time

	imageName string
	runNative bool
}

// dockerTagImageSelector picks an image by matching local image tags.
type dockerTagImageSelector struct {
	client *docker.Client
}

// newDockerProvider reads the provider config and assembles a dockerProvider.
// Parse errors on boolean/integer/duration settings abort startup; malformed
// MEMORY/SHM/CPUS values silently fall back to their defaults.
func newDockerProvider(cfg *config.ProviderConfig) (Provider, error) {
	client, err := buildDockerClient(cfg)
	if err != nil {
		return nil, err
	}

	runNative := false
	if cfg.IsSet("NATIVE") {
		v, err := strconv.ParseBool(cfg.Get("NATIVE"))
		if err != nil {
			return nil, err
		}
		runNative = v
	}

	cpuSetSize := 0
	if defaultDockerNumCPUer != nil {
		cpuSetSize = defaultDockerNumCPUer.NumCPU()
	}

	if cfg.IsSet("CPU_SET_SIZE") {
		v, err := strconv.ParseInt(cfg.Get("CPU_SET_SIZE"), 10, 64)
		if err != nil {
			return nil, err
		}
		cpuSetSize = int(v)
	}

	// Keep at least two slots so a default 2-CPU checkout can succeed.
	if cpuSetSize < 2 {
		cpuSetSize = 2
	}

	privileged := false
	if cfg.IsSet("PRIVILEGED") {
		v, err := strconv.ParseBool(cfg.Get("PRIVILEGED"))
		if err != nil {
			return nil, err
		}
		privileged = v
	}

	cmd := []string{"/sbin/init"}
	if cfg.IsSet("CMD") {
		cmd = strings.Split(cfg.Get("CMD"), " ")
	}

	execCmd := strings.Split(defaultExecCmd, " ")
	if cfg.IsSet("EXEC_CMD") {
		execCmd = strings.Split(cfg.Get("EXEC_CMD"), " ")
	}

	inspectInterval := defaultInspectInterval
	if cfg.IsSet("INSPECT_INTERVAL") {
		v, err := time.ParseDuration(cfg.Get("INSPECT_INTERVAL"))
		if err != nil {
			return nil, err
		}
		inspectInterval = v
	}

	binds := []string{}
	if cfg.IsSet("BINDS") {
		binds = strings.Split(cfg.Get("BINDS"), " ")
	}

	securityOpt := []string{}
	if cfg.IsSet("SECURITY_OPT") {
		securityOpt = strings.Split(cfg.Get("SECURITY_OPT"), " ")
	}

	tmpFs := str2map(cfg.Get("TMPFS_MAP"), " ")
	if len(tmpFs) == 0 {
		tmpFs = defaultTmpfsMap
	}

	// 4 GiB default; parse failures intentionally keep the default.
	memory := uint64(1024 * 1024 * 1024 * 4)
	if cfg.IsSet("MEMORY") {
		if parsedMemory, err := humanize.ParseBytes(cfg.Get("MEMORY")); err == nil {
			memory = parsedMemory
		}
	}

	// 64 MiB default /dev/shm.
	shm := uint64(1024 * 1024 * 64)
	if cfg.IsSet("SHM") {
		if parsedShm, err := humanize.ParseBytes(cfg.Get("SHM")); err == nil {
			shm = parsedShm
		}
	}

	cpus := uint64(2)
	if cfg.IsSet("CPUS") {
		if parsedCPUs, err := strconv.ParseUint(cfg.Get("CPUS"), 10, 64); err == nil {
			cpus = parsedCPUs
		}
	}

	sshDialTimeout := defaultDockerSSHDialTimeout
	if cfg.IsSet("SSH_DIAL_TIMEOUT") {
		sshDialTimeout, err = time.ParseDuration(cfg.Get("SSH_DIAL_TIMEOUT"))
		if err != nil {
			return nil, err
		}
	}

	sshDialer, err := ssh.NewDialerWithPassword("travis")
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create SSH dialer")
	}

	imageSelectorType := defaultDockerImageSelectorType
	if cfg.IsSet("IMAGE_SELECTOR_TYPE") {
		imageSelectorType = cfg.Get("IMAGE_SELECTOR_TYPE")
	}

	imageSelector, err := buildDockerImageSelector(imageSelectorType, client, cfg)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't build docker image selector")
	}

	containerLabels := map[string]string{}
	if cfg.IsSet("CONTAINER_LABELS") {
		containerLabels = str2map(cfg.Get("CONTAINER_LABELS"), " ,")
	}

	httpProxy := cfg.Get("HTTP_PROXY")
	httpsProxy := cfg.Get("HTTPS_PROXY")
	ftpProxy := cfg.Get("FTP_PROXY")
	noProxy := cfg.Get("NO_PROXY")

	return &dockerProvider{
		client:         client,
		sshDialer:      sshDialer,
		sshDialTimeout: sshDialTimeout,

		runPrivileged:  privileged,
		runCmd:         cmd,
		runBinds:       binds,
		setSecurityOpt: securityOpt,
		runMemory:      memory,
		runShm:         shm,
		runCPUs:        uint(cpus),
		runNative:      runNative,

		imageSelector:   imageSelector,
		containerLabels: containerLabels,

		httpProxy:  httpProxy,
		httpsProxy: httpsProxy,
		ftpProxy:   ftpProxy,
		noProxy:    noProxy,

		execCmd:         execCmd,
		inspectInterval: inspectInterval,
		tmpFs:           tmpFs,

		cpuSets: make([]bool, cpuSetSize),
	}, nil
}

// buildDockerClient constructs the Docker API client, optionally with TLS
// when CERT_PATH is configured.
func buildDockerClient(cfg *config.ProviderConfig) (*docker.Client, error) {
	// check for both DOCKER_ENDPOINT and DOCKER_HOST, the latter for
	// compatibility with docker's own env vars.
	if !cfg.IsSet("ENDPOINT") && !cfg.IsSet("HOST") {
		return nil, ErrMissingEndpointConfig
	}

	endpoint := cfg.Get("ENDPOINT")
	if endpoint == "" {
		endpoint = cfg.Get("HOST")
	}

	var httpClient *http.Client
	if cfg.IsSet("CERT_PATH") {
		certPath := cfg.Get("CERT_PATH")
		tlsOptions := tlsconfig.Options{
			CAFile:   filepath.Join(certPath, "ca.pem"),
			CertFile: filepath.Join(certPath, "cert.pem"),
			KeyFile:  filepath.Join(certPath, "key.pem"),
			// Verification is skipped only when TLS_VERIFY is unset/empty.
			InsecureSkipVerify: cfg.Get("TLS_VERIFY") == "",
		}
		tlsc, err := tlsconfig.Client(tlsOptions)
		if err != nil {
			return nil, err
		}
		httpClient = &http.Client{
			Transport: &http.Transport{
				TLSClientConfig: tlsc,
			},
			CheckRedirect: docker.CheckRedirect,
		}
	}

	dockerAPIVersion := DockerMinSupportedAPIVersion
	if cfg.IsSet("API_VERSION") {
		dockerAPIVersion = cfg.Get("API_VERSION")
	}

	return docker.NewClient(endpoint, dockerAPIVersion, httpClient, nil)
}

// buildDockerImageSelector returns the image selector implementation matching
// selectorType ("tag", "env", or "api").
func buildDockerImageSelector(selectorType string, client *docker.Client, cfg *config.ProviderConfig) (image.Selector, error) {
	switch selectorType {
	case "tag":
		return &dockerTagImageSelector{client: client}, nil
	case "env":
		return image.NewEnvSelector(cfg)
	case "api":
		baseURL, err := url.Parse(cfg.Get("IMAGE_SELECTOR_URL"))
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse image selector URL")
		}
		return image.NewAPISelector(baseURL), nil
	default:
		return nil, fmt.Errorf("invalid image selector type %q", selectorType)
	}
}

// dockerImageNameForID returns a human-readable name for the image with the requested ID.
// Currently, we are using the tag that includes the stack-name (e.g "travisci/ci-garnet:packer-1505167479") and reverting back to the ID if nothing is found.
func (p *dockerProvider) dockerImageNameForID(ctx gocontext.Context, imageID string) string {
	images, err := p.client.ImageList(ctx, dockertypes.ImageListOptions{All: true})
	if err != nil {
		// Best effort only: on listing failure fall back to the raw ID.
		return imageID
	}

	for _, image := range images {
		if image.ID == imageID {
			for _, tag := range image.RepoTags {
				if strings.HasPrefix(tag, "travisci/ci-") {
					return tag
				}
			}
		}
	}
	return imageID
}

func (p *dockerProvider) SupportsProgress() bool {
	return false
}

// StartWithProgress ignores the Progresser since progress is unsupported.
func (p *dockerProvider) StartWithProgress(ctx gocontext.Context, startAttributes *StartAttributes, _ Progresser) (Instance, error) {
	return p.Start(ctx, startAttributes)
}

// Start selects an image, removes any stale container with the same name,
// creates and starts a new container, then polls until it is running or the
// context is done.
func (p *dockerProvider) Start(ctx gocontext.Context, startAttributes *StartAttributes) (Instance, error) {
	var (
		imageID   string
		imageName string
	)

	logger := context.LoggerFromContext(ctx).WithField("self", "backend/docker_provider")

	if startAttributes.ImageName != "" {
		imageName = startAttributes.ImageName
	} else {
		selectedImageID, err := p.imageSelector.Select(ctx, &image.Params{
			Language: startAttributes.Language,
			Infra:    "docker",
		})
		if err != nil {
			logger.WithField("err", err).Error("couldn't select image")
			return nil, err
		}
		imageID = selectedImageID
		imageName = p.dockerImageNameForID(ctx, imageID)
	}

	containerName := hostnameFromContext(ctx)
	// A leftover container with the same name would make create fail, so
	// force-remove it first (best effort).
	existingContainer, err := p.client.ContainerInspect(ctx, containerName)
	if err == nil {
		err := p.client.ContainerRemove(ctx, existingContainer.ID, dockertypes.ContainerRemoveOptions{
			Force:         true,
			RemoveLinks:   false,
			RemoveVolumes: true,
		})
		if err != nil {
			logger.WithField("err", err).Error("couldn't remove preexisting container before create")
		} else {
			logger.Warn("removed preexisting container before create")
		}
	}

	labels := map[string]string{
		"travis.dist": startAttributes.Dist,
	}
	for key, value := range p.containerLabels {
		labels[key] = value
	}

	r, ok := context.RepositoryFromContext(ctx)
	if ok {
		labels["travis.repo"] = r
	}

	jid, ok := context.JobIDFromContext(ctx)
	if ok {
		labels["travis.job_id"] = strconv.FormatUint(jid, 10)
	}

	dockerConfig := &dockercontainer.Config{
		Cmd:        p.runCmd,
		Image:      imageID,
		Hostname:   strings.ToLower(containerName),
		Domainname: "travisci.net",
		Labels:     labels,
	}

	dockerHostConfig := &dockercontainer.HostConfig{
		Binds:      p.runBinds,
		Privileged: p.runPrivileged,
		Tmpfs:      p.tmpFs,
		ShmSize:    int64(p.runShm),
		Resources: dockercontainer.Resources{
			Memory: int64(p.runMemory),
		},
	}

	if len(p.setSecurityOpt) > 0 {
		dockerHostConfig.SecurityOpt = p.setSecurityOpt
	}

	useCPUSets := p.runCPUs != uint(0)
	cpuSets := ""

	if useCPUSets {
		cpuSets, err = p.checkoutCPUSets(ctx)
		if err != nil {
			logger.WithFields(logrus.Fields{
				"err":            err,
				"cpu_set_length": len(p.cpuSets),
				"run_cpus":       p.runCPUs,
			}).Error("couldn't checkout CPUSets")
			return nil, err
		}

		if cpuSets != "" {
			dockerHostConfig.Resources.CpusetCpus = cpuSets
		}
	}

	logger.WithFields(logrus.Fields{
		"config":      fmt.Sprintf("%#v", dockerConfig),
		"host_config": fmt.Sprintf("%#v", dockerHostConfig),
	}).Debug("creating container")

	container, err := p.client.ContainerCreate(
		ctx,
		dockerConfig,
		dockerHostConfig,
		nil, containerName)
	if err != nil {
		logger.WithField("err", err).Error("couldn't create container")
		// Return the CPU slots and clean up any partially-created container.
		if useCPUSets {
			p.checkinCPUSets(ctx, cpuSets)
		}

		err := p.client.ContainerRemove(ctx, container.ID, dockertypes.ContainerRemoveOptions{
			Force:         true,
			RemoveLinks:   false,
			RemoveVolumes: true,
		})
		if err != nil {
			logger.WithField("err", err).Error("couldn't remove container after create failure")
		}

		return nil, err
	}

	startBooting := time.Now()

	err = p.client.ContainerStart(ctx, container.ID, dockertypes.ContainerStartOptions{})
	if err != nil {
		logger.WithField("err", err).Error("couldn't start container")
		if useCPUSets {
			p.checkinCPUSets(ctx, cpuSets)
		}
		return nil, err
	}

	// Busy-poll container state in a goroutine until it reports Running,
	// errors out, or the context is cancelled.
	containerReady := make(chan dockertypes.ContainerJSON)
	errChan := make(chan error)
	go func(id string) {
		for {
			container, err := p.client.ContainerInspect(ctx, id)
			if err != nil {
				errChan <- err
				return
			}
			if container.State != nil && container.State.Running {
				containerReady <- container
				return
			}
		}
	}(container.ID)

	select {
	case container := <-containerReady:
		metrics.TimeSince("worker.vm.provider.docker.boot", startBooting)
		return &dockerInstance{
			client:       p.client,
			provider:     p,
			runNative:    p.runNative,
			container:    &container,
			imageName:    imageName,
			startBooting: startBooting,
		}, nil
	case err := <-errChan:
		return nil, err
	case <-ctx.Done():
		if ctx.Err() == gocontext.DeadlineExceeded {
			if useCPUSets {
				p.checkinCPUSets(ctx, cpuSets)
			}
			metrics.Mark("worker.vm.provider.docker.boot.timeout")
		}
		return nil, ctx.Err()
	}
}

func (p *dockerProvider) Setup(ctx gocontext.Context) error { return nil }

// checkoutCPUSets reserves runCPUs free CPU indices and returns them as a
// comma-separated string for Docker's CpusetCpus. Fails if not enough are free.
func (p *dockerProvider) checkoutCPUSets(ctx gocontext.Context) (string, error) {
	p.cpuSetsMutex.Lock()
	defer p.cpuSetsMutex.Unlock()

	cpuSets := []int{}

	for i, checkedOut := range p.cpuSets {
		if !checkedOut {
			cpuSets = append(cpuSets, i)
		}

		if len(cpuSets) == int(p.runCPUs) {
			break
		}
	}

	if len(cpuSets) != int(p.runCPUs) {
		return "", fmt.Errorf("not enough free CPUsets")
	}

	cpuSetsString := []string{}

	for _, cpuSet := range cpuSets {
		p.cpuSets[cpuSet] = true
		cpuSetsString = append(cpuSetsString, fmt.Sprintf("%d", cpuSet))
	}

	logger := context.LoggerFromContext(ctx).WithField("self", "backend/docker_provider")
	logger.WithField("cpu_sets", cpuSetsString).Info("checked out")

	return strings.Join(cpuSetsString, ","), nil
}

// checkinCPUSets releases previously checked-out CPU indices; unparseable or
// already-free entries are logged and skipped.
func (p *dockerProvider) checkinCPUSets(ctx gocontext.Context, sets string) {
	p.cpuSetsMutex.Lock()
	defer p.cpuSetsMutex.Unlock()

	logger := context.LoggerFromContext(ctx).WithField("self", "backend/docker_provider")

	for _, cpuString := range strings.Split(sets, ",") {
		cpu, err := strconv.ParseUint(cpuString, 10, 64)
		if err != nil {
			logger.WithFields(logrus.Fields{
				"err":        err,
				"cpu_string": cpuString,
			}).Error("couldn't parse CPU string; CPU set not checked in")
			continue
		}

		if !p.cpuSets[int(cpu)] {
			logger.WithField("cpu_set", cpuString).Info("already checked in")
			continue
		}

		p.cpuSets[int(cpu)] = false
	}

	logger.WithField("cpu_sets", sets).Info("checked in")
}

// sshConnection re-inspects the container (to get a fresh IP) and dials the
// in-container SSH daemon as user "travis".
func (i *dockerInstance) sshConnection(ctx gocontext.Context) (ssh.Connection, error) {
	var err error
	i.container, err = func() (*dockertypes.ContainerJSON, error) {
		container, err := i.client.ContainerInspect(ctx, i.container.ID)
		if err != nil {
			return nil, err
		}
		return &container, nil
	}()
	if err != nil {
		return nil, err
	}

	// NOTE(review): fixed sleep presumably gives sshd time to come up inside
	// the container — confirm whether a readiness probe could replace it.
	time.Sleep(2 * time.Second)

	return i.provider.sshDialer.Dial(fmt.Sprintf("%s:22", i.container.NetworkSettings.IPAddress), "travis", i.provider.sshDialTimeout)
}

func (i *dockerInstance) Warmed() bool {
	return false
}

func (i *dockerInstance) SupportsProgress() bool {
	return false
}

// UploadScript sends the build script into the container, over the Docker
// API when running natively or via SCP otherwise.
func (i *dockerInstance) UploadScript(ctx gocontext.Context, script []byte) error {
	if i.runNative {
		return i.uploadScriptNative(ctx, script)
	}
	return i.uploadScriptSCP(ctx, script)
}

// uploadScriptNative wraps the script in a single-entry tar archive and
// copies it to /home/travis/build.sh via the Docker CopyToContainer API.
func (i *dockerInstance) uploadScriptNative(ctx gocontext.Context, script []byte) error {
	tarBuf := &bytes.Buffer{}
	tw := tar.NewWriter(tarBuf)
	err := tw.WriteHeader(&tar.Header{
		Name: "/home/travis/build.sh",
		Mode: 0755,
		Size: int64(len(script)),
	})
	if err != nil {
		return err
	}
	_, err = tw.Write(script)
	if err != nil {
		return err
	}
	err = tw.Close()
	if err != nil {
		return err
	}

	return i.client.CopyToContainer(ctx, i.container.ID, "/", bytes.NewReader(tarBuf.Bytes()), dockertypes.CopyToContainerOptions{})
}

// uploadScriptSCP uploads build.sh over SSH; a preexisting file means the
// instance was already used for a build (stale VM).
func (i *dockerInstance) uploadScriptSCP(ctx gocontext.Context, script []byte) error {
	conn, err := i.sshConnection(ctx)
	if err != nil {
		return err
	}
	defer conn.Close()

	existed, err := conn.UploadFile("build.sh", script)
	// existed is checked before err on purpose: stale-VM detection wins.
	if existed {
		return ErrStaleVM
	}
	if err != nil {
		return errors.Wrap(err, "couldn't upload build script")
	}

	return nil
}

// RunScript executes the uploaded build script (docker exec or SSH) and
// streams its output to output.
func (i *dockerInstance) RunScript(ctx gocontext.Context, output io.Writer) (*RunResult, error) {
	if i.runNative {
		return i.runScriptExec(ctx, output)
	}
	return i.runScriptSSH(ctx, output)
}

// runScriptExec runs the build via a docker exec session, mirroring output
// through a TeeReader and polling exec state every inspectInterval.
func (i *dockerInstance) runScriptExec(ctx gocontext.Context, output io.Writer) (*RunResult, error) {
	execConfig := dockertypes.ExecConfig{
		AttachStdin:  false,
		AttachStdout: true,
		AttachStderr: true,
		Detach:       false,
		Tty:          true,
		Cmd:          i.provider.execCmd,
		User:         "travis",
	}

	// Propagate the worker's proxy settings (both cases) into the exec env.
	if i.provider.httpProxy != "" {
		execConfig.Env = append(execConfig.Env, "HTTP_PROXY="+i.provider.httpProxy)
		execConfig.Env = append(execConfig.Env, "http_proxy="+i.provider.httpProxy)
	}

	if i.provider.httpsProxy != "" {
		execConfig.Env = append(execConfig.Env, "HTTPS_PROXY="+i.provider.httpsProxy)
		execConfig.Env = append(execConfig.Env, "https_proxy="+i.provider.httpsProxy)
	}

	if i.provider.ftpProxy != "" {
		execConfig.Env = append(execConfig.Env, "FTP_PROXY="+i.provider.ftpProxy)
		execConfig.Env = append(execConfig.Env, "ftp_proxy="+i.provider.ftpProxy)
	}

	if i.provider.noProxy != "" {
		execConfig.Env = append(execConfig.Env, "NO_PROXY="+i.provider.noProxy)
		execConfig.Env = append(execConfig.Env, "no_proxy="+i.provider.noProxy)
	}

	exec, err := i.client.ContainerExecCreate(ctx, i.container.ID, execConfig)
	if err != nil {
		return &RunResult{Completed: false}, err
	}

	hijackedResponse, err := i.client.ContainerExecAttach(ctx, exec.ID, dockertypes.ExecStartCheck{
		Detach: execConfig.Detach,
		Tty:    execConfig.Tty,
	})
	if err != nil {
		return &RunResult{Completed: false}, err
	}
	defer hijackedResponse.Close()

	tee := io.TeeReader(hijackedResponse.Reader, output)
	firstByte := make(chan struct{})

	// Pump exec output into `output`; signal once the first byte arrives so
	// the polling loop below doesn't start before the script does.
	go func() {
		buf := make([]byte, 8192)
		didFirstByte := false
		for {
			select {
			case <-ctx.Done():
				return
			default:
			}
			n, _ := tee.Read(buf)
			if n != 0 && !didFirstByte {
				firstByte <- struct{}{}
				didFirstByte = true
			}
		}
	}()

	<-firstByte

	for {
		inspect, err := i.client.ContainerExecInspect(ctx, exec.ID)
		if err != nil {
			return &RunResult{Completed: false}, err
		}
		if !inspect.Running {
			return &RunResult{Completed: true, ExitCode: int32(inspect.ExitCode)}, nil
		}
		select {
		case <-time.After(i.provider.inspectInterval):
			continue
		case <-ctx.Done():
			return &RunResult{Completed: false}, ctx.Err()
		}
	}
}

// runScriptSSH runs the build script over an SSH session.
func (i *dockerInstance) runScriptSSH(ctx gocontext.Context, output io.Writer) (*RunResult, error) {
	conn, err := i.sshConnection(ctx)
	if err != nil {
		return &RunResult{Completed: false}, errors.Wrap(err, "couldn't connect to SSH server")
	}
	defer conn.Close()

	exitStatus, err := conn.RunCommand(strings.Join(i.provider.execCmd, " "), output)

	// NOTE(review): Completed is set to `err != nil`, i.e. true only when
	// RunCommand FAILED — this looks inverted (compare runScriptExec, which
	// sets Completed true on success). Confirm against RunResult's contract.
	return &RunResult{Completed: err != nil, ExitCode: exitStatus}, errors.Wrap(err, "error running script")
}

// DownloadTrace fetches /tmp/build.trace from the container, natively or
// over SSH.
func (i *dockerInstance) DownloadTrace(ctx gocontext.Context) ([]byte, error) {
	if i.runNative {
		return i.downloadTraceNative(ctx)
	}
	return i.downloadTraceSSH(ctx)
}

// downloadTraceNative copies /tmp/build.trace out of the container (arrives
// as a tar stream) and extracts the build.trace entry.
func (i *dockerInstance) downloadTraceNative(ctx gocontext.Context) ([]byte, error) {
	r, _, err := i.client.CopyFromContainer(ctx, i.container.ID, "/tmp/build.trace")
	if r != nil {
		defer r.Close()
	}
	if err != nil {
		return nil, errors.Wrap(err, "couldn't copy trace from container")
	}

	found := false
	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, errors.Wrap(err, "couldn't parse tar")
		}

		if hdr.Name == "build.trace" {
			found = true
			break
		}
	}

	if !found {
		// NOTE(review): err is io.EOF (or nil) on this path, so the wrapped
		// error may read oddly; consider a dedicated sentinel error.
		return nil, errors.Wrap(err, "couldn't find trace in tar")
	}

	buf, err := ioutil.ReadAll(tr)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't read contents of file")
	}

	return buf, nil
}

// downloadTraceSSH fetches /tmp/build.trace via an SSH file download.
func (i *dockerInstance) downloadTraceSSH(ctx gocontext.Context) ([]byte, error) {
	conn, err := i.sshConnection(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't connect to SSH server")
	}
	defer conn.Close()

	buf, err := conn.DownloadFile("/tmp/build.trace")
	if err != nil {
		return nil, errors.Wrap(err, "couldn't download trace")
	}

	return buf, nil
}

// Stop returns the instance's CPU slots, stops the container with a 30s
// grace period, then force-removes it together with its volumes.
func (i *dockerInstance) Stop(ctx gocontext.Context) error {
	defer i.provider.checkinCPUSets(ctx, i.container.HostConfig.Resources.CpusetCpus)
	logger := context.LoggerFromContext(ctx).WithField("self", "backend/docker_provider")

	timeout := 30 * time.Second
	err := i.client.ContainerStop(ctx, i.container.ID, &timeout)
	if err != nil {
		logger.Warn("couldn't stop container")
		return err
	}

	return i.client.ContainerRemove(ctx, i.container.ID, dockertypes.ContainerRemoveOptions{
		Force:         true,
		RemoveLinks:   false,
		RemoveVolumes: true,
	})
}

// ID returns a short (7-char) container ID for logging.
func (i *dockerInstance) ID() string {
	if i.container == nil {
		return "{unidentified}"
	}
	return i.container.ID[0:7]
}

func (i *dockerInstance) ImageName() string {
	return i.imageName
}

// StartupDuration reports how long the container took from creation to the
// start of booting; zero when timestamps are unavailable.
func (i *dockerInstance) StartupDuration() time.Duration {
	if i.container == nil {
		return zeroDuration
	}
	containerCreated, err := time.Parse(time.RFC3339Nano, i.container.Created)
	if err != nil {
		return zeroDuration
	}
	return i.startBooting.Sub(containerCreated)
}

// Select picks a local image by language-specific tags, falling back to the
// "travis:default"/"default" tags.
func (s *dockerTagImageSelector) Select(ctx gocontext.Context, params *image.Params) (string, error) {
	images, err := s.client.ImageList(ctx, dockertypes.ImageListOptions{All: true})
	if err != nil {
		return "", errors.Wrap(err, "failed to list docker images")
	}

	imageID, err := findDockerImageByTag([]string{
		"travis:" + params.Language,
		params.Language,
		"travis:default",
		"default",
	}, images)

	return imageID, err
}

//findDockerImageByTag returns the ID of the image which matches the requested search tags
func findDockerImageByTag(searchTags []string, images []dockertypes.ImageSummary) (string, error) {
	// Search tags are in priority order: first match wins.
	for _, searchTag := range searchTags {
		for _, image := range images {
			if searchTag == image.ID {
				return image.ID, nil
			}
			for _, tag := range image.RepoTags {
				if tag == searchTag {
					return image.ID, nil
				}
			}
		}
	}
	return "", fmt.Errorf("failed to find matching docker image tag")
}
package leetcode_0148_排序链表 /* 在 O(n log n) 时间复杂度和常数级空间复杂度下,对链表进行排序。 示例 1: 输入: 4->2->1->3 输出: 1->2->3->4 示例 2: 输入: -1->5->3->4->0 输出: -1->0->3->4->5 */ // Definition for singly-linked list. type ListNode struct { Val int Next *ListNode } func sortList(head *ListNode) *ListNode { // 先返回异常值:head就是空的或者head的next是空的 // 也就是len(head) <=1 的情况 if head == nil || head.Next == nil { return head } // 分成左右两个链表 left, right := split(head) // 合并已经排过序的列表 return merge(sortList(left), sortList(right)) } // 从中间切成两个,所以要有快慢指针 // 慢指针走一步,快指针走两步,等快指针走到头,慢指针刚好走到中间 func split(head *ListNode) (left, right *ListNode) { slow, fast := head, head // 确保当len(head)==2,会均分成两个list var slowPre *ListNode for fast != nil && fast.Next != nil { // 保存slow slowPre = slow // V(fast) = 2V(slow) slow, fast = slow.Next, fast.Next.Next } // 切断list,slow的下一个设成nil就成两个list了 slowPre.Next = nil // 赋值给左右两个list left, right = head, slow return left, right } // 排序+合并 func merge(left, right *ListNode) *ListNode { // 初始化地址 cur := &ListNode{} // 同时创建头 headPre := cur // 左右均不为空的情况 for left != nil && right != nil { // 左边的小于右边的值 if left.Val < right.Val { // cur总是存小的值,然后left指向下一个 cur.Next, left = left, left.Next } else { // cur 总是存小的值,然后right指向下一个 cur.Next, right = right, right.Next } // cur 指向下一个 cur = cur.Next } // 把剩下的也接上 if left == nil { cur.Next = right } else { cur.Next = left } // 返回head,也就是整个链表 return headPre.Next }
package main

import (
	"log"

	"golang.org/x/net/context"

	firebase "firebase.google.com/go"
	"google.golang.org/api/option"
)

// main initializes a Firebase app from a service-account key file and
// logs a confirmation line. It is a minimal connectivity example with
// no further Firebase usage.
func main() {
	// Credentials are loaded from a local JSON key file; the path is a
	// placeholder to be replaced with the real key location.
	opt := option.WithCredentialsFile("path/to/serviceAccountKey.json")
	// nil config: project settings are inferred from the credentials.
	app, err := firebase.NewApp(context.Background(), nil, opt)
	if err != nil {
		log.Fatalf("error initializing app: %v\n", err)
	}
	log.Println("Golang + Firebase running", app)
}
package models

import "go.mongodb.org/mongo-driver/mongo"

// MessageType describes a category of notification message and the
// template used to render it. (When warehouse stock runs low, the
// notification goes to the warehouse administrator.)
type MessageType struct {
	ID       int64  `json:"id" bson:"id"`             // 1 product stock, 2 warehouse stock, 3 order, 4 settlement (may later split into customer and supplier variants)
	Name     string `json:"name" bson:"name"`         // human-readable type name
	Template string `json:"template" bson:"template"` // message template for this type
}

// getMessageTypeCollection returns the MongoDB collection backing
// MessageType documents, using the package-level Client.
func getMessageTypeCollection() *mongo.Collection {
	return Client.Collection("message_type")
}
package model type SearchResults struct { MediaContainer struct { Metadata []struct { Genre []struct { Tag string `json:"tag"` } `json:"Genre"` Role []struct { Tag string `json:"tag"` } `json:"Role"` AddedAt int64 `json:"addedAt"` AllowSync bool `json:"allowSync"` Art string `json:"art"` Banner string `json:"banner"` ChildCount int64 `json:"childCount"` ContentRating string `json:"contentRating"` Duration int64 `json:"duration"` Index int64 `json:"index"` Key string `json:"key"` LastViewedAt int64 `json:"lastViewedAt"` LeafCount int64 `json:"leafCount"` LibrarySectionID int64 `json:"librarySectionID"` LibrarySectionTitle string `json:"librarySectionTitle"` LibrarySectionUUID string `json:"librarySectionUUID"` OriginallyAvailableAt string `json:"originallyAvailableAt"` Personal bool `json:"personal"` Rating float64 `json:"rating"` RatingKey string `json:"ratingKey"` SourceTitle string `json:"sourceTitle"` Studio string `json:"studio"` Summary string `json:"summary"` Theme string `json:"theme"` Thumb string `json:"thumb"` Title string `json:"title"` Type string `json:"type"` UpdatedAt int64 `json:"updatedAt"` ViewCount int64 `json:"viewCount"` ViewedLeafCount int64 `json:"viewedLeafCount"` Year int64 `json:"year"` } `json:"Metadata"` Provider []struct { Key string `json:"key"` Title string `json:"title"` Type string `json:"type"` } `json:"Provider"` Identifier string `json:"identifier"` MediaTagPrefix string `json:"mediaTagPrefix"` MediaTagVersion int64 `json:"mediaTagVersion"` Size int64 `json:"size"` } `json:"MediaContainer"` }
package webhook

import (
	"encoding/json"
	"reflect"
)

// Unmarshal parses str as JSON into a Webhook. On any parse error it
// deliberately returns the zero Webhook instead of an error; callers
// detect that case through IsValid, which rejects the zero value.
func Unmarshal(str string) Webhook {
	var webhook Webhook
	if err := json.Unmarshal([]byte(str), &webhook); err != nil {
		return Webhook{}
	}
	return webhook
}

// image identifies a single tagged image as namespace/name:tag.
type image struct {
	namespace string
	name      string
	tag       string
}

// validImages is the allow-list of images this webhook accepts.
// (gofmt -s: the element type is implied by the slice literal.)
var validImages = []image{
	{"joaofnfernandes", "test", "latest"},
}

// isDefault reports whether w is the zero Webhook, i.e. nothing was
// decoded into it.
func (w *Webhook) isDefault() bool {
	return reflect.DeepEqual(*w, Webhook{})
}

// IsValid reports whether w refers to one of the allow-listed images.
// The zero Webhook is never valid.
func (w *Webhook) IsValid() bool {
	if w.isDefault() {
		return false
	}
	for _, v := range validImages {
		if v.namespace == w.Repository.Namespace &&
			v.name == w.Repository.Name &&
			v.tag == w.PushData.Tag {
			return true
		}
	}
	return false
}
package models

import "time"

// BaseModel holds the fields shared by all persisted models: a gorm
// auto-increment primary key and a creation timestamp whose value the
// database defaults to now() when the row is inserted.
type BaseModel struct {
	ID        uint      `json:"id" gorm:"primary_key"`
	CreatedAt time.Time `json:"created_at" gorm:"default:now()"`
}
package access

import (
	"errors"
	"time"

	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"

	"github.com/mgalela/akses/db"
	"github.com/mgalela/akses/utils"
)

// TokenCache represents a token cache document.
type TokenCache struct {
	ID      string `json:"id" bson:"_id"`
	Token   string `json:"token" bson:"token"`
	UserID  string `json:"userid" bson:"userid"`
	Expired time.Time
}

// TokenCreate returns the cached token for user, refreshing its expiry
// when it has lapsed, or inserts a new cache entry when none exists.
// Expiry timestamps are handled in UTC throughout so they compare
// consistently with the UTC check in TokenMap.
func TokenCreate(session *mgo.Session, user *User) (string, error) {
	var tc TokenCache
	if err := db.TokenCaches(session).Find(bson.M{"userid": user.ID}).One(&tc); err == nil {
		// A cache entry exists; reuse it while it is still valid.
		if !time.Now().UTC().After(tc.Expired) {
			return tc.Token, nil
		}
		// Expired: extend the expiry by one year.
		// NOTE(review): the token value itself is kept unchanged here —
		// confirm reissuing the same token after expiry is intended.
		tc.Expired = time.Now().UTC().AddDate(1, 0, 0)
		if err := db.TokenCaches(session).Update(bson.M{"userid": user.ID}, bson.M{"$set": tc}); err != nil {
			return "", err
		}
		return tc.Token, nil
	}
	// No cache entry for this user: create one valid for one year.
	tc = TokenCache{
		ID:      bson.NewObjectId().Hex(),
		Token:   user.Token,
		UserID:  user.ID,
		Expired: time.Now().UTC().AddDate(1, 0, 0),
	}
	if err := db.TokenCaches(session).Insert(tc); err != nil {
		return "", err
	}
	return tc.Token, nil
}

// TokenMap resolves token to its cache entry, failing when the token
// is unknown or has expired.
func TokenMap(session *mgo.Session, token string) (*TokenCache, error) {
	var tc TokenCache
	if err := db.TokenCaches(session).Find(bson.M{"token": token}).Sort("-expired").One(&tc); err != nil {
		utils.Log.Info("cannot find token=", token)
		return nil, err
	}
	if time.Now().UTC().After(tc.Expired) {
		utils.Log.Info("Token Map : token expired, please do login")
		return nil, errors.New("Token Expired,Please Login")
	}
	return &tc, nil
}

// TokenDelete removes the cache document with the given id.
func TokenDelete(session *mgo.Session, tokenid string) error {
	return db.TokenCaches(session).RemoveId(tokenid)
}
// Package kindergarten implements a solution of the exercise titiled `Kindergarten Garden'. package kindergarten import ( "errors" "sort" "strings" ) // Garden represents a collection of a child and his/her own grasses. type Garden map[string][]string var plants = map[byte]string{'G': "grass", 'C': "clover", 'R': "radishes", 'V': "violets"} // NewGarden is a ctor of Garden func NewGarden(diagram string, children []string) (*Garden, error) { for _, c := range diagram { switch c { case 'G', 'C', 'R', 'V', '\n': continue default: return nil, errors.New("invalid cup code") } } sorted := make([]string, cap(children)) copy(sorted, children) sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] }) g := Garden{} rows := strings.Split(diagram, "\n")[1:] if len(rows) != 2 || len(rows[0]) != len(rows[1]) || len(rows[0])&1 == 1 { return nil, errors.New("invalid input") } for _, child := range sorted { if _, found := g[child]; found { return nil, errors.New("duplicate name") } g[child] = []string{plants[rows[0][0]], plants[rows[0][1]], plants[rows[1][0]], plants[rows[1][1]]} rows[0], rows[1] = rows[0][2:], rows[1][2:] } return &g, nil } // Plants returns the plants which the child owns. func (g *Garden) Plants(child string) ([]string, bool) { plants, ok := map[string][]string(*g)[child] return plants, ok } /* BenchmarkNewGarden-4 96256 12029 ns/op 7356 B/op 99 allocs/op BenchmarkGarden_Plants-4 6137862 214 ns/op 0 B/op 0 allocs/op */
package config

// RabbitMQ settings for the OSS transfer pipeline.
const (
	// RabbitURL is the AMQP connection string (user:password@host:port/vhost).
	RabbitURL = "amqp://admin:123456@192.168.2.3:5672/"
	// ExchangeName is the exchange that transfer messages are published to.
	ExchangeName = "oss.transfer.exchange"
	// ExchangeType is the exchange's routing mode.
	ExchangeType = "direct"
	// QueueName is the queue bound to the exchange.
	QueueName = "oss.transfer.queue"
	// RoutingKey routes messages from the exchange to the queue.
	RoutingKey = "oss"
)
package main

import (
	"context"
	"fmt"
	"google.golang.org/grpc"
	"myRPC/myService/game/generate"
)

// Earlier experiment kept for reference: calling the RouteGuide
// GetFeature RPC against a local server.
//func main() {
//	req := &pbGuide.Point{
//		Latitude:100,
//		Longitude:10000,
//	}
//	conn, err := grpc.Dial("127.0.0.1:8889", grpc.WithInsecure())
//	if err != nil {
//		fmt.Println("Dial:", err)
//		return
//	}
//	// close the connection when the function returns
//	defer conn.Close()
//	client := pbGuide.NewRouteGuideClient(conn)
//	resp,err := client.GetFeature(context.TODO(),req)
//	if err != nil {
//		fmt.Println("GetFeature:", err)
//		return
//	}
//	fmt.Printf("resp:%v",resp)
//}

// main dials the remote game service and issues a single StartGame RPC,
// printing either the error or the response.
func main() {
	req := &pbGame.StartGameRequest{
		StartName: "",
	}
	// NOTE(review): grpc.WithInsecure disables transport security —
	// confirm that is acceptable for this environment.
	conn, err := grpc.Dial("47.92.212.70:8888", grpc.WithInsecure())
	if err != nil {
		fmt.Println("Dial:", err)
		return
	}
	// close the connection when the function returns
	defer conn.Close()
	client := pbGame.NewGameServiceClient(conn)
	resp, err := client.StartGame(context.TODO(), req)
	if err != nil {
		fmt.Println("StartGame err:", err)
		return
	}
	fmt.Printf("resp:%v", resp)
}
package main import ( "net/http" "net/http/httputil" "os" "runtime/debug" "strings" "time" "github.com/go-openapi/loads" "github.com/go-openapi/runtime/middleware" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/z7zmey/golang-microservice-boilerplate/internal/endpoint" "github.com/z7zmey/golang-microservice-boilerplate/internal/repository" "github.com/z7zmey/golang-microservice-boilerplate/internal/service" "github.com/z7zmey/golang-microservice-boilerplate/internal/transport/rest/handler" "github.com/z7zmey/golang-microservice-boilerplate/internal/transport/rest/server/restapi" "github.com/z7zmey/golang-microservice-boilerplate/internal/transport/rest/server/restapi/operations" ) var cfg Config func init() { Cmd.Flags().AddFlagSet(cfg.Flags()) } var Cmd = &cobra.Command{ Use: "serve", Short: "run server", RunE: func(cmd *cobra.Command, args []string) error { BindEnv(cmd) l := logrus.New() l.SetFormatter(new(logrus.JSONFormatter)) l.Hooks.Add(newServiceLogHook("boilerplate")) // load embedded swagger file swaggerSpec, err := loads.Analyzed(restapi.SwaggerJSON, "") if err != nil { return errors.WithStack(err) } // create new service API api := operations.NewBoilerplateMicroserviceAPI(swaggerSpec) api.Logger = func(s string, i ...interface{}) { l.Log(logrus.InfoLevel, s) } server := restapi.NewServer(api) defer func() { if err := server.Shutdown(); err != nil { logrus.WithError(err).Error() } }() // set the port this service will be run on server.Port = cfg.Port server.Host = cfg.Host server.ReadTimeout = cfg.ReadTimeout server.WriteTimeout = cfg.WriteTimeout elasticsearchRepo := repository.NewCarRepo() searchService := service.NewSearchService(elasticsearchRepo) searchEndpoint := endpoint.NewBoilerplateEndpoint(searchService) apiHandler := handler.NewApiHandler(searchEndpoint) apiHandler.ConfigureHandlers(api) server.SetHandler(PanicRecovery(l, AccessLogger(l, 
api.Serve(middleware.PassthroughBuilder)))) // serve API if err := server.Serve(); err != nil { return errors.WithStack(err) } return nil }, } // BindEnv parse config values from environment variables func BindEnv(cmd *cobra.Command) { cmd.Flags().VisitAll(func(f *pflag.Flag) { envVar := strings.ToUpper(f.Name) if val := os.Getenv(envVar); val != "" { if err := cmd.Flags().Set(f.Name, val); err != nil { logrus.WithError(err).Error("failed to set flag") } } }) } func PanicRecovery(l *logrus.Logger, next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { defer func() { if rerr := recover(); rerr != nil { defer func(rw http.ResponseWriter) { rw.WriteHeader(http.StatusInternalServerError) }(w) httprequest, err := httputil.DumpRequest(r, false) if err != nil { l.WithError(err).Errorf("failed to dump the request on panic %v", rerr) return } l.WithError(errors.New("panic recovery")). WithField("stack", string(debug.Stack())). WithField("request", string(httprequest)). Error("panic") } }() next.ServeHTTP(w, r) }) } func AccessLogger(l *logrus.Logger, next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { start := time.Now() next.ServeHTTP(w, r) // todo add request status code l.WithFields(logrus.Fields{ "method": r.Method, "path": r.URL, "took-seconds": time.Since(start).Seconds(), }).Info("REST request") }) }
package httpServer

import (
	"encoding/json"
	"github.com/astaxie/beego/logs"
	"github.com/valyala/fasthttp"
	"time"
)

// Pre-built header byte slices reused on every response.
var (
	strContentType     = []byte("Content-Type")
	strApplicationJSON = []byte("application/json")
)

// GenerateResp builds the standard response envelope:
// {"data":{"result":...},"status":{"code":...,"message":...}}.
func GenerateResp(result interface{}, code int, message string) map[string]map[string]interface{} {
	resp := make(map[string]map[string]interface{})
	resp["data"] = make(map[string]interface{})
	resp["data"]["result"] = result
	resp["status"] = make(map[string]interface{})
	resp["status"]["code"] = code
	resp["status"]["message"] = message
	return resp
}

// DoJSONWrite writes obj as a JSON response body with the given HTTP
// status code. On encoding failure it logs the elapsed encoding time
// along with the error and replies with 500 instead.
func DoJSONWrite(ctx *fasthttp.RequestCtx, code int, obj interface{}) {
	ctx.Response.Header.SetCanonical(strContentType, strApplicationJSON)
	ctx.Response.SetStatusCode(code)
	start := time.Now()
	if err := json.NewEncoder(ctx).Encode(obj); err != nil {
		elapsed := time.Since(start)
		logs.Error("", elapsed, err.Error(), obj)
		ctx.Error(err.Error(), fasthttp.StatusInternalServerError)
	}
}
package grpchook import ( "context" "fmt" "strings" "github.com/SecuritasCrimePrediction/apitools-go/notification" "google.golang.org/grpc" ) type interceptor struct { configs EndpointConfig recipients []notification.Sender } // The hook for streaming endpoints // Assembles all endpoint configurations to one EndpointConfig and returns the hook function func StreamNotificationInterceptor(recipients []notification.Sender, configs ...EndpointConfig) grpc.StreamServerInterceptor { endpointConfigs := EndpointConfig{} for _, conf := range configs { for k, v := range conf { endpointConfigs[k] = v } } i := interceptor{ recipients: recipients, configs: endpointConfigs, } return i.streamHook } // The hook function for streaming endpoints // It will call the deciding function to decide if a notification should be sent or not. func (i interceptor) streamHook(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) (err error) { endpointParts := strings.Split(info.FullMethod, "/") endpointName := endpointParts[len(endpointParts)-1] // handle request err = handler(srv, stream) // if no configuration exist for this endpoint, just return the response conf, exists := i.configs[endpointName] if !exists { return } // Determine whether a notification should be sent if conf.ShouldNotifyForErr(stream.Context(), err) { switch err { case nil: i.sendInfoMsg(endpointName, "") default: i.sendErrorMsg(endpointName, err.Error()) } } return } // The hook for unary endpoints // Assembles all endpoint configurations to one EndpointConfig and returns the hook function func UnaryNotificationInterceptor(recipients []notification.Sender, configs ...EndpointConfig) grpc.UnaryServerInterceptor { endpointConfigs := EndpointConfig{} for _, conf := range configs { for k, v := range conf { endpointConfigs[k] = v } } i := interceptor{ recipients: recipients, configs: endpointConfigs, } return i.unaryHook } // The hook function for unary endpoints // It will call 
the deciding function to decide if a notification should be sent or not. func (i interceptor) unaryHook(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { endpointParts := strings.Split(info.FullMethod, "/") endpointName := endpointParts[len(endpointParts)-1] // handle request resp, err = handler(ctx, req) // if no configuration exist for this endpoint, just return the response conf, exist := i.configs[endpointName] if !exist { return } // Determine whether a notification should be sent if conf.ShouldNotifyForErr(ctx, err) { switch err { case nil: i.sendInfoMsg(endpointName, resp) default: i.sendErrorMsg(endpointName, err.Error()) } } return } // Send an info message on all notification channels func (i interceptor) sendInfoMsg(methodName string, info interface{}) { for _, recipient := range i.recipients { recipient.Info(fmt.Sprintf("Request to endpoint %s recieved\nExtra info: %+v", methodName, info)) } } // Send an error message on all notification channels func (i interceptor) sendErrorMsg(methodName string, errStr string) { for _, recipient := range i.recipients { recipient.Alert(fmt.Sprintf("Error occurred in a call to %s\nError: %s", methodName, errStr)) } }
package controllers

import (
	"net/http"

	"github.com/gin-gonic/gin"

	"github.com/mickaelmagniez/elastic-alert/models"
	"github.com/mickaelmagniez/elastic-alert/store"
)

// AlertsController groups the HTTP handlers for the /alerts resource.
type AlertsController struct{}

// All returns every stored alert.
// BUG FIX (all handlers): errors are now serialized via err.Error() —
// encoding an error value directly yields "{}" for most error types,
// and the bind-error paths already used err.Error().
func (AlertsController) All(c *gin.Context) {
	alerts, err := store.AllAlerts()
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, alerts)
}

// Get returns the alert identified by the :id path parameter.
func (AlertsController) Get(c *gin.Context) {
	id := c.Param("id")
	alert, err := store.GetAlert(id)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, alert)
}

// Create stores the alert supplied in the JSON request body.
func (AlertsController) Create(c *gin.Context) {
	var alert models.Alert
	if err := c.ShouldBindJSON(&alert); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	created, err := store.CreateAlert(alert)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, created)
}

// Update replaces an existing alert with the JSON request body.
func (AlertsController) Update(c *gin.Context) {
	var alert models.Alert
	if err := c.ShouldBindJSON(&alert); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	updated, err := store.UpdateAlert(alert)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, updated)
}

// Delete removes the alert identified by the :id path parameter and
// responds 204 No Content on success.
func (AlertsController) Delete(c *gin.Context) {
	id := c.Param("id")
	// The returned id is not needed; only the error matters here.
	if _, err := store.DeleteAlert(id); err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusNoContent, nil)
}
package domain

// DefaultStore is the package-wide NOSQLStore implementation used by
// callers that do not supply their own.
var DefaultStore NOSQLStore

// NOSQLStore abstracts keyspace and column-family operations of a
// NoSQL backend. (Parameter names follow Go MixedCaps convention —
// renaming them does not affect implementers or callers.)
type NOSQLStore interface {
	CreateKeyspace(keyspaceName string) error
	DropKeyspace(keyspaceName string) error
	ShowKeyspaces() ([]Keyspace, error)
	ShowColumnFamily(ks, cf string) ([]map[string]string, error)
}

// Columnfamily names a column family within a keyspace.
type Columnfamily struct {
	Name string
}

// Row is a placeholder for a stored row.
type Row struct {
}

// Column is a single name/value cell with its write timestamp.
type Column struct {
	Name      string
	Value     string // should be something more generic
	Timestamp int
}

// Keyspace is a named container of column families.
type Keyspace struct {
	Name           string
	Columnfamilies []Columnfamily
}
package main

// LeetCode 808. Soup Servings
//
// Two soups A and B start with n ml each. Each turn one of four serving
// operations is chosen uniformly at random:
//   (100,0), (75,25), (50,50), (25,75) ml of (A,B).
// If there is not enough soup, serve as much as possible; stop when both
// soups are gone. No operation serves B first at 100 ml.
// Return P(A empties first) + P(A and B empty together)/2; answers
// within 1e-5 of the true value are accepted. 0 <= n <= 10^9.

// soupServings solves the problem with dynamic programming over 25 ml
// units. The answer converges to 1 as n grows, so n of at least
// 179 units short-circuits to 1, keeping the DP table small.
func soupServings(n int) float64 {
	// Work in 25 ml units, rounding up.
	units := (n + 24) / 25
	if units >= 179 {
		return 1
	}
	// prob[a][b] = P(A first) + P(both together)/2 given a units of A
	// and b units of B remain. prob[a][0] stays 0: B emptied first.
	prob := make([][]float64, units+1)
	for a := range prob {
		prob[a] = make([]float64, units+1)
	}
	prob[0][0] = 0.5 // both empty at the same moment
	for b := 1; b <= units; b++ {
		prob[0][b] = 1 // A already empty while B remains
	}
	for a := 1; a <= units; a++ {
		for b := 1; b <= units; b++ {
			// Average over the four equally likely operations; max
			// clamps "serve as much as possible" at zero.
			prob[a][b] = (prob[max(0, a-4)][b] +
				prob[max(0, a-3)][max(0, b-1)] +
				prob[max(0, a-2)][max(0, b-2)] +
				prob[max(0, a-1)][max(0, b-3)]) / 4
		}
	}
	return prob[units][units]
}
package authJwtToken

import (
	"encoding/json"
	"github.com/ethereal-go/ethereal"
	"github.com/ethereal-go/ethereal/root/app"
	"github.com/ethereal-go/ethereal/root/config"
	"github.com/justinas/alice"
	"net/http"
)

// MiddlewareJWTToken verifies JWT tokens on incoming requests. In
// "local" mode the verification result is attached to the context and
// the request always proceeds; in "global" mode requests with invalid
// tokens are rejected outright.
type MiddlewareJWTToken struct {
	EtherealClaims
	StatusError    int    // HTTP status returned on verification failure
	ResponseError  string // message returned on verification failure
	authenticated  bool
	responseWriter http.ResponseWriter
	included       bool // flag is enabled or disabled authJwtToken
}

// Add registers the JWT middleware in the alice chain according to the
// AUTH.JWT_TOKEN configuration value ("local" or "global").
func (m MiddlewareJWTToken) Add(where *[]alice.Constructor, application *app.Application) {
	confToken := config.GetCnf("AUTH.JWT_TOKEN").(string)
	if confToken == "local" {
		m.included = true
		*where = append(*where, func(handler http.Handler) http.Handler {
			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				m.responseWriter = w
				if check, err := m.Verify(r); !check {
					m.ResponseError = handlerErrorToken(err).Error()
				} else {
					m.authenticated = true
				}
				// Expose the verification outcome to downstream handlers.
				ethereal.CtxStruct(application, m)
				handler.ServeHTTP(w, r)
			})
		})
	} else if confToken == "global" {
		// Global mode: every request must carry a valid token.
		*where = append(*where, func(handler http.Handler) http.Handler {
			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				if check, err := m.Verify(r); !check {
					// BUG FIX: the status header must be written before the
					// body; previously Encode's implicit 200 won and the
					// configured StatusError was silently dropped.
					w.WriteHeader(m.StatusError)
					json.NewEncoder(w).Encode(handlerErrorToken(err).Error())
					return
				}
				// BUG FIX: valid requests were previously never forwarded,
				// so authenticated callers received an empty response.
				handler.ServeHTTP(w, r)
			})
		})
	}
}

// GetMiddlewareJwtToken returns a middleware preconfigured to respond
// with 511 Network Authentication Required on failure.
func GetMiddlewareJwtToken() MiddlewareJWTToken {
	return MiddlewareJWTToken{
		ResponseError: http.StatusText(http.StatusNetworkAuthenticationRequired),
		StatusError:   http.StatusNetworkAuthenticationRequired,
	}
}
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha3 import "fmt" const ( // ClusterctlLabelName defines the label that is applied to all the components managed by clusterctl. ClusterctlLabelName = "clusterctl.cluster.x-k8s.io" // ClusterctlCoreLabelName defines the label that is applied to all the core objects managed by clusterctl. ClusterctlCoreLabelName = "clusterctl.cluster.x-k8s.io/core" // ClusterctlResourceLifecyleLabelName defines the label that documents the lifecyle for a specific resource. // e.g. resources shared between instances of the same provider. e.g. CRDs, ValidatingWebhookConfiguration, MutatingWebhookConfiguration etc. // are marked as shared ClusterctlResourceLifecyleLabelName = "clusterctl.cluster.x-k8s.io/lifecycle" ) // ResourceLifecycle configures the lifecycle of a resource type ResourceLifecycle string const ( // ResourceLifecycleShared is the value we use when tagging resources to indicate // that the resource is shared between multiple instance of a provider, and should not be deleted // if an instance of the provider is deleted. ResourceLifecycleShared = ResourceLifecycle("shared") ) // ManifestLabel returns the cluster.x-k8s.io/provider label value for a provider/type. // Please note that this label uniquely identifies the provider, e.g. bootstrap-kubeadm, but not the instances of // the provider, e.g. namespace-1/bootstrap-kubeadm and namespace-2/bootstrap-kubeadm. 
func ManifestLabel(name string, providerType ProviderType) string { switch providerType { case CoreProviderType: return name case BootstrapProviderType: return fmt.Sprintf("bootstrap-%s", name) case ControlPlaneProviderType: return fmt.Sprintf("control-plane-%s", name) case InfrastructureProviderType: return fmt.Sprintf("infrastructure-%s", name) default: return fmt.Sprintf("unknown-type-%s", name) } }
package main import ( "fmt" "io/ioutil" "log" "net/http" "github.com/julienschmidt/httprouter" "github.com/szydell/mstools" "github.com/szydell/rjgtm/rjerr" "github.com/szydell/rjgtm/rjshared" ) func getGlvn(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { glvn := string(ps.ByName("glvn")) log.Println("GET glvn:" + glvn) w.Header().Set("Content-Type", "application/json") reply, err := workers.doWork("getGlvn", glvn) if err != nil { tmpID, tmpDescr := rjerr.ErrorTypeAndMessage(err) log.Println(tmpID, tmpDescr) http.Error(w, tmpDescr, tmpID) return } fmt.Fprintf(w, "%s", reply) log.Println("200 ", reply, err) } func getGvStat(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { log.Println("GET gvstat") w.Header().Set("Content-Type", "application/json") reply, err := workers.doWork("gvStats", "") if err != nil { tmpID, tmpDescr := rjerr.ErrorTypeAndMessage(err) log.Println(tmpID, tmpDescr) http.Error(w, tmpDescr, tmpID) return } fmt.Fprintf(w, "%s", reply) log.Println("200", reply, err) } func deleteGvStat(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { log.Println("DELETE gvstat") w.Header().Set("Content-Type", "application/json") reply, err := workers.doWork("cleanGvStats", "") if err != nil { tmpID, tmpDescr := rjerr.ErrorTypeAndMessage(err) log.Println(tmpID, tmpDescr) http.Error(w, tmpDescr, tmpID) return } fmt.Fprintf(w, "%s", reply) log.Println("200", reply, err) } func setGlvn(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { var glvn rjshared.Glvn glvn.Key = ps.ByName("glvn") log.Println("SET ", glvn.Key) body, err := ioutil.ReadAll(r.Body) if err != nil { mstools.ErrCheck(err) } glvn.Value = string(body) w.Header().Set("Content-Type", "application/json") reply, err := workers.doWork("setGlvn", glvn) if err != nil { tmpID, tmpDescr := rjerr.ErrorTypeAndMessage(err) log.Println(tmpID, tmpDescr) http.Error(w, tmpDescr, tmpID) return } fmt.Fprintf(w, "%s", reply) log.Println("200", reply, err) } func 
orderGlvn(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { glvn := string(ps.ByName("glvn")) log.Println("ORDER glvn:" + glvn) w.Header().Set("Content-Type", "application/json") reply, err := workers.doWork("orderGlvn", glvn) if err != nil { tmpID, tmpDescr := rjerr.ErrorTypeAndMessage(err) log.Println(tmpID, tmpDescr) http.Error(w, tmpDescr, tmpID) return } fmt.Fprintf(w, "%s", reply) log.Println("200 ", reply, err) } func prevGlvn(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { glvn := string(ps.ByName("glvn")) log.Println("PREVIOUS glvn:" + glvn) w.Header().Set("Content-Type", "application/json") reply, err := workers.doWork("prevGlvn", glvn) if err != nil { tmpID, tmpDescr := rjerr.ErrorTypeAndMessage(err) log.Println(tmpID, tmpDescr) http.Error(w, tmpDescr, tmpID) return } fmt.Fprintf(w, "%s", reply) log.Println("200 ", reply, err) } func queryGlvn(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { glvn := string(ps.ByName("glvn")) log.Println("QUERY glvn:" + glvn) w.Header().Set("Content-Type", "application/json") reply, err := workers.doWork("queryGlvn", glvn) if err != nil { tmpID, tmpDescr := rjerr.ErrorTypeAndMessage(err) log.Println(tmpID, tmpDescr) http.Error(w, tmpDescr, tmpID) return } fmt.Fprintf(w, "%s", reply) log.Println("200 ", reply, err) } func dataGlvn(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { glvn := string(ps.ByName("glvn")) log.Println("DATA glvn:" + glvn) w.Header().Set("Content-Type", "application/json") reply, err := workers.doWork("dataGlvn", glvn) if err != nil { tmpID, tmpDescr := rjerr.ErrorTypeAndMessage(err) log.Println(tmpID, tmpDescr) http.Error(w, tmpDescr, tmpID) return } fmt.Fprintf(w, "%s", reply) log.Println("200 ", reply, err) }
// Based on Gophercises tutorial // https://gophercises.com/exercises/image package graphgenerator import ( "math/rand" "os" svg "github.com/ajstarks/svgo" ) func rn(n int) int { return rand.Intn(n) } // main - generates an SVG graph based on inputs given. func main() { data := []struct { Label string Value int }{ {"Train", 25000}, {"Test", 25000}, {"Unlabeled", 50000}, } // []int{25, 25, 100} canvas := svg.New(os.Stdout) width := len(data)*80 + 10 max := 0 height := 300 for _, item := range data { if item.Value > max { max = item.Value } } canvas.Start(width, height) canvas.Rect(0, 0, width, height, "fill: white") for i, val := range data { percent := val.Value * (height - 50) / max canvas.Rect(i*60+10, (height-50)-percent, 50, percent, "fill:rgb(77,200,232)") canvas.Text(i*60+35, height-20, val.Label, "font-size: 14pt; fill: black; text-anchor: middle") canvas.Text(i*60+35, height+1, string(val.Value), "font-size: 14pt; fill: black; text-anchor: middle") } canvas.End() } // Note: Commented out for SVGo version // w, h := len(data)*60+10, 100 // r := image.Rect(0, 0, w, h) // img := image.NewRGBA(r) // bg := image.NewUniform(color.RGBA{240, 240, 240, 255}) // // blue := image.NewUniform(color.RGBA{250, 180, 180, 255}) // draw.Draw(img, r, bg, image.Point{0, 0}, draw.Src) // // for y := 0; y < h; y++ { // // for x := 0; x < w; x++ { // // img.Set(x, y, color.RGBA{255, 255, 255, 255}) // // } // // } // mask := image.NewRGBA(image.Rect(0, 0, w, h)) // for y := 0; y < h; y++ { // for x := 0; x < w; x++ { // alpha := uint8(0) // switch { // case y < 30: // alpha = 255 // case y < 50: // alpha = 100 // } // mask.Set(x, y, color.RGBA{ // R: uint8((x + y) & 255), // G: uint8((x + y) << 1 & 255), // B: uint8((x + y) << 2 & 255), // A: alpha, // }) // } // } // for i, dp := range data { // x0, y0 := (i*60 + 10), 100-dp // x1, y1 := (i+1)*60-1, 100 // bar := image.Rect(x0, y0, x1, y1) // grey := image.NewUniform(color.RGBA{180, 180, 180, 255}) // draw.Draw(img, bar, grey, 
image.Point{0, 0}, draw.Src) // red := image.NewUniform(color.RGBA{255, 0, 0, 255}) // draw.DrawMask(img, bar, red, image.Point{0, 0}, mask, image.Point{x0, y0}, draw.Over) // // for x := i*60 + 10; x < (i+1)*60; x++ { // // for y := 100; y >= (100 - dp); y-- { // // img.Set(x, y, color.RGBA{180, 180, 250, 255}) // // } // // } // } // f, err := os.Create("image.png") // if err != nil { // panic(err) // } // defer f.Close() // err = png.Encode(f, img) // if err != nil { // panic(err) // } // }
package main

import (
	"fmt"
	"strings"
)

/*
Given a non-empty string check if it can be constructed by taking a substring of it
and appending multiple copies of the substring together.

You may assume the given string consists of lowercase English letters only and its
length will not exceed 10000.

Example 1:
Input: "abab"
Output: True
Explanation: It's the substring "ab" twice.

Example 2:
Input: "aba"
Output: False

Example 3:
Input: "abcabcabcabc"
Output: True
Explanation: It's the substring "abc" four times. (And the substring "abcabc" twice.)
*/

// repeatedSubstringPattern reports whether s equals some proper prefix
// of itself repeated two or more times. It tries every prefix length
// that divides len(s) and is at most half of it.
func repeatedSubstringPattern(s string) bool {
	total := len(s)
	for size := 1; size <= total/2; size++ {
		// Only divisors of the total length can tile the string.
		if total%size != 0 {
			continue
		}
		if strings.Repeat(s[:size], total/size) == s {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(repeatedSubstringPattern("bb"))
}
/* Copyright 2022-2023 ICS-FORTH. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package tests import ( "strings" "github.com/carv-ics-forth/frisbee/cmd/kubectl-frisbee/commands/common" "github.com/kubeshop/testkube/pkg/ui" "github.com/spf13/cobra" ) func DeleteTestCmdCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return common.CompleteScenarios(cmd, args, toComplete) } type DeleteTestCmdOptions struct { DeleteAll, Force bool Selectors []string } func DeleteTestCmdFlags(cmd *cobra.Command, options *DeleteTestCmdOptions) { cmd.Flags().BoolVar(&options.DeleteAll, "all", false, "Delete all tests") cmd.Flags().StringSliceVarP(&options.Selectors, "label", "l", nil, "label key value pair: --label key1=value1") cmd.Flags().BoolVar(&options.Force, "force", false, "Force delete a stalled test") } func NewDeleteTestsCmd() *cobra.Command { var options DeleteTestCmdOptions cmd := &cobra.Command{ Use: "test <testName>", Aliases: []string{"tests", "t"}, Short: "Delete Test", ValidArgsFunction: DeleteTestCmdCompletion, Args: func(cmd *cobra.Command, args []string) error { if len(args) == 0 && !options.DeleteAll { ui.Failf("Pass Test name, --all flag to delete all or labels to delete by labels.") } if options.DeleteAll && options.Force { ui.Failf("Choose only one of --all or --force.") } if len(args) > 1 && options.Force { ui.Failf("To prevent intended deletions, --force is applicable at one test at a time.") } return nil }, Run: func(cmd *cobra.Command, args 
[]string) { switch { case options.Force: testName := args[0] ui.Info("Deleting test: ", testName) err := common.ForceDelete(testName) ui.ExitOnError("Force Delete "+testName, err) case options.DeleteAll: ui.Info("Deleting all tests with label: ", common.ManagedNamespace) err := common.DeleteNamespaces(common.ManagedNamespace) ui.ExitOnError("Delete all tests", err) case len(args) > 0: ui.Info("Deleting tests: ", args...) err := common.DeleteNamespaces("", args...) ui.ExitOnError("Delete tests", err) case len(options.Selectors) != 0: options.Selectors = append(options.Selectors, common.ManagedNamespace) selector := strings.Join(options.Selectors, ",") ui.Info("Deleting all tests with labels: ", common.ManagedNamespace) err := common.DeleteNamespaces(selector) ui.ExitOnError("Deleting tests by labels: "+selector, err) default: cmd.Help() } }, } DeleteTestCmdFlags(cmd, &options) return cmd }
package wire

import (
	"io"

	"github.com/multivactech/MultiVAC/base/rlp"
)

// Timestamp defines the type of time.
type Timestamp int64

// HeartBeatMsg is a type of message heartbeat.
type HeartBeatMsg struct {
	Pk        []byte // the miner's public key
	TimeStamp Timestamp
	Proof     []byte // the proof of a mine
	Signature []byte // the signature of TimeStamp
}

// BtcDecode decodes the message from r using RLP encoding.
// TODO(jylu)
func (m *HeartBeatMsg) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error {
	return rlp.Decode(r, m)
}

// Deserialize reads the wire representation of the message from r.
// TODO(jylu)
func (m *HeartBeatMsg) Deserialize(r io.Reader) error {
	return m.BtcDecode(r, 0, BaseEncoding)
}

// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// TODO(jylu)
func (m *HeartBeatMsg) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error {
	return rlp.Encode(w, m)
}

// Serialize writes the wire representation of the message to w.
// TODO(jylu)
func (m *HeartBeatMsg) Serialize(w io.Writer) error {
	return m.BtcEncode(w, 0, BaseEncoding)
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (m *HeartBeatMsg) Command() string {
	//TODO(jylu)
	return CmdHeartBeat
}

// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (m *HeartBeatMsg) MaxPayloadLength(pver uint32) uint32 {
	//TODO(jylu)
	return MaxBlockPayload
}
package main // service 层不处理error func GetValue(id int) (string, error){ return DaoGet(id) }
package poller

import (
	"fmt"
	"time"

	"github.com/gbolo/vsummary/db"
)

// BuiltInCollector is a package-wide instance of InternalCollector
var BuiltInCollector InternalCollector

// InternalCollector is an internal poller manager that contains a list of
// active pollers as well as a backend db.
type InternalCollector struct {
	ActivePollers []*InternalPoller
	backend       db.Backend
}

// NewEmptyInternalCollector returns an empty InternalCollector
func NewEmptyInternalCollector() *InternalCollector {
	return &InternalCollector{}
}

// SetBackend allows InternalCollector to connect to backend database
func (i *InternalCollector) SetBackend(backend db.Backend) {
	i.backend = backend
}

// addIfUnique will spawn a new poller thread for a given poller, if one does
// not already exist. It will also stop a running poller if it notices that
// poller should be disabled.
func (i *InternalCollector) addIfUnique(p InternalPoller) {
	spawnPoller := true
	uniquePoller := true

	for k, a := range i.ActivePollers {
		// TODO: instead of host, we should use vcenter UUID to determine if it's truly unique
		if a.Config.VcenterURL != p.Config.VcenterURL {
			continue
		}
		uniquePoller = false
		spawnPoller = false

		// stop the poller if it is marked as disabled
		if !p.Enabled && a.Poller.Enabled {
			log.Infof("poller state has changed to disabled for %s", a.Config.VcenterURL)
			i.ActivePollers[k].Enabled = false
			i.ActivePollers[k].StopPolling()
		}

		// spawn a new poller since it was disabled
		if p.Enabled && !a.Enabled {
			log.Infof("poller state has changed to enabled for %s", a.Config.VcenterURL)
			i.ActivePollers[k].Enabled = true
			spawnPoller = true
		}
	}

	if spawnPoller {
		if uniquePoller {
			log.Infof("spawning new poller for %s", p.Config.VcenterURL)
		} else {
			log.Infof("respawning poller for %s", p.Config.VcenterURL)
		}
		i.ActivePollers = append(i.ActivePollers, &p)

		// spawn a go routine for this poller
		go p.Daemonize()
	}
}

// RefreshPollers gets a list of pollers from backend database
// then populates internalPoller list of ActivePollers with it.
func (i *InternalCollector) RefreshPollers() {
	pollers, err := i.backend.GetPollers()
	if err != nil {
		log.Errorf("error getting pollers: %v", err)
		return
	}

	// add unique new internal pollers
	var backendPollerURLs []string
	for _, p := range pollers {
		if p.Internal {
			internalPoller := NewInternalPoller(p)
			internalPoller.SetBackend(i.backend)
			i.addIfUnique(*internalPoller)
			backendPollerURLs = append(backendPollerURLs, fmt.Sprintf("https://%s/sdk", p.VcenterHost))
		}
	}

	// remove pollers that are no longer present or disabled
	i.StopPollersByURL(difference(i.GetActivePollerURLs(), backendPollerURLs))
}

// GetActivePollerURLs returns a list of active pollers by URL
func (i *InternalCollector) GetActivePollerURLs() (urls []string) {
	for _, p := range i.ActivePollers {
		urls = append(urls, p.Config.VcenterURL)
	}
	return
}

// StopPollersByURL will stop active pollers that match the list of URLs
func (i *InternalCollector) StopPollersByURL(urls []string) {
	for _, url := range urls {
		for _, p := range i.ActivePollers {
			if p.Config.VcenterURL == url && p.Enabled {
				log.Warningf("poller URL is active in memory but no longer listed in backend: %v", url)
				p.StopPolling()
			}
		}
	}
}

// difference returns the elements in a that aren't in b
func difference(a, b []string) []string {
	mb := map[string]bool{}
	for _, x := range b {
		mb[x] = true
	}
	ab := []string{}
	for _, x := range a {
		if _, ok := mb[x]; !ok {
			ab = append(ab, x)
		}
	}
	return ab
}

// Run is a blocking loop. This should only be executed once.
// Refreshing of the pollers is also handled in this function.
func (i *InternalCollector) Run() {
	// time.Tick would leak its ticker; use NewTicker and stop it on return.
	ticker := time.NewTicker(defaultRefreshInterval)
	defer ticker.Stop()

	i.RefreshPollers()

	// refresh pollers forever
	for range ticker.C {
		i.RefreshPollers()
	}
}

// PollPollerById executes a poll to a matching internal poller by id
func (i *InternalCollector) PollPollerById(id string) (errs []error) {
	for _, p := range i.ActivePollers {
		if p.Config.Id == id {
			errs = p.PollThenStore()
		}
	}
	return
}
package models

import "github.com/tahmooress/motor-shop/internal/pkg/customeerror"

// Application error catalogue. Each entry pairs a numeric code with an
// English message (for logs) and a Persian message (user-facing).
// Codes run 1000-1015; ErrParams uses 1029 out of sequence.
// NOTE(review): several English messages contain typos ("is not exits",
// "cant") — they are runtime strings and are left unchanged here in case
// callers or clients match on them; fix in a dedicated change.
var (
	// Environment / startup configuration errors.
	ErrEmptyEnvironment = customeerror.New("1000", "environment variables must be not empty",
		"پارامترهای وروری برنامه خالی هستند")
	ErrEmptyIPANDPORT = customeerror.New("1001", "some fields are empty in cli: IP, Port, routers",
		"پارامترهای آی پی و پورت وارد نشده است")
	// Generic fallback error.
	ErrUnknown = customeerror.New("1002", "Unknown error", "ارور ناشناخته")
	// Authentication / user errors.
	ErrAuthorization           = customeerror.New("1003", "unauthorized user", "خطای دسترسی کاربر")
	ErrUserNotFound            = customeerror.New("1004", "user not found", "کاربری با این مشخصات در سیستم موجود تیست")
	ErrPasswordIsWrong         = customeerror.New("1005", "password is wrong", "رمز عبور اشتباه است")
	ErrAdminAccessibilityEmpty = customeerror.New("1006", "admin accessibility cant be empty", "دسترسی ادمین نمیتواند خالی باشد")
	ErrUserIsTaken             = customeerror.New("1007", "user name is already taken", "کاربر با این نام قبلا در سیستم ذخیره شده است")
	// Request validation error (code intentionally kept as declared: 1029).
	ErrParams = customeerror.New("1029", "required parameters for request are empty",
		"پارامترهای لازم در درخواست ارسال نشده است",
	)
	// Domain entity errors (motors, shops, equity, factors, transactions).
	ErrMotorIsNotExist  = customeerror.New("1008", "motor with this id and name is not exist in shop", "موتور با این مشخصات در سیستم موجود نیست")
	ErrMobile           = customeerror.New("1009", "mobile number is not valid", "شماره موبایل صحیح نمیباشد.")
	ErrShopNotExist     = customeerror.New("1010", "shop with this id is not exist", "فروشگاهی با این شماره شناسایی موجود نیست")
	ErrShopAlreadyExist = customeerror.New("1011", "shop with this name is already exist", "فروشگاهی با این نام و مشخصات قبلا در سیستم ثبت شده است.")
	ErrEquityID         = customeerror.New("1012", "equity with this id is not exits", "حسابی با این شماره شناسیایی در سیستم موجود تیسن")
	ErrFactorNotExist   = customeerror.New("1013", "factor with this number is not exist", "فاکتوری با این شماره در سیستم موجود نیست")
	ErrTxTypeAndSubject = customeerror.New("1014", "type or subject of transaction is not valid", "فیلد نوع یا عنوان برای تراکنش ورودی معتبر نیست")
	ErrIDIsNotValid     = customeerror.New("1015", "entry id number is not valid", "شماره شناسایی ورودی یک شماره شناسیایی معتبر نمیباشد")
)
package main

import (
	"log"
	"os"
	"strconv"
	"time"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/bugsnag/bugsnag-go"
	"github.com/caarlos0/env"
	"github.com/rcrowley/go-metrics"
	"github.com/vrischmann/go-metrics-influxdb"
)

// ConfigS holds process configuration, populated from environment variables.
type ConfigS struct {
	Bind          string `env:"BIND"`
	ApiPort       int    `env:"PORT" envDefault:"8080"`
	LFPort        int    `env:"LEGACY_PORT" envDefault:"8000"`
	RedisUrl      string `env:"REDIS_URL" envDefault:"redis://localhost:6379/"`
	RedisCluster  bool   `env:"REDIS_CLUSTER"`
	WanAddr       string `env:"WAN_ADDR"`
	BugsnagApiKey string `env:"BUGSNAG_API_KEY"`
}

// Config is the global process configuration.
var Config = &ConfigS{}

// ApiAddr returns the listen address of the API server (BIND:PORT).
func (c *ConfigS) ApiAddr() string {
	return c.Bind + ":" + strconv.Itoa(c.ApiPort)
}

// LFAddr returns the listen address of the legacy server (BIND:LEGACY_PORT).
func (c *ConfigS) LFAddr() string {
	return c.Bind + ":" + strconv.Itoa(c.LFPort)
}

// getWanAddr resolves Config.WanAddr: explicit WAN_ADDR wins, then BIND,
// otherwise the address is resolved through the EC2 instance metadata API.
func getWanAddr() {
	if len(Config.WanAddr) < 1 {
		if len(Config.Bind) > 0 {
			Config.WanAddr = Config.ApiAddr()
		} else {
			log.Println("resolving bind address through ec2 api")
			// Create the AWS session only when it is actually needed, so
			// non-AWS deployments with BIND/WAN_ADDR set never touch the SDK.
			sess, err := session.NewSession()
			if err != nil {
				panic(err)
			}
			e2m := ec2metadata.New(sess)
			ec2id, err := e2m.GetInstanceIdentityDocument()
			if err != nil {
				log.Println("GetInstanceIdentityDocument failed. Specify BIND= env if not on aws!")
				// log.Panic never returns; the old trailing `return` was unreachable.
				log.Panic(err)
			}
			// ApiAddr() is ":<port>" here since Bind is empty.
			Config.WanAddr = ec2id.PrivateIP + Config.ApiAddr()
		}
	}
	log.Println("registry wan", Config.WanAddr)
}

func main() {
	if err := env.Parse(Config); err != nil {
		log.Panic(err)
	}
	getWanAddr()

	hostname, err := os.Hostname()
	if err != nil {
		panic(err)
	}

	bugsnag.Configure(bugsnag.Configuration{
		APIKey: Config.BugsnagApiKey,
	})

	reg := metrics.NewPrefixedChildRegistry(metrics.DefaultRegistry, "lifeline.")
	metrics.RegisterDebugGCStats(reg)
	metrics.RegisterRuntimeMemStats(reg)
	go metrics.CaptureDebugGCStats(reg, time.Second*5)
	go metrics.CaptureRuntimeMemStats(reg, time.Second*5)

	// SECURITY(review): InfluxDB URL and credentials are hardcoded; they
	// should come from the environment like the rest of the configuration.
	go influxdb.InfluxDBWithTags(
		metrics.DefaultRegistry,                       // metrics registry
		time.Second*10,                                // interval
		"http://influxdb-monitor.superscale.io:8086/", // the InfluxDB url
		"monitor",                                     // your InfluxDB database
		"myuser",                                      // your InfluxDB user
		"mypassword",                                  // your InfluxDB password
		map[string]string{"host": hostname},
	)

	lifelines = make(map[string]*Lifeline)
	initRegistry()
	go LifeLineServer()
	ApiServer()
}
package main

import (
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/syfaro/haste-client"
)

var hasteClient *haste.Haste

// uploadFile reads the named file and uploads its contents, printing the
// resulting paste link on success. Exits non-zero on any failure.
func uploadFile(name string) {
	data, err := ioutil.ReadFile(name)
	if err != nil {
		log.Printf("Unable to read file: %s\n", err.Error())
		os.Exit(2)
	}

	resp, err := hasteClient.UploadBytes(data)
	if err != nil {
		log.Printf("Error uploading: %s\n", err.Error())
		os.Exit(3)
	}

	fmt.Println(resp.GetLink(hasteClient))
}

// fetchFile downloads the paste with the given key and prints it to stdout.
func fetchFile(key string) {
	resp, err := hasteClient.Fetch(key)
	if err != nil {
		log.Printf("Error fetching: %s\n", err.Error())
		os.Exit(3)
	}

	fmt.Print(resp)
}

func main() {
	action := flag.String("action", "upload", "If should upload or fetch")
	host := flag.String("host", "http://hastebin.com", "Host to upload paste to")

	flag.Parse()

	if os.Getenv("HASTE_SERVER") != "" {
		hasteClient = haste.NewHaste(os.Getenv("HASTE_SERVER"))
	} else {
		hasteClient = haste.NewHaste(*host)
	}

	// Bug fix: len(os.Args) is never 0 (args[0] is the program name), so the
	// old guard could not fire and the "item" could even be a flag value.
	// Use the parsed positional arguments instead.
	if flag.NArg() == 0 {
		log.Println("You need to provide a filename to upload or key to fetch!")
		os.Exit(1)
	}

	item := flag.Arg(flag.NArg() - 1)

	if *action == "upload" {
		uploadFile(item)
	} else {
		fetchFile(item)
	}
}
package main

import (
	"fmt"
	"sync"
	"time"
)

// helloWait greets name `number` times, pausing half a second between
// greetings, and marks the wait group done when it finishes.
func helloWait(wg *sync.WaitGroup, name string, number uint8) {
	defer wg.Done()

	for count := uint8(0); count < number; count++ {
		fmt.Println(count, ": Hello ", name)
		time.Sleep(500 * time.Millisecond)
	}
}

func main() {
	var group sync.WaitGroup

	group.Add(2)
	go helloWait(&group, "Michel", 10)
	go helloWait(&group, "ERNI", 10)

	group.Wait()
}
package gohotdraw type DefaultFigureDecorator struct { *DefaultFigure figure Figure //the decorated figure } func NewDefaultFigureDecorator(figure Figure) *DefaultFigureDecorator { this := new(DefaultFigureDecorator) this.DefaultFigure = newDefaultFigure() this.figure = figure return this } //forward call to contained figure func (this *DefaultFigureDecorator) Contains(point *Point) bool { return this.figure.Contains(point) } //forward call to contained figure func (this *DefaultFigureDecorator) Draw(g Graphics) { this.figure.Draw(g) } //forward call to contained figure func (this *DefaultFigureDecorator) GetDisplayBox() *Rectangle { return this.figure.GetDisplayBox() } //forward call to contained figure func (this *DefaultFigureDecorator) GetHandles() *Set { return this.figure.GetHandles() } //forward call to contained figure func (this *DefaultFigureDecorator) Includes(figure Figure) bool { return this.figure.Includes(figure) } //forward call to contained figure func (this *DefaultFigureDecorator) Release() { this.DefaultFigure.Release(this.figure) } //forward call to contained figure func (this *DefaultFigureDecorator) basicMoveBy(dx, dy int) { this.figure.basicMoveBy(dx,dy) } //forward call to contained figure func (this *DefaultFigureDecorator) setBasicDisplayBox(topLeft *Point, bottomRight *Point) { this.figure.setBasicDisplayBox(topLeft , bottomRight) } type BorderDecorator struct { *DefaultFigureDecorator } func NewBorderDecorator(figure Figure) *BorderDecorator { this := new(BorderDecorator) this.DefaultFigureDecorator = NewDefaultFigureDecorator(figure) return this } func (this *BorderDecorator) Draw(g Graphics) { this.DefaultFigureDecorator.Draw(g) g.SetFGColor(Black) g.DrawBorderFromRect(this.GetDisplayBox()) } func (this *BorderDecorator) Clone() Figure { return NewBorderDecorator(this.figure) }
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. // See LICENSE.txt for license information. // package k8s import ( "testing" "github.com/stretchr/testify/require" rbacv1 "k8s.io/api/rbac/v1" rbacbetav1 "k8s.io/api/rbac/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestClusterRolesV1(t *testing.T) { testClient := newTestKubeClient() clusterRole := &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "test-deployment"}, } t.Run("create cluster role", func(t *testing.T) { result, err := testClient.createOrUpdateClusterRoleV1(clusterRole) require.NoError(t, err) require.Equal(t, clusterRole.GetName(), result.GetName()) }) t.Run("create duplicate cluster role", func(t *testing.T) { result, err := testClient.createOrUpdateClusterRoleV1(clusterRole) require.NoError(t, err) require.Equal(t, clusterRole.GetName(), result.GetName()) }) } func TestClusterRolesBetaV1(t *testing.T) { testClient := newTestKubeClient() clusterRole := &rbacbetav1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "test-deployment"}, } t.Run("create cluster role", func(t *testing.T) { result, err := testClient.createOrUpdateClusterRoleBetaV1(clusterRole) require.NoError(t, err) require.Equal(t, clusterRole.GetName(), result.GetName()) }) t.Run("create duplicate cluster role", func(t *testing.T) { result, err := testClient.createOrUpdateClusterRoleBetaV1(clusterRole) require.NoError(t, err) require.Equal(t, clusterRole.GetName(), result.GetName()) }) } func TestRolesV1(t *testing.T) { testClient := newTestKubeClient() role := &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: "test-role", Namespace: "test-role-ns", }, } t.Run("create role", func(t *testing.T) { result, err := testClient.createOrUpdateRoleV1(role) require.NoError(t, err) require.Equal(t, role.GetName(), result.GetName()) }) t.Run("create cluster role", func(t *testing.T) { result, err := testClient.createOrUpdateRoleV1(role) require.NoError(t, err) require.Equal(t, role.GetName(), 
result.GetName()) }) } func TestRolesBetaV1(t *testing.T) { testClient := newTestKubeClient() role := &rbacbetav1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: "test-role", Namespace: "test-role-ns", }, } t.Run("create role", func(t *testing.T) { result, err := testClient.createOrUpdateRoleBetaV1(role) require.NoError(t, err) require.Equal(t, role.GetName(), result.GetName()) }) t.Run("create duplicate role", func(t *testing.T) { result, err := testClient.createOrUpdateRoleBetaV1(role) require.NoError(t, err) require.Equal(t, role.GetName(), result.GetName()) }) }
// Package bufpool provides a sync.Pool of bytes.Buffer. package bufpool import ( "bytes" "sync" ) const maxSize = 1 << 16 // 64 KiB. See https://github.com/golang/go/issues/23199 var p = &sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } // Get gets a buffer from the pool, resets it and returns it. func Get() *bytes.Buffer { buf := p.Get().(*bytes.Buffer) buf.Reset() return buf } // Put puts the buffer to the pool. // WARNING: the call MUST NOT reuse the buffer's content after this call. func Put(buf *bytes.Buffer) { if buf.Cap() <= maxSize { p.Put(buf) } }
package main

// Exercise program for interface dispatch: prints results of calls through
// interface values, type assertions, and embedded interfaces. The println
// output depends on the exact statement order, so the sequence must not be
// rearranged.
func main() {
	// Simple iface
	var a Iface1 = &Iface1Struct{"foo"}
	println("iface 1", a.Foo("bar"))
	// Foo2 has a pointer receiver and rewrites the struct in place.
	a.Foo2("baz")
	// Type assertion back to the concrete type to read the private field.
	println("iface 2", a.Foo("test"), a.(*Iface1Struct).foo)
	b := Iface1Struct{"bar"}
	c := Iface1(&b)
	// Foo3 has a value receiver, so b.foo is unchanged by the call.
	println("iface 3", c.Foo3(a, "a", "b"), b.foo)
	// // Embeds
	var d Iface4Num
	d += 14
	// Iface4 embeds Iface2 and Iface3; all three methods dispatch on the
	// same Iface4Num value.
	var e Iface4 = d
	println("iface 4", e.Foo(12), e.Bar(), e.Baz(35))
	// Assert the embedded-interface portion and call through it.
	println("iface 5", e.(Iface2).Foo(15))
	// TODO
	// empty
	// anon
	// local
	// out of package pointer methods and non-pointer methods
	// returns, params, slice subjects, etc
}

// Iface1 is a simple interface with value-, pointer-, and variadic-style
// methods implemented by Iface1Struct.
type Iface1 interface {
	Foo(string) string
	Foo2(string)
	Foo3(i Iface1, strs ...string) string
}

// Iface1Struct carries a single private string manipulated by its methods.
type Iface1Struct struct {
	foo string
}

// Foo (value receiver) returns the field joined with the argument.
func (s Iface1Struct) Foo(str string) string {
	return s.foo + "-foo-" + str
}

// Foo2 (pointer receiver) replaces the whole struct value.
func (s *Iface1Struct) Foo2(str string) {
	*s = Iface1Struct{s.foo + "-foo2-" + str}
}

// Foo3 (value receiver) accumulates i.Foo over each variadic element into
// the receiver's copy of foo and returns it; the caller's struct is untouched.
func (s Iface1Struct) Foo3(i Iface1, strs ...string) string {
	for _, v := range strs {
		s.foo += i.Foo("-" + v + "-")
	}
	return s.foo
}

// Iface2 and Iface3 are the pieces embedded by Iface4.
type Iface2 interface {
	Foo(i int) int
}

type Iface3 interface {
	Bar() uint16
}

// Iface4 embeds two interfaces and adds one method of its own.
type Iface4 interface {
	Iface2
	Iface3
	Baz(uint16) Iface4Num
}

// Iface4Num is a numeric type implementing all of Iface4 with value receivers.
type Iface4Num uint16

func (i Iface4Num) Foo(j int) int {
	return int(i) * j
}

func (i Iface4Num) Bar() uint16 {
	return uint16(i) * 3
}

func (i Iface4Num) Baz(j uint16) Iface4Num {
	return i * Iface4Num(j)
}
package main

import (
	"fmt"
	"io"
	"net"

	"code/chatroom/comon/message"
	"code/chatroom/server/process"
	"code/chatroom/server/util"
)

// Processor handles all messages arriving on one client connection.
type Processor struct {
	Conn net.Conn
}

// process2 reads packets from the connection in a loop and dispatches each
// one until the client disconnects (io.EOF) or an error occurs.
func (p *Processor) process2() (err error) {
	for {
		transfer := &util.Transfer{
			Conn: p.Conn,
		}
		mes, err := transfer.ReadPkg()
		if err != nil {
			if err == io.EOF {
				fmt.Println("客户端推出服务,服务器也退出..")
			} else {
				fmt.Println("readPkg err", err)
			}
			return err
		}
		fmt.Println("msg=", mes)
		if err = p.serverProcessMes(&mes); err != nil {
			return err
		}
	}
}

// serverProcessMes dispatches a single message to the handler matching its
// type (login, registration, or group chat).
func (p *Processor) serverProcessMes(mes *message.Message) (err error) {
	switch mes.Type {
	case message.LoginMesType:
		// Login request.
		userProc := &process.UserProcess{
			Conn: p.Conn,
		}
		if err = userProc.ServerProcessLogin(mes); err != nil {
			return
		}
	case message.RegisterMesType:
		// Registration request.
		userProc := &process.UserProcess{
			Conn: p.Conn,
		}
		if err = userProc.ServerProcessRegister(mes); err != nil {
			return
		}
	case message.SmsMesType:
		// Group chat message: broadcast to all online users.
		smsProc := &process.SmsProcess{}
		smsProc.SendGroupMes(mes)
	default:
		fmt.Println("消息类型不存在....")
	}
	return
}
package conn

import (
	"fmt"
	"reflect"
	"time"

	json "github.com/intel-go/fastjson"
	nats "github.com/nats-io/go-nats"

	. "github.com/sencydai/utils/log"
)

// SysMessage is a fire-and-forget system message.
type SysMessage struct {
	MsgID int
	Data  []byte
}

// ResponseMessage is an rpc message: Subject selects the handler, Data
// carries the (JSON-encoded) arguments.
type ResponseMessage struct {
	Subject string
	Data    interface{}
}

// emptyResponseData is used as the reply target when the caller of Call
// passes out == nil.
type emptyResponseData struct {
}

var emptyRespData = &emptyResponseData{}

// MsgHandle is the handler interface for system messages.
type MsgHandle func(*SysMessage)

// ResponseHandle is the rpc handler interface.
// func(in ...interface{}) interface{} | void
type ResponseHandle interface{}

// NatsServer wraps an encoded NATS connection plus the registered
// message and rpc handler tables.
type NatsServer struct {
	Name        string
	Conn        *nats.EncodedConn
	MsgHandles  map[int]MsgHandle
	RespHandles map[string]ResponseHandle
}

// RegMsgHandle registers a system-message handler for msgID.
func (ns *NatsServer) RegMsgHandle(msgID int, handle MsgHandle) {
	ns.MsgHandles[msgID] = handle
}

// RegResponseHandle registers an rpc handler for subject.
// The handle must be a func; its signature is inspected via reflection at
// dispatch time to decode the arguments.
func (ns *NatsServer) RegResponseHandle(subject string, handle ResponseHandle) {
	// Obtain the handler's type so its parameter count/kinds are known.
	t := reflect.TypeOf(handle)
	if t.Kind() != reflect.Func {
		log.Errorf("RegResponseHandle: subject(%s) reg not func type", subject)
		return
	}
	ns.RespHandles[subject] = handle
}

func disconnectHandler(conn *nats.Conn) {
	log.Warnf("conn disconnect %v ...", conn.Servers())
}

func reconnectHandler(conn *nats.Conn) {
	log.Infof("conn reconnect %v %d ...", conn.Servers(), conn.Reconnects)
}

var (
	log = DefaultLogger

	// conns maps a logical connection type name to its encoded connection.
	conns = make(map[string]*nats.EncodedConn)
)

// SetLog replaces the package logger.
func SetLog(l ILogger) {
	log = l
}

// RegConnection establishes a new named NATS connection to url.
// NOTE(review): ReconnectWait uses time.Microsecond*500 (0.5 ms) — confirm
// this was not meant to be time.Millisecond*500.
func RegConnection(typeName, url string) bool {
	conn, err := nats.Connect(url,
		nats.Name(typeName),
		nats.MaxReconnects(-1),
		nats.ReconnectWait(time.Microsecond*500),
		nats.DisconnectHandler(disconnectHandler),
		nats.ReconnectHandler(reconnectHandler))
	if err != nil {
		return false
	}
	econn, _ := nats.NewEncodedConn(conn, nats.JSON_ENCODER)
	conns[typeName] = econn

	log.Infof("RegConnection %s %s success", typeName, url)

	return true
}

// GetConnection returns the connection registered under typeName.
func GetConnection(typeName string) (*nats.EncodedConn, bool) {
	conn, ok := conns[typeName]
	return conn, ok
}

// Close flushes (with a 5s timeout) and closes every registered connection.
func Close() {
	for _, conn := range conns {
		conn.FlushTimeout(time.Second * 5)
		conn.Close()
	}
}

// InitNatsServer initializes a NatsServer and binds its subscriptions:
// "<name>.msg" for system messages and "<name>.response" for rpc.
// Returns false if the server is already initialized or typeName is unknown.
func InitNatsServer(server *NatsServer, typeName string) bool {
	if server.Conn != nil {
		return false
	}
	econn, ok := conns[typeName]
	if !ok {
		return false
	}
	server.Conn = econn

	// System messages: received over a buffered channel, each dispatched on
	// its own goroutine with panic recovery.
	msgChan := make(chan *SysMessage, 100)
	server.Conn.BindRecvChan(fmt.Sprintf("%s.msg", server.Name), msgChan)
	go func(ch chan *SysMessage) {
		handle := func(fun MsgHandle, msg *SysMessage) {
			defer func() {
				if err := recover(); err != nil {
					log.Errorf("%s handle msg %d error: %v", server.Name, msg.MsgID, err)
				}
			}()
			fun(msg)
		}
		for msg := range ch {
			if fun, ok := server.MsgHandles[msg.MsgID]; ok {
				go handle(fun, msg)
			}
		}
	}(msgChan)

	// RPC: each request is dispatched on its own goroutine; the handler's
	// arguments are reconstructed from the JSON payload via reflection on
	// the registered handler's signature.
	server.Conn.Subscribe(fmt.Sprintf("%s.response", server.Name), func(_, reply string, msg *ResponseMessage) {
		go func(msg *ResponseMessage) {
			defer func() {
				if err := recover(); err != nil {
					log.Errorf("%s handle response %s error: %v", server.Name, msg.Subject, err)
				}
			}()
			cb, ok := server.RespHandles[msg.Subject]
			if !ok {
				return
			}
			cbType := reflect.TypeOf(cb)
			cbValue := reflect.ValueOf(cb)
			var oV []reflect.Value
			if cbType.NumIn() > 0 {
				// Data arrives as a doubly-nested slice; each element is
				// re-marshalled then unmarshalled into the exact parameter
				// type expected by the handler.
				data := msg.Data.([]interface{})[0].([]interface{})
				oV = make([]reflect.Value, len(data))
				var argType reflect.Type
				for i, v := range data {
					argType = cbType.In(i)
					var oPtr reflect.Value
					if argType.Kind() != reflect.Ptr {
						oPtr = reflect.New(argType)
					} else {
						oPtr = reflect.New(argType.Elem())
					}
					d, _ := json.Marshal(v)
					json.Unmarshal(d, oPtr.Interface())
					if argType.Kind() != reflect.Ptr {
						oPtr = reflect.Indirect(oPtr)
					}
					oV[i] = oPtr
				}
			}
			ret := cbValue.Call(oV)
			// Only synchronous calls (Call) carry a reply subject; publish
			// the first return value, or nil for void handlers.
			if reply != "" {
				if len(ret) == 0 {
					server.Conn.Publish(reply, nil)
				} else {
					value := ret[0]
					server.Conn.Publish(reply, value.Interface())
				}
			}
		}(msg)
	})

	log.Infof("register nats server %s %s success", typeName, server.Name)

	return true
}

// Publish sends a system message to "<serverName>.msg".
func Publish(typeName, serverName string, msgID int, data []byte) {
	con, ok := GetConnection(typeName)
	if !ok {
		log.Warnf("nats %s not connect", typeName)
		return
	}
	msg := &SysMessage{MsgID: msgID, Data: data}
	con.Publish(fmt.Sprintf("%s.msg", serverName), msg)
}

// Call performs a synchronous rpc with a 10s timeout; the reply is decoded
// into out (pass nil for handlers without a meaningful result).
func Call(typeName, serverName string, subject string, out interface{}, data ...interface{}) bool {
	con, ok := GetConnection(typeName)
	if !ok {
		log.Warnf("nats %s not connect", typeName)
		return false
	}
	if out == nil {
		out = emptyRespData
	}
	err := con.Request(fmt.Sprintf("%s.response", serverName), &ResponseMessage{Subject: subject, Data: data}, out, time.Second*10)
	if err != nil {
		log.Errorf("call %s subject error: %s", subject, err.Error())
		return false
	}
	return true
}

// Cast performs an asynchronous (fire-and-forget) rpc.
func Cast(typeName, serverName string, subject string, data ...interface{}) {
	con, ok := GetConnection(typeName)
	if !ok {
		log.Warnf("nats %s not connect", typeName)
		return
	}
	con.Publish(fmt.Sprintf("%s.response", serverName), &ResponseMessage{Subject: subject, Data: data})
}
package crawler

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/ReToCode/GoStats/config"
	"github.com/ReToCode/GoStats/store"
)

// jobsBody is the decode target for the jobs API response; only the total
// hit count is extracted.
var jobsBody struct {
	Hits int `json:"total_hits"`
}

// CrawlStats starts a background goroutine that polls the jobs API once
// every 24 hours and stores the hit counts per keyword.
func CrawlStats() {
	fmt.Println("creating ticker for crawler")
	ticker := time.NewTicker(time.Hour * 24)
	go func() {
		for t := range ticker.C {
			fmt.Println("started a crawler at", t)
			getHitsFromWeb()
		}
	}()
}

// getHitsFromWeb fetches the hit count for every stored keyword and persists
// it. Failures for one keyword are logged and the remaining keywords are
// still processed (previously log.Fatalf killed the whole process on any
// transient HTTP or decode error).
func getHitsFromWeb() {
	kws := store.GetKeywords()
	for _, k := range kws {
		res, err := http.Get(config.JobsUrl + k)
		if err != nil {
			log.Printf("error during http request with verb: %v", k)
			continue
		}

		decErr := json.NewDecoder(res.Body).Decode(&jobsBody)
		// Bug fix: the response body was never closed, leaking connections.
		res.Body.Close()
		if decErr != nil {
			log.Printf("error http response is not valid: %v", res.Body)
			continue
		}

		fmt.Printf("got new hit value for %v. hits: %v\n", k, jobsBody.Hits)
		store.SaveHitStats(k, jobsBody.Hits)
	}
}
package msgraph

import (
	"encoding/json"
	"fmt"
	"time"
)

// CalendarEvent represents a single event within a calendar
type CalendarEvent struct {
	ID                    string
	CreatedDateTime       time.Time      // Creation time of the CalendarEvent, has the correct timezone set from OriginalStartTimeZone (json)
	LastModifiedDateTime  time.Time      // Last modified time of the CalendarEvent, has the correct timezone set from OriginalEndTimeZone (json)
	OriginalStartTimeZone *time.Location // The original start-timezone, is already integrated in the calendartimes. Caution: is UTC on full day events
	OriginalEndTimeZone   *time.Location // The original end-timezone, is already integrated in the calendartimes. Caution: is UTC on full day events
	ICalUID               string
	Subject               string
	Importance            string
	Sensitivity           string
	IsAllDay              bool   // true = full day event, otherwise false
	IsCancelled           bool   // calendar event has been cancelled but is still in the calendar
	IsOrganizer           bool   // true if the calendar owner is the organizer
	SeriesMasterID        string // the ID of the master-entry of this series-event if any
	ShowAs                string
	Type                  string
	ResponseStatus        ResponseStatus // how the calendar-owner responded to the event (normally "organizer" because support-calendar is the host)
	StartTime             time.Time      // starttime of the Event, correct timezone is set
	EndTime               time.Time      // endtime of the event, correct timezone is set
	Attendees             Attendees      // represents all attendees to this CalendarEvent
	OrganizerName         string         // the name of the organizer from the e-mail, not reliable to identify anyone
	OrganizerEMail        string         // the e-mail address of the organizer, use this to identify the user
}

// GetFirstAttendee returns the first Attendee that is not the organizer of the event from the Attendees array.
// If none is found then an Attendee with the Name of "None" will be returned.
func (c CalendarEvent) GetFirstAttendee() Attendee {
	for _, attendee := range c.Attendees {
		// The organizer is identified by e-mail address, not by name.
		if attendee.Email != c.OrganizerEMail {
			return attendee
		}
	}

	return Attendee{Name: "None"}
}

// String implements fmt.Stringer and dumps every field for debugging.
func (c CalendarEvent) String() string {
	return fmt.Sprintf("CalendarEvent(ID: \"%v\", CreatedDateTime: \"%v\", LastModifiedDateTime: \"%v\", "+
		"ICalUId: \"%v\", Subject: \"%v\", "+
		"Importance: \"%v\", Sensitivity: \"%v\", IsAllDay: \"%v\", IsCancelled: \"%v\", "+
		"IsOrganizer: \"%v\", SeriesMasterId: \"%v\", ShowAs: \"%v\", Type: \"%v\", ResponseStatus: \"%v\", "+
		"Attendees: \"%v\", Organizer: \"%v\", Start: \"%v\", End: \"%v\")", c.ID, c.CreatedDateTime, c.LastModifiedDateTime,
		c.ICalUID, c.Subject, c.Importance, c.Sensitivity, c.IsAllDay, c.IsCancelled, c.IsOrganizer, c.SeriesMasterID,
		c.ShowAs, c.Type, c.ResponseStatus, c.Attendees, c.OrganizerName+" "+c.OrganizerEMail, c.StartTime, c.EndTime)
}

// PrettySimpleString returns all Calendar Events in a readable format, mostly used for logging purposes
func (c CalendarEvent) PrettySimpleString() string {
	return fmt.Sprintf("{ %v (%v) [%v - %v] }", c.Subject, c.GetFirstAttendee().Name, c.StartTime, c.EndTime)
}

// Equal returns whether the CalendarEvent is identical to the given CalendarEvent.
// Times are compared with time.Time.Equal (instant comparison), everything else with ==,
// ResponseStatus and Attendees with their own Equal methods.
func (c CalendarEvent) Equal(other CalendarEvent) bool {
	return c.ID == other.ID && c.CreatedDateTime.Equal(other.CreatedDateTime) && c.LastModifiedDateTime.Equal(other.LastModifiedDateTime) &&
		c.ICalUID == other.ICalUID && c.Subject == other.Subject && c.Importance == other.Importance &&
		c.Sensitivity == other.Sensitivity && c.IsAllDay == other.IsAllDay && c.IsCancelled == other.IsCancelled &&
		c.IsOrganizer == other.IsOrganizer && c.SeriesMasterID == other.SeriesMasterID && c.ShowAs == other.ShowAs &&
		c.Type == other.Type && c.ResponseStatus.Equal(other.ResponseStatus) && c.StartTime.Equal(other.StartTime) &&
		c.EndTime.Equal(other.EndTime) && c.Attendees.Equal(other.Attendees) && c.OrganizerName == other.OrganizerName &&
		c.OrganizerEMail == other.OrganizerEMail
}

// UnmarshalJSON implements the json unmarshal to be used by the json-library
func (c *CalendarEvent) UnmarshalJSON(data []byte) error {
	// Intermediate struct mirroring the raw graph API payload; everything is
	// decoded here first and then converted field by field.
	tmp := struct {
		ID                    string            `json:"id"`
		CreatedDateTime       string            `json:"createdDateTime"`
		LastModifiedDateTime  string            `json:"lastModifiedDateTime"`
		OriginalStartTimeZone string            `json:"originalStartTimeZone"`
		OriginalEndTimeZone   string            `json:"originalEndTimeZone"`
		ICalUID               string            `json:"iCalUId"`
		Subject               string            `json:"subject"`
		Importance            string            `json:"importance"`
		Sensitivity           string            `json:"sensitivity"`
		IsAllDay              bool              `json:"isAllDay"`
		IsCancelled           bool              `json:"isCancelled"`
		IsOrganizer           bool              `json:"isOrganizer"`
		SeriesMasterID        string            `json:"seriesMasterId"`
		ShowAs                string            `json:"showAs"`
		Type                  string            `json:"type"`
		ResponseStatus        ResponseStatus    `json:"responseStatus"`
		Start                 map[string]string `json:"start"`
		End                   map[string]string `json:"end"`
		Attendees             Attendees         `json:"attendees"`
		Organizer             struct {
			EmailAddress struct {
				Name    string `json:"name"`
				Address string `json:"address"`
			} `json:"emailAddress"`
		} `json:"organizer"`
	}{}

	var err error
	// unmarshal to tmp-struct, return if error
	if err = json.Unmarshal(data, &tmp); err != nil {
		return fmt.Errorf("error on json.Unmarshal: %v | Data: %v", err, string(data))
	}

	c.ID = tmp.ID
	c.CreatedDateTime, err = time.Parse(time.RFC3339Nano, tmp.CreatedDateTime)
	if err != nil {
		return fmt.Errorf("cannot time.Parse with RFC3339Nano createdDateTime %v: %v", tmp.CreatedDateTime, err)
	}
	c.LastModifiedDateTime, err = time.Parse(time.RFC3339Nano, tmp.LastModifiedDateTime)
	if err != nil {
		return fmt.Errorf("cannot time.Parse with RFC3339Nano lastModifiedDateTime %v: %v", tmp.LastModifiedDateTime, err)
	}
	c.OriginalStartTimeZone, err = mapTimeZoneStrings(tmp.OriginalStartTimeZone)
	if err != nil {
		return fmt.Errorf("cannot time.LoadLocation originalStartTimeZone %v: %v", tmp.OriginalStartTimeZone, err)
	}
	c.OriginalEndTimeZone, err = mapTimeZoneStrings(tmp.OriginalEndTimeZone)
	if err != nil {
		return fmt.Errorf("cannot time.LoadLocation originalEndTimeZone %v: %v", tmp.OriginalEndTimeZone, err)
	}
	c.ICalUID = tmp.ICalUID
	c.Subject = tmp.Subject
	c.Importance = tmp.Importance
	c.Sensitivity = tmp.Sensitivity
	c.IsAllDay = tmp.IsAllDay
	c.IsCancelled = tmp.IsCancelled
	c.IsOrganizer = tmp.IsOrganizer
	c.SeriesMasterID = tmp.SeriesMasterID
	c.ShowAs = tmp.ShowAs
	c.Type = tmp.Type
	c.ResponseStatus = tmp.ResponseStatus
	c.Attendees = tmp.Attendees
	c.OrganizerName = tmp.Organizer.EmailAddress.Name
	c.OrganizerEMail = tmp.Organizer.EmailAddress.Address

	// Parse event start & endtime with timezone
	c.StartTime, err = parseTimeAndLocation(tmp.Start["dateTime"], tmp.Start["timeZone"]) // the timeZone is normally ALWAYS UTC, microsoft converts time date & time to that
	if err != nil {
		return fmt.Errorf("cannot parse start-dateTime %v AND timeZone %v: %v", tmp.Start["dateTime"], tmp.Start["timeZone"], err)
	}
	c.EndTime, err = parseTimeAndLocation(tmp.End["dateTime"], tmp.End["timeZone"]) // the timeZone is normally ALWAYS UTC, microsoft converts time date & time to that
	if err != nil {
		return fmt.Errorf("cannot parse end-dateTime %v AND timeZone %v: %v", tmp.End["dateTime"], tmp.End["timeZone"], err)
	}

	// Hint: OriginalStartTimeZone & end are UTC (set by microsoft) if it is a full-day event, this will be handled in the next section
	c.StartTime = c.StartTime.In(c.OriginalStartTimeZone) // move the StartTime to the original start-timezone
	c.EndTime = c.EndTime.In(c.OriginalEndTimeZone)       // move the EndTime to the original end-timezone

	// Now check if it's a full-day event, if yes, the event is UTC anyway. We need it to be accurate for the program to work
	// hence we set it to time.Local. It can later be manipulated by the program to a different timezone but the times also have
	// to be recalculated. E.g. we set it to UTC+2 hence it will start at 02:00 and end at 02:00, not 00:00 -> manually set to 00:00
	if c.IsAllDay && FullDayEventTimeZone != time.UTC {
		// set to local location
		c.StartTime = c.StartTime.In(FullDayEventTimeZone)
		c.EndTime = c.EndTime.In(FullDayEventTimeZone)
		// get offset in seconds
		_, startOffSet := c.StartTime.Zone()
		_, endOffSet := c.EndTime.Zone()
		// decrease time to 00:00 again (subtract the zone offset from each end)
		c.StartTime = c.StartTime.Add(-1 * time.Second * time.Duration(startOffSet))
		c.EndTime = c.EndTime.Add(-1 * time.Second * time.Duration(endOffSet))
	}

	return nil
}

// parseTimeAndLocation is just a helper method to shorten the code in the Unmarshal json
func parseTimeAndLocation(timeToParse, locationToParse string) (time.Time, error) {
	// The graph API omits the zone designator from the timestamp; the zone
	// arrives as a separate IANA name and is applied afterwards.
	parsedTime, err := time.Parse("2006-01-02T15:04:05.999999999", timeToParse)
	if err != nil {
		return time.Time{}, err
	}
	parsedTimeZone, err := time.LoadLocation(locationToParse)
	if err != nil {
		return time.Time{}, err
	}
	return parsedTime.In(parsedTimeZone), nil
}

// mapTimeZoneStrings maps various Timezones used by Microsoft to go-understandable timezones or returns the source-zone if no mapping is found
func mapTimeZoneStrings(timeZone string) (*time.Location, error) {
	if timeZone == "tzone://Microsoft/Custom" {
		// Microsoft's placeholder zone for custom/full-day events.
		return FullDayEventTimeZone, nil
	}
	return globalSupportedTimeZones.GetTimeZoneByAlias(timeZone)
}
package utils

import "reflect"

// GetTypeName returns the name of the dynamic type of instance.
// Pointer types are reported with a leading "*" followed by the
// pointed-to type's name.
func GetTypeName(instance interface{}) string {
	rt := reflect.TypeOf(instance)
	switch rt.Kind() {
	case reflect.Ptr:
		return "*" + rt.Elem().Name()
	default:
		return rt.Name()
	}
}
package types

import "time"

// Users is a user account record. Snake-case field names mirror the
// JSON keys (and presumably the database columns — confirm against the
// persistence layer).
type Users struct {
	ID         int       `json:"id"`
	Created_at time.Time `json:"created_at"`
	Updated_at time.Time `json:"updated_at"`
	Deleted_at time.Time `json:"deleted_at"` // zero value presumably means "not deleted" — verify soft-delete convention
	Name       string    `json:"name"`
	Email      string    `json:"email"`
	Phone      string    `json:"phone"`
	Password   string    `json:"password"` // NOTE(review): serialized to JSON — confirm this is never sent to clients
	Slug       string    `json:"slug"`
}

// Relationships records a single follow edge between two users
// (presumably FollowerId follows FollowedId — confirm against usage).
type Relationships struct {
	FollowerId int `json:"follower_id"`
	FollowedId int `json:"followed_id"`
}

// Tweets is a single tweet together with denormalized like information.
type Tweets struct {
	ID            int       `json:"id"`
	Created_at    time.Time `json:"created_at"`
	Deleted_at    time.Time `json:"deleted_at"`
	User_id       int       `json:"user_id"` // author's Users.ID
	Tweet         string    `json:"tweet"`
	Liked_user_id []int     `json:"liked_user_id"` // IDs of users who liked this tweet
	LikeCount     int       `json:"like_count"`
	RedLikeButton uint8     `json:"red_like_button"` // presumably a per-viewer UI flag — confirm semantics
}
package taillog

import (
	"fmt"

	"logagent_study/etcd"
)

var (
	taskMgr *tailLogMgr
)

// tailLogMgr keeps one running TailTask per (path, topic) pair and applies
// configuration updates delivered over newConfChan.
type tailLogMgr struct {
	logEntryList []*etcd.LogEntry
	taskMap      map[string]*TailTask
	newConfChan  chan []*etcd.LogEntry
}

// Init creates the package-level manager, starts one TailTask per initial
// etcd log entry, and launches the config-update loop.
func Init(logEntryConf []*etcd.LogEntry) {
	taskMgr = &tailLogMgr{
		logEntryList: logEntryConf,
		taskMap:      make(map[string]*TailTask, 16),
		newConfChan:  make(chan []*etcd.LogEntry),
	}
	// Start one task per entry known at startup.
	for _, logEntry := range logEntryConf {
		tailObj := NewTailTask(logEntry.Path, logEntry.Topic)
		k := fmt.Sprintf("%s_%s", logEntry.Path, logEntry.Topic)
		taskMgr.taskMap[k] = tailObj
	}
	go taskMgr.run()
}

// run applies incoming configuration snapshots: it starts tasks for entries
// that are new, cancels tasks whose entries disappeared, and keeps taskMap
// and logEntryList in sync with the latest snapshot.
//
// BUG FIXES vs. the previous version:
//   - cancelled tasks were never removed from taskMap, so a config that was
//     deleted and later re-added would be silently ignored;
//   - logEntryList only ever grew, so deleted entries were re-inspected on
//     every update;
//   - the select/default + Sleep busy-wait is replaced by a blocking receive
//     (same observable behavior, no 1s polling; the "time" import is gone).
func (t *tailLogMgr) run() {
	for newConf := range t.newConfChan {
		fmt.Printf("tailLogMgr 新配置: %v\n", newConf)
		// Start tasks present in the new snapshot but not yet tracked.
		for _, conf := range newConf {
			k := fmt.Sprintf("%s_%s", conf.Path, conf.Topic)
			if _, ok := t.taskMap[k]; ok {
				continue
			}
			fmt.Printf("tailLogMgr 添加了一个配置监听\n")
			t.taskMap[k] = NewTailTask(conf.Path, conf.Topic)
		}
		// Cancel tasks whose entries are absent from the new snapshot.
		for _, c1 := range t.logEntryList {
			isDelete := true
			for _, c2 := range newConf {
				if c2.Path == c1.Path && c2.Topic == c1.Topic {
					isDelete = false
					break
				}
			}
			if isDelete {
				k := fmt.Sprintf("%s_%s", c1.Path, c1.Topic)
				fmt.Printf("配置:%s被删除\n", k)
				if task, ok := t.taskMap[k]; ok {
					task.cancelFunc()
					// Remove the cancelled task so the same entry can be
					// re-added by a later snapshot.
					delete(t.taskMap, k)
				}
			}
		}
		// Track the latest snapshot wholesale instead of appending forever.
		t.logEntryList = newConf
	}
}

// GetNewConf exposes the send side of the config-update channel so the etcd
// watcher can push new snapshots to the manager.
func GetNewConf() chan<- []*etcd.LogEntry {
	return taskMgr.newConfChan
}
package main

import (
	"bufio"
	"fmt"
	"os"
)

// main prompts on stdout, reads one line from stdin, and echoes it back
// quoted (%q shows empty input and escapes control characters).
//
// BUG FIX: the previous version imported "strconv" without using it, which
// is a compile error in Go; the import is removed.
func main() {
	scanner := bufio.NewScanner(os.Stdin)

	fmt.Printf("Type something: ")
	scanner.Scan()
	input := scanner.Text()
	fmt.Printf("You typed %q \n", input)
}
// Copyright 2019 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package session import ( "context" "github.com/golang/protobuf/proto" "chromiumos/policy/chromium/policy/enterprise_management_proto" "chromiumos/tast/local/cryptohome" "chromiumos/tast/local/session" "chromiumos/tast/local/session/ownership" "chromiumos/tast/testing" ) func init() { testing.AddTest(&testing.Test{ Func: MultiUserPolicy, Desc: "Verifies that storing and retrieving user policy works with multiple profiles signed-in", Contacts: []string{ "hidehiko@chromium.org", }, Data: []string{"testcert.p12"}, Attr: []string{"group:mainline"}, }) } func MultiUserPolicy(ctx context.Context, s *testing.State) { const ( user1 = "user1@somewhere.com" user2 = "user2@somewhere.com" ) desc1 := ownership.UserPolicyDescriptor(user1) desc2 := ownership.UserPolicyDescriptor(user2) privKey, err := session.ExtractPrivKey(s.DataPath("testcert.p12")) if err != nil { s.Fatal("Failed to parse PKCS #12 file: ", err) } var settings enterprise_management_proto.ChromeDeviceSettingsProto policy, err := session.BuildPolicy("", privKey, nil, &settings) if err != nil { s.Fatal("Failed to build test policy data: ", err) } empty := &enterprise_management_proto.PolicyFetchResponse{} if err := session.SetUpDevice(ctx); err != nil { s.Fatal("Failed to reset device ownership: ", err) } // Clear the users' vault to make sure the test starts without any // policy or key lingering around. At this stage, the session isn't // started and there's no user signed in. 
if err := cryptohome.RemoveVault(ctx, user1); err != nil { s.Fatalf("Failed to remove vault for %s: %v", user1, err) } if err := cryptohome.CreateVault(ctx, user1, ""); err != nil { s.Fatalf("Failed to create vault for %s: %v", user1, err) } if err := cryptohome.RemoveVault(ctx, user2); err != nil { s.Fatalf("Failed to remove vault for %s: %v", user2, err) } if err := cryptohome.CreateVault(ctx, user2, ""); err != nil { s.Fatalf("Failed to create vault for %s: %v", user2, err) } sm, err := session.NewSessionManager(ctx) if err != nil { s.Fatal("Failed to create session_manager binding: ", err) } if err := session.PrepareChromeForPolicyTesting(ctx, sm); err != nil { s.Fatal("Failed to prepare Chrome for testing: ", err) } // Start a session for the first user, and verify that no policy // exists for that user yet. if err := sm.StartSession(ctx, user1, ""); err != nil { s.Fatalf("Failed to start session for %s: %v", user1, err) } if ret, err := sm.RetrievePolicyEx(ctx, desc1); err != nil { s.Fatalf("Failed to retrieve policy for %s: %v", user1, err) } else if !proto.Equal(ret, empty) { s.Fatal("Unexpected policy is fetched for ", user1) } // Then, store the policy. if err := sm.StorePolicyEx(ctx, desc1, policy); err != nil { s.Fatalf("Failed to store policy for %s: %v", user1, err) } // Storing policy for the second user fails before the session starts. if err := sm.StorePolicyEx(ctx, desc2, policy); err == nil { s.Fatalf("Unexpectedly succeeded to store policy for %s: %v", user2, err) } // Starts the second user's session, and verify that it has no // policy stored yet. if err := sm.StartSession(ctx, user2, ""); err != nil { s.Fatalf("Failed to start session for %s: %v", user1, err) } if ret, err := sm.RetrievePolicyEx(ctx, desc2); err != nil { s.Fatalf("Failed to retrieve policy for %s: %v", user2, err) } else if !proto.Equal(ret, empty) { s.Fatal("Unexpected policy is fetched for ", user2) } // Strong the policy for the second user should work now. 
if err := sm.StorePolicyEx(ctx, desc2, policy); err != nil { s.Fatalf("Failed to store policy for %s: %v", user2, err) } // Verify that retrieving policy for the second user works, too. if _, err := sm.RetrievePolicyEx(ctx, desc2); err != nil { s.Fatalf("Failed to retrieve policy for %s: %v", user2, err) } }
package imagerepository

import (
	"log"

	"alauda.io/devops-apiserver/pkg/apis/devops/v1alpha1"
	devopsclient "alauda.io/devops-apiserver/pkg/client/clientset/versioned"
	"alauda.io/diablo/src/backend/api"
	"alauda.io/diablo/src/backend/errors"
	"alauda.io/diablo/src/backend/resource/common"
	"alauda.io/diablo/src/backend/resource/dataselect"
)

// ResourceItem identifies a single resource by name, namespace and kind.
type ResourceItem struct {
	Name      string `json:"name"`
	Namespace string `json:"namespace"`
	Kind      string `json:"kind"`
}

// ResourceList is a generic list of resource identifiers.
type ResourceList struct {
	Items []ResourceItem `json:"items"`
	// List of non-critical errors, that occurred during resource retrieval
	Errors []error `json:"errors"`
}

// ImageRepositoryList contains a list of ImageRepository in the cluster.
type ImageRepositoryList struct {
	ListMeta api.ListMeta `json:"listMeta"`
	// Unordered list of ImageRepository.
	Items []ImageRepository `json:"imagerepositories"`
	// List of non-critical errors, that occurred during resource retrieval.
	Errors []error `json:"errors"`
}

// ImageRepository is a presentation layer view of Kubernetes namespaces. This means it is namespace plus
// additional augmented data we can get from other sources.
type ImageRepository struct {
	ObjectMeta api.ObjectMeta `json:"objectMeta"`
	TypeMeta   api.TypeMeta   `json:"typeMeta"`

	Spec   v1alpha1.ImageRepositorySpec   `json:"spec"`
	Status v1alpha1.ImageRepositoryStatus `json:"status"`
}

// ImageTagList contains a list of ImageTag in the cluster.
type ImageTagList struct {
	ListMeta api.ListMeta `json:"listMeta"`
	// ordered list of tags
	Items []v1alpha1.ImageTag `json:"tags"`
	// List of non-critical errors, that occurred during resource retrieval.
	Errors []error `json:"errors"`
}

// GetImageRepositoryList returns a list of image repositories in the given
// namespace, filtered/paginated by dsQuery.
func GetImageRepositoryList(client devopsclient.Interface, namespace *common.NamespaceQuery, dsQuery *dataselect.DataSelectQuery) (*ImageRepositoryList, error) {
	log.Println("Getting list of repository")

	irList, err := client.DevopsV1alpha1().ImageRepositories(namespace.ToRequestParam()).List(api.ListEverything)
	if err != nil {
		log.Println("Error while listing repositories", err)
	}

	// BUG FIX: this previously compared criticalError against err, so a
	// critical error slipped through and irList.Items below dereferenced a
	// nil list. Match the nil-check used by GetImageRepositoryListInBinding.
	nonCriticalErrors, criticalError := errors.HandleError(err)
	if criticalError != nil {
		return nil, criticalError
	}

	// Guard against a nil list when the List error was deemed non-critical.
	var items []v1alpha1.ImageRepository
	if irList != nil {
		items = irList.Items
	}
	return toList(items, nonCriticalErrors, dsQuery), nil
}

// GetImageRepositoryListInBinding returns the image repositories referenced
// by the named ImageRegistryBinding's status conditions.
func GetImageRepositoryListInBinding(client devopsclient.Interface, namespace, name string, dsQuery *dataselect.DataSelectQuery) (*ImageRepositoryList, error) {
	log.Println("Getting list of repository from binding ", name)

	binding, err := client.DevopsV1alpha1().ImageRegistryBindings(namespace).Get(name, api.GetOptionsInCache)
	if err != nil {
		return nil, err
	}

	irList, err := client.DevopsV1alpha1().ImageRepositories(namespace).List(api.ListEverything)
	if err != nil {
		log.Println("error while listing repositories", err)
	}
	nonCriticalErrors, criticalError := errors.HandleError(err)
	if criticalError != nil {
		return nil, criticalError
	}

	// Keep only repositories that appear in the binding's conditions.
	var repos []v1alpha1.ImageRepository
	if irList != nil {
		for _, item := range irList.Items {
			for _, condition := range binding.Status.Conditions {
				if item.GetName() == condition.Name {
					repos = append(repos, item)
				}
			}
		}
	}

	return toList(repos, nonCriticalErrors, dsQuery), nil
}

// GetImageTagList returns the tags recorded in the named repository's status.
func GetImageTagList(client devopsclient.Interface, namespace, name string) (*ImageTagList, error) {
	log.Println("Getting tag list of repository ", name)

	its := &ImageTagList{}
	result, err := GetImageRepository(client, namespace, name)
	if err != nil {
		return nil, err
	}

	its.Items = append(its.Items, result.Status.Tags...)
	return its, nil
}

// toList converts raw API repositories into the presentation list, applying
// the data-select filter/pagination and attaching non-critical errors.
func toList(imageRepositories []v1alpha1.ImageRepository, nonCriticalErrors []error, dsQuery *dataselect.DataSelectQuery) *ImageRepositoryList {
	irList := &ImageRepositoryList{
		Items:    make([]ImageRepository, 0),
		ListMeta: api.ListMeta{TotalItems: len(imageRepositories)},
	}

	irCells, filteredTotal := dataselect.GenericDataSelectWithFilter(toCells(imageRepositories), dsQuery)
	imageRepositories = fromCells(irCells)
	irList.ListMeta = api.ListMeta{TotalItems: filteredTotal}
	irList.Errors = nonCriticalErrors

	for _, repo := range imageRepositories {
		irList.Items = append(irList.Items, toDetailsInList(repo))
	}
	return irList
}

// toDetailsInList converts one API repository into its presentation form.
func toDetailsInList(imageRepository v1alpha1.ImageRepository) ImageRepository {
	crs := ImageRepository{
		ObjectMeta: api.NewObjectMeta(imageRepository.ObjectMeta),
		TypeMeta:   api.NewTypeMeta(api.ResourceKindImageRepository),
		Spec:       imageRepository.Spec,
		Status:     imageRepository.Status,
	}
	return crs
}
package dtacode

import (
	"fmt"
	"io/ioutil"

	"github.com/derpl-del/gopro2/envcode/jscode"
	"github.com/derpl-del/gopro2/envcode/logcode"
	"github.com/derpl-del/gopro2/envcode/strcode"
)

// ListProduct is the package-level product cache; it is reset and rebuilt on
// every call to ReturnAllProduct (NOTE(review): not goroutine-safe — confirm
// callers are single-threaded).
var ListProduct []strcode.ProductData

// ReturnAllProduct reads every file under product_list/, decodes each into a
// ProductData entry, and returns them wrapped for the homepage.
// NOTE(review): on a ReadDir error the function logs and continues, yielding
// an empty listing rather than failing — confirm this best-effort behavior
// is intended.
func ReturnAllProduct() strcode.AllProductData {
	ListProduct = []strcode.ProductData{}
	fileInfo, err := ioutil.ReadDir("product_list/")
	if err != nil {
		fmt.Println(err)
		logcode.LogE(err)
	}
	for i, info := range fileInfo {
		// jscode.GetProductData presumably decodes the product file named
		// info.Name() — verify against that package.
		var article = jscode.GetProductData(info.Name())
		article.No = i + 1 // 1-based position in the listing
		ListProduct = append(ListProduct, article)
	}
	Articles := strcode.AllProductData{ListProduct: ListProduct}
	return Articles
}
package day1

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strconv"
)

// check panics on any non-nil error; adequate for a throwaway puzzle script.
func check(e error) {
	if e != nil {
		panic(e)
	}
}

// pairProduct returns the product of the first pair of distinct entries
// summing to target, and whether such a pair exists.
func pairProduct(numbers []int, target int) (int, bool) {
	for i := 0; i < len(numbers); i++ {
		for j := i + 1; j < len(numbers); j++ {
			if numbers[i]+numbers[j] == target {
				return numbers[i] * numbers[j], true
			}
		}
	}
	return 0, false
}

// tripleProduct returns the product of the first triple of distinct entries
// summing to target, and whether such a triple exists.
func tripleProduct(numbers []int, target int) (int, bool) {
	for i := 0; i < len(numbers); i++ {
		for j := i + 1; j < len(numbers); j++ {
			for k := j + 1; k < len(numbers); k++ {
				if numbers[i]+numbers[j]+numbers[k] == target {
					return numbers[i] * numbers[j] * numbers[k], true
				}
			}
		}
	}
	return 0, false
}

// main reads one integer per line from input.txt and prints the products for
// both puzzle parts.
//
// BUG FIXES vs. the previous version:
//   - the triple loop indexed numbers[j:] where j was relative to the slice
//     numbers[i:], so the third element started at the wrong position and
//     could reuse an already-chosen entry;
//   - the pair loop ranged over numbers[i:], pairing an element with itself;
//   - the opened file was never closed.
func main() {
	dat, err := os.Open("input.txt")
	check(err)
	defer dat.Close()

	scanner := bufio.NewScanner(dat)
	var numbers []int
	for scanner.Scan() {
		number, err := strconv.Atoi(scanner.Text())
		check(err)
		numbers = append(numbers, number)
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}

	if p, ok := pairProduct(numbers, 2020); ok {
		fmt.Println(p)
	}
	if p, ok := tripleProduct(numbers, 2020); ok {
		fmt.Println(p)
	}
}
package floors

import (
	"fmt"
	"math/bits"
	"regexp"
	"sort"
	"strings"

	"github.com/golang/glog"
	"github.com/prebid/openrtb/v19/openrtb2"
	"github.com/prebid/prebid-server/currency"
	"github.com/prebid/prebid-server/openrtb_ext"
)

// Schema-dimension names and dimension values used when building and
// matching price-floor rule keys.
const (
	SiteDomain          string = "siteDomain"
	PubDomain           string = "pubDomain"
	Domain              string = "domain"
	Bundle              string = "bundle"
	Channel             string = "channel"
	MediaType           string = "mediaType"
	Size                string = "size"
	GptSlot             string = "gptSlot"
	AdUnitCode          string = "adUnitCode"
	Country             string = "country"
	DeviceType          string = "deviceType"
	Tablet              string = "tablet"
	Desktop             string = "desktop"
	Phone               string = "phone"
	BannerMedia         string = "banner"
	VideoMedia          string = "video"
	VideoOutstreamMedia string = "video-outstream"
	AudioMedia          string = "audio"
	NativeMedia         string = "native"
)

// getFloorCurrency returns floors currency provided in floors JSON,
// if currency is not provided then defaults to USD
func getFloorCurrency(floorExt *openrtb_ext.PriceFloorRules) string {
	var floorCur string

	if floorExt != nil && floorExt.Data != nil {
		if floorExt.Data.Currency != "" {
			floorCur = floorExt.Data.Currency
		}
		// A currency on the first model group overrides the data-level one.
		if len(floorExt.Data.ModelGroups) > 0 && floorExt.Data.ModelGroups[0].Currency != "" {
			floorCur = floorExt.Data.ModelGroups[0].Currency
		}
	}

	if len(floorCur) == 0 {
		// defaultCurrency is declared elsewhere in this package.
		floorCur = defaultCurrency
	}
	return floorCur
}

// getMinFloorValue returns floorMin and floorMinCur,
// values provided in impression extension are considered over floors JSON.
func getMinFloorValue(floorExt *openrtb_ext.PriceFloorRules, imp *openrtb_ext.ImpWrapper, conversions currency.Conversions) (float64, string, error) { var err error var rate float64 var floorCur string floorMin := roundToFourDecimals(floorExt.FloorMin) floorMinCur := floorExt.FloorMinCur impFloorMin, impFloorCur, err := getFloorMinAndCurFromImp(imp) if err == nil { if impFloorMin > 0.0 { floorMin = impFloorMin } if impFloorCur != "" { floorMinCur = impFloorCur } floorCur = getFloorCurrency(floorExt) if floorMin > 0.0 && floorMinCur != "" { if floorExt.FloorMinCur != "" && impFloorCur != "" && floorExt.FloorMinCur != impFloorCur { glog.Warning("FloorMinCur are different in floorExt and ImpExt") } if floorCur != "" && floorMinCur != floorCur { rate, err = conversions.GetRate(floorMinCur, floorCur) floorMin = rate * floorMin } } floorMin = roundToFourDecimals(floorMin) } if err != nil { return floorMin, floorCur, fmt.Errorf("Error in getting FloorMin value : '%v'", err.Error()) } else { return floorMin, floorCur, err } } // getFloorMinAndCurFromImp returns floorMin and floorMinCur from impression extension func getFloorMinAndCurFromImp(imp *openrtb_ext.ImpWrapper) (float64, string, error) { var floorMin float64 var floorMinCur string impExt, err := imp.GetImpExt() if impExt != nil { impExtPrebid := impExt.GetPrebid() if impExtPrebid != nil && impExtPrebid.Floors != nil { if impExtPrebid.Floors.FloorMin > 0.0 { floorMin = impExtPrebid.Floors.FloorMin } if impExtPrebid.Floors.FloorMinCur != "" { floorMinCur = impExtPrebid.Floors.FloorMinCur } } } return floorMin, floorMinCur, err } // updateImpExtWithFloorDetails updates floors related details into imp.ext.prebid.floors func updateImpExtWithFloorDetails(imp *openrtb_ext.ImpWrapper, matchedRule string, floorRuleVal, floorVal float64) error { impExt, err := imp.GetImpExt() if err != nil { return err } extImpPrebid := impExt.GetPrebid() if extImpPrebid == nil { extImpPrebid = &openrtb_ext.ExtImpPrebid{} } 
extImpPrebid.Floors = &openrtb_ext.ExtImpPrebidFloors{ FloorRule: matchedRule, FloorRuleValue: floorRuleVal, FloorValue: floorVal, } impExt.SetPrebid(extImpPrebid) return err } // selectFloorModelGroup selects one modelgroup based on modelweight out of multiple modelgroups, if provided into floors JSON. func selectFloorModelGroup(modelGroups []openrtb_ext.PriceFloorModelGroup, f func(int) int) []openrtb_ext.PriceFloorModelGroup { totalModelWeight := 0 for i := 0; i < len(modelGroups); i++ { if modelGroups[i].ModelWeight == nil { modelGroups[i].ModelWeight = new(int) *modelGroups[i].ModelWeight = 1 } totalModelWeight += *modelGroups[i].ModelWeight } sort.SliceStable(modelGroups, func(i, j int) bool { if modelGroups[i].ModelWeight != nil && modelGroups[j].ModelWeight != nil { return *modelGroups[i].ModelWeight < *modelGroups[j].ModelWeight } return false }) winWeight := f(totalModelWeight + 1) for i, modelGroup := range modelGroups { winWeight -= *modelGroup.ModelWeight if winWeight <= 0 { modelGroups[0], modelGroups[i] = modelGroups[i], modelGroups[0] return modelGroups[:1] } } return modelGroups[:1] } // shouldSkipFloors returns flag to decide skipping of floors singalling based on skipRate provided func shouldSkipFloors(ModelGroupsSkipRate, DataSkipRate, RootSkipRate int, f func(int) int) bool { skipRate := 0 if ModelGroupsSkipRate > 0 { skipRate = ModelGroupsSkipRate } else if DataSkipRate > 0 { skipRate = DataSkipRate } else { skipRate = RootSkipRate } if skipRate == 0 { return false } return skipRate >= f(skipRateMax+1) } // findRule prepares rule combinations based on schema dimensions provided in floors data, request values associated with these fields and // does matching with rules provided in floors data and returns matched rule func findRule(ruleValues map[string]float64, delimiter string, desiredRuleKey []string) (string, bool) { ruleKeys := prepareRuleCombinations(desiredRuleKey, delimiter) for i := 0; i < len(ruleKeys); i++ { if _, ok := 
ruleValues[ruleKeys[i]]; ok { return ruleKeys[i], true } } return "", false } // createRuleKey prepares rule keys based on schema dimension and values present in request func createRuleKey(floorSchema openrtb_ext.PriceFloorSchema, request *openrtb_ext.RequestWrapper, imp *openrtb_ext.ImpWrapper) []string { var ruleKeys []string for _, field := range floorSchema.Fields { value := catchAll switch field { case MediaType: value = getMediaType(imp.Imp) case Size: value = getSizeValue(imp.Imp) case Domain: value = getDomain(request) case SiteDomain: value = getSiteDomain(request) case Bundle: value = getBundle(request) case PubDomain: value = getPublisherDomain(request) case Country: value = getDeviceCountry(request) case DeviceType: value = getDeviceType(request) case Channel: value = getChannelName(request) case GptSlot: value = getGptSlot(imp) case AdUnitCode: value = getAdUnitCode(imp) } ruleKeys = append(ruleKeys, value) } return ruleKeys } // getDeviceType returns device type provided into request func getDeviceType(request *openrtb_ext.RequestWrapper) string { value := catchAll if request.Device == nil || len(request.Device.UA) == 0 { return value } if isMobileDevice(request.Device.UA) { value = Phone } else if isTabletDevice(request.Device.UA) { value = Tablet } else { value = Desktop } return value } // getDeviceCountry returns device country provided into request func getDeviceCountry(request *openrtb_ext.RequestWrapper) string { value := catchAll if request.Device != nil && request.Device.Geo != nil { value = request.Device.Geo.Country } return value } // getMediaType returns media type for give impression func getMediaType(imp *openrtb2.Imp) string { value := catchAll formatCount := 0 if imp.Banner != nil { formatCount++ value = BannerMedia } if imp.Video != nil && imp.Video.Placement != 1 { formatCount++ value = VideoOutstreamMedia } if imp.Video != nil && imp.Video.Placement == 1 { formatCount++ value = VideoMedia } if imp.Audio != nil { formatCount++ value 
= AudioMedia } if imp.Native != nil { formatCount++ value = NativeMedia } if formatCount > 1 { return catchAll } return value } // getSizeValue returns size for given media type in WxH format func getSizeValue(imp *openrtb2.Imp) string { size := catchAll width := int64(0) height := int64(0) if imp.Banner != nil { width, height = getBannerSize(imp) } else if imp.Video != nil { width = imp.Video.W height = imp.Video.H } if width != 0 && height != 0 { size = fmt.Sprintf("%dx%d", width, height) } return size } // getBannerSize returns width and height for given banner impression func getBannerSize(imp *openrtb2.Imp) (int64, int64) { width := int64(0) height := int64(0) if len(imp.Banner.Format) == 1 { return imp.Banner.Format[0].W, imp.Banner.Format[0].H } else if len(imp.Banner.Format) > 1 { return width, height } else if imp.Banner.W != nil && imp.Banner.H != nil { width = *imp.Banner.W height = *imp.Banner.H } return width, height } // getDomain returns domain provided into site or app object func getDomain(request *openrtb_ext.RequestWrapper) string { value := catchAll if request.Site != nil { if len(request.Site.Domain) > 0 { value = request.Site.Domain } else if request.Site.Publisher != nil && len(request.Site.Publisher.Domain) > 0 { value = request.Site.Publisher.Domain } } else if request.App != nil { if len(request.App.Domain) > 0 { value = request.App.Domain } else if request.App.Publisher != nil && len(request.App.Publisher.Domain) > 0 { value = request.App.Publisher.Domain } } return value } // getSiteDomain returns domain provided into site object func getSiteDomain(request *openrtb_ext.RequestWrapper) string { value := catchAll if request.Site != nil && len(request.Site.Domain) > 0 { value = request.Site.Domain } else if request.App != nil && len(request.App.Domain) > 0 { value = request.App.Domain } return value } // getPublisherDomain returns publisher domain provided into site or app object func getPublisherDomain(request *openrtb_ext.RequestWrapper) 
string { value := catchAll if request.Site != nil && request.Site.Publisher != nil && len(request.Site.Publisher.Domain) > 0 { value = request.Site.Publisher.Domain } else if request.App != nil && request.App.Publisher != nil && len(request.App.Publisher.Domain) > 0 { value = request.App.Publisher.Domain } return value } // getBundle returns app bundle type func getBundle(request *openrtb_ext.RequestWrapper) string { value := catchAll if request.App != nil && len(request.App.Bundle) > 0 { value = request.App.Bundle } return value } // getGptSlot returns gptSlot func getGptSlot(imp *openrtb_ext.ImpWrapper) string { value := catchAll impExt, err := imp.GetImpExt() if err == nil { extData := impExt.GetData() if extData != nil { if extData.AdServer != nil && extData.AdServer.Name == "gam" { gptSlot := extData.AdServer.AdSlot if gptSlot != "" { value = gptSlot } } else if extData.PbAdslot != "" { value = extData.PbAdslot } } } return value } // getChannelName returns channel name func getChannelName(bidRequest *openrtb_ext.RequestWrapper) string { reqExt, err := bidRequest.GetRequestExt() if err == nil && reqExt != nil { prebidExt := reqExt.GetPrebid() if prebidExt != nil && prebidExt.Channel != nil { return prebidExt.Channel.Name } } return catchAll } // getAdUnitCode returns adUnit code func getAdUnitCode(imp *openrtb_ext.ImpWrapper) string { adUnitCode := catchAll impExt, err := imp.GetImpExt() if err == nil && impExt != nil && impExt.GetGpId() != "" { return impExt.GetGpId() } if imp.TagID != "" { return imp.TagID } if impExt != nil { impExtData := impExt.GetData() if impExtData != nil && impExtData.PbAdslot != "" { return impExtData.PbAdslot } prebidExt := impExt.GetPrebid() if prebidExt != nil && prebidExt.StoredRequest.ID != "" { return prebidExt.StoredRequest.ID } } return adUnitCode } // isMobileDevice returns true if device is mobile func isMobileDevice(userAgent string) bool { isMobile, err := 
regexp.MatchString("(?i)Phone|iPhone|Android.*Mobile|Mobile.*Android", userAgent) if err != nil { return false } return isMobile } // isTabletDevice returns true if device is tablet func isTabletDevice(userAgent string) bool { isTablet, err := regexp.MatchString("(?i)tablet|iPad|touch.*Windows NT|Windows NT.*touch|Android", userAgent) if err != nil { return false } return isTablet } // prepareRuleCombinations prepares rule combinations based on schema dimensions and request fields func prepareRuleCombinations(keys []string, delimiter string) []string { var schemaFields []string numSchemaFields := len(keys) ruleKey := newFloorRuleKeys(delimiter) for i := 0; i < numSchemaFields; i++ { schemaFields = append(schemaFields, strings.ToLower(keys[i])) } ruleKey.appendRuleKey(schemaFields) for numWildCard := 1; numWildCard <= numSchemaFields; numWildCard++ { newComb := generateCombinations(numSchemaFields, numWildCard) sortCombinations(newComb, numSchemaFields) for i := 0; i < len(newComb); i++ { eachSet := make([]string, numSchemaFields) copy(eachSet, schemaFields) for j := 0; j < len(newComb[i]); j++ { eachSet[newComb[i][j]] = catchAll } ruleKey.appendRuleKey(eachSet) } } return ruleKey.getAllRuleKeys() } // generateCombinations generates every permutation for the given number of fields with the specified number of // wildcards. 
// Permutations are returned as a list of integer lists where each integer list represents a single
// permutation with each integer indicating the position of the fields that are wildcards
// source: https://docs.prebid.org/dev-docs/modules/floors.html#rule-selection-process
func generateCombinations(numSchemaFields int, numWildCard int) (comb [][]int) {
	// Walk every non-empty bitmask over the schema fields; a set bit marks a
	// wildcard position. Masks are visited in ascending numeric order, so the
	// resulting order matches a plain subset enumeration.
	for mask := 1; mask < 1<<numSchemaFields; mask++ {
		if bits.OnesCount(uint(mask)) != numWildCard {
			continue
		}
		positions := make([]int, 0, numWildCard)
		for pos := 0; pos < numSchemaFields; pos++ {
			if mask&(1<<pos) != 0 {
				positions = append(positions, pos)
			}
		}
		comb = append(comb, positions)
	}
	return comb
}

// sortCombinations sorts the list of combinations from most specific to least specific. A combination is considered more specific than
// another combination if it has more exact values (less wildcards). If two combinations have the same number of wildcards, a combination
// is considered more specific than another if its left-most fields are more exact.
func sortCombinations(comb [][]int, numSchemaFields int) {
	totalComb := 1 << numSchemaFields
	// A wildcard in a left-most position contributes a much larger weight, so
	// sorting by ascending weight puts combinations whose left-most fields
	// are exact (non-wildcard) first.
	weightOf := func(positions []int) int {
		w := 0
		for _, p := range positions {
			w += 1 << (totalComb - p)
		}
		return w
	}
	sort.SliceStable(comb, func(i, j int) bool {
		return weightOf(comb[i]) < weightOf(comb[j])
	})
}

// ruleKeys defines struct used for maintaining rule combinations generated from schema fields and request values.
// ruleKeys accumulates the rule-key combinations generated from schema
// fields and request values, deduplicating while preserving insertion order.
type ruleKeys struct {
	keyMap    map[string]bool
	keys      []string
	delimiter string
}

// newFloorRuleKeys returns an initialised ruleKeys using the given delimiter
// between schema-field values.
func newFloorRuleKeys(delimiter string) *ruleKeys {
	return &ruleKeys{
		delimiter: delimiter,
		keyMap:    map[string]bool{},
	}
}

// appendRuleKey joins rawKey with the delimiter and records it, skipping
// keys that were already added.
func (r *ruleKeys) appendRuleKey(rawKey []string) {
	key := rawKey[0]
	for _, part := range rawKey[1:] {
		key += r.delimiter + part
	}
	if !r.keyMap[key] {
		r.keyMap[key] = true
		r.keys = append(r.keys, key)
	}
}

// getAllRuleKeys returns every recorded key in insertion order.
func (r *ruleKeys) getAllRuleKeys() []string {
	return r.keys
}
package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// SmartLimiter is the Schema for the smartlimiters API
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=smartlimiters,scope=Namespaced
type SmartLimiter struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec and Status types are declared elsewhere in this package.
	Spec   SmartLimiterSpec   `json:"spec,omitempty"`
	Status SmartLimiterStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// SmartLimiterList contains a list of SmartLimiter
type SmartLimiterList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []SmartLimiter `json:"items"`
}

// GetSpec from a wrapper.
// NOTE(review): always returns nil — presumably callers of this wrapper
// interface don't need the generic spec map for this type; confirm the
// wrapper contract before relying on it.
func (in *SmartLimiter) GetSpec() map[string]interface{} {
	return nil
}

// GetObjectMeta from a wrapper
func (in *SmartLimiter) GetObjectMeta() metav1.ObjectMeta {
	return in.ObjectMeta
}

// init registers both types with the package scheme builder
// (SchemeBuilder is declared elsewhere in this package).
func init() {
	SchemeBuilder.Register(&SmartLimiter{}, &SmartLimiterList{})
}
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package loginapi

import (
	"context"
	"time"

	"chromiumos/tast/common/fixture"
	"chromiumos/tast/common/policy"
	"chromiumos/tast/common/policy/fakedms"
	"chromiumos/tast/errors"
	"chromiumos/tast/local/chrome"
	"chromiumos/tast/local/chrome/ash"
	"chromiumos/tast/local/chrome/uiauto"
	"chromiumos/tast/local/chrome/uiauto/nodewith"
	"chromiumos/tast/local/chrome/uiauto/role"
	"chromiumos/tast/local/input"
	"chromiumos/tast/local/mgs"
	"chromiumos/tast/local/session"
	"chromiumos/tast/testing"
)

// init registers the test with the tast framework, using the enrolled
// fake-DMS fixture so device policies can be served locally.
func init() {
	testing.AddTest(&testing.Test{
		Func:         SharedManagedGuestSessionCleanup,
		LacrosStatus: testing.LacrosVariantNeeded,
		Desc:         "Test chrome.login.endSharedSession Extension API properly performs cleanup",
		Contacts: []string{
			"hendrich@chromium.com",
			"chromeos-commercial-identity@google.com",
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Fixture:      fixture.FakeDMSEnrolled,
	})
}

// SharedManagedGuestSessionCleanup tests that chrome.login.endSession performs
// its cleanup operations correctly. The following cleanups are tested:
// 1. Browsing data: This is tested by opening a browser page, setting a
// cookie, and checking that both the browser history and cookie are cleared
// after cleanup.
// 2. Open windows: This is tested by opening a browser tab and checking that
// the tab is closed.
// 3. Extensions: This is tested by checking that the background page
// connection is closed after cleanup. This is not a direct check since we
// cannot test if an extension has been reinstalled.
// The RestrictedManagedGuestSessionExtensionCleanupExemptList policy is also
// tested here.
// 4. Clipboard: This is tested by setting clipboard data and checking that it
// is cleared.
//
// Printing is not tested due to the set up needed and will be covered in a
// browser test in Chrome instead.
func SharedManagedGuestSessionCleanup(ctx context.Context, s *testing.State) { fdms := s.FixtValue().(fakedms.HasFakeDMS).FakeDMS() accountID := "foo@managedchrome.com" loginScreenExtensionID := mgs.LoginScreenExtensionID inSessionExtensionID := mgs.InSessionExtensionID // ID for Google Keep extension. Note this extension is arbitrarily chosen // and is used to test the // RestrictedManagedGuestSessionExtensionCleanupExemptList policy. googleKeepExtensionID := "lpcaedmchfhocbbapmcbpinfpgnhiddi" // ID for the Test API extension. testAPIExtensionID := "behllobkkfkfnphdnhnkndlbkcpglgmj" mgs, cr, err := mgs.New( ctx, fdms, mgs.Accounts(accountID), mgs.AddPublicAccountPolicies(accountID, []policy.Policy{ &policy.ExtensionInstallForcelist{ Val: []string{inSessionExtensionID, googleKeepExtensionID}, }, &policy.RestrictedManagedGuestSessionExtensionCleanupExemptList{ Val: []string{inSessionExtensionID, testAPIExtensionID}, }, }), mgs.ExtraPolicies([]policy.Policy{ &policy.DeviceLoginScreenExtensions{ Val: []string{loginScreenExtensionID}, }, &policy.DeviceRestrictedManagedGuestSessionEnabled{ Val: true, }, }), mgs.ExtraChromeOptions(chrome.ExtraArgs("--force-devtools-available")), ) if err != nil { s.Error("Failed to start Chrome on Signin screen with MGS accounts: ", err) } defer mgs.Close(ctx) sm, err := session.NewSessionManager(ctx) if err != nil { s.Fatal("Failed to connect to session manager: ", err) } sw, err := sm.WatchSessionStateChanged(ctx, "started") if err != nil { s.Fatal("Failed to watch for D-Bus signals: ", err) } defer sw.Close(ctx) loginScreenBGURL := chrome.ExtensionBackgroundPageURL(loginScreenExtensionID) conn, err := cr.NewConnForTarget(ctx, chrome.MatchTargetURL(loginScreenBGURL)) if err != nil { s.Fatal("Failed to connect to login screen background page: ", err) } defer conn.Close() // Launch a shared managed guest session. 
password := "password" if err := conn.Call(ctx, nil, `(password) => new Promise((resolve, reject) => { chrome.login.launchSharedManagedGuestSession(password, () => { if (chrome.runtime.lastError) { reject(new Error(chrome.runtime.lastError.message)); return; } resolve(); }); })`, password); err != nil { s.Fatal("Failed to launch shared MGS: ", err) } select { case <-sw.Signals: // Pass case <-ctx.Done(): s.Fatal("Timeout before getting SessionStateChanged signal: ", err) } inSessionBGURL := chrome.ExtensionBackgroundPageURL(inSessionExtensionID) inSessionConn, err := cr.NewConnForTarget(ctx, chrome.MatchTargetURL(inSessionBGURL)) if err != nil { s.Fatal("Failed to connect to in-session background page: ", err) } defer inSessionConn.Close() swLocked, err := sm.WatchScreenIsLocked(ctx) if err != nil { s.Fatal("Failed watch for screen lock: ", err) } googleKeepBGURL := chrome.ExtensionBackgroundPageURL(googleKeepExtensionID) googleKeepConn, err := cr.NewConnForTarget(ctx, chrome.MatchTargetURL(googleKeepBGURL)) if err != nil { s.Fatal("Failed to connect to Google Keep background page: ", err) } defer googleKeepConn.Close() // Store arbitrary data in localStorage of the Google Keep extension. if err := googleKeepConn.Eval(ctx, `new Promise((resolve, reject) => { chrome.storage.local.set({foo: 1}, () => { if (chrome.runtime.lastError) { reject(new Error(chrome.runtime.lastError.message)); return; } resolve(); }); })`, nil); err != nil { s.Fatal("Failed to set localStorage for Google Keep: ", err) } // Open a non-trivial webpage that takes longer to unload. pageConn, err := cr.NewConn(ctx, "https://www.google.com") if err != nil { s.Fatal("Failed to open www.google.com: ", err) } defer pageConn.Close() // Make sure the webpage has a cookie. 
if err := pageConn.Eval(ctx, "document.cookie = document.cookie || 'abcdef'", nil); err != nil { s.Fatal("Failed to ensure a cookie: ", err) } tConn, err := cr.TestAPIConn(ctx) if err != nil { s.Fatal("Failed to create Test API connection: ", err) } // Set clipboard data. if err := ash.SetClipboard(ctx, tConn, "clipboard string"); err != nil { s.Fatal("Failed to set clipboard: ", err) } // Call login.endSharedSession() to end the shared session and trigger // cleanup. At the end of the cleanup, the screen will be locked. if err := inSessionConn.Eval(ctx, `new Promise((resolve, reject) => { chrome.login.endSharedSession(() => { if (chrome.runtime.lastError) { reject(new Error(chrome.runtime.lastError.message)); return; } resolve(); }); })`, nil); err != nil { s.Fatal("Failed to end shared session: ", err) } select { case <-swLocked.Signals: // Pass case <-ctx.Done(): s.Fatal("Timeout before getting session locked signal: ", err) } swUnlocked, err := sm.WatchScreenIsUnlocked(ctx) if err != nil { s.Fatal("Failed to watch for D-Bus signals: ", err) } defer swUnlocked.Close(ctx) // Previous conn is closed since it is a login screen extension which // closes when the session starts. conn2, err := cr.NewConnForTarget(ctx, chrome.MatchTargetURL(loginScreenBGURL)) if err != nil { s.Fatal("Failed to connect to login screen background page on lock screen: ", err) } defer conn2.Close() // Enter a new shared session. if err := conn2.Call(ctx, nil, `(password) => new Promise((resolve, reject) => { chrome.login.enterSharedSession(password, () => { if (chrome.runtime.lastError) { reject(new Error(chrome.runtime.lastError.message)); return; } resolve(); }); })`, password); err != nil { s.Fatal("Failed to enter new shared session: ", err) } select { case <-swUnlocked.Signals: // Pass case <-ctx.Done(): s.Fatal("Timeout before getting session unlocked signal: ", err) } // Check the inSessionConn is still alive. 
This indicates that the // RestrictedManagedGuestSessionExtensionCleanupExemptList was successfully // applied. if err := checkConnIsAlive(ctx, inSessionConn); err != nil { s.Fatal("In-session extension conn closed unexpectedly: ", err) } // Cleanup should have closed the Google Keep extension connection. if err := checkConnIsAlive(ctx, googleKeepConn); err == nil { s.Fatal("Google Keep extension conn was not closed: ", err) } // Create new connection for Google Keep since the old one was closed. googleKeepConn2, err := cr.NewConnForTarget(ctx, chrome.MatchTargetURL(googleKeepBGURL)) if err != nil { s.Fatal("Failed to connect to Google Keep background page: ", err) } defer googleKeepConn2.Close() // Check that localStorage is cleared. if err := googleKeepConn2.Eval(ctx, `new Promise((resolve, reject) => { chrome.storage.local.get((data) => { if (chrome.runtime.lastError) { reject(new Error(chrome.runtime.lastError.message)); return; } if (typeof data !== 'object' || Object.keys(data).length !== 0) { reject(new Error("Expected {}, got: " + JSON.stringify(data))); return; } resolve(); }); })`, nil); err != nil { s.Fatal("Local storage for Google Keep was not cleared: ", err) } // Cleanup should have closed all open browser windows. if err := pageConn.Eval(ctx, "undefined", nil); err == nil { s.Fatal("Page conn was not closed: ", err) } // Try to restore browser tabs from previous session. keyboard, err := input.Keyboard(ctx) if err != nil { s.Fatal("Failed to get keyboard: ", err) } defer keyboard.Close() if err := keyboard.Accel(ctx, "Ctrl+Shift+t"); err != nil { s.Fatal("Failed to run keyboard command for restoring tabs: ", err) } // Check that no browser tabs from previous session got restored. pages, err := cr.FindTargets(ctx, chrome.MatchAllPages()) if err != nil { s.Fatal("Failed to collect info about Chrome webpages: ", err) } if len(pages) > 0 { s.Fatal("Expected no restored tabs but found ", len(pages)) } // Open the browsing history page. 
historyConn, err := cr.NewConn(ctx, "chrome://history") if err != nil { s.Fatal("Failed to open chrome://history: ", err) } defer historyConn.Close() ui := uiauto.New(tConn) // Check that there are no history entries. EnsureGoneFor is needed as the // UI tree is not immediately populated so the node will not be present // initially. if err := ui.EnsureGoneFor(nodewith.HasClass("website-link").Role(role.Link), 5*time.Second)(ctx); err != nil { s.Fatal("Browser history was not cleared: ", err) } clipboardText, err := ash.ClipboardTextData(ctx, tConn) if err != nil { s.Fatal("Failed to get clipboard text: ", err) } if clipboardText != "" { s.Fatal("Clipboard was not cleared: ", clipboardText) } } func checkConnIsAlive(ctx context.Context, conn *chrome.Conn) error { result := false if err := conn.Eval(ctx, "true", &result); err != nil { return err } if !result { return errors.New("eval 'true' returned false") } return nil }
package main

import (
	"log"
	"net/rpc/jsonrpc"
)

// Agrs holds the two integer operands sent to the RPC server.
type Agrs struct {
	A, B int
}

// Quotient holds the result of an integer division: quotient and remainder.
type Quotient struct {
	Quo, Rem int
}

// main dials the JSON-RPC server on :1234 and calls Math.Multiply and
// Math.Divide, printing the results.
//
// Alternative transports (kept for reference):
//   rpc.DialHTTP("tcp", serverAddress+":1234") // HTTP RPC
//   rpc.Dial("tcp", ":1234")                   // raw TCP RPC
func main() {
	// JSON-RPC over TCP.
	client, err := jsonrpc.Dial("tcp", ":1234")
	if err != nil {
		// Include the underlying error so the failure is diagnosable.
		log.Fatal("rpc dial fail: ", err)
	}
	defer client.Close()

	args := Agrs{12, 5}

	var reply int
	if err := client.Call("Math.Multiply", args, &reply); err != nil {
		log.Fatal("Math.Multiply call fail: ", err)
	}
	log.Printf("Math: %d*%d=%d\n", args.A, args.B, reply)

	var quotient Quotient
	if err := client.Call("Math.Divide", args, &quotient); err != nil {
		log.Fatal("Math.Divide call fail: ", err)
	}
	log.Printf("Math: %d/%d=%d remainder %d\n", args.A, args.B, quotient.Quo, quotient.Rem)
}

// tcp rpc 方式
package server //服务器调度策略 const ( SERVERSCHENIL uint32 = iota //不接受调度 SERVERSCHEROUND //轮流调度 SERVERSCHELOAD //最小负载调度 ) // Info 服务器信息,用于代理发现服务器 // 服务器启动后发布自己的信息给代理 type Info struct { Addr string `json:"addr"` //服务器地址 Type uint32 `json:"type"` //服务器类型 Sche uint32 `json:"sche"` //服务器调度策略 Load uint32 `json:"load"` //服务器负载 } // Topic 服务器信息主题 // 用于服务器发布和代理服务器订阅 func (si *Info) Topic() string { return "ServerInfo" }
package main

import (
	"github.com/GoAdminGroup/go-admin/context"
	"github.com/GoAdminGroup/go-admin/modules/db"
	"github.com/GoAdminGroup/go-admin/plugins/admin/modules/table"
	"github.com/GoAdminGroup/go-admin/template/types/form"
)

// GetBlogArticleTable builds the go-admin table model for the blog_article
// MySQL table: a list view exposing every column (with the filter area
// hidden and only id filterable), and a form view in which every field is
// disabled for both create and update, making the form read-only.
func GetBlogArticleTable(ctx *context.Context) table.Table {
	blogArticle := table.NewDefaultTable(table.DefaultConfigWithDriver("mysql"))

	// List view configuration.
	info := blogArticle.GetInfo().HideFilterArea()

	info.AddField("Id", "id", db.Int).
		FieldFilterable()
	info.AddField("Tag_id", "tag_id", db.Int)
	info.AddField("Title", "title", db.Varchar)
	info.AddField("Desc", "desc", db.Varchar)
	info.AddField("Content", "content", db.Text)
	info.AddField("Cover_image_url", "cover_image_url", db.Varchar)
	info.AddField("Created_on", "created_on", db.Int)
	info.AddField("Created_by", "created_by", db.Varchar)
	info.AddField("Modified_on", "modified_on", db.Int)
	info.AddField("Modified_by", "modified_by", db.Varchar)
	info.AddField("Deleted_on", "deleted_on", db.Int)
	info.AddField("State", "state", db.Tinyint)

	info.SetTable("blog_article").SetTitle("BlogArticle").SetDescription("BlogArticle")

	// Form view configuration. Every field is disabled on both create and
	// update, so the generated form cannot modify any column.
	formList := blogArticle.GetForm()

	formList.AddField("Id", "id", db.Int, form.Default).
		FieldDisableWhenCreate().
		FieldDisableWhenUpdate()
	formList.AddField("Tag_id", "tag_id", db.Int, form.Number).
		FieldDisableWhenCreate().
		FieldDisableWhenUpdate()
	formList.AddField("Title", "title", db.Varchar, form.Text).
		FieldDisableWhenCreate().
		FieldDisableWhenUpdate()
	formList.AddField("Desc", "desc", db.Varchar, form.Text).
		FieldDisableWhenCreate().
		FieldDisableWhenUpdate()
	formList.AddField("Content", "content", db.Text, form.RichText).
		FieldDisableWhenCreate().
		FieldDisableWhenUpdate()
	formList.AddField("Cover_image_url", "cover_image_url", db.Varchar, form.Text).
		FieldDisableWhenCreate().
		FieldDisableWhenUpdate()
	formList.AddField("Created_on", "created_on", db.Int, form.Number).
		FieldDisableWhenCreate().
		FieldDisableWhenUpdate()
	formList.AddField("Created_by", "created_by", db.Varchar, form.Text).
		FieldDisableWhenCreate().
		FieldDisableWhenUpdate()
	formList.AddField("Modified_on", "modified_on", db.Int, form.Number).
		FieldDisableWhenCreate().
		FieldDisableWhenUpdate()
	formList.AddField("Modified_by", "modified_by", db.Varchar, form.Text).
		FieldDisableWhenCreate().
		FieldDisableWhenUpdate()
	formList.AddField("Deleted_on", "deleted_on", db.Int, form.Number).
		FieldDisableWhenCreate().
		FieldDisableWhenUpdate()
	formList.AddField("State", "state", db.Tinyint, form.Number).
		FieldDisableWhenCreate().
		FieldDisableWhenUpdate()

	formList.SetTable("blog_article").SetTitle("BlogArticle").SetDescription("BlogArticle")

	return blogArticle
}
/** * Testing file for linked list **/ package linkedListTesting import ( "testing" // "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" ll "linkedList/main.go/linkedlist" "fmt" ) type removeSuite struct { suite.Suite } var al ll.LinkedList func (s *removeSuite) BeforeTest(suiteName, testName string) { al = ll.LinkedList{} al.Append(2) al.Append(3) al.Append(4) al.Append(10) // fmt.Printf("TestName: %v \n", testName) } func (s *removeSuite) AfterTest(suiteName, testName string) { // fmt.Println("This runs after test") } // Test for FindItem function func (s *removeSuite) TestFindItem() { _, f1 := al.FindItem(2) _, f2 := al.FindItem(3) _, f3 := al.FindItem(4) _, f4 := al.FindItem(10) item5, f5 := al.FindItem(11) // Checks for items s.Equal(f1, true, "Item1 should be 2") s.Equal(f2, true, "Item2 should be 3") s.Equal(f3, true, "Item3 should be 4") s.Equal(f4, true, "Item4 should be 10") // Checks if item was not found s.Equal(f5, false, "f5 should be false") // is item5 nil? 
means it was not found if s.Nil(item5) { fmt.Println("Item5 is nil because it was not found") } } // Function test when head is removed func (s *removeSuite) TestRemoveHead() { al.RemoveItem(2) prevHead, exists := al.FindItem(2) s.Equal(al.Head().Item, 3, "Head does not equal 3") s.Equal(exists, false, "prev head does not exists , return false") if s.Nil(prevHead) { fmt.Println("previous Head is nil, it does not longer exists") } } // Function test when tail is removed func (s *removeSuite) TestRemoveTail() { removed := al.RemoveItem(10) prevTail, found := al.FindItem(10) s.Equal(removed, true, "Element was not removed") s.Equal(found, false, "Element was not removed") if s.Nil(prevTail) { fmt.Println("previous tail is nil, it does not longer exists") } } // Function test when middle node is removed func (s *removeSuite) TestRemoveMiddle() { prevNode,_ := al.FindItem(3) afterNode,_ := al.FindItem(10) removedNode := al.RemoveItem(4) findPrevHead, found := al.FindItem(4) s.Equal(removedNode, true, "Node should be removed") s.Equal(prevNode.Next, afterNode, "Prev node does not equal after node") s.Equal(found, false, "Prev node does not equal after node") if s.Nil(findPrevHead) { fmt.Println("previous node is nil, it does not longer exists") } } func TestLinkedListSuite(t *testing.T) { st := new(removeSuite) suite.Run(t, st) }
package main

import (
	"flag"
	"fmt"
	"os"

	_ "github.com/go-sql-driver/mysql"
	"github.com/sjmudd/mysql_defaults_file"

	"github.com/takaidohigasi/gtid-errant-fixer/src/replica"
)

// exitWithError prints err to stderr and terminates with a non-zero status.
func exitWithError(err error) {
	fmt.Fprintln(os.Stderr, err)
	os.Exit(1)
}

// main reads a mysql defaults file, connects to the server, and fixes any
// errant GTIDs on the replica, optionally skipping the confirmation prompt.
func main() {
	var conf, monitorUser, monitorPass string
	var forceOption bool

	flag.StringVar(&conf, "c", ".my.cnf", "mysql client config (needs SUPER privilege to run STOP/START SLAVE)")
	flag.StringVar(&monitorUser, "u", "root", "monitor user")
	flag.StringVar(&monitorPass, "p", "", "monitor user password")
	flag.BoolVar(&forceOption, "f", false, "force execution (skip prompt to confirm Y/N)")
	flag.Parse()

	// Fail fast if the defaults file does not exist.
	if _, err := os.Stat(conf); err != nil {
		exitWithError(err)
	}

	db, err := mysql_defaults_file.OpenUsingDefaultsFile("mysql", conf, "")
	if err != nil {
		exitWithError(err)
	}
	defer db.Close()

	mysqlDB, err := replica.NewMySQLDB(db, monitorUser, monitorPass)
	if err != nil {
		exitWithError(err)
	}
	if err := mysqlDB.FixErrantGTID(forceOption); err != nil {
		exitWithError(err)
	}
	fmt.Println("completed.")
}
//go:build !linux
// +build !linux

package main

// notifyReady is a no-op on non-Linux platforms: there is no init service
// (such as systemd on Linux) to signal readiness to.
func notifyReady(_ *logrus.Logger) {
	// No init service to notify
}
package drivers // Driver are the names of supported driver names type Driver string const ( // MySQL database driver MySQL = "mysql" // Postgres database driver Postgres = "postgres" // SQLServer database driver SQLServer = "sqlserver" // SQLite database driver SQLite = "sqlite" // SQLite3 database driver SQLite3 = "sqlite3" )
package handler

import (
	"net/http"

	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"

	"github.com/Lunchr/luncher-api/db"
	"github.com/Lunchr/luncher-api/db/model"
	"github.com/Lunchr/luncher-api/router"
	"github.com/Lunchr/luncher-api/session"
	"github.com/deiwin/facebook"
	"golang.org/x/oauth2"
)

// RedirectToFBForLogin returns a handler that redirects the user to Facebook to log in
func RedirectToFBForLogin(sessionManager session.Manager, auther facebook.Authenticator) router.Handler {
	return func(w http.ResponseWriter, r *http.Request) *router.HandlerError {
		session := sessionManager.GetOrInit(w, r)
		redirectURL := auther.AuthURL(session)
		http.Redirect(w, r, redirectURL, http.StatusSeeOther)
		return nil
	}
}

// RedirectedFromFBForLogin returns a handler that receives the user and page tokens for the
// user who has just logged in through Facebook. Updates the user and page
// access tokens in the DB
func RedirectedFromFBForLogin(sessionManager session.Manager, fbAuth facebook.Authenticator, users db.Users, restaurants db.Restaurants) router.Handler {
	return func(w http.ResponseWriter, r *http.Request) *router.HandlerError {
		session := sessionManager.GetOrInit(w, r)
		tok, handlerErr := getLongTermToken(session, r, fbAuth)
		if handlerErr != nil {
			return handlerErr
		}
		fbUserID, err := getUserID(tok, fbAuth)
		if err != nil {
			return router.NewHandlerError(err, "Failed to get the user information from Facebook", http.StatusInternalServerError)
		}
		user, err := users.GetFbID(fbUserID)
		if err == mgo.ErrNotFound {
			return router.NewHandlerError(err, "User not registered", http.StatusForbidden)
		} else if err != nil {
			return router.NewHandlerError(err, "Failed to find the user from DB", http.StatusInternalServerError)
		}
		// BUGFIX: these two conditions previously tested `err` (the stale DB
		// error, nil at this point) instead of `handlerErr`, silently
		// discarding failures from the store operations.
		if handlerErr = storeAccessTokensInDB(user.ID, fbUserID, tok, session, users); handlerErr != nil {
			return handlerErr
		}
		if handlerErr = storeTokensForRestaurantPages(fbUserID, tok, restaurants, users, fbAuth); handlerErr != nil {
			return handlerErr
		}
		http.Redirect(w, r, "/#/admin", http.StatusSeeOther)
		return nil
	}
}

// storeTokensForRestaurantPages fetches page access tokens for every
// restaurant the FB user manages and persists them on the user record.
func storeTokensForRestaurantPages(fbUserID string, userAccessToken *oauth2.Token, restaurants db.Restaurants, users db.Users, fbAuth facebook.Authenticator) *router.HandlerError {
	managedRestaurants, handlerErr := getRestaurantsManagedThroughFB(userAccessToken, restaurants, fbAuth)
	if handlerErr != nil {
		return handlerErr
	}
	pageAccessTokens, handlerErr := getPageAccessTokensForRestaurants(userAccessToken, managedRestaurants, fbAuth)
	if handlerErr != nil {
		return handlerErr
	}
	if err := users.SetPageAccessTokens(fbUserID, pageAccessTokens); err != nil {
		return router.NewHandlerError(err, "Failed to persist Facebook page access tokens", http.StatusInternalServerError)
	}
	return nil
}

// getPageAccessTokensForRestaurants resolves one page access token per
// restaurant, in the same order as the input slice.
func getPageAccessTokensForRestaurants(userAccessToken *oauth2.Token, restaurants []*model.Restaurant, fbAuth facebook.Authenticator) ([]model.FacebookPageToken, *router.HandlerError) {
	pageAccessTokens := make([]model.FacebookPageToken, len(restaurants))
	for i, restaurant := range restaurants {
		pageAccessToken, handlerErr := getPageAccessToken(userAccessToken, restaurant.FacebookPageID, fbAuth)
		if handlerErr != nil {
			return nil, handlerErr
		}
		pageAccessTokens[i] = pageAccessToken
	}
	return pageAccessTokens, nil
}

// getPageAccessToken exchanges the user token for a page-scoped token,
// mapping facebook.ErrNoSuchPage to 403 and anything else to 500.
func getPageAccessToken(userAccessToken *oauth2.Token, pageID string, fbAuth facebook.Authenticator) (model.FacebookPageToken, *router.HandlerError) {
	pageAccessToken, err := fbAuth.PageAccessToken(userAccessToken, pageID)
	if err == facebook.ErrNoSuchPage {
		return model.FacebookPageToken{}, router.NewHandlerError(err, "Access denied by Facebook to the managed page", http.StatusForbidden)
	} else if err != nil {
		return model.FacebookPageToken{}, router.NewHandlerError(err, "Failed to get access to the Facebook page", http.StatusInternalServerError)
	}
	return model.FacebookPageToken{
		PageID: pageID,
		Token:  pageAccessToken,
	}, nil
}

// getRestaurantsManagedThroughFB intersects the FB pages the user manages
// with the restaurants known to the DB by Facebook page ID.
func getRestaurantsManagedThroughFB(userAccessToken *oauth2.Token, restaurants db.Restaurants, fbAuth facebook.Authenticator) ([]*model.Restaurant, *router.HandlerError) {
	fbPagesManagedByUser, handlerErr := getPages(userAccessToken, fbAuth)
	if handlerErr != nil {
		return nil, handlerErr
	}
	fbPageIDs := make([]string, len(fbPagesManagedByUser))
	for i, fbPage := range fbPagesManagedByUser {
		fbPageIDs[i] = fbPage.ID
	}
	restaurantsManagedByUserThroughFB, err := restaurants.GetByFacebookPageIDs(fbPageIDs)
	if err != nil {
		return nil, router.NewHandlerError(err, "Failed to find restaurants for FB pages associated with this user", http.StatusInternalServerError)
	}
	return restaurantsManagedByUserThroughFB, nil
}

// getLongTermToken validates the OAuth redirect parameters and exchanges
// them for a long-lived token, translating each auth failure mode to an
// appropriate HTTP status.
func getLongTermToken(session string, r *http.Request, auther facebook.Authenticator) (*oauth2.Token, *router.HandlerError) {
	tok, err := auther.Token(session, r)
	if err != nil {
		if err == facebook.ErrMissingState {
			return nil, router.NewHandlerError(err, "Expecting a 'state' value", http.StatusBadRequest)
		} else if err == facebook.ErrInvalidState {
			return nil, router.NewHandlerError(err, "Invalid 'state' value", http.StatusForbidden)
		} else if err == facebook.ErrMissingCode {
			return nil, router.NewHandlerError(err, "Expecting a 'code' value", http.StatusBadRequest)
		}
		return nil, router.NewHandlerError(err, "Failed to connect to Facebook", http.StatusInternalServerError)
	}
	return tok, nil
}

// storeAccessTokensInDB persists the FB user access token and the current
// session ID on the user record.
func storeAccessTokensInDB(userID bson.ObjectId, fbUserID string, tok *oauth2.Token, sessionID string, usersCollection db.Users) *router.HandlerError {
	if err := usersCollection.SetAccessToken(fbUserID, *tok); err != nil {
		return router.NewHandlerError(err, "Failed to persist Facebook user access token in DB", http.StatusInternalServerError)
	}
	if err := usersCollection.SetSessionID(userID, sessionID); err != nil {
		return router.NewHandlerError(err, "Failed to persist session ID in DB", http.StatusInternalServerError)
	}
	return nil
}

// getUserID asks the Facebook API who the token belongs to.
func getUserID(tok *oauth2.Token, auther facebook.Authenticator) (string, error) {
	api := auther.APIConnection(tok)
	user, err := api.Me()
	if err != nil {
		return "", err
	}
	return user.ID, nil
}
package v1alpha1

const (
	// SystemSecretName is a secret resource created and updated dynamically
	// by a controller. It should be used for communicating between systems.
	SystemSecretName = "koli-system-token"

	// DefaultClusterRole is the default mutator RBAC cluster role name.
	DefaultClusterRole = "koli:mutator:default"
)
package main

import (
	"log"

	"github.com/gocql/gocql"
	"github.com/relops/cqlc/cqlc"
	"twitter"
)

// TWEETS is the generated cqlc table definition for the tweets table.
var TWEETS = twitter.TweetsTableDef()

// main demonstrates a cqlc round trip against a local Cassandra node:
// upsert one tweet, select it back, and print the bound result.
func main() {
	host := "127.0.0.1"
	keyspace := "twitter_example"

	cluster := gocql.NewCluster(host)
	cluster.Keyspace = keyspace
	session, err := cluster.CreateSession()
	if err != nil {
		log.Fatalf("Could not create CQL session: %s", err)
	}
	defer session.Close()

	ctx := cqlc.NewContext()

	err = ctx.Upsert(TWEETS).
		SetString(TWEETS.NAME, "tweeter").
		Where(TWEETS.ID.Eq(1)).
		Exec(session)
	if err != nil {
		log.Fatalf("Could not execute CQL upsert: %s", err)
	}

	iter, err := ctx.Select().
		From(TWEETS).
		Where(TWEETS.ID.Eq(1)).
		Fetch(session)
	if err != nil {
		log.Fatalf("Could not execute CQL select: %s", err)
	}

	tweets, err := twitter.BindTweets(iter)
	if err != nil {
		log.Fatalf("Could not bind tweets: %s", err)
	}

	// BUGFIX: this message previously duplicated "Could not bind tweets".
	err = iter.Close()
	if err != nil {
		log.Fatalf("Could not close iterator: %s", err)
	}

	log.Printf("Got tweets: %+v\n", tweets)
}
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package kvcoord

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/settings"
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/metric"
	"github.com/cockroachdb/errors"
)

const (
	// maxTxnRefreshAttempts defines the maximum number of times a single
	// transactional batch can trigger a refresh spans attempt. A batch
	// may need multiple refresh attempts if it runs into progressively
	// larger timestamps as more and more of its component requests are
	// executed.
	maxTxnRefreshAttempts = 5
)

// MaxTxnRefreshSpansBytes is a threshold in bytes for refresh spans stored
// on the coordinator during the lifetime of a transaction. Refresh spans
// are used for SERIALIZABLE transactions to avoid client restarts.
// WithPublic exposes the setting in documentation and to users.
var MaxTxnRefreshSpansBytes = settings.RegisterIntSetting(
	"kv.transaction.max_refresh_spans_bytes",
	"maximum number of bytes used to track refresh spans in serializable transactions",
	256*1000,
).WithPublic()

// txnSpanRefresher is a txnInterceptor that collects the read spans of a
// serializable transaction in the event it gets a serializable retry error. It
// can then use the set of read spans to avoid retrying the transaction if all
// the spans can be updated to the current transaction timestamp.
// // Serializable isolation mandates that transactions appear to have occurred in // some total order, where none of their component sub-operations appear to have // interleaved with sub-operations from other transactions. CockroachDB enforces // this isolation level by ensuring that all of a transaction's reads and writes // are performed at the same HLC timestamp. This timestamp is referred to as the // transaction's commit timestamp. // // As a transaction in CockroachDB executes at a certain provisional commit // timestamp, it lays down intents at this timestamp for any write operations // and ratchets various timestamp cache entries to this timestamp for any read // operations. If a transaction performs all of its reads and writes and is able // to commit at its original provisional commit timestamp then it may go ahead // and do so. However, for a number of reasons including conflicting reads and // writes, a transaction may discover that its provisional commit timestamp is // too low and that it needs to move this timestamp forward to commit. // // This poses a problem for operations that the transaction has already // completed at lower timestamps. Are the effects of these operations still // valid? The transaction is always free to perform a full restart at a higher // epoch, but this often requires iterating in a client-side retry loop and // performing all of the transaction's operations again. Intents are maintained // across retries to improve the chance that later epochs succeed, but it is // vastly preferable to avoid re-issuing these operations. Instead, it would be // ideal if the transaction could "move" each of its operations to its new // provisional commit timestamp without redoing them entirely. // // Only a single write intent can exist on a key and no reads are allowed above // the intent's timestamp until the intent is resolved, so a transaction is free // to move any of its intent to a higher timestamp. 
In fact, a synchronous // rewrite of these intents isn't even necessary because intent resolution will // already rewrite the intents at higher timestamp if necessary. So, moving // write intents to a higher timestamp can be performed implicitly by committing // their transaction at a higher timestamp. However, unlike intents created by // writes, timestamp cache entries created by reads only prevent writes on // overlapping keys from being written at or below their timestamp; they do // nothing to prevent writes on overlapping keys from being written above their // timestamp. This means that a transaction is not free to blindly move its // reads to a higher timestamp because writes from other transaction may have // already invalidated them. In effect, this means that transactions acquire // pessimistic write locks and optimistic read locks. // // The txnSpanRefresher is in charge of detecting when a transaction may want to // move its provisional commit timestamp forward and determining whether doing // so is safe given the reads that it has performed (i.e. its "optimistic read // locks"). When the interceptor decides to attempt to move a transaction's // timestamp forward, it first "refreshes" each of its reads. This refreshing // step revisits all of the key spans that the transaction has read and checks // whether any writes have occurred between the original time that these span // were read and the timestamp that the transaction now wants to commit at that // change the result of these reads. If any read would produce a different // result at the newer commit timestamp, the refresh fails and the transaction // is forced to fall back to a full transaction restart. However, if all of the // reads would produce exactly the same result at the newer commit timestamp, // the timestamp cache entries for these reads are updated and the transaction // is free to update its provisional commit timestamp without needing to // restart. 
type txnSpanRefresher struct {
	st      *cluster.Settings    // cluster settings (supplies the refresh-span memory limit)
	knobs   *ClientTestingKnobs  // testing knobs
	riGen   rangeIteratorFactory // passed to the span set when condensing
	wrapped lockedSender         // next interceptor in the stack

	// refreshFootprint contains key spans which were read during the
	// transaction. In case the transaction's timestamp needs to be pushed, we can
	// avoid a retriable error by "refreshing" these spans: verifying that there
	// have been no changes to their data in between the timestamp at which they
	// were read and the higher timestamp we want to move to.
	refreshFootprint condensableSpanSet
	// refreshInvalid is set if refresh spans have not been collected (because the
	// memory budget was exceeded). When set, refreshFootprint is empty. This is
	// set when we've failed to condense the refresh spans below the target memory
	// limit.
	refreshInvalid bool
	// refreshedTimestamp keeps track of the largest timestamp that refreshes
	// don't need to re-check (i.e. if we refresh, we refresh from this timestamp
	// onwards). After every epoch bump, it is initialized to the timestamp of the
	// first batch. It is then bumped after every successful refresh.
	refreshedTimestamp hlc.Timestamp

	// canAutoRetry is set if the txnSpanRefresher is allowed to auto-retry.
	canAutoRetry bool

	// Metrics recording the outcome of refresh attempts.
	refreshSuccess                *metric.Counter
	refreshFail                   *metric.Counter
	refreshFailWithCondensedSpans *metric.Counter
	refreshMemoryLimitExceeded    *metric.Counter
	refreshAutoRetries            *metric.Counter
}

// SendLocked implements the lockedSender interface.
func (sr *txnSpanRefresher) SendLocked(
	ctx context.Context, ba roachpb.BatchRequest,
) (*roachpb.BatchResponse, *roachpb.Error) {
	batchReadTimestamp := ba.Txn.ReadTimestamp
	if sr.refreshedTimestamp.IsEmpty() {
		// This must be the first batch we're sending for this epoch. Future
		// refreshes shouldn't check values below batchReadTimestamp, so initialize
		// sr.refreshedTimestamp.
		sr.refreshedTimestamp = batchReadTimestamp
	} else if batchReadTimestamp.Less(sr.refreshedTimestamp) {
		// sr.refreshedTimestamp might be ahead of batchReadTimestamp. We want to
		// read at the latest refreshed timestamp, so bump the batch.
		// batchReadTimestamp can be behind after a successful refresh, if the
		// TxnCoordSender hasn't actually heard about the updated read timestamp.
		// This can happen if a refresh succeeds, but then the retry of the batch
		// that produced the timestamp fails without returning the updated txn (for
		// example, through a canceled ctx). The client should only be sending
		// rollbacks in such cases.
		ba.Txn.ReadTimestamp.Forward(sr.refreshedTimestamp)
		ba.Txn.WriteTimestamp.Forward(sr.refreshedTimestamp)
	} else if sr.refreshedTimestamp != batchReadTimestamp {
		return nil, roachpb.NewError(errors.AssertionFailedf(
			"unexpected batch read timestamp: %s. Expected refreshed timestamp: %s. ba: %s. txn: %s",
			batchReadTimestamp, sr.refreshedTimestamp, ba, ba.Txn))
	}

	// Set the batch's CanForwardReadTimestamp flag.
	ba.CanForwardReadTimestamp = sr.canForwardReadTimestampWithoutRefresh(ba.Txn)
	if rArgs, hasET := ba.GetArg(roachpb.EndTxn); hasET {
		et := rArgs.(*roachpb.EndTxnRequest)
		// Assign the EndTxn's DeprecatedCanCommitAtHigherTimestamp flag if it
		// isn't already set correctly. We don't write blindly because we could
		// be dealing with a re-issued batch from splitEndTxnAndRetrySend after
		// a refresh and we don't want to mutate previously issued requests or
		// we risk a data race (checked by raceTransport). In these cases, we
		// need to clone the EndTxn request first before mutating.
		//
		// We know this is a re-issued batch if the flag is already set and we
		// need to unset it. We aren't able to detect the case where the flag is
		// not set and we now need to set it to true, but such cases don't
		// happen in practice (i.e. we'll never begin setting the flag after a
		// refresh).
		//
		// TODO(nvanbenschoten): this is ugly. If we weren't about to delete
		// this field, we'd want to do something better. Just delete this ASAP.
		if et.DeprecatedCanCommitAtHigherTimestamp != ba.CanForwardReadTimestamp {
			isReissue := et.DeprecatedCanCommitAtHigherTimestamp
			if isReissue {
				etCpy := *et
				ba.Requests[len(ba.Requests)-1].MustSetInner(&etCpy)
				et = &etCpy
			}
			et.DeprecatedCanCommitAtHigherTimestamp = ba.CanForwardReadTimestamp
		}
	}

	// Attempt a refresh before sending the batch.
	ba, pErr := sr.maybeRefreshPreemptivelyLocked(ctx, ba, false)
	if pErr != nil {
		return nil, pErr
	}

	// Send through wrapped lockedSender. Unlocks while sending then re-locks.
	br, pErr := sr.sendLockedWithRefreshAttempts(ctx, ba, sr.maxRefreshAttempts())
	if pErr != nil {
		return nil, pErr
	}

	// If the transaction is no longer pending, just return without
	// attempting to record its refresh spans.
	if br.Txn.Status != roachpb.PENDING {
		return br, nil
	}

	// Iterate over and aggregate refresh spans in the requests, qualified by
	// possible resume spans in the responses.
	if !sr.refreshInvalid {
		if err := sr.appendRefreshSpans(ctx, ba, br); err != nil {
			return nil, roachpb.NewError(err)
		}
		// Check whether we should condense the refresh spans.
		maxBytes := MaxTxnRefreshSpansBytes.Get(&sr.st.SV)
		if sr.refreshFootprint.bytes >= maxBytes {
			condensedBefore := sr.refreshFootprint.condensed
			condensedSufficient := sr.tryCondenseRefreshSpans(ctx, maxBytes)
			if condensedSufficient {
				log.VEventf(ctx, 2, "condensed refresh spans for txn %s to %d bytes",
					br.Txn, sr.refreshFootprint.bytes)
			} else {
				// Condensing was not enough. Giving up on tracking reads. Refreshes
				// will not be possible.
				log.VEventf(ctx, 2, "condensed refresh spans didn't save enough memory. txn %s. "+
					"refresh spans after condense: %d bytes",
					br.Txn, sr.refreshFootprint.bytes)
				sr.refreshInvalid = true
				sr.refreshFootprint.clear()
			}

			if sr.refreshFootprint.condensed && !condensedBefore {
				sr.refreshMemoryLimitExceeded.Inc(1)
			}
		}
	}
	return br, nil
}

// tryCondenseRefreshSpans attempts to condense the refresh spans in order to
// save memory.
// Returns true if we managed to condense them below maxBytes.
func (sr *txnSpanRefresher) tryCondenseRefreshSpans(ctx context.Context, maxBytes int64) bool {
	// A testing knob may veto condensing entirely.
	if sr.knobs.CondenseRefreshSpansFilter != nil && !sr.knobs.CondenseRefreshSpansFilter() {
		return false
	}
	sr.refreshFootprint.maybeCondense(ctx, sr.riGen, maxBytes)
	return sr.refreshFootprint.bytes < maxBytes
}

// sendLockedWithRefreshAttempts sends the batch through the wrapped sender. It
// catches serializable errors and attempts to avoid them by refreshing the txn
// at a larger timestamp.
func (sr *txnSpanRefresher) sendLockedWithRefreshAttempts(
	ctx context.Context, ba roachpb.BatchRequest, maxRefreshAttempts int,
) (*roachpb.BatchResponse, *roachpb.Error) {
	if ba.Txn.WriteTooOld {
		// The WriteTooOld flag is not supposed to be set on requests. It's only set
		// by the server and it's terminated by this interceptor on the client.
		log.Fatalf(ctx, "unexpected WriteTooOld request. ba: %s (txn: %s)",
			ba.String(), ba.Txn.String())
	}
	br, pErr := sr.wrapped.SendLocked(ctx, ba)

	// 19.2 servers might give us an error with the WriteTooOld flag set. This
	// interceptor wants to always terminate that flag. In the case of an error,
	// we can just ignore it.
	if pErr != nil && pErr.GetTxn() != nil {
		pErr.GetTxn().WriteTooOld = false
	}

	if pErr == nil && br.Txn.WriteTooOld {
		// If we got a response with the WriteTooOld flag set, then we pretend that
		// we got a WriteTooOldError, which will cause us to attempt to refresh and
		// propagate the error if we failed. When it can, the server prefers to
		// return the WriteTooOld flag, rather than a WriteTooOldError because, in
		// the former case, it can leave intents behind. We like refreshing eagerly
		// when the WriteTooOld flag is set because it's likely that the refresh
		// will fail (if we previously read the key that's now causing a WTO, then
		// the refresh will surely fail).
		// TODO(andrei): Implement a more discerning policy based on whether we've
		// read that key before.
		//
		// If the refresh fails, we could continue running the transaction even
		// though it will not be able to commit, in order for it to lay down more
		// intents. Not doing so, though, gives the SQL a chance to auto-retry.
		// TODO(andrei): Implement a more discerning policy based on whether
		// auto-retries are still possible.
		//
		// For the refresh, we have two options: either refresh everything read
		// *before* this batch, and then retry this batch, or refresh the current
		// batch's reads too and then, if successful, there'd be nothing to retry.
		// We take the former option by setting br = nil below to minimize the
		// chances that the refresh fails.
		bumpedTxn := br.Txn.Clone()
		bumpedTxn.WriteTooOld = false
		pErr = roachpb.NewErrorWithTxn(
			roachpb.NewTransactionRetryError(roachpb.RETRY_WRITE_TOO_OLD,
				"WriteTooOld flag converted to WriteTooOldError"),
			bumpedTxn)
		br = nil
	}
	if pErr != nil {
		if maxRefreshAttempts > 0 {
			br, pErr = sr.maybeRefreshAndRetrySend(ctx, ba, pErr, maxRefreshAttempts)
		} else {
			log.VEventf(ctx, 2, "not checking error for refresh; refresh attempts exhausted")
		}
	}
	sr.forwardRefreshTimestampOnResponse(br, pErr)
	return br, pErr
}

// maybeRefreshAndRetrySend attempts to catch serializable errors and avoid them
// by refreshing the txn at a larger timestamp. If it succeeds at refreshing the
// txn timestamp, it recurses into sendLockedWithRefreshAttempts and retries the
// batch. If the refresh fails, the input pErr is returned.
func (sr *txnSpanRefresher) maybeRefreshAndRetrySend(
	ctx context.Context, ba roachpb.BatchRequest, pErr *roachpb.Error, maxRefreshAttempts int,
) (*roachpb.BatchResponse, *roachpb.Error) {
	// Check for an error which can be retried after updating spans.
	canRefreshTxn, refreshTxn := roachpb.CanTransactionRefresh(ctx, pErr)
	if !canRefreshTxn || !sr.canAutoRetry {
		return nil, pErr
	}
	log.VEventf(ctx, 2, "trying to refresh to %s because of %s",
		refreshTxn.ReadTimestamp, pErr)

	// Try updating the txn spans so we can retry.
	if ok := sr.tryUpdatingTxnSpans(ctx, refreshTxn); !ok {
		log.Eventf(ctx, "refresh failed; propagating original retry error")
		return nil, pErr
	}

	// We've refreshed all of the read spans successfully and bumped
	// ba.Txn's timestamps. Attempt the request again.
	log.Eventf(ctx, "refresh succeeded; retrying original request")
	ba.UpdateTxn(refreshTxn)
	sr.refreshAutoRetries.Inc(1)

	// To prevent starvation of batches that are trying to commit, split off the
	// EndTxn request into its own batch on auto-retries. This avoids starvation
	// in two ways. First, it helps ensure that we lay down intents if any of
	// the other requests in the batch are writes. Second, it ensures that if
	// any writes are getting pushed due to contention with reads or due to the
	// closed timestamp, they will still succeed and allow the batch to make
	// forward progress. Without this, each retry attempt may get pushed because
	// of writes in the batch and then rejected wholesale when the EndTxn tries
	// to evaluate the pushed batch. When split, the writes will be pushed but
	// succeed, the transaction will be refreshed, and the EndTxn will succeed.
	args, hasET := ba.GetArg(roachpb.EndTxn)
	if len(ba.Requests) > 1 && hasET && !args.(*roachpb.EndTxnRequest).Require1PC {
		log.Eventf(ctx, "sending EndTxn separately from rest of batch on retry")
		return sr.splitEndTxnAndRetrySend(ctx, ba)
	}

	retryBr, retryErr := sr.sendLockedWithRefreshAttempts(ctx, ba, maxRefreshAttempts-1)
	if retryErr != nil {
		log.VEventf(ctx, 2, "retry failed with %s", retryErr)
		return nil, retryErr
	}
	log.VEventf(ctx, 2, "retry successful @%s", retryBr.Txn.ReadTimestamp)
	return retryBr, nil
}

// splitEndTxnAndRetrySend splits the batch in two, with a prefix containing all
// requests up to but not including the EndTxn request and a suffix containing
// only the EndTxn request. It then issues the two partial batches in order,
// stitching their results back together at the end.
func (sr *txnSpanRefresher) splitEndTxnAndRetrySend(
	ctx context.Context, ba roachpb.BatchRequest,
) (*roachpb.BatchResponse, *roachpb.Error) {
	// NOTE: call back into SendLocked with each partial batch, not into
	// sendLockedWithRefreshAttempts. This ensures that we properly set
	// CanForwardReadTimestamp on each partial batch and that we provide
	// the EndTxn batch with a chance to perform a preemptive refresh.

	// Issue a batch up to but not including the EndTxn request.
	etIdx := len(ba.Requests) - 1
	baPrefix := ba
	baPrefix.Requests = ba.Requests[:etIdx]
	brPrefix, pErr := sr.SendLocked(ctx, baPrefix)
	if pErr != nil {
		return nil, pErr
	}

	// Issue a batch containing only the EndTxn request.
	baSuffix := ba
	baSuffix.Requests = ba.Requests[etIdx:]
	baSuffix.UpdateTxn(brPrefix.Txn)
	brSuffix, pErr := sr.SendLocked(ctx, baSuffix)
	if pErr != nil {
		return nil, pErr
	}

	// Combine the responses.
	br := brPrefix
	br.Responses = append(br.Responses, roachpb.ResponseUnion{})
	if err := br.Combine(brSuffix, []int{etIdx}); err != nil {
		return nil, roachpb.NewError(err)
	}
	return br, nil
}

// maybeRefreshPreemptivelyLocked attempts to refresh a transaction's read timestamp
// eagerly. Doing so can take advantage of opportunities where the refresh is
// free or can avoid wasting work issuing a batch containing an EndTxn that will
// necessarily throw a serializable error. The method returns a batch with an
// updated transaction if the refresh is successful, or a retry error if not.
// If the force flag is true, the refresh will be attempted even if a refresh
// is not inevitable.
func (sr *txnSpanRefresher) maybeRefreshPreemptivelyLocked(
	ctx context.Context, ba roachpb.BatchRequest, force bool,
) (roachpb.BatchRequest, *roachpb.Error) {
	// If we know that the transaction will need a refresh at some point because
	// its write timestamp has diverged from its read timestamp, consider doing
	// so preemptively. We perform a preemptive refresh if either a) doing so
	// would be free because we have not yet accumulated any refresh spans, or
	// b) the batch contains a committing EndTxn request that we know will be
	// rejected if issued.
	//
	// The first case is straightforward. If the transaction has yet to perform
	// any reads but has had its write timestamp bumped, refreshing is a trivial
	// no-op. In this case, refreshing eagerly prevents the transaction from
	// performing any future reads at its current read timestamp. Not doing so
	// preemptively guarantees that we will need to perform a real refresh in
	// the future if the transaction ever performs a read. At best, this would
	// be wasted work. At worst, this could result in the future refresh
	// failing. So we might as well refresh preemptively while doing so is free.
	//
	// Note that this first case here does NOT obviate the need for server-side
	// refreshes. Notably, a transaction's write timestamp might be bumped in
	// the same batch in which it performs its first read. In such cases, a
	// preemptive refresh would not be needed but a reactive refresh would not
	// be a trivial no-op. These situations are common for one-phase commit
	// transactions.
	//
	// The second case is more complex. If the batch contains a committing
	// EndTxn request that we know will need a refresh, we don't want to bother
	// issuing it just for it to be rejected. Instead, preemptively refresh
	// before issuing the EndTxn batch. If we view reads as acquiring a form of
	// optimistic read locks under an optimistic concurrency control scheme (as
	// is discussed in the comment on txnSpanRefresher) then this preemptive
	// refresh immediately before the EndTxn is synonymous with the "validation"
	// phase of a standard OCC transaction model. However, as an optimization
	// compared to standard OCC, the validation phase is only performed when
	// necessary in CockroachDB (i.e. if the transaction's writes have been
	// pushed to higher timestamps).
	//
	// TODO(andrei): whether or not we can still auto-retry at the SQL level
	// should also play a role in deciding whether we want to refresh eagerly or
	// not.

	// If the transaction has yet to be pushed, no refresh is necessary.
	if ba.Txn.ReadTimestamp == ba.Txn.WriteTimestamp {
		return ba, nil
	}

	// If true, tryUpdatingTxnSpans will trivially succeed.
	refreshFree := ba.CanForwardReadTimestamp

	// If true, this batch is guaranteed to fail without a refresh.
	args, hasET := ba.GetArg(roachpb.EndTxn)
	refreshInevitable := hasET && args.(*roachpb.EndTxnRequest).Commit

	// If neither condition is true, defer the refresh.
	if !refreshFree && !refreshInevitable && !force {
		return ba, nil
	}

	canRefreshTxn, refreshTxn := roachpb.PrepareTransactionForRefresh(ba.Txn, ba.Txn.WriteTimestamp)
	if !canRefreshTxn || !sr.canAutoRetry {
		return roachpb.BatchRequest{}, newRetryErrorOnFailedPreemptiveRefresh(ba.Txn)
	}
	log.VEventf(ctx, 2, "preemptively refreshing to timestamp %s before issuing %s",
		refreshTxn.ReadTimestamp, ba)

	// Try updating the txn spans at a timestamp that will allow us to commit.
	if ok := sr.tryUpdatingTxnSpans(ctx, refreshTxn); !ok {
		log.Eventf(ctx, "preemptive refresh failed; propagating retry error")
		return roachpb.BatchRequest{}, newRetryErrorOnFailedPreemptiveRefresh(ba.Txn)
	}

	log.Eventf(ctx, "preemptive refresh succeeded")
	ba.UpdateTxn(refreshTxn)
	return ba, nil
}

// newRetryErrorOnFailedPreemptiveRefresh returns the retry error reported to
// the client when a preemptive refresh could not be performed.
func newRetryErrorOnFailedPreemptiveRefresh(txn *roachpb.Transaction) *roachpb.Error {
	reason := roachpb.RETRY_SERIALIZABLE
	if txn.WriteTooOld {
		reason = roachpb.RETRY_WRITE_TOO_OLD
	}
	err := roachpb.NewTransactionRetryError(reason, "failed preemptive refresh")
	return roachpb.NewErrorWithTxn(err, txn)
}

// tryUpdatingTxnSpans sends Refresh and RefreshRange commands to all spans read
// during the transaction to ensure that no writes were written more recently
// than sr.refreshedTimestamp. All implicated timestamp caches are updated with
// the final transaction timestamp.
// Returns whether the refresh was successful or not.
func (sr *txnSpanRefresher) tryUpdatingTxnSpans(
	ctx context.Context, refreshTxn *roachpb.Transaction,
) (ok bool) {
	// Track the result of the refresh in metrics.
	defer func() {
		if ok {
			sr.refreshSuccess.Inc(1)
		} else {
			sr.refreshFail.Inc(1)
			if sr.refreshFootprint.condensed {
				sr.refreshFailWithCondensedSpans.Inc(1)
			}
		}
	}()

	if sr.refreshInvalid {
		log.VEvent(ctx, 2, "can't refresh txn spans; not valid")
		return false
	} else if sr.refreshFootprint.empty() {
		log.VEvent(ctx, 2, "there are no txn spans to refresh")
		sr.refreshedTimestamp.Forward(refreshTxn.ReadTimestamp)
		return true
	}

	// Refresh all spans (merge first).
	// TODO(nvanbenschoten): actually merge spans.
	refreshSpanBa := roachpb.BatchRequest{}
	refreshSpanBa.Txn = refreshTxn
	addRefreshes := func(refreshes *condensableSpanSet) {
		// We're going to check writes between the previous refreshed timestamp, if
		// any, and the timestamp we want to bump the transaction to. Note that if
		// we've already refreshed the transaction before, we don't need to check
		// the (key ranges x timestamp range) that we've already checked - there's
		// no values there for sure.
		// More importantly, reads that have happened since we've previously
		// refreshed don't need to be checked below the timestamp at which
		// they've been read (which is the timestamp to which we've previously
		// refreshed). Checking below that timestamp (like we would, for example, if
		// we simply used txn.OrigTimestamp here), could cause false-positives that
		// would fail the refresh.
		for _, u := range refreshes.asSlice() {
			var req roachpb.Request
			// Point spans (no EndKey) use Refresh; ranged spans use RefreshRange.
			if len(u.EndKey) == 0 {
				req = &roachpb.RefreshRequest{
					RequestHeader: roachpb.RequestHeaderFromSpan(u),
					RefreshFrom:   sr.refreshedTimestamp,
				}
			} else {
				req = &roachpb.RefreshRangeRequest{
					RequestHeader: roachpb.RequestHeaderFromSpan(u),
					RefreshFrom:   sr.refreshedTimestamp,
				}
			}
			refreshSpanBa.Add(req)
			log.VEventf(ctx, 2, "updating span %s @%s - @%s to avoid serializable restart",
				req.Header().Span(), sr.refreshedTimestamp, refreshTxn.WriteTimestamp)
		}
	}
	addRefreshes(&sr.refreshFootprint)

	// Send through wrapped lockedSender. Unlocks while sending then re-locks.
	if _, batchErr := sr.wrapped.SendLocked(ctx, refreshSpanBa); batchErr != nil {
		log.VEventf(ctx, 2, "failed to refresh txn spans (%s)", batchErr)
		return false
	}

	sr.refreshedTimestamp.Forward(refreshTxn.ReadTimestamp)
	return true
}

// appendRefreshSpans appends refresh spans from the supplied batch request,
// qualified by the batch response where appropriate.
func (sr *txnSpanRefresher) appendRefreshSpans(
	ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse,
) error {
	readTimestamp := br.Txn.ReadTimestamp
	if readTimestamp.Less(sr.refreshedTimestamp) {
		// This can happen with (illegal) concurrent txn use, but that's supposed to
		// be detected by the gatekeeper interceptor.
		return errors.AssertionFailedf("attempting to append refresh spans after the tracked"+
			" timestamp has moved forward. batchTimestamp: %s refreshedTimestamp: %s ba: %s",
			errors.Safe(readTimestamp), errors.Safe(sr.refreshedTimestamp), ba)
	}

	ba.RefreshSpanIterate(br, func(span roachpb.Span) {
		if log.ExpensiveLogEnabled(ctx, 3) {
			log.VEventf(ctx, 3, "recording span to refresh: %s", span.String())
		}
		sr.refreshFootprint.insert(span)
	})
	return nil
}

// canForwardReadTimestampWithoutRefresh returns whether the transaction can
// forward its read timestamp without refreshing any read spans.
// This allows for
// the "server-side refresh" optimization, where batches are re-evaluated at a
// higher read-timestamp without returning to transaction coordinator.
//
// This requires that the transaction has encountered no spans which require
// refreshing at the forwarded timestamp and that the transaction's timestamp
// has not leaked. If either of those conditions does not hold, a client-side
// refresh is required.
//
// Note that when deciding whether a transaction can be bumped to a particular
// timestamp, the transaction's deadline must also be taken into account.
func (sr *txnSpanRefresher) canForwardReadTimestampWithoutRefresh(txn *roachpb.Transaction) bool {
	return sr.canAutoRetry && !sr.refreshInvalid && sr.refreshFootprint.empty() && !txn.CommitTimestampFixed
}

// forwardRefreshTimestampOnResponse updates the refresher's tracked
// refreshedTimestamp to stay in sync with "server-side refreshes", where the
// transaction's read timestamp is updated during the evaluation of a batch.
func (sr *txnSpanRefresher) forwardRefreshTimestampOnResponse(
	br *roachpb.BatchResponse, pErr *roachpb.Error,
) {
	// Prefer the txn attached to the error, when there is one.
	var txn *roachpb.Transaction
	if pErr != nil {
		txn = pErr.GetTxn()
	} else {
		txn = br.Txn
	}
	if txn != nil {
		sr.refreshedTimestamp.Forward(txn.ReadTimestamp)
	}
}

// maxRefreshAttempts returns the configured number of times that a transaction
// should attempt to refresh its spans for a single batch.
func (sr *txnSpanRefresher) maxRefreshAttempts() int {
	// A knob of -1 means "no refresh attempts"; 0 means "use the default".
	if knob := sr.knobs.MaxTxnRefreshAttempts; knob != 0 {
		if knob == -1 {
			return 0
		}
		return knob
	}
	return maxTxnRefreshAttempts
}

// setWrapped implements the txnInterceptor interface.
func (sr *txnSpanRefresher) setWrapped(wrapped lockedSender) { sr.wrapped = wrapped }

// populateLeafInputState is part of the txnInterceptor interface.
func (sr *txnSpanRefresher) populateLeafInputState(tis *roachpb.LeafTxnInputState) {
	tis.RefreshInvalid = sr.refreshInvalid
}

// populateLeafFinalState is part of the txnInterceptor interface.
func (sr *txnSpanRefresher) populateLeafFinalState(tfs *roachpb.LeafTxnFinalState) {
	tfs.RefreshInvalid = sr.refreshInvalid
	if !sr.refreshInvalid {
		// Copy mutable state so access is safe for the caller.
		tfs.RefreshSpans = append([]roachpb.Span(nil), sr.refreshFootprint.asSlice()...)
	}
}

// importLeafFinalState is part of the txnInterceptor interface.
func (sr *txnSpanRefresher) importLeafFinalState(
	ctx context.Context, tfs *roachpb.LeafTxnFinalState,
) {
	if tfs.RefreshInvalid {
		sr.refreshInvalid = true
		sr.refreshFootprint.clear()
	} else if !sr.refreshInvalid {
		sr.refreshFootprint.insert(tfs.RefreshSpans...)
		// Condense the combined footprint in case the leaf's spans pushed us
		// over the memory limit.
		sr.refreshFootprint.maybeCondense(ctx, sr.riGen, MaxTxnRefreshSpansBytes.Get(&sr.st.SV))
	}
}

// epochBumpedLocked implements the txnInterceptor interface.
func (sr *txnSpanRefresher) epochBumpedLocked() {
	sr.refreshFootprint.clear()
	sr.refreshInvalid = false
	sr.refreshedTimestamp.Reset()
}

// createSavepointLocked is part of the txnInterceptor interface.
func (sr *txnSpanRefresher) createSavepointLocked(ctx context.Context, s *savepoint) {
	// Snapshot the refresh spans so a later rollback can restore them.
	s.refreshSpans = make([]roachpb.Span, len(sr.refreshFootprint.asSlice()))
	copy(s.refreshSpans, sr.refreshFootprint.asSlice())
	s.refreshInvalid = sr.refreshInvalid
}

// rollbackToSavepointLocked is part of the txnInterceptor interface.
func (sr *txnSpanRefresher) rollbackToSavepointLocked(ctx context.Context, s savepoint) {
	sr.refreshFootprint.clear()
	sr.refreshFootprint.insert(s.refreshSpans...)
	sr.refreshInvalid = s.refreshInvalid
}

// closeLocked implements the txnInterceptor interface.
func (*txnSpanRefresher) closeLocked() {}
/* * Copyright (c) 2016, Randy Westlund and Jacqueline Kory Westlund. * All rights reserved. * This code is under the BSD-2-Clause license. */ package defs // Album represents an album in the DB. type Album struct { Name string `json:"name"` // See the comment in db/albums.go:scan_album() for why this is a pointer. CoverImageID *uint32 `json:"cover_image_id"` // Everything below here is a computed field. // This is a count of how many images have this album. ImageCount uint32 `json:"image_count"` }
package controllers //RequestContext exposes request context methods type RequestContext interface { NegotiateFormat(...string) string ShouldBindJSON(interface{}) error JSON(int, interface{}) Header(string, string) Data(int, string, []byte) DefaultQuery(string, string) string BindQuery(interface{}) error BindJSON(interface{}) error Param(string) string }
package views

import (
	"html/template"
	"net/http"
	"path/filepath"
)

// View bundles a parsed template set with the name of the layout template
// that serves as the rendering entry point.
type View struct {
	Template *template.Template // parsed set of all template files for this view
	Layout   string             // name of the entry-point template to execute
}

// RenderTemplate executes the view's layout template with the given data and
// writes the result to w. It panics if template execution fails.
func (v *View) RenderTemplate(w http.ResponseWriter, data interface{}) {
	if err := v.Template.ExecuteTemplate(w, v.Layout, data); err != nil {
		panic(err)
	}
}

/*
No matter how many templates there are, or how many files contain them, they
all need to be parsed. The root layout always requests the same entry point
("entry"), but when this function is called, the "entry" template is chosen
from the file that corresponds to the given path — same name, different
content!
*/

// NewView parses the given template files plus every shared layout file and
// returns a View that renders the template named layoutName.
// Returning an error here (rather than panicking) is good practice.
func NewView(layoutName string, files ...string) (*View, error) {
	// A relative path like "./layouts/footer.gohtml" does not work here, so the
	// shared layout files are collected via getFiles instead.
	files = append(files, getFiles("views/layouts/", "gohtml")...) // getFiles returns a slice, hence the "..."
	templ, err := template.ParseFiles(files...)                    // ParseFiles accepts one or more files
	if err != nil {
		return nil, err
	}
	// Build the struct and return a pointer to it.
	return &View{
		Template: templ,
		Layout:   layoutName,
	}, nil
}

// getFiles returns the paths of all files with the given extension in the
// given directory. It panics if the glob pattern is malformed.
func getFiles(path, ext string) []string {
	files, err := filepath.Glob(path + "*." + ext)
	if err != nil {
		panic(err)
	}
	return files
}
package internal

import (
	"reflect"

	"github.com/adamluzsi/frameless/pkg/teardown"
)

// Equal reports whether v1 and v2 are equal. It prefers, in order: a function
// registered via RegisterIsEqual, an Equal/IsEqual/Cmp method defined on the
// value (or a pointer to it), and finally a recursive structural comparison.
// A nil input is only equal to another nil input.
func Equal(v1, v2 any) bool {
	if v1 == nil || v2 == nil {
		return v1 == v2
	}
	return reflectDeepEqual(
		&refMem{visited: make(map[uintptr]struct{})},
		reflect.ValueOf(v1), reflect.ValueOf(v2))
}

// RegisterIsEqual registers a custom equality function for typ, which takes
// precedence over method-based and structural comparison.
func RegisterIsEqual(typ reflect.Type, rfn func(v1, v2 reflect.Value) bool) {
	isEqualFuncRegister[typ] = rfn
}

// isEqualFuncRegister maps types to their registered equality functions.
var isEqualFuncRegister = map[reflect.Type]func(v1, v2 reflect.Value) bool{}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

// reflectDeepEqual is the recursive core of Equal. m records visited value
// pairs so that cyclic data structures terminate.
func reflectDeepEqual(m *refMem, v1, v2 reflect.Value) (iseq bool) {
	if !m.TryVisit(v1, v2) {
		return true // probably OK since we already visited it
	}
	if !v1.IsValid() || !v2.IsValid() {
		return v1.IsValid() == v2.IsValid()
	}
	if v1.Type() != v2.Type() {
		return false
	}
	if eq, ok := tryEqualityMethods(v1, v2); ok {
		return eq
	}
	switch v1.Kind() {
	case reflect.Struct:
		// Compare field by field, skipping fields that cannot be made accessible.
		for i, n := 0, v1.NumField(); i < n; i++ {
			f1, ok := TryToMakeAccessible(v1.Field(i))
			if !ok {
				continue
			}
			f2, ok := TryToMakeAccessible(v2.Field(i))
			if !ok {
				continue
			}
			if eq := reflectDeepEqual(m, f1, f2); !eq {
				return eq
			}
		}
		return true

	case reflect.Pointer:
		if v1.UnsafePointer() == v2.UnsafePointer() {
			return true
		}
		return reflectDeepEqual(m, v1.Elem(), v2.Elem())

	case reflect.Array:
		// TODO: check if array with different length are considered as the same type
		for i := 0; i < v1.Len(); i++ {
			if eq := reflectDeepEqual(m, v1.Index(i), v2.Index(i)); !eq {
				return eq
			}
		}
		return true

	case reflect.Slice:
		if v1.IsNil() != v2.IsNil() {
			return false
		}
		if v1.Len() != v2.Len() {
			return false
		}
		if v1.UnsafePointer() == v2.UnsafePointer() {
			return true
		}
		// Special case for []byte, which is common.
		if v1.Type().Elem().Kind() == reflect.Uint8 {
			return string(v1.Bytes()) == string(v2.Bytes())
		}
		for i := 0; i < v1.Len(); i++ {
			if eq := reflectDeepEqual(m, v1.Index(i), v2.Index(i)); !eq {
				return eq
			}
		}
		return true

	case reflect.Interface:
		if v1.IsNil() || v2.IsNil() {
			return v1.IsNil() == v2.IsNil()
		}
		return reflectDeepEqual(m, v1.Elem(), v2.Elem())

	case reflect.Map:
		if v1.IsNil() != v2.IsNil() {
			return false
		}
		if v1.Len() != v2.Len() {
			return false
		}
		if v1.UnsafePointer() == v2.UnsafePointer() {
			return true
		}
		for _, k := range v1.MapKeys() {
			val1 := v1.MapIndex(k)
			val2 := v2.MapIndex(k)
			if !val1.IsValid() || !val2.IsValid() {
				return false
			}
			if eq := reflectDeepEqual(m, val1, val2); !eq {
				return eq
			}
		}
		return true

	case reflect.Func:
		// Functions are only equal when both are nil or both point to the
		// same code.
		if v1.IsNil() && v2.IsNil() {
			return true
		}
		if v1.Pointer() == v2.Pointer() {
			return true
		}
		return false

	case reflect.Chan:
		if v1.IsNil() && v2.IsNil() {
			return true
		}
		if v1.Cap() == 0 {
			return reflect.DeepEqual(v1.Interface(), v2.Interface())
		}
		if v1.Cap() != v2.Cap() || v1.Len() != v2.Len() {
			return false
		}
		// Drain both channels element-wise, comparing as we go, and push the
		// elements back via deferred sends when done.
		//
		// NOTE(review): whether the original element order is preserved on
		// restore depends on teardown.Teardown's execution order of deferred
		// callbacks — confirm against that package.
		var (
			ln = v1.Len()
			td = &teardown.Teardown{}
		)
		defer func() { _ = td.Finish() }()
		for i := 0; i < ln; i++ {
			v1x, v1OK := v1.Recv()
			if v1OK {
				td.Defer(func() error {
					v1.Send(v1x)
					return nil
				})
			}
			// BUG FIX: this previously received from v1 a second time, which
			// consumed two elements of v1 and never inspected v2's contents.
			// The counterpart element must come from v2.
			v2x, v2OK := v2.Recv()
			if v2OK {
				td.Defer(func() error {
					v2.Send(v2x)
					return nil
				})
			}
			if v1OK != v2OK {
				return false
			}
			if eq := reflectDeepEqual(m, v1x, v2x); !eq {
				return eq
			}
		}
		return true

	default:
		// Primitive kinds (ints, strings, bools, ...) fall back to
		// reflect.DeepEqual on accessible copies.
		return reflect.DeepEqual(
			Accessible(v1).Interface(),
			Accessible(v2).Interface())
	}
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

// tryEqualityMethods attempts user-defined equality: a registered function,
// an Equal/IsEqual method, or a Cmp method. ok reports whether any such
// mechanism was applicable. Panics raised while probing are swallowed,
// yielding (false, false).
func tryEqualityMethods(v1, v2 reflect.Value) (isEqual, ok bool) {
	defer func() { recover() }()
	if v1.Type() != v2.Type() {
		return false, false
	}
	if eqfn, ok := isEqualFuncRegister[v1.Type()]; ok {
		return eqfn(v1, v2), true
	}
	if eq, ok := tryEquatable(v1, v2); ok {
		return eq, ok
	}
	if eq, ok := tryComparableEqual(v1, v2); ok {
		return eq, ok
	}
	return false, false
}

// tryEquatable probes for an Equal or IsEqual method on the value (or on a
// pointer to it) and uses it when the signature matches func(T) bool.
func tryEquatable(v1, v2 reflect.Value) (bool, bool) {
	for _, methodName := range []string{"Equal", "IsEqual"} {
		if eq, ok := tryIsEqualMethod(methodName, v1, v2); ok {
			return eq, true
		}
		if eq, ok := tryIsEqualMethod(methodName, ptrOf(v1), v2); ok {
			return eq, true
		}
	}
	return false, false
}

// tryIsEqualMethod calls methodName on v1 when the method has the exact shape
// func(v2.Type()) bool. Methods returning (bool, error) are not supported
// (the original code probed for them but always rejected them).
func tryIsEqualMethod(methodName string, v1, v2 reflect.Value) (bool, bool) {
	method := v1.MethodByName(methodName)
	if method == (reflect.Value{}) {
		return false, false
	}
	methodType := method.Type()
	if methodType.NumIn() != 1 {
		return false, false
	}
	if methodType.In(0) != v2.Type() {
		return false, false
	}
	if methodType.NumOut() != 1 || methodType.Out(0) != boolType {
		return false, false
	}
	result := method.Call([]reflect.Value{v2})
	return result[0].Bool(), true
}

// tryComparableEqual probes for a Cmp method on the value (or a pointer to
// it) and treats a zero result as equality.
func tryComparableEqual(v1, v2 reflect.Value) (bool, bool) {
	if eq, ok := tryCmpEqual(v1, v2); ok {
		return eq, ok
	}
	if eq, ok := tryCmpEqual(ptrOf(v1), v2); ok {
		return eq, ok
	}
	return false, false
}

// tryCmpEqual calls v1.Cmp(v2) when the method has the exact shape
// func(v2.Type()) int, reporting equality as Cmp == 0.
func tryCmpEqual(v1 reflect.Value, v2 reflect.Value) (bool, bool) {
	method := v1.MethodByName("Cmp")
	if method == (reflect.Value{}) {
		return false, false
	}
	methodType := method.Type()
	if methodType.NumIn() != 1 {
		return false, false
	}
	if methodType.In(0) != v2.Type() {
		return false, false
	}
	if methodType.NumOut() != 1 {
		return false, false
	}
	if methodType.Out(0) != intType {
		return false, false
	}
	result := method.Call([]reflect.Value{v2})
	return result[0].Int() == 0, true
}
package main import ( "log" "net" "net/http" "sync" "time" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "github.com/sleep2death/gotham" "github.com/sleep2death/gotham/examples/pb" ) func main() { // SERVER // Starts a new gotham instance without any middleware. router := gotham.New() // Define your handlers router.Handle("pb.EchoMessage", func(c *gotham.Context) { message := new(pb.EchoMessage) // If some error fires, you can abort the request. if err := proto.Unmarshal(c.Request.Data(), message); err != nil { c.AbortWithStatus(http.StatusBadRequest) return } // log.Printf("Ping request received at %s", ptypes.TimestampString(message.Ts)) message.Message = "Pong" message.Ts = ptypes.TimestampNow() c.Write(message) }) // Run, gotham, Run... addr := ":9090" go func() { log.Fatal(router.Run(addr)) }() // Wait a little while for server prepairing time.Sleep(time.Millisecond * 5) // CLIENTS var wg sync.WaitGroup for i := 0; i < 10; i++ { wg.Add(1) go func(idx int) { defer wg.Done() // Connect to server. client, err := net.Dial("tcp", addr) if err != nil { log.Fatal("can not get in touch with gotham.") } defer client.Close() msg := &pb.EchoMessage{ Message: "Ping", Ts: ptypes.TimestampNow(), } // Write the message with a little help of gotham utils function. if err := gotham.WriteFrame(client, msg); err != nil { log.Fatalf("client write data error: %s.", err) } res, err := gotham.ReadFrame(client) if err != nil { log.Fatalf("client read data error: %s.", err) } // Unmarshal the raw data err = proto.Unmarshal(res.Data(), msg) if err != nil { log.Fatalf("client read data error: %s.", err) } if msg.GetMessage() == "Pong" { log.Printf("Ping response of (%d) received.", idx) } }(i) } wg.Wait() }
// Copyright 2021 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package arc import ( "context" "time" "chromiumos/tast/local/apps" "chromiumos/tast/local/arc" "chromiumos/tast/local/chrome" "chromiumos/tast/local/chrome/ash" "chromiumos/tast/testing" ) func init() { testing.AddTest(&testing.Test{ Func: GuestPlayStore, LacrosStatus: testing.LacrosVariantUnneeded, Desc: "Check PlayStore is Off in Guest mode", Contacts: []string{"cpiao@google.com", "cros-arc-te@google.com"}, Attr: []string{"group:mainline", "group:arc-functional"}, SoftwareDeps: []string{"chrome"}, Fixture: "chromeLoggedInGuest", Timeout: chrome.LoginTimeout + arc.BootTimeout + 30*time.Second, Params: []testing.Param{{ ExtraSoftwareDeps: []string{"android_p"}, }, { Name: "vm", ExtraSoftwareDeps: []string{"android_vm"}, }}, }) } func GuestPlayStore(ctx context.Context, s *testing.State) { cr := s.FixtValue().(*chrome.Chrome) tconn, err := cr.TestAPIConn(ctx) if err != nil { s.Fatal("Failed to connect Test API: ", err) } s.Log("Verify None of Default ARC Apps are Installed") installedApps, err := ash.ChromeApps(ctx, tconn) if err != nil { s.Fatal("Failed to get installed apps: ", err) } for _, app := range []apps.App{apps.PlayStore, apps.Duo, apps.PlayBooks, apps.PlayGames, apps.GoogleTV, apps.Clock, apps.Contacts} { for _, installedapp := range installedApps { if app.ID == installedapp.AppID { s.Fatalf("%s (%s) App is installed", app.Name, app.ID) } } } }
package main import ( "fmt" "log" "encoding/json" ) type Book struct { GUID string `json:"guid"` FileName string `json:"fileName"` } type BookAux struct { Title string `json:"title"` } type BookShelf struct { Count int `json:"count"` Books []Book `json:"books"` Aux *BookAux `json:"aux"` Time int64 } func main() { var bookShelf BookShelf bytes := []byte(`{"count":100,"books":[{"guid":"58c879c5-3183-4033-9789-bddbd58805b0","fileName":"foo.txt"},{"guid":"a59237f6-c084-43d8-8f13-72c4892b1048","fileName":"bar.txt"}], "aux":{"title":"TITLE"}}`) if err := json.Unmarshal(bytes, &bookShelf); err != nil { log.Fatal(err) return } fmt.Println(bookShelf.Count) for i, book := range bookShelf.Books { fmt.Printf("[%d]\n", i); fmt.Println("\t" + book.GUID); fmt.Println("\t" + book.FileName); } if bookShelf.Aux != nil { fmt.Println(bookShelf.Aux.Title) } fmt.Println(bookShelf.Time) }
package utils import ( "golang.org/x/crypto/bcrypt" ) func HashPassword(password string) (string, error) { bytes, err := bcrypt.GenerateFromPassword([]byte(password), 10) return string(bytes), err } func CheckPassowrd(password, hash string) error { err := bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)) return err }
package main

import (
	"context"
	"flag"
	"fmt"
	"os"
	"strings"

	"github.com/google/go-github/github"
	"golang.org/x/oauth2"
)

// help_t pairs a subcommand name with its one-line description.
type help_t struct {
	name string
	text string
}

// CommandHelp drives the usage listing; keep it in sync with the
// subcommand switch in main.
var CommandHelp = []help_t{
	{"help", "display this help text"},
	{"list", "list all repositories"},
	{"find", "search go package git repositories for keywords"},
	// FIX: the "code" subcommand existed in main but was missing here.
	{"code", "search go package code repositories for keywords"},
}

// Github returns an unauthenticated GitHub API client.
func Github() *github.Client {
	client := github.NewClient(nil)
	return client
}

// PrintHelp writes the subcommand summary to stderr.
func PrintHelp() {
	for _, helptext := range CommandHelp {
		fmt.Fprintf(os.Stderr, "%s - %s\n", helptext.name, helptext.text)
	}
	fmt.Fprintf(os.Stderr, "\n")
}

func main() {
	var accessToken = flag.String("token", os.Getenv("GITHUB_TOKEN"), "token github api")
	var user = flag.String("user", "", "user to list repos for")
	var allLanguages = flag.Bool("all", false, "dont return only go language repos")
	flag.Usage = func() {
		PrintHelp()
		flag.PrintDefaults()
	}
	flag.Parse()

	var client = getclient(*accessToken)
	switch flag.Arg(0) {
	case "", "help":
		flag.Usage()
	case "list":
		listRepositories(client, *user)
	case "code":
		if flag.NArg() == 1 {
			flag.Usage()
			return
		}
		findCodeRepositories(client, strings.Join(flag.Args()[1:], "+"), *allLanguages)
	case "find":
		if flag.NArg() == 1 {
			flag.Usage()
			return
		}
		findRepositories(client, strings.Join(flag.Args()[1:], "+"), *allLanguages)
	}
}

// getclient returns an OAuth-authenticated client when token is non-empty
// and an anonymous (rate-limited) client otherwise.
func getclient(token string) *github.Client {
	var client *github.Client
	if token != "" {
		ctx := context.Background()
		ts := oauth2.StaticTokenSource(
			&oauth2.Token{AccessToken: token},
		)
		tc := oauth2.NewClient(ctx, ts)
		client = github.NewClient(tc)
	} else {
		client = Github()
	}
	return client
}

// listRepositories prints every repository of the given user (or the
// authenticated user when name is empty), fetching up to two pages of
// 100 entries.
func listRepositories(client *github.Client, name string) {
	ctx := context.Background()
	// BUG FIX: pages were requested as 0 and 1. GitHub pagination is
	// 1-based and page 0 is an alias for page 1, so the same page was
	// fetched (and printed) twice.
	for page := 1; page <= 2; page++ {
		opts := &github.RepositoryListOptions{}
		opts.PerPage = 100
		opts.Page = page
		repos, _, err := client.Repositories.List(ctx, name, opts)
		if err != nil {
			fmt.Println(err)
			os.Exit(111)
		}
		if len(repos) == 0 {
			break
		}
		for _, repo := range repos {
			descr := repo.GetDescription()
			if descr != "" {
				descr = " - " + descr
			}
			fmt.Printf("%s/%s%s\n", name, repo.GetName(), descr)
		}
	}
}

// findCodeRepositories runs a GitHub code search for keywords (restricted
// to Go unless allLanguages is set) and prints each match with its
// repository's star count and clone URL.
func findCodeRepositories(client *github.Client, keywords string, allLanguages bool) {
	if !allLanguages {
		keywords += "+language:go"
	}
	ctx := context.Background()
	x := 0
	for page := 1; page <= 2; page++ { // pages of 100, 1-based (see listRepositories)
		opts := &github.SearchOptions{}
		opts.PerPage = 100
		opts.Page = page
		repos, _, err := client.Search.Code(ctx, keywords, opts)
		if err != nil {
			fmt.Println(err)
			os.Exit(111)
		}
		num := repos.GetTotal()
		fmt.Printf("Found %v results\n", num)
		fmt.Printf("# (*) repository - description\n")
		for _, repocode := range repos.CodeResults {
			repo := repocode.GetRepository()
			fmt.Println(repocode.GetHTMLURL())
			descr := repo.GetDescription()
			if descr != "" {
				descr = " - " + descr
			}
			stars := repo.GetStargazersCount()
			fmt.Printf("%00v (%0v) %s%s\n", x, stars, repo.GetCloneURL(), descr)
			x++
		}
		// BUG FIX: stop when the page came back short. The old test
		// compared the TOTAL result count to 100, so pagination only
		// stopped early when exactly 100 results existed overall.
		if len(repos.CodeResults) < 100 {
			break
		}
	}
}

// findRepositories runs a GitHub repository search for keywords
// (restricted to Go unless allLanguages is set) and prints each match
// with its star count and clone URL.
func findRepositories(client *github.Client, keywords string, allLanguages bool) {
	if !allLanguages {
		keywords += "+language:go"
	}
	ctx := context.Background()
	x := 0
	for page := 1; page <= 2; page++ { // pages of 100, 1-based (see listRepositories)
		opts := &github.SearchOptions{}
		opts.PerPage = 100
		opts.Page = page
		repos, _, err := client.Search.Repositories(ctx, keywords, opts)
		if err != nil {
			fmt.Println(err)
			os.Exit(111)
		}
		num := repos.GetTotal()
		fmt.Printf("Found %v results\n", num)
		fmt.Printf("# (*) repository - description\n")
		for _, repo := range repos.Repositories {
			descr := repo.GetDescription()
			if descr != "" {
				descr = " - " + descr
			}
			stars := repo.GetStargazersCount()
			fmt.Printf("%00v (%0v) %s%s\n", x, stars, repo.GetCloneURL(), descr)
			x++
		}
		// BUG FIX: see findCodeRepositories — break on a short page, not
		// on the total hit count.
		if len(repos.Repositories) < 100 {
			break
		}
	}
}
package humanize_test

import (
	"math"
	"testing"

	"github.com/git-lfs/git-lfs/tools/humanize"
	"github.com/stretchr/testify/assert"
)

// ParseBytesTestCase checks humanize.ParseBytes round-trips a human
// string to an exact byte count, or yields the expected error.
type ParseBytesTestCase struct {
	Given    string
	Expected uint64
	Err      error
}

func (c *ParseBytesTestCase) Assert(t *testing.T) {
	got, err := humanize.ParseBytes(c.Given)
	if c.Err == nil {
		assert.NoError(t, err, "unexpected error: %s", err)
		assert.EqualValues(t, c.Expected, got)
	} else {
		assert.Equal(t, c.Err, err)
	}
}

// FormatBytesTestCase checks humanize.FormatBytes renders a byte count
// with the expected unit and precision.
type FormatBytesTestCase struct {
	Given    uint64
	Expected string
}

func (c *FormatBytesTestCase) Assert(t *testing.T) {
	assert.Equal(t, c.Expected, humanize.FormatBytes(c.Given))
}

// ParseByteUnitTestCase checks humanize.ParseByteUnit maps a unit suffix
// to its multiplier, or produces the expected error message.
type ParseByteUnitTestCase struct {
	Given    string
	Expected uint64
	Err      string
}

func (c *ParseByteUnitTestCase) Assert(t *testing.T) {
	got, err := humanize.ParseByteUnit(c.Given)
	if len(c.Err) == 0 {
		assert.NoError(t, err, "unexpected error: %s", err)
		assert.EqualValues(t, c.Expected, got)
	} else {
		assert.EqualError(t, err, c.Err)
	}
}

// FormatBytesUnitTestCase checks humanize.FormatBytesUnit renders a byte
// count in terms of a fixed unit.
type FormatBytesUnitTestCase struct {
	Given    uint64
	Unit     uint64
	Expected string
}

func (c *FormatBytesUnitTestCase) Assert(t *testing.T) {
	assert.Equal(t, c.Expected, humanize.FormatBytesUnit(c.Given, c.Unit))
}

func TestParseBytes(t *testing.T) {
	for desc, c := range map[string]*ParseBytesTestCase{
		"parse byte (zero, empty)": {"", uint64(0), nil},
		"parse byte (empty)":       {"10", uint64(10 * math.Pow(2, 0)), nil},

		"parse byte":     {"10B", uint64(10 * math.Pow(2, 0)), nil},
		"parse kibibyte": {"20KIB", uint64(20 * math.Pow(2, 10)), nil},
		"parse mebibyte": {"30MIB", uint64(30 * math.Pow(2, 20)), nil},
		"parse gibibyte": {"40GIB", uint64(40 * math.Pow(2, 30)), nil},
		"parse tebibyte": {"50TIB", uint64(50 * math.Pow(2, 40)), nil},
		"parse pebibyte": {"60PIB", uint64(60 * math.Pow(2, 50)), nil},

		"parse byte (lowercase)":     {"10b", uint64(10 * math.Pow(2, 0)), nil},
		"parse kibibyte (lowercase)": {"20kib", uint64(20 * math.Pow(2, 10)), nil},
		"parse mebibyte (lowercase)": {"30mib", uint64(30 * math.Pow(2, 20)), nil},
		"parse gibibyte (lowercase)": {"40gib", uint64(40 * math.Pow(2, 30)), nil},
		"parse tebibyte (lowercase)": {"50tib", uint64(50 * math.Pow(2, 40)), nil},
		"parse pebibyte (lowercase)": {"60pib", uint64(60 * math.Pow(2, 50)), nil},

		"parse byte (with space)":     {"10 B", uint64(10 * math.Pow(2, 0)), nil},
		"parse kibibyte (with space)": {"20 KIB", uint64(20 * math.Pow(2, 10)), nil},
		"parse mebibyte (with space)": {"30 MIB", uint64(30 * math.Pow(2, 20)), nil},
		"parse gibibyte (with space)": {"40 GIB", uint64(40 * math.Pow(2, 30)), nil},
		"parse tebibyte (with space)": {"50 TIB", uint64(50 * math.Pow(2, 40)), nil},
		"parse pebibyte (with space)": {"60 PIB", uint64(60 * math.Pow(2, 50)), nil},

		"parse byte (with space, lowercase)":     {"10 b", uint64(10 * math.Pow(2, 0)), nil},
		"parse kibibyte (with space, lowercase)": {"20 kib", uint64(20 * math.Pow(2, 10)), nil},
		"parse mebibyte (with space, lowercase)": {"30 mib", uint64(30 * math.Pow(2, 20)), nil},
		"parse gibibyte (with space, lowercase)": {"40 gib", uint64(40 * math.Pow(2, 30)), nil},
		"parse tebibyte (with space, lowercase)": {"50 tib", uint64(50 * math.Pow(2, 40)), nil},
		"parse pebibyte (with space, lowercase)": {"60 pib", uint64(60 * math.Pow(2, 50)), nil},

		"parse kilobyte": {"20KB", uint64(20 * math.Pow(10, 3)), nil},
		"parse megabyte": {"30MB", uint64(30 * math.Pow(10, 6)), nil},
		"parse gigabyte": {"40GB", uint64(40 * math.Pow(10, 9)), nil},
		"parse terabyte": {"50TB", uint64(50 * math.Pow(10, 12)), nil},
		"parse petabyte": {"60PB", uint64(60 * math.Pow(10, 15)), nil},

		"parse kilobyte (lowercase)": {"20kb", uint64(20 * math.Pow(10, 3)), nil},
		"parse megabyte (lowercase)": {"30mb", uint64(30 * math.Pow(10, 6)), nil},
		"parse gigabyte (lowercase)": {"40gb", uint64(40 * math.Pow(10, 9)), nil},
		"parse terabyte (lowercase)": {"50tb", uint64(50 * math.Pow(10, 12)), nil},
		"parse petabyte (lowercase)": {"60pb", uint64(60 * math.Pow(10, 15)), nil},

		"parse kilobyte (with space)": {"20 KB", uint64(20 * math.Pow(10, 3)), nil},
		"parse megabyte (with space)": {"30 MB", uint64(30 * math.Pow(10, 6)), nil},
		"parse gigabyte (with space)": {"40 GB", uint64(40 * math.Pow(10, 9)), nil},
		"parse terabyte (with space)": {"50 TB", uint64(50 * math.Pow(10, 12)), nil},
		"parse petabyte (with space)": {"60 PB", uint64(60 * math.Pow(10, 15)), nil},

		"parse kilobyte (with space, lowercase)": {"20 kb", uint64(20 * math.Pow(10, 3)), nil},
		"parse megabyte (with space, lowercase)": {"30 mb", uint64(30 * math.Pow(10, 6)), nil},
		"parse gigabyte (with space, lowercase)": {"40 gb", uint64(40 * math.Pow(10, 9)), nil},
		"parse terabyte (with space, lowercase)": {"50 tb", uint64(50 * math.Pow(10, 12)), nil},
		"parse petabyte (with space, lowercase)": {"60 pb", uint64(60 * math.Pow(10, 15)), nil},
	} {
		t.Run(desc, c.Assert)
	}
}

func TestFormatBytes(t *testing.T) {
	// FIX: the "terabytes"/"petabytes" descriptions were swapped relative
	// to their values (10^12 is TB, 10^15 is PB) in every variant below.
	for desc, c := range map[string]*FormatBytesTestCase{
		"format bytes":     {uint64(1 * math.Pow(10, 0)), "1 B"},
		"format kilobytes": {uint64(1 * math.Pow(10, 3)), "1.0 KB"},
		"format megabytes": {uint64(1 * math.Pow(10, 6)), "1.0 MB"},
		"format gigabytes": {uint64(1 * math.Pow(10, 9)), "1.0 GB"},
		"format terabytes": {uint64(1 * math.Pow(10, 12)), "1.0 TB"},
		"format petabytes": {uint64(1 * math.Pow(10, 15)), "1.0 PB"},

		"format kilobytes under": {uint64(1.49 * math.Pow(10, 3)), "1.5 KB"},
		"format megabytes under": {uint64(1.49 * math.Pow(10, 6)), "1.5 MB"},
		"format gigabytes under": {uint64(1.49 * math.Pow(10, 9)), "1.5 GB"},
		"format terabytes under": {uint64(1.49 * math.Pow(10, 12)), "1.5 TB"},
		"format petabytes under": {uint64(1.49 * math.Pow(10, 15)), "1.5 PB"},

		"format kilobytes over": {uint64(1.51 * math.Pow(10, 3)), "1.5 KB"},
		"format megabytes over": {uint64(1.51 * math.Pow(10, 6)), "1.5 MB"},
		"format gigabytes over": {uint64(1.51 * math.Pow(10, 9)), "1.5 GB"},
		"format terabytes over": {uint64(1.51 * math.Pow(10, 12)), "1.5 TB"},
		"format petabytes over": {uint64(1.51 * math.Pow(10, 15)), "1.5 PB"},

		"format kilobytes exact": {uint64(1.3 * math.Pow(10, 3)), "1.3 KB"},
		"format megabytes exact": {uint64(1.3 * math.Pow(10, 6)), "1.3 MB"},
		"format gigabytes exact": {uint64(1.3 * math.Pow(10, 9)), "1.3 GB"},
		"format terabytes exact": {uint64(1.3 * math.Pow(10, 12)), "1.3 TB"},
		"format petabytes exact": {uint64(1.3 * math.Pow(10, 15)), "1.3 PB"},
	} {
		t.Run(desc, c.Assert)
	}
}

func TestParseByteUnit(t *testing.T) {
	for desc, c := range map[string]*ParseByteUnitTestCase{
		"parse byte":     {"B", uint64(math.Pow(2, 0)), ""},
		"parse kibibyte": {"KIB", uint64(math.Pow(2, 10)), ""},
		"parse mebibyte": {"MIB", uint64(math.Pow(2, 20)), ""},
		"parse gibibyte": {"GIB", uint64(math.Pow(2, 30)), ""},
		"parse tebibyte": {"TIB", uint64(math.Pow(2, 40)), ""},
		"parse pebibyte": {"PIB", uint64(math.Pow(2, 50)), ""},

		"parse byte (lowercase)":     {"b", uint64(math.Pow(2, 0)), ""},
		"parse kibibyte (lowercase)": {"kib", uint64(math.Pow(2, 10)), ""},
		"parse mebibyte (lowercase)": {"mib", uint64(math.Pow(2, 20)), ""},
		"parse gibibyte (lowercase)": {"gib", uint64(math.Pow(2, 30)), ""},
		"parse tebibyte (lowercase)": {"tib", uint64(math.Pow(2, 40)), ""},
		"parse pebibyte (lowercase)": {"pib", uint64(math.Pow(2, 50)), ""},

		"parse byte (with space)":     {" B", uint64(math.Pow(2, 0)), ""},
		"parse kibibyte (with space)": {" KIB", uint64(math.Pow(2, 10)), ""},
		"parse mebibyte (with space)": {" MIB", uint64(math.Pow(2, 20)), ""},
		"parse gibibyte (with space)": {" GIB", uint64(math.Pow(2, 30)), ""},
		"parse tebibyte (with space)": {" TIB", uint64(math.Pow(2, 40)), ""},
		"parse pebibyte (with space)": {" PIB", uint64(math.Pow(2, 50)), ""},

		"parse byte (with space, lowercase)":     {" b", uint64(math.Pow(2, 0)), ""},
		"parse kibibyte (with space, lowercase)": {" kib", uint64(math.Pow(2, 10)), ""},
		"parse mebibyte (with space, lowercase)": {" mib", uint64(math.Pow(2, 20)), ""},
		"parse gibibyte (with space, lowercase)": {" gib", uint64(math.Pow(2, 30)), ""},
		"parse tebibyte (with space, lowercase)": {" tib", uint64(math.Pow(2, 40)), ""},
		"parse pebibyte (with space, lowercase)": {" pib", uint64(math.Pow(2, 50)), ""},

		"parse kilobyte": {"KB", uint64(math.Pow(10, 3)), ""},
		"parse megabyte": {"MB", uint64(math.Pow(10, 6)), ""},
		"parse gigabyte": {"GB", uint64(math.Pow(10, 9)), ""},
		"parse terabyte": {"TB", uint64(math.Pow(10, 12)), ""},
		"parse petabyte": {"PB", uint64(math.Pow(10, 15)), ""},

		"parse kilobyte (lowercase)": {"kb", uint64(math.Pow(10, 3)), ""},
		"parse megabyte (lowercase)": {"mb", uint64(math.Pow(10, 6)), ""},
		"parse gigabyte (lowercase)": {"gb", uint64(math.Pow(10, 9)), ""},
		"parse terabyte (lowercase)": {"tb", uint64(math.Pow(10, 12)), ""},
		"parse petabyte (lowercase)": {"pb", uint64(math.Pow(10, 15)), ""},

		"parse kilobyte (with space)": {" KB", uint64(math.Pow(10, 3)), ""},
		"parse megabyte (with space)": {" MB", uint64(math.Pow(10, 6)), ""},
		"parse gigabyte (with space)": {" GB", uint64(math.Pow(10, 9)), ""},
		"parse terabyte (with space)": {" TB", uint64(math.Pow(10, 12)), ""},
		"parse petabyte (with space)": {" PB", uint64(math.Pow(10, 15)), ""},

		// FIX: these inputs were missing the leading space, silently
		// duplicating the plain lowercase cases above.
		"parse kilobyte (with space, lowercase)": {" kb", uint64(math.Pow(10, 3)), ""},
		"parse megabyte (with space, lowercase)": {" mb", uint64(math.Pow(10, 6)), ""},
		"parse gigabyte (with space, lowercase)": {" gb", uint64(math.Pow(10, 9)), ""},
		"parse terabyte (with space, lowercase)": {" tb", uint64(math.Pow(10, 12)), ""},
		"parse petabyte (with space, lowercase)": {" pb", uint64(math.Pow(10, 15)), ""},

		"parse unknown unit": {"imag", 0, "unknown unit: \"imag\""},
	} {
		t.Run(desc, c.Assert)
	}
}

func TestFormatBytesUnit(t *testing.T) {
	// FIX: as in TestFormatBytes, the "terabytes"/"petabytes" labels were
	// swapped relative to their values.
	for desc, c := range map[string]*FormatBytesUnitTestCase{
		"format bytes":     {uint64(1 * math.Pow(10, 0)), humanize.Byte, "1"},
		"format kilobytes": {uint64(1 * math.Pow(10, 3)), humanize.Byte, "1000"},
		"format megabytes": {uint64(1 * math.Pow(10, 6)), humanize.Byte, "1000000"},
		"format gigabytes": {uint64(1 * math.Pow(10, 9)), humanize.Byte, "1000000000"},
		"format terabytes": {uint64(1 * math.Pow(10, 12)), humanize.Byte, "1000000000000"},
		"format petabytes": {uint64(1 * math.Pow(10, 15)), humanize.Byte, "1000000000000000"},

		"format kilobytes under": {uint64(1.49 * math.Pow(10, 3)), humanize.Byte, "1490"},
		"format megabytes under": {uint64(1.49 * math.Pow(10, 6)), humanize.Byte, "1490000"},
		"format gigabytes under": {uint64(1.49 * math.Pow(10, 9)), humanize.Byte, "1490000000"},
		"format terabytes under": {uint64(1.49 * math.Pow(10, 12)), humanize.Byte, "1490000000000"},
		"format petabytes under": {uint64(1.49 * math.Pow(10, 15)), humanize.Byte, "1490000000000000"},

		"format kilobytes over": {uint64(1.51 * math.Pow(10, 3)), humanize.Byte, "1510"},
		"format megabytes over": {uint64(1.51 * math.Pow(10, 6)), humanize.Byte, "1510000"},
		"format gigabytes over": {uint64(1.51 * math.Pow(10, 9)), humanize.Byte, "1510000000"},
		"format terabytes over": {uint64(1.51 * math.Pow(10, 12)), humanize.Byte, "1510000000000"},
		"format petabytes over": {uint64(1.51 * math.Pow(10, 15)), humanize.Byte, "1510000000000000"},
	} {
		t.Run(desc, c.Assert)
	}
}