text
stringlengths 11
4.05M
|
|---|
package state
import (
"github.com/darkliquid/go-ircevent"
"github.com/darkliquid/leader1/config"
"sync"
)
// StateTracker keeps an in-memory view of IRC channel and nick state
// for a single connection.
type StateTracker struct {
	channels map[string]*Channel // known channels, keyed by channel name
	nicks    map[string]*Nick    // known nicks, keyed by nickname
	conn     *irc.Connection     // underlying IRC connection
	mutex    sync.Mutex          // NOTE(review): presumably guards the maps; not taken in New — confirm callers lock
	cfg      *config.Settings
}
// New builds a StateTracker for the given settings and connection,
// pre-registering the bot's own nick with an empty privilege map.
func New(cfg *config.Settings, conn *irc.Connection) *StateTracker {
	st := &StateTracker{
		channels: make(map[string]*Channel),
		nicks:    make(map[string]*Nick),
		conn:     conn,
		cfg:      cfg,
	}
	self := &Nick{
		Nick:     cfg.Irc.Nick,
		Channels: make(map[string]*ChannelPrivileges),
	}
	st.nicks[cfg.Irc.Nick] = self
	return st
}
|
package sem
import (
"errors"
"fmt"
"net/smtp"
"strings"
)
// HOST is the NetEase 163 SMTP endpoint (address and port).
const HOST = "smtp.163.com:25"
// SendMail delivers a message through NetEase 163's SMTP service (163 has
// no mail-sending HTTP API, hence raw SMTP). mailtype selects the body
// Content-Type: "html" sends text/html, anything else text/plain. Multiple
// recipients in `to` are separated by ";".
func SendMail(user, password, host, to, subject, body, mailtype string) error {
	hostname := strings.Split(host, ":")[0]
	auth := smtp.PlainAuth("", user, password, hostname)
	contentType := "Content-Type: text/plain" + "; charset=UTF-8"
	if mailtype == "html" {
		contentType = "Content-Type: text/" + mailtype + "; charset=UTF-8"
	}
	msg := []byte("To: " + to + "\r\nFrom: " + user + "<" + user + ">\r\nSubject: " + subject + "\r\n" + contentType + "\r\n\r\n" + body)
	recipients := strings.Split(to, ";")
	return smtp.SendMail(host, auth, user, recipients, msg)
}
// NetEaseSEMClient is a mail client for the NetEase 163 SMTP service.
type NetEaseSEMClient struct {
	USER   string // SMTP login / sender address
	PASSWD string // SMTP password (or authorization code)
}
// NewNetEaseSEMClient validates the credentials and returns a mail client.
func NewNetEaseSEMClient(user, password string) (*NetEaseSEMClient, error) {
	switch {
	case user == "":
		return nil, errors.New("NewNetEase User should be not empty")
	case password == "":
		return nil, errors.New("NewNetEase Password should be not empty")
	}
	client := &NetEaseSEMClient{USER: user, PASSWD: password}
	return client, nil
}
// SendEmail renders the template for the given action/language and mails it
// to each ";"-separated recipient. Note: 163 rate-limits senders; sending
// too fast gets the mail classified as spam and dropped.
//
// The original special-cased the single-recipient path, but
// strings.Split(to, ";") on a string without ";" yields exactly one element,
// so the loop below covers both cases identically.
func (client *NetEaseSEMClient) SendEmail(toAddress string, templateAction TemplateAction, language TemplateLanguage, templateParam map[string]string) (bool, error) {
	replaces := getParams(templateAction, language)
	for _, addr := range strings.Split(toAddress, ";") {
		body := fmt.Sprintf(getLanguageHTML(language), addr, replaces[0], replaces[1], convertTemplateParam(templateParam))
		if err := SendMail(client.USER, client.PASSWD, HOST, addr, replaces[3], body, "html"); err != nil {
			// Stop at the first failure; earlier recipients were already sent.
			return false, err
		}
	}
	return true, nil
}
|
package micro
import (
"mix/plugins/mysql"
)
// MakeEntityMethods registers the entity on the render service and generates
// its full set of CRUD render methods: base create/get/delete/update/query by
// primary key, then per-subject, per-filter, and per-index variants, and
// finally subject×filter / subject×index combinations. Methods whose slug
// already exists on the service are kept as-is (local overrides win).
func MakeEntityMethods(renderService *RenderService, entity *mysql.Entity) {
	renderEntity := ToRenderEntity(entity)
	renderService.Entities = append(renderService.Entities, renderEntity)
	renderMethods := make([]*RenderMethod, 0)
	// Base CRUD on the primary key, plus an unfiltered list/query pair.
	renderMethods = append(renderMethods, makeCreateSqlMap(entity))
	renderMethods = append(renderMethods, makeGetSqlMap(entity, mysql.IndexTypePrimary, entity.Primary, "", "", false)...)
	renderMethods = append(renderMethods, makeGetSqlMap(entity, "", "", "", "", true)...)
	renderMethods = append(renderMethods, makeDeleteSqlMap(entity, mysql.IndexTypePrimary, entity.Primary, "", "", false)...)
	renderMethods = append(renderMethods, makeUpdateSqlMap(entity, mysql.IndexTypePrimary, entity.Primary, "", "", false)...)
	renderMethods = append(renderMethods, makeQuerySqlMap(entity)...)
	// Per-subject single-row accessors keyed by the primary key.
	for _, v := range entity.Subject {
		renderMethods = append(renderMethods, makeGetSqlMap(entity, v.Slug, entity.Primary, "", v.Field, false)...)
		renderMethods = append(renderMethods, makeUpdateSqlMap(entity, v.Slug, entity.Primary, "", v.Field, false)...)
		renderMethods = append(renderMethods, makeDeleteSqlMap(entity, v.Slug, entity.Primary, "", v.Field, false)...)
	}
	// Per-filter multi-row accessors (last arg true = list semantics).
	for _, v := range entity.Filter {
		renderMethods = append(renderMethods, makeGetSqlMap(entity, "", "", v.Slug, v.Field, true)...)
		renderMethods = append(renderMethods, makeUpdateSqlMap(entity, "", "", v.Slug, v.Field, true)...)
		renderMethods = append(renderMethods, makeDeleteSqlMap(entity, "", "", v.Slug, v.Field, true)...)
	}
	// Per-index accessors; unique indexes yield single-row methods.
	for _, v := range entity.Index {
		renderMethods = append(renderMethods, makeGetSqlMap(entity, "", "", v.Name, v.Fields, !v.IsUnique())...)
		renderMethods = append(renderMethods, makeUpdateSqlMap(entity, "", "", v.Name, v.Fields, !v.IsUnique())...)
		renderMethods = append(renderMethods, makeDeleteSqlMap(entity, "", "", v.Name, v.Fields, !v.IsUnique())...)
	}
	// Combined subject×filter and subject×index variants.
	for _, v1 := range entity.Subject {
		renderMethods = append(renderMethods, makeGetSqlMap(entity, v1.Slug, v1.Field, "", "", true)...)
		for _, v2 := range entity.Filter {
			renderMethods = append(renderMethods, makeGetSqlMap(entity, v1.Slug, v1.Field, v2.Slug, v2.Field, true)...)
			renderMethods = append(renderMethods, makeUpdateSqlMap(entity, v1.Slug, v1.Field, v2.Slug, v2.Field, true)...)
			renderMethods = append(renderMethods, makeDeleteSqlMap(entity, v1.Slug, v1.Field, v2.Slug, v2.Field, true)...)
		}
		// NOTE(review): here the subject/index argument positions are swapped
		// relative to the loops above (v.Fields as key, v1.Field as field) —
		// confirm this ordering is intentional for combined methods.
		for _, v := range entity.Index {
			renderMethods = append(renderMethods, makeGetSqlMap(entity, v1.Slug, v.Fields, v.Name, v1.Field, !v.IsUnique())...)
			renderMethods = append(renderMethods, makeUpdateSqlMap(entity, v1.Slug, v.Fields, v.Name, v1.Field, !v.IsUnique())...)
			renderMethods = append(renderMethods, makeDeleteSqlMap(entity, v1.Slug, v.Fields, v.Name, v1.Field, !v.IsUnique())...)
		}
	}
	// Only add generated methods whose slug is not already defined.
	for _, base := range renderMethods {
		local := renderService.GetMethod(base.Slug)
		if local == nil {
			renderService.Methods = append(renderService.Methods, base)
		}
	}
	for _, v := range renderService.Methods {
		v.SetIOName()
	}
	return
}
|
package metric
// TcpRow is a single entry of the TCP connection table.
type TcpRow struct {
	LocalAddr  string
	LocalPort  uint16
	RemoteAddr string
	RemotePort uint16
	State      int // numeric TCP state code; NOTE(review): semantics depend on the OS API that fills this — confirm
}
// TcpTable is a snapshot of the system's TCP connection table.
type TcpTable struct {
	Table []TcpRow
}
// GetActivePorts returns the distinct local TCP ports present in the table.
// Returns nil when the table is empty.
func (p *TcpTable) GetActivePorts() []uint16 {
	// Use struct{} values for the de-duplication set instead of the
	// original map[uint16]string filled with " " — zero storage per entry.
	seen := make(map[uint16]struct{}, len(p.Table))
	for _, row := range p.Table {
		seen[row.LocalPort] = struct{}{}
	}
	var ports []uint16
	for port := range seen {
		ports = append(ports, port)
	}
	return ports
}
|
package control
import (
. "../config"
"sort"
)
func add_new_peer_to_elevlist(id string) {
var empty_queue [2][N_floors]int
var empty_ack_list [2][N_floors]int
for j := 0; j < 2; j++ {
for k := 0; k < N_floors; k++ {
empty_queue[j][k] = 0
empty_ack_list[j][k] = 0
}
}
new_empty_peer := elevator_states{destination: empty_order, last_known_floor: -1, dir: MD_Stop, state: IDLE, queue: empty_queue, ack_list: empty_ack_list}
elev_list[id] = &new_empty_peer
if len(elev_list) > 1 {
single_mode = false
}
}
// set_value_in_ack_list writes value into the outgoing ack-list slot for the
// given hall order. The placeholder empty_order is ignored.
func set_value_in_ack_list(value int, order Order) {
	if order.Floor == empty_order.Floor {
		return
	}
	row := 0
	if order.Button == BT_HallDown {
		row = 1
	}
	outgoing_msg.Ack_list[row][order.Floor] = value
}
// update_local_elevator_struct copies this elevator's live state (and the
// current outgoing ack list) into its own entry in elev_list.
func update_local_elevator_struct(elevator Elevator) {
	me := elev_list[elevID]
	me.destination = elevator.Destination
	me.last_known_floor = elevator.Last_known_floor
	me.state = elevator.State
	me.dir = elevator.Dir
	me.ack_list = outgoing_msg.Ack_list
}
// update_outgoing_msg mirrors the local elevator state into the message that
// is broadcast to peers. The ack list is maintained separately.
func update_outgoing_msg(elevator Elevator) {
	outgoing_msg.Destination = elevator.Destination
	outgoing_msg.Last_known_floor = elevator.Last_known_floor
	outgoing_msg.State = elevator.State
	outgoing_msg.Dir = elevator.Dir
}
// update_extern_elevator_struct updates a peer's entry in elev_list from a
// received network message. Messages from unknown peers are ignored.
func update_extern_elevator_struct(elevator Msg_struct) {
	peer := elev_list[elevator.ID]
	if peer == nil {
		return
	}
	peer.destination = elevator.Destination
	peer.last_known_floor = elevator.Last_known_floor
	peer.state = elevator.State
	peer.dir = elevator.Dir
	peer.ack_list = elevator.Ack_list
}
// cost_function scores how suitable elevator `id` is for serving `order`;
// lower is better. Powerloss adds a prohibitive penalty, while being at or
// heading toward the order's floor earns discounts; the travel distance is
// added last.
func cost_function(id string, order Order) int {
	e := elev_list[id]
	cost := 0
	// Make sure no elevator with powerloss gets assigned.
	if e.state == POWERLOSS {
		cost += 100
	}
	// Already standing (idle or door open) at the ordered floor.
	if (e.state == IDLE || e.state == DOOROPEN) && e.last_known_floor == order.Floor {
		cost -= 15
	}
	// Order is already this elevator's destination.
	if e.destination.Floor == order.Floor {
		cost -= 15
	}
	// Order is already queued (either button direction).
	for bt := 0; bt < 2; bt++ {
		if e.queue[bt][order.Floor] == 1 {
			cost -= 15
		}
	}
	if order.Button == BT_HallUp {
		// Going up with the ordered floor in between.
		if e.last_known_floor < order.Floor && e.destination.Floor > order.Floor {
			cost -= 10
		}
	} else {
		// Going down with the ordered floor in between.
		if e.last_known_floor > order.Floor && e.destination.Floor < order.Floor && e.destination.Floor != empty_order.Floor {
			cost -= 10
		}
	}
	// Completely idle elevators get a small preference.
	if e.state == IDLE && e.destination.Floor == empty_order.Floor {
		cost -= 5
	}
	// Add the absolute distance between the elevator and the order.
	if order.Floor > e.last_known_floor {
		cost += order.Floor - e.last_known_floor
	} else {
		cost += e.last_known_floor - order.Floor
	}
	return cost
}
// add_order_to_elevlist marks the hall order in the assigned elevator's queue.
func add_order_to_elevlist(assigned_id string, order Order) {
	row := 0
	if order.Button == BT_HallDown {
		row = 1
	}
	elev_list[assigned_id].queue[row][order.Floor] = 1
}
// getLowestCostElevatorID picks the elevator with the lowest cost for the
// order. IDs are iterated in sorted order so every elevator deterministically
// computes the same winner. Returns "" when no elevator beats the initial
// threshold (N_floors), e.g. when all peers are in powerloss.
func getLowestCostElevatorID(order Order) string {
	keys := make([]string, 0, len(elev_list))
	for id := range elev_list {
		keys = append(keys, id)
	}
	sort.Strings(keys)
	assignedID := ""
	lowestCost := N_floors
	for _, id := range keys {
		if c := cost_function(id, order); c < lowestCost {
			lowestCost = c
			assignedID = id
		}
	}
	return assignedID
}
// synchronize implements the distributed order-acknowledging protocol by
// comparing a peer's ack list against our own, slot by slot.
// NOTE(review): the ack values appear to cycle -1 (idle) -> 0 (announced)
// -> 1 (acknowledged) -> -1 (assigned/complete); confirm against the sender.
// New orders won by this elevator go to extern_order_ch; orders won by other
// elevators only light the button via illuminate_extern_order_ch.
func synchronize(inc_msg Msg_struct, illuminate_extern_order_ch chan<- Order, extern_order_ch chan<- Order) {
	for i := 0; i < 2; i++ {
		for j := 0; j < N_floors; j++ {
			switch inc_msg.Ack_list[i][j] {
			case 0:
				// Peer announced a new order while both our outgoing list and
				// the peer's stored state were idle: run the cost function and
				// assign the order.
				if outgoing_msg.Ack_list[i][j] == -1 && (*elev_list[inc_msg.ID]).ack_list[i][j] == -1 {
					bt_type := BT_HallUp
					if i == 1 {
						bt_type = BT_HallDown
					}
					order := Order{Button: bt_type, Floor: j}
					assignedID := getLowestCostElevatorID(order)
					add_order_to_elevlist(assignedID, order)
					if assignedID == elevID {
						go func() { extern_order_ch <- order }()
					} else {
						go func() { illuminate_extern_order_ch <- order }()
					}
					outgoing_msg.Ack_list[i][j] = 0
				}
			case 1:
				// Peer acknowledged: follow it to 1, and if we were already at
				// 1 and the order is ours, advance to -1 (taken).
				if outgoing_msg.Ack_list[i][j] == 0 {
					outgoing_msg.Ack_list[i][j] = 1
				} else if outgoing_msg.Ack_list[i][j] == 1 {
					bt_type := BT_HallUp
					if i == 1 {
						bt_type = BT_HallDown
					}
					order := Order{Button: bt_type, Floor: j}
					assignedID := getLowestCostElevatorID(order)
					if assignedID == elevID {
						outgoing_msg.Ack_list[i][j] = -1
					}
				}
			case -1:
				// Peer reset the slot: follow it down, and if the peer is the
				// assignee, re-announce and queue the order on its behalf.
				if outgoing_msg.Ack_list[i][j] == 1 {
					outgoing_msg.Ack_list[i][j] = -1
				} else if outgoing_msg.Ack_list[i][j] == -1 {
					bt_type := BT_HallUp
					if i == 1 {
						bt_type = BT_HallDown
					}
					order := Order{Button: bt_type, Floor: j}
					assignedID := getLowestCostElevatorID(order)
					if assignedID == inc_msg.ID {
						outgoing_msg.Ack_list[i][j] = 0
						add_order_to_elevlist(assignedID, order)
						go func() { illuminate_extern_order_ch <- order }()
					}
				}
			}
		}
	}
}
// handling_powerloss redistributes hall orders around a power failure: a
// peer's queued orders are re-announced via the outgoing ack list, and our
// own queue is cleared when we are the elevator without power.
func handling_powerloss(inc_msg Msg_struct) {
	if inc_msg.State == POWERLOSS {
		for i := 0; i < 2; i++ {
			for j := 0; j < N_floors; j++ {
				// Delete order in elevator's queue if the elevator has powerloss.
				if (*elev_list[inc_msg.ID]).queue[i][j] == 1 && outgoing_msg.Ack_list[i][j] != -1 {
					outgoing_msg.Ack_list[i][j] = 1
					(*elev_list[inc_msg.ID]).queue[i][j] = 0
				}
			}
		}
	}
	if (*elev_list[elevID]).state == POWERLOSS {
		for i := 0; i < 2; i++ {
			for j := 0; j < N_floors; j++ {
				// Delete orders in our own queue if we have powerloss.
				if (*elev_list[elevID]).queue[i][j] == 1 && (inc_msg.Ack_list[i][j] == 1 || inc_msg.Ack_list[i][j] == -1) {
					(*elev_list[elevID]).queue[i][j] = 0
				}
			}
		}
	}
}
// delete_order_if_handled clears both hall orders at an elevator's current
// floor once its door is open. For remote elevators the floor is also sent
// to the light-clearing channel.
func delete_order_if_handled(id string, clear_lights_and_extern_orders_ch chan<- int) {
	e := elev_list[id]
	if e.state != DOOROPEN {
		return
	}
	if id != elevID {
		go func() { clear_lights_and_extern_orders_ch <- e.last_known_floor }()
	}
	e.queue[0][e.last_known_floor] = 0
	e.queue[1][e.last_known_floor] = 0
}
// lost_peer_event handles peers dropping off the network: their pending hall
// orders are re-announced through the outgoing ack list before the peer is
// deleted, and single mode is entered when we are the only elevator left.
func lost_peer_event(lost_peers []string) {
	for i := 0; i < len(lost_peers); i++ {
		// Copy orders before deleting the elevator.
		for j := 0; j < 2; j++ {
			for k := 0; k < N_floors; k++ {
				if (*elev_list[lost_peers[i]]).queue[j][k] == 1 && outgoing_msg.Ack_list[j][k] != -1 {
					outgoing_msg.Ack_list[j][k] = 1
					(*elev_list[lost_peers[i]]).queue[j][k] = 0
				}
			}
		}
		// Never delete our own entry, even if we were reported lost.
		if lost_peers[i] != elevID {
			delete(elev_list, lost_peers[i])
		}
	}
	if len(elev_list) == 1 {
		single_mode = true
	}
}
|
package main
import "fmt"
// MyInt demonstrates implementing fmt.Stringer on a named integer type.
type MyInt int

// String implements fmt.Stringer; every MyInt renders as "myint",
// regardless of its numeric value.
func (i MyInt) String() string {
	const rendered = "myint"
	return rendered
}
// main prints a MyInt via its Stringer implementation.
func main() {
	// Println invokes String() for fmt.Stringer values and appends a
	// newline, matching the original Printf("%s\n", ...) output exactly.
	fmt.Println(MyInt(1))
}
|
package day09
import (
"strconv"
"../utils"
)
// input holds the puzzle lines; the read error is deliberately ignored.
var input, _ = utils.ReadFile("day09/input.txt")

// preamble is the length of the sliding window of preceding numbers.
var preamble = 25
// isValid reports whether val equals the sum of any two distinct
// entries of list.
func isValid(val int, list []int) bool {
	for i, a := range list {
		for _, b := range list[i+1:] {
			if a+b == val {
				return true
			}
		}
	}
	return false
}
// ParseLines converts each input line to an int. Lines that fail to parse
// (e.g. a trailing blank line) are skipped; the original ignored the Atoi
// error and silently appended 0 for every bad line, corrupting the data.
func ParseLines(input []string) []int {
	data := make([]int, 0, len(input))
	for _, line := range input {
		v, err := strconv.Atoi(line)
		if err != nil {
			continue
		}
		data = append(data, v)
	}
	return data
}
// Solve1 returns the first value that is not the sum of two of the
// preceding `preamble` numbers, or 0 if every value is valid.
func Solve1() int {
	xmas := ParseLines(input)
	for i := preamble; i < len(xmas); i++ {
		// Slice the window directly; isValid only reads it, so no copy
		// is needed.
		window := xmas[i-preamble : i]
		if !isValid(xmas[i], window) {
			return xmas[i]
		}
	}
	return 0
}
// Solve2 finds a contiguous run of numbers summing to Solve1's answer and
// returns the run's min plus its max.
//
// BUG FIX: the original used `break` inside a `switch`, which only exits the
// switch, not the inner loop — the scan kept accumulating past the target.
func Solve2() int {
	xmas := ParseLines(input)
	target := Solve1()
	for i := 0; i < len(xmas)-1; i++ {
		min, max, acc := xmas[i], xmas[i], xmas[i]
		for j := i + 1; j < len(xmas); j++ {
			v := xmas[j]
			if v < min {
				min = v
			}
			if v > max {
				max = v
			}
			acc += v
			if acc == target {
				return min + max
			}
			if acc > target {
				// Overshot: no longer run starting at i can match (inputs
				// are positive), so move the window start.
				break
			}
		}
	}
	return 0
}
|
/*
There are many different styles of music and many albums exhibit multiple styles. Create a function that takes an array of musical styles from albums and returns how many styles are unique.
Examples
uniqueStyles([
"Dub, Dancehall",
"Industrial, Heavy Metal",
"Techno, Dubstep",
"Synth-pop, Euro-Disco",
"Industrial, Techno, Minimal"
]) ➞ 9
uniqueStyles([
"Soul",
"House, Folk",
"Trance, Downtempo, Big Beat, House",
"Deep House",
"Soul"
]) ➞ 7
*/
package main
import (
"strings"
)
// main sanity-checks uniquestyles against the four example cases;
// any mismatch panics via assert.
func main() {
	cases := []struct {
		albums []string
		want   int
	}{
		{[]string{
			"Dub,Dancehall",
			"Industrial,Heavy Metal",
			"Techno,Dubstep",
			"Synth-pop,Euro-Disco",
			"Industrial,Techno,Minimal",
		}, 9},
		{[]string{
			"Soul",
			"House,Folk",
			"Trance,Downtempo,Big Beat,House",
			"Deep House",
			"Soul",
		}, 7},
		{[]string{
			"Black Metal,Avantgarde",
			"Funk",
			"Deep House,House",
			"Big Band",
			"Punk",
		}, 7},
		{[]string{
			"Funk",
			"Funk",
			"Funk",
			"Funk",
			"Funk",
		}, 1},
	}
	for _, c := range cases {
		assert(uniquestyles(c.albums) == c.want)
	}
}
// uniquestyles counts the distinct musical styles across all album style
// strings. Styles are comma-separated within each string and compared
// case-insensitively with surrounding whitespace ignored.
func uniquestyles(albums []string) int {
	seen := make(map[string]bool)
	for _, album := range albums {
		for _, style := range strings.Split(album, ",") {
			key := strings.TrimSpace(strings.ToLower(style))
			seen[key] = true
		}
	}
	return len(seen)
}
// assert panics when the supplied condition is false.
func assert(ok bool) {
	if ok {
		return
	}
	panic("assertion failed")
}
|
package elements
import (
"fmt"
"sort"
"strconv"
"github.com/Nv7-Github/Nv7Haven/eod/types"
)
// FoundCmd replies with a paged list of all users whose inventory contains
// the given element.
func (b *Elements) FoundCmd(elem string, m types.Msg, rsp types.Rsp) {
	b.lock.RLock()
	dat, exists := b.dat[m.GuildID]
	b.lock.RUnlock()
	if !exists {
		return
	}
	rsp.Acknowledge()
	el, res := dat.GetElement(elem)
	if !res.Exists {
		rsp.ErrorMessage(res.Message)
		return
	}
	// Collect the set of users owning the element.
	items := make(map[string]types.Empty)
	for _, inv := range dat.Inventories {
		if inv.Elements.Contains(el.Name) {
			items[inv.User] = types.Empty{}
		}
	}
	out := make([]string, len(items))
	i := 0
	for k := range items {
		out[i] = k
		i++
	}
	// Sort numerically when both IDs parse as integers, falling back to
	// lexicographic order otherwise.
	// BUG FIX: the original condition was `err1 != nil && err2 != nil`,
	// i.e. it compared the parsed ints only when BOTH parses had FAILED
	// (comparing garbage), and compared numeric IDs as strings.
	sort.Slice(out, func(i, j int) bool {
		int1, err1 := strconv.Atoi(out[i])
		int2, err2 := strconv.Atoi(out[j])
		if err1 == nil && err2 == nil {
			return int1 < int2
		}
		return out[i] < out[j]
	})
	for i, v := range out {
		out[i] = fmt.Sprintf("<@%s>", v)
	}
	b.base.NewPageSwitcher(types.PageSwitcher{
		Kind:       types.PageSwitchInv,
		Title:      fmt.Sprintf("%s Found (%d)", el.Name, len(out)),
		PageGetter: b.base.InvPageGetter,
		Items:      out,
		User:       m.Author.ID,
	}, m, rsp)
}
|
package mysql
import (
"github.com/jinzhu/gorm"
"github.com/smilga/analyzer/api"
)
// FeatureStore persists api.Feature records through a gorm DB handle.
type FeatureStore struct {
	DB *gorm.DB
}
// All returns every stored feature.
func (s *FeatureStore) All() ([]*api.Feature, error) {
	features := []*api.Feature{}
	if err := s.DB.Find(&features).Error; err != nil {
		return nil, err
	}
	return features, nil
}
// Get loads a single feature by its primary key.
func (s *FeatureStore) Get(id api.FeatureID) (*api.Feature, error) {
	feature := &api.Feature{}
	// gorm unwraps the extra level of indirection (&feature is **Feature),
	// matching the original call's behavior exactly.
	if err := s.DB.First(&feature, id).Error; err != nil {
		return nil, err
	}
	return feature, nil
}
// Save inserts the feature as a new row (gorm Create).
func (s *FeatureStore) Save(f *api.Feature) error {
	return s.DB.Create(f).Error
}
// NewFeatureStore wraps the given gorm handle in a FeatureStore.
func NewFeatureStore(DB *gorm.DB) *FeatureStore {
	return &FeatureStore{DB: DB}
}
|
package aoc2015
import (
"crypto/md5"
"fmt"
"strconv"
)
// checkHash reports whether the first n hex digits (nibbles) of
// MD5(augend + itoa(addend)) are all zero. Nibble 2k is the high half of
// digest byte k, nibble 2k+1 the low half.
func checkHash(n int, augend string, addend int) bool {
	sum := md5.Sum([]byte(augend + strconv.Itoa(addend)))
	for i := 0; i < n; i++ {
		nib := sum[i/2]
		if i%2 == 0 {
			nib >>= 4 // high nibble comes first
		}
		if nib&0x0F != 0 {
			return false
		}
	}
	return true
}
// Day04 solves the fourth day puzzle "The Ideal Stocking Stuffer".
//
// Input
//
// A string of length 8 containing only the lowercase
// letters 'a' to 'z'. For example:
//
//	iwrupvqb
//
// Because this is more or less a brute-force solution,
// it may take a long time before returning an answer.
//
// FIXES over the original: the addend-feeder goroutines looped forever with
// no stop signal (goroutine leak per call), the five/six-zero searches were
// copy-pasted, and a second simultaneous finder could block forever on the
// unbuffered result channel. All goroutines now terminate.
func Day04(input string) (answer1, answer2 string, err error) {
	const processes = 8 // worker goroutines per difficulty level

	done := make(chan struct{}) // closed on return; stops the feeders
	defer close(done)

	// addends returns a channel producing 1, 2, 3, ... until done closes.
	addends := func() chan int {
		out := make(chan int, processes*2)
		go func() {
			for next := 1; ; next++ {
				select {
				case out <- next:
				case <-done:
					return
				}
			}
		}()
		return out
	}

	// search launches workers hunting for an addend whose hash passes
	// checkHash(n, ...) and delivers the (first-found, not necessarily
	// lowest — same as the original) winner on the returned channel.
	search := func(n int) chan int {
		result := make(chan int)
		in := addends()
		found := make(chan struct{}) // closed once a winner is delivered
		for i := 0; i < processes; i++ {
			go func() {
				for {
					select {
					case <-found:
						return
					case addend := <-in:
						if checkHash(n, input, addend) {
							select {
							case result <- addend:
								close(found) // we delivered the winner
							case <-found: // another worker already won
							}
							return
						}
					}
				}
			}()
		}
		return result
	}

	answer1 = strconv.Itoa(<-search(5))
	answer2 = strconv.Itoa(<-search(6))
	return
}
// Day04ST solves the fourth day puzzle "The Ideal Stocking Stuffer"
// but is a single-threaded solution.
//
// FIX over the original: the MD5 digest was computed twice per candidate
// (once for the five-zero check, once for the six-zero check); each
// candidate is now hashed exactly once.
func Day04ST(input string) (answer1, answer2 string, err error) {
	addend := 1        // appended to input each iteration
	foundFive := false // have we found five zeroes?
	foundSix := false  // have we found six zeroes?
	for {
		hexDigest := fmt.Sprintf("%x", md5.Sum([]byte(input+strconv.Itoa(addend))))
		if !foundFive && hexDigest[:5] == "00000" {
			foundFive = true
			answer1 = strconv.Itoa(addend)
		}
		if !foundSix && hexDigest[:6] == "000000" {
			foundSix = true
			answer2 = strconv.Itoa(addend)
		}
		if foundFive && foundSix {
			return
		}
		addend++
	}
}
|
package main
import (
"fmt"
"os"
"os/signal"
"path/filepath"
"sync"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/contrib/apparmor"
"github.com/containerd/containerd/defaults"
gocni "github.com/containerd/go-cni"
"github.com/crosbymichael/boss/config"
"github.com/crosbymichael/boss/monitor"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/urfave/cli"
"golang.org/x/sys/unix"
)
// agentCommand runs the boss agent: it writes the configured nameservers to
// resolv.conf, connects to containerd, wires up networking backends, and
// runs the reconcile monitor until SIGTERM (graceful shutdown) or SIGINT
// (stop) arrives.
var agentCommand = cli.Command{
	Name:  "agent",
	Usage: "run the boss agent for restarting services",
	Flags: []cli.Flag{
		cli.DurationFlag{
			Name:  "interval,i",
			Usage: "set the interval to reconcile state",
			Value: 10 * time.Second,
		},
		cli.StringSliceFlag{
			Name:  "nameservers,n",
			Usage: "set the boss nameservers",
			Value: &cli.StringSlice{
				"8.8.8.8",
				"8.8.4.4",
			},
		},
	},
	// Before rewrites <config.Root>/resolv.conf with one "nameserver" line
	// per configured server.
	Before: func(clix *cli.Context) error {
		f, err := os.Create(filepath.Join(config.Root, "resolv.conf"))
		if err != nil {
			return err
		}
		defer f.Close()
		for _, ns := range clix.StringSlice("nameservers") {
			if _, err := f.WriteString(fmt.Sprintf("nameserver %s\n", ns)); err != nil {
				return err
			}
		}
		return nil
	},
	Action: func(clix *cli.Context) error {
		signals := make(chan os.Signal, 64)
		signal.Notify(signals, unix.SIGTERM, unix.SIGINT)
		// generate default apparmor profile
		if err := apparmor.WithDefaultProfile("boss")(nil, nil, nil, &specs.Spec{
			Process: &specs.Process{},
		}); err != nil {
			return err
		}
		client, err := containerd.New(
			defaults.DefaultAddress,
			containerd.WithDefaultRuntime("io.containerd.runc.v1"),
		)
		if err != nil {
			return err
		}
		defer client.Close()
		networks := make(map[config.NetworkType]monitor.Network)
		networks[config.Host] = &host{}
		networks[config.None] = &none{}
		// NOTE(review): a CNI initialization error is silently ignored here —
		// the CNI network type is simply unavailable. Confirm this best-effort
		// behavior is intended.
		if networking, err := gocni.New(gocni.WithPluginDir([]string{"/opt/containerd/bin"}), gocni.WithDefaultConf); err == nil {
			networks[config.CNI] = &cni{network: networking}
		}
		m := monitor.New(client, register, networks)
		var once sync.Once
		go func() {
			for s := range signals {
				switch s {
				case unix.SIGTERM:
					once.Do(m.Shutdown)
				case unix.SIGINT:
					once.Do(func() {
						m.Stop()
					})
				}
			}
		}()
		if err := m.Attach(); err != nil {
			return err
		}
		m.Run(clix.Duration("interval"))
		return nil
	},
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cli
import (
	"bytes"
	"context"
	"fmt"
	"sort"
	"strings"

	"github.com/gosuri/uitable"
	"github.com/oam-dev/cluster-gateway/pkg/generated/clientset/versioned"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	v1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	apiregistrationV1beta "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
	apiregistration "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1"
	metrics "k8s.io/metrics/pkg/client/clientset/versioned"
	"sigs.k8s.io/yaml"

	"github.com/oam-dev/kubevela/apis/types"
	"github.com/oam-dev/kubevela/pkg/multicluster"
	"github.com/oam-dev/kubevela/pkg/utils/common"
)
const (
	// FlagSpecify specifies the deployment name
	FlagSpecify = "specify"
	// FlagOutputFormat specifies the output format. One of: (wide | yaml)
	FlagOutputFormat = "output"
	// APIServiceName is the name of the cluster-gateway APIService
	APIServiceName = "v1alpha1.cluster.core.oam.dev"
	// UnknownMetric represents that we can't compute the metric data
	UnknownMetric = "N/A"
)
// NewSystemCommand creates the `vela system` command group with the `info`
// and `diagnose` subcommands.
func NewSystemCommand(c common.Args, order string) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "system",
		Short: "Manage system.",
		Long:  "Manage system, including printing the system deployment information in vela-system namespace and diagnosing the system's health.",
		Example: "# Check all deployments information in all namespaces with label app.kubernetes.io/name=vela-core :\n" +
			"> vela system info\n" +
			"# Specify a deployment name with a namespace to check detail information:\n" +
			"> vela system info -s kubevela-vela-core -n vela-system\n" +
			"# Diagnose the system's health:\n" +
			"> vela system diagnose\n",
		Annotations: map[string]string{
			types.TagCommandType:  types.TypeSystem,
			types.TagCommandOrder: order,
		},
	}
	cmd.AddCommand(
		NewSystemInfoCommand(c),
		NewSystemDiagnoseCommand(c))
	return cmd
}
// NewSystemInfoCommand prints the system deployment detail information in
// all namespaces with label app.kubernetes.io/name=vela-core. With -s a
// single deployment is shown in detail; otherwise all matching deployments
// are listed in the chosen output format ("" | wide | yaml).
//
// FIX: the original contained the output-format validation block (and its
// "Get kube config" comment) duplicated verbatim; the copy is removed.
func NewSystemInfoCommand(c common.Args) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "info",
		Short: "Print the system deployment detail information in all namespaces with label app.kubernetes.io/name=vela-core.",
		Long:  "Print the system deployment detail information in all namespaces with label app.kubernetes.io/name=vela-core.",
		Args:  cobra.ExactArgs(0),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Get deploymentName from flag
			deployName, err := cmd.Flags().GetString(FlagSpecify)
			if err != nil {
				return errors.Wrapf(err, "failed to get deployment name flag")
			}
			// Get output format from flag
			outputFormat, err := cmd.Flags().GetString(FlagOutputFormat)
			if err != nil {
				return errors.Wrapf(err, "failed to get output format flag")
			}
			// Validate the output format (once).
			if outputFormat != "" {
				outputFormatOptions := map[string]struct{}{
					"wide": {},
					"yaml": {},
				}
				if _, exist := outputFormatOptions[outputFormat]; !exist {
					return errors.Errorf("Outputformat must in wide | yaml !")
				}
			}
			// Get kube config
			config, err := c.GetConfig()
			if err != nil {
				return err
			}
			// Get clientset
			clientset, err := kubernetes.NewForConfig(config)
			if err != nil {
				return err
			}
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			// List vela-core deployments across all namespaces.
			deployments, err := clientset.AppsV1().Deployments(metav1.NamespaceAll).List(
				ctx,
				metav1.ListOptions{
					LabelSelector: "app.kubernetes.io/name=vela-core",
				},
			)
			if err != nil {
				return err
			}
			if deployName != "" {
				// DeployName is not empty, print the specified deployment's information
				found := false
				for _, deployment := range deployments.Items {
					if deployment.Name == deployName {
						table := SpecifiedFormatPrinter(deployment)
						cmd.Println(table.String())
						found = true
						break
					}
				}
				if !found {
					return errors.Errorf("deployment \"%s\" not found", deployName)
				}
			} else {
				// Get metrics clientset
				mc, err := metrics.NewForConfig(config)
				if err != nil {
					return err
				}
				switch outputFormat {
				case "":
					table, err := NormalFormatPrinter(ctx, deployments, mc)
					if err != nil {
						return err
					}
					cmd.Println(table.String())
				case "wide":
					table, err := WideFormatPrinter(ctx, deployments, mc)
					if err != nil {
						return err
					}
					cmd.Println(table.String())
				case "yaml":
					str, err := YamlFormatPrinter(deployments)
					if err != nil {
						return err
					}
					cmd.Println(str)
				}
			}
			return nil
		},
		Annotations: map[string]string{
			types.TagCommandType: types.TypeSystem,
		},
	}
	cmd.Flags().StringP(FlagSpecify, "s", "", "Specify the name of the deployment to check detail information. If empty, it will print all deployments information. Default to be empty.")
	cmd.Flags().StringP(FlagOutputFormat, "o", "", "Specifies the output format. One of: (wide | yaml)")
	return cmd
}
// SpecifiedFormatPrinter prints the specified deployment's information as a
// two-column detail table.
// NOTE(review): indexes Containers[0] unconditionally — assumes the pod
// template has at least one container; panics otherwise. Confirm callers
// only pass vela-core deployments.
func SpecifiedFormatPrinter(deployment v1.Deployment) *uitable.Table {
	table := newUITable().
		AddRow("Name:", deployment.Name).
		AddRow("Namespace:", deployment.Namespace).
		AddRow("CreationTimestamp:", deployment.CreationTimestamp).
		AddRow("Labels:", Map2Str(deployment.Labels)).
		AddRow("Annotations:", Map2Str(deployment.Annotations)).
		AddRow("Selector:", Map2Str(deployment.Spec.Selector.MatchLabels)).
		AddRow("Image:", deployment.Spec.Template.Spec.Containers[0].Image).
		AddRow("Args:", strings.Join(deployment.Spec.Template.Spec.Containers[0].Args, "\n")).
		AddRow("Envs:", GetEnvVariable(deployment.Spec.Template.Spec.Containers[0].Env)).
		AddRow("Limits:", CPUMem(deployment.Spec.Template.Spec.Containers[0].Resources.Limits)).
		AddRow("Requests:", CPUMem(deployment.Spec.Template.Spec.Containers[0].Resources.Requests))
	table.MaxColWidth = 120
	return table
}
// CPUMem formats the cpu and memory quantities of a resource list as
// "cpu=...\nmemory=...".
func CPUMem(resourceList corev1.ResourceList) string {
	var out bytes.Buffer
	fmt.Fprintf(&out, "cpu=%s\n", resourceList.Cpu())
	fmt.Fprintf(&out, "memory=%s", resourceList.Memory())
	return out.String()
}
// Map2Str renders a map as newline-separated "key=value" pairs.
//
// FIX: keys are sorted so the output is deterministic — Go map iteration
// order is randomized, so the original printed pairs in a different order
// on every run.
func Map2Str(m map[string]string) string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	b := new(bytes.Buffer)
	for i, k := range keys {
		if i > 0 {
			b.WriteByte('\n')
		}
		fmt.Fprintf(b, "%s=%s", k, m[k])
	}
	return b.String()
}
// NormalFormatPrinter builds the default table: one row per deployment with
// ready-pod counts, first-container image, and aggregated cpu/memory usage.
// The error return is always nil; it is kept for signature symmetry with the
// other printers.
func NormalFormatPrinter(ctx context.Context, deployments *v1.DeploymentList, mc *metrics.Clientset) (*uitable.Table, error) {
	table := newUITable().AddRow("NAME", "NAMESPACE", "READY PODS", "IMAGE", "CPU(cores)", "MEMORY(bytes)")
	cpuMetricMap, memMetricMap := ComputeMetricByDeploymentName(ctx, deployments, mc)
	for _, deploy := range deployments.Items {
		table.AddRow(
			deploy.Name,
			deploy.Namespace,
			fmt.Sprintf("%d/%d", deploy.Status.ReadyReplicas, deploy.Status.Replicas),
			deploy.Spec.Template.Spec.Containers[0].Image,
			cpuMetricMap[deploy.Name],
			memMetricMap[deploy.Name],
		)
	}
	return table, nil
}
// WideFormatPrinter builds the wide table: the normal columns plus container
// args and (length-limited) environment variables. The error return is
// always nil; it is kept for signature symmetry with the other printers.
func WideFormatPrinter(ctx context.Context, deployments *v1.DeploymentList, mc *metrics.Clientset) (*uitable.Table, error) {
	table := newUITable().AddRow("NAME", "NAMESPACE", "READY PODS", "IMAGE", "CPU(cores)", "MEMORY(bytes)", "ARGS", "ENVS")
	table.MaxColWidth = 100
	cpuMetricMap, memMetricMap := ComputeMetricByDeploymentName(ctx, deployments, mc)
	for _, deploy := range deployments.Items {
		table.AddRow(
			deploy.Name,
			deploy.Namespace,
			fmt.Sprintf("%d/%d", deploy.Status.ReadyReplicas, deploy.Status.Replicas),
			deploy.Spec.Template.Spec.Containers[0].Image,
			cpuMetricMap[deploy.Name],
			memMetricMap[deploy.Name],
			strings.Join(deploy.Spec.Template.Spec.Containers[0].Args, " "),
			// Env output is truncated to keep the row readable.
			limitStringLength(GetEnvVariable(deploy.Spec.Template.Spec.Containers[0].Env), 180),
		)
	}
	return table, nil
}
// YamlFormatPrinter renders every deployment as YAML and concatenates the
// documents. Uses a strings.Builder instead of the original quadratic
// `str +=` accumulation.
func YamlFormatPrinter(deployments *v1.DeploymentList) (string, error) {
	var sb strings.Builder
	for _, deployment := range deployments.Items {
		// Set ManagedFields to nil because it's too long to read.
		deployment.ManagedFields = nil
		deploymentYaml, err := yaml.Marshal(deployment)
		if err != nil {
			return "", err
		}
		sb.Write(deploymentYaml)
	}
	return sb.String(), nil
}
// ComputeMetricByDeploymentName aggregates pod cpu (milli-cores) and memory
// (MiB) usage per deployment. If the metrics API is unavailable, every
// deployment maps to UnknownMetric.
// NOTE(review): pods are matched to deployments by name prefix only, which
// can over-count when one deployment's name is a prefix of another's —
// confirm this is acceptable for vela-core deployments.
func ComputeMetricByDeploymentName(ctx context.Context, deployments *v1.DeploymentList, mc *metrics.Clientset) (cpuMetricMap, memMetricMap map[string]string) {
	cpuMetricMap = make(map[string]string)
	memMetricMap = make(map[string]string)
	podMetricsList, err := mc.MetricsV1beta1().PodMetricses(metav1.NamespaceAll).List(
		ctx,
		metav1.ListOptions{},
	)
	if err != nil {
		// Metrics server not reachable: report N/A rather than failing.
		for _, deploy := range deployments.Items {
			cpuMetricMap[deploy.Name] = UnknownMetric
			memMetricMap[deploy.Name] = UnknownMetric
		}
		return
	}
	for _, deploy := range deployments.Items {
		cpuUsage, memUsage := int64(0), int64(0)
		for _, pod := range podMetricsList.Items {
			if strings.HasPrefix(pod.Name, deploy.Name) {
				for _, container := range pod.Containers {
					cpuUsage += container.Usage.Cpu().MilliValue()
					memUsage += container.Usage.Memory().Value() / (1024 * 1024)
				}
			}
		}
		cpuMetricMap[deploy.Name] = fmt.Sprintf("%dm", cpuUsage)
		memMetricMap[deploy.Name] = fmt.Sprintf("%dMi", memUsage)
	}
	return
}
// GetEnvVariable renders the environment variables as space-separated
// "name=value " pairs (including a trailing space), or "-" when empty.
func GetEnvVariable(envList []corev1.EnvVar) (envStr string) {
	if len(envList) == 0 {
		return "-"
	}
	var b strings.Builder
	for _, env := range envList {
		fmt.Fprintf(&b, "%s=%s ", env.Name, env.Value)
	}
	return b.String()
}
// NewSystemDiagnoseCommand create command to help user to diagnose system's health.
// The command checks each registered cluster's healthz endpoint via the
// cluster-gateway, then verifies the hub APIServer -> cluster-gateway
// APIService is Available, drilling into its backing pods when it is not.
func NewSystemDiagnoseCommand(c common.Args) *cobra.Command {
	cmd := &cobra.Command{
		Use: "diagnose",
		Short: "Diagnoses system problems.",
		Long: "Diagnoses system problems.",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Diagnose clusters' health
			fmt.Println("------------------------------------------------------")
			fmt.Println("Diagnosing health of clusters...")
			k8sClient, err := c.GetClient()
			if err != nil {
				return errors.Wrapf(err, "failed to get k8s client")
			}
			clusters, err := multicluster.ListVirtualClusters(context.Background(), k8sClient)
			if err != nil {
				return errors.Wrap(err, "fail to get registered cluster")
			}
			// Get kube config
			config, err := c.GetConfig()
			if err != nil {
				return err
			}
			for _, cluster := range clusters {
				clusterName := cluster.Name
				// The local cluster is reachable by definition; skip it.
				if clusterName == multicluster.ClusterLocalName {
					continue
				}
				// Probe the remote cluster's healthz through the gateway.
				content, err := versioned.NewForConfigOrDie(config).ClusterV1alpha1().ClusterGateways().RESTClient(clusterName).Get().AbsPath("healthz").DoRaw(context.TODO())
				if err != nil {
					return errors.Wrapf(err, "failed connect cluster %s", clusterName)
				}
				cmd.Printf("Connect to cluster %s successfully.\n%s\n", clusterName, string(content))
			}
			fmt.Println("Result: Clusters are fine~")
			fmt.Println("------------------------------------------------------")
			// Diagnoses the link of hub APIServer to cluster-gateway
			fmt.Println("------------------------------------------------------")
			fmt.Println("Diagnosing the link of hub APIServer to cluster-gateway...")
			// Get clientset
			clientset, err := apiregistration.NewForConfig(config)
			if err != nil {
				return err
			}
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			apiService, err := clientset.APIServices().Get(ctx, APIServiceName, metav1.GetOptions{})
			if err != nil {
				return err
			}
			for _, condition := range apiService.Status.Conditions {
				if condition.Type == "Available" {
					if condition.Status != "True" {
						// Not available: inspect the backing service/pods for a cause.
						cmd.Printf("APIService \"%s\" is not available! \nMessage: %s\n", APIServiceName, condition.Message)
						return CheckAPIService(ctx, config, apiService)
					}
					cmd.Printf("APIService \"%s\" is available!\n", APIServiceName)
				}
			}
			fmt.Println("Result: The link of hub APIServer to cluster-gateway is fine~")
			fmt.Println("------------------------------------------------------")
			// Todo: Diagnose others
			return nil
		},
		Annotations: map[string]string{
			types.TagCommandType: types.TypeSystem,
		},
	}
	return cmd
}
// CheckAPIService checks the APIService by resolving its backing Service,
// listing the pods selected by that Service, and reporting the first
// not-ready pod together with its failing condition.
func CheckAPIService(ctx context.Context, config *rest.Config, apiService *apiregistrationV1beta.APIService) error {
	svcName := apiService.Spec.Service.Name
	svcNamespace := apiService.Spec.Service.Namespace
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return err
	}
	svc, err := clientset.CoreV1().Services(svcNamespace).Get(ctx, svcName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Use the Service's label selector to find the pods that should back it.
	set := labels.Set(svc.Spec.Selector)
	listOptions := metav1.ListOptions{LabelSelector: set.AsSelector().String()}
	pods, err := clientset.CoreV1().Pods(svcNamespace).List(ctx, listOptions)
	if err != nil {
		return err
	}
	if len(pods.Items) == 0 {
		return errors.Errorf("No available pods in %s namespace with label %s.", svcNamespace, set.AsSelector().String())
	}
	for _, pod := range pods.Items {
		for _, status := range pod.Status.ContainerStatuses {
			if !status.Ready {
				// Surface the first non-True pod condition as the likely cause.
				for _, condition := range pod.Status.Conditions {
					if condition.Status != "True" {
						return errors.Errorf("Pod %s is not ready. Condition \"%s\" status: %s.", pod.Name, condition.Type, condition.Status)
					}
				}
				return errors.Errorf("Pod %s is not ready.", pod.Name)
			}
		}
	}
	return nil
}
|
package main
import "fmt"
// TypeJudge prints, for each argument, which dynamic type it holds.
func TypeJudge(items ...interface{}) {
	for index, item := range items {
		switch item.(type) {
		case bool:
			fmt.Printf("第%v参数是bool类型, 值:%v\n", index, item)
		case float64:
			fmt.Printf("第%v参数是float64类型, 值:%v\n", index, item)
		case int, int32, int64:
			fmt.Printf("第%v参数是整数类型, 值:%v\n", index, item)
		case string:
			fmt.Printf("第%v参数是string类型, 值:%v\n", index, item)
		default:
			fmt.Printf("第%v参数类型不确定, 值:%v\n", index, item)
		}
	}
}
// main demonstrates TypeJudge with a mix of value types.
func main() {
	var age int32 = 18
	weight := 31.8 // untyped float constant defaults to float64
	name := "tom"
	address := "深圳"
	count := 100
	TypeJudge(age, weight, name, address, count)
}
|
package leetcode
// Roman numerals are represented by seven different symbols: I, V, X, L, C, D and M.
// intToRoman converts an integer in [1, 3999] to its Roman numeral, using a
// greedy subtraction over the value/symbol table (including the subtractive
// forms IV, IX, XL, XC, CD, CM).
func intToRoman(num int) string {
	values := []int{1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1}
	numerals := []string{"M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"}
	roman := ""
	for i, v := range values {
		for num >= v {
			roman += numerals[i]
			num -= v
		}
	}
	return roman
}
|
package ds
/**
*
Given an array consisting of n integers,
find the contiguous subarray of given length k that has the maximum average value.
And you need to output the maximum average value.
Example 1:
Input: [1,12,-5,-6,50,3], k = 4
Output: 12.75
Explanation: Maximum average is (12-5-6+50)/4 = 51/4 = 12.75
Note:
1 <= k <= n <= 30,000.
Elements of the given array will be in the range [-10,000, 10,000].
*
*
*
*/
/**
* @param {number[]} nums
* @param {number} k
* @return {number}
*/
// 暴力法
// 找到所有的子数组,然后求平均值 略
// 求出每k个数字的总和
// [1,12,-5,-6,50,3]
// findMaxAverage returns the maximum average of any contiguous subarray of
// length k in nums (requires 1 <= k <= len(nums)).
//
// Implementation: sliding window — keep the sum of the current k elements
// and advance one position at a time, tracking the best sum. O(n) time,
// O(1) extra space.
//
// Bug fix: the original prefix-sum version started its scan at j == 1 and
// therefore never considered the window beginning at index 1 (the source
// comment "j :=1 是错的" even flagged this). The sliding window covers every
// window start from 0 through len(nums)-k.
func findMaxAverage(nums []int, k int) float64 {
	// Sum of the first window.
	windowSum := 0
	for i := 0; i < k; i++ {
		windowSum += nums[i]
	}
	best := windowSum
	// Slide the window: add the entering element, drop the leaving one.
	for i := k; i < len(nums); i++ {
		windowSum += nums[i] - nums[i-k]
		if windowSum > best {
			best = windowSum
		}
	}
	return float64(best) / float64(k)
}

// getMax returns the larger of a and b.
func getMax(a, b float64) float64 {
	if a >= b {
		return a
	}
	return b
}
// slide window
|
// Copyright (c) 2019 Chair of Applied Cryptography, Technische Universität
// Darmstadt, Germany. All rights reserved. This file is part of go-perun. Use
// of this source code is governed by a MIT-style license that can be found in
// the LICENSE file.
package sim
import (
_ "perun.network/go-perun/backend/sim/channel" // backend init
_ "perun.network/go-perun/backend/sim/wallet" // backend init
)
|
package main
// myPow computes x raised to the integer power n, handling negative
// exponents by inverting the base.
func myPow(x float64, n int) float64 {
	exp := int64(n)
	if exp < 0 {
		return fastPow(1/x, -exp)
	}
	return fastPow(x, exp)
}

// fastPow computes x^n for n >= 0 via recursive binary exponentiation.
func fastPow(x float64, n int64) float64 {
	if n == 0 {
		return 1.0
	}
	half := fastPow(x, n/2)
	if n%2 == 0 {
		return half * half
	}
	return half * half * x
}
|
package git
/*
#include <git2.h>
extern const git_oid * git_indexer_hash(const git_indexer *idx);
extern int git_indexer_append(git_indexer *idx, const void *data, size_t size, git_transfer_progress *stats);
extern int git_indexer_commit(git_indexer *idx, git_transfer_progress *stats);
extern int _go_git_indexer_new(git_indexer **out, const char *path, unsigned int mode, git_odb *odb, void *progress_cb_payload);
extern void git_indexer_free(git_indexer *idx);
*/
import "C"
import (
"reflect"
"runtime"
"unsafe"
)
// Indexer can post-process packfiles and create an .idx file for efficient
// lookup.
// Indexer can post-process packfiles and create an .idx file for efficient
// lookup.
type Indexer struct {
	doNotCompare
	ptr        *C.git_indexer          // owned libgit2 indexer handle
	stats      C.git_transfer_progress // progress stats passed to libgit2 calls
	ccallbacks C.git_remote_callbacks  // keeps the Go callback payload registered
}
// NewIndexer creates a new indexer instance.
// packfilePath is where the pack/.idx files are written; odb may be nil;
// callback, if non-nil, receives transfer progress updates from libgit2.
func NewIndexer(packfilePath string, odb *Odb, callback TransferProgressCallback) (indexer *Indexer, err error) {
	var odbPtr *C.git_odb = nil
	if odb != nil {
		odbPtr = odb.ptr
	}
	indexer = new(Indexer)
	// Register the Go callback so libgit2 can call back into Go via cgo.
	populateRemoteCallbacks(&indexer.ccallbacks, &RemoteCallbacks{TransferProgressCallback: callback}, nil)
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	cstr := C.CString(packfilePath)
	defer C.free(unsafe.Pointer(cstr))
	ret := C._go_git_indexer_new(&indexer.ptr, cstr, 0, odbPtr, indexer.ccallbacks.payload)
	runtime.KeepAlive(odb)
	if ret < 0 {
		// Creation failed: unregister the callback payload before returning.
		untrackCallbacksPayload(&indexer.ccallbacks)
		return nil, MakeGitError(ret)
	}
	// Release native resources even if the caller never calls Free.
	runtime.SetFinalizer(indexer, (*Indexer).Free)
	return indexer, nil
}
// Write adds data to the indexer.
// On success it reports the full length of data as consumed (io.Writer style).
func (indexer *Indexer) Write(data []byte) (int, error) {
	// Pass the slice's backing array to C without copying.
	// NOTE(review): reflect.SliceHeader is deprecated in newer Go; consider
	// unsafe.SliceData when the module's Go version allows.
	header := (*reflect.SliceHeader)(unsafe.Pointer(&data))
	ptr := unsafe.Pointer(header.Data)
	size := C.size_t(header.Len)
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	ret := C.git_indexer_append(indexer.ptr, ptr, size, &indexer.stats)
	runtime.KeepAlive(indexer)
	if ret < 0 {
		return 0, MakeGitError(ret)
	}
	return len(data), nil
}
// Commit finalizes the pack and index. It resolves any pending deltas and
// writes out the index file.
//
// It also returns the packfile's hash. A packfile's name is derived from the
// sorted hashing of all object names.
func (indexer *Indexer) Commit() (*Oid, error) {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	ret := C.git_indexer_commit(indexer.ptr, &indexer.stats)
	if ret < 0 {
		return nil, MakeGitError(ret)
	}
	id := newOidFromC(C.git_indexer_hash(indexer.ptr))
	runtime.KeepAlive(indexer)
	return id, nil
}
// Free frees the indexer and its resources.
// It also clears the finalizer installed by NewIndexer so the native handle
// is not freed twice.
func (indexer *Indexer) Free() {
	untrackCallbacksPayload(&indexer.ccallbacks)
	runtime.SetFinalizer(indexer, nil)
	C.git_indexer_free(indexer.ptr)
}
|
package main
import (
"fmt"
"time"
)
// main launches a() in a goroutine (whose panic is recovered inside b) while
// the main goroutine keeps printing a counter.
func main() {
	go a()
	for counter := 1; counter <= 5; counter++ {
		fmt.Println(counter)
		time.Sleep(time.Millisecond)
	}
}
// a registers b as a deferred call, then panics; b recovers the panic, so a
// returns normally to its caller.
func a() {
	defer b()
	panic("A test panic!")
}

// b recovers an in-flight panic (it only works because it runs as a deferred
// call) and prints what was recovered.
func b() {
	if r := recover(); r != nil {
		fmt.Println("Recover panic demo:", r)
	}
}
|
package main
import (
"bartenderAsFunction/model"
"reflect"
"testing"
"bartenderAsFunction/testUtils"
"github.com/stretchr/testify/assert"
"github.com/aws/aws-lambda-go/events"
"encoding/json"
)
// Test_serveCommand table-tests serveCommand: the item whose name matches
// toServe must come back with Served set; all other items stay unchanged.
func Test_serveCommand(t *testing.T) {
	type args struct {
		items *[]model.Item
		toServe string
	}
	tests := []struct {
		name string
		args args
		wantItems []model.Item
	}{
		{"no items", args{&[]model.Item{}, "aaa"}, []model.Item{}},
		{"1 item, not equal", args{&[]model.Item{{Amount: 1, Name: "bbb", Served: false}}, "aaa"}, []model.Item{{Amount: 1, Name: "bbb", Served: false}}},
		{"1 item, equal", args{&[]model.Item{{Amount: 1, Name: "bbb", Served: false}}, "bbb"}, []model.Item{{Amount: 1, Name: "bbb", Served: true}}},
		{"1 item equal, 1 not equal", args{&[]model.Item{{Amount: 1, Name: "bbb", Served: false}, {Amount: 1, Name: "aaa", Served: false}}, "bbb"}, []model.Item{{Amount: 1, Name: "bbb", Served: true}, {Amount: 1, Name: "aaa", Served: false}}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// serveCommand mutates items in place; compare against the want slice.
			if serveCommand(tt.args.items, tt.args.toServe); !reflect.DeepEqual(*tt.args.items, tt.wantItems) {
				t.Errorf("%s getItemsFromMap() = %v, want %v", tt.name, tt.args.items, tt.wantItems)
			}
		})
	}
}
// TestHandlerShouldReturnNoCommand verifies that a request for an unknown
// command id yields the "not available" message with HTTP 200.
func TestHandlerShouldReturnNoCommand(t *testing.T) {
	mock := testUtils.CommandConnectionMock{Command: model.Command{IdCommand: "111"}}
	DataConnectionManager = &mock
	event := events.APIGatewayProxyRequest{PathParameters: map[string]string{"idCommand": "1", "type": "beer"}}
	response, _ := Handler(event)
	assert.Equal(t, response.Body, "not available command to serve")
	assert.Equal(t, response.StatusCode, 200)
}
// TestHandlerShouldReturnCommand verifies that serving an existing item
// returns the full command with that item marked Served.
func TestHandlerShouldReturnCommand(t *testing.T) {
	item := model.Item{Name: "1664", Amount: 1, Served: false}
	mock := testUtils.CommandConnectionMock{Command: model.Command{IdCommand: "111", Beer: []model.Item{item}}}
	DataConnectionManager = &mock
	body, _ := json.Marshal(item)
	event := events.APIGatewayProxyRequest{PathParameters: map[string]string{"idCommand": "111", "type": "beer"},
		Body: string(body),
	}
	response, _ := Handler(event)
	// The expected response body is the command with the item now served.
	item.Served = true
	command := model.Command{IdCommand: "111", Beer: []model.Item{item}}
	resp, _ := json.Marshal(command)
	assert.Equal(t, response.Body, string(resp))
	assert.Equal(t, response.StatusCode, 200)
}
|
/*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package cluster
import (
"context"
"github.com/Tencent/bk-bcs/bcs-common/common/blog"
cmproto "github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/api/clustermanager"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/actions"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/clusterops"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/common"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/store"
"github.com/Tencent/bk-bcs/bcs-services/bcs-cluster-manager/internal/utils"
)
// UpdateVirtualClusterQuotaAction action for update virtual cluster namespace quota
type UpdateVirtualClusterQuotaAction struct {
	ctx     context.Context                        // request-scoped context, set in Handle
	model   store.ClusterManagerModel              // cluster persistence layer
	k8sOp   *clusterops.K8SOperator                // kubernetes operator used to apply the quota
	cluster *cmproto.Cluster                       // target cluster, loaded during Handle
	req     *cmproto.UpdateVirtualClusterQuotaReq  // incoming request
	resp    *cmproto.UpdateVirtualClusterQuotaResp // response being populated
}
// NewUpdateVirtualClusterQuotaAction update virtual cluster namespace quota action
func NewUpdateVirtualClusterQuotaAction(model store.ClusterManagerModel,
	k8sOp *clusterops.K8SOperator) *UpdateVirtualClusterQuotaAction {
	action := &UpdateVirtualClusterQuotaAction{}
	action.model = model
	action.k8sOp = k8sOp
	return action
}
// validate runs the request's generated validation rules.
func (ca *UpdateVirtualClusterQuotaAction) validate() error {
	return ca.req.Validate()
}
// setResp fills the response code/message and derives the success flag from
// whether code equals the manager's success code.
func (ca *UpdateVirtualClusterQuotaAction) setResp(code uint32, msg string) {
	ca.resp.Code = code
	ca.resp.Message = msg
	ca.resp.Result = (code == common.BcsErrClusterManagerSuccess)
}
// Handle processes an UpdateVirtualClusterQuotaReq: it validates the request,
// loads the target cluster, applies the new resource quota to the virtual
// cluster's backing namespace, and persists the updated quota into the
// cluster's extra info. The outcome is reported through resp via setResp.
func (ca *UpdateVirtualClusterQuotaAction) Handle(ctx context.Context, req *cmproto.UpdateVirtualClusterQuotaReq,
	resp *cmproto.UpdateVirtualClusterQuotaResp) {
	if req == nil || resp == nil {
		// Fixed log text: the original said "create virtual cluster" although
		// this action updates a virtual cluster quota.
		blog.Errorf("update virtual cluster quota failed, req or resp is empty")
		return
	}
	ca.ctx = ctx
	ca.req = req
	ca.resp = resp
	var err error
	// validate the incoming request
	if err = ca.validate(); err != nil {
		ca.setResp(common.BcsErrClusterManagerInvalidParameter, err.Error())
		return
	}
	ca.cluster, err = actions.GetClusterInfoByClusterID(ca.model, ca.req.ClusterID)
	if err != nil {
		ca.setResp(common.BcsErrClusterManagerDBOperation, err.Error())
		return
	}
	// decode the namespace info stored in the cluster's extra info
	var nsInfo cmproto.NamespaceInfo
	err = utils.ToStringObject([]byte(ca.cluster.ExtraInfo[common.VClusterNamespaceInfo]), &nsInfo)
	if err != nil {
		ca.setResp(common.BcsErrClusterManagerCloudProviderErr, err.Error())
		return
	}
	// update quota on the backing namespace
	err = ca.k8sOp.UpdateResourceQuota(ctx, ca.cluster.SystemID, clusterops.ResourceQuotaInfo{
		Name:        nsInfo.Name,
		CpuRequests: ca.req.Quota.CpuRequests,
		CpuLimits:   ca.req.Quota.CpuLimits,
		MemRequests: ca.req.Quota.MemoryRequests,
		MemLimits:   ca.req.Quota.MemoryLimits,
	})
	if err != nil {
		ca.setResp(common.BcsErrClusterManagerCloudProviderErr, err.Error())
		return
	}
	// persist the new quota back onto the cluster record
	nsInfo.Quota.CpuRequests = ca.req.Quota.CpuRequests
	nsInfo.Quota.CpuLimits = ca.req.Quota.CpuLimits
	nsInfo.Quota.MemoryRequests = ca.req.Quota.MemoryRequests
	nsInfo.Quota.MemoryLimits = ca.req.Quota.MemoryLimits
	ca.cluster.ExtraInfo[common.VClusterNamespaceInfo] = utils.ToJSONString(nsInfo)
	err = ca.model.UpdateCluster(ctx, ca.cluster)
	if err != nil {
		ca.setResp(common.BcsErrClusterManagerDBOperation, err.Error())
		return
	}
	ca.setResp(common.BcsErrClusterManagerSuccess, common.BcsErrClusterManagerSuccessStr)
}
|
package handler
import (
"dappapi/models"
jwt "dappapi/pkg/jwtauth"
"dappapi/tools"
"log"
"net/http"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/mojocn/base64Captcha"
"github.com/mssola/user_agent"
)
var store = base64Captcha.DefaultMemStore
// PayloadFunc maps the user/role pair produced by Authenticator into the JWT
// claims embedded in the token. Any other data shape yields empty claims.
func PayloadFunc(data interface{}) jwt.MapClaims {
	if v, ok := data.(map[string]interface{}); ok {
		u, _ := v["user"].(models.SysUser)
		r, _ := v["role"].(models.SysRole)
		return jwt.MapClaims{
			jwt.IdentityKey: u.UserId,
			jwt.RoleIdKey: r.RoleId,
			jwt.NiceKey: u.Username,
			jwt.RoleKey: r.Name,
			jwt.ChannelKey: u.Channel,
		}
	}
	return jwt.MapClaims{}
}
// IdentityHandler extracts the JWT claims from the request and re-shapes them
// into the identity map used by downstream handlers.
// NOTE(review): "IdentityKey" and "UserId" both read the "identity" claim —
// presumably intentional duplication; confirm both keys are consumed.
func IdentityHandler(c *gin.Context) interface{} {
	claims := jwt.ExtractClaims(c)
	return map[string]interface{}{
		"IdentityKey": claims["identity"],
		"UserName": claims["nice"],
		"RoleKey": claims["rolekey"],
		"UserId": claims["identity"],
		"RoleIds": claims["roleid"],
		"DataScope": claims["datascope"],
		"Channel": claims["channel"],
	}
}
// @Summary 登陆
// @Description 获取token
// LoginHandler can be used by clients to get a jwt token.
// Payload needs to be json in the form of {"username": "USERNAME", "password": "PASSWORD"}.
// Reply will be of the form {"token": "TOKEN"}.
// @Accept application/json
// @Product application/json
// @Param username body models.Login true "Add account"
// @Success 200 {string} string "{"code": 200, "expire": "2019-08-07T12:45:48+08:00", "token": ".eyJleHAiOjE1NjUxNTMxNDgsImlkIjoiYWRtaW4iLCJvcmlnX2lhdCI6MTU2NTE0OTU0OH0.-zvzHvbg0A" }"
// @Router /login [post]
func Authenticator(c *gin.Context) (interface{}, error) {
	var loginVals models.Login
	var loginlog models.LoginLog
	// Capture client/browser metadata for the login audit record.
	ua := user_agent.New(c.Request.UserAgent())
	loginlog.Ipaddr = c.ClientIP()
	location := tools.GetLocation(c.ClientIP())
	loginlog.Location = location
	loginlog.Ltime = tools.GetCurrntTime()
	loginlog.Status = "0"
	loginlog.Remark = c.Request.UserAgent()
	browserName, browserVersion := ua.Browser()
	loginlog.Browser = browserName + " " + browserVersion
	loginlog.Os = ua.OS()
	loginlog.Msg = "登录成功"
	loginlog.Platform = ua.Platform()
	if err := c.ShouldBind(&loginVals); err != nil {
		loginlog.Status = "1"
		loginlog.Msg = "数据解析失败"
		loginlog.Username = loginVals.Username
		loginlog.Create()
		return nil, jwt.ErrMissingLoginValues
	}
	loginlog.Username = loginVals.Username
	// NOTE(review): e is only inspected after user.Uuid is used below; when
	// GetUser fails, the verification-code check runs against a zero-valued
	// user — confirm this ordering is intended.
	user, role, e := loginVals.GetUser()
	// Verify the time-based code derived from the user's secret.
	secret := user.Uuid
	code, _ := tools.NewGoogleAuth().GetCode(secret)
	if code != loginVals.Code {
		loginlog.Status = "1"
		loginlog.Msg = "验证码错误"
		loginlog.Create()
		return nil, jwt.ErrInvalidVerificationode
	}
	if e == nil {
		loginlog.Create()
		return map[string]interface{}{"user": user, "role": role}, nil
	} else {
		loginlog.Status = "1"
		loginlog.Msg = "登录失败"
		loginlog.Create()
		log.Println(e.Error())
	}
	return nil, jwt.ErrFailedAuthentication
}
// AuthCp loads a user (and its role) from a JSON-bound SysUser payload.
// tools.HasError aborts the request with HTTP 500 on any failure.
func AuthCp(c *gin.Context) interface{} {
	var sysuser models.SysUser
	err := c.ShouldBindWith(&sysuser, binding.JSON)
	tools.HasError(err, "参数错误, 错误码 -1", 500)
	user, err1 := sysuser.GetOne()
	tools.HasError(err1, "返回数据错误, 错误码 -2", 500)
	// Resolve the user's role record by its role id.
	var role models.SysRole
	role.RoleId = user.Roleid
	roleData, err2 := role.GetOne()
	tools.HasError(err2, "返回数据错误, 错误码 -3", 500)
	return map[string]interface{}{"user": user, "role": roleData}
}
// @Summary 退出登录
// @Description 获取token
// LoginHandler can be used by clients to get a jwt token.
// Reply will be of the form {"token": "TOKEN"}.
// @Accept application/json
// @Product application/json
// @Success 200 {string} string "{"code": 200, "msg": "成功退出系统" }"
// @Router /logout [post]
// @Security
// LogOut records a logout entry in the login log and replies with a success
// JSON payload.
func LogOut(c *gin.Context) {
	var loginlog models.LoginLog
	// Capture client/browser metadata for the audit record.
	ua := user_agent.New(c.Request.UserAgent())
	loginlog.Ipaddr = c.ClientIP()
	location := tools.GetLocation(c.ClientIP())
	loginlog.Location = location
	loginlog.Ltime = tools.GetCurrntTime()
	loginlog.Status = "0"
	loginlog.Remark = c.Request.UserAgent()
	browserName, browserVersion := ua.Browser()
	loginlog.Browser = browserName + " " + browserVersion
	loginlog.Os = ua.OS()
	loginlog.Platform = ua.Platform()
	loginlog.Username = tools.GetUserName(c)
	loginlog.Msg = "退出成功"
	loginlog.Create()
	c.JSON(http.StatusOK, gin.H{
		"code": 200,
		"msg": "退出成功",
	})
}
// Authorizator accepts any request whose JWT payload decodes to the expected
// user/role map, stashing role and user identifiers on the gin context for
// later handlers; any other payload is rejected.
func Authorizator(data interface{}, c *gin.Context) bool {
	if v, ok := data.(map[string]interface{}); ok {
		u, _ := v["user"].(models.SysUser)
		r, _ := v["role"].(models.SysRole)
		c.Set("role", r.Name)
		c.Set("roleIds", r.RoleId)
		c.Set("userId", u.UserId)
		c.Set("userName", u.UserName)
		return true
	}
	return false
}
// Unauthorized renders an authentication failure as JSON.
// NOTE(review): it replies with HTTP 200 and carries the real code in the
// body — presumably a frontend convention; confirm before changing.
func Unauthorized(c *gin.Context, code int, message string) {
	c.JSON(http.StatusOK, gin.H{
		"code": code,
		"msg": message,
	})
}
|
package main
import "github.com/PuerkitoBio/fetchbot"
import (
"fmt"
"github.com/DennisDenuto/property-price-collector/data/training/dropbox"
"github.com/DennisDenuto/property-price-collector/site"
pphc "github.com/DennisDenuto/property-price-collector/site/propertypricehistorycom"
log "github.com/Sirupsen/logrus"
"os"
"strconv"
"time"
)
// main wires up the fetchbot crawler for propertypricehistory.com over a
// postcode range taken from the environment and persists scraped properties
// to a dropbox-backed repository. It exits non-zero on any missing
// configuration or on a persistence failure.
func main() {
	log.SetLevel(log.DebugLevel)
	// Required configuration from the environment: a dropbox token plus the
	// postcode range to crawl.
	var dropboxToken string
	var found bool
	if dropboxToken, found = os.LookupEnv("DROPBOX_TOKEN"); !found {
		log.Error("missing DROPBOX_TOKEN ENV")
		os.Exit(1)
	}
	mux := fetchbot.NewMux()
	//2000 2155
	minPostcode, err := strconv.Atoi(os.Getenv("START_POSTCODE"))
	if err != nil {
		log.Error("missing START_POSTCODE ENV")
		os.Exit(1)
	}
	maxPostcode, err := strconv.Atoi(os.Getenv("END_POSTCODE"))
	if err != nil {
		log.Error("missing END_POSTCODE ENV")
		os.Exit(1)
	}
	// AutoClose shuts the queue down once all hosts are idle.
	fetcher := fetchbot.New(mux)
	fetcher.AutoClose = true
	queue := fetcher.Start()
	pphcFetcher := pphc.NewPropertyPriceHistoryCom("propertypricehistory.com", minPostcode, maxPostcode, &site.PostcodeSuburbStore{})
	pphcFetcher.SetupMux(mux)
	// Seed the crawl with the fetcher's initial URLs.
	for _, seed := range pphcFetcher.SeedUrls {
		queue.SendStringGet(seed)
	}
	repo := dropbox.NewPropertyHistoryDataRepo(dropboxToken)
	// Block until all scraped properties have been persisted (or skipped).
	err = saveProperties(pphcFetcher, repo, queue)
	if err != nil {
		log.WithError(err).Error("saving properties returned an error.")
		os.Exit(1)
	}
	log.Debug("exiting now")
}
// saveProperties consumes properties produced by the fetcher and writes each
// one to the dropbox repo, retrying writes for up to a minute (10s apart)
// before skipping the property. It returns nil once the property channel
// is closed.
func saveProperties(pphcFetcher pphc.PropertyPriceHistoryCom, repo *dropbox.PropertyHistoryDataRepo, queue *fetchbot.Queue) error {
	for {
		select {
		case property, ok := <-pphcFetcher.GetProperties():
			if !ok {
				log.Debug("no more properties to save. exiting")
				return nil
			}
			err := retryDuring(1*time.Minute, 10*time.Second, func() error {
				err := repo.Add(property)
				if err != nil {
					log.WithError(err).Error("adding property to repo errored")
					return err
				}
				log.Infof("%+#v", property)
				return nil
			})
			if err != nil {
				// Best-effort: a property that cannot be stored is skipped.
				log.WithError(err).Error("unable to write property into datastore (SKIPPING property)")
			}
		case <-queue.Done():
			// NOTE(review): this branch does not return; the loop only exits
			// when the properties channel is closed — confirm Done() leads
			// pphcFetcher to close that channel.
			log.Info("Finished: no more urls to fetch.")
			pphcFetcher.Done()
		}
	}
}
// retryDuring invokes callback until it succeeds, sleeping `sleep` between
// attempts and giving up once the total elapsed time exceeds `duration`.
// It returns nil on the first success, or an error summarizing the attempt
// count, elapsed time, and last failure.
func retryDuring(duration time.Duration, sleep time.Duration, callback func() error) (err error) {
	start := time.Now()
	attempts := 0
	for {
		attempts++
		if err = callback(); err == nil {
			return nil
		}
		elapsed := time.Since(start)
		if elapsed > duration {
			return fmt.Errorf("after %d attempts (during %s), last error: %s", attempts, elapsed, err)
		}
		time.Sleep(sleep)
		log.WithError(err).Debug("retrying after error")
	}
}
|
package main
import "fmt"
// Person holds basic identifying information about an individual.
type Person struct {
	LastName  string
	FirstName string
	Age       int
}

// String implements fmt.Stringer, rendering the person as
// "Last:First : Age: N".
func (p Person) String() string {
	return fmt.Sprintf("%s:%s : Age: %d", p.LastName, p.FirstName, p.Age)
}
// main builds a sample Person and prints its string representation.
func main() {
	person := Person{
		LastName:  "hoge",
		FirstName: "fuga",
		Age:       20,
	}
	fmt.Println(person.String())
}
|
/*
* KSQL
*
* This is a swagger spec for ksqldb
*
* API version: 1.0.0
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package swagger
// Statement is the request payload for submitting a KSQL statement.
type Statement struct {
	// Ksql is the statement text.
	Ksql string `json:"ksql,omitempty"`
	// StreamsProperties carries optional per-statement properties
	// (presumably Kafka Streams overrides — confirm against the ksqldb spec).
	StreamsProperties *StatementStreamsProperties `json:"streamsProperties,omitempty"`
}
|
/*
* Copyright 2017 - 2019 KB Kontrakt LLC - All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package utils
import (
"errors"
)
type (
	// EventHandler is the callback signature invoked for each emitted event.
	EventHandler = func(args ...interface{}) error
	// Event is a minimal synchronous publish/subscribe primitive.
	Event interface {
		On(handler EventHandler)
		Emit(args ...interface{}) error
	}
	// eventImpl is the default slice-backed Event implementation.
	eventImpl struct {
		handlers []EventHandler
	}
)

// Emit invokes every registered handler in registration order, stopping at
// (and returning) the first non-nil error.
func (ev *eventImpl) Emit(args ...interface{}) error {
	for _, handler := range ev.handlers {
		if err := handler(args...); err != nil {
			return err
		}
	}
	return nil
}

// On registers an additional handler for this event.
func (ev *eventImpl) On(handler EventHandler) {
	ev.handlers = append(ev.handlers, handler)
}
// WrapOnSingleArg adapts handler so it receives the single emitted argument
// after it has been transformed by mFunc.
// The returned handler errors when anything other than exactly one argument
// is emitted.
// NOTE(review): a marshal failure panics instead of returning the error —
// confirm this is intentional.
func WrapOnSingleArg(mFunc MarshalFunc, handler EventHandler) EventHandler {
	return func(args ...interface{}) error {
		if len(args) != 1 {
			return errors.New("failed listen event: not single argument was passed")
		}
		data, err := mFunc(args[0])
		if err != nil {
			panic(err)
		}
		return handler(data)
	}
}
// NewSyncEvent creates default implementation with room reserved for a
// single handler.
func NewSyncEvent() Event {
	return &eventImpl{handlers: make([]EventHandler, 0, 1)}
}
|
package storage
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/naelyn/go-docker-registry/Godeps/_workspace/src/github.com/golang/glog"
)
func unmarshalJson(r io.Reader, v interface{}) error {
b, err := ioutil.ReadAll(r)
if err == nil {
err = json.Unmarshal(b, v)
}
return err
}
func fileExists(path string) (bool, error) {
_, err := os.Stat(path)
if os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
} else {
return true, nil
}
}
// writeFileExclusive writes r to path only when the file does not already
// exist.
// returns (true, nil) if already existed
func writeFileExclusive(path string, r io.Reader) (bool, error) {
	if _, err := os.Stat(path); err == nil {
		return true, nil
	} else if !os.IsNotExist(err) {
		return false, err
	}
	err := writeFile(path, r)
	return false, err
}
// writeFileIfChanged writes data to path only when the on-disk contents
// differ from data (or the file does not exist yet).
func writeFileIfChanged(path string, data []byte) error {
	existingData, err := ioutil.ReadFile(path)
	switch {
	case os.IsNotExist(err):
		// No current file: fall through and write it.
	case err != nil:
		return err
	case bytes.Equal(data, existingData):
		if glog.V(2) {
			glog.Infof("path [%s] already has what it needs", path)
		}
		return nil
	}
	return writeFile(path, bytes.NewReader(data))
}
// TODO(rboyer): why was this ReadCloser if it never called Close()?
// writeFile atomically writes r to path: the data is streamed into a temp
// file in the destination directory, fsynced, closed, and then renamed over
// path. On any failure the temp file is removed and the error returned.
func writeFile(path string, r io.Reader) error {
	started := time.Now()
	if glog.V(2) {
		glog.Info("writing to ", path)
	}
	dir := filepath.Dir(path)
	name := filepath.Base(path)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return err
	}
	// Temp file lives in the target dir so the final rename stays on-device.
	f, err := ioutil.TempFile(dir, name+".tmp")
	if err != nil {
		return err
	}
	written, err := io.Copy(f, r)
	if err == nil {
		err = f.Sync()
	}
	// Close always runs; its error is kept only if nothing failed earlier.
	if closeErr := f.Close(); err == nil {
		err = closeErr
	}
	if err != nil {
		// clean up after ourselves
		_ = os.Remove(f.Name())
	}
	if err == nil {
		if glog.V(2) {
			glog.Info(fmt.Sprintf("Wrote %d bytes in %.06f", written, time.Since(started).Seconds()))
		}
		// Publish atomically; remove the temp file if the rename itself fails.
		if err = os.Rename(f.Name(), path); err != nil {
			_ = os.Remove(f.Name())
		}
	}
	return err
}
|
package server
import (
"errors"
"net/http"
"github.com/calvinmclean/automated-garden/garden-app/pkg"
"github.com/rs/xid"
)
// PlantRequest wraps a Plant into a request so we can handle Bind/Render in this package
type PlantRequest struct {
	*pkg.Plant
}

// Bind is used to make this struct compatible with the go-chi webserver for
// reading incoming JSON requests. It validates the required fields and
// rejects a client-supplied garden ID.
func (p *PlantRequest) Bind(r *http.Request) error {
	switch {
	case p == nil || p.Plant == nil:
		return errors.New("missing required Plant fields")
	case p.WateringStrategy == (pkg.WateringStrategy{}):
		return errors.New("missing required watering_strategy field")
	case p.WateringStrategy.Interval == "":
		return errors.New("missing required watering_strategy.interval field")
	case p.WateringStrategy.WateringAmount == 0:
		return errors.New("missing required watering_strategy.watering_amount field")
	case p.Name == "":
		return errors.New("missing required name field")
	case p.GardenID != xid.NilID():
		return errors.New("manual specification of garden ID is not allowed")
	}
	return nil
}
// AggregateActionRequest wraps a AggregateAction into a request so we can handle Bind/Render in this package
type AggregateActionRequest struct {
	*pkg.AggregateAction
}

// Bind is used to make this struct compatible with our REST API implemented
// with go-chi. It verifies the request carries at least one action.
func (action *AggregateActionRequest) Bind(r *http.Request) error {
	// action.AggregateAction is nil when no action fields were sent; also
	// reject requests naming neither a water nor a stop action.
	if action == nil || action.AggregateAction == nil {
		return errors.New("missing required action fields")
	}
	if action.Water == nil && action.Stop == nil {
		return errors.New("missing required action fields")
	}
	return nil
}
// GardenRequest wraps a Garden into a request so we can handle Bind/Render in this package
type GardenRequest struct {
	*pkg.Garden
}

// Bind is used to make this struct compatible with the go-chi webserver for
// reading incoming JSON requests. It requires a name and forbids embedded
// Plants.
func (g *GardenRequest) Bind(r *http.Request) error {
	switch {
	case g == nil || g.Garden == nil:
		return errors.New("missing required Garden fields")
	case g.Name == "":
		return errors.New("missing required name field")
	case len(g.Plants) > 0:
		return errors.New("cannot add or modify Plants with this request")
	}
	return nil
}
|
package rpn
import (
"strconv"
"unicode/utf8"
)
// operator is a binary integer operation.
type operator func(int, int) int
// operations maps each supported operator rune to its function.
var operations = map[rune]operator{
	'+': func(left, right int) int {
		return left + right
	},
	'-': func(left, right int) int {
		return left - right
	},
	'*': func(left, right int) int {
		return left * right
	},
	'/': func(left, right int) int {
		return left / right
	},
}
// operators is a string containing every supported operator rune.
// NOTE(review): built by map iteration, so its character order is
// nondeterministic across runs — safe only for membership checks.
var operators = func() (result string) {
	for operator := range operations {
		result += string(operator)
	}
	return
}()
//Calculate is a function to calculate string expression using reverse polish notation and return result as int64
// It panics when an operator lacks two operands or a token is not a number;
// a nil token array yields 0.
func Calculate(expression string) int {
	var expressionStack intStack
	expressionArray := splitExpression(expression)
	if expressionArray == nil {
		return 0
	}
	for _, token := range expressionArray {
		symbol, _ := utf8.DecodeRuneInString(token)
		if operation, ok := operations[symbol]; ok {
			if expressionStack.size() < 2 {
				panic("Operator requires two arguments")
			}
			// Bug fix: the first pop yields the TOP of the stack, which is the
			// RIGHT operand in RPN. The original passed the pops in (top,
			// second) order, so "5 3 -" evaluated as 3-5 instead of 5-3.
			// Pop right, then left, and apply operation(left, right).
			// (Assumes intStack.pop returns the most recently pushed value —
			// confirm against the intStack definition.)
			right := expressionStack.pop()
			left := expressionStack.pop()
			expressionStack.push(operation(left, right))
		} else {
			value, err := strconv.Atoi(token)
			if err != nil {
				panic("Operand is not a number")
			}
			expressionStack.push(value)
		}
	}
	return expressionStack.pop()
}
|
package processtack
// MinStackLink is a linked-list node for a stack that tracks its minimum:
// the zero-valued head acts as a sentinel whose Next points at the top.
type MinStackLink struct {
	Min  int
	Val  int
	Next *MinStackLink
}

// ConstructorLink returns an empty min-stack (a zero-valued sentinel head).
func ConstructorLink() MinStackLink {
	return MinStackLink{}
}

// Push places x on top of the stack; the new node records the running
// minimum of the whole stack.
func (stack *MinStackLink) Push(x int) {
	node := &MinStackLink{Min: x, Val: x, Next: stack.Next}
	if stack.Next != nil && stack.Next.Min < x {
		// The previous minimum is still smaller; carry it forward.
		node.Min = stack.Next.Min
	}
	stack.Next = node
}

// Pop removes the top element; it is a no-op on an empty stack.
func (stack *MinStackLink) Pop() {
	if stack.Next == nil {
		return
	}
	stack.Next = stack.Next.Next
}

// Top returns the value on top of the stack, or 0 when the stack is empty.
func (stack *MinStackLink) Top() int {
	if stack.Next == nil {
		return 0
	}
	return stack.Next.Val
}

// GetMin returns the smallest value on the stack, or the sentinel
// (1<<31)-1 (max int32) when the stack is empty.
func (stack *MinStackLink) GetMin() int {
	if stack.Next == nil {
		return (1 << 31) - 1
	}
	return stack.Next.Min
}
|
package database
import (
"gopkg.in/mgo.v2"
"themis/utils"
)
// Connect connects to the database, returning a database handle.
func Connect(configuration utils.Configuration) (*mgo.Session, *mgo.Database) {
session, err := mgo.DialWithInfo(&mgo.DialInfo {
Addrs: []string { configuration.DatabaseHost },
Username: configuration.DatabaseUser,
Password: configuration.DatabasePassword,
Database: configuration.DatabaseDatabase,
})
if err != nil {
panic(err)
}
utils.InfoLog.Printf("Connected to database at %v!\n", session.LiveServers())
return session, session.DB(configuration.DatabaseDatabase)
}
// Close closes the database connection.
// It closes the underlying session, which also invalidates every
// *mgo.Database handle derived from it.
func Close(database *mgo.Database) {
	database.Session.Close()
}
|
package main
import (
"fmt"
"github.com/mayflower/docker-ls/lib"
)
// versionCmd implements the "version" subcommand: it prints the library
// version and always succeeds.
type versionCmd struct{}

// execute prints the docker-ls library version to stdout. argv is accepted
// for command-interface compatibility but is ignored.
func (v versionCmd) execute(argv []string) error {
	fmt.Printf("version: %s\n", lib.Version())
	return nil
}

// newVersionCmd returns a ready-to-use versionCmd; the command is stateless.
func newVersionCmd() versionCmd {
	return versionCmd{}
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
gkehubpb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/gkehub/gkehub_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub"
)
// FeatureServer implements the gRPC interface for Feature. It is stateless:
// each request constructs a fresh client from the caller-supplied service
// account file.
type FeatureServer struct{}
// ProtoToGkehubFeatureResourceStateStateEnum converts a FeatureResourceStateStateEnum enum from its proto representation.
func ProtoToGkehubFeatureResourceStateStateEnum(e gkehubpb.GkehubFeatureResourceStateStateEnum) *gkehub.FeatureResourceStateStateEnum {
	if e == 0 {
		// 0 is the proto "unspecified" value; represent it as absent.
		return nil
	}
	if n, ok := gkehubpb.GkehubFeatureResourceStateStateEnum_name[int32(e)]; ok {
		// Strip the proto-side type-name prefix to recover the bare enum value.
		e := gkehub.FeatureResourceStateStateEnum(n[len("GkehubFeatureResourceStateStateEnum"):])
		return &e
	}
	return nil
}
// ProtoToGkehubFeatureStateStateCodeEnum converts a FeatureStateStateCodeEnum enum from its proto representation.
func ProtoToGkehubFeatureStateStateCodeEnum(e gkehubpb.GkehubFeatureStateStateCodeEnum) *gkehub.FeatureStateStateCodeEnum {
	if e == 0 {
		// 0 is the proto "unspecified" value; represent it as absent.
		return nil
	}
	if n, ok := gkehubpb.GkehubFeatureStateStateCodeEnum_name[int32(e)]; ok {
		// Strip the proto-side type-name prefix to recover the bare enum value.
		e := gkehub.FeatureStateStateCodeEnum(n[len("GkehubFeatureStateStateCodeEnum"):])
		return &e
	}
	return nil
}
// ProtoToGkehubFeatureResourceState converts a FeatureResourceState object from its proto representation.
func ProtoToGkehubFeatureResourceState(p *gkehubpb.GkehubFeatureResourceState) *gkehub.FeatureResourceState {
	if p == nil {
		return nil
	}
	obj := &gkehub.FeatureResourceState{
		State:        ProtoToGkehubFeatureResourceStateStateEnum(p.GetState()),
		HasResources: dcl.Bool(p.GetHasResources()),
	}
	return obj
}
// ProtoToGkehubFeatureSpec converts a FeatureSpec object from its proto representation.
func ProtoToGkehubFeatureSpec(p *gkehubpb.GkehubFeatureSpec) *gkehub.FeatureSpec {
	if p == nil {
		return nil
	}
	obj := &gkehub.FeatureSpec{
		Multiclusteringress: ProtoToGkehubFeatureSpecMulticlusteringress(p.GetMulticlusteringress()),
	}
	return obj
}
// ProtoToGkehubFeatureSpecMulticlusteringress converts a FeatureSpecMulticlusteringress object from its proto representation.
func ProtoToGkehubFeatureSpecMulticlusteringress(p *gkehubpb.GkehubFeatureSpecMulticlusteringress) *gkehub.FeatureSpecMulticlusteringress {
	if p == nil {
		return nil
	}
	obj := &gkehub.FeatureSpecMulticlusteringress{
		ConfigMembership: dcl.StringOrNil(p.GetConfigMembership()),
	}
	return obj
}
// ProtoToGkehubFeatureState converts a FeatureState object from its proto representation.
func ProtoToGkehubFeatureState(p *gkehubpb.GkehubFeatureState) *gkehub.FeatureState {
	if p == nil {
		return nil
	}
	obj := &gkehub.FeatureState{
		State: ProtoToGkehubFeatureStateState(p.GetState()),
	}
	return obj
}
// ProtoToGkehubFeatureStateState converts a FeatureStateState object from its proto representation.
func ProtoToGkehubFeatureStateState(p *gkehubpb.GkehubFeatureStateState) *gkehub.FeatureStateState {
	if p == nil {
		return nil
	}
	obj := &gkehub.FeatureStateState{
		Code:        ProtoToGkehubFeatureStateStateCodeEnum(p.GetCode()),
		Description: dcl.StringOrNil(p.GetDescription()),
		UpdateTime:  dcl.StringOrNil(p.GetUpdateTime()),
	}
	return obj
}
// ProtoToFeature converts a Feature resource from its proto representation.
// No nil guard is needed: proto getters tolerate a nil receiver and yield
// zero values, so a nil p produces an empty Feature.
// NOTE(review): FeatureToProto copies resource.Labels into the proto, but
// this converter never reads p.GetLabels() — confirm the asymmetry is
// intended by the code generator.
func ProtoToFeature(p *gkehubpb.GkehubFeature) *gkehub.Feature {
	obj := &gkehub.Feature{
		Name:          dcl.StringOrNil(p.GetName()),
		ResourceState: ProtoToGkehubFeatureResourceState(p.GetResourceState()),
		Spec:          ProtoToGkehubFeatureSpec(p.GetSpec()),
		State:         ProtoToGkehubFeatureState(p.GetState()),
		CreateTime:    dcl.StringOrNil(p.GetCreateTime()),
		UpdateTime:    dcl.StringOrNil(p.GetUpdateTime()),
		DeleteTime:    dcl.StringOrNil(p.GetDeleteTime()),
		Project:       dcl.StringOrNil(p.GetProject()),
		Location:      dcl.StringOrNil(p.GetLocation()),
	}
	return obj
}
// GkehubFeatureResourceStateStateEnumToProto converts a FeatureResourceStateStateEnum enum to its proto representation.
func GkehubFeatureResourceStateStateEnumToProto(e *gkehub.FeatureResourceStateStateEnum) gkehubpb.GkehubFeatureResourceStateStateEnum {
	if e == nil {
		return gkehubpb.GkehubFeatureResourceStateStateEnum(0)
	}
	// The proto enum value names carry the full "GkehubFeatureResourceStateStateEnum"
	// prefix (the matching ProtoTo* converter strips exactly that prefix), so the
	// same prefix must be restored here or the _value lookup never matches.
	if v, ok := gkehubpb.GkehubFeatureResourceStateStateEnum_value["GkehubFeatureResourceStateStateEnum"+string(*e)]; ok {
		return gkehubpb.GkehubFeatureResourceStateStateEnum(v)
	}
	return gkehubpb.GkehubFeatureResourceStateStateEnum(0)
}
// GkehubFeatureStateStateCodeEnumToProto converts a FeatureStateStateCodeEnum enum to its proto representation.
func GkehubFeatureStateStateCodeEnumToProto(e *gkehub.FeatureStateStateCodeEnum) gkehubpb.GkehubFeatureStateStateCodeEnum {
	if e == nil {
		return gkehubpb.GkehubFeatureStateStateCodeEnum(0)
	}
	// Restore the full "GkehubFeatureStateStateCodeEnum" prefix that the
	// matching ProtoTo* converter strips; without it the _value lookup fails.
	if v, ok := gkehubpb.GkehubFeatureStateStateCodeEnum_value["GkehubFeatureStateStateCodeEnum"+string(*e)]; ok {
		return gkehubpb.GkehubFeatureStateStateCodeEnum(v)
	}
	return gkehubpb.GkehubFeatureStateStateCodeEnum(0)
}
// GkehubFeatureResourceStateToProto converts a FeatureResourceState object to its proto representation.
func GkehubFeatureResourceStateToProto(o *gkehub.FeatureResourceState) *gkehubpb.GkehubFeatureResourceState {
	if o == nil {
		return nil
	}
	p := &gkehubpb.GkehubFeatureResourceState{}
	p.SetState(GkehubFeatureResourceStateStateEnumToProto(o.State))
	p.SetHasResources(dcl.ValueOrEmptyBool(o.HasResources))
	return p
}
// GkehubFeatureSpecToProto converts a FeatureSpec object to its proto representation.
func GkehubFeatureSpecToProto(o *gkehub.FeatureSpec) *gkehubpb.GkehubFeatureSpec {
	if o == nil {
		return nil
	}
	p := &gkehubpb.GkehubFeatureSpec{}
	p.SetMulticlusteringress(GkehubFeatureSpecMulticlusteringressToProto(o.Multiclusteringress))
	return p
}
// GkehubFeatureSpecMulticlusteringressToProto converts a FeatureSpecMulticlusteringress object to its proto representation.
func GkehubFeatureSpecMulticlusteringressToProto(o *gkehub.FeatureSpecMulticlusteringress) *gkehubpb.GkehubFeatureSpecMulticlusteringress {
	if o == nil {
		return nil
	}
	p := &gkehubpb.GkehubFeatureSpecMulticlusteringress{}
	p.SetConfigMembership(dcl.ValueOrEmptyString(o.ConfigMembership))
	return p
}
// GkehubFeatureStateToProto converts a FeatureState object to its proto representation.
func GkehubFeatureStateToProto(o *gkehub.FeatureState) *gkehubpb.GkehubFeatureState {
	if o == nil {
		return nil
	}
	p := &gkehubpb.GkehubFeatureState{}
	p.SetState(GkehubFeatureStateStateToProto(o.State))
	return p
}
// GkehubFeatureStateStateToProto converts a FeatureStateState object to its proto representation.
func GkehubFeatureStateStateToProto(o *gkehub.FeatureStateState) *gkehubpb.GkehubFeatureStateState {
	if o == nil {
		return nil
	}
	p := &gkehubpb.GkehubFeatureStateState{}
	p.SetCode(GkehubFeatureStateStateCodeEnumToProto(o.Code))
	p.SetDescription(dcl.ValueOrEmptyString(o.Description))
	p.SetUpdateTime(dcl.ValueOrEmptyString(o.UpdateTime))
	return p
}
// FeatureToProto converts a Feature resource to its proto representation.
func FeatureToProto(resource *gkehub.Feature) *gkehubpb.GkehubFeature {
	p := &gkehubpb.GkehubFeature{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetResourceState(GkehubFeatureResourceStateToProto(resource.ResourceState))
	p.SetSpec(GkehubFeatureSpecToProto(resource.Spec))
	p.SetState(GkehubFeatureStateToProto(resource.State))
	p.SetCreateTime(dcl.ValueOrEmptyString(resource.CreateTime))
	p.SetUpdateTime(dcl.ValueOrEmptyString(resource.UpdateTime))
	p.SetDeleteTime(dcl.ValueOrEmptyString(resource.DeleteTime))
	p.SetProject(dcl.ValueOrEmptyString(resource.Project))
	p.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	// Copy the labels map so the proto does not alias the resource's map.
	mLabels := make(map[string]string, len(resource.Labels))
	for k, r := range resource.Labels {
		mLabels[k] = r
	}
	p.SetLabels(mLabels)
	return p
}
// applyFeature handles the gRPC request by passing it to the underlying Feature Apply() method.
// It converts the request resource to the DCL type, applies it with the
// given client, and converts the result back to proto.
func (s *FeatureServer) applyFeature(ctx context.Context, c *gkehub.Client, request *gkehubpb.ApplyGkehubFeatureRequest) (*gkehubpb.GkehubFeature, error) {
	p := ProtoToFeature(request.GetResource())
	res, err := c.ApplyFeature(ctx, p)
	if err != nil {
		return nil, err
	}
	r := FeatureToProto(res)
	return r, nil
}
// ApplyGkehubFeature handles the gRPC request by passing it to the underlying Feature Apply() method.
func (s *FeatureServer) ApplyGkehubFeature(ctx context.Context, request *gkehubpb.ApplyGkehubFeatureRequest) (*gkehubpb.GkehubFeature, error) {
	cl, err := createConfigFeature(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyFeature(ctx, cl, request)
}
// DeleteGkehubFeature handles the gRPC request by passing it to the underlying Feature Delete() method.
func (s *FeatureServer) DeleteGkehubFeature(ctx context.Context, request *gkehubpb.DeleteGkehubFeatureRequest) (*emptypb.Empty, error) {
	cl, err := createConfigFeature(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteFeature(ctx, ProtoToFeature(request.GetResource()))
}
// ListGkehubFeature handles the gRPC request by passing it to the underlying FeatureList() method.
// Results for the requested project/location are converted back to protos.
func (s *FeatureServer) ListGkehubFeature(ctx context.Context, request *gkehubpb.ListGkehubFeatureRequest) (*gkehubpb.ListGkehubFeatureResponse, error) {
	cl, err := createConfigFeature(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListFeature(ctx, request.GetProject(), request.GetLocation())
	if err != nil {
		return nil, err
	}
	var protos []*gkehubpb.GkehubFeature
	for _, r := range resources.Items {
		rp := FeatureToProto(r)
		protos = append(protos, rp)
	}
	p := &gkehubpb.ListGkehubFeatureResponse{}
	p.SetItems(protos)
	return p, nil
}
// createConfigFeature builds a gkehub client whose credentials come from the
// given service account file. ctx is accepted for signature symmetry with the
// handlers but is not used here.
func createConfigFeature(ctx context.Context, serviceAccountFile string) (*gkehub.Client, error) {
	// MixedCaps parameter name per Go convention (was service_account_file).
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return gkehub.NewClient(conf), nil
}
|
package amqp_kit
import (
"context"
"net/http"
"testing"
"github.com/streadway/amqp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
// TestNewError verifies that NewError populates code, status and message.
func TestNewError(t *testing.T) {
	e := NewError(`test message`, `test_message`, http.StatusBadRequest)
	// assert.Equal takes (t, expected, actual); expected goes first so that
	// failure messages are labelled correctly.
	assert.Equal(t, `test_message`, e.Code)
	assert.Equal(t, http.StatusBadRequest, e.StatusCode)
	assert.Equal(t, `test message`, e.Message)
}
// TestWrapErrorWithCode verifies that WrapError swaps in a new message while
// preserving the wrapped error's code and status, without mutating the
// original error.
func TestWrapErrorWithCode(t *testing.T) {
	e := NewError(`test message`, `test_message`, http.StatusInternalServerError)
	e1 := WrapError(e, `test message 1`)
	// assert.Equal takes (t, expected, actual).
	assert.Equal(t, `test message 1`, e1.Message)
	assert.Equal(t, `test message`, e.Message)
	assert.Equal(t, http.StatusInternalServerError, e1.StatusCode)
	assert.Equal(t, `test_message`, e1.Code)
}
// TestErrorWithCode_Error verifies that Error() returns the human-readable message.
func TestErrorWithCode_Error(t *testing.T) {
	e := NewError(`test message`, `test_message`, http.StatusBadRequest)
	assert.Equal(t, `test message`, e.Error())
}
// errSuite is a testify suite that exercises error responses over a live
// AMQP broker; dsn is built once in SetupSuite.
type errSuite struct {
	suite.Suite
	dsn string
}
// SetupSuite builds the broker DSN from the default local RabbitMQ
// guest/guest credentials; running the suite therefore requires a broker on
// 127.0.0.1:5672.
func (s *errSuite) SetupSuite() {
	s.dsn = MakeDsn(&Config{
		"127.0.0.1:5672",
		"guest",
		"guest",
		"",
	})
}
// TearDownSuite is a no-op; AMQP connections are owned by the individual tests.
func (s *errSuite) TearDownSuite() {}
// TestErrSuite runs the errSuite test suite.
func TestErrSuite(t *testing.T) {
	suite.Run(t, new(errSuite))
}
// TestErrResponse exercises the full request/response/error round trip over a
// live AMQP broker: one subscriber answers successfully, one returns an
// ErrorWithCode, and a third collects the published responses so the test can
// assert on the encoded payloads.
func (s *errSuite) TestErrResponse() {
	conn, err := amqp.Dial(s.dsn)
	s.Require().NoError(err)
	ch, err := conn.Channel()
	s.Require().NoError(err)
	// Declare the exchange/queue and bind the three routing keys used below.
	err = Declare(ch, `test`, `test`,
		[]string{`key.request.test`, `key.request-err.test`, `key.response.test`})
	s.Require().NoError(err)
	// dec1/dec2 hand deliveries from the consumer goroutines to the test body.
	dec1 := make(chan *amqp.Delivery)
	dec2 := make(chan *amqp.Delivery)
	subs := []SubscribeInfo{
		{
			Q:    `test`,
			Name: ``,
			Key:  `key.request.test`,
			E: func(ctx context.Context, request interface{}) (response interface{}, err error) {
				res := Response{
					Data: struct {
						Foo string `json:"foo"`
					}{Foo: "bar"},
				}
				return res, nil
			},
			Dec: func(i context.Context, delivery *amqp.Delivery) (request interface{}, err error) {
				s.Require().Equal(delivery.RoutingKey, `key.request.test`)
				dec1 <- delivery
				return delivery.Body, nil
			},
			Enc: EncodeJSONResponse,
			O: []SubscriberOption{
				SubscriberAfter(
					SetAckAfterEndpoint(false),
				),
				SubscriberBefore(
					SetPublishExchange(`test`),
					SetPublishKey(`key.response.test`),
				),
			},
		},
		{
			// This endpoint always fails; its error must be JSON-encoded into
			// the response published on key.response.test.
			Q:    `test`,
			Name: ``,
			Key:  `key.request-err.test`,
			E: func(ctx context.Context, request interface{}) (response interface{}, err error) {
				return nil, NewError(`err-message`, `err_message`, http.StatusBadRequest)
			},
			Dec: func(i context.Context, delivery *amqp.Delivery) (request interface{}, err error) {
				s.Require().Equal(delivery.RoutingKey, `key.request-err.test`)
				return delivery.Body, nil
			},
			Enc: EncodeJSONResponse,
			O: []SubscriberOption{
				SubscriberAfter(
					SetAckAfterEndpoint(false),
				),
				SubscriberBefore(
					SetPublishExchange(`test`),
					SetPublishKey(`key.response.test`),
				),
			},
		},
		{
			// Response collector: forwards every published response to dec2.
			Q:    `test`,
			Name: ``,
			Key:  `key.response.test`,
			E: func(ctx context.Context, request interface{}) (response interface{}, err error) {
				return nil, nil
			},
			Dec: func(i context.Context, delivery *amqp.Delivery) (request interface{}, err error) {
				s.Equal(delivery.RoutingKey, `key.response.test`)
				dec2 <- delivery
				return delivery.Body, nil
			},
			Enc: EncodeNopResponse,
			O: []SubscriberOption{
				SubscriberAfter(
					SetAckAfterEndpoint(false),
				),
			},
		},
	}
	ser := NewServer(subs, conn)
	err = ser.Serve()
	s.Require().NoError(err)
	pub := NewPublisher(ch)
	// Happy path: request is delivered, response carries the encoded data.
	err = pub.Publish("test", "key.request.test", `cor_1`, []byte(`{"f":"b"}`))
	s.NoError(err)
	d := <-dec1
	s.Equal(d.Body, []byte(`{"f":"b"}`))
	d = <-dec2
	s.Equal(d.Body, []byte(`{"data":{"foo":"bar"}}`))
	// Error path: the endpoint error is serialized into the response body.
	err = pub.Publish("test", "key.request-err.test", `cor_2`, []byte(`{"f":"b1"}`))
	s.NoError(err)
	d = <-dec2
	s.EqualValues(d.Body, []byte(`{"error":{"code":"err_message","message":"err-message","status_code":400}}`))
}
|
package model
const (
	// Port is the TCP listen address (host omitted, so all interfaces) used
	// by the HTTP server. gofmt-normalized spacing; the explicit string type
	// is kept for API compatibility.
	Port string = ":8899"
)
|
package helpers
import (
"time"
)
func GetCurrentTimeStamp() string{
return time.Now().Format("2006-01-02 15:04:05")
}
func GetImageDirectory() string{
return time.Now().Format("2006/01/02/15")
}
func CurrentTimeMillis() int64{
return time.Now().Unix()*1000
}
|
package main
import (
"fmt"
"github.com/hnakamur/go-scp"
"github.com/howeyc/gopass"
"golang.org/x/crypto/ssh"
"gopkg.in/yaml.v2"
"os"
"path/filepath"
"strings"
)
// Config mirrors config.yml: SSH server coordinates plus the remote source
// and local destination directory roots.
type Config struct {
	Server struct {
		User string `yaml:"user"`
		// int (was int8): int8 tops out at 127 and cannot represent common
		// SSH ports such as 2222 or anything up to 65535.
		Port int    `yaml:"port"`
		Host string `yaml:"host"`
	} `yaml:"server"`
	Path struct {
		Remote string `yaml:"remote"`
		Local  string `yaml:"local"`
	} `yaml:"path"`
}
// main synchronizes a remote directory tree to a local one over SCP: every
// remote file that does not yet exist locally is copied, creating local
// directories as needed.
func main() {
	var cfg Config
	readConfig(&cfg)
	client, err := connect(cfg)
	if err != nil {
		fmt.Println(err)
		// A failed connection is a failure: exit non-zero (was os.Exit(0),
		// which made scripted callers believe the sync succeeded).
		os.Exit(1)
	}
	remoteFolderList := findRemoteDirs(client, cfg.Path.Remote)
	for _, dir := range remoteFolderList {
		remoteFileList := findRemoteFiles(client, dir)
		for _, remoteFileName := range remoteFileList {
			// Rebase the remote path onto the local root.
			localFileName := cfg.Path.Local + remoteFileName[len(cfg.Path.Remote):]
			if fileNotExists(localFileName) {
				testDir := filepath.Dir(localFileName)
				if fileNotExists(testDir) {
					fmt.Println("Create dir " + testDir)
					// 0755: directories need the execute bit to be traversable;
					// the previous 0644 produced directories files could not be
					// created in.
					err = os.MkdirAll(testDir, 0755)
					if err != nil {
						fmt.Println(err)
					}
				}
				fmt.Println("Copy remote " + remoteFileName + " to local " + localFileName)
				err = scp.NewSCP(client).ReceiveFile(remoteFileName, localFileName)
				if err != nil {
					fmt.Println(err)
					os.Exit(1)
				}
			}
		}
	}
}
// readConfig loads config.yml from the working directory into cfg, exiting
// the process with status 2 if the file cannot be opened or decoded.
func readConfig(cfg *Config) {
	f, err := os.Open("config.yml")
	if err != nil {
		fmt.Println(err)
		os.Exit(2)
	}
	defer f.Close()
	decoder := yaml.NewDecoder(f)
	err = decoder.Decode(cfg)
	if err != nil {
		fmt.Println(err)
		os.Exit(2)
	}
	// TODO: validate all config fields here.
}
// connect prompts for a password on stdin and dials the SSH server described
// by cfg. Host-key verification is intentionally skipped
// (InsecureIgnoreHostKey), so the tool trusts whatever host it reaches.
func connect(cfg Config) (*ssh.Client, error) {
	println("password:")
	password, _ := gopass.GetPasswd()
	clientConfig := &ssh.ClientConfig{
		User:            cfg.Server.User,
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
		Auth: []ssh.AuthMethod{
			ssh.Password(string(password)),
		},
	}
	address := fmt.Sprintf("%s:%d", cfg.Server.Host, cfg.Server.Port)
	return ssh.Dial("tcp", address, clientConfig)
}
// findRemoteFiles lists the regular files directly inside dir on the remote
// host by running `find -maxdepth 1 -type f` over SSH.
// NOTE(review): the command error is discarded and output is split on
// whitespace, so paths containing spaces would be mangled — confirm remote
// paths never contain whitespace.
func findRemoteFiles(client *ssh.Client, dir string) []string {
	session, err := client.NewSession()
	if err != nil {
		panic(err.Error())
	}
	defer session.Close()
	cmd := fmt.Sprintf("find %s -maxdepth 1 -type f", dir)
	b, _ := session.CombinedOutput(cmd)
	return strings.Fields(string(b))
}
// findRemoteDirs recursively lists directories under remoteFolder on the
// remote host by running `find -type d` over SSH.
// NOTE(review): as with findRemoteFiles, the command error is discarded and
// output is whitespace-split — paths with spaces would break.
func findRemoteDirs(client *ssh.Client, remoteFolder string) []string {
	session, err := client.NewSession()
	if err != nil {
		panic(err.Error())
	}
	defer session.Close()
	cmd := fmt.Sprintf("find %s -type d", remoteFolder)
	b, _ := session.CombinedOutput(cmd)
	return strings.Fields(string(b))
}
func fileNotExists(filename string) bool {
_, err := os.Stat(filename)
return err != nil && os.IsNotExist(err)
}
|
package main
import (
"context"
"strings"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
)
// getClientIP extracts the originating client IP from the X-Forwarded-For
// header of an ALB request. When the request did not traverse a proxy chain
// the header holds a single address; otherwise the first entry is the client.
func getClientIP(request events.ALBTargetGroupRequest) string {
	fwdHeader := request.Headers["x-forwarded-for"]
	// Split on "," alone: the chain may be written "a, b" or "a,b" depending
	// on the proxy, so trim any surrounding whitespace afterwards. (The old
	// split on ", " returned the whole header for the space-less form.)
	IPs := strings.Split(fwdHeader, ",")
	clientIP := strings.TrimSpace(IPs[0])
	return clientIP
}
// HandleRequest handles an Application Load Balancer (ALB) target group event.
// It always answers 200 OK with the client IP (taken from X-Forwarded-For)
// as the plain-text body.
func HandleRequest(ctx context.Context, request events.ALBTargetGroupRequest) (events.ALBTargetGroupResponse, error) {
	httpResponse := events.ALBTargetGroupResponse{
		Body:              getClientIP(request),
		StatusCode:        200,
		StatusDescription: "200 OK",
		IsBase64Encoded:   false,
		Headers:           map[string]string{}}
	return httpResponse, nil
}
// main wires the handler into the AWS Lambda runtime.
func main() {
	lambda.Start(HandleRequest)
}
|
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
package admapi
// Endpoints for creating and getting Distributed key shares.
import (
"encoding/base64"
"fmt"
"net/http"
"time"
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/address"
dkg_pkg "github.com/iotaledger/wasp/packages/dkg"
"github.com/iotaledger/wasp/packages/tcrypto"
"github.com/iotaledger/wasp/packages/webapi/httperrors"
"github.com/iotaledger/wasp/packages/webapi/model"
"github.com/iotaledger/wasp/packages/webapi/routes"
"github.com/iotaledger/wasp/plugins/dkg"
"github.com/iotaledger/wasp/plugins/registry"
"github.com/labstack/echo/v4"
"github.com/pangpanglabs/echoswagger/v2"
"go.dedis.ch/kyber/v3"
)
// addDKSharesEndpoints registers the distributed-key-share admin endpoints
// (POST to generate a new key, GET to inspect an existing one) together with
// their Swagger request/response examples.
func addDKSharesEndpoints(adm echoswagger.ApiGroup) {
	requestExample := model.DKSharesPostRequest{
		PeerNetIDs:  []string{"wasp1:4000", "wasp2:4000", "wasp3:4000", "wasp4:4000"},
		PeerPubKeys: []string{base64.StdEncoding.EncodeToString([]byte("key"))},
		Threshold:   3,
		TimeoutMS:   10000,
	}
	infoExample := model.DKSharesInfo{
		Address:      address.Address{5, 6, 7, 8}.String(),
		SharedPubKey: base64.StdEncoding.EncodeToString([]byte("key")),
		PubKeyShares: []string{base64.StdEncoding.EncodeToString([]byte("key"))},
		Threshold:    3,
		PeerIndex:    nil,
	}
	adm.POST(routes.DKSharesPost(), handleDKSharesPost).
		AddParamBody(requestExample, "DKSharesPostRequest", "Request parameters", true).
		AddResponse(http.StatusOK, "DK shares info", infoExample, nil).
		SetSummary("Generate a new distributed key")
	adm.GET(routes.DKSharesGet(":sharedAddress"), handleDKSharesGet).
		AddParamPath("", "sharedAddress", "Address of the DK share (base58)").
		AddResponse(http.StatusOK, "DK shares info", infoExample, nil).
		SetSummary("Get distributed key properties")
}
// handleDKSharesPost runs a distributed key generation round across the
// requested peers and returns the resulting share's public information.
// Invalid input (bad body, mismatched peer lists, undecodable keys, DKG
// parameter errors) maps to 400; everything else to 500.
func handleDKSharesPost(c echo.Context) error {
	var req model.DKSharesPostRequest
	var err error
	var suite = dkg.DefaultNode().GroupSuite()
	if err = c.Bind(&req); err != nil {
		return httperrors.BadRequest("Invalid request body.")
	}
	if req.PeerPubKeys != nil && len(req.PeerNetIDs) != len(req.PeerPubKeys) {
		return httperrors.BadRequest("Inconsistent PeerNetIDs and PeerPubKeys.")
	}
	// Decode the optional base64-encoded peer public keys into kyber points.
	var peerPubKeys []kyber.Point = nil
	if req.PeerPubKeys != nil {
		peerPubKeys = make([]kyber.Point, len(req.PeerPubKeys))
		for i := range req.PeerPubKeys {
			peerPubKeys[i] = suite.Point()
			b, err := base64.StdEncoding.DecodeString(req.PeerPubKeys[i])
			if err != nil {
				return httperrors.BadRequest(fmt.Sprintf("Invalid PeerPubKeys[%v]=%v", i, req.PeerPubKeys[i]))
			}
			if err = peerPubKeys[i].UnmarshalBinary(b); err != nil {
				return httperrors.BadRequest(fmt.Sprintf("Invalid PeerPubKeys[%v]=%v", i, req.PeerPubKeys[i]))
			}
		}
	}
	var dkShare *tcrypto.DKShare
	dkShare, err = dkg.DefaultNode().GenerateDistributedKey(
		req.PeerNetIDs,
		peerPubKeys,
		req.Threshold,
		1*time.Second,
		3*time.Second,
		time.Duration(req.TimeoutMS)*time.Millisecond,
	)
	if err != nil {
		// Parameter problems are the client's fault; report them as 400.
		if _, ok := err.(dkg_pkg.InvalidParamsError); ok {
			return httperrors.BadRequest(err.Error())
		}
		return echo.NewHTTPError(http.StatusInternalServerError, err)
	}
	var response *model.DKSharesInfo
	if response, err = makeDKSharesInfo(dkShare); err != nil {
		return echo.NewHTTPError(http.StatusInternalServerError, err)
	}
	return c.JSON(http.StatusOK, response)
}
// handleDKSharesGet loads a previously generated DK share by its base58
// shared address and returns its public information as JSON.
func handleDKSharesGet(c echo.Context) error {
	var err error
	var dkShare *tcrypto.DKShare
	var sharedAddress address.Address
	if sharedAddress, err = address.FromBase58(c.Param("sharedAddress")); err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, err)
	}
	if dkShare, err = registry.DefaultRegistry().LoadDKShare(&sharedAddress); err != nil {
		return echo.NewHTTPError(http.StatusInternalServerError, err)
	}
	var response *model.DKSharesInfo
	if response, err = makeDKSharesInfo(dkShare); err != nil {
		return echo.NewHTTPError(http.StatusInternalServerError, err)
	}
	return c.JSON(http.StatusOK, response)
}
// makeDKSharesInfo converts a DK share into its wire model, base64-encoding
// the shared public key and each peer's public key share. It fails if any key
// cannot be marshalled to binary.
func makeDKSharesInfo(dkShare *tcrypto.DKShare) (*model.DKSharesInfo, error) {
	// The redundant `var err error` preceding this short declaration was
	// removed; err is introduced here directly.
	b, err := dkShare.SharedPublic.MarshalBinary()
	if err != nil {
		return nil, err
	}
	sharedPubKey := base64.StdEncoding.EncodeToString(b)
	pubKeyShares := make([]string, len(dkShare.PublicShares))
	for i := range dkShare.PublicShares {
		b, err := dkShare.PublicShares[i].MarshalBinary()
		if err != nil {
			return nil, err
		}
		pubKeyShares[i] = base64.StdEncoding.EncodeToString(b)
	}
	return &model.DKSharesInfo{
		Address:      dkShare.Address.String(),
		SharedPubKey: sharedPubKey,
		PubKeyShares: pubKeyShares,
		Threshold:    dkShare.T,
		PeerIndex:    dkShare.Index,
	}, nil
}
|
package domain
import (
"fmt"
"github.com/tokopedia/tdk/go/app/resource"
)
// Order is the domain entity for a purchase order. Invoice is filled in by
// CreateOrder before the order is persisted.
type Order struct {
	OrderID int
	ProductID int
	Quantity int
	Invoice string
}
// OrderDomain holds order business logic; all data access goes through the
// injected OrderResourceItf so the domain can be tested with a fake resource.
type OrderDomain struct {
	resource OrderResourceItf
}

// InitOrderDomain builds an OrderDomain backed by the given resource.
func InitOrderDomain(rsc OrderResourceItf) OrderDomain {
	return OrderDomain{
		resource: rsc,
	}
}
// IsValidStock reports whether the available stock for the order's product
// covers the requested quantity.
func (d OrderDomain) IsValidStock(order Order) bool {
	// first get stock from resource
	stock := d.resource.GetStock(order.ProductID)
	// and return the bool
	return stock >= order.Quantity
}
// CreateOrder assigns an invoice reference to the order and persists it via
// the order resource, returning any storage error.
func (d OrderDomain) CreateOrder(order *Order) error {
	// Generate the invoice reference before the order is stored.
	idinvoice := "123" // TODO: replace placeholder with a real invoice ID generator
	order.Invoice = fmt.Sprintf("INV/%s", idinvoice)
	// Propagate the insert error instead of discarding it: callers previously
	// got nil even when persistence failed.
	return d.resource.InsertOrder(order)
}
// OrderResourceItf abstracts order data access (stock lookup and order
// persistence) so the domain layer can be tested without real storage.
type OrderResourceItf interface {
	GetStock(productID int) int
	InsertOrder(*Order) error
}
// OrderResource is the storage-backed implementation of OrderResourceItf,
// holding the SQL and Redis handles it reads from and writes to.
type OrderResource struct {
	DB resource.SQLDB
	Redis resource.Redis
}
// GetStock returns the available stock for a product.
// Currently a stub: the Redis lookup is sketched in the comments and a fixed
// value of 10 is returned.
func (rsc OrderResource) GetStock(productID int) int {
	// we query Redis to get stock
	// example:
	// key := fmt.Sprintf("stock_%v", order.ProductID)
	// stock, _ = Redis.Int(rsc.Redis.Do("GET", key ))
	// lets return dummy stock
	return 10
}
// InsertOrder persists an order.
// Currently a stub: the SQL insert is sketched in the comments and nil is
// always returned.
func (rsc OrderResource) InsertOrder(order *Order) error {
	// apply order to database
	// example:
	// rsc.DB.Exec("INSERT INTO tbl_order values(?,?,?)", order)
	return nil
}
|
package models
import (
"testing"
"golang.org/x/crypto/nacl/secretbox"
)
// userTokenKey is a fixed 32-byte secretbox key used only by these tests.
var userTokenKey = []byte(`I3w8GGTsb9R3SKCvRzUd4aNasYIhX2IC`)
// TestUISession_EncryptandSetUserToken checks that encrypting a 4-byte token
// stores ciphertext of exactly the expected size.
func TestUISession_EncryptandSetUserToken(t *testing.T) {
	uis := UISession{}
	var key [32]byte
	copy(key[:], userTokenKey)
	if err := uis.EncryptandSetUserToken([]byte(`asdf`), key); err != nil {
		t.Fatalf("should have succeeded: %v", err)
	}
	// msg size + nonce + overhead
	// (4-byte plaintext + 24-byte nonce + secretbox's 16-byte auth tag).
	if n := len(uis.EncryptedUserToken); n != 4+24+secretbox.Overhead {
		t.Fatalf("bad size: %v", n)
	}
}
// TestUISession_GetUserToken round-trips a token through
// EncryptandSetUserToken and GetUserToken with a fixed key and checks the
// plaintext survives.
func TestUISession_GetUserToken(t *testing.T) {
	uis := UISession{}
	var key [32]byte
	copy(key[:], userTokenKey)
	if err := uis.EncryptandSetUserToken([]byte(`asdf`), key); err != nil {
		t.Fatalf("encrypt should have succeeded: %v", err)
	}
	tkn, err := uis.GetUserToken(key)
	if err != nil {
		t.Fatalf("should have succeeded: %v", err)
	}
	if tkn != "asdf" {
		// tkn is already a string (it compares against a string literal), so
		// the former string(tkn) conversion was redundant.
		t.Fatalf("unexpected token: %v", tkn)
	}
}
|
package protocol
import (
"bytes"
"github.com/giskook/mdas_client/base"
)
// RestartPacket is the device-restart command frame.
// Tid identifies the terminal and Serial is the frame sequence number.
type RestartPacket struct {
	Tid uint64
	Serial uint16
}
// Serialize encodes the restart command into its wire form: protocol header,
// a DWORD payload of 1, the frame length, a CRC over every byte after the
// first, and the terminating end-flag byte.
// NOTE(review): WriteLength appears to patch the length into the buffer after
// the payload is written — confirm against the protocol spec before
// reordering these calls.
func (p *RestartPacket) Serialize() []byte {
	var writer bytes.Buffer
	WriteHeader(&writer, 0,
		PROTOCOL_REP_RESTART, p.Tid, p.Serial)
	base.WriteDWord(&writer, 1)
	base.WriteLength(&writer)
	// CRC skips the first byte (the start flag written by WriteHeader).
	base.WriteWord(&writer, CalcCRC(writer.Bytes()[1:], uint16(writer.Len())-1))
	writer.WriteByte(PROTOCOL_END_FLAG)
	return writer.Bytes()
}
|
package subscription
import (
"fmt"
)
// ErrSubscriptionNotFound occurs when subscription cannot be found.
type ErrSubscriptionNotFound struct {
	ID ID
}

// Error implements the error interface.
func (e ErrSubscriptionNotFound) Error() string {
	return fmt.Sprintf("Subscription %q not found.", e.ID)
}
// ErrSubscriptionAlreadyExists occurs when subscription with the same ID already exists.
type ErrSubscriptionAlreadyExists struct {
	ID ID
}

// Error implements the error interface.
func (e ErrSubscriptionAlreadyExists) Error() string {
	return fmt.Sprintf("Subscription %q already exists.", e.ID)
}
// ErrInvalidSubscriptionUpdate occurs when a client tries to update a subscription in a way that changes the subscription ID.
type ErrInvalidSubscriptionUpdate struct {
	Field string
}

// Error implements the error interface.
func (e ErrInvalidSubscriptionUpdate) Error() string {
	return "Invalid update. '" + e.Field + "' of existing subscription cannot be updated."
}
// ErrSubscriptionValidation occurs when subscription payload doesn't validate.
type ErrSubscriptionValidation struct {
	Message string
}

// Error implements the error interface.
func (e ErrSubscriptionValidation) Error() string {
	return "Subscription doesn't validate. Validation error: " + e.Message
}
// ErrPathConfict occurs when HTTP subscription path conflicts with existing path.
// (The missing "l" in the type name is a historical typo kept for API
// compatibility.)
type ErrPathConfict struct {
	Message string
}

// Error implements the error interface.
func (e ErrPathConfict) Error() string {
	return "Subscription path conflict: " + e.Message
}
|
package handlers
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/authelia/authelia/v4/internal/configuration/schema"
)
// TestAuthzBuilder_WithConfig verifies how the authentication backend
// refresh-interval strings map onto the builder's RefreshInterval: "always"
// becomes 0s, "disable" becomes -1s, a duration string parses normally, and a
// nil configuration leaves the previous value untouched.
func TestAuthzBuilder_WithConfig(t *testing.T) {
	builder := NewAuthzBuilder()
	builder.WithConfig(&schema.Configuration{
		AuthenticationBackend: schema.AuthenticationBackend{
			RefreshInterval: "always",
		},
	})
	assert.Equal(t, time.Second*0, builder.config.RefreshInterval)
	builder.WithConfig(&schema.Configuration{
		AuthenticationBackend: schema.AuthenticationBackend{
			RefreshInterval: "disable",
		},
	})
	assert.Equal(t, time.Second*-1, builder.config.RefreshInterval)
	builder.WithConfig(&schema.Configuration{
		AuthenticationBackend: schema.AuthenticationBackend{
			RefreshInterval: "1m",
		},
	})
	assert.Equal(t, time.Minute, builder.config.RefreshInterval)
	// A nil config must be a no-op.
	builder.WithConfig(nil)
	assert.Equal(t, time.Minute, builder.config.RefreshInterval)
}
// TestAuthzBuilder_WithEndpointConfig verifies that each implementation name
// selects the matching authz implementation constant, and that the configured
// authn strategy lists are applied verbatim.
func TestAuthzBuilder_WithEndpointConfig(t *testing.T) {
	builder := NewAuthzBuilder()
	builder.WithEndpointConfig(schema.ServerEndpointsAuthz{
		Implementation: "ExtAuthz",
	})
	assert.Equal(t, AuthzImplExtAuthz, builder.implementation)
	builder.WithEndpointConfig(schema.ServerEndpointsAuthz{
		Implementation: "ForwardAuth",
	})
	assert.Equal(t, AuthzImplForwardAuth, builder.implementation)
	builder.WithEndpointConfig(schema.ServerEndpointsAuthz{
		Implementation: "AuthRequest",
	})
	assert.Equal(t, AuthzImplAuthRequest, builder.implementation)
	builder.WithEndpointConfig(schema.ServerEndpointsAuthz{
		Implementation: "Legacy",
	})
	assert.Equal(t, AuthzImplLegacy, builder.implementation)
	// Explicit strategy lists must be adopted one-for-one.
	builder.WithEndpointConfig(schema.ServerEndpointsAuthz{
		Implementation: "ExtAuthz",
		AuthnStrategies: []schema.ServerEndpointsAuthzAuthnStrategy{
			{Name: "HeaderProxyAuthorization"},
			{Name: "CookieSession"},
		},
	})
	assert.Len(t, builder.strategies, 2)
	builder.WithEndpointConfig(schema.ServerEndpointsAuthz{
		Implementation: "ExtAuthz",
		AuthnStrategies: []schema.ServerEndpointsAuthzAuthnStrategy{
			{Name: "HeaderAuthorization"},
			{Name: "HeaderProxyAuthorization"},
			{Name: "HeaderAuthRequestProxyAuthorization"},
			{Name: "HeaderLegacy"},
			{Name: "CookieSession"},
		},
	})
	assert.Len(t, builder.strategies, 5)
}
|
package cookie
import (
"net/http"
"net/http/cookiejar"
"net/http/httptest"
"net/url"
"testing"
)
// test is a sample payload used to round-trip a struct value through a cookie.
type test struct {
	A string
	B int
}
// TestCookie round-trips struct, string, int64, uint64, float64 and bool
// values through cookies: the GET request sets them (the struct additionally
// signed and encrypted), the POST request reads each one back and checks it.
func TestCookie(t *testing.T) {
	testFunc := func(w http.ResponseWriter, r *http.Request) {
		switch r.Method {
		case "GET":
			// set struct (signed + encrypted)
			c := Init(r, "test_struct")
			c.Value(test{"Hello World", 50})
			c.Expire().Hour(1)
			c.Crypto().Sign()
			c.Crypto().Encrypt()
			c.Save(w)
			// set string
			c = Init(r, "test_string")
			c.Value("Hello World")
			c.Expire().Hour(1)
			c.Save(w)
			// set int64
			c = Init(r, "test_int64")
			c.Value(int64(-69))
			c.Expire().Hour(1)
			c.Save(w)
			// set uint64
			c = Init(r, "test_uint64")
			c.Value(uint64(69))
			c.Expire().Hour(1)
			c.Save(w)
			// set float64
			c = Init(r, "test_float64")
			c.Value(float64(69.69))
			c.Expire().Hour(1)
			c.Save(w)
			// set bool
			c = Init(r, "test_bool")
			c.Value(true)
			c.Expire().Hour(1)
			c.Save(w)
		case "POST":
			// test struct
			c := Init(r, "test_struct")
			if !c.Exist() {
				t.Error("'test_struct' should exist!")
				return
			}
			c.Crypto().Decrypt()
			if !c.Crypto().Check() {
				t.Error("Signature check failed.")
				return
			}
			s := test{}
			c.Scan(&s)
			// || (was &&): the scan is wrong if EITHER field mismatches; the
			// old conjunction only failed when both were wrong at once.
			if s.A != "Hello World" || s.B != 50 {
				t.Error("Scanner Failed!")
				return
			}
			// test string
			c = Init(r, "test_string")
			if !c.Exist() {
				t.Error("'test_string' should exist!")
				return
			}
			var str string
			c.Scan(&str)
			if str != "Hello World" {
				t.Error("Scanner Failed!")
				return
			}
			// test int64
			c = Init(r, "test_int64")
			if !c.Exist() {
				t.Error("'test_int64' should exist!")
				return
			}
			var i64 int64
			c.Scan(&i64)
			if i64 != -69 {
				t.Error("Scanner Failed!")
				return
			}
			// test uint64
			c = Init(r, "test_uint64")
			if !c.Exist() {
				t.Error("'test_uint64' should exist!")
				return
			}
			var u64 uint64
			c.Scan(&u64)
			if u64 != 69 {
				t.Error("Scanner Failed!")
				return
			}
			// test float64
			c = Init(r, "test_float64")
			if !c.Exist() {
				t.Error("'test_float64' should exist!")
				return
			}
			var f64 float64
			c.Scan(&f64)
			if f64 != 69.69 {
				t.Error("Scanner Failed!")
				return
			}
			// test bool
			c = Init(r, "test_bool")
			if !c.Exist() {
				t.Error("'test_bool' should exist!")
				return
			}
			b := false
			c.Scan(&b)
			if !b {
				t.Error("Scanner Failed!")
				return
			}
		}
	}
	ts := httptest.NewServer(http.HandlerFunc(testFunc))
	// Release the server's listener and goroutines when the test finishes.
	defer ts.Close()
	jar, _ := cookiejar.New(nil)
	client := &http.Client{}
	client.Jar = jar
	value := url.Values{}
	// Surface transport errors: if either request failed silently, the POST
	// assertions would never run and the test would pass vacuously.
	if _, err := client.Get(ts.URL); err != nil {
		t.Fatalf("GET failed: %v", err)
	}
	if _, err := client.PostForm(ts.URL, value); err != nil {
		t.Fatalf("POST failed: %v", err)
	}
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package beta
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
)
// validate checks that the required "name" field is populated on the resource.
func (r *MetricsScope) validate() error {
	// dcl.Required already returns nil on success, so its result can be
	// returned directly.
	return dcl.Required(r, "name")
}
// validate always succeeds: MetricsScopeMonitoredProjects has no required fields.
func (r *MetricsScopeMonitoredProjects) validate() error {
 return nil
}
// basePath returns the root URL of the Monitoring v1 API for this resource.
func (r *MetricsScope) basePath() string {
 params := map[string]interface{}{}
 return dcl.Nprintf("https://monitoring.googleapis.com/v1/", params)
}
// getURL builds the GET endpoint for this metrics scope, substituting the
// URL-normalized resource name into the path template.
func (r *MetricsScope) getURL(userBasePath string) (string, error) {
 nr := r.urlNormalized()
 params := map[string]interface{}{
  "name": dcl.ValueOrEmptyString(nr.Name),
 }
 return dcl.URL("locations/global/metricsScopes/{{name}}", nr.basePath(), userBasePath, params), nil
}
// metricsScopeApiOperation represents a mutable operation in the underlying REST
// API such as Create, Update, or Delete.
type metricsScopeApiOperation interface {
 // do executes the operation against the API for the given resource.
 do(context.Context, *MetricsScope, *Client) error
}
// Create operations are similar to Update operations, although they do not have
// specific request objects. The Create request object is the json encoding of
// the resource, which is modified by res.marshal to form the base request body.
type createMetricsScopeOperation struct {
 // response holds the decoded API response body of the create call.
 response map[string]interface{}
}

// FirstResponse returns the create call's response body and whether one was
// captured (a non-empty map).
func (op *createMetricsScopeOperation) FirstResponse() (map[string]interface{}, bool) {
 return op.response, len(op.response) > 0
}
// getMetricsScopeRaw performs a GET for the resource and returns the raw
// (undecoded) response body bytes.
func (c *Client) getMetricsScopeRaw(ctx context.Context, r *MetricsScope) ([]byte, error) {
 u, err := r.getURL(c.Config.BasePath)
 if err != nil {
  return nil, err
 }
 resp, err := dcl.SendRequest(ctx, c.Config, "GET", u, &bytes.Buffer{}, c.Config.RetryProvider)
 if err != nil {
  return nil, err
 }
 defer resp.Response.Body.Close()
 b, err := ioutil.ReadAll(resp.Response.Body)
 if err != nil {
  return nil, err
 }
 return b, nil
}
// metricsScopeDiffsForRawDesired fetches the resource's current state from the
// API, canonicalizes both initial and desired states, and computes the field
// diffs between them. A nil initial return means the resource does not exist.
func (c *Client) metricsScopeDiffsForRawDesired(ctx context.Context, rawDesired *MetricsScope, opts ...dcl.ApplyOption) (initial, desired *MetricsScope, diffs []*dcl.FieldDiff, err error) {
 c.Config.Logger.InfoWithContext(ctx, "Fetching initial state...")
 // First, let us see if the user provided a state hint. If they did, we will start fetching based on that.
 var fetchState *MetricsScope
 if sh := dcl.FetchStateHint(opts); sh != nil {
  if r, ok := sh.(*MetricsScope); !ok {
   c.Config.Logger.WarningWithContextf(ctx, "Initial state hint was of the wrong type; expected MetricsScope, got %T", sh)
  } else {
   fetchState = r
  }
 }
 if fetchState == nil {
  fetchState = rawDesired
 }
 // 1.2: Retrieval of raw initial state from API
 rawInitial, err := c.GetMetricsScope(ctx, fetchState)
 if rawInitial == nil {
  if !dcl.IsNotFound(err) {
   // Any error other than 404 is a real retrieval failure.
   c.Config.Logger.WarningWithContextf(ctx, "Failed to retrieve whether a MetricsScope resource already exists: %s", err)
   return nil, nil, nil, fmt.Errorf("failed to retrieve MetricsScope resource: %v", err)
  }
  c.Config.Logger.InfoWithContext(ctx, "Found that MetricsScope resource did not exist.")
  // Perform canonicalization to pick up defaults.
  desired, err = canonicalizeMetricsScopeDesiredState(rawDesired, rawInitial)
  return nil, desired, nil, err
 }
 c.Config.Logger.InfoWithContextf(ctx, "Found initial state for MetricsScope: %v", rawInitial)
 c.Config.Logger.InfoWithContextf(ctx, "Initial desired state for MetricsScope: %v", rawDesired)
 // The Get call applies postReadExtract and so the result may contain fields that are not part of API version.
 if err := extractMetricsScopeFields(rawInitial); err != nil {
  return nil, nil, nil, err
 }
 // 1.3: Canonicalize raw initial state into initial state.
 initial, err = canonicalizeMetricsScopeInitialState(rawInitial, rawDesired)
 if err != nil {
  return nil, nil, nil, err
 }
 c.Config.Logger.InfoWithContextf(ctx, "Canonicalized initial state for MetricsScope: %v", initial)
 // 1.4: Canonicalize raw desired state into desired state.
 desired, err = canonicalizeMetricsScopeDesiredState(rawDesired, rawInitial, opts...)
 if err != nil {
  return nil, nil, nil, err
 }
 c.Config.Logger.InfoWithContextf(ctx, "Canonicalized desired state for MetricsScope: %v", desired)
 // 2.1: Comparison of initial and desired state.
 diffs, err = diffMetricsScope(c, desired, initial, opts...)
 return initial, desired, diffs, err
}
// canonicalizeMetricsScopeInitialState currently passes the raw initial state
// through unchanged.
func canonicalizeMetricsScopeInitialState(rawInitial, rawDesired *MetricsScope) (*MetricsScope, error) {
 // TODO(magic-modules-eng): write canonicalizer once relevant traits are added.
 return rawInitial, nil
}
/*
* Canonicalizers
*
* These are responsible for converting either a user-specified config or a
* GCP API response to a standard format that can be used for difference checking.
* */
// canonicalizeMetricsScopeDesiredState merges the user-specified desired state
// with the fetched initial state, preferring initial values when the desired
// value is an equivalent form (e.g. a partial self link).
func canonicalizeMetricsScopeDesiredState(rawDesired, rawInitial *MetricsScope, opts ...dcl.ApplyOption) (*MetricsScope, error) {
 if rawInitial == nil {
  // Since the initial state is empty, the desired state is all we have.
  // We canonicalize the remaining nested objects with nil to pick up defaults.
  return rawDesired, nil
 }
 canonicalDesired := &MetricsScope{}
 if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawInitial.Name) {
  canonicalDesired.Name = rawInitial.Name
 } else {
  canonicalDesired.Name = rawDesired.Name
 }
 return canonicalDesired, nil
}
// canonicalizeMetricsScopeNewState reconciles the state returned by the API
// after a mutation with the desired state, filling fields the server omitted.
func canonicalizeMetricsScopeNewState(c *Client, rawNew, rawDesired *MetricsScope) (*MetricsScope, error) {
 if dcl.IsEmptyValueIndirect(rawNew.Name) && dcl.IsEmptyValueIndirect(rawDesired.Name) {
  rawNew.Name = rawDesired.Name
 } else {
  if dcl.PartialSelfLinkToSelfLink(rawDesired.Name, rawNew.Name) {
   rawNew.Name = rawDesired.Name
  }
 }
 if dcl.IsEmptyValueIndirect(rawNew.CreateTime) && dcl.IsEmptyValueIndirect(rawDesired.CreateTime) {
  rawNew.CreateTime = rawDesired.CreateTime
 } else {
  // CreateTime is output only (see diffMetricsScope); keep the server value.
 }
 if dcl.IsEmptyValueIndirect(rawNew.UpdateTime) && dcl.IsEmptyValueIndirect(rawDesired.UpdateTime) {
  rawNew.UpdateTime = rawDesired.UpdateTime
 } else {
  // UpdateTime is output only (see diffMetricsScope); keep the server value.
 }
 if dcl.IsEmptyValueIndirect(rawNew.MonitoredProjects) && dcl.IsEmptyValueIndirect(rawDesired.MonitoredProjects) {
  rawNew.MonitoredProjects = rawDesired.MonitoredProjects
 } else {
  rawNew.MonitoredProjects = canonicalizeNewMetricsScopeMonitoredProjectsSlice(c, rawDesired.MonitoredProjects, rawNew.MonitoredProjects)
 }
 return rawNew, nil
}
// canonicalizeMetricsScopeMonitoredProjects merges a single desired monitored
// project with its initial counterpart; an explicitly-empty desired object is
// returned as-is.
func canonicalizeMetricsScopeMonitoredProjects(des, initial *MetricsScopeMonitoredProjects, opts ...dcl.ApplyOption) *MetricsScopeMonitoredProjects {
 if des == nil {
  return initial
 }
 if des.empty {
  return des
 }
 if initial == nil {
  return des
 }
 cDes := &MetricsScopeMonitoredProjects{}
 if dcl.StringCanonicalize(des.Name, initial.Name) || dcl.IsZeroValue(des.Name) {
  cDes.Name = initial.Name
 } else {
  cDes.Name = des.Name
 }
 return cDes
}
// canonicalizeMetricsScopeMonitoredProjectsSlice canonicalizes each desired
// element; pairwise against initial when the lengths match, otherwise with a
// nil initial element.
func canonicalizeMetricsScopeMonitoredProjectsSlice(des, initial []MetricsScopeMonitoredProjects, opts ...dcl.ApplyOption) []MetricsScopeMonitoredProjects {
 if dcl.IsEmptyValueIndirect(des) {
  return initial
 }
 if len(des) != len(initial) {
  items := make([]MetricsScopeMonitoredProjects, 0, len(des))
  for _, d := range des {
   cd := canonicalizeMetricsScopeMonitoredProjects(&d, nil, opts...)
   if cd != nil {
    items = append(items, *cd)
   }
  }
  return items
 }
 items := make([]MetricsScopeMonitoredProjects, 0, len(des))
 for i, d := range des {
  cd := canonicalizeMetricsScopeMonitoredProjects(&d, &initial[i], opts...)
  if cd != nil {
   items = append(items, *cd)
  }
 }
 return items
}
// canonicalizeNewMetricsScopeMonitoredProjects reconciles a monitored project
// returned by the API (nw) with the desired object (des).
func canonicalizeNewMetricsScopeMonitoredProjects(c *Client, des, nw *MetricsScopeMonitoredProjects) *MetricsScopeMonitoredProjects {
 if des == nil {
  return nw
 }
 if nw == nil {
  if dcl.IsEmptyValueIndirect(des) {
   c.Config.Logger.Info("Found explicitly empty value for MetricsScopeMonitoredProjects while comparing non-nil desired to nil actual. Returning desired object.")
   return des
  }
  return nil
 }
 if dcl.StringCanonicalize(des.Name, nw.Name) {
  // Equivalent names: prefer the user's spelling.
  nw.Name = des.Name
 }
 return nw
}
// canonicalizeNewMetricsScopeMonitoredProjectsSet treats the two slices as
// unordered sets: matched elements are canonicalized pairwise, unmatched
// actual elements are appended at the end.
func canonicalizeNewMetricsScopeMonitoredProjectsSet(c *Client, des, nw []MetricsScopeMonitoredProjects) []MetricsScopeMonitoredProjects {
 if des == nil {
  return nw
 }
 // Find the elements in des that are also in nw and canonicalize them. Remove matched elements from nw.
 var items []MetricsScopeMonitoredProjects
 for _, d := range des {
  matchedIndex := -1
  for i, n := range nw {
   if diffs, _ := compareMetricsScopeMonitoredProjectsNewStyle(&d, &n, dcl.FieldName{}); len(diffs) == 0 {
    matchedIndex = i
    break
   }
  }
  if matchedIndex != -1 {
   items = append(items, *canonicalizeNewMetricsScopeMonitoredProjects(c, &d, &nw[matchedIndex]))
   nw = append(nw[:matchedIndex], nw[matchedIndex+1:]...)
  }
 }
 // Also include elements in nw that are not matched in des.
 items = append(items, nw...)
 return items
}
// canonicalizeNewMetricsScopeMonitoredProjectsSlice canonicalizes the actual
// slice element-by-element against the desired slice when lengths match.
func canonicalizeNewMetricsScopeMonitoredProjectsSlice(c *Client, des, nw []MetricsScopeMonitoredProjects) []MetricsScopeMonitoredProjects {
 if des == nil {
  return nw
 }
 // Lengths are unequal. A diff will occur later, so we shouldn't canonicalize.
 // Return the original array.
 if len(des) != len(nw) {
  return nw
 }
 var items []MetricsScopeMonitoredProjects
 for i, d := range des {
  n := nw[i]
  items = append(items, *canonicalizeNewMetricsScopeMonitoredProjects(c, &d, &n))
 }
 return items
}
// The differ returns a list of diffs, along with a list of operations that should be taken
// to remedy them. Right now, it does not attempt to consolidate operations - if several
// fields can be fixed with a patch update, it will perform the patch several times.
// Diffs on some fields will be ignored if the `desired` state has an empty (nil)
// value. This empty value indicates that the user does not care about the state for
// the field. Empty fields on the actual object will cause diffs.
// TODO(magic-modules-eng): for efficiency in some resources, add batching.
// diffMetricsScope compares the desired and actual states field by field and
// returns the list of differences; every field here requires recreation.
func diffMetricsScope(c *Client, desired, actual *MetricsScope, opts ...dcl.ApplyOption) ([]*dcl.FieldDiff, error) {
 if desired == nil || actual == nil {
  return nil, fmt.Errorf("nil resource passed to diff - always a programming error: %#v, %#v", desired, actual)
 }
 c.Config.Logger.Infof("Diff function called with desired state: %v", desired)
 c.Config.Logger.Infof("Diff function called with actual state: %v", actual)
 var fn dcl.FieldName
 var newDiffs []*dcl.FieldDiff
 // New style diffs.
 if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
  if err != nil {
   return nil, err
  }
  newDiffs = append(newDiffs, ds...)
 }
 if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil {
  if err != nil {
   return nil, err
  }
  newDiffs = append(newDiffs, ds...)
 }
 if ds, err := dcl.Diff(desired.UpdateTime, actual.UpdateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("UpdateTime")); len(ds) != 0 || err != nil {
  if err != nil {
   return nil, err
  }
  newDiffs = append(newDiffs, ds...)
 }
 if ds, err := dcl.Diff(desired.MonitoredProjects, actual.MonitoredProjects, dcl.DiffInfo{OutputOnly: true, ObjectFunction: compareMetricsScopeMonitoredProjectsNewStyle, EmptyObject: EmptyMetricsScopeMonitoredProjects, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("MonitoredProjects")); len(ds) != 0 || err != nil {
  if err != nil {
   return nil, err
  }
  newDiffs = append(newDiffs, ds...)
 }
 if len(newDiffs) > 0 {
  c.Config.Logger.Infof("Diff function found diffs: %v", newDiffs)
 }
 return newDiffs, nil
}
// compareMetricsScopeMonitoredProjectsNewStyle diffs two
// MetricsScopeMonitoredProjects values, accepting either pointer or value
// form for each argument.
func compareMetricsScopeMonitoredProjectsNewStyle(d, a interface{}, fn dcl.FieldName) ([]*dcl.FieldDiff, error) {
 var diffs []*dcl.FieldDiff
 desired, ok := d.(*MetricsScopeMonitoredProjects)
 if !ok {
  desiredNotPointer, ok := d.(MetricsScopeMonitoredProjects)
  if !ok {
   return nil, fmt.Errorf("obj %v is not a MetricsScopeMonitoredProjects or *MetricsScopeMonitoredProjects", d)
  }
  desired = &desiredNotPointer
 }
 actual, ok := a.(*MetricsScopeMonitoredProjects)
 if !ok {
  actualNotPointer, ok := a.(MetricsScopeMonitoredProjects)
  if !ok {
   // Message kept consistent with the desired-side check above (it
   // previously omitted the pointer form).
   return nil, fmt.Errorf("obj %v is not a MetricsScopeMonitoredProjects or *MetricsScopeMonitoredProjects", a)
  }
  actual = &actualNotPointer
 }
 if ds, err := dcl.Diff(desired.Name, actual.Name, dcl.DiffInfo{OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("Name")); len(ds) != 0 || err != nil {
  if err != nil {
   return nil, err
  }
  diffs = append(diffs, ds...)
 }
 if ds, err := dcl.Diff(desired.CreateTime, actual.CreateTime, dcl.DiffInfo{OutputOnly: true, OperationSelector: dcl.RequiresRecreate()}, fn.AddNest("CreateTime")); len(ds) != 0 || err != nil {
  if err != nil {
   return nil, err
  }
  diffs = append(diffs, ds...)
 }
 return diffs, nil
}
// urlNormalized returns a copy of the resource struct with values normalized
// for URL substitutions. For instance, it converts long-form self-links to
// short-form so they can be substituted in.
func (r *MetricsScope) urlNormalized() *MetricsScope {
 // dcl.Copy performs a deep copy so the caller's resource is not mutated.
 normalized := dcl.Copy(*r).(MetricsScope)
 normalized.Name = dcl.SelfLinkToName(r.Name)
 return &normalized
}
// updateURL always fails: MetricsScope defines no update operations.
func (r *MetricsScope) updateURL(userBasePath, updateName string) (string, error) {
 return "", fmt.Errorf("unknown update name: %s", updateName)
}
// marshal encodes the MetricsScope resource into JSON for a Create request, and
// performs transformations from the resource schema to the API schema if
// necessary.
func (r *MetricsScope) marshal(c *Client) ([]byte, error) {
 m, err := expandMetricsScope(c, r)
 if err != nil {
  return nil, fmt.Errorf("error marshalling MetricsScope: %w", err)
 }
 return json.Marshal(m)
}
// unmarshalMetricsScope decodes JSON responses into the MetricsScope resource schema.
func unmarshalMetricsScope(b []byte, c *Client, res *MetricsScope) (*MetricsScope, error) {
 var m map[string]interface{}
 if err := json.Unmarshal(b, &m); err != nil {
  return nil, err
 }
 return unmarshalMapMetricsScope(m, c, res)
}
// unmarshalMapMetricsScope converts an already-decoded JSON map into a
// MetricsScope, rejecting empty objects.
func unmarshalMapMetricsScope(m map[string]interface{}, c *Client, res *MetricsScope) (*MetricsScope, error) {
 flattened := flattenMetricsScope(c, m, res)
 if flattened == nil {
  return nil, fmt.Errorf("attempted to flatten empty json object")
 }
 return flattened, nil
}
// expandMetricsScope expands MetricsScope into a JSON request object.
// Only the name is sent; project IDs in it are converted to project numbers.
func expandMetricsScope(c *Client, f *MetricsScope) (map[string]interface{}, error) {
 m := make(map[string]interface{})
 res := f
 _ = res
 if v, err := dcl.ExpandProjectIDsToNumbers(c.Config, f.Name); err != nil {
  return nil, fmt.Errorf("error expanding Name into name: %w", err)
 } else if !dcl.IsEmptyValueIndirect(v) {
  m["name"] = v
 }
 return m, nil
}
// flattenMetricsScope flattens MetricsScope from a JSON request object into the
// MetricsScope type. Returns nil for non-map or empty input; project numbers
// in the name are converted back to project IDs.
func flattenMetricsScope(c *Client, i interface{}, res *MetricsScope) *MetricsScope {
 m, ok := i.(map[string]interface{})
 if !ok {
  return nil
 }
 if len(m) == 0 {
  return nil
 }
 resultRes := &MetricsScope{}
 resultRes.Name = dcl.FlattenProjectNumbersToIDs(c.Config, dcl.FlattenString(m["name"]))
 resultRes.CreateTime = dcl.FlattenString(m["createTime"])
 resultRes.UpdateTime = dcl.FlattenString(m["updateTime"])
 resultRes.MonitoredProjects = flattenMetricsScopeMonitoredProjectsSlice(c, m["monitoredProjects"], res)
 return resultRes
}
// expandMetricsScopeMonitoredProjectsMap expands the contents of MetricsScopeMonitoredProjects into a JSON
// request object, keyed as in the input map; nil expansions are skipped.
func expandMetricsScopeMonitoredProjectsMap(c *Client, f map[string]MetricsScopeMonitoredProjects, res *MetricsScope) (map[string]interface{}, error) {
 if f == nil {
  return nil, nil
 }
 items := make(map[string]interface{})
 for k, item := range f {
  i, err := expandMetricsScopeMonitoredProjects(c, &item, res)
  if err != nil {
   return nil, err
  }
  if i != nil {
   items[k] = i
  }
 }
 return items, nil
}
// expandMetricsScopeMonitoredProjectsSlice expands the contents of MetricsScopeMonitoredProjects into a JSON
// request object, one map per element (nil elements are preserved in order).
func expandMetricsScopeMonitoredProjectsSlice(c *Client, f []MetricsScopeMonitoredProjects, res *MetricsScope) ([]map[string]interface{}, error) {
 if f == nil {
  return nil, nil
 }
 items := []map[string]interface{}{}
 for _, item := range f {
  i, err := expandMetricsScopeMonitoredProjects(c, &item, res)
  if err != nil {
   return nil, err
  }
  items = append(items, i)
 }
 return items, nil
}
// flattenMetricsScopeMonitoredProjectsMap flattens the contents of MetricsScopeMonitoredProjects from a JSON
// response object; non-map or empty input yields an empty (non-nil) map.
func flattenMetricsScopeMonitoredProjectsMap(c *Client, i interface{}, res *MetricsScope) map[string]MetricsScopeMonitoredProjects {
 a, ok := i.(map[string]interface{})
 if !ok {
  return map[string]MetricsScopeMonitoredProjects{}
 }
 if len(a) == 0 {
  return map[string]MetricsScopeMonitoredProjects{}
 }
 items := make(map[string]MetricsScopeMonitoredProjects)
 for k, item := range a {
  items[k] = *flattenMetricsScopeMonitoredProjects(c, item.(map[string]interface{}), res)
 }
 return items
}
// flattenMetricsScopeMonitoredProjectsSlice flattens the contents of MetricsScopeMonitoredProjects from a JSON
// response object; non-slice or empty input yields an empty (non-nil) slice.
func flattenMetricsScopeMonitoredProjectsSlice(c *Client, i interface{}, res *MetricsScope) []MetricsScopeMonitoredProjects {
 a, ok := i.([]interface{})
 if !ok {
  return []MetricsScopeMonitoredProjects{}
 }
 if len(a) == 0 {
  return []MetricsScopeMonitoredProjects{}
 }
 items := make([]MetricsScopeMonitoredProjects, 0, len(a))
 for _, item := range a {
  items = append(items, *flattenMetricsScopeMonitoredProjects(c, item.(map[string]interface{}), res))
 }
 return items
}
// expandMetricsScopeMonitoredProjects expands an instance of MetricsScopeMonitoredProjects into a JSON
// request object; only the name is sent (CreateTime is read-only in responses).
func expandMetricsScopeMonitoredProjects(c *Client, f *MetricsScopeMonitoredProjects, res *MetricsScope) (map[string]interface{}, error) {
 if dcl.IsEmptyValueIndirect(f) {
  return nil, nil
 }
 m := make(map[string]interface{})
 if v := f.Name; !dcl.IsEmptyValueIndirect(v) {
  m["name"] = v
 }
 return m, nil
}
// flattenMetricsScopeMonitoredProjects flattens an instance of MetricsScopeMonitoredProjects from a JSON
// response object; an explicitly empty value maps to the canonical empty sentinel.
func flattenMetricsScopeMonitoredProjects(c *Client, i interface{}, res *MetricsScope) *MetricsScopeMonitoredProjects {
 m, ok := i.(map[string]interface{})
 if !ok {
  return nil
 }
 r := &MetricsScopeMonitoredProjects{}
 if dcl.IsEmptyValueIndirect(i) {
  return EmptyMetricsScopeMonitoredProjects
 }
 r.Name = dcl.FlattenString(m["name"])
 r.CreateTime = dcl.FlattenString(m["createTime"])
 return r
}
// This function returns a matcher that checks whether a serialized resource matches this resource
// in its parameters (as defined by the fields in a Get, which definitionally define resource
// identity). This is useful in extracting the element from a List call.
// For MetricsScope, identity is the normalized Name alone.
func (r *MetricsScope) matcher(c *Client) func([]byte) bool {
 return func(b []byte) bool {
  cr, err := unmarshalMetricsScope(b, c, r)
  if err != nil {
   c.Config.Logger.Warning("failed to unmarshal provided resource in matcher.")
   return false
  }
  nr := r.urlNormalized()
  ncr := cr.urlNormalized()
  c.Config.Logger.Infof("looking for %v\nin %v", nr, ncr)
  if nr.Name == nil && ncr.Name == nil {
   c.Config.Logger.Info("Both Name fields null - considering equal.")
  } else if nr.Name == nil || ncr.Name == nil {
   c.Config.Logger.Info("Only one Name field is null - considering unequal.")
   return false
  } else if *nr.Name != *ncr.Name {
   return false
  }
  return true
 }
}
// metricsScopeDiff groups field diffs by the operation needed to resolve them.
type metricsScopeDiff struct {
 // The diff should include one or the other of RequiresRecreate or UpdateOp.
 RequiresRecreate bool
 UpdateOp metricsScopeApiOperation
 FieldName string // used for error logging
}
// convertFieldDiffsToMetricsScopeDiffs groups field diffs by their resulting
// operation name and converts each group into a metricsScopeDiff, either a
// recreate marker or a concrete update operation.
func convertFieldDiffsToMetricsScopeDiffs(config *dcl.Config, fds []*dcl.FieldDiff, opts []dcl.ApplyOption) ([]metricsScopeDiff, error) {
 opNamesToFieldDiffs := make(map[string][]*dcl.FieldDiff)
 // Map each operation name to the field diffs associated with it.
 for _, fd := range fds {
  for _, ro := range fd.ResultingOperation {
   if fieldDiffs, ok := opNamesToFieldDiffs[ro]; ok {
    fieldDiffs = append(fieldDiffs, fd)
    opNamesToFieldDiffs[ro] = fieldDiffs
   } else {
    config.Logger.Infof("%s required due to diff: %v", ro, fd)
    opNamesToFieldDiffs[ro] = []*dcl.FieldDiff{fd}
   }
  }
 }
 var diffs []metricsScopeDiff
 // For each operation name, create a metricsScopeDiff which contains the operation.
 for opName, fieldDiffs := range opNamesToFieldDiffs {
  // Use the first field diff's field name for logging required recreate error.
  diff := metricsScopeDiff{FieldName: fieldDiffs[0].FieldName}
  if opName == "Recreate" {
   diff.RequiresRecreate = true
  } else {
   apiOp, err := convertOpNameToMetricsScopeApiOperation(opName, fieldDiffs, opts...)
   if err != nil {
    return diffs, err
   }
   diff.UpdateOp = apiOp
  }
  diffs = append(diffs, diff)
 }
 return diffs, nil
}
// convertOpNameToMetricsScopeApiOperation maps an operation name to its API
// operation; MetricsScope has no named update operations, so this always errors.
func convertOpNameToMetricsScopeApiOperation(opName string, fieldDiffs []*dcl.FieldDiff, opts ...dcl.ApplyOption) (metricsScopeApiOperation, error) {
 switch opName {
 default:
  return nil, fmt.Errorf("no such operation with name: %v", opName)
 }
}
// extractMetricsScopeFields is a no-op extraction hook for the resource.
func extractMetricsScopeFields(r *MetricsScope) error {
 return nil
}

// extractMetricsScopeMonitoredProjectsFields is a no-op extraction hook for the nested object.
func extractMetricsScopeMonitoredProjectsFields(r *MetricsScope, o *MetricsScopeMonitoredProjects) error {
 return nil
}

// postReadExtractMetricsScopeFields is a no-op post-read hook for the resource.
func postReadExtractMetricsScopeFields(r *MetricsScope) error {
 return nil
}

// postReadExtractMetricsScopeMonitoredProjectsFields is a no-op post-read hook for the nested object.
func postReadExtractMetricsScopeMonitoredProjectsFields(r *MetricsScope, o *MetricsScopeMonitoredProjects) error {
 return nil
}
|
package mysql
import (
"database/sql"
"github.com/Tanibox/tania-core/src/assets/repository"
"github.com/Tanibox/tania-core/src/assets/storage"
)
// ReservoirReadRepositoryMysql persists reservoir read models in MySQL.
type ReservoirReadRepositoryMysql struct {
 DB *sql.DB
}

// NewReservoirReadRepositoryMysql returns a MySQL-backed ReservoirReadRepository.
func NewReservoirReadRepositoryMysql(db *sql.DB) repository.ReservoirReadRepository {
 return &ReservoirReadRepositoryMysql{DB: db}
}
// Save upserts a reservoir read model (and its notes) asynchronously. The
// returned channel receives exactly one value — the first error encountered,
// or nil on success — and is then closed.
func (f *ReservoirReadRepositoryMysql) Save(reservoirRead *storage.ReservoirRead) <-chan error {
 result := make(chan error)
 go func() {
  // The channel is closed once a single outcome has been delivered.
  defer close(result)
  count := 0
  err := f.DB.QueryRow(`SELECT COUNT(*) FROM RESERVOIR_READ WHERE UID = ?`, reservoirRead.UID.Bytes()).Scan(&count)
  if err != nil {
   // Return immediately on failure: the original code kept executing
   // after sending an error, and its trailing `result <- nil` on the
   // unbuffered channel would block forever once the single reader
   // had consumed the error (goroutine leak).
   result <- err
   return
  }
  if count > 0 {
   _, err = f.DB.Exec(`UPDATE RESERVOIR_READ SET
   NAME = ?, WATERSOURCE_TYPE = ?, WATERSOURCE_CAPACITY = ?, FARM_UID = ?,
   FARM_NAME = ?, CREATED_DATE = ?
   WHERE UID = ?`,
    reservoirRead.Name,
    reservoirRead.WaterSource.Type,
    reservoirRead.WaterSource.Capacity,
    reservoirRead.Farm.UID.Bytes(),
    reservoirRead.Farm.Name,
    reservoirRead.CreatedDate,
    reservoirRead.UID.Bytes())
   if err != nil {
    result <- err
    return
   }
   if len(reservoirRead.Notes) > 0 {
    // Just delete them all then insert them all again.
    // We can refactor it later.
    _, err := f.DB.Exec(`DELETE FROM RESERVOIR_READ_NOTES WHERE RESERVOIR_UID = ?`, reservoirRead.UID.Bytes())
    if err != nil {
     result <- err
     return
    }
    for _, v := range reservoirRead.Notes {
     _, err := f.DB.Exec(`INSERT INTO RESERVOIR_READ_NOTES (UID, RESERVOIR_UID, CONTENT, CREATED_DATE)
     VALUES (?, ?, ?, ?)`, v.UID.Bytes(), reservoirRead.UID.Bytes(), v.Content, v.CreatedDate)
     if err != nil {
      result <- err
      return
     }
    }
   }
  } else {
   _, err = f.DB.Exec(`INSERT INTO RESERVOIR_READ
   (UID, NAME, WATERSOURCE_TYPE, WATERSOURCE_CAPACITY, FARM_UID, FARM_NAME, CREATED_DATE)
   VALUES (?, ?, ?, ?, ?, ?, ?)`,
    reservoirRead.UID.Bytes(),
    reservoirRead.Name,
    reservoirRead.WaterSource.Type,
    reservoirRead.WaterSource.Capacity,
    reservoirRead.Farm.UID.Bytes(),
    reservoirRead.Farm.Name,
    reservoirRead.CreatedDate)
   if err != nil {
    result <- err
    return
   }
  }
  result <- nil
 }()
 return result
}
|
package main
import (
"bufio"
"fmt"
"github.com/kr/pty"
"io"
"log"
"os"
"os/exec"
"regexp"
"strings"
"time"
)
// Deployer is a bot plugin that runs ansible deploys, one at a time.
type Deployer struct {
 config *PluginConfig
 runningJob *DeployJob
}

// DeployJob tracks one in-flight deploy subprocess.
type DeployJob struct {
 process *os.Process
 params *DeployParams
 quit chan bool // signaled when the process has exited
 kill chan bool // signaled to request cancellation
 killing bool // true once an interrupt has been requested
}
// NewDeployer builds the deployer plugin: it does not echo messages and only
// reacts to mentions. The bot argument is currently unused.
func NewDeployer(bot *Hipbot) *Deployer {
 dep := new(Deployer)
 dep.config = &PluginConfig{
  EchoMessages: false,
  OnlyMentions: true,
 }
 return dep
}

// Config returns the plugin configuration set up in NewDeployer.
func (dep *Deployer) Config() *PluginConfig {
 return dep.config
}
/**
 * Examples:
 * deploy to stage, branch boo, tags boom, reload-streambed
 * deploy to stage the branch santa-claus with tags boom, reload-streambed
 * deploy on prod, branch boo with tags: ahuh, mama, papa
 * deploy to stage the branch master
 * deploy prod branch boo // shortest form
 * or second regexp:
 * deploy branch boo to stage
 * deploy santa-claus to stage with tags: kaboom
 */
// deployFormat captures: group 2 = branch (optional), group 3 = environment,
// group 6 = tags (optional).
// NOTE(review): only this one pattern exists; the "second regexp" example
// forms above are not matched by it — confirm whether that is intended.
var deployFormat = regexp.MustCompile(`deploy( ([a-zA-Z0-9_\.-]+))? to ([a-z_-]+)((,| with)? tags?:? ?(.+))?`)
// Handle processes chat messages: a "deploy ... to <env> ..." command starts
// a deploy (only one may run at a time), and "cancel deploy" interrupts the
// running one.
func (dep *Deployer) Handle(bot *Hipbot, msg *BotMessage) {
 if match := deployFormat.FindStringSubmatch(msg.Body); match != nil {
  if dep.runningJob != nil {
   // Refuse to start a second concurrent deploy.
   params := dep.runningJob.params
   bot.Reply(msg, fmt.Sprintf("Deploy currently running, initiated by %s: env=%s branch=%s tags=%s", params.initiatedBy, params.environment, params.branch, params.Tags()))
   return
  }
  params := &DeployParams{environment: match[3], branch: match[2], tags: match[6], initiatedBy: msg.FromNick()}
  dep.handleDeploy(bot, msg, params)
  return
 }
 if msg.Contains("cancel deploy") {
  if dep.runningJob == nil {
   bot.Reply(msg, "No deploy running, sorry man..")
   return
  }
  if dep.runningJob.killing {
   bot.Reply(msg, "deploy: Interrupt signal already sent, waiting to die")
   return
  }
  bot.Reply(msg, "deploy: Sending Interrupt signal...")
  dep.runningJob.killing = true
  dep.runningJob.kill <- true
 }
}
// DeployParams describes a requested deploy, parsed from a chat message.
type DeployParams struct {
 environment string
 branch string
 tags string
 initiatedBy string
}

// Tags returns the comma-separated tags with all spaces stripped, suitable
// for ansible-playbook's --tags flag.
func (p *DeployParams) Tags() string {
 return strings.Replace(p.tags, " ", "", -1)
}
// handleDeploy runs the ansible-playbook deploy as a subprocess on a pty,
// streams its output back to the channel, and registers it as the currently
// running job so it can be cancelled.
func (dep *Deployer) handleDeploy(bot *Hipbot, msg *BotMessage, params *DeployParams) {
 bot.Reply(msg, fmt.Sprintf("[process] Running deploy env=%s, branch=%s, tags=%s", params.environment, params.branch, params.Tags()))
 cmdArgs := []string{"ansible-playbook", "-i", "hosts_vagrant", "playbook_vagrant.yml"}
 if tags := params.Tags(); tags != "" {
  cmdArgs = append(cmdArgs, "--tags", tags)
 }
 cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
 cmd.Dir = "/home/abourget/plotly/deploy"
 // Ansible's color escape codes would garble the chat output.
 cmd.Env = append(os.Environ(), "ANSIBLE_NOCOLOR=1")
 ptyFile, err := pty.Start(cmd)
 if err != nil {
  // Don't take the whole bot down (the original log.Fatal exited the
  // process) when a single deploy fails to start.
  log.Printf("deploy: could not start process: %s", err)
  bot.Reply(msg, fmt.Sprintf("[process] could not start: %s", err))
  return
 }
 // Close the pty once the process is done so the reader goroutine ends.
 defer ptyFile.Close()
 dep.runningJob = &DeployJob{process: cmd.Process, params: params, quit: make(chan bool, 2), kill: make(chan bool, 2)}
 go manageDeployIo(bot, msg, ptyFile)
 go dep.manageKillProcess(ptyFile)
 if err := cmd.Wait(); err != nil {
  bot.Reply(msg, fmt.Sprintf("[process] terminated: %s", err))
 } else {
  bot.Reply(msg, "[process] terminated without error")
 }
 dep.runningJob.quit <- true
 dep.runningJob = nil
}
// manageKillProcess waits for either normal completion (quit) or a cancel
// request (kill). On cancel it sends SIGINT, waits a grace period, then
// force-kills the process if that same job is still the running one.
func (dep *Deployer) manageKillProcess(pty *os.File) {
 job := dep.runningJob
 select {
 case <-job.quit:
  return
 case <-job.kill:
  job.process.Signal(os.Interrupt)
  time.Sleep(3 * time.Second)
  // Work through the captured job pointer: handleDeploy nils out
  // dep.runningJob when the process exits, and a different job could
  // have started since. The original re-read dep.runningJob here,
  // risking a nil dereference or killing the wrong (newer) process.
  if dep.runningJob == job {
   job.process.Kill()
  }
 }
}
// manageDeployIo relays each line of the process output to the chat channel.
func manageDeployIo(bot *Hipbot, msg *BotMessage, reader io.Reader) {
 scanner := bufio.NewScanner(reader)
 for scanner.Scan() {
  // scanner.Text() is already a string; the former fmt.Sprintf("%s", ...)
  // wrapper was a no-op.
  bot.Reply(msg, scanner.Text())
 }
}
|
package sessions
import (
"encoding/json"
"errors"
"time"
"github.com/go-jose/go-jose/v3/jwt"
"github.com/google/uuid"
)
// ErrMissingID is the error for a session state that has no ID set.
// It is returned by State.UnmarshalJSON when the `jti` claim is absent.
var ErrMissingID = errors.New("invalid session: missing id")

// timeNow is time.Now but pulled out as a variable for tests.
var timeNow = time.Now
// State is our object that keeps track of a user's session state
type State struct {
 // Public claim values (as specified in RFC 7519).
 Issuer string `json:"iss,omitempty"`
 Subject string `json:"sub,omitempty"`
 Audience jwt.Audience `json:"aud,omitempty"`
 IssuedAt *jwt.NumericDate `json:"iat,omitempty"`
 // ID is required; UnmarshalJSON rejects states without it (ErrMissingID).
 ID string `json:"jti,omitempty"`
 // Azure returns OID which should be used instead of subject.
 OID string `json:"oid,omitempty"`
 // DatabrokerServerVersion tracks the last referenced databroker server version
 // for the saved session.
 DatabrokerServerVersion uint64 `json:"databroker_server_version,omitempty"`
 // DatabrokerRecordVersion tracks the last referenced databroker record version
 // for the saved session.
 DatabrokerRecordVersion uint64 `json:"databroker_record_version,omitempty"`
 // IdentityProviderID is the identity provider for the session.
 IdentityProviderID string `json:"idp_id,omitempty"`
}
// NewState creates a State for the given identity provider ID, stamped with
// the current issue time and a freshly generated unique ID.
func NewState(idpID string) *State {
 var s State
 s.IssuedAt = jwt.NewNumericDate(timeNow())
 s.ID = uuid.NewString()
 s.IdentityProviderID = idpID
 return &s
}
// WithNewIssuer returns a copy of the State (or a zero State when the
// receiver is nil) re-stamped with the given issuer, audience, and a fresh
// issue time.
func (s *State) WithNewIssuer(issuer string, audience []string) State {
 var out State
 if s != nil {
  out = *s
 }
 out.Issuer = issuer
 out.Audience = audience
 out.IssuedAt = jwt.NewNumericDate(timeNow())
 return out
}
// UserID returns the corresponding user ID for a session: the Azure OID
// claim when present, otherwise the standard subject claim.
func (s *State) UserID() string {
 if s.OID == "" {
  return s.Subject
 }
 return s.OID
}
// UnmarshalJSON populates the State from JSON and validates that the
// required `jti` (ID) claim is present, returning ErrMissingID otherwise.
// (It does not otherwise transform claims.)
func (s *State) UnmarshalJSON(data []byte) error {
 // Alias type sidesteps this UnmarshalJSON method and avoids infinite recursion.
 type StateAlias State
 a := &struct {
  *StateAlias
 }{
  StateAlias: (*StateAlias)(s),
 }
 if err := json.Unmarshal(data, &a); err != nil {
  return err
 }
 if s.ID == "" {
  return ErrMissingID
 }
 return nil
}
|
package drop
import (
"fmt"
"log"
"time"
"github.com/boltdb/bolt"
"github.com/fxnn/deadbox/config"
"github.com/fxnn/deadbox/daemon"
"github.com/fxnn/deadbox/model"
"github.com/fxnn/deadbox/rest"
)
// Daemonized is a drop that can also be controlled as a daemon.
type Daemonized interface {
 model.Drop
 daemon.Daemon
}

// facade contains the implementation of model.Drop.
// As a facade, it redirects the method calls to the actual implementing
// structs.
type facade struct {
 daemon.Daemon
 name string
 listenAddress string
 tls rest.TLS
 *workers
 *requests
 *responses
}
// New wires up a drop facade with worker, request and response stores backed
// by the given bolt database, and attaches the daemon main loop.
func New(c *config.Drop, db *bolt.DB, tls rest.TLS) Daemonized {
 f := &facade{
  name: c.Name,
  listenAddress: c.ListenAddress,
  tls: tls,
  workers: &workers{db, time.Duration(c.MaxWorkerTimeoutInSeconds) * time.Second},
  requests: &requests{db, time.Duration(c.MaxRequestTimeoutInSeconds) * time.Second},
  // NOTE(review): responses reuse the request timeout setting — confirm
  // that this is intentional.
  responses: &responses{db, time.Duration(c.MaxRequestTimeoutInSeconds) * time.Second},
 }
 f.Daemon = daemon.New(f.main)
 return f
}
// main starts the REST server and blocks until asked to stop, then shuts the
// server down. It is the daemon loop attached in New.
func (f *facade) main(stop <-chan struct{}) error {
 server := rest.NewServer(f.listenAddress, f.tls, f)
 if err := server.StartServing(); err != nil {
  return fmt.Errorf("drop %s on %s could not be started: %s", f.quotedName(), f.listenAddress, err)
 }
 // @todo #10 secure drop against DoS and bruteforce attacks
 log.Println("drop", f.quotedName(), "on", f.listenAddress, "is now listening")
 // A for/select with a single case is just a channel receive (gosimple S1000).
 <-stop
 log.Println("drop", f.quotedName(), "on", f.listenAddress, "shutting down")
 return server.Close()
}
// quotedName returns the drop's name wrapped in single quotes for log output.
func (f *facade) quotedName() string {
 return fmt.Sprintf("'%s'", f.name)
}
|
package cmds
import (
"os"
"testing"
"github.com/BaritoLog/go-boilerplate/slicekit"
. "github.com/BaritoLog/go-boilerplate/testkit"
log "github.com/sirupsen/logrus"
)
// init silences log output below error level to keep test output clean.
func init() {
 log.SetLevel(log.ErrorLevel)
}
func TestGetKafkaBrokers(t *testing.T) {
FatalIf(t, !slicekit.StringSliceEqual(configKafkaBrokers(), DefaultKafkaBrokers), "should return default")
os.Setenv(EnvKafkaBrokers, "some-kafka-01:9092, some-kafka-02:9092")
defer os.Clearenv()
envKafkaBrokers := sliceEnvOrDefault(EnvKafkaBrokers, ",", DefaultKafkaBrokers)
FatalIf(t, !slicekit.StringSliceEqual(configKafkaBrokers(), envKafkaBrokers), "should get from env variable")
}
func TestGetConsulElastisearchName(t *testing.T) {
FatalIf(t, configConsulElasticsearchName() != DefaultConsulElasticsearchName, "should return default ")
os.Setenv(EnvConsulElasticsearchName, "elastic11")
defer os.Clearenv()
FatalIf(t, configConsulElasticsearchName() != "elastic11", "should get from env variable")
}
func TestGetElasticsearchUrls(t *testing.T) {
FatalIf(t, !slicekit.StringSliceEqual(configElasticsearchUrls(), DefaultElasticsearchUrls), "should return default")
os.Setenv(EnvElasticsearchUrls, "http://some-elasticsearch-01:9200, http://some-elasticsearch-02:9200")
defer os.Clearenv()
envEsUrls := sliceEnvOrDefault(EnvElasticsearchUrls, ",", DefaultElasticsearchUrls)
FatalIf(t, !slicekit.StringSliceEqual(configElasticsearchUrls(), envEsUrls), "should get from env variable")
}
func TestGetKafkaGroupID(t *testing.T) {
FatalIf(t, configKafkaGroupId() != DefaultKafkaGroupID, "should return default ")
os.Setenv(EnvKafkaGroupID, "some-group-id")
defer os.Clearenv()
FatalIf(t, configKafkaGroupId() != "some-group-id", "should get from env variable")
}
// TestGetPushMetricUrl checks default and env override for the push-metric URL.
func TestGetPushMetricUrl(t *testing.T) {
	FatalIf(t, configPushMetricUrl() != DefaultPushMetricUrl, "should return default ")
	os.Setenv(EnvPushMetricUrl, "http://some-push-metric")
	// Unset only our variable instead of clearing the whole environment.
	defer os.Unsetenv(EnvPushMetricUrl)
	FatalIf(t, configPushMetricUrl() != "http://some-push-metric", "should get from env variable")
}
// TestGetPushMetricInterval checks default and env override for the
// push-metric interval.
func TestGetPushMetricInterval(t *testing.T) {
	FatalIf(t, configPushMetricInterval() != DefaultPushMetricInterval, "should return default ")
	os.Setenv(EnvPushMetricInterval, "22s")
	// Unset only our variable instead of clearing the whole environment.
	defer os.Unsetenv(EnvPushMetricInterval)
	FatalIf(t, configPushMetricInterval() != "22s", "should get from env variable")
}
// TestGetProducerAddressGrpc checks default and env override for the gRPC
// producer address.
func TestGetProducerAddressGrpc(t *testing.T) {
	FatalIf(t, configProducerAddressGrpc() != DefaultProducerAddressGrpc, "should return default ")
	os.Setenv(EnvProducerAddressGrpc, ":12345")
	// Unset only our variable instead of clearing the whole environment.
	defer os.Unsetenv(EnvProducerAddressGrpc)
	FatalIf(t, configProducerAddressGrpc() != ":12345", "should get from env variable")
}
// TestGetProducerAddressRest checks default and env override for the REST
// producer address.
func TestGetProducerAddressRest(t *testing.T) {
	FatalIf(t, configProducerAddressRest() != DefaultProducerAddressRest, "should return default ")
	os.Setenv(EnvProducerAddressRest, ":12345")
	// Unset only our variable instead of clearing the whole environment.
	defer os.Unsetenv(EnvProducerAddressRest)
	FatalIf(t, configProducerAddressRest() != ":12345", "should get from env variable")
}
// TestGetProducerMaxRetry checks default and env override for the producer
// retry limit.
func TestGetProducerMaxRetry(t *testing.T) {
	FatalIf(t, configProducerMaxRetry() != DefaultProducerMaxRetry, "should return default ")
	os.Setenv(EnvProducerMaxRetry, "989")
	// Unset only our variable instead of clearing the whole environment.
	defer os.Unsetenv(EnvProducerMaxRetry)
	FatalIf(t, configProducerMaxRetry() != 989, "should get from env variable")
}
// TestGetProducerMaxTPS checks default and env override for the producer TPS
// limit.
func TestGetProducerMaxTPS(t *testing.T) {
	FatalIf(t, configProducerMaxTPS() != DefaultProducerMaxTPS, "should return default ")
	os.Setenv(EnvProducerMaxTPS, "222")
	// Unset only our variable instead of clearing the whole environment.
	defer os.Unsetenv(EnvProducerMaxTPS)
	FatalIf(t, configProducerMaxTPS() != 222, "should get from env variable")
}
// TestConfigConsulKafkaName checks default and env override for the consul
// kafka service name.
func TestConfigConsulKafkaName(t *testing.T) {
	FatalIf(t, configConsulKafkaName() != DefaultConsulKafkaName, "should return default ")
	os.Setenv(EnvConsulKafkaName, "some-kafka-name")
	// Unset only our variable instead of clearing the whole environment.
	defer os.Unsetenv(EnvConsulKafkaName)
	FatalIf(t, configConsulKafkaName() != "some-kafka-name", "should get from env variable")
}
// TestConfigKafkaTopicSuffix checks default and env override for the kafka
// topic suffix.
func TestConfigKafkaTopicSuffix(t *testing.T) {
	FatalIf(t, configKafkaTopicSuffix() != DefaultKafkaTopicSuffix, "should return default ")
	os.Setenv(EnvKafkaTopicSuffix, "some-topic-suffix")
	// Unset only our variable instead of clearing the whole environment.
	defer os.Unsetenv(EnvKafkaTopicSuffix)
	FatalIf(t, configKafkaTopicSuffix() != "some-topic-suffix", "should get from env variable")
}
// TestConfigNewTopicEventName checks default and env override for the
// new-topic event name.
func TestConfigNewTopicEventName(t *testing.T) {
	FatalIf(t, configNewTopicEvent() != DefaultNewTopicEventName, "should return default")
	os.Setenv(EnvNewTopicEventName, "some-new-topic-event")
	// Unset only our variable instead of clearing the whole environment.
	defer os.Unsetenv(EnvNewTopicEventName)
	FatalIf(t, configNewTopicEvent() != "some-new-topic-event", "should get from env variable")
}
// TestGetConsumerElasticsearchRetrierInterval checks default and env override
// for the elasticsearch retrier interval.
func TestGetConsumerElasticsearchRetrierInterval(t *testing.T) {
	FatalIf(t, configElasticsearchRetrierInterval() != DefaultElasticsearchRetrierInterval, "should return default ")
	os.Setenv(EnvConsumerElasticsearchRetrierInterval, "30s")
	// Unset only our variable instead of clearing the whole environment.
	defer os.Unsetenv(EnvConsumerElasticsearchRetrierInterval)
	FatalIf(t, configElasticsearchRetrierInterval() != "30s", "should get from env variable")
}
// TestConfigConsumerGroupSessionTimeout checks default and env override for
// the consumer-group session timeout.
func TestConfigConsumerGroupSessionTimeout(t *testing.T) {
	FatalIf(t, configConsumerGroupSessionTimeout() != DefaultConsumerGroupSessionTimeout, "should return default ")
	os.Setenv(EnvConsumerGroupSessionTimeout, "100")
	// Unset only our variable instead of clearing the whole environment.
	defer os.Unsetenv(EnvConsumerGroupSessionTimeout)
	FatalIf(t, configConsumerGroupSessionTimeout() != 100, "should get from env variable")
}
// TestConfigConsumerGroupHeartbeatInterval checks default and env override for
// the consumer-group heartbeat interval.
func TestConfigConsumerGroupHeartbeatInterval(t *testing.T) {
	FatalIf(t, configConsumerGroupHeartbeatInterval() != DefaultConsumerGroupHeartbeatInterval, "should return default ")
	os.Setenv(EnvConsumerGroupHeartbeatInterval, "30")
	// Unset only our variable instead of clearing the whole environment.
	defer os.Unsetenv(EnvConsumerGroupHeartbeatInterval)
	FatalIf(t, configConsumerGroupHeartbeatInterval() != 30, "should get from env variable")
}
// TestConfigConsumerMaxProcessingTime checks default and env override for the
// consumer max processing time.
func TestConfigConsumerMaxProcessingTime(t *testing.T) {
	FatalIf(t, configConsumerMaxProcessingTime() != DefaultConsumerMaxProcessingTime, "should return default ")
	os.Setenv(EnvConsumerMaxProcessingTime, "600")
	// Unset only our variable instead of clearing the whole environment.
	defer os.Unsetenv(EnvConsumerMaxProcessingTime)
	FatalIf(t, configConsumerMaxProcessingTime() != 600, "should get from env variable")
}
|
package key
import "github.com/giantswarm/microerror"
var wrongTypeError = µerror.Error{
Kind: "wrongTypeError",
}
// IsWrongTypeError asserts wrongTypeError.
func IsWrongTypeError(err error) bool {
return microerror.Cause(err) == wrongTypeError
}
var malformedCloudConfigKeyError = µerror.Error{
Kind: "malformedCloudConfigKeyError",
}
// IsMalformedCloudConfigKey asserts malformedCloudConfigKeyError.
func IsMalformedCloudConfigKey(err error) bool {
return microerror.Cause(err) == malformedCloudConfigKeyError
}
var missingCloudConfigKeyError = µerror.Error{
Kind: "missingCloudConfigKeyError",
}
// IsMissingCloudConfigKey asserts missingCloudConfigKeyError.
func IsMissingCloudConfigKey(err error) bool {
return microerror.Cause(err) == missingCloudConfigKeyError
}
var notFoundError = µerror.Error{
Kind: "notFoundError",
}
// IsNotFound asserts notFoundError.
func IsNotFound(err error) bool {
return microerror.Cause(err) == notFoundError
}
var invalidConfigError = µerror.Error{
Kind: "invalidConfigError",
}
// IsInvalidConfig asserts invalidConfigError.
func IsInvalidConfig(err error) bool {
return microerror.Cause(err) == invalidConfigError
}
|
package main
import (
"fmt"
"time"
)
// increment treats the one-slot buffered channel as a binary semaphore:
// it acquires the slot, bumps the shared counter, then releases the slot.
func increment(sem chan bool, counter *int) {
	sem <- true // acquire: blocks while another goroutine holds the slot
	*counter++
	<-sem // release the slot for the next goroutine
}
func main() {
	// A buffered channel of capacity 1 acts as a mutex guarding num across
	// the goroutines started below.
	pipline := make(chan bool, 1)
	num := 0
	for i := 0; i < 1000; i++ {
		go increment(pipline, &num)
	}
	// NOTE(review): sleeping one second is a fragile way to wait for the 1000
	// goroutines to finish; a sync.WaitGroup would make completion
	// deterministic — TODO confirm and fix.
	time.Sleep(time.Second)
	fmt.Println("num 的值 : ", num)
}
|
// [_命令行参数_](http://en.wikipedia.org/wiki/Command-line_interface#Arguments)
// 是指定程序运行参数的一个常见方式。例如,`go run hello.go`,
// 程序 `go` 使用了 `run` 和 `hello.go` 两个参数。
package main
import "os"
import "fmt"
func main() {
	// os.Args provides access to the raw command-line arguments. Note that
	// the first element is the path to the program itself, and os.Args[1:]
	// holds the program's actual arguments.
	argsWithProg := os.Args
	argsWithoutProg := os.Args[1:]

	// Individual arguments can be read with ordinary indexing.
	// NOTE(review): this panics if fewer than 4 elements are present —
	// acceptable for a tutorial example, but callers must pass at least
	// three arguments.
	arg := os.Args[3]

	fmt.Println(argsWithProg)
	fmt.Println(argsWithoutProg)
	fmt.Println(arg)
}
|
package dao
import "github.com/stretchr/testify/mock"
import _ "github.com/lib/pq"
// MockSession is a testify-based mock of the dao session; expected calls and
// canned return values are registered through the embedded mock.Mock.
type MockSession struct {
	mock.Mock
}
// LoadAgent returns the mocked agent and error registered for id. A return
// value may be supplied either as a function of id or as a concrete value.
func (_m *MockSession) LoadAgent(id string) (*Agent, error) {
	ret := _m.Called(id)

	var agent *Agent
	if fn, ok := ret.Get(0).(func(string) *Agent); ok {
		agent = fn(id)
	} else if v := ret.Get(0); v != nil {
		agent = v.(*Agent)
	}

	var loadErr error
	if fn, ok := ret.Get(1).(func(string) error); ok {
		loadErr = fn(id)
	} else {
		loadErr = ret.Error(1)
	}
	return agent, loadErr
}
// LoadLoadBalancer returns the mocked load balancer and error registered for
// id, supplied either as a function of id or as a concrete value.
func (_m *MockSession) LoadLoadBalancer(id string) (*LoadBalancer, error) {
	ret := _m.Called(id)

	var lb *LoadBalancer
	if fn, ok := ret.Get(0).(func(string) *LoadBalancer); ok {
		lb = fn(id)
	} else if v := ret.Get(0); v != nil {
		lb = v.(*LoadBalancer)
	}

	var loadErr error
	if fn, ok := ret.Get(1).(func(string) error); ok {
		loadErr = fn(id)
	} else {
		loadErr = ret.Error(1)
	}
	return lb, loadErr
}
// LoadLoadBalancers returns the mocked load balancer list and error,
// supplied either as a zero-arg function or as concrete values.
func (_m *MockSession) LoadLoadBalancers() ([]LoadBalancer, error) {
	ret := _m.Called()

	var lbs []LoadBalancer
	if fn, ok := ret.Get(0).(func() []LoadBalancer); ok {
		lbs = fn()
	} else if v := ret.Get(0); v != nil {
		lbs = v.([]LoadBalancer)
	}

	var loadErr error
	if fn, ok := ret.Get(1).(func() error); ok {
		loadErr = fn()
	} else {
		loadErr = ret.Error(1)
	}
	return lbs, loadErr
}
// LoadBalancerAgents returns the mocked agent list and error registered for
// id, supplied either as a function of id or as concrete values.
func (_m *MockSession) LoadBalancerAgents(id string) ([]Agent, error) {
	ret := _m.Called(id)

	var agents []Agent
	if fn, ok := ret.Get(0).(func(string) []Agent); ok {
		agents = fn(id)
	} else if v := ret.Get(0); v != nil {
		agents = v.([]Agent)
	}

	var loadErr error
	if fn, ok := ret.Get(1).(func(string) error); ok {
		loadErr = fn(id)
	} else {
		loadErr = ret.Error(1)
	}
	return agents, loadErr
}
// NewAgent returns the mocked *Agent configured for this call, supplied
// either as a zero-arg function or as a concrete value.
func (_m *MockSession) NewAgent() *Agent {
	ret := _m.Called()

	var agent *Agent
	if fn, ok := ret.Get(0).(func() *Agent); ok {
		agent = fn()
	} else if v := ret.Get(0); v != nil {
		agent = v.(*Agent)
	}
	return agent
}
// NewLoadBalancer returns the mocked *LoadBalancer configured for this call,
// supplied either as a zero-arg function or as a concrete value.
func (_m *MockSession) NewLoadBalancer() *LoadBalancer {
	ret := _m.Called()

	var lb *LoadBalancer
	if fn, ok := ret.Get(0).(func() *LoadBalancer); ok {
		lb = fn()
	} else if v := ret.Get(0); v != nil {
		lb = v.(*LoadBalancer)
	}
	return lb
}
// FindUser returns the mocked user and error registered for id, supplied
// either as a function of id or as concrete values.
func (_m *MockSession) FindUser(id string) (*User, error) {
	ret := _m.Called(id)

	var user *User
	if fn, ok := ret.Get(0).(func(string) *User); ok {
		user = fn(id)
	} else if v := ret.Get(0); v != nil {
		user = v.(*User)
	}

	var findErr error
	if fn, ok := ret.Get(1).(func(string) error); ok {
		findErr = fn(id)
	} else {
		findErr = ret.Error(1)
	}
	return user, findErr
}
// NewUser returns the mocked *User configured for this call, supplied either
// as a zero-arg function or as a concrete value.
func (_m *MockSession) NewUser() *User {
	ret := _m.Called()

	var user *User
	if fn, ok := ret.Get(0).(func() *User); ok {
		user = fn()
	} else if v := ret.Get(0); v != nil {
		user = v.(*User)
	}
	return user
}
// SaveLoadBalancer returns the mocked error registered for persisting _a0.
func (_m *MockSession) SaveLoadBalancer(_a0 *LoadBalancer) error {
	ret := _m.Called(_a0)

	if fn, ok := ret.Get(0).(func(*LoadBalancer) error); ok {
		return fn(_a0)
	}
	return ret.Error(0)
}
// SaveAgent returns the mocked error registered for persisting _a0.
func (_m *MockSession) SaveAgent(_a0 *Agent) error {
	ret := _m.Called(_a0)

	if fn, ok := ret.Get(0).(func(*Agent) error); ok {
		return fn(_a0)
	}
	return ret.Error(0)
}
|
package main
import (
"fmt"
"./recentservers"
"./servers"
)
// main loads the configured game servers, points the recent-servers store at
// its config file, and prints the recent server for a sample player.
func main() {
	// Renamed from "servers" to avoid shadowing the imported servers package.
	serverList, err := servers.LoadServers("config/servers.json")
	if err != nil {
		panic(err)
	}
	err = recentservers.SetConfigFile("config/recent_servers.json")
	if err != nil {
		panic(err)
	}
	recentServer, err := recentservers.GetRecentServer("Player-name", serverList[0].Name)
	if err != nil {
		panic(err)
	}
	fmt.Println(recentServer)
}
|
package workflow
import (
"time"
"k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
// ClientConfig mirrors the subset of k8s.io/client-go rest.Config fields used
// to build a workflow client against the Kubernetes apiserver.
type ClientConfig struct {
	// Host must be a host string, a host:port pair, or a URL to the base of the apiserver.
	// If a URL is given then the (optional) Path of that URL represents a prefix that must
	// be appended to all request URIs used to access the apiserver. This allows a frontend
	// proxy to easily relocate all of the apiserver endpoints.
	Host string
	// APIPath is a sub-path that points to an API root.
	APIPath string

	// ContentConfig contains settings that affect how objects are transformed when
	// sent to the server.
	rest.ContentConfig

	// KubeService requires Basic authentication.
	Username string
	Password string

	// KubeService requires Bearer authentication. This client will not attempt to use
	// refresh tokens for an OAuth2 flow.
	// TODO: demonstrate an OAuth2 compatible client.
	BearerToken string

	// Impersonate is the configuration that RESTClient will use for impersonation.
	Impersonate rest.ImpersonationConfig

	// AuthProvider selects a pluggable auth provider from clientcmd.
	AuthProvider *clientcmdapi.AuthProviderConfig

	// TLSClientConfig contains settings to enable transport layer security.
	rest.TLSClientConfig

	// UserAgent is an optional field that specifies the caller of this request.
	UserAgent string

	// QPS indicates the maximum QPS to the master from this client.
	// If it's zero, the created RESTClient will use DefaultQPS: 5
	QPS float32
	// Burst is the maximum burst for throttle.
	// If it's zero, the created RESTClient will use DefaultBurst: 10.
	Burst int

	// Timeout is the maximum length of time to wait before giving up on a
	// server request. A value of zero means no timeout.
	Timeout time.Duration
}
|
package universal_multizone
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/types"
"github.com/kumahq/kuma/pkg/config/core"
. "github.com/kumahq/kuma/test/e2e/trafficroute/testutil"
. "github.com/kumahq/kuma/test/framework"
)
// KumaMultizone runs the multizone traffic-route e2e suite: one global control
// plane (Kuma5) plus two zones (Kuma3 with the demo client and echo-v1, Kuma4
// with echo-v2..v4 and another-test-server) connected via zone ingresses.
func KumaMultizone() {
	// meshMTLSOn renders a Mesh with builtin mTLS enabled and locality-aware
	// load balancing toggled by localityAware ("true"/"false").
	var meshMTLSOn = func(mesh, localityAware string) string {
		return fmt.Sprintf(`
type: Mesh
name: %s
mtls:
  enabledBackend: ca-1
  backends:
  - name: ca-1
    type: builtin
routing:
  localityAwareLoadBalancing: %s
`, mesh, localityAware)
	}

	const defaultMesh = "default"

	var global, zone1, zone2 Cluster
	var optsGlobal, optsZone1, optsZone2 = KumaUniversalDeployOpts, KumaUniversalDeployOpts, KumaUniversalDeployOpts

	E2EBeforeSuite(func() {
		clusters, err := NewUniversalClusters(
			[]string{Kuma3, Kuma4, Kuma5},
			Verbose)
		Expect(err).ToNot(HaveOccurred())

		// Global
		global = clusters.GetCluster(Kuma5)
		err = NewClusterSetup().
			Install(Kuma(core.Global, optsGlobal...)).
			Install(YamlUniversal(meshMTLSOn(defaultMesh, "false"))).
			Setup(global)
		Expect(err).ToNot(HaveOccurred())
		err = global.VerifyKuma()
		Expect(err).ToNot(HaveOccurred())

		globalCP := global.GetKuma()

		testServerToken, err := globalCP.GenerateDpToken(defaultMesh, "test-server")
		Expect(err).ToNot(HaveOccurred())
		anotherTestServerToken, err := globalCP.GenerateDpToken(defaultMesh, "another-test-server")
		Expect(err).ToNot(HaveOccurred())
		demoClientToken, err := globalCP.GenerateDpToken(defaultMesh, "demo-client")
		Expect(err).ToNot(HaveOccurred())

		// Cluster 1
		zone1 = clusters.GetCluster(Kuma3)
		optsZone1 = append(optsZone1, WithGlobalAddress(globalCP.GetKDSServerAddress()))
		ingressTokenKuma3, err := globalCP.GenerateZoneIngressToken(Kuma3)
		Expect(err).ToNot(HaveOccurred())
		err = NewClusterSetup().
			Install(Kuma(core.Zone, optsZone1...)).
			Install(DemoClientUniversal(AppModeDemoClient, defaultMesh, demoClientToken, WithTransparentProxy(true), WithConcurrency(8))).
			Install(IngressUniversal(ingressTokenKuma3)).
			Install(TestServerUniversal("dp-echo-1", defaultMesh, testServerToken,
				WithArgs([]string{"echo", "--instance", "echo-v1"}),
				WithServiceVersion("v1"),
			)).
			Setup(zone1)
		Expect(err).ToNot(HaveOccurred())
		err = zone1.VerifyKuma()
		Expect(err).ToNot(HaveOccurred())

		// Cluster 2
		zone2 = clusters.GetCluster(Kuma4)
		optsZone2 = append(optsZone2, WithGlobalAddress(globalCP.GetKDSServerAddress()))
		ingressTokenKuma4, err := globalCP.GenerateZoneIngressToken(Kuma4)
		Expect(err).ToNot(HaveOccurred())
		err = NewClusterSetup().
			Install(Kuma(core.Zone, optsZone2...)).
			Install(TestServerUniversal("dp-echo-2", defaultMesh, testServerToken,
				WithArgs([]string{"echo", "--instance", "echo-v2"}),
				WithServiceVersion("v2"),
			)).
			Install(TestServerUniversal("dp-echo-3", defaultMesh, testServerToken,
				WithArgs([]string{"echo", "--instance", "echo-v3"}),
				WithServiceVersion("v3"),
			)).
			Install(TestServerUniversal("dp-echo-4", defaultMesh, testServerToken,
				WithArgs([]string{"echo", "--instance", "echo-v4"}),
				WithServiceVersion("v4"),
			)).
			Install(TestServerUniversal("dp-another-test", defaultMesh, anotherTestServerToken,
				WithArgs([]string{"echo", "--instance", "another-test-server"}),
				WithServiceName("another-test-server"),
			)).
			Install(IngressUniversal(ingressTokenKuma4)).
			Setup(zone2)
		Expect(err).ToNot(HaveOccurred())
		err = zone2.VerifyKuma()
		Expect(err).ToNot(HaveOccurred())
	})

	E2EAfterEach(func() {
		// remove all TrafficRoutes
		items, err := global.GetKumactlOptions().KumactlList("traffic-routes", "default")
		Expect(err).ToNot(HaveOccurred())
		for _, item := range items {
			if item == "route-all-default" {
				continue
			}
			err := global.GetKumactlOptions().KumactlDelete("traffic-route", item, "default")
			Expect(err).ToNot(HaveOccurred())
		}

		// reapply Mesh with localityawareloadbalancing off
		// BUG FIX: YamlUniversal returns an InstallFunc; previously it was
		// constructed but never executed against the global cluster, so the
		// Mesh was never actually reset between tests.
		Expect(YamlUniversal(meshMTLSOn(defaultMesh, "false"))(global)).To(Succeed())
	})

	E2EAfterSuite(func() {
		Expect(zone1.DeleteKuma(optsZone1...)).To(Succeed())
		Expect(zone1.DismissCluster()).To(Succeed())

		Expect(zone2.DeleteKuma(optsZone2...)).To(Succeed())
		Expect(zone2.DismissCluster()).To(Succeed())

		Expect(global.DeleteKuma(optsGlobal...)).To(Succeed())
		Expect(global.DismissCluster()).To(Succeed())
	})

	It("should access all instances of the service", func() {
		const trafficRoute = `
type: TrafficRoute
name: three-way-route
mesh: default
sources:
  - match:
      kuma.io/service: demo-client
destinations:
  - match:
      kuma.io/service: test-server
conf:
  loadBalancer:
    roundRobin: {}
  split:
    - weight: 1
      destination:
        kuma.io/service: test-server
        version: v1
    - weight: 1
      destination:
        kuma.io/service: test-server
        version: v2
    - weight: 1
      destination:
        kuma.io/service: test-server
        version: v4
`
		Expect(YamlUniversal(trafficRoute)(global)).To(Succeed())

		Eventually(func() (map[string]int, error) {
			return CollectResponsesByInstance(zone1, "demo-client", "test-server.mesh")
		}, "30s", "500ms").Should(
			And(
				HaveLen(3),
				HaveKeyWithValue(MatchRegexp(`.*echo-v1.*`), Not(BeNil())),
				HaveKeyWithValue(MatchRegexp(`.*echo-v2.*`), Not(BeNil())),
				Not(HaveKeyWithValue(MatchRegexp(`.*echo-v3.*`), Not(BeNil()))),
				HaveKeyWithValue(MatchRegexp(`.*echo-v4.*`), Not(BeNil())),
			),
		)
	})

	It("should route 100 percent of the traffic to the different service", func() {
		const trafficRoute = `
type: TrafficRoute
name: route-echo-to-backend
mesh: default
sources:
  - match:
      kuma.io/service: demo-client
destinations:
  - match:
      kuma.io/service: test-server
conf:
  loadBalancer:
    roundRobin: {}
  destination:
    kuma.io/service: another-test-server
`
		Expect(YamlUniversal(trafficRoute)(global)).To(Succeed())

		Eventually(func() (map[string]int, error) {
			return CollectResponsesByInstance(zone1, "demo-client", "test-server.mesh")
		}, "30s", "500ms").Should(
			And(
				HaveLen(1),
				HaveKeyWithValue(Equal(`another-test-server`), Not(BeNil())),
			),
		)
	})

	It("should route split traffic between the versions with 20/80 ratio", func() {
		v1Weight := 80
		v2Weight := 20

		trafficRoute := fmt.Sprintf(`
type: TrafficRoute
name: route-20-80-split
mesh: default
sources:
  - match:
      kuma.io/service: demo-client
destinations:
  - match:
      kuma.io/service: test-server
conf:
  loadBalancer:
    roundRobin: {}
  split:
    - weight: %d
      destination:
        kuma.io/service: test-server
        version: v1
    - weight: %d
      destination:
        kuma.io/service: test-server
        version: v2
`, v1Weight, v2Weight)
		Expect(YamlUniversal(trafficRoute)(global)).To(Succeed())

		Eventually(func() (map[string]int, error) {
			return CollectResponsesByInstance(zone1, "demo-client", "test-server.mesh", WithNumberOfRequests(100))
		}, "30s", "500ms").Should(
			And(
				HaveLen(2),
				HaveKeyWithValue(MatchRegexp(`.*echo-v1.*`), ApproximatelyEqual(v1Weight, 10)),
				HaveKeyWithValue(MatchRegexp(`.*echo-v2.*`), ApproximatelyEqual(v2Weight, 10)),
			),
		)
	})

	Context("HTTP routing", func() {
		HaveOnlyResponseFrom := func(response string) types.GomegaMatcher {
			return And(
				HaveLen(1),
				HaveKeyWithValue(MatchRegexp(`.*`+response+`.*`), Not(BeNil())),
			)
		}

		It("should route matching by path", func() {
			const trafficRoute = `
type: TrafficRoute
name: route-by-path
mesh: default
sources:
  - match:
      kuma.io/service: demo-client
destinations:
  - match:
      kuma.io/service: test-server
conf:
  http:
    - match:
        path:
          prefix: /version1
      destination:
        kuma.io/service: test-server
        version: v1
    - match:
        path:
          exact: /version2
      destination:
        kuma.io/service: test-server
        version: v2
    - match:
        path:
          regex: "^/version3$"
      destination:
        kuma.io/service: test-server
        version: v3
  loadBalancer:
    roundRobin: {}
  destination:
    kuma.io/service: test-server
    version: v4
`
			Expect(YamlUniversal(trafficRoute)(global)).To(Succeed())

			Eventually(func() (map[string]int, error) {
				return CollectResponsesByInstance(zone1, "demo-client", "test-server.mesh/version1")
			}, "30s", "500ms").Should(HaveOnlyResponseFrom("echo-v1"))
			Eventually(func() (map[string]int, error) {
				return CollectResponsesByInstance(zone1, "demo-client", "test-server.mesh/version2")
			}, "30s", "500ms").Should(HaveOnlyResponseFrom("echo-v2"))
			Eventually(func() (map[string]int, error) {
				return CollectResponsesByInstance(zone1, "demo-client", "test-server.mesh/version3")
			}, "30s", "500ms").Should(HaveOnlyResponseFrom("echo-v3"))
			Eventually(func() (map[string]int, error) {
				return CollectResponsesByInstance(zone1, "demo-client", "test-server.mesh")
			}, "30s", "500ms").Should(HaveOnlyResponseFrom("echo-v4"))
		})

		It("should same splits with a different weights", func() {
			const trafficRoute = `
type: TrafficRoute
name: two-splits
mesh: default
sources:
  - match:
      kuma.io/service: demo-client
destinations:
  - match:
      kuma.io/service: test-server
conf:
  http:
    - match:
        path:
          prefix: /split
      split:
        - weight: 50
          destination:
            kuma.io/service: test-server
            version: v1
        - weight: 50
          destination:
            kuma.io/service: test-server
            version: v2
  loadBalancer:
    roundRobin: {}
  split:
    - weight: 20
      destination:
        kuma.io/service: test-server
        version: v1
    - weight: 80
      destination:
        kuma.io/service: test-server
        version: v2
`
			Expect(YamlUniversal(trafficRoute)(global)).To(Succeed())

			Eventually(func() (map[string]int, error) {
				return CollectResponsesByInstance(zone1, "demo-client", "test-server.mesh/split", WithNumberOfRequests(10))
			}, "30s", "500ms").Should(
				And(
					HaveLen(2),
					HaveKeyWithValue(MatchRegexp(`.*echo-v1.*`), ApproximatelyEqual(5, 1)),
					HaveKeyWithValue(MatchRegexp(`.*echo-v2.*`), ApproximatelyEqual(5, 1)),
				),
			)

			Eventually(func() (map[string]int, error) {
				return CollectResponsesByInstance(zone1, "demo-client", "test-server.mesh", WithNumberOfRequests(10))
			}, "30s", "500ms").Should(
				And(
					HaveLen(2),
					HaveKeyWithValue(MatchRegexp(`.*echo-v1.*`), ApproximatelyEqual(2, 1)),
					HaveKeyWithValue(MatchRegexp(`.*echo-v2.*`), ApproximatelyEqual(8, 1)),
				),
			)
		})
	})

	Context("locality aware loadbalancing", func() {
		It("should loadbalance all requests equally by default", func() {
			Eventually(func() (map[string]int, error) {
				return CollectResponsesByInstance(zone1, "demo-client", "test-server.mesh/split", WithNumberOfRequests(40))
			}, "30s", "500ms").Should(
				And(
					HaveLen(4),
					HaveKeyWithValue(MatchRegexp(`.*echo-v1.*`), Not(BeNil())),
					HaveKeyWithValue(MatchRegexp(`.*echo-v2.*`), Not(BeNil())),
					HaveKeyWithValue(MatchRegexp(`.*echo-v3.*`), Not(BeNil())),
					HaveKeyWithValue(MatchRegexp(`.*echo-v4.*`), Not(BeNil())),
					// todo(jakubdyszkiewicz) uncomment when https://github.com/kumahq/kuma/issues/2563 is fixed
					// HaveKeyWithValue(MatchRegexp(`.*echo-v1.*`), ApproximatelyEqual(10, 1)),
					// HaveKeyWithValue(MatchRegexp(`.*echo-v2.*`), ApproximatelyEqual(10, 1)),
					// HaveKeyWithValue(MatchRegexp(`.*echo-v3.*`), ApproximatelyEqual(10, 1)),
					// HaveKeyWithValue(MatchRegexp(`.*echo-v4.*`), ApproximatelyEqual(10, 1)),
				),
			)
		})

		It("should keep the request in the zone when locality aware loadbalancing is enabled", func() {
			// given
			Expect(YamlUniversal(meshMTLSOn(defaultMesh, "true"))(global)).To(Succeed())

			Eventually(func() (map[string]int, error) {
				return CollectResponsesByInstance(zone1, "demo-client", "test-server.mesh")
			}, "30s", "500ms").Should(
				And(
					HaveLen(1),
					HaveKeyWithValue(MatchRegexp(`.*echo-v1.*`), Not(BeNil())),
				),
			)
		})
	})
}
|
package main
import "fmt"
// produto describes a store item: its name, base price, and discount rate
// (a fraction between 0 and 1).
type produto struct {
	nome     string
	preco    float64
	desconto float64
}

// precoComDesconto is a method (a function with a value receiver) that
// returns the price after applying the discount rate.
func (prod produto) precoComDesconto() float64 {
	fator := 1 - prod.desconto
	return prod.preco * fator
}
// main demonstrates struct construction with named and positional fields and
// calls the discount method on each value.
func main() {
	produto1 := produto{
		nome:     "lápis",
		preco:    0.5,
		desconto: 0.01,
	}
	fmt.Println(produto1, produto1.precoComDesconto())

	produto2 := produto{"Notebook", 5000, 0.1}
	fmt.Println(produto2, produto2.precoComDesconto())
}
|
package main
import (
"fmt"
"os"
"github.com/giantswarm/conair/btrfs"
)
// cmdCommit is the "commit" subcommand: it snapshots a container's filesystem
// into an image via runCommit.
var cmdCommit = &Command{
	Name:        "commit",
	Description: "Commit a container",
	Summary:     "Commit a container",
	Run:         runCommit,
}
// runCommit snapshots the container named in args[0] into an image. The image
// path defaults to the container name unless args[1] is given. It returns a
// process exit code: 0 on success, 1 on failure.
func runCommit(args []string) (exit int) {
	if len(args) < 1 {
		fmt.Fprintln(os.Stderr, "Container name missing.")
		return 1
	}
	container := args[0]
	// Containers live under the ".#<name>" btrfs subvolume naming scheme.
	containerPath := fmt.Sprintf(".#%s", container)

	// Commit to an explicit image path when provided, else reuse the container name.
	var imagePath string
	if len(args) < 2 {
		imagePath = container
	} else {
		imagePath = args[1]
	}

	// BUG FIX: the error from btrfs.Init was previously discarded with
	// `fs, _ :=`, so a failed init produced an unusable handle below.
	fs, err := btrfs.Init(home)
	if err != nil {
		fmt.Fprintln(os.Stderr, "Couldn't initialize btrfs filesystem.", err)
		return 1
	}
	if err := fs.Snapshot(containerPath, imagePath, true); err != nil {
		fmt.Fprintln(os.Stderr, "Couldn't create snapshot of container.", err)
		return 1
	}
	return 0
}
|
package envoyconfig
import (
envoy_config_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
envoy_config_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
envoy_extensions_filters_http_ext_authz_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/ext_authz/v3"
envoy_extensions_filters_http_lua_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/lua/v3"
envoy_extensions_filters_http_router_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3"
envoy_extensions_filters_listener_proxy_protocol_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/proxy_protocol/v3"
envoy_extensions_filters_listener_tls_inspector_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/tls_inspector/v3"
envoy_extensions_filters_network_http_connection_manager "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
envoy_extensions_filters_network_tcp_proxy_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3"
envoy_type_v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
"google.golang.org/protobuf/types/known/durationpb"
"github.com/pomerium/pomerium/pkg/protoutil"
)
// ExtAuthzFilter creates an ext authz filter.
//
// Authorization checks are forwarded over gRPC to the "pomerium-authorize"
// cluster with the supplied client timeout; transport errors map to HTTP 500.
func ExtAuthzFilter(grpcClientTimeout *durationpb.Duration) *envoy_extensions_filters_network_http_connection_manager.HttpFilter {
	grpcSvc := &envoy_config_core_v3.GrpcService{
		Timeout: grpcClientTimeout,
		TargetSpecifier: &envoy_config_core_v3.GrpcService_EnvoyGrpc_{
			EnvoyGrpc: &envoy_config_core_v3.GrpcService_EnvoyGrpc{
				ClusterName: "pomerium-authorize",
			},
		},
	}
	extAuthz := &envoy_extensions_filters_http_ext_authz_v3.ExtAuthz{
		StatusOnError: &envoy_type_v3.HttpStatus{
			Code: envoy_type_v3.StatusCode_InternalServerError,
		},
		Services: &envoy_extensions_filters_http_ext_authz_v3.ExtAuthz_GrpcService{
			GrpcService: grpcSvc,
		},
		MetadataContextNamespaces: []string{"com.pomerium.client-certificate-info"},
		TransportApiVersion:       envoy_config_core_v3.ApiVersion_V3,
	}
	return &envoy_extensions_filters_network_http_connection_manager.HttpFilter{
		Name: "envoy.filters.http.ext_authz",
		ConfigType: &envoy_extensions_filters_network_http_connection_manager.HttpFilter_TypedConfig{
			TypedConfig: protoutil.NewAny(extAuthz),
		},
	}
}
// HTTPConnectionManagerFilter creates a new HTTP connection manager filter
// wrapping the given connection manager configuration.
func HTTPConnectionManagerFilter(
	httpConnectionManager *envoy_extensions_filters_network_http_connection_manager.HttpConnectionManager,
) *envoy_config_listener_v3.Filter {
	cfg := &envoy_config_listener_v3.Filter_TypedConfig{
		TypedConfig: protoutil.NewAny(httpConnectionManager),
	}
	return &envoy_config_listener_v3.Filter{
		Name:       "envoy.filters.network.http_connection_manager",
		ConfigType: cfg,
	}
}
// HTTPRouterFilter creates a new HTTP router filter with default settings.
func HTTPRouterFilter() *envoy_extensions_filters_network_http_connection_manager.HttpFilter {
	router := &envoy_extensions_filters_http_router_v3.Router{}
	return &envoy_extensions_filters_network_http_connection_manager.HttpFilter{
		Name: "envoy.filters.http.router",
		ConfigType: &envoy_extensions_filters_network_http_connection_manager.HttpFilter_TypedConfig{
			TypedConfig: protoutil.NewAny(router),
		},
	}
}
// LuaFilter creates a lua HTTP filter with defaultSourceCode inlined as the
// default script body.
func LuaFilter(defaultSourceCode string) *envoy_extensions_filters_network_http_connection_manager.HttpFilter {
	src := &envoy_config_core_v3.DataSource{
		Specifier: &envoy_config_core_v3.DataSource_InlineString{
			InlineString: defaultSourceCode,
		},
	}
	luaCfg := &envoy_extensions_filters_http_lua_v3.Lua{
		DefaultSourceCode: src,
	}
	return &envoy_extensions_filters_network_http_connection_manager.HttpFilter{
		Name: "envoy.filters.http.lua",
		ConfigType: &envoy_extensions_filters_network_http_connection_manager.HttpFilter_TypedConfig{
			TypedConfig: protoutil.NewAny(luaCfg),
		},
	}
}
// ProxyProtocolFilter creates a new Proxy Protocol listener filter with
// default settings.
func ProxyProtocolFilter() *envoy_config_listener_v3.ListenerFilter {
	proxyProto := &envoy_extensions_filters_listener_proxy_protocol_v3.ProxyProtocol{}
	return &envoy_config_listener_v3.ListenerFilter{
		Name: "envoy.filters.listener.proxy_protocol",
		ConfigType: &envoy_config_listener_v3.ListenerFilter_TypedConfig{
			TypedConfig: protoutil.NewAny(proxyProto),
		},
	}
}
// TCPProxyFilter creates a new TCP Proxy filter forwarding to clusterName.
func TCPProxyFilter(clusterName string) *envoy_config_listener_v3.Filter {
	tcpProxy := &envoy_extensions_filters_network_tcp_proxy_v3.TcpProxy{
		StatPrefix: "acme_tls_alpn",
		ClusterSpecifier: &envoy_extensions_filters_network_tcp_proxy_v3.TcpProxy_Cluster{
			Cluster: clusterName,
		},
	}
	return &envoy_config_listener_v3.Filter{
		Name: "tcp_proxy",
		ConfigType: &envoy_config_listener_v3.Filter_TypedConfig{
			TypedConfig: protoutil.NewAny(tcpProxy),
		},
	}
}
// TLSInspectorFilter creates a new TLS inspector listener filter with default
// settings.
func TLSInspectorFilter() *envoy_config_listener_v3.ListenerFilter {
	inspector := &envoy_extensions_filters_listener_tls_inspector_v3.TlsInspector{}
	return &envoy_config_listener_v3.ListenerFilter{
		Name: "tls_inspector",
		ConfigType: &envoy_config_listener_v3.ListenerFilter_TypedConfig{
			TypedConfig: protoutil.NewAny(inspector),
		},
	}
}
|
package renter
import (
"strings"
"testing"
"gitlab.com/NebulousLabs/Sia/modules"
"gitlab.com/NebulousLabs/Sia/types"
)
// TestPDBRGouging checks that `checkPDBRGouging` is correctly detecting price
// gouging from a host. It exercises the happy path, each bandwidth-price
// bound, the combined MDM cost limit, and the zero-funds escape hatch.
func TestPDBRGouging(t *testing.T) {
	t.Parallel()

	// allowance contains only the fields necessary to test the price gouging
	hes := modules.DefaultHostExternalSettings()
	allowance := modules.Allowance{
		Funds:                     types.SiacoinPrecision.Mul64(1e3),
		MaxDownloadBandwidthPrice: hes.DownloadBandwidthPrice.Mul64(10),
		MaxUploadBandwidthPrice:   hes.UploadBandwidthPrice.Mul64(10),
	}

	// verify happy case
	pt := newDefaultPriceTable()
	err := checkPDBRGouging(pt, allowance)
	if err != nil {
		t.Fatal("unexpected price gouging failure", err)
	}

	// verify max download bandwidth price gouging
	pt = newDefaultPriceTable()
	pt.DownloadBandwidthCost = allowance.MaxDownloadBandwidthPrice.Add64(1)
	err = checkPDBRGouging(pt, allowance)
	if err == nil || !strings.Contains(err.Error(), "download bandwidth price") {
		t.Fatalf("expected download bandwidth price gouging error, instead error was '%v'", err)
	}

	// verify max upload bandwidth price gouging
	pt = newDefaultPriceTable()
	pt.UploadBandwidthCost = allowance.MaxUploadBandwidthPrice.Add64(1)
	err = checkPDBRGouging(pt, allowance)
	if err == nil || !strings.Contains(err.Error(), "upload bandwidth price") {
		t.Fatalf("expected upload bandwidth price gouging error, instead error was '%v'", err)
	}

	// update the expected download to be non zero and verify the default prices
	allowance.ExpectedDownload = 1 << 30 // 1GiB
	pt = newDefaultPriceTable()
	err = checkPDBRGouging(pt, allowance)
	if err != nil {
		t.Fatal("unexpected price gouging failure", err)
	}

	// verify gouging of MDM related costs, in order to verify if gouging
	// detection kicks in we need to ensure the cost of executing enough PDBRs
	// to fulfil the expected download exceeds the allowance

	// we do this by maxing out the upload and bandwidth costs and setting all
	// default cost components to 250 pS, note that this value is arbitrary,
	// setting those costs at 250 pS simply proved to push the price per PDBR
	// just over the allowed limit.
	//
	// Cost breakdown:
	// - cost per PDBR 266.4 mS
	// - total cost to fulfil expected download 4.365 KS
	// - reduced cost after applying downloadGougingFractionDenom: 1.091 KS
	// - exceeding the allowance of 1 KS, which is what we are after
	pt.UploadBandwidthCost = allowance.MaxUploadBandwidthPrice
	pt.DownloadBandwidthCost = allowance.MaxDownloadBandwidthPrice
	pS := types.SiacoinPrecision.MulFloat(1e-12)
	pt.InitBaseCost = pt.InitBaseCost.Add(pS.Mul64(250))
	pt.ReadBaseCost = pt.ReadBaseCost.Add(pS.Mul64(250))
	pt.MemoryTimeCost = pt.MemoryTimeCost.Add(pS.Mul64(250))
	err = checkPDBRGouging(pt, allowance)
	if err == nil || !strings.Contains(err.Error(), "combined PDBR pricing of host yields") {
		t.Fatalf("expected PDBR price gouging error, instead error was '%v'", err)
	}

	// verify these checks are ignored if the funds are 0
	allowance.Funds = types.ZeroCurrency
	err = checkPDBRGouging(pt, allowance)
	if err != nil {
		t.Fatal("unexpected price gouging failure", err)
	}
	allowance.Funds = types.SiacoinPrecision.Mul64(1e3) // reset

	// verify bumping every individual cost component to an insane value results
	// in a price gouging error
	pt = newDefaultPriceTable()
	pt.InitBaseCost = types.SiacoinPrecision.Mul64(100)
	err = checkPDBRGouging(pt, allowance)
	if err == nil || !strings.Contains(err.Error(), "combined PDBR pricing of host yields") {
		t.Fatalf("expected PDBR price gouging error, instead error was '%v'", err)
	}

	pt = newDefaultPriceTable()
	pt.ReadBaseCost = types.SiacoinPrecision
	err = checkPDBRGouging(pt, allowance)
	if err == nil || !strings.Contains(err.Error(), "combined PDBR pricing of host yields") {
		t.Fatalf("expected PDBR price gouging error, instead error was '%v'", err)
	}

	pt = newDefaultPriceTable()
	pt.ReadLengthCost = types.SiacoinPrecision
	err = checkPDBRGouging(pt, allowance)
	if err == nil || !strings.Contains(err.Error(), "combined PDBR pricing of host yields") {
		t.Fatalf("expected PDBR price gouging error, instead error was '%v'", err)
	}

	pt = newDefaultPriceTable()
	pt.MemoryTimeCost = types.SiacoinPrecision
	err = checkPDBRGouging(pt, allowance)
	if err == nil || !strings.Contains(err.Error(), "combined PDBR pricing of host yields") {
		t.Fatalf("expected PDBR price gouging error, instead error was '%v'", err)
	}
}
|
// Package sessions contains gorilla sessions cookies.
//
// MIT License
//
// Copyright (c) 2016 Angel Del Castillo
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package sessions
import (
"errors"
"log"
"net/http"
"github.com/gorilla/sessions"
"github.com/jimmy-go/qra"
)
// Package-level session state.
var (
	// store holds the cookie store; it is nil until Configure is called.
	store *sessions.CookieStore
	// errSessionNotFound is returned when a cookie value is missing or not a string.
	errSessionNotFound = errors.New("sessions: not found")
	// errInvalidUserID is returned when the stored user id is empty.
	errInvalidUserID = errors.New("sessions: invalid user id")
)

const (
	// sessionName is the cookie name under which all session values are stored.
	sessionName = "qra_example1"
)
// Configure sets gorilla sessions key. It must be called before any other
// function in this package; every other function dereferences store.
//
// Fix: an empty secret is now rejected instead of silently creating a store
// keyed on an empty byte slice, which would defeat cookie authentication.
func Configure(secret string) error {
	if secret == "" {
		return errors.New("sessions: empty secret")
	}
	store = sessions.NewCookieStore([]byte(secret))
	return nil
}
// UserToken retrieves the session token stored in the request's cookie
// session, or errSessionNotFound when no string token is present.
func UserToken(w http.ResponseWriter, r *http.Request) (string, error) {
	sess, err := store.Get(r, sessionName)
	if err != nil {
		return "", err
	}
	token, ok := sess.Values["session_token"].(string)
	if !ok {
		return "", errSessionNotFound
	}
	return token, nil
}
// SetToken stores the session token in the user's cookie session and
// persists the cookie to the response.
func SetToken(w http.ResponseWriter, r *http.Request, token string) error {
	sess, err := store.Get(r, sessionName)
	if err != nil {
		return err
	}
	sess.Values["session_token"] = token
	return sess.Save(r, w)
}
// UserID retrieves the user id stored in the request's cookie session.
// It fails with errSessionNotFound when the value is absent or not a
// string, and with errInvalidUserID when it is empty.
func UserID(w http.ResponseWriter, r *http.Request) (string, error) {
	sess, err := store.Get(r, sessionName)
	if err != nil {
		return "", err
	}
	id, ok := sess.Values["user_id"].(string)
	switch {
	case !ok:
		return "", errSessionNotFound
	case id == "":
		return "", errInvalidUserID
	}
	return id, nil
}
// SetUserID stores a non-empty user id in the user's cookie session and
// persists the cookie to the response.
func SetUserID(w http.ResponseWriter, r *http.Request, userID string) error {
	if userID == "" {
		return errInvalidUserID
	}
	sess, err := store.Get(r, sessionName)
	if err != nil {
		return err
	}
	sess.Values["user_id"] = userID
	return sess.Save(r, w)
}
// Delete expires the cookie session (MaxAge = -1) and closes the
// corresponding server-side QRA session. The expired cookie is only saved
// after qra.Close succeeds; on failure the cookie is left untouched.
//
// Fix: the raw session token is no longer written to the log — session
// tokens are credentials and must not end up in log files.
func Delete(w http.ResponseWriter, r *http.Request) error {
	session, err := store.Get(r, sessionName)
	if err != nil {
		return err
	}
	session.Options.MaxAge = -1
	username, _ := session.Values["user_id"].(string)
	token, _ := session.Values["session_token"].(string)
	log.Printf("Delete : username [%s]", username)
	ctx := Ctx(username, token)
	if err := qra.Close(ctx); err != nil {
		return err
	}
	return session.Save(r, w)
}
// Login validates user credentials against QRA and, when the identity also
// holds the "session-on:web" permission, returns the generated session token.
//
// Fix: the freshly minted session token is no longer written to the log —
// tokens are credentials and must not end up in log files.
func Login(username, password string) (string, error) {
	var token string
	ctx := Ctx(username, "")
	// identity: authentication.
	if err := qra.Authenticate(ctx, password, &token); err != nil {
		return "", err
	}
	log.Printf("Login : username [%v]", ctx.Me())
	// validation: identity permission for session on web admin.
	if err := qra.Search(ctx, nil, "session-on:web"); err != nil {
		return "", err
	}
	return token, nil
}
// Ctx returns a type that satisfies the qra.Identity interface.
//
// Fix: the token argument was previously discarded, so User.Session always
// produced an empty token; it is now carried into the User value.
func Ctx(username, token string) qra.Identity {
	return User{Username: username, Token: token}
}
// User satisfies the qra.Identity interface for a logged-in user.
type User struct {
	Username string
	Token    string
}

// Me returns the user id (the username).
func (ctx User) Me() string {
	return ctx.Username
}

// Session copies the session token into dst, which must be a *string.
//
// Fix: the original wrote `dst = ctx.Token`, assigning to the local
// parameter only — the caller never received the token. The token is now
// written through the pointer, and a non-*string dst is reported as an error.
func (ctx User) Session(dst interface{}) error {
	p, ok := dst.(*string)
	if !ok {
		return errors.New("sessions: Session dst must be *string")
	}
	*p = ctx.Token
	return nil
}
|
package main
import (
"fmt"
"github.com/codegangsta/negroni"
"net/http"
"os"
)
// main serves a single "hello" route behind negroni's classic middleware
// stack (logging, panic recovery, static files), on $PORT or 8081.
func main() {
	port := os.Getenv("PORT")
	if port == "" {
		port = "8081"
	}

	// Plain stdlib router; negroni wraps any http.Handler, so the same
	// pattern works for gin/echo/beego-style handlers as well.
	mux := http.NewServeMux()
	mux.HandleFunc("/", hello)

	n := negroni.Classic()
	n.UseHandler(mux)
	n.Run(fmt.Sprintf(":%s", port))
}
// hello writes a fixed greeting (plus trailing newline) to the client.
func hello(res http.ResponseWriter, req *http.Request) {
	fmt.Fprintln(res, "Hello from Go!")
}
// Notes: this example routes with the stdlib http.NewServeMux and wraps it
// in negroni's middleware stack — the same adapter pattern an API proxy
// would use to host any http.Handler-compatible framework (gin, echo,
// beego, iris, ...) behind a front server such as nginx.
|
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/operator-framework/api/pkg/operators"
)
const (
	// Group is the API group for package manifests.
	Group = "packages." + operators.GroupName
	// Version is the API version served by this package.
	Version = "v1"
	// PackageManifestKind is the kind registered for a single manifest.
	PackageManifestKind = "PackageManifest"
	// PackageManifestListKind is the kind registered for a manifest list.
	PackageManifestListKind = "PackageManifestList"
)

// SchemeGroupVersion is the group version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version}

// Standard scheme-registration plumbing, mirroring generated Kubernetes API code.
var (
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	localSchemeBuilder = &SchemeBuilder
	AddToScheme = localSchemeBuilder.AddToScheme
)
// Resource takes an unqualified resource and returns a Group-qualified
// GroupResource (standard helper expected by Kubernetes client machinery).
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}
// addKnownTypes adds the set of types defined in this package to the supplied
// scheme, registering both the single and the list kind, then the shared
// meta/v1 types for this group version.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypeWithName(
		SchemeGroupVersion.WithKind(PackageManifestKind),
		&PackageManifest{},
	)
	scheme.AddKnownTypeWithName(
		SchemeGroupVersion.WithKind(PackageManifestListKind),
		&PackageManifestList{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
|
package dict
import (
"bytes"
"unicode/utf8"
)
// isAnsiAsUtf8 reports whether buf is entirely valid UTF-8, i.e. a BOM-less
// file that can be decoded directly as UTF-8.
//
// Fix: the previous loop rejected the input whenever DecodeRune returned
// utf8.RuneError, but DecodeRune also returns RuneError (with size 3) for a
// legitimate U+FFFD replacement character present in the text; only
// size <= 1 signals an actual encoding error. utf8.Valid implements the
// intended check correctly.
func isAnsiAsUtf8(buf []byte) bool {
	return utf8.Valid(buf)
}
// ReadLines reads filename and splits its content into lines, sniffing the
// encoding from a leading BOM (UTF-8, UTF-16 LE, UTF-16 BE). Without a BOM,
// content that validates as UTF-8 is decoded as UTF-8, anything else as GBK.
//
// Fix: the original indexed buf[:3] / buf[:2] unconditionally, which panics
// for files shorter than the BOM; bytes.HasPrefix is length-safe and the
// BOM length is derived from the constant rather than hard-coded.
func ReadLines(filename String) []String {
	file := Open(filename.String())
	defer file.Close()
	buf := file.ReadAll()

	var text String
	switch {
	case bytes.HasPrefix(buf, BOM_UTF8):
		text = Decode(buf[len(BOM_UTF8):], CP_UTF8)
	case bytes.HasPrefix(buf, BOM_UTF16_LE):
		text = Decode(buf[len(BOM_UTF16_LE):], CP_UTF16_LE)
	case bytes.HasPrefix(buf, BOM_UTF16_BE):
		text = Decode(buf[len(BOM_UTF16_BE):], CP_UTF16_BE)
	default:
		if isAnsiAsUtf8(buf) {
			text = Decode(buf, CP_UTF8)
		} else {
			text = Decode(buf, CP_GBK)
		}
	}
	return text.SplitLines()
}
|
package binance
import (
"context"
"net/http"
)
// ListSavingsFlexibleProductsService https://binance-docs.github.io/apidocs/spot/en/#get-flexible-product-list-user_data
//
// Query builder for the flexible-savings product list; configure via the
// chained setters, then call Do.
type ListSavingsFlexibleProductsService struct {
	c        *Client
	status   string // status filter; empty = server default ("ALL")
	featured string // featured filter; empty = server default ("ALL")
	current  int64  // page number; 0 = unset (server default 1)
	size     int64  // page size; 0 = unset (server default 50)
}
// Status represent the product status ("ALL", "SUBSCRIBABLE", "UNSUBSCRIBABLE") - Default: "ALL".
// Returns the receiver so calls can be chained.
func (s *ListSavingsFlexibleProductsService) Status(status string) *ListSavingsFlexibleProductsService {
	s.status = status
	return s
}

// Featured ("ALL", "TRUE") - Default: "ALL". Returns the receiver for chaining.
func (s *ListSavingsFlexibleProductsService) Featured(featured string) *ListSavingsFlexibleProductsService {
	s.featured = featured
	return s
}

// Current query page. Default: 1, Min: 1. Returns the receiver for chaining.
func (s *ListSavingsFlexibleProductsService) Current(current int64) *ListSavingsFlexibleProductsService {
	s.current = current
	return s
}

// Size Default: 50, Max: 100. Returns the receiver for chaining.
func (s *ListSavingsFlexibleProductsService) Size(size int64) *ListSavingsFlexibleProductsService {
	s.size = size
	return s
}
// Do send request.
// It issues a signed GET to /sapi/v1/lending/daily/product/list with the
// configured filters and decodes the JSON response.
// NOTE(review): `json` is not imported in this view — presumably an alias or
// import elsewhere in the package; verify.
func (s *ListSavingsFlexibleProductsService) Do(ctx context.Context, opts ...RequestOption) ([]*SavingsFlexibleProduct, error) {
	r := &request{
		method:   http.MethodGet,
		endpoint: "/sapi/v1/lending/daily/product/list",
		secType:  secTypeSigned,
	}
	// Only non-zero fields are sent; the server applies its own defaults.
	// NOTE(review): current uses `!= 0` here, while the fixed-products Do
	// uses `!= 1` — confirm the intended "unset" sentinel.
	m := params{}
	if s.status != "" {
		m["status"] = s.status
	}
	if s.featured != "" {
		m["featured"] = s.featured
	}
	if s.current != 0 {
		m["current"] = s.current
	}
	if s.size != 0 {
		m["size"] = s.size
	}
	r.setParams(m)
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	var res []*SavingsFlexibleProduct
	err = json.Unmarshal(data, &res)
	if err != nil {
		return nil, err
	}
	return res, nil
}
// SavingsFlexibleProduct define a flexible product (Savings).
// Monetary/rate fields are kept as strings, matching the API wire format.
type SavingsFlexibleProduct struct {
	Asset                    string `json:"asset"`
	AvgAnnualInterestRate    string `json:"avgAnnualInterestRate"`
	CanPurchase              bool   `json:"canPurchase"`
	CanRedeem                bool   `json:"canRedeem"`
	DailyInterestPerThousand string `json:"dailyInterestPerThousand"`
	Featured                 bool   `json:"featured"`
	MinPurchaseAmount        string `json:"minPurchaseAmount"`
	ProductId                string `json:"productId"`
	PurchasedAmount          string `json:"purchasedAmount"`
	Status                   string `json:"status"`
	UpLimit                  string `json:"upLimit"`
	UpLimitPerUser           string `json:"upLimitPerUser"`
}
// PurchaseSavingsFlexibleProductService https://binance-docs.github.io/apidocs/spot/en/#purchase-flexible-product-user_data
type PurchaseSavingsFlexibleProductService struct {
	c         *Client
	productId string
	amount    float64
}

// ProductId represent the id of the flexible product to purchase.
// Returns the receiver for chaining.
func (s *PurchaseSavingsFlexibleProductService) ProductId(productId string) *PurchaseSavingsFlexibleProductService {
	s.productId = productId
	return s
}

// Amount is the quantity of the product to purchase. Returns the receiver for chaining.
func (s *PurchaseSavingsFlexibleProductService) Amount(amount float64) *PurchaseSavingsFlexibleProductService {
	s.amount = amount
	return s
}

// Do send request.
// It POSTs a signed purchase and returns the purchase id from the response.
// NOTE(review): if the API ever returns a JSON null body, res stays nil and
// res.PurchaseId would panic — confirm the endpoint always returns an object.
func (s *PurchaseSavingsFlexibleProductService) Do(ctx context.Context, opts ...RequestOption) (uint64, error) {
	r := &request{
		method:   http.MethodPost,
		endpoint: "/sapi/v1/lending/daily/purchase",
		secType:  secTypeSigned,
	}
	m := params{
		"productId": s.productId,
		"amount":    s.amount,
	}
	r.setParams(m)
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return 0, err
	}
	var res *PurchaseSavingsFlexibleProductResponse
	if err = json.Unmarshal(data, &res); err != nil {
		return 0, err
	}
	return res.PurchaseId, nil
}

// PurchaseSavingsFlexibleProductResponse is the response body of the purchase endpoint.
type PurchaseSavingsFlexibleProductResponse struct {
	PurchaseId uint64 `json:"purchaseId"`
}
// RedeemSavingsFlexibleProductService https://binance-docs.github.io/apidocs/spot/en/#redeem-flexible-product-user_data
type RedeemSavingsFlexibleProductService struct {
	c          *Client
	productId  string
	amount     float64
	redeemType string // "FAST" or "NORMAL"; empty = not sent
}

// ProductId represent the id of the flexible product to redeem.
// Returns the receiver for chaining.
func (s *RedeemSavingsFlexibleProductService) ProductId(productId string) *RedeemSavingsFlexibleProductService {
	s.productId = productId
	return s
}

// Amount is the quantity of the product to redeem. Returns the receiver for chaining.
func (s *RedeemSavingsFlexibleProductService) Amount(amount float64) *RedeemSavingsFlexibleProductService {
	s.amount = amount
	return s
}

// Type ("FAST", "NORMAL"). Returns the receiver for chaining.
func (s *RedeemSavingsFlexibleProductService) Type(redeemType string) *RedeemSavingsFlexibleProductService {
	s.redeemType = redeemType
	return s
}

// Do send request.
// It POSTs a signed redeem request; the endpoint returns no payload of
// interest, so only the error is propagated.
func (s *RedeemSavingsFlexibleProductService) Do(ctx context.Context, opts ...RequestOption) error {
	r := &request{
		method:   http.MethodPost,
		endpoint: "/sapi/v1/lending/daily/redeem",
		secType:  secTypeSigned,
	}
	m := params{
		"productId": s.productId,
		"amount":    s.amount,
	}
	if s.redeemType != "" {
		m["type"] = s.redeemType
	}
	r.setParams(m)
	_, err := s.c.callAPI(ctx, r, opts...)
	return err
}
// ListSavingsFixedAndActivityProductsService https://binance-docs.github.io/apidocs/spot/en/#get-fixed-and-activity-project-list-user_data
type ListSavingsFixedAndActivityProductsService struct {
	c           *Client
	asset       string
	projectType string // mandatory: "ACTIVITY" or "CUSTOMIZED_FIXED"
	status      string
	isSortAsc   bool
	sortBy      string
	current     int64
	size        int64
}

// Asset desired asset. Returns the receiver for chaining.
func (s *ListSavingsFixedAndActivityProductsService) Asset(asset string) *ListSavingsFixedAndActivityProductsService {
	s.asset = asset
	return s
}

// Type set project type ("ACTIVITY", "CUSTOMIZED_FIXED"). Returns the receiver for chaining.
func (s *ListSavingsFixedAndActivityProductsService) Type(projectType string) *ListSavingsFixedAndActivityProductsService {
	s.projectType = projectType
	return s
}

// IsSortAsc default "true". Returns the receiver for chaining.
func (s *ListSavingsFixedAndActivityProductsService) IsSortAsc(isSortAsc bool) *ListSavingsFixedAndActivityProductsService {
	s.isSortAsc = isSortAsc
	return s
}

// Status ("ALL", "SUBSCRIBABLE", "UNSUBSCRIBABLE") - default "ALL". Returns the receiver for chaining.
func (s *ListSavingsFixedAndActivityProductsService) Status(status string) *ListSavingsFixedAndActivityProductsService {
	s.status = status
	return s
}

// SortBy ("START_TIME", "LOT_SIZE", "INTEREST_RATE", "DURATION") - default "START_TIME". Returns the receiver for chaining.
func (s *ListSavingsFixedAndActivityProductsService) SortBy(sortBy string) *ListSavingsFixedAndActivityProductsService {
	s.sortBy = sortBy
	return s
}

// Current Currently querying page. Start from 1. Default:1. Returns the receiver for chaining.
func (s *ListSavingsFixedAndActivityProductsService) Current(current int64) *ListSavingsFixedAndActivityProductsService {
	s.current = current
	return s
}

// Size Default:10, Max:100. Returns the receiver for chaining.
func (s *ListSavingsFixedAndActivityProductsService) Size(size int64) *ListSavingsFixedAndActivityProductsService {
	s.size = size
	return s
}
// Do send request.
// It issues a signed GET to /sapi/v1/lending/project/list; "type" is always
// sent, the other filters only when they differ from the server defaults.
//
// Idiom fix: `s.isSortAsc != true` replaced by `!s.isSortAsc`.
// NOTE(review): isSortAsc's zero value is false, so a caller that never sets
// IsSortAsc still sends isSortAsc=false even though the API default is true —
// confirm this is intended.
func (s *ListSavingsFixedAndActivityProductsService) Do(ctx context.Context, opts ...RequestOption) ([]*SavingsFixedProduct, error) {
	r := &request{
		method:   http.MethodGet,
		endpoint: "/sapi/v1/lending/project/list",
		secType:  secTypeSigned,
	}
	m := params{
		"type": s.projectType,
	}
	if s.asset != "" {
		m["asset"] = s.asset
	}
	if s.status != "" {
		m["status"] = s.status
	}
	if !s.isSortAsc {
		m["isSortAsc"] = s.isSortAsc
	}
	if s.sortBy != "" {
		m["sortBy"] = s.sortBy
	}
	if s.current != 1 {
		m["current"] = s.current
	}
	if s.size != 10 {
		m["size"] = s.size
	}
	r.setParams(m)
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	var res []*SavingsFixedProduct
	if err = json.Unmarshal(data, &res); err != nil {
		return nil, err
	}
	return res, nil
}
// SavingsFixedProduct define a fixed product (Savings).
// Rates and lot sizes are strings, matching the API wire format.
type SavingsFixedProduct struct {
	Asset              string `json:"asset"`
	DisplayPriority    int    `json:"displayPriority"`
	Duration           int    `json:"duration"`
	InterestPerLot     string `json:"interestPerLot"`
	InterestRate       string `json:"interestRate"`
	LotSize            string `json:"lotSize"`
	LotsLowLimit       int    `json:"lotsLowLimit"`
	LotsPurchased      int    `json:"lotsPurchased"`
	LotsUpLimit        int    `json:"lotsUpLimit"`
	MaxLotsPerUser     int    `json:"maxLotsPerUser"`
	NeedKyc            bool   `json:"needKyc"`
	ProjectId          string `json:"projectId"`
	ProjectName        string `json:"projectName"`
	Status             string `json:"status"`
	Type               string `json:"type"`
	WithAreaLimitation bool   `json:"withAreaLimitation"`
}
// SavingFlexibleProductPositionsService fetches the saving flexible product positions.
type SavingFlexibleProductPositionsService struct {
	c     *Client
	asset string // optional filter; empty = all assets
}

// Asset sets the asset parameter. Returns the receiver for chaining.
func (s *SavingFlexibleProductPositionsService) Asset(asset string) *SavingFlexibleProductPositionsService {
	s.asset = asset
	return s
}

// Do send request.
// It issues a signed GET to /sapi/v1/lending/daily/token/position and
// decodes the JSON response into position values.
func (s *SavingFlexibleProductPositionsService) Do(ctx context.Context, opts ...RequestOption) ([]*SavingFlexibleProductPosition, error) {
	r := &request{
		method:   http.MethodGet,
		endpoint: "/sapi/v1/lending/daily/token/position",
		secType:  secTypeSigned,
	}
	m := params{}
	if s.asset != "" {
		m["asset"] = s.asset
	}
	r.setParams(m)
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	var res []*SavingFlexibleProductPosition
	if err = json.Unmarshal(data, &res); err != nil {
		return nil, err
	}
	return res, nil
}
// SavingFlexibleProductPosition represents a saving flexible product position.
type SavingFlexibleProductPosition struct {
	Asset                 string `json:"asset"`
	ProductId             string `json:"productId"`
	ProductName           string `json:"productName"`
	AvgAnnualInterestRate string `json:"avgAnnualInterestRate"`
	AnnualInterestRate    string `json:"annualInterestRate"`
	DailyInterestRate     string `json:"dailyInterestRate"`
	TotalInterest         string `json:"totalInterest"`
	TotalAmount           string `json:"totalAmount"`
	// NOTE(review): field name says "total purchased" but the JSON tag maps
	// "todayPurchasedAmount" — one of the two is likely wrong; confirm against
	// the API docs before relying on this field.
	TotalPurchasedAmount string `json:"todayPurchasedAmount"`
	RedeemingAmount      string `json:"redeemingAmount"`
	FreeAmount           string `json:"freeAmount"`
	FreezeAmount         string `json:"freezeAmount,omitempty"`
	LockedAmount         string `json:"lockedAmount,omitempty"`
	CanRedeem            bool   `json:"canRedeem"`
}
// SavingFixedProjectPositionsService fetches the saving fixed project positions.
type SavingFixedProjectPositionsService struct {
	c         *Client
	asset     string // optional filter
	status    string // optional: "HOLDING" or "REDEEMED"
	projectId string // optional project/activity id
}

// Asset sets the asset parameter. Returns the receiver for chaining.
func (s *SavingFixedProjectPositionsService) Asset(asset string) *SavingFixedProjectPositionsService {
	s.asset = asset
	return s
}

// Status ("HOLDING", "REDEEMED"), default will fetch all. Returns the receiver for chaining.
func (s *SavingFixedProjectPositionsService) Status(status string) *SavingFixedProjectPositionsService {
	s.status = status
	return s
}

// ProjectID sets the id of the fixed project/activity. Returns the receiver for chaining.
func (s *SavingFixedProjectPositionsService) ProjectID(projectId string) *SavingFixedProjectPositionsService {
	s.projectId = projectId
	return s
}

// Do send request.
// It issues a signed GET to /sapi/v1/lending/project/position/list with the
// configured filters and decodes the JSON response.
func (s *SavingFixedProjectPositionsService) Do(ctx context.Context, opts ...RequestOption) ([]*SavingFixedProjectPosition, error) {
	r := &request{
		method:   http.MethodGet,
		endpoint: "/sapi/v1/lending/project/position/list",
		secType:  secTypeSigned,
	}
	m := params{}
	if s.asset != "" {
		m["asset"] = s.asset
	}
	if s.status != "" {
		m["status"] = s.status
	}
	if s.projectId != "" {
		m["projectId"] = s.projectId
	}
	r.setParams(m)
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	var res []*SavingFixedProjectPosition
	if err = json.Unmarshal(data, &res); err != nil {
		return nil, err
	}
	return res, nil
}
// SavingFixedProjectPosition represents a saving fixed project position.
// Timestamps are Unix epoch values as delivered by the API.
type SavingFixedProjectPosition struct {
	Asset           string `json:"asset"`
	CanTransfer     bool   `json:"canTransfer"`
	CreateTimestamp int64  `json:"createTimestamp"`
	Duration        int64  `json:"duration"`
	StartTime       int64  `json:"startTime"`
	EndTime         int64  `json:"endTime"`
	PurchaseTime    int64  `json:"purchaseTime"`
	RedeemDate      string `json:"redeemDate"`
	Interest        string `json:"interest"`
	InterestRate    string `json:"interestRate"`
	Lot             int32  `json:"lot"`
	PositionId      int64  `json:"positionId"`
	Principal       string `json:"principal"`
	ProjectId       string `json:"projectId"`
	ProjectName     string `json:"projectName"`
	Status          string `json:"status"`
	ProjectType     string `json:"type"`
}
|
package sample_data
import "github.com/psinthorn/gostore/pb"
// NewStorage builds a sample storage object with a random driver and a
// random memory size between 2 and 6 gigabytes.
func NewStorage() *pb.Storage {
	return &pb.Storage{
		Driver: randomStorage(),
		Memory: &pb.Memory{
			Value: uint64(randomInt(2, 6)),
			Unit:  pb.Memory_GIGABYTE,
		},
	}
}
|
package client
import (
"github.com/liut/staffio/pkg/common"
)
// Staff is a retrieved employee struct.
type Staff struct {
	UID            string        `json:"uid" form:"uid"`                     // login name
	CommonName     string        `json:"cn,omitempty" form:"cn"`             // full name
	GivenName      string        `json:"gn,omitempty" form:"gn"`             // given name
	Surname        string        `json:"sn,omitempty" form:"sn"`             // surname
	Nickname       string        `json:"nickname,omitempty" form:"nickname"` // nickname
	Birthday       string        `json:"birthday,omitempty" form:"birthday"` // birthday
	Gender         common.Gender `json:"gender,omitempty"`                   // 1=male, 2=female, 0=unknown
	Mobile         string        `json:"mobile,omitempty"`                   // cell phone number
	Email          string        `json:"email,omitempty"`
	EmployeeNumber int           `json:"eid,omitempty" form:"eid"`
	// NOTE(review): the form tag "etitle" does not match the json tag "etype" —
	// confirm which one the form layer expects.
	EmployeeType string `json:"etype,omitempty" form:"etitle"`
	AvatarPath   string `json:"avatarPath,omitempty" form:"avatar"`
	Provider     string `json:"provider,omitempty"`
}
// RoleMe is a set of role flags keyed by role name.
type RoleMe map[string]interface{}

// Has reports whether role name is present and explicitly enabled (true).
// Missing keys and non-bool values both count as false.
func (r RoleMe) Has(name string) bool {
	enabled, _ := r[name].(bool)
	return enabled
}
|
package main
import (
"log"
"net/http"
"os"
"time"
"meli/pkg/handler"
"github.com/patrickmn/go-cache"
)
// main loads the environment, creates an in-memory cache (5 minute default
// TTL, purged every 10 minutes) and serves the application routes on $PORT.
func main() {
	handler.GoDotEnv()
	infoLog := log.New(os.Stdout, "INFO\t", log.Ldate|log.Ltime)
	c := cache.New(5*time.Minute, 10*time.Minute)
	infoLog.Printf("Starting server on %s", os.Getenv("PORT"))
	// Fix: ListenAndServe's error was silently discarded; it only returns on
	// failure (bad port, address in use), so treat that as fatal.
	if err := http.ListenAndServe(":"+os.Getenv("PORT"), handler.Routes(c)); err != nil {
		infoLog.Fatal(err)
	}
}
|
package reverse
import (
"testing"
)
// TestString checks that String reverses a byte slice.
func TestString(t *testing.T) {
	message, err := String([]byte("Hello World!"))
	if string(message) != "!dlroW olleH" || err != nil {
		t.Fatalf(`String([]byte("Hello World!")) = %q, %v, want "!dlroW olleH", error`, message, err)
	}
}

// BenchmarkString measures String on a fixed 12-byte input.
func BenchmarkString(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_, err := String([]byte("Hello World!"))
		if err != nil {
			b.Fatal(err)
		}
	}
}

// TestEmptyString documents the contract that an empty input is an error
// (err must be non-nil).
func TestEmptyString(t *testing.T) {
	message, err := String([]byte(""))
	if string(message) != "" || err == nil {
		t.Fatalf(`String([]byte("")) = %q, %v, want "", error`, message, err)
	}
}
// TestLinkedList checks that LinkedList reverses a list built by Init,
// comparing via the Print rendering.
func TestLinkedList(t *testing.T) {
	list, _ := Init([]byte("Hello World!"))
	message, err := LinkedList(list)
	if string(Print(message)) != "!dlroW olleH" || err != nil {
		t.Fatalf(`LinkedList(Init([]byte("Hello World!"))) = %q, %v, want "!dlroW olleH", error`, Print(message), err)
	}
}

// BenchmarkLinkedList reuses one list across iterations; this presumes
// LinkedList does not consume/mutate its input — confirm.
func BenchmarkLinkedList(b *testing.B) {
	list, _ := Init([]byte("Hello World!"))
	for i := 0; i < b.N; i++ {
		_, err := LinkedList(list)
		if err != nil {
			b.Fatal(err)
		}
	}
}

// TestEmptyLinkedList documents the contract that Init of empty input
// yields a nil list and a non-nil error.
func TestEmptyLinkedList(t *testing.T) {
	list, err := Init([]byte(""))
	if list != nil || err == nil {
		t.Fatalf(`Init([]byte("")) = %v, %v, want "<nil>", error`, list, err)
	}
}
|
package main
import (
"time"
toxiproxy "github.com/Shopify/toxiproxy/client"
"github.com/sirupsen/logrus"
)
// log is the package-wide logger; init raises it to debug verbosity so each
// applied/cleaned toxic is visible.
var log = logrus.New()

func init() {
	log.SetLevel(logrus.DebugLevel)
}
// Toxic bundles a toxiproxy client with the single proxy the scenarios below
// manipulate.
type Toxic struct {
	client *toxiproxy.Client
	proxy  *toxiproxy.Proxy
}

// Clean removes every toxic this program may have installed.
// Errors from RemoveToxic are ignored — presumably best-effort because a
// toxic may simply not be installed; confirm that is intended.
func (toxic *Toxic) Clean() {
	log.Debugln("Cleaning Toxics ......")
	toxic.proxy.RemoveToxic("bandwidth_up")
	toxic.proxy.RemoveToxic("bandwidth_down")
	toxic.proxy.RemoveToxic("timeout_up")
	log.Debugln("Done cleaning ......")
}

// LowBandwidth limits both directions to 25 kbps (AddToxic errors ignored).
func (toxic *Toxic) LowBandwidth() {
	toxic.Clean()
	log.Debugln("Applying Bandwidth Toxic. Upstream = 25kbps. Downstream = 25kbps")
	toxic.proxy.AddToxic("bandwidth_up", "bandwidth", "upstream", 1.0, toxiproxy.Attributes{
		"rate": 25,
	})
	toxic.proxy.AddToxic("bandwidth_down", "bandwidth", "downstream", 1.0, toxiproxy.Attributes{
		"rate": 25,
	})
}

// ZeroUpBandwidth blocks upstream entirely while keeping 250 kbps downstream.
func (toxic *Toxic) ZeroUpBandwidth() {
	toxic.Clean()
	log.Debugln("Applying Zero upstream bandwidth Toxic")
	toxic.proxy.AddToxic("bandwidth_up", "bandwidth", "upstream", 1.0, toxiproxy.Attributes{
		"rate": 0,
	})
	toxic.proxy.AddToxic("bandwidth_down", "bandwidth", "downstream", 1.0, toxiproxy.Attributes{
		"rate": 250,
	})
}

// HalfOpenConnection blocks downstream only, simulating a half-open link.
func (toxic *Toxic) HalfOpenConnection() {
	toxic.Clean()
	log.Debugln("Applying Halfopen Connection Toxic")
	toxic.proxy.AddToxic("bandwidth_down", "bandwidth", "downstream", 1.0, toxiproxy.Attributes{
		"rate": 0,
	})
}

// Disconnect installs an upstream timeout toxic (attribute value 15; units
// are whatever toxiproxy defines for "timeout" — confirm, likely ms).
func (toxic *Toxic) Disconnect() {
	toxic.Clean()
	log.Debugln("Applying Disconnect Timeout Toxic")
	toxic.proxy.AddToxic("timeout_up", "timeout", "upstream", 1.0, toxiproxy.Attributes{
		"timeout": 15,
	})
}
// main creates a toxiproxy proxy in front of the MQTT broker (clients dial
// :9883, upstream broker at :1883) and then loops forever, rotating through
// chaos phases of toxicTime each:
// clean -> low bandwidth -> zero up bandwidth -> clean -> half-open -> disconnect -> ...
//
// Cleanup: the redundant `var err error` predeclaration was removed; err is
// introduced directly by the CreateProxy short declaration.
func main() {
	// connect to toxiproxy server
	client := toxiproxy.NewClient("localhost:8474")
	proxy, err := client.CreateProxy("toxicbroker", "127.0.0.1:9883", "127.0.0.1:1883")
	if err != nil {
		log.Fatalln("Couldn't create proxy client", err)
	}
	toxic := Toxic{client, proxy}

	// Each phase arms the timer for the next one; only one channel is armed
	// at a time, so the select fires exactly one case per phase.
	var clean1 = time.After(1 * time.Second)
	var clean2 <-chan time.Time
	var lowbandwidth <-chan time.Time
	var zeroupbandwidth <-chan time.Time
	var timeout <-chan time.Time
	var halfopen <-chan time.Time
	var toxicTime = 2 * time.Minute
	for {
		select {
		case <-clean1:
			lowbandwidth = time.After(toxicTime)
			toxic.Clean()
		case <-lowbandwidth:
			zeroupbandwidth = time.After(toxicTime)
			toxic.LowBandwidth()
		case <-zeroupbandwidth:
			clean2 = time.After(toxicTime)
			toxic.ZeroUpBandwidth()
		case <-clean2:
			halfopen = time.After(toxicTime)
			toxic.Clean()
		case <-halfopen:
			timeout = time.After(toxicTime)
			toxic.HalfOpenConnection()
		case <-timeout:
			clean1 = time.After(toxicTime)
			toxic.Disconnect()
		}
	}
}
|
package main
import (
"context"
"fmt"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
"gopkg.in/alecthomas/kingpin.v2"
"sync"
"time"
)
// Command-line arguments/flags plus shared state used by the page workers.
var (
	owner      = kingpin.Arg("owner", "GitHub owner.").Required().String()
	repo       = kingpin.Arg("repo", "GitHub repository").Required().String()
	base       = kingpin.Arg("base", "Base tag/commit").Required().String()
	head       = kingpin.Arg("head", "Head tag/commit").Default("main").String()
	auth_token = kingpin.Flag("token", "OAuth Token").Envar("GITHUB_TOKEN").String()
	// since is the base commit's author date; set in main before workers start.
	since *time.Time = nil
	// client is the shared GitHub API client; set in main before workers start.
	client *github.Client = nil
)
// issueWorker consumes page numbers from pages, fetches that page of closed
// issues (updated since the base commit's date) and pushes each issue onto
// results. It panics on API errors — acceptable for this one-shot CLI.
func issueWorker(pages <-chan int, results chan<- github.Issue) {
	for page := range pages {
		issues, _, err := client.Issues.ListByRepo(
			context.Background(),
			*owner,
			*repo,
			&github.IssueListByRepoOptions{
				State:       "closed",
				Since:       *since,
				ListOptions: github.ListOptions{Page: page, PerPage: 100},
			})
		if err != nil {
			panic(err)
		}
		// Progress indicator: one dot per fetched page.
		fmt.Print(".")
		for _, issue := range issues {
			results <- *issue
		}
	}
}
// commitWorker consumes page numbers from pages, fetches that page of the
// base...head commit comparison via a hand-built request (the typed client
// API does not expose per-page compare) and pushes each commit onto results.
// It panics on API errors — acceptable for this one-shot CLI.
func commitWorker(pages <-chan int, results chan<- github.RepositoryCommit) {
	for page := range pages {
		compareUrl := fmt.Sprintf("repos/%v/%v/compare/%v...%v?per_page=100&page=%v", *owner, *repo, *base, *head, page)
		req, err := client.NewRequest("GET", compareUrl, nil)
		if err != nil {
			panic(err)
		}
		comp := new(github.CommitsComparison)
		_, err = client.Do(context.Background(), req, comp)
		if err != nil {
			panic(err)
		}
		// Progress indicator: one dot per fetched page.
		fmt.Print(".")
		for _, commit := range comp.Commits {
			results <- commit
		}
	}
}
// main gathers release statistics for owner/repo between base and head:
// it discovers the page counts for closed issues and compared commits,
// fans the pages out to 5 workers each, then aggregates commit count,
// distinct contributors, and closed issues/PRs inside the time window.
func main() {
	kingpin.Parse()
	fmt.Println("Getting repository data...")
	// Authenticated client when a token is provided (higher rate limits).
	if *auth_token != "" {
		tc := oauth2.NewClient(
			context.Background(),
			oauth2.StaticTokenSource(
				&oauth2.Token{AccessToken: *auth_token},
			))
		client = github.NewClient(tc)
	} else {
		client = github.NewClient(nil)
	}
	// The window starts at the base commit's author date ...
	baseCommit, _, err := client.Repositories.GetCommit(context.Background(), *owner, *repo, *base)
	if err != nil {
		panic(err)
	}
	since = baseCommit.Commit.Author.Date
	// ... and ends now, or at head's author date when head is explicit.
	until := time.Now()
	if *head != "main" {
		headCommit, _, err := client.Repositories.GetCommit(context.Background(), *owner, *repo, *head)
		if err != nil {
			panic(err)
		}
		until = *headCommit.Commit.Author.Date
	}
	// First issue request is only for pagination metadata (LastPage).
	_, issueInfo, err := client.Issues.ListByRepo(
		context.Background(),
		*owner,
		*repo,
		&github.IssueListByRepoOptions{
			State:       "closed",
			Since:       *since,
			ListOptions: github.ListOptions{Page: 1, PerPage: 100},
		})
	if err != nil {
		panic(err)
	}
	if issueInfo.LastPage == 0 {
		// if we have only one page, LastPage is not set.
		issueInfo.LastPage = 1
	}
	// Same metadata-only probe for the commit comparison.
	compareUrl := fmt.Sprintf("repos/%v/%v/compare/%v...%v?per_page=100", *owner, *repo, *base, *head)
	req, err := client.NewRequest("GET", compareUrl, nil)
	if err != nil {
		panic(err)
	}
	commitInfo, err := client.Do(context.Background(), req, nil)
	if err != nil {
		panic(err)
	}
	if commitInfo.LastPage == 0 {
		// if we have only one page, LastPage is not set.
		commitInfo.LastPage = 1
	}
	fmt.Printf("Fetching %d commit pages and %d issue pages...", commitInfo.LastPage, issueInfo.LastPage)
	// Enumerate commits: 5 workers drain commitPages; the results channel is
	// closed once all workers finish so the range below terminates.
	commitPages := make(chan int, 100)
	commits := make(chan github.RepositoryCommit, 100000)
	commitWg := &sync.WaitGroup{}
	commitWg.Add(5)
	for w := 1; w <= 5; w++ {
		go func() {
			defer commitWg.Done()
			commitWorker(commitPages, commits)
		}()
	}
	for p := 1; p <= commitInfo.LastPage; p++ {
		commitPages <- p
	}
	close(commitPages)
	go func() {
		commitWg.Wait()
		close(commits)
	}()
	// Enumerate issues with the same worker-pool pattern.
	issuePages := make(chan int, 100)
	issues := make(chan github.Issue, 100000)
	issueWg := &sync.WaitGroup{}
	issueWg.Add(5)
	for w := 1; w <= 5; w++ {
		go func() {
			defer issueWg.Done()
			issueWorker(issuePages, issues)
		}()
	}
	for p := 1; p <= issueInfo.LastPage; p++ {
		issuePages <- p
	}
	close(issuePages)
	go func() {
		issueWg.Wait()
		close(issues)
	}()
	// Count issues vs PRs closed inside the (since, until) window.
	closedIssues := 0
	closedPRs := 0
	for issue := range issues {
		if issue.ClosedAt.After(*since) && issue.ClosedAt.Before(until) {
			if issue.PullRequestLinks != nil {
				closedPRs++
			} else {
				closedIssues++
			}
		}
	}
	// Distinct contributors by commit author name.
	contributors := make(map[string]bool)
	commitCount := 0
	for commit := range commits {
		contributors[*commit.Commit.Author.Name] = true
		commitCount += 1
	}
	days := int(until.Sub(*since).Hours() / 24)
	fmt.Println("")
	fmt.Printf("Since the last release, the project has had %d commits by %d contributors, "+
		"resulting in %d closed issues and %d closed pull requests, all of this in just over %d days.",
		commitCount, len(contributors), closedIssues, closedPRs, days)
	fmt.Println("")
}
|
package routers
import (
"ibgamemanage/controllers"
"github.com/astaxie/beego"
)
// init registers the beego routes.
// NOTE(review): the original comments described blog posts, but the routes
// reference players/login in a game-management app — the comments appear to
// be copied from a blog example; confirm each controller's actual purpose.
func init() {
	// view a player's details
	beego.Router("/view/:PlayerId([0-9]+)", &controllers.ViewController{})
	beego.Router("/", &controllers.IndexController{})
	beego.Router("/login", &controllers.LoginController{})
	// create a new entry
	beego.Router("/new", &controllers.NewController{})
	// delete an entry by id
	beego.Router("/delete/:id([0-9]+)", &controllers.DeleteController{})
	// edit an entry by id
	beego.Router("/edit/:id([0-9]+)", &controllers.EditController{})
}
|
package main
// isVisit memoizes solved subproblems: key = hash(word1, word2), value = answer.
var isVisit map[string]int

// minDistance returns the edit distance between word1 and word2 (insert,
// delete, replace — each cost 1), computed by memoized recursion.
// The memo table is reset on every top-level call.
func minDistance(word1 string, word2 string) int {
	isVisit = make(map[string]int)
	return minDistanceExec(word1, word2)
}

// minDistanceExec solves one subproblem, consulting and updating the memo table.
func minDistanceExec(word1 string, word2 string) int {
	// Base cases: transforming to/from the empty string costs the other length.
	if len(word1) == 0 {
		return len(word2)
	}
	if len(word2) == 0 {
		return len(word1)
	}
	key := hash(word1, word2)
	if cached, ok := isVisit[key]; ok {
		return cached
	}
	i, j := len(word1)-1, len(word2)-1
	var result int
	if word1[i] == word2[j] {
		// Last characters match: keep both, no cost.
		result = minDistanceExec(word1[:i], word2[:j])
	} else {
		// Delete from word1, replace (trim both), or delete from word2.
		result = 1 + min(
			minDistanceExec(word1[:i], word2),
			minDistanceExec(word1[:i], word2[:j]),
			minDistanceExec(word1, word2[:j]),
		)
	}
	isVisit[key] = result
	return result
}

// hash builds a single memo key identifying the (a, b) argument pair.
// NOTE(review): inputs containing '|' could collide (e.g. "x|","y" vs
// "x","|y"); fine for typical word inputs, but consider length-prefixing.
func hash(a, b string) string {
	return a + "|" + b
}

// min returns the smallest of one or more ints (panics on zero arguments,
// like the original).
func min(arr ...int) int {
	best := arr[0]
	for _, v := range arr[1:] {
		if v < best {
			best = v
		}
	}
	return best
}
/*
Problem link:
https://leetcode-cn.com/problems/delete-operation-for-two-strings/ (Delete Operation for Two Strings)
*/
/*
Notes:
1. This is edit distance implemented via memoized search — its time/space efficiency is poor.
2. Compare with "LeetCode_583_两个字符串的删除操作.go" in this directory to see the difference.
*/
|
package main
import "testing"
// TestP1 checks sum against the Project Euler problem 1 reference values.
func TestP1(t *testing.T) {
	tests := []struct {
		limit int
		want  int
	}{
		{10, 23},
		{1000, 233168},
	}
	for _, tc := range tests {
		if got := sum(tc.limit); got != tc.want {
			t.Errorf("P1: %v\tExpected: %v", got, tc.want)
		}
	}
}
|
package test
import (
"encoding/json"
"io/ioutil"
"net/http"
"testing"
"github.com/stretchr/testify/assert"
)
func GetResponseBody(t *testing.T, response *http.Response) string {
b, err := ioutil.ReadAll(response.Body)
if err != nil {
t.Fatal(err)
}
return string(b)
}
// AssertJSONMatches asserts that the response body equals the JSON encoding
// of expected. Note this compares serialized strings, so field order and
// whitespace must match json.Marshal's output exactly.
func AssertJSONMatches(t *testing.T, response *http.Response, expected interface{}) {
	body := GetResponseBody(t, response)
	// Convert our expected response to JSON
	b, err := json.Marshal(expected)
	if err != nil {
		t.Fatal(err)
	}
	expectedBody := string(b)
	assert.Equal(t, expectedBody, body)
}
|
package log_client
import (
"github.com/kataras/iris/context"
"gocherry-api-gateway/admin/services"
"time"
)
// logFileName is the date-stamped path of the proxy log file,
// <logDir>proxy_log_YYYY-MM-DD.log.
var logFileName string

// init builds logFileName from the configured log directory and today's date.
//
// Fix: the date was hard-coded to 2017-02-27 (apparently a leftover test
// fixture), freezing the log file name forever; use the current time.
func init() {
	now := time.Now()
	logDir := services.GetAppConfig().Common.LogDir
	logFileName = logDir + "proxy_log_" + now.Format("2006-01-02") + ".log"
}
// LogInfo logs at info level via the iris application logger.
func LogInfo(ctx context.Context, log interface{}) {
	ctx.Application().Logger().Info(log)
}

// LogErr logs at error level via the iris application logger.
func LogErr(ctx context.Context, log interface{}) {
	ctx.Application().Logger().Error(log)
}

// logWarn logs at warn level. NOTE(review): unlike its siblings this one is
// unexported (and thus unusable from other packages) — likely meant to be
// LogWarn; confirm before renaming.
func logWarn(ctx context.Context, log interface{}) {
	ctx.Application().Logger().Warn(log)
}
|
/**
 * @Author : henry
 * @Date: 2020-08-13 13:22
 * @Note:
 **/
package models
import (
"encoding/json"
"fmt"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mssql"
"github.com/vouchersAPI/app"
)
// MsDB is the shared gorm handle for the voucher MSSQL database,
// initialised by InitMssql.
var MsDB *gorm.DB

// err is package-level scratch error storage used by InitMssql.
// NOTE(review): sharing an error through a package variable is
// race-prone and invites accidental shadowing; a function-local error
// would be safer.
var err error

// VoucherDB describes the MSSQL connection settings decoded from the
// JSON configuration string passed to InitMssql.
type VoucherDB struct {
	Type     string `json:"type"`     // datastore type tag from the config
	Ip       string `json:"ip"`       // server host or IP
	User     string `json:"user"`     // login user id
	Pwd      string `json:"pwd"`      // login password
	Port     int    `json:"port"`     // server port
	Database string `json:"database"` // database name
	Encrypt  string `json:"encrypt"`  // value for the connection's encrypt option
}
// InitMssql parses the JSON connection config, opens the global MSSQL
// gorm handle (MsDB) and verifies connectivity with a ping. Any
// failure is fatal via app.Logger.
//
// Fix: errors are now held in locals instead of the shared
// package-level `err` variable, which was race-prone and easy to
// shadow accidentally. MsDB is only published once the ping succeeds.
func InitMssql(config string) {
	var voucherDB VoucherDB
	if err := json.Unmarshal([]byte(config), &voucherDB); err != nil {
		app.Logger.Fatalln("unmarshal mssqlDb failed : ", err)
	}
	connString := fmt.Sprintf("server=%s;port=%d;database=%s;user id=%s;password=%s;encrypt=%s",
		voucherDB.Ip, voucherDB.Port, voucherDB.Database, voucherDB.User, voucherDB.Pwd, voucherDB.Encrypt)
	db, err := gorm.Open("mssql", connString)
	if err != nil {
		app.Logger.Fatalln("open mssql failed: ", err)
	}
	if err := db.DB().Ping(); err != nil {
		app.Logger.Fatalln("connect to mssql failed: ", err)
	}
	MsDB = db
}
|
package types
// Type is a numeric tag identifying a value's kind.
type Type int

// Known type tags. The numeric values (110/190/210) are part of the
// package's external contract and must not change.
// NOTE(review): ALL_CAPS names are non-idiomatic Go, but the constants
// are exported, so renaming them would break callers.
const (
	TYPE_INT    Type = 110
	TYPE_STRING Type = 190
	TYPE_MAP    Type = 210
)
|
package git
import (
"runtime"
"sort"
"testing"
"time"
)
// TestRefModification exercises reference creation, lookup, symbolic
// resolution and renaming.
func TestRefModification(t *testing.T) {
	t.Parallel()
	repo := createTestRepo(t)
	defer cleanupTestRepo(t, repo)
	commitId, treeId := seedTestRepo(t, repo)
	// A direct (OID) reference pointing at the seeded tree.
	_, err := repo.References.Create("refs/tags/tree", treeId, true, "testTreeTag")
	checkFatal(t, err)
	tag, err := repo.References.Lookup("refs/tags/tree")
	checkFatal(t, err)
	checkRefType(t, tag, ReferenceOid)
	// HEAD is symbolic: no direct target until resolved.
	ref, err := repo.References.Lookup("HEAD")
	checkFatal(t, err)
	checkRefType(t, ref, ReferenceSymbolic)
	if target := ref.Target(); target != nil {
		t.Fatalf("Expected nil *Oid, got %v", target)
	}
	ref, err = ref.Resolve()
	checkFatal(t, err)
	checkRefType(t, ref, ReferenceOid)
	if target := ref.Target(); target == nil {
		t.Fatalf("Expected valid target got nil")
	}
	if target := ref.SymbolicTarget(); target != "" {
		t.Fatalf("Expected empty string, got %v", target)
	}
	if commitId.String() != ref.Target().String() {
		t.Fatalf("Wrong ref target")
	}
	_, err = tag.Rename("refs/tags/renamed", false, "")
	checkFatal(t, err)
	tag, err = repo.References.Lookup("refs/tags/renamed")
	checkFatal(t, err)
	// Fix: the original re-checked the stale `ref` (resolved HEAD) here
	// instead of the freshly renamed and re-looked-up tag.
	checkRefType(t, tag, ReferenceOid)
}
// TestReferenceIterator creates three extra branches and checks that
// both the name iterator and the full-reference iterator visit every
// reference exactly once, terminating with ErrorCodeIterOver.
func TestReferenceIterator(t *testing.T) {
	t.Parallel()
	repo := createTestRepo(t)
	defer cleanupTestRepo(t, repo)
	loc, err := time.LoadLocation("Europe/Berlin")
	checkFatal(t, err)
	sig := &Signature{
		Name:  "Rand Om Hacker",
		Email: "random@hacker.com",
		When:  time.Date(2013, 03, 06, 14, 30, 0, 0, loc),
	}
	// Build one commit on HEAD so refs/heads/master exists.
	idx, err := repo.Index()
	checkFatal(t, err)
	err = idx.AddByPath("README")
	checkFatal(t, err)
	treeId, err := idx.WriteTree()
	checkFatal(t, err)
	message := "This is a commit\n"
	tree, err := repo.LookupTree(treeId)
	checkFatal(t, err)
	commitId, err := repo.CreateCommit("HEAD", sig, sig, message, tree)
	checkFatal(t, err)
	_, err = repo.References.Create("refs/heads/one", commitId, true, "headOne")
	checkFatal(t, err)
	_, err = repo.References.Create("refs/heads/two", commitId, true, "headTwo")
	checkFatal(t, err)
	_, err = repo.References.Create("refs/heads/three", commitId, true, "headThree")
	checkFatal(t, err)
	iter, err := repo.NewReferenceIterator()
	checkFatal(t, err)
	var list []string
	expected := []string{
		"refs/heads/master",
		"refs/heads/one",
		"refs/heads/three",
		"refs/heads/two",
	}
	// test some manual iteration
	nameIter := iter.Names()
	name, err := nameIter.Next()
	for err == nil {
		list = append(list, name)
		name, err = nameIter.Next()
	}
	// The iterator signals exhaustion via ErrorCodeIterOver, not nil.
	if !IsErrorCode(err, ErrorCodeIterOver) {
		t.Fatal("Iteration not over")
	}
	sort.Strings(list)
	compareStringList(t, expected, list)
	// test the iterator for full refs, rather than just names
	iter, err = repo.NewReferenceIterator()
	checkFatal(t, err)
	count := 0
	_, err = iter.Next()
	for err == nil {
		count++
		_, err = iter.Next()
	}
	if !IsErrorCode(err, ErrorCodeIterOver) {
		t.Fatal("Iteration not over")
	}
	// master plus the three branches created above.
	if count != 4 {
		t.Fatalf("Wrong number of references returned %v", count)
	}
}
// TestReferenceOwner verifies that a reference reports the repository
// that created it as its owner.
func TestReferenceOwner(t *testing.T) {
	t.Parallel()
	repo := createTestRepo(t)
	defer cleanupTestRepo(t, repo)
	commitId, _ := seedTestRepo(t, repo)
	ref, err := repo.References.Create("refs/heads/foo", commitId, true, "")
	checkFatal(t, err)
	owner := ref.Owner()
	if owner == nil {
		t.Fatal("nil owner")
	}
	// The owner must wrap the very same underlying repository handle.
	if owner.ptr != repo.ptr {
		t.Fatalf("bad ptr, expected %v have %v\n", repo.ptr, owner.ptr)
	}
}
// TestUtil covers the reference convenience helpers: Dwim, Cmp,
// Shorthand and HasLog.
func TestUtil(t *testing.T) {
	t.Parallel()
	repo := createTestRepo(t)
	defer cleanupTestRepo(t, repo)
	commitId, _ := seedTestRepo(t, repo)
	ref, err := repo.References.Create("refs/heads/foo", commitId, true, "")
	checkFatal(t, err)
	// Dwim ("do what I mean") should expand "foo" to refs/heads/foo.
	ref2, err := repo.References.Dwim("foo")
	checkFatal(t, err)
	if ref.Cmp(ref2) != 0 {
		t.Fatalf("foo didn't dwim to the right thing")
	}
	if ref.Shorthand() != "foo" {
		t.Fatalf("refs/heads/foo has no foo shorthand")
	}
	hasLog, err := repo.References.HasLog("refs/heads/foo")
	checkFatal(t, err)
	if !hasLog {
		t.Fatalf("branches have logs by default")
	}
}
// TestIsNote verifies that the default notes ref is classified as a
// note while an ordinary branch ref is not.
func TestIsNote(t *testing.T) {
	t.Parallel()
	repo := createTestRepo(t)
	defer cleanupTestRepo(t, repo)
	commitID, _ := seedTestRepo(t, repo)
	sig := &Signature{
		Name:  "Rand Om Hacker",
		Email: "random@hacker.com",
		When:  time.Now(),
	}
	// Attach a note to the seeded commit under the default notes ref.
	refname, err := repo.Notes.DefaultRef()
	checkFatal(t, err)
	_, err = repo.Notes.Create(refname, sig, sig, commitID, "This is a note", false)
	checkFatal(t, err)
	ref, err := repo.References.Lookup(refname)
	checkFatal(t, err)
	if !ref.IsNote() {
		t.Fatalf("%s should be a note", ref.Name())
	}
	// A plain branch must not be classified as a note.
	ref, err = repo.References.Create("refs/heads/foo", commitID, true, "")
	checkFatal(t, err)
	if ref.IsNote() {
		t.Fatalf("%s should not be a note", ref.Name())
	}
}
// TestReferenceNameIsValid checks ReferenceNameIsValid on one valid
// top-level name (HEAD) and one invalid name (HEAD1).
// NOTE(review): HEAD1 presumably fails because one-level names must be
// all-caps tokens like HEAD/FETCH_HEAD — confirm against libgit2's
// git_reference_name_is_valid rules.
func TestReferenceNameIsValid(t *testing.T) {
	t.Parallel()
	valid, err := ReferenceNameIsValid("HEAD")
	checkFatal(t, err)
	if !valid {
		t.Errorf("HEAD should be a valid reference name")
	}
	valid, err = ReferenceNameIsValid("HEAD1")
	checkFatal(t, err)
	if valid {
		t.Errorf("HEAD1 should not be a valid reference name")
	}
}
// TestReferenceNormalizeName checks collapsing of duplicate slashes,
// one-level/shorthand acceptance, and rejection of invalid characters.
func TestReferenceNormalizeName(t *testing.T) {
	t.Parallel()
	ref, err := ReferenceNormalizeName("refs/heads//master", ReferenceFormatNormal)
	checkFatal(t, err)
	if ref != "refs/heads/master" {
		t.Errorf("ReferenceNormalizeName(%q) = %q; want %q", "refs/heads//master", ref, "refs/heads/master")
	}
	ref, err = ReferenceNormalizeName("master", ReferenceFormatAllowOnelevel|ReferenceFormatRefspecShorthand)
	checkFatal(t, err)
	if ref != "master" {
		t.Errorf("ReferenceNormalizeName(%q) = %q; want %q", "master", ref, "master")
	}
	// "^" is illegal in a reference name, so this must fail with
	// ErrorCodeInvalidSpec (no checkFatal here — the error is expected).
	ref, err = ReferenceNormalizeName("foo^", ReferenceFormatNormal)
	if !IsErrorCode(err, ErrorCodeInvalidSpec) {
		t.Errorf("foo^ should be invalid")
	}
}
// compareStringList fails the test unless actual is element-wise equal
// to expected, including length.
func compareStringList(t *testing.T, expected, actual []string) {
	// Fix: the original only compared the first len(expected) elements,
	// panicking with an index-out-of-range when actual was shorter and
	// silently passing when actual was longer.
	if len(expected) != len(actual) {
		t.Fatalf("Bad list: expected %d entries, got %d", len(expected), len(actual))
	}
	for i, v := range expected {
		if actual[i] != v {
			t.Fatalf("Bad list")
		}
	}
}
// checkRefType fails the test unless ref has the given reference type.
// The reported file:line is the caller's, since that is where the
// expectation lives.
func checkRefType(t *testing.T, ref *Reference, kind ReferenceType) {
	if ref.Type() == kind {
		return
	}
	// The failure happens at wherever we were called, not here
	_, file, line, ok := runtime.Caller(1)
	if !ok {
		t.Fatalf("Unable to get caller")
	}
	t.Fatalf("Wrong ref type at %v:%v; have %v, expected %v", file, line, ref.Type(), kind)
}
|
// maximumProduct returns the largest product obtainable from any three
// numbers in nums (LeetCode 628). A single pass tracks the three
// largest values and the two smallest values; the answer is either the
// product of the three largest, or the two smallest (possibly both
// negative) times the largest.
func maximumProduct(nums []int) int {
	hi1, hi2, hi3 := math.MinInt32, math.MinInt32, math.MinInt32
	lo1, lo2 := math.MaxInt32, math.MaxInt32
	for _, n := range nums {
		switch {
		case n > hi1:
			hi1, hi2, hi3 = n, hi1, hi2
		case n > hi2:
			hi2, hi3 = n, hi2
		case n > hi3:
			hi3 = n
		}
		if n < lo1 {
			lo1, lo2 = n, lo1
		} else if n < lo2 {
			lo2 = n
		}
	}
	if best := hi1 * hi2 * hi3; best > hi1*lo1*lo2 {
		return best
	}
	return hi1 * lo1 * lo2
}
|
package zenrpc_mw
import (
"context"
"encoding/json"
"time"
"github.com/go-kit/kit/log"
"github.com/semrush/zenrpc"
)
// Logger returns a zenrpc middleware that logs, for every RPC call,
// the caller's remote address, the namespaced method name, the call
// duration and any error, using the supplied go-kit logger.
func Logger(logger log.Logger) zenrpc.MiddlewareFunc {
	return func(invoke zenrpc.InvokeFunc) zenrpc.InvokeFunc {
		return func(ctx context.Context, method string, params json.RawMessage) zenrpc.Response {
			// Capture the start time and, when available, the remote address.
			begin, ip := time.Now(), "<nil>"
			if req, ok := zenrpc.RequestFromContext(ctx); ok && req != nil {
				ip = req.RemoteAddr
			}
			r := invoke(ctx, method, params)
			logger.Log(
				"ip", ip,
				"method", zenrpc.NamespaceFromContext(ctx)+"."+method,
				"duration", time.Since(begin),
				"err", r.Error,
			)
			return r
		}
	}
}
|
//go:generate mockgen -destination=./mock/output_mock.go github.com/nomkhonwaan/myblog/pkg/log Outputer
package log
import (
"log"
"os"
)
// Outputer is a compatible interface for logging with format
type Outputer interface {
	// Printf logs with format to the output
	Printf(format string, args ...interface{})
}

// DefaultOutputer implements Outputer interface which embeds `log.Logger` inside
type DefaultOutputer struct {
	*log.Logger
}

// Printf forwards to the embedded logger.
// NOTE(review): this override is redundant — the embedded *log.Logger
// already promotes Printf — but removing it changes nothing observable,
// so it is left in place.
func (outputer DefaultOutputer) Printf(format string, args ...interface{}) {
	outputer.Logger.Printf(format, args...)
}

// NewDefaultOutputer returns a DefaultOutputer writing unprefixed
// lines to stdout.
func NewDefaultOutputer() DefaultOutputer {
	return DefaultOutputer{log.New(os.Stdout, "", 0)}
}
|
// vi:nu:et:sts=4 ts=4 sw=4
// See License.txt in main repository directory
// CSV File Adjustment program
// This program provides a convenient way to add a field
// with a constant value or delete one or more fields
// from a csv.
// Generated: Mon May 20, 2019 21:42
package main
import (
"encoding/csv"
"flag"
"fmt"
"io"
"log"
"net/http"
"os"
"strconv"
)
// Command-line options, populated by the flag package in main.
// NOTE(review): the db_* names use underscores, which is non-idiomatic
// Go; this matches the generator's style.
var (
	debug    bool   // enable debug logging
	force    bool   // allow over-writes and deletions
	noop     bool   // run without making real changes
	quiet    bool   // suppress non-essential output
	db_pw    string // database password
	db_port  string // database port
	db_srvr  string // database server host
	db_user  string // database user
	db_name  string // database name
	execPath string // exec json path (optional)
)
// usage prints command-line help, including a note about the optional
// 'exec json' parameter file, to the flag package's output writer.
func usage() {
	fmt.Fprintf(flag.CommandLine.Output(), "Usage of %s:\n", os.Args[0])
	fmt.Fprintf(flag.CommandLine.Output(), "\nOptions:\n")
	flag.PrintDefaults()
	fmt.Fprintf(flag.CommandLine.Output(), "\nNotes:\n")
	fmt.Fprintf(flag.CommandLine.Output(), "'exec json' is a file that defines the command line parameters \n")
	fmt.Fprintf(flag.CommandLine.Output(), "so that you can set them and then execute gen with -x or -exec\n")
	fmt.Fprintf(flag.CommandLine.Output(), "option.\n\n")
}
// main parses the command-line flags, opens the CSV file named by the
// first positional argument, and reads it row by row.
//
// Fixes over the generated original:
//   - `path` and `w` were undefined identifiers (the error handling
//     was pasted from an HTTP handler); errors now go through log and
//     the input path comes from flag.Arg(0).
//   - every boolean flag defaulted to true, making the options
//     impossible to switch off from the command line; they now
//     default to false.
//   - unused locals (rcdout, fileOut) that broke compilation removed.
//
// NOTE(review): the file-level net/http and strconv imports are now
// unused and should be dropped.
func main() {
	// Set up flag variables.
	flag.Usage = usage
	flag.BoolVar(&debug, "debug", false, "enable debugging")
	flag.BoolVar(&force, "force", false, "enable over-writes and deletions")
	flag.BoolVar(&force, "f", false, "enable over-writes and deletions")
	flag.BoolVar(&noop, "noop", false, "execute program, but do not make real changes")
	flag.BoolVar(&quiet, "quiet", false, "enable quiet mode")
	flag.BoolVar(&quiet, "q", false, "enable quiet mode")
	flag.StringVar(&db_pw, "dbPW", "Passw0rd!", "the database password")
	flag.StringVar(&db_port, "dbPort", "1433", "the database port")
	flag.StringVar(&db_srvr, "dbServer", "localhost", "the database server")
	flag.StringVar(&db_user, "dbUser", "sa", "the database user")
	flag.StringVar(&db_name, "dbName", "", "the database name")
	// Parse the flags and check them.
	flag.Parse()
	if debug {
		log.Println("\tIn Debug Mode...")
	}
	// The CSV to adjust is the first positional argument.
	path := flag.Arg(0)
	if path == "" {
		log.Fatalf("missing input csv path")
	}
	// Create the csv reader.
	fileIn, err := os.Open(path)
	if err != nil {
		log.Fatalf("opening %s: %s", path, err)
	}
	defer fileIn.Close()
	log.Printf("\tFile, %s, is open...\n", path)
	rdr := csv.NewReader(fileIn)
	log.Printf("\tAdjusting the data...\n")
	var cnt int
	for {
		rcd, err := rdr.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("ERROR: Reading row %d from csv - %s", cnt, err.Error())
		}
		_ = rcd // adjustment logic not yet implemented
		cnt++
	}
	log.Printf("\tRead %d rows.\n", cnt)
}
|
/*
Description
In the game show "The Price is Right", a number of players (typically 4) compete to get on stage by guessing the price of an item. The winner is the person whose guess is the closest one not exceeding the actual price. Because of the popularity of the one-person game show "Who Wants to be a Millionaire", the American Contest Management (ACM) would like to introduce a one-person version of the "The Price is Right". In this version, each contestant is allowed G (1 <= G <= 30) guesses and L (0 <= L <= 30) lifelines. The contestant makes a number of guesses for the actual price. After each guess, the contestant is told whether it is correct, too low, or too high. If the guess is correct, the contestant wins. Otherwise, he uses up a guess. Additionally, if his guess is too high, a lifeline is also lost. The contestant loses when all his guesses are used up or if his guess is too high and he has no lifelines left. All prices are positive integers.
It turns out that for a particular pair of values for G and L, it is possible to obtain a guessing strategy such that if the price is between 1 and N (inclusive) for some N, then the player can guarantee a win. The ACM does not want every contestant to win, so it must ensure that the actual price exceeds N. At the same time, it does not want the game to be too difficult or there will not be enough winners to attract audience. Thus, it wishes to adjust the values of G and L depending on the actual price. To help them decide the correct values of G and L, the ACM has asked you to solve the following problem. Given G and L, what is the largest value of N such that there is a strategy to win as long as the price is between 1 and N (inclusive)?
Input
The input consists of a number of cases. Each case is specified by one line containing two integers G and L, separated by one space. The end of input is specified by a line in which G = L = 0.
Output
For each case, print a line of the form:
Case c: N
where c is the case number (starting from 1) and N is the number computed.
Sample Input
3 0
3 1
10 5
7 7
0 0
Sample Output
Case 1: 3
Case 2: 6
Case 3: 847
Case 4: 127
Source
East Central North America 2002
*/
package main
// main sanity-checks value against the four sample cases from the
// problem statement (East Central North America 2002).
func main() {
	assert(value(3, 0) == 3)
	assert(value(3, 1) == 6)
	assert(value(10, 5) == 847)
	assert(value(7, 7) == 127)
}
// assert panics with "assertion failed" when ok is false; it is a
// minimal stand-in for a test assertion.
func assert(ok bool) {
	if ok {
		return
	}
	panic("assertion failed")
}
// value returns the largest N such that a contestant with G guesses
// and L lifelines can always win when the price is in [1, N].
//
// p[i][j] is the largest range coverable with i guesses and j
// lifelines: guess the (p[i-1][j-1]+1)-th remaining price; "too high"
// costs a guess and a lifeline (p[i-1][j-1] lower prices remain),
// "too low" costs only a guess (p[i-1][j] higher prices remain).
//
// Fixes: the original initialised p[i][0] inside a redundant inner
// loop over j, doing O(n^2) work for an O(n) initialisation; the table
// is also allocated inline so the function is self-contained.
func value(G, L int) int {
	n := G
	if L > n {
		n = L
	}
	// (n+1) x (n+1) table, zero-initialised (p[0][j] == 0: no guesses
	// left means no price can be guaranteed).
	p := make([][]int, n+1)
	for i := range p {
		p[i] = make([]int, n+1)
	}
	// With zero lifelines, i guesses cover exactly i prices (guess
	// lowest-first; a "too high" answer would end the game).
	for i := 1; i <= n; i++ {
		p[i][0] = i
	}
	for i := 1; i <= n; i++ {
		for j := 1; j <= n; j++ {
			p[i][j] = p[i-1][j-1] + p[i-1][j] + 1
		}
	}
	return p[G][L]
}
// alloc returns an n-by-n matrix of ints, zero-initialised. All rows
// share one contiguous backing array, so the whole matrix is a single
// allocation (plus the row-header slice).
func alloc(n int) [][]int {
	backing := make([]int, n*n)
	rows := make([][]int, n)
	for i := 0; i < n; i++ {
		rows[i] = backing[i*n : (i+1)*n]
	}
	return rows
}
|
// Copyright © 2016 Prateek Malhotra (someone1@gmail.com)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package backends
import (
"bytes"
"context"
"crypto/rand"
"encoding/hex"
"errors"
"io"
"strings"
"testing"
"github.com/someone1/zfsbackup-go/helpers"
)
var (
	// errTest is the sentinel error returned by the failing test
	// doubles in this file (e.g. failWriter).
	errTest = errors.New("used for testing")
)
type closeReaderWrapper struct {
r io.ReadSeeker
}
func (c *closeReaderWrapper) Read(b []byte) (int, error) {
return c.r.Read(b)
}
func (c *closeReaderWrapper) Close() error {
return nil
}
func (c *closeReaderWrapper) Seek(offset int64, whence int) (int64, error) {
return c.r.Seek(offset, whence)
}
type closeWriterWrapper struct {
w io.Writer
}
func (c *closeWriterWrapper) Close() error {
return nil
}
func (c *closeWriterWrapper) Write(p []byte) (int, error) {
return c.w.Write(p)
}
// failWriter is an io.WriteCloser whose writes always fail with
// errTest, for exercising error paths.
type failWriter struct{}

// Write consumes nothing and always reports errTest.
func (f *failWriter) Write(p []byte) (int, error) {
	return 0, errTest
}

// Close is a no-op.
func (f *failWriter) Close() error {
	return nil
}
// errTestFunc classifies an error for table-driven backend tests.
type errTestFunc func(error) bool

func nilErrTest(e error) bool              { return e == nil }
func nonNilErrTest(e error) bool           { return e != nil }
func errTestErrTest(e error) bool          { return e == errTest }
func errInvalidPrefixErrTest(e error) bool { return e == ErrInvalidPrefix }
func errInvalidURIErrTest(e error) bool    { return e == ErrInvalidURI }

// invalidByteErrTest reports whether e is a hex.InvalidByteError.
func invalidByteErrTest(e error) bool {
	_, ok := e.(hex.InvalidByteError)
	return ok
}
// prepareTestVols builds two volumes for backend tests: goodVol is
// filled with 10 MiB of random payload and exists on disk, while
// badVol is created, closed and then deleted so that operations on it
// should fail. The raw payload is returned so callers can verify
// round-trips. On error the named results are returned as-is (partially
// populated).
func prepareTestVols() (payload []byte, goodVol *helpers.VolumeInfo, badVol *helpers.VolumeInfo, err error) {
	payload = make([]byte, 10*1024*1024)
	if _, err = rand.Read(payload); err != nil {
		return
	}
	reader := bytes.NewReader(payload)
	goodVol, err = helpers.CreateSimpleVolume(context.Background(), false)
	if err != nil {
		return
	}
	_, err = io.Copy(goodVol, reader)
	if err != nil {
		return
	}
	err = goodVol.Close()
	if err != nil {
		return
	}
	goodVol.ObjectName = strings.Join([]string{"this", "is", "just", "a", "test"}, "-") + ".ext"
	badVol, err = helpers.CreateSimpleVolume(context.Background(), false)
	if err != nil {
		return
	}
	err = badVol.Close()
	if err != nil {
		return
	}
	badVol.ObjectName = strings.Join([]string{"this", "is", "just", "a", "badtest"}, "-") + ".ext"
	// Deleting the backing file makes badVol a reliable failure case.
	err = badVol.DeleteVolume()
	return
}
// TestGetBackendForURI checks the two error paths of GetBackendForURI:
// an unknown scheme yields ErrInvalidPrefix and a string with no
// scheme separator yields ErrInvalidURI.
func TestGetBackendForURI(t *testing.T) {
	_, err := GetBackendForURI("thiswon'texist://")
	if err != ErrInvalidPrefix {
		t.Errorf("Expecting err %v, got %v for non-existent prefix", ErrInvalidPrefix, err)
	}
	_, err = GetBackendForURI("thisisinvalid")
	if err != ErrInvalidURI {
		t.Errorf("Expecting err %v, got %v for invalid URI", ErrInvalidURI, err)
	}
}
|
package testing
import (
"context"
"io"
"io/ioutil"
"net/http"
"strings"
"time"
"github.com/devspace-cloud/devspace/pkg/devspace/config/generated"
"github.com/devspace-cloud/devspace/pkg/devspace/kubectl"
"github.com/devspace-cloud/devspace/pkg/devspace/kubectl/portforward"
"github.com/devspace-cloud/devspace/pkg/util/kubeconfig"
"github.com/devspace-cloud/devspace/pkg/util/log"
k8sv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/rest"
)
// Client is a fake implementation of the kubectl.Client interface for
// unit tests. Most methods are no-ops that return zero values; the
// injected fields control the few observable behaviours.
type Client struct {
	Client kubernetes.Interface
	KubeLoader kubeconfig.Loader
	IsKubernetes bool
	Context string
}

// CurrentContext returns the injected fake context name.
func (c *Client) CurrentContext() string {
	return c.Context
}

// KubeClient returns the injected fake clientset.
func (c *Client) KubeClient() kubernetes.Interface {
	return c.Client
}

// Namespace always returns "testNamespace".
func (c *Client) Namespace() string {
	return "testNamespace"
}

// RestConfig returns a minimal config pointing at "testHost".
func (c *Client) RestConfig() *rest.Config {
	return &rest.Config{
		Host: "testHost",
	}
}

// KubeConfigLoader returns the injected fake loader.
func (c *Client) KubeConfigLoader() kubeconfig.Loader {
	return c.KubeLoader
}

// PrintWarning is a no-op; it never reports an error.
func (c *Client) PrintWarning(generatedConfig *generated.Config, noWarning, shouldWait bool, log log.Logger) error {
	return nil
}

// CopyFromReader is a no-op; it never reports an error.
func (c *Client) CopyFromReader(pod *k8sv1.Pod, container, containerPath string, reader io.Reader) error {
	return nil
}

// Copy is a no-op; it never reports an error.
func (c *Client) Copy(pod *k8sv1.Pod, container, containerPath, localPath string, exclude []string) error {
	return nil
}

// ExecStreamWithTransport is a no-op; it never reports an error.
func (c *Client) ExecStreamWithTransport(options *kubectl.ExecStreamWithTransportOptions) error {
	return nil
}

// ExecStream is a no-op; it never reports an error.
func (c *Client) ExecStream(options *kubectl.ExecStreamOptions) error {
	return nil
}

// ExecBuffered returns empty stdout and stderr.
func (c *Client) ExecBuffered(pod *k8sv1.Pod, container string, command []string, input io.Reader) ([]byte, []byte, error) {
	return []byte{}, []byte{}, nil
}

// GenericRequest returns an empty response.
func (c *Client) GenericRequest(options *kubectl.GenericRequestOptions) (string, error) {
	return "", nil
}

// ReadLogs returns the fixed string "ContainerLogs".
func (c *Client) ReadLogs(namespace, podName, containerName string, lastContainerLog bool, tail *int64) (string, error) {
	return "ContainerLogs", nil
}

// LogMultipleTimeout writes the fixed string "ContainerLogs" to writer.
func (c *Client) LogMultipleTimeout(imageSelector []string, interrupt chan error, tail *int64, writer io.Writer, timeout time.Duration, log log.Logger) error {
	_, err := writer.Write([]byte("ContainerLogs"))
	return err
}

// LogMultiple writes the fixed string "ContainerLogs" to writer.
func (c *Client) LogMultiple(imageSelector []string, interrupt chan error, tail *int64, writer io.Writer, log log.Logger) error {
	_, err := writer.Write([]byte("ContainerLogs"))
	return err
}

// Logs returns a reader over the fixed string "ContainerLogs".
func (c *Client) Logs(ctx context.Context, namespace, podName, containerName string, lastContainerLog bool, tail *int64, follow bool) (io.ReadCloser, error) {
	retVal := ioutil.NopCloser(strings.NewReader("ContainerLogs"))
	return retVal, nil
}

// GetUpgraderWrapper returns nil values for all results.
func (c *Client) GetUpgraderWrapper() (http.RoundTripper, kubectl.UpgraderWrapper, error) {
	return nil, nil, nil
}

// EnsureDefaultNamespace is a no-op; it never reports an error.
func (c *Client) EnsureDefaultNamespace(log log.Logger) error {
	return nil
}

// EnsureGoogleCloudClusterRoleBinding is a no-op; it never reports an error.
func (c *Client) EnsureGoogleCloudClusterRoleBinding(log log.Logger) error {
	return nil
}

// GetRunningPodsWithImage returns no pods.
func (c *Client) GetRunningPodsWithImage(imageNames []string, namespace string, maxWaiting time.Duration) ([]*k8sv1.Pod, error) {
	return nil, nil
}

// GetNewestRunningPod returns no pod.
func (c *Client) GetNewestRunningPod(labelSelector string, imageSelector []string, namespace string, maxWaiting time.Duration) (*k8sv1.Pod, error) {
	return nil, nil
}

// NewPortForwarder returns no forwarder.
func (c *Client) NewPortForwarder(pod *k8sv1.Pod, ports []string, addresses []string, stopChan chan struct{}, readyChan chan struct{}, errorChan chan error) (*portforward.PortForwarder, error) {
	return nil, nil
}

// IsLocalKubernetes reports the injected IsKubernetes flag.
func (c *Client) IsLocalKubernetes() bool {
	return c.IsKubernetes
}
// FakeFakeClientset wraps fake.Clientset to override its
// Discovery method with RBAC-awareness.
type FakeFakeClientset struct {
	fake.Clientset
	RBACEnabled bool
}

// Discovery returns a FakeFakeDiscovery that reports an RBAC resource
// list only when RBACEnabled is set.
func (f *FakeFakeClientset) Discovery() discovery.DiscoveryInterface {
	return &FakeFakeDiscovery{
		DiscoveryInterface: f.Clientset.Discovery(),
		RBACEnabled:        f.RBACEnabled,
	}
}

// FakeFakeDiscovery overrides the embedded DiscoveryInterface's
// ServerResources method.
type FakeFakeDiscovery struct {
	discovery.DiscoveryInterface
	RBACEnabled bool
}

// ServerResources returns a single RBAC APIResourceList when
// RBACEnabled is set, otherwise an empty list.
func (f *FakeFakeDiscovery) ServerResources() ([]*metav1.APIResourceList, error) {
	if f.RBACEnabled {
		return []*metav1.APIResourceList{
			&metav1.APIResourceList{
				GroupVersion: "rbac.authorization.k8s.io/v1beta1",
			},
		}, nil
	}
	return []*metav1.APIResourceList{}, nil
}
|
package main
import (
"fmt"
piscine "./func"
)
// main runs the currently-active piscine exercise.
//
// Cleanup: roughly 80 lines of commented-out invocations of earlier
// exercises (PointOne, UltimatePointOne, DivMod, StrLen, Swap, StrRev,
// BasicAtoi, Atoi, ...) were removed; version control is the place for
// that history.
func main() {
	// SortIntegerTable sorts the slice in place in ascending order.
	s := []int{5, 4, 3, 2, 1, 0}
	piscine.SortIntegerTable(s)
	fmt.Println(s)
}
|
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grumpy
import (
"fmt"
"reflect"
)
// Tuple represents Python 'tuple' objects.
//
// Tuples are thread safe by virtue of being immutable.
type Tuple struct {
	Object
	// elems holds the tuple's items; it is never mutated after
	// construction.
	elems []*Object
}

// NewTuple returns a tuple containing the given elements.
func NewTuple(elems ...*Object) *Tuple {
	// All zero-length tuples share the canonical emptyTuple instance.
	if len(elems) == 0 {
		return emptyTuple
	}
	return &Tuple{Object: Object{typ: TupleType}, elems: elems}
}
// Below are direct allocation versions of small Tuples. Rather than performing
// two allocations, one for the tuple object and one for the slice holding the
// elements, we allocate both objects at the same time in one block of memory.
// This both decreases the number of allocations overall as well as increases
// memory locality for tuple data. Both of which *should* improve time to
// allocate as well as read performance. The methods below are used by the
// compiler to create fixed size tuples when the size is known ahead of time.
//
// The number of specializations below were chosen first to cover all the fixed
// size tuple allocations in the runtime (currently 5), then filled out to
// cover the whole memory size class (see golang/src/runtime/sizeclasses.go for
// the table). On a 64bit system, a tuple of length 6 occupies 96 bytes - 48
// bytes for the tuple object and 6*8 (48) bytes of pointers.
//
// If methods are added or removed, then the constant MAX_DIRECT_TUPLE in
// compiler/util.py needs to be updated as well.

// NewTuple0 returns the empty tuple. This is mostly provided for the
// convenience of the compiler.
func NewTuple0() *Tuple { return emptyTuple }

// NewTuple1 returns a tuple of length 1 containing just elem0.
func NewTuple1(elem0 *Object) *Tuple {
	// Tuple header and element array are allocated as one block.
	t := struct {
		tuple Tuple
		elems [1]*Object
	}{
		tuple: Tuple{Object: Object{typ: TupleType}},
		elems: [1]*Object{elem0},
	}
	// Point the tuple at its inline element array.
	t.tuple.elems = t.elems[:]
	return &t.tuple
}

// NewTuple2 returns a tuple of length 2 containing just elem0 and elem1.
func NewTuple2(elem0, elem1 *Object) *Tuple {
	t := struct {
		tuple Tuple
		elems [2]*Object
	}{
		tuple: Tuple{Object: Object{typ: TupleType}},
		elems: [2]*Object{elem0, elem1},
	}
	t.tuple.elems = t.elems[:]
	return &t.tuple
}

// NewTuple3 returns a tuple of length 3 containing elem0 to elem2.
func NewTuple3(elem0, elem1, elem2 *Object) *Tuple {
	t := struct {
		tuple Tuple
		elems [3]*Object
	}{
		tuple: Tuple{Object: Object{typ: TupleType}},
		elems: [3]*Object{elem0, elem1, elem2},
	}
	t.tuple.elems = t.elems[:]
	return &t.tuple
}

// NewTuple4 returns a tuple of length 4 containing elem0 to elem3.
func NewTuple4(elem0, elem1, elem2, elem3 *Object) *Tuple {
	t := struct {
		tuple Tuple
		elems [4]*Object
	}{
		tuple: Tuple{Object: Object{typ: TupleType}},
		elems: [4]*Object{elem0, elem1, elem2, elem3},
	}
	t.tuple.elems = t.elems[:]
	return &t.tuple
}

// NewTuple5 returns a tuple of length 5 containing elem0 to elem4.
func NewTuple5(elem0, elem1, elem2, elem3, elem4 *Object) *Tuple {
	t := struct {
		tuple Tuple
		elems [5]*Object
	}{
		tuple: Tuple{Object: Object{typ: TupleType}},
		elems: [5]*Object{elem0, elem1, elem2, elem3, elem4},
	}
	t.tuple.elems = t.elems[:]
	return &t.tuple
}

// NewTuple6 returns a tuple of length 6 containing elem0 to elem5.
func NewTuple6(elem0, elem1, elem2, elem3, elem4, elem5 *Object) *Tuple {
	t := struct {
		tuple Tuple
		elems [6]*Object
	}{
		tuple: Tuple{Object: Object{typ: TupleType}},
		elems: [6]*Object{elem0, elem1, elem2, elem3, elem4, elem5},
	}
	t.tuple.elems = t.elems[:]
	return &t.tuple
}
// toTupleUnsafe casts o to a *Tuple without any type check; callers
// must already know o is a tuple.
func toTupleUnsafe(o *Object) *Tuple {
	return (*Tuple)(o.toPointer())
}

// GetItem returns the i'th element of t. Bounds are unchecked and therefore
// this method will panic unless 0 <= i < t.Len().
func (t *Tuple) GetItem(i int) *Object {
	return t.elems[i]
}

// Len returns the number of elements in t.
func (t *Tuple) Len() int {
	return len(t.elems)
}

// ToObject upcasts t to an Object.
func (t *Tuple) ToObject() *Object {
	return &t.Object
}

// TupleType is the object representing the Python 'tuple' type.
var TupleType = newBasisType("tuple", reflect.TypeOf(Tuple{}), toTupleUnsafe, ObjectType)

// emptyTuple is the canonical zero-length tuple shared by all callers.
var emptyTuple = &Tuple{Object: Object{typ: TupleType}}
func tupleAdd(f *Frame, v, w *Object) (*Object, *BaseException) {
if !w.isInstance(TupleType) {
return NotImplemented, nil
}
elems, raised := seqAdd(f, toTupleUnsafe(v).elems, toTupleUnsafe(w).elems)
if raised != nil {
return nil, raised
}
return NewTuple(elems...).ToObject(), nil
}
func tupleContains(f *Frame, t, v *Object) (*Object, *BaseException) {
return seqContains(f, t, v)
}
func tupleCount(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
if raised := checkMethodArgs(f, "count", args, TupleType, ObjectType); raised != nil {
return nil, raised
}
return seqCount(f, args[0], args[1])
}
func tupleEq(f *Frame, v, w *Object) (*Object, *BaseException) {
return tupleCompare(f, toTupleUnsafe(v), w, Eq)
}
func tupleGE(f *Frame, v, w *Object) (*Object, *BaseException) {
return tupleCompare(f, toTupleUnsafe(v), w, GE)
}
func tupleGetItem(f *Frame, o, key *Object) (*Object, *BaseException) {
t := toTupleUnsafe(o)
item, elems, raised := seqGetItem(f, t.elems, key)
if raised != nil {
return nil, raised
}
if item != nil {
return item, nil
}
return NewTuple(elems...).ToObject(), nil
}
func tupleGetNewArgs(f *Frame, args Args, _ KWArgs) (*Object, *BaseException) {
if raised := checkMethodArgs(f, "__getnewargs__", args, TupleType); raised != nil {
return nil, raised
}
return NewTuple1(args[0]).ToObject(), nil
}
func tupleGT(f *Frame, v, w *Object) (*Object, *BaseException) {
return tupleCompare(f, toTupleUnsafe(v), w, GT)
}
func tupleIter(f *Frame, o *Object) (*Object, *BaseException) {
return newSliceIterator(reflect.ValueOf(toTupleUnsafe(o).elems)), nil
}
func tupleLE(f *Frame, v, w *Object) (*Object, *BaseException) {
return tupleCompare(f, toTupleUnsafe(v), w, LE)
}
func tupleLen(f *Frame, o *Object) (*Object, *BaseException) {
return NewInt(len(toTupleUnsafe(o).elems)).ToObject(), nil
}
func tupleLT(f *Frame, v, w *Object) (*Object, *BaseException) {
return tupleCompare(f, toTupleUnsafe(v), w, LT)
}
func tupleMul(f *Frame, v, w *Object) (*Object, *BaseException) {
if !w.isInstance(IntType) {
return NotImplemented, nil
}
elems, raised := seqMul(f, toTupleUnsafe(v).elems, toIntUnsafe(w).Value())
if raised != nil {
return nil, raised
}
return NewTuple(elems...).ToObject(), nil
}
// tupleNE implements the != comparison for tuples.
func tupleNE(f *Frame, v, w *Object) (*Object, *BaseException) {
	return tupleCompare(f, toTupleUnsafe(v), w, NE)
}
// tupleNew implements tuple.__new__. Because tuples are immutable, an
// exact tuple constructed from an exact tuple is returned as-is;
// otherwise the (optional) iterable argument is materialized into a new
// instance of t. Subtypes always get a fresh object.
func tupleNew(f *Frame, t *Type, args Args, _ KWArgs) (*Object, *BaseException) {
	if t == TupleType && len(args) == 1 && args[0].typ == TupleType {
		// Tuples are immutable so just return the tuple provided.
		return args[0], nil
	}
	elems, raised := seqNew(f, args)
	if raised != nil {
		return nil, raised
	}
	tup := toTupleUnsafe(newObject(t))
	tup.elems = elems
	return tup.ToObject(), nil
}
// tupleRepr implements repr(tuple), producing "(...)" for
// self-referential tuples and the trailing-comma form "(x,)" for
// singletons.
func tupleRepr(f *Frame, o *Object) (*Object, *BaseException) {
	t := toTupleUnsafe(o)
	if f.reprEnter(t.ToObject()) {
		// Already being repr'd further up the stack: cycle detected.
		return NewStr("(...)").ToObject(), nil
	}
	inner, raised := seqRepr(f, t.elems)
	f.reprLeave(t.ToObject())
	if raised != nil {
		return nil, raised
	}
	var out string
	if len(t.elems) == 1 {
		out = "(" + inner + ",)"
	} else {
		out = "(" + inner + ")"
	}
	return NewStr(out).ToObject(), nil
}
// tupleRMul implements the reflected multiply slot (int * tuple).
func tupleRMul(f *Frame, v, w *Object) (*Object, *BaseException) {
	if !w.isInstance(IntType) {
		return NotImplemented, nil
	}
	count := toIntUnsafe(w).Value()
	repeated, raised := seqMul(f, toTupleUnsafe(v).elems, count)
	if raised != nil {
		return nil, raised
	}
	return NewTuple(repeated...).ToObject(), nil
}
// initTupleType populates the tuple type's method dict and operator
// slots. It runs once during runtime type initialization.
func initTupleType(dict map[string]*Object) {
	dict["count"] = newBuiltinFunction("count", tupleCount).ToObject()
	dict["__getnewargs__"] = newBuiltinFunction("__getnewargs__", tupleGetNewArgs).ToObject()
	TupleType.slots.Add = &binaryOpSlot{tupleAdd}
	TupleType.slots.Contains = &binaryOpSlot{tupleContains}
	TupleType.slots.Eq = &binaryOpSlot{tupleEq}
	TupleType.slots.GE = &binaryOpSlot{tupleGE}
	TupleType.slots.GetItem = &binaryOpSlot{tupleGetItem}
	TupleType.slots.GT = &binaryOpSlot{tupleGT}
	TupleType.slots.Iter = &unaryOpSlot{tupleIter}
	TupleType.slots.LE = &binaryOpSlot{tupleLE}
	TupleType.slots.Len = &unaryOpSlot{tupleLen}
	TupleType.slots.LT = &binaryOpSlot{tupleLT}
	TupleType.slots.Mul = &binaryOpSlot{tupleMul}
	TupleType.slots.NE = &binaryOpSlot{tupleNE}
	TupleType.slots.New = &newSlot{tupleNew}
	TupleType.slots.Repr = &unaryOpSlot{tupleRepr}
	TupleType.slots.RMul = &binaryOpSlot{tupleRMul}
}
// tupleCompare applies cmp element-wise (lexicographically) to two
// tuples, yielding NotImplemented when w is not a tuple.
func tupleCompare(f *Frame, v *Tuple, w *Object, cmp binaryOpFunc) (*Object, *BaseException) {
	if !w.isInstance(TupleType) {
		return NotImplemented, nil
	}
	rhs := toTupleUnsafe(w)
	return seqCompare(f, v.elems, rhs.elems, cmp)
}
|
package runner
import (
"context"
"github.com/jcftang/gitbuilder-go/buildroot"
log "github.com/sirupsen/logrus"
)
// RunAll executes the repo setup, build/test and report for every
// branch of the build root, then generates the report.
//
// Revisions that already have a recorded pass or fail result are
// skipped. Failures to resolve or build a single revision are logged
// and do not abort the remaining branches; RunAll currently always
// returns nil.
func RunAll(ctx context.Context, b buildroot.BuildRoot) error {
	for _, branch := range b.Branches() {
		nextRev, err := b.NextRev(branch)
		if err != nil {
			// Without a usable revision there is nothing to build on
			// this branch; previously execution fell through and acted
			// on the zero-value rev.
			log.Error(err)
			continue
		}
		if nextRev == "" {
			log.Info("branch ", branch.Name, " is up to date")
			continue
		}
		// Skip revisions that already have a result either way.
		if b.IsPass(nextRev) || b.IsFail(nextRev) {
			continue
		}
		b.RunSetup(ctx, nextRev)
		if err := b.RunBuild(ctx, nextRev); err != nil {
			log.Error(err)
		}
	}
	b.RunReport()
	return nil
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package ssh
import (
"bytes"
"context"
"fmt"
"io"
"os"
"github.com/Azure/aks-engine/pkg/api"
"github.com/pkg/errors"
)
// CopyToRemote copies a file to a remote host.
//
// Context ctx is only enforced while establishing the SSH connection
// and creating the SSH client; the upload itself is not cancellable.
func CopyToRemote(ctx context.Context, host *RemoteHost, file *RemoteFile) (combinedOutput string, err error) {
	client, err := clientWithRetry(ctx, host)
	if err != nil {
		return "", errors.Wrap(err, "creating SSH client")
	}
	defer client.Close()

	session, err := client.NewSession()
	if err != nil {
		return "", errors.Wrap(err, "creating SSH session")
	}
	defer session.Close()

	// Make this configurable if we find that consumers need to update the command
	uploadCmd := getUploadCommand(host.OperatingSystem)(file)
	// The file content is streamed to the remote command's stdin.
	session.Stdin = bytes.NewReader(file.Content)
	out, err := session.CombinedOutput(uploadCmd)
	if err != nil {
		return string(out), errors.Wrap(err, "uploading to remote host")
	}
	return "", nil
}
// CopyFromRemote copies a remote file to the local host.
//
// Context ctx is only enforced while establishing the SSH connection
// and creating the SSH client; the download itself is not cancellable.
// On failure the remote command's captured stderr is returned alongside
// the error.
func CopyFromRemote(ctx context.Context, host *RemoteHost, remoteFile *RemoteFile, destinationPath string) (stderr string, err error) {
	// O_TRUNC ensures an existing destination file does not keep stale
	// trailing bytes when the downloaded content is shorter.
	f, err := os.OpenFile(destinationPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return "", errors.Wrap(err, "opening destination file")
	}
	defer f.Close()
	c, err := clientWithRetry(ctx, host)
	if err != nil {
		return "", errors.Wrap(err, "creating SSH client")
	}
	defer c.Close()
	s, err := c.NewSession()
	if err != nil {
		return "", errors.Wrap(err, "creating SSH session")
	}
	defer s.Close()
	// Capture remote stderr so it can be surfaced to the caller on
	// failure. (Formatting the nil s.Stderr writer, as the previous
	// code did, produced "%!s(<nil>)" rather than any diagnostics.)
	var stderrBuf bytes.Buffer
	s.Stderr = &stderrBuf
	stdout, err := s.StdoutPipe()
	if err != nil {
		return "", errors.Wrap(err, "opening SSH session stdout pipe")
	}
	// Make this configurable if we find that consumers need to update the command
	cmd := getDownloadCommand(host.OperatingSystem)(remoteFile)
	if err = s.Start(cmd); err != nil {
		return stderrBuf.String(), errors.Wrap(err, "downloading logs from remote host")
	}
	if _, err = io.Copy(f, stdout); err != nil {
		return stderrBuf.String(), errors.Wrap(err, "downloading logs")
	}
	// Wait reports a non-zero exit status of the remote command, which
	// Start alone never surfaces.
	if err = s.Wait(); err != nil {
		return stderrBuf.String(), errors.Wrap(err, "downloading logs from remote host")
	}
	return "", nil
}
// uploadCommandBuilder produces the remote shell command that receives
// a file's content on stdin and writes it to the file's path.
type uploadCommandBuilder func(file *RemoteFile) string
// getUploadCommand returns the OS-specific upload command builder, or
// nil for an OS other than Linux or Windows.
func getUploadCommand(os api.OSType) uploadCommandBuilder {
	switch os {
	case api.Linux:
		// Runs under sudo so it can create parent directories and set
		// the requested mode and ownership.
		return func(f *RemoteFile) string {
			return fmt.Sprintf("sudo bash -c \"mkdir -p $(dirname %s); cat /dev/stdin > %s; chmod %s %s; chown %s %s\"",
				f.Path, f.Path, f.Permissions, f.Path, f.Owner, f.Path)
		}
	case api.Windows:
		// PowerShell pipes the session's stdin ($Input) into the file.
		return func(f *RemoteFile) string {
			return fmt.Sprintf("powershell -noprofile -command \"$Input | Out-File -Encoding ASCII %s\"",
				f.Path)
		}
	default:
		return nil
	}
}
// downloadCommandBuilder produces the remote command that streams a
// file's content to stdout.
type downloadCommandBuilder func(file *RemoteFile) string
// getDownloadCommand returns the OS-specific download command builder,
// or nil for an OS other than Linux or Windows.
func getDownloadCommand(os api.OSType) downloadCommandBuilder {
	switch os {
	case api.Linux:
		return func(f *RemoteFile) string {
			return fmt.Sprintf("bash -c \"cat %s > /dev/stdout\"", f.Path)
		}
	case api.Windows:
		return func(f *RemoteFile) string {
			return fmt.Sprintf("type %s", f.Path)
		}
	default:
		return nil
	}
}
|
package main
import "log"
// Memento stores a snapshot of an Originator's internal state.
// (Reformatted: the original method declarations were not gofmt-clean.)
type Memento struct {
	state string
}

// SetState overwrites the stored snapshot.
func (m *Memento) SetState(s string) {
	m.state = s
}

// GetState returns the stored snapshot.
func (m *Memento) GetState() string {
	return m.state
}
// Originator owns the state that can be snapshotted into a Memento and
// later restored. (Reformatted: the original was not gofmt-clean.)
type Originator struct {
	state string
}

// SetState replaces the originator's current state.
func (o *Originator) SetState(s string) {
	o.state = s
}

// GetState returns the originator's current state.
func (o *Originator) GetState() string {
	return o.state
}

// CreateMemento captures the current state in a new Memento.
func (o *Originator) CreateMemento() *Memento {
	return &Memento{state: o.state}
}
// Caretaker holds a Memento on behalf of an Originator without ever
// inspecting its contents. (Reformatted: the original was not
// gofmt-clean.)
type Caretaker struct {
	memento *Memento
}

// GetMemento returns the held memento (nil if none was stored).
func (c *Caretaker) GetMemento() *Memento {
	return c.memento
}

// SetMemento stores m for later retrieval.
func (c *Caretaker) SetMemento(m *Memento) {
	c.memento = m
}
// main demonstrates the memento pattern: snapshot the originator's
// state, mutate it, then restore it from the caretaker's memento.
// (Reformatted to gofmt style; the redundant trailing return removed.)
func main() {
	o := &Originator{state: "hello"}
	log.Printf("当前状态:%v\n", o.GetState())
	c := new(Caretaker)
	c.SetMemento(o.CreateMemento())
	o.SetState("world")
	log.Printf("更改当前状态:%v\n", o.GetState())
	// Roll back to the snapshot taken before the mutation.
	o.SetState(c.GetMemento().GetState())
	log.Printf("恢复后状态:%v\n", o.GetState())
}
|
package cron
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestCron(t *testing.T) {
called := make(chan bool)
c, err := Start(context.Background(), []Job{{
Name: "testing",
Run: func(ctx context.Context) { called <- true },
Schedule: ConstantInterval{Interval: time.Millisecond},
}})
assert.NoError(t, err)
select {
case b := <-called:
assert.True(t, b, "cron job called")
case <-time.After(time.Second):
assert.Fail(t, "timeout waiting for cron job to be called")
}
c.Stop()
}
func TestPanicCaught(t *testing.T) {
called := make(chan bool)
c, err := Start(context.Background(), []Job{{
Name: "testing",
Run: func(ctx context.Context) {
called <- true
panic("testing")
},
Schedule: ConstantInterval{Interval: time.Millisecond},
}})
assert.NoError(t, err)
select {
case b := <-called:
assert.True(t, b, "cron job called")
case <-time.After(time.Second):
assert.Fail(t, "timeout waiting for cron job to be called")
}
c.Stop()
}
// TestCancelWork verifies that stopping the scheduler cancels the
// context handed to a running job.
func TestCancelWork(t *testing.T) {
	called := make(chan bool)
	// canceled is signalled from the job goroutine; using a channel
	// instead of a shared bool keeps the handoff race-free under
	// `go test -race` (the original wrote/read an unsynchronized bool).
	canceled := make(chan bool, 1)
	c, err := Start(context.Background(), []Job{{
		Name: "testing",
		Run: func(ctx context.Context) {
			called <- true
			select {
			case <-time.After(time.Second):
			case <-ctx.Done():
				canceled <- true
			}
		},
		Schedule: ConstantInterval{Interval: time.Millisecond},
	}})
	assert.NoError(t, err)
	select {
	case b := <-called:
		assert.True(t, b, "cron job called")
		c.Stop()
		select {
		case <-canceled:
			// Job observed ctx.Done() as expected.
		case <-time.After(time.Second):
			assert.Fail(t, "cron job canceled")
		}
	case <-time.After(time.Second):
		assert.Fail(t, "timeout waiting for cron job to be called")
	}
}
|
package loaders_test
import (
"context"
"testing"
"time"
"github.com/syncromatics/kafmesh/internal/graph/loaders"
"github.com/syncromatics/kafmesh/internal/graph/model"
gomock "github.com/golang/mock/gomock"
"github.com/pkg/errors"
"gotest.tools/assert"
)
// Test_Topics_Inputs verifies that the topic loader returns batched
// processor inputs and surfaces repository errors.
func Test_Topics_Inputs(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	repo := NewMockTopicRepository(ctrl)
	repo.EXPECT().
		ProcessorInputsByTopics(gomock.Any(), []int{12}).
		Return([][]*model.ProcessorInput{{{}}}, nil).
		Times(1)
	repo.EXPECT().
		ProcessorInputsByTopics(gomock.Any(), []int{13}).
		Return(nil, errors.Errorf("boom")).
		Times(1)

	loader := loaders.NewTopicLoader(context.Background(), repo, 10*time.Millisecond)

	got, err := loader.ProcessorInputsByTopic(12)
	assert.NilError(t, err)
	assert.Assert(t, got != nil)

	_, err = loader.ProcessorInputsByTopic(13)
	assert.ErrorContains(t, err, "failed to get inputs from repository: boom")
}
// Test_Topics_Joins verifies that the topic loader returns batched
// processor joins and surfaces repository errors.
func Test_Topics_Joins(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	repo := NewMockTopicRepository(ctrl)
	repo.EXPECT().
		ProcessorJoinsByTopics(gomock.Any(), []int{12}).
		Return([][]*model.ProcessorJoin{{{}}}, nil).
		Times(1)
	repo.EXPECT().
		ProcessorJoinsByTopics(gomock.Any(), []int{13}).
		Return(nil, errors.Errorf("boom")).
		Times(1)

	loader := loaders.NewTopicLoader(context.Background(), repo, 10*time.Millisecond)

	got, err := loader.ProcessorJoinsByTopic(12)
	assert.NilError(t, err)
	assert.Assert(t, got != nil)

	_, err = loader.ProcessorJoinsByTopic(13)
	assert.ErrorContains(t, err, "failed to get joins from repository: boom")
}
// Test_Topics_Lookups verifies that the topic loader returns batched
// processor lookups and surfaces repository errors.
func Test_Topics_Lookups(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	repo := NewMockTopicRepository(ctrl)
	repo.EXPECT().
		ProcessorLookupsByTopics(gomock.Any(), []int{12}).
		Return([][]*model.ProcessorLookup{{{}}}, nil).
		Times(1)
	repo.EXPECT().
		ProcessorLookupsByTopics(gomock.Any(), []int{13}).
		Return(nil, errors.Errorf("boom")).
		Times(1)

	loader := loaders.NewTopicLoader(context.Background(), repo, 10*time.Millisecond)

	got, err := loader.ProcessorLookupsByTopic(12)
	assert.NilError(t, err)
	assert.Assert(t, got != nil)

	_, err = loader.ProcessorLookupsByTopic(13)
	assert.ErrorContains(t, err, "failed to get lookups from repository: boom")
}
// Test_Topics_Outputs verifies that the topic loader returns batched
// processor outputs and surfaces repository errors.
func Test_Topics_Outputs(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	repo := NewMockTopicRepository(ctrl)
	repo.EXPECT().
		ProcessorOutputsByTopics(gomock.Any(), []int{12}).
		Return([][]*model.ProcessorOutput{{{}}}, nil).
		Times(1)
	repo.EXPECT().
		ProcessorOutputsByTopics(gomock.Any(), []int{13}).
		Return(nil, errors.Errorf("boom")).
		Times(1)

	loader := loaders.NewTopicLoader(context.Background(), repo, 10*time.Millisecond)

	got, err := loader.ProcessorOutputsByTopic(12)
	assert.NilError(t, err)
	assert.Assert(t, got != nil)

	_, err = loader.ProcessorOutputsByTopic(13)
	assert.ErrorContains(t, err, "failed to get outputs from repository: boom")
}
// Test_Topics_ProcessorPersistence verifies that the topic loader
// returns batched persistence processors and surfaces repository errors.
func Test_Topics_ProcessorPersistence(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	repo := NewMockTopicRepository(ctrl)
	repo.EXPECT().
		ProcessorPersistencesByTopics(gomock.Any(), []int{12}).
		Return([][]*model.Processor{{{}}}, nil).
		Times(1)
	repo.EXPECT().
		ProcessorPersistencesByTopics(gomock.Any(), []int{13}).
		Return(nil, errors.Errorf("boom")).
		Times(1)

	loader := loaders.NewTopicLoader(context.Background(), repo, 10*time.Millisecond)

	got, err := loader.ProcessorPersistencesByTopic(12)
	assert.NilError(t, err)
	assert.Assert(t, got != nil)

	_, err = loader.ProcessorPersistencesByTopic(13)
	assert.ErrorContains(t, err, "failed to get processors from repository: boom")
}
// Test_Topics_Sinks verifies that the topic loader returns batched
// sinks and surfaces repository errors.
func Test_Topics_Sinks(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	repo := NewMockTopicRepository(ctrl)
	repo.EXPECT().
		SinksByTopics(gomock.Any(), []int{12}).
		Return([][]*model.Sink{{{}}}, nil).
		Times(1)
	repo.EXPECT().
		SinksByTopics(gomock.Any(), []int{13}).
		Return(nil, errors.Errorf("boom")).
		Times(1)

	loader := loaders.NewTopicLoader(context.Background(), repo, 10*time.Millisecond)

	got, err := loader.SinksByTopic(12)
	assert.NilError(t, err)
	assert.Assert(t, got != nil)

	_, err = loader.SinksByTopic(13)
	assert.ErrorContains(t, err, "failed to get sinks from repository: boom")
}
// Test_Topics_Sources verifies that the topic loader returns batched
// sources and surfaces repository errors.
func Test_Topics_Sources(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	repo := NewMockTopicRepository(ctrl)
	repo.EXPECT().
		SourcesByTopics(gomock.Any(), []int{12}).
		Return([][]*model.Source{{{}}}, nil).
		Times(1)
	repo.EXPECT().
		SourcesByTopics(gomock.Any(), []int{13}).
		Return(nil, errors.Errorf("boom")).
		Times(1)

	loader := loaders.NewTopicLoader(context.Background(), repo, 10*time.Millisecond)

	got, err := loader.SourcesByTopic(12)
	assert.NilError(t, err)
	assert.Assert(t, got != nil)

	_, err = loader.SourcesByTopic(13)
	assert.ErrorContains(t, err, "failed to get sources from repository: boom")
}
// Test_Topics_ViewSinks verifies that the topic loader returns batched
// view sinks and surfaces repository errors.
func Test_Topics_ViewSinks(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	repo := NewMockTopicRepository(ctrl)
	repo.EXPECT().
		ViewSinksByTopics(gomock.Any(), []int{12}).
		Return([][]*model.ViewSink{{{}}}, nil).
		Times(1)
	repo.EXPECT().
		ViewSinksByTopics(gomock.Any(), []int{13}).
		Return(nil, errors.Errorf("boom")).
		Times(1)

	loader := loaders.NewTopicLoader(context.Background(), repo, 10*time.Millisecond)

	got, err := loader.ViewSinksByTopic(12)
	assert.NilError(t, err)
	assert.Assert(t, got != nil)

	_, err = loader.ViewSinksByTopic(13)
	assert.ErrorContains(t, err, "failed to get view sinks from repository: boom")
}
// Test_Topics_ViewSources verifies that the topic loader returns
// batched view sources and surfaces repository errors.
func Test_Topics_ViewSources(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	repo := NewMockTopicRepository(ctrl)
	repo.EXPECT().
		ViewSourcesByTopics(gomock.Any(), []int{12}).
		Return([][]*model.ViewSource{{{}}}, nil).
		Times(1)
	repo.EXPECT().
		ViewSourcesByTopics(gomock.Any(), []int{13}).
		Return(nil, errors.Errorf("boom")).
		Times(1)

	loader := loaders.NewTopicLoader(context.Background(), repo, 10*time.Millisecond)

	got, err := loader.ViewSourcesByTopic(12)
	assert.NilError(t, err)
	assert.Assert(t, got != nil)

	_, err = loader.ViewSourcesByTopic(13)
	assert.ErrorContains(t, err, "failed to get view sources from repository: boom")
}
// Test_Topics_Views verifies that the topic loader returns batched
// views and surfaces repository errors.
func Test_Topics_Views(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	repo := NewMockTopicRepository(ctrl)
	repo.EXPECT().
		ViewsByTopics(gomock.Any(), []int{12}).
		Return([][]*model.View{{{}}}, nil).
		Times(1)
	repo.EXPECT().
		ViewsByTopics(gomock.Any(), []int{13}).
		Return(nil, errors.Errorf("boom")).
		Times(1)

	loader := loaders.NewTopicLoader(context.Background(), repo, 10*time.Millisecond)

	got, err := loader.ViewsByTopic(12)
	assert.NilError(t, err)
	assert.Assert(t, got != nil)

	_, err = loader.ViewsByTopic(13)
	assert.ErrorContains(t, err, "failed to get views from repository: boom")
}
|
package pruning
import (
"context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned"
)
// Pruner strips unwanted fields from a ClusterServiceVersion in place.
type Pruner interface {
	Prune(*v1alpha1.ClusterServiceVersion)
}
// PrunerFunc adapts an ordinary function to the Pruner interface.
type PrunerFunc func(*v1alpha1.ClusterServiceVersion)
// Prune calls f(csv).
func (f PrunerFunc) Prune(csv *v1alpha1.ClusterServiceVersion) {
	f(csv)
}
// NewListerWatcher returns a cache.ListerWatcher over the
// ClusterServiceVersions in namespace that runs every listed or watched
// CSV through p before handing it on. override mutates the ListOptions
// for both the list and watch calls; callers must pass a non-nil func.
//
// NOTE(review): list/watch use context.TODO(); plumb a caller-supplied
// context through if cancellation is ever needed here.
func NewListerWatcher(client versioned.Interface, namespace string, override func(*metav1.ListOptions), p Pruner) cache.ListerWatcher {
	return &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			override(&options)
			list, err := client.OperatorsV1alpha1().ClusterServiceVersions(namespace).List(context.TODO(), options)
			if err != nil {
				return list, err
			}
			// Prune items in place before the informer caches them.
			for i := range list.Items {
				p.Prune(&list.Items[i])
			}
			return list, nil
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			override(&options)
			w, err := client.OperatorsV1alpha1().ClusterServiceVersions(namespace).Watch(context.TODO(), options)
			if err != nil {
				return w, err
			}
			// Wrap the watch so every delivered CSV event is pruned too;
			// the filter always keeps the event (returns true).
			return watch.Filter(w, watch.FilterFunc(func(e watch.Event) (watch.Event, bool) {
				if csv, ok := e.Object.(*v1alpha1.ClusterServiceVersion); ok {
					p.Prune(csv)
				}
				return e, true
			})), nil
		},
	}
}
|
package flex
import (
"context"
"os"
"time"
gomock "github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
fakekubeclientset "k8s.io/client-go/kubernetes/fake"
testcore "k8s.io/client-go/testing"
flexmocks "github.com/IBM/ubiquity-k8s/sidecars/flex/mocks"
"github.com/IBM/ubiquity-k8s/utils"
"github.com/IBM/ubiquity/resources"
)
// ServiceSyncer specs. PDescribe marks the whole container pending, so
// these specs are skipped unless the "P" prefix is removed or focused.
var _ = PDescribe("ServiceSyncer", func() {
	var ss *ServiceSyncer
	var kubeClient *fakekubeclientset.Clientset
	var realFlexConfigSyncer FlexConfigSyncer
	var ctx context.Context
	var cancelFunc context.CancelFunc
	var mockCtrl *gomock.Controller
	var mockFlexConfigSyncer *flexmocks.MockFlexConfigSyncer
	var ns = "ubiquity"
	// Fixture: the ubiquity service whose ClusterIP the syncer tracks.
	var svc = &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name: utils.UbiquityServiceName,
		},
		Spec: v1.ServiceSpec{
			ClusterIP: "1.2.3.4",
		},
	}
	BeforeEach(func() {
		os.Setenv("NAMESPACE", "ubiquity")
		ctx, cancelFunc = context.WithCancel(context.Background())
		kubeClient = fakekubeclientset.NewSimpleClientset()
		mockCtrl = gomock.NewController(GinkgoT())
		mockFlexConfigSyncer = flexmocks.NewMockFlexConfigSyncer(mockCtrl)
		realFlexConfigSyncer = defaultFlexConfigSyncer
		// mock the defaultFlexConfigSyncer
		defaultFlexConfigSyncer = mockFlexConfigSyncer
		ss, _ = NewServiceSyncer(kubeClient, ctx)
	})
	AfterEach(func() {
		os.Setenv("NAMESPACE", "")
		mockCtrl.Finish()
		// Restore the package-level syncer swapped out in BeforeEach.
		defaultFlexConfigSyncer = realFlexConfigSyncer
	})
	Describe("test Sync", func() {
		// Every spec cancels the context shortly after starting so that
		// the blocking Sync call returns.
		JustBeforeEach(func() {
			go func() {
				// stop the Sync
				time.Sleep(30 * time.Millisecond)
				cancelFunc()
			}()
		})
		Context("ubiquity service does not exist at the beginning", func() {
			BeforeEach(func() {
				emptyConfig := &resources.UbiquityPluginConfig{}
				mockFlexConfigSyncer.EXPECT().GetCurrentFlexConfig().Return(emptyConfig, nil)
				mockFlexConfigSyncer.EXPECT().UpdateFlexConfig(gomock.Any())
				go func() {
					svcWatcher := watch.NewFake()
					kubeClient.PrependWatchReactor("services", testcore.DefaultWatchReactor(svcWatcher, nil))
					time.Sleep(20 * time.Millisecond)
					// create the service after the Sync starts
					kubeClient.CoreV1().Services(svc.Namespace).Create(svc)
					svcWatcher.Add(svc)
				}()
			})
			It("should call processService only once to get and update clusterIP", func(done Done) {
				err := ss.Sync()
				Ω(err).ShouldNot(HaveOccurred())
				close(done)
			})
		})
		Context("ubiquity service exists at the beginning and config has right cluterIP", func() {
			BeforeEach(func() {
				kubeClient.CoreV1().Services(svc.Namespace).Create(svc)
				configWithUbiquityIP := &resources.UbiquityPluginConfig{UbiquityServer: resources.UbiquityServerConnectionInfo{Address: "1.2.3.4"}}
				mockFlexConfigSyncer.EXPECT().GetCurrentFlexConfig().Return(configWithUbiquityIP, nil)
				// Config already matches the service IP: no update expected.
				mockFlexConfigSyncer.EXPECT().UpdateFlexConfig(gomock.Any()).Times(0)
			})
			It("should call processService only once but never update clusterIP", func(done Done) {
				err := ss.Sync()
				Ω(err).ShouldNot(HaveOccurred())
				close(done)
			})
		})
		Context("ubiquity service exists at the beginning and cluterIP never changes and config has no cluterIP", func() {
			BeforeEach(func() {
				kubeClient.CoreV1().Services(svc.Namespace).Create(svc)
				emptyConfig := &resources.UbiquityPluginConfig{}
				configWithUbiquityIP := &resources.UbiquityPluginConfig{UbiquityServer: resources.UbiquityServerConnectionInfo{Address: "1.2.3.4"}}
				// First call sees an empty config (triggers an update);
				// second call sees the populated config (no update).
				mockFlexConfigSyncer.EXPECT().GetCurrentFlexConfig().Return(emptyConfig, nil)
				mockFlexConfigSyncer.EXPECT().GetCurrentFlexConfig().Return(configWithUbiquityIP, nil)
				mockFlexConfigSyncer.EXPECT().UpdateFlexConfig(gomock.Any())
				go func() {
					svcWatcher := watch.NewFake()
					kubeClient.PrependWatchReactor("services", testcore.DefaultWatchReactor(svcWatcher, nil))
					time.Sleep(20 * time.Millisecond)
					svcWatcher.Modify(svc)
				}()
			})
			It("should call processService twice but update clusterIP only once", func(done Done) {
				err := ss.Sync()
				Ω(err).ShouldNot(HaveOccurred())
				close(done)
			})
		})
		Context("ubiquity service exists at the beginning and cluterIP changes and config has right cluterIP", func() {
			BeforeEach(func() {
				kubeClient.CoreV1().Services(svc.Namespace).Create(svc)
				configWithUbiquityIP := &resources.UbiquityPluginConfig{UbiquityServer: resources.UbiquityServerConnectionInfo{Address: "1.2.3.4"}}
				mockFlexConfigSyncer.EXPECT().GetCurrentFlexConfig().Return(configWithUbiquityIP, nil).Times(2)
				mockFlexConfigSyncer.EXPECT().UpdateFlexConfig(gomock.Any())
				go func() {
					// Deliver a modified service with a new ClusterIP so
					// the syncer must update the flex config once.
					newSvc := svc.DeepCopy()
					newSvc.Spec.ClusterIP = "5.6.7.8"
					svcWatcher := watch.NewFake()
					kubeClient.PrependWatchReactor("services", testcore.DefaultWatchReactor(svcWatcher, nil))
					time.Sleep(20 * time.Millisecond)
					svcWatcher.Modify(newSvc)
				}()
			})
			It("should call processService twice but update clusterIP only once", func(done Done) {
				err := ss.Sync()
				Ω(err).ShouldNot(HaveOccurred())
				close(done)
			})
		})
	})
})
|
package tools
import (
"github.com/PagerDuty/go-pagerduty"
"reflect"
"testing"
)
// TestGetMappedEscalationPolicies checks a simple policy maps through
// with its name intact.
func TestGetMappedEscalationPolicies(t *testing.T) {
	policy := pagerduty.EscalationPolicy{}
	policy.Name = "Test Policy"
	policy.NumLoops = 2
	policy.ID = "Test1"

	mapped := GetMappedEscalationPolicies([]pagerduty.EscalationPolicy{policy})
	assertEqual(t, mapped[0].Name, policy.Name)
}
// TestGetMappedEscalationPoliciesNested checks that a policy carrying
// escalation rules still maps through with its name intact.
func TestGetMappedEscalationPoliciesNested(t *testing.T) {
	rule := pagerduty.EscalationRule{}
	rule.ID = "rule1"
	rule.Delay = 2

	policy := pagerduty.EscalationPolicy{}
	policy.Name = "Test Policy"
	policy.NumLoops = 2
	policy.ID = "Test1"
	policy.EscalationRules = []pagerduty.EscalationRule{rule}

	mapped := GetMappedEscalationPolicies([]pagerduty.EscalationPolicy{policy})
	assertEqual(t, mapped[0].Name, policy.Name)
}
// TestGetMappedEscalationRules checks that rules are mapped with their
// owning policy ID and their positional level index.
func TestGetMappedEscalationRules(t *testing.T) {
	var testRules []pagerduty.EscalationRule
	testRule1 := pagerduty.EscalationRule{}
	testRule1.ID = "rule1"
	testRule1.Delay = 2
	testRules = append(testRules, testRule1)
	testPolicy := pagerduty.EscalationPolicy{}
	testPolicy.Name = "Test Policy"
	testPolicy.NumLoops = 2
	testPolicy.ID = "Test1"
	testPolicy.EscalationRules = testRules
	// The original also built an EscalationPolicies slice that was never
	// used; GetMappedEscalationRules needs only the rules and policy ID.
	var result = GetMappedEscalationRules(testRules, testPolicy.ID)
	assertEqual(t, result[0].ID, testRule1.ID)
	assertEqual(t, result[0].PolicyID, testPolicy.ID)
	assertEqual(t, 0, result[0].LevelIndex)
}
// TestGetMappedSchedules checks a schedule maps through with its name
// and API object ID intact.
func TestGetMappedSchedules(t *testing.T) {
	schedule := pagerduty.Schedule{
		Name: "Infra",
		APIObject: pagerduty.APIObject{
			ID: "schedule1",
		},
	}

	mapped := GetMappedSchedules([]pagerduty.Schedule{schedule})
	assertEqual(t, mapped[0].Name, schedule.Name)
	assertEqual(t, mapped[0].APIObject.ID, schedule.APIObject.ID)
}
// TestGetMappedServices checks a service maps through with its name and
// API object ID intact.
func TestGetMappedServices(t *testing.T) {
	service := pagerduty.Service{
		Name:        "PD Service",
		Description: "grumble grumble",
		APIObject: pagerduty.APIObject{
			ID:   "Service1",
			Type: "Type1",
		},
	}

	mapped := GetMappedServices([]pagerduty.Service{service})
	assertEqual(t, mapped[0].Name, service.Name)
	assertEqual(t, mapped[0].APIObject.ID, service.APIObject.ID)
}
// TestGetMappedIncidents checks an incident maps through with its
// number, ID and first-trigger log entry ID intact.
func TestGetMappedIncidents(t *testing.T) {
	incident := pagerduty.Incident{
		IncidentNumber: 1,
		IncidentKey:    "grumble grumble",
		APIObject: pagerduty.APIObject{
			ID:   "Incident11",
			Type: "Disaster",
		},
		FirstTriggerLogEntry: pagerduty.APIObject{
			ID:   "trig1",
			Type: "info",
		},
	}

	mapped := GetMappedIncidents([]pagerduty.Incident{incident})
	assertEqual(t, mapped[0].IncidentNumber, incident.IncidentNumber)
	assertEqual(t, mapped[0].APIObject.ID, incident.APIObject.ID)
	assertEqual(t, mapped[0].FirstTriggerLogEntry.ID, incident.FirstTriggerLogEntry.ID)
}
// TestGetMappedLogEntries checks a log entry (with its embedded
// incident) maps through with creation time, ID and type intact.
func TestGetMappedLogEntries(t *testing.T) {
	incident := pagerduty.Incident{
		IncidentNumber: 1,
		IncidentKey:    "grumble grumble",
		APIObject: pagerduty.APIObject{
			ID:   "Incident11",
			Type: "Disaster",
		},
		FirstTriggerLogEntry: pagerduty.APIObject{
			ID:   "trig1",
			Type: "info",
		},
	}
	entry := pagerduty.LogEntry{
		CreatedAt: "01-Jun-2020",
		APIObject: pagerduty.APIObject{
			ID:   "log1",
			Type: "trigger_log_entry",
		},
		Agent: pagerduty.Agent{
			ID:   "agent001",
			Type: "super_user",
		},
		Channel:  pagerduty.Channel{Type: "incident_channel"},
		Incident: incident,
	}

	mapped := GetMappedLogEntries([]pagerduty.LogEntry{entry})
	assertEqual(t, mapped[0].CreatedAt, entry.CreatedAt)
	assertEqual(t, mapped[0].APIObject.ID, entry.APIObject.ID)
	assertEqual(t, mapped[0].APIObject.Type, entry.APIObject.Type)
}
// assertEqual reports whether e and g compare equal, flagging a test
// error when they do not. It returns the comparison result.
func assertEqual(t *testing.T, e, g interface{}) (r bool) {
	r = compare(e, g)
	if !r {
		t.Errorf("Expected [%v], got [%v]", e, g)
	}
	return
}

// compare reports whether e and g hold equal values. Values of
// different reflect.Kind (e.g. int vs int64) are never equal. Numeric,
// string and bool kinds compare by value; every other kind falls back
// to reflect.DeepEqual. (The original only special-cased slices and
// maps, so equal structs, arrays and pointers were wrongly reported as
// unequal.)
func compare(e, g interface{}) (r bool) {
	ev := reflect.ValueOf(e)
	gv := reflect.ValueOf(g)
	if ev.Kind() != gv.Kind() {
		return false
	}
	switch ev.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		r = ev.Int() == gv.Int()
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		r = ev.Uint() == gv.Uint()
	case reflect.Float32, reflect.Float64:
		r = ev.Float() == gv.Float()
	case reflect.String:
		r = ev.String() == gv.String()
	case reflect.Bool:
		r = ev.Bool() == gv.Bool()
	default:
		// Covers slices and maps (as before) plus structs, arrays,
		// pointers, etc.
		r = reflect.DeepEqual(e, g)
	}
	return
}
|
package phpGo
import (
"testing"
)
// tmpStruct is a fixture used by TestEmpty to exercise Empty's
// struct-field handling.
type tmpStruct struct {
	A string
	B int
}
// TestEmpty exercises Empty across PHP-style "falsy" values: nil, empty
// and "0" strings, false, array/map indexing with an optional key, and
// struct field lookup.
func TestEmpty(t *testing.T) {
	if !Empty(nil) {
		t.Error()
	}
	if !Empty("") {
		t.Error()
	}
	if !Empty("0") {
		t.Error()
	}
	if !Empty(false) {
		t.Error()
	}

	arr := [2]interface{}{0, 1}
	if !Empty(arr, 0) || Empty(arr, 1) {
		t.Error()
	}
	if !Empty([]int{}) || !Empty([]string{}) {
		t.Error()
	}

	m := map[interface{}]interface{}{"asd": "1", 123: "2", "123": "3"}
	if !Empty(m, "aa") || Empty(m, 123) || Empty(m, "asd") {
		t.Error()
	}

	// Renamed from tmpStruct to avoid shadowing the fixture type.
	s := tmpStruct{A: "123", B: 0}
	if Empty(s) || Empty(s, "A") || !Empty(s, "B") {
		t.Error()
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.