text stringlengths 11 4.05M |
|---|
package main
import (
"encoding/json"
"net/http"
module "imuslab.com/arozos/mod/modules"
)
// settingModule describes a single entry shown inside one of the setting
// groups on the system-setting index page.
type settingModule struct {
	Name         string //Name of the setting module.
	Desc         string //Description of the setting module
	IconPath     string //Icon path for the setting module
	Group        string // Group key; must match a settingGroup.Group to be listed (original comment said "Accept {}" — intended value set unclear, confirm)
	StartDir     string //Startup Directory / path
	RequireAdmin bool   //If the setting require admin access.
	//^ Enable this to hide this setting from non-admin users, but for API call, module has to handle admin check themselves.
}
// settingGroup is a top-level category on the system-setting index page.
type settingGroup struct {
	Name     string // display name of the group
	Group    string // key referenced by settingModule.Group
	IconPath string // icon shown next to the group
	Desc     string // one-line description of the group
}
var (
	// settingModules holds every module registered through registerSetting;
	// it is read by system_setting_handleListing.
	settingModules []settingModule
)
// SystemSettingInit registers the system-setting listing endpoint and the
// "System Setting" module with the global module handler.
func SystemSettingInit() {
	http.HandleFunc("/system/setting/list", system_setting_handleListing)

	//Register the module
	moduleHandler.RegisterModule(module.ModuleInfo{
		Name:        "System Setting",
		Desc:        "Customize your systems to fit your needs", // fixed typo: was "Cutomize"
		Group:       "System Settings",
		IconPath:    "SystemAO/system_setting/img/small_icon.png",
		Version:     "1.0",
		StartDir:    "SystemAO/system_setting/index.html",
		SupportFW:   true,
		InitFWSize:  []int{1080, 580},
		LaunchFWDir: "SystemAO/system_setting/index.html",
		SupportEmb:  false,
	})
}
// Setting group definition. A setting module's Group field must match one of
// these Group keys in order for the module to be shown in the UI.
func system_setting_getSettingGroups() []settingGroup {
	// gofmt -s: the element type is implied by the slice literal, so the
	// repeated "settingGroup{...}" prefixes were removed.
	return []settingGroup{
		{
			Name:     "Host Information",
			Group:    "Info",
			IconPath: "SystemAO/system_setting/img/server.svg",
			Desc:     "Config and info about the Server Host",
		},
		{
			Name:     "Devices & IoT",
			Group:    "Device",
			IconPath: "SystemAO/system_setting/img/device.svg",
			Desc:     "Connected clients and IoT devices",
		},
		{
			Name:     "Module Management",
			Group:    "Module",
			IconPath: "SystemAO/system_setting/img/module.svg",
			Desc:     "List of modules loaded in the system",
		},
		{
			Name:     "Disk & Storage",
			Group:    "Disk",
			IconPath: "SystemAO/system_setting/img/drive.svg",
			Desc:     "Manage Storage Devices and Disks",
		},
		{
			Name:     "Network & Connection",
			Group:    "Network",
			IconPath: "SystemAO/system_setting/img/network.svg",
			Desc:     "Manage Host Network and Connections",
		},
		{
			Name:     "Users & Groups",
			Group:    "Users",
			IconPath: "SystemAO/system_setting/img/users.svg",
			Desc:     "Add, removed or edit users and groups",
		},
		{
			Name:     "Clusters & Scheduling",
			Group:    "Cluster",
			IconPath: "SystemAO/system_setting/img/cluster.svg",
			// NOTE(review): this description mentions Time and Dates, which
			// does not match the group name — looks copy-pasted; confirm.
			Desc: "System Functions related to Time and Dates",
		},
		{
			Name:     "Advance Options",
			Group:    "Advance",
			IconPath: "SystemAO/system_setting/img/code.svg",
			Desc:     "Advance configs for developers",
		},
		{
			Name:     "About ArOZ",
			Group:    "About",
			IconPath: "SystemAO/system_setting/img/info.svg",
			Desc:     "Information of the current running ArOZ Online System",
		},
	}
}
// registerSetting appends the given module definition to the global list of
// setting modules served by the listing endpoint.
func registerSetting(m settingModule) {
	settingModules = append(settingModules, m)
}
// List all the setting modules and output it as JSON.
//
// With ?listGroup=<group> it lists the modules of that group, hiding
// admin-only modules from non-admin users; without it, it lists all root
// setting groups. Requires a logged-in user.
func system_setting_handleListing(w http.ResponseWriter, r *http.Request) {
	userinfo, err := userHandler.GetUserInfoFromRequest(w, r)
	if err != nil {
		sendErrorResponse(w, "User not logged in")
		return
	}
	allSettingGroups := system_setting_getSettingGroups()
	listGroup, _ := mv(r, "listGroup", false)
	if len(listGroup) == 0 {
		// No group requested: list all root groups.
		jsonString, _ := json.Marshal(allSettingGroups)
		sendJSONResponse(w, string(jsonString))
		return
	}
	// List the modules of the requested group. Admin-only modules are only
	// visible to admins; note API-level admin checks remain each module's
	// own responsibility (see settingModule.RequireAdmin).
	var results []settingModule
	for _, setMod := range settingModules {
		if setMod.Group != listGroup {
			continue
		}
		// Simplified from the original nested "RequireAdmin && IsAdmin() /
		// RequireAdmin == false" branches — same behavior.
		if !setMod.RequireAdmin || userinfo.IsAdmin() {
			results = append(results, setMod)
		}
	}
	if len(results) == 0 {
		// No visible module matched this group.
		sendErrorResponse(w, "Group not found")
		return
	}
	jsonString, _ := json.Marshal(results)
	sendJSONResponse(w, string(jsonString))
}
|
package main
import (
"math/rand"
"sync"
"time"
)
func read2(id int, ci chan int, wg *sync.WaitGroup) {
for i := 0; i < 10; i++ {
println(id, ":", <-ci)
}
wg.Done() //one go routine done
}
// main demonstrates fan-out over a channel: one producer goroutine feeds an
// endless stream of random ints, and ten reader goroutines each consume ten
// values before the program exits.
func main() {
	ci := make(chan int)
	rand.Seed(time.Now().UnixNano())

	// Producer: push random values forever; it is abandoned (blocked on
	// send) once the readers finish and main returns.
	go func() {
		for {
			ci <- rand.Intn(100)
		}
	}()

	var wg sync.WaitGroup // declare
	for id := 0; id < 10; id++ {
		wg.Add(1)
		go read2(id, ci, &wg) // initiating
	}
	wg.Wait() // wait here
}
|
package repository
// Repository collects the repositories for each model. It acts as the single
// aggregation point handed to services so they can reach every persistence
// interface through one value.
type Repository struct {
	User            UserRepository
	Project         ProjectRepository
	Release         ReleaseRepository
	Session         SessionRepository
	GitRepo         GitRepoRepository
	Cluster         ClusterRepository
	HelmRepo        HelmRepoRepository
	Registry        RegistryRepository
	Infra           InfraRepository
	GitActionConfig GitActionConfigRepository
	Invite          InviteRepository
	AuthCode        AuthCodeRepository
	DNSRecord       DNSRecordRepository
	PWResetToken    PWResetTokenRepository
	// Integration-specific repositories.
	KubeIntegration           KubeIntegrationRepository
	BasicIntegration          BasicIntegrationRepository
	OIDCIntegration           OIDCIntegrationRepository
	OAuthIntegration          OAuthIntegrationRepository
	GCPIntegration            GCPIntegrationRepository
	AWSIntegration            AWSIntegrationRepository
	GithubAppInstallation     GithubAppInstallationRepository
	GithubAppOAuthIntegration GithubAppOAuthIntegrationRepository
	SlackIntegration          SlackIntegrationRepository
	NotificationConfig        NotificationConfigRepository
}
|
package main
import (
"github.com/paulmach/go.geojson"
"time"
)
// Stop is a transit stop row; Lat/Lon are WGS84 coordinates.
type Stop struct {
	//gorm.Model
	ID   uint32 `gorm:"column:ctr_id"` // mapped to the ctr_id column
	Name string
	Lat  float64
	Lon  float64
}
// Line is a transit line with its ordered stops, joined through the
// line_stop many-to-many table.
type Line struct {
	ID        string
	ShortName string
	LongName  string
	Stops     []Stop `gorm:"many2many:line_stop;association_foreignkey:ctr_id;jointable_foreignkey:line_id;association_jointable_foreignkey:stop_ctr_id;"`
}
// Journey is one run of a line, keyed by (LineId, JourneyId).
type Journey struct {
	//gorm.Model
	LineId    string `gorm:"primary_key"`
	JourneyId uint32 `gorm:"primary_key"`
	StartDate time.Time
}
// Geometry is implemented by models that can render themselves as a GeoJSON
// feature (see Stop.ToGeoJSON and Line.ToGeoJSON).
type Geometry interface {
	ToGeoJSON() *geojson.Feature
}
// ToGeoJSON renders the stop as a GeoJSON point feature ([lon, lat] order)
// carrying its id and name as properties.
func (s Stop) ToGeoJSON() *geojson.Feature {
	feature := geojson.NewPointFeature([]float64{s.Lon, s.Lat})
	feature.Properties["id"] = s.ID
	feature.Properties["name"] = s.Name
	return feature
}
// ToGeoJSON renders the line as a GeoJSON line-string feature whose
// coordinates follow the stop order, with the line ids/names and the list of
// stop ids attached as properties.
func (l Line) ToGeoJSON() *geojson.Feature {
	n := len(l.Stops)
	coords := make([][]float64, 0, n)
	stopIds := make([]uint32, 0, n)
	for _, s := range l.Stops {
		coords = append(coords, []float64{s.Lon, s.Lat})
		stopIds = append(stopIds, s.ID)
	}
	feature := geojson.NewLineStringFeature(coords)
	feature.Properties["id"] = l.ID
	feature.Properties["short_name"] = l.ShortName
	feature.Properties["long_name"] = l.LongName
	feature.Properties["stops"] = stopIds
	return feature
}
|
package main
import (
"net/http"
"github.com/gorilla/rpc"
"github.com/gorilla/rpc/json"
"github.com/gin-gonic/gin"
"github.com/olahol/melody"
)
// main wires the chat service together: a melody websocket hub, a gorilla
// JSON-RPC server mounted at /json, and gin routes for the client page and
// the websocket channel endpoint, served on :8080.
func main() {
	pluginManager.Load()

	ws := melody.New()
	rpcServer := rpc.NewServer()
	rpcServer.RegisterCodec(json.NewCodec(), "application/json, text/javascript")
	rpcServer.RegisterService(newChat(ws), "")

	router := gin.Default()
	router.LoadHTMLGlob("templates/*")
	router.GET("/", func(c *gin.Context) {
		c.HTML(http.StatusOK, "client.html", gin.H{
			"plugins": pluginManager.List(),
		})
	})
	router.POST("/json", func(c *gin.Context) {
		rpcServer.ServeHTTP(c.Writer, c.Request)
	})
	router.GET("/channel/:name", func(c *gin.Context) {
		ws.HandleRequest(c.Writer, c.Request)
	})
	router.Run(":8080")
}
|
package cdp
import (
"context"
"crypto/tls"
"net"
"net/http"
"github.com/go-rod/rod/lib/utils"
)
// Dialer interface for WebSocket connection. Implementations open a network
// connection for the given network/address pair, honoring ctx where possible.
type Dialer interface {
	DialContext(ctx context.Context, network, address string) (net.Conn, error)
}
// TODO: replace it with tls.Dialer once golang v1.15 is widely used.

// tlsDialer adapts tls.Dial to the Dialer interface. The context argument is
// ignored because tls.Dial (pre tls.Dialer) has no context support.
type tlsDialer struct{}

func (d *tlsDialer) DialContext(_ context.Context, network, address string) (net.Conn, error) {
	return tls.Dial(network, address, nil)
}
// MustConnectWS helper to make a websocket connection.
// It panics (via utils.E) if the connection cannot be established.
func MustConnectWS(wsURL string) WebSocketable {
	ws := &WebSocket{}
	utils.E(ws.Connect(context.Background(), wsURL, nil))
	return ws
}
// MustStartWithURL helper for ConnectURL.
// It panics (via utils.E) if StartWithURL fails.
func MustStartWithURL(ctx context.Context, u string, h http.Header) *Client {
	c, err := StartWithURL(ctx, u, h)
	utils.E(err)
	return c
}
// StartWithURL helper to connect to the u with the default websocket lib,
// returning a started Client on success.
func StartWithURL(ctx context.Context, u string, h http.Header) (*Client, error) {
	ws := &WebSocket{}
	if err := ws.Connect(ctx, u, h); err != nil {
		return nil, err
	}
	return New().Start(ws), nil
}
|
package nettests
// Psiphon test implementation. The struct is stateless; all context comes
// from the Controller passed to Run.
type Psiphon struct {
}
// Run starts the psiphon experiment through the controller's session,
// running it against a single empty input.
func (h Psiphon) Run(ctl *Controller) error {
	builder, err := ctl.Session.NewExperimentBuilder("psiphon")
	if err != nil {
		return err
	}
	return ctl.Run(builder, []string{""})
}
|
package main
// retangulo ("rectangle") holds a rectangle's height and width.
type retangulo struct {
	altura  float64 // height
	largura float64 // width
}
// circulo ("circle") holds a circle's radius.
type circulo struct {
	raio float64 // radius
}
// main is intentionally empty; the shape types above are exercise scaffolding.
func main() {
}
|
package gatherer
import (
	"io/ioutil"
	"net/http"
	"net/url"
	"regexp"

	"../logger"
)
// CMSDetector detects CMS with whatcms.org api.
// AJAX API.
type CMSDetector struct {
	target string // hostname to look up, set via Set
	CMS    string // detected CMS name, filled by Run
}
// NewCMSDetector returns a new, empty CMS detector; call Set to assign a
// target before Run.
func NewCMSDetector() *CMSDetector {
	detector := new(CMSDetector)
	return detector
}
// Set implements Gatherer interface.
// Only v[0] is used, as the target hostname. (The original comment mentioned
// {conn *websocket.Conn, target string}, but the code has always read v[0]
// as a string.) Guards were added so malformed params no longer panic.
func (c *CMSDetector) Set(v ...interface{}) {
	if len(v) == 0 {
		return
	}
	if target, ok := v[0].(string); ok {
		c.target = target
	}
}
// Report implements Gatherer interface, exposing the detected CMS name.
func (c *CMSDetector) Report() map[string]interface{} {
	report := make(map[string]interface{}, 1)
	report["cms"] = c.CMS
	return report
}
// Run implements Gatherer interface: it queries whatcms.org for the target
// and scrapes the detected CMS name from the result page.
func (c *CMSDetector) Run() {
	// Escape the target so special characters cannot break the query string.
	resp, err := http.Get("https://whatcms.org/?s=" + url.QueryEscape(c.target))
	if err != nil {
		logger.Red.Println(err)
		return
	}
	// Always release the body, even on early returns below.
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// The original silently ignored read errors.
		logger.Red.Println(err)
		return
	}
	re := regexp.MustCompile(`class="nowrap" title="(.*?)">`)
	cms := re.FindAllStringSubmatch(string(body), -1)
	if len(cms) == 0 {
		logger.Green.Println("No CMS Detected")
		return
	}
	c.CMS = cms[0][1]
	logger.Green.Println("CMS Detected:", c.CMS)
}
|
package main
// IsPalindrome reports whether str reads the same forwards and backwards,
// comparing bytes with two pointers. O(n) time | O(1) space — the original
// recursive helper used O(n) stack space for no benefit.
func IsPalindrome(str string) bool {
	for i, j := 0, len(str)-1; i < j; i, j = i+1, j-1 {
		if str[i] != str[j] {
			return false
		}
	}
	return true
}
// helper recursively compares the i-th byte from the front with the i-th
// byte from the back, succeeding once the indices meet or cross.
func helper(str string, i int) bool {
	j := len(str) - 1 - i
	switch {
	case i >= j:
		return true
	case str[i] != str[j]:
		return false
	default:
		return helper(str, i+1)
	}
}
package cmd
import (
"context"
"crypto/tls"
"errors"
"expvar"
"flag"
"fmt"
"hash/crc32"
"math/rand"
"net"
"net/http"
"os"
"os/signal"
"sync"
"syscall"
"time"
"github.com/linfn/camo/pkg/camo"
"github.com/linfn/camo/pkg/env"
"github.com/linfn/camo/pkg/machineid"
"github.com/linfn/camo/pkg/util"
"github.com/lucas-clemente/quic-go/http3"
"golang.org/x/crypto/acme"
"golang.org/x/crypto/acme/autocert"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
)
// Server is the `camo server` subcommand: it parses flags, brings up the TUN
// device and NAT rules, and serves camo clients over HTTP/2 (optionally h2c
// for debugging, or additionally HTTP/3).
type Server struct {
	flags *flag.FlagSet
	help  bool

	// Listener / tunnel configuration (flag- or env-backed; see flagSet).
	listenAddr  string
	password    string
	mtu         int
	tun4        bool
	tun6        bool
	tunIPv4     string
	tunIPv6     string
	disableNAT4 bool
	disableNAT6 bool

	// ACME autocert configuration; empty autocertHost selects PSK mode.
	autocertHost  string
	autocertDir   string
	autocertEmail string

	logLevel  string
	useH2C    bool
	enableH3  bool
	debugHTTP string

	log camo.Logger
}
// flagSet lazily builds (and caches) the flag set for the server subcommand.
// Defaults come from CAMO_* environment variables where set.
func (cmd *Server) flagSet() *flag.FlagSet {
	if cmd.flags != nil {
		return cmd.flags
	}
	// Fixed: the set was named "client" (copy-paste from the client
	// command), which made flag error/usage output misleading.
	fs := flag.NewFlagSet("server", flag.ExitOnError)
	fs.BoolVar(&cmd.help, "h", false, "help")
	fs.StringVar(&cmd.listenAddr, "listen", env.String("CAMO_LISTEN", ":443"), "listen address")
	fs.StringVar(&cmd.password, "password", env.String("CAMO_PASSWORD", ""), "Set a password. It is recommended to use the environment variable CAMO_PASSWORD to set the password.")
	fs.IntVar(&cmd.mtu, "mtu", env.Int("CAMO_MTU", camo.DefaultMTU), "tun mtu")
	fs.BoolVar(&cmd.tun4, "4", env.Bool("CAMO_ENABLE_IP4", false), "tunneling for IPv4")
	fs.BoolVar(&cmd.tun6, "6", env.Bool("CAMO_ENABLE_IP6", false), "tunneling for IPv6")
	fs.StringVar(&cmd.tunIPv4, "ip4", env.String("CAMO_IP4", "10.20.0.1/24"), "tun IPv4 cidr")
	fs.StringVar(&cmd.tunIPv6, "ip6", env.String("CAMO_IP6", "fd01:cafe::1/64"), "tun IPv6 cidr")
	fs.BoolVar(&cmd.disableNAT4, "disable-nat4", env.Bool("CAMO_DISABLE_NAT4", false), "disable NAT for IPv4")
	fs.BoolVar(&cmd.disableNAT6, "disable-nat6", env.Bool("CAMO_DISABLE_NAT6", false), "disable NAT for IPv6")
	fs.StringVar(&cmd.autocertHost, "autocert-host", env.String("CAMO_AUTOCERT_HOST", ""), "hostname")
	fs.StringVar(&cmd.autocertDir, "autocert-dir", env.String("CAMO_AUTOCERT_DIR", defaultCertDir), "cert cache directory")
	fs.StringVar(&cmd.autocertEmail, "autocert-email", env.String("CAMO_AUTOCERT_EMAIL", ""), "(optional) email address")
	fs.StringVar(&cmd.logLevel, "log-level", env.String("CAMO_LOG_LEVEL", camo.LogLevelTexts[camo.LogLevelInfo]), "log level")
	fs.BoolVar(&cmd.useH2C, "h2c", env.Bool("CAMO_H2C", false), "use h2c (for debug)")
	fs.BoolVar(&cmd.enableH3, "http3", env.Bool("CAMO_HTTP3", false), "enable http3")
	fs.StringVar(&cmd.debugHTTP, "debug-http", env.String("CAMO_DEBUG_HTTP", ""), "debug http server listen address")
	cmd.flags = fs
	return fs
}
// Name returns the subcommand name used on the command line.
func (cmd *Server) Name() string {
	return "server"
}
// Desc returns the one-line description shown in command help.
func (cmd *Server) Desc() string {
	return "Run camo server"
}
// Usage prints the usage line followed by the defaults of every flag.
func (cmd *Server) Usage() {
	fmt.Println("Usage: camo server [OPTIONS]")
	cmd.flagSet().PrintDefaults()
}
// parseFlags parses args, initializes the logger, and validates the tunnel
// configuration. It exits the process (log.Fatal) on invalid config.
func (cmd *Server) parseFlags(args []string) {
	fs := cmd.flagSet()
	_ = fs.Parse(args) // ExitOnError: Parse never returns a usable error
	if cmd.help {
		return
	}
	log := newLogger(cmd.logLevel)
	cmd.log = log
	if cmd.password == "" {
		log.Fatal("missing password")
	}
	// Scrub the password from the visible process arguments.
	hiddenPasswordArg()
	// With neither family selected explicitly, tunnel both.
	if !cmd.tun4 && !cmd.tun6 {
		cmd.tun4 = true
		cmd.tun6 = true
	}
	if cmd.tun4 {
		if _, _, err := net.ParseCIDR(cmd.tunIPv4); err != nil {
			log.Fatalf("invalid -ip4 cidr: %v", err)
		}
	}
	if cmd.tun6 {
		if _, _, err := net.ParseCIDR(cmd.tunIPv6); err != nil {
			log.Fatalf("invalid -ip6 cidr: %v", err)
		}
	}
	if !cmd.useH2C && cmd.autocertHost == "" {
		// Fixed typo in log message: was "auotcert".
		log.Info("no autocert config, use PSK mode")
	}
}
// Run is the subcommand entry point. It parses flags, brings the TUN device
// up, and then serves three things concurrently until the first of them
// fails or a SIGINT/SIGTERM arrives: the tunnel packet loop, the HTTP/2 (or
// h2c) server, and optionally the HTTP/3 server. The shared exit closure
// guarantees shutdown runs exactly once.
func (cmd *Server) Run(args ...string) {
	cmd.parseFlags(args)
	if cmd.help {
		cmd.Usage()
		return
	}
	log := cmd.log
	// defers collects teardown steps (TUN close, NAT reset) to run on exit.
	var defers util.Rollback
	defer defers.Do()
	iface := cmd.initTun(&defers)
	ctx, cancel := context.WithCancel(context.Background())
	srv := cmd.initServer()
	mux := http.NewServeMux()
	mux.Handle("/", cmd.withLog(srv.Handler(ctx, "")))
	handler := camo.WithAuth(mux, cmd.password, log)
	h2 := cmd.initHTTPServer(handler)
	h3 := cmd.initHTTP3Server(handler)
	// exit closes both servers and cancels the context exactly once,
	// logging the triggering error if any.
	var exitOnce sync.Once
	exit := func(err error) {
		exitOnce.Do(func() {
			h2.Close()
			h3.Close()
			cancel()
			if err != nil {
				log.Errorf("server exits with error %v", err)
			}
		})
	}
	// Shut down cleanly on SIGINT/SIGTERM.
	go func() {
		c := make(chan os.Signal, 1)
		signal.Notify(c, os.Interrupt, syscall.SIGTERM)
		log.Infof("receive signal %s", <-c)
		exit(nil)
	}()
	log.Info("server start")
	var wg sync.WaitGroup
	// Tunnel packet loop.
	wg.Add(1)
	go func() {
		defer wg.Done()
		exit(srv.ServeIface(ctx, iface))
	}()
	// HTTP/2 server: TLS normally, plaintext when -h2c is set.
	wg.Add(1)
	go func() {
		defer wg.Done()
		if !cmd.useH2C {
			exit(h2.ListenAndServeTLS("", ""))
		} else {
			exit(h2.ListenAndServe())
		}
	}()
	// HTTP/3 requires TLS, so it is skipped in h2c mode.
	if cmd.enableH3 && !cmd.useH2C {
		wg.Add(1)
		go func() {
			defer wg.Done()
			exit(h3.ListenAndServe())
		}()
	}
	if cmd.debugHTTP != "" {
		go cmd.debugHTTPServer()
	}
	wg.Wait()
}
// initTun creates the TUN interface, assigns the configured IPv4/IPv6
// addresses, and installs NAT rules for each enabled family. Every resource
// it acquires registers a matching teardown step on defers. Any failure is
// fatal (Panicf), so the returned iface is always valid.
func (cmd *Server) initTun(defers *util.Rollback) *camo.Iface {
	log := cmd.log
	iface, err := camo.NewTunIface(cmd.mtu)
	if err != nil {
		log.Panicf("failed to create tun device: %v", err)
	}
	defers.Add(func() { iface.Close() })
	log.Infof("tun(%s) up", iface.Name())
	if cmd.tun4 {
		if err := iface.SetIPv4(cmd.tunIPv4); err != nil {
			log.Panicf("failed to set %s IPv4 address %s: %v", iface.Name(), cmd.tunIPv4, err)
		}
		log.Infof("set %s IPv4 address at %s", iface.Name(), cmd.tunIPv4)
		if !cmd.disableNAT4 {
			resetNAT4, err := camo.SetupNAT(iface.Subnet4().String())
			if err != nil {
				log.Panicf("failed to setup nat4: %v", err)
			}
			// Undo the NAT rule on shutdown.
			defers.Add(func() {
				err := resetNAT4()
				if err != nil {
					log.Error(err)
				}
			})
		}
	}
	if cmd.tun6 {
		if err := iface.SetIPv6(cmd.tunIPv6); err != nil {
			log.Panicf("failed to set %s IPv6 address %s: %v", iface.Name(), cmd.tunIPv6, err)
		}
		log.Infof("set %s IPv6 address at %s", iface.Name(), cmd.tunIPv6)
		if !cmd.disableNAT6 {
			resetNAT6, err := camo.SetupNAT(iface.Subnet6().String())
			if err != nil {
				log.Panicf("failed to setup nat6: %v", err)
			}
			// Undo the NAT rule on shutdown.
			defers.Add(func() {
				err := resetNAT6()
				if err != nil {
					log.Error(err)
				}
			})
		}
	}
	return iface
}
// initServer builds the camo server, attaches its client IP pools, and
// publishes its metrics under the "camo" expvar key.
func (cmd *Server) initServer() *camo.Server {
	srv := &camo.Server{
		MTU:    cmd.mtu,
		Logger: cmd.log,
		// Noise is derived from the machine id so it is stable per host.
		Noise: cmd.getNoise(),
	}
	cmd.initIPPool(srv)
	expvar.Publish("camo", srv.Metrics())
	return srv
}
// initIPPool attaches a client address pool (up to 256 addresses, gateway
// excluded by the pool) for each enabled IP family. The CIDRs were already
// validated in parseFlags, so a parse failure here is a programming error.
func (cmd *Server) initIPPool(srv *camo.Server) {
	mustParse := func(cidr string) (net.IP, *net.IPNet) {
		gw, subnet, err := net.ParseCIDR(cidr)
		if err != nil {
			cmd.log.Panic(err)
		}
		return gw, subnet
	}
	if cmd.tun4 {
		gw, subnet := mustParse(cmd.tunIPv4)
		srv.IPv4Pool = camo.NewSubnetIPPool(subnet, gw, 256)
	}
	if cmd.tun6 {
		gw, subnet := mustParse(cmd.tunIPv6)
		srv.IPv6Pool = camo.NewSubnetIPPool(subnet, gw, 256)
	}
}
// getNoise derives a per-host noise value from the machine id checksum,
// falling back to a time-seeded random value when no machine id is
// available.
func (cmd *Server) getNoise() int {
	id, err := machineid.MachineID(getCamoDir())
	if err != nil {
		cmd.log.Warnf("failed to get machineid: %v", err)
		return rand.New(rand.NewSource(time.Now().UnixNano())).Int()
	}
	return int(crc32.ChecksumIEEE([]byte(id)))
}
// initHTTPServer builds the HTTP server on the configured listen address:
// plaintext h2c when -h2c is set (debug), TLS-terminated HTTP/2 otherwise.
func (cmd *Server) initHTTPServer(handler http.Handler) *http.Server {
	srv := &http.Server{Addr: cmd.listenAddr}
	if cmd.useH2C {
		srv.Handler = h2c.NewHandler(handler, &http2.Server{})
		return srv
	}
	srv.TLSConfig = cmd.initTLSConfig()
	srv.Handler = handler
	return srv
}
// initHTTP3Server wraps a freshly built HTTP server in a quic-go HTTP/3
// server, reusing the same address, handler, and TLS configuration.
func (cmd *Server) initHTTP3Server(handler http.Handler) *http3.Server {
	return &http3.Server{
		Server: cmd.initHTTPServer(handler),
	}
}
// initTLSConfig builds the server TLS config. With an autocert host it uses
// ACME-managed certificates (and advertises the ACME ALPN protocol);
// otherwise it runs in PSK mode: session tickets are keyed from the shared
// password and full handshakes are rejected with a bad-certificate error, so
// only clients holding the password can resume sessions.
func (cmd *Server) initTLSConfig() *tls.Config {
	tlsCfg := new(tls.Config)
	tlsCfg.MinVersion = tls.VersionTLS12
	tlsCfg.NextProtos = []string{"h2", "http/1.1"}
	if cmd.autocertHost != "" {
		certMgr := autocert.Manager{
			Prompt:     autocert.AcceptTOS,
			Cache:      autocert.DirCache(cmd.autocertDir),
			HostPolicy: autocert.HostWhitelist(cmd.autocertHost),
			Email:      cmd.autocertEmail,
		}
		tlsCfg.GetCertificate = certMgr.GetCertificate
		tlsCfg.NextProtos = append(tlsCfg.NextProtos, acme.ALPNProto)
	} else {
		tlsCfg.SessionTicketKey = camo.NewSessionTicketKey(cmd.password)
		tlsCfg.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
			return nil, errors.New("(PSK) bad certificate")
		}
	}
	return tlsCfg
}
// withLog wraps h so that every request's method, URL, protocol, and remote
// address are logged at info level before it is served.
func (cmd *Server) withLog(h http.Handler) http.Handler {
	logged := func(w http.ResponseWriter, r *http.Request) {
		cmd.log.Info(r.Method, r.URL.String(), r.Proto, "remote:", r.RemoteAddr)
		h.ServeHTTP(w, r)
	}
	return http.HandlerFunc(logged)
}
// debugHTTPServer serves the default mux (expvar, and pprof if imported) on
// the configured debug address. ErrServerClosed is the normal shutdown
// result and is not logged as an error.
func (cmd *Server) debugHTTPServer() {
	err := http.ListenAndServe(cmd.debugHTTP, nil)
	// errors.Is matches wrapped errors too, unlike the original !=.
	if !errors.Is(err, http.ErrServerClosed) {
		cmd.log.Errorf("debug http server exited: %v", err)
	}
}
|
package template
// TemplateRepository abstracts persistence for templates. It is currently an
// empty marker interface; methods are expected to be added as operations are
// implemented.
type TemplateRepository interface {
}
|
package getchapterinfo
import (
"bufio"
"database/sql"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
)
// Bookinfo mirrors one row of the `books` table.
type Bookinfo struct {
	Bookid         int    `db:"id"`          // primary key
	Bookname       string `db:"booksName"`   // book title
	Boookauthor    string `db:"author"`      // author (field name typo kept: callers reference it)
	Bookcahtpernum int    `db:"chapters"`    // number of chapters found
	Bookcomment    string `db:"summary"`     // book summary
	chapterdone    int    `db:"chapterdone"` // processing-done flag
	//Bookstorepath string `db:"bookstorepath"`
}
// chapterinfo describes one chapter extracted from a book file; it maps to
// the sharded chapter_<n> tables (see writetodb).
type chapterinfo struct {
	Bookid      int    `db:"booksId"`     // owning book id
	Chapterid   int    `db:"chapterId"`   // sequential chapter number within the book
	Chaptername string `db:"chapterName"` // detected chapter title line
	Chapter     string `db:"content"`     // chapter body text
	//Storepath string `db:"storepath"`
	Chapterlines int64 `db:"chapterlines"` // line number where the title was found
}
// filenamech carries full file paths from the directory walker to the worker
// goroutines; buffered so the producer can run ahead of the consumers.
var filenamech = make(chan string, 100)

// dbcon is the shared database handle, set once by GetChapterInfo.
var dbcon *sql.DB
// GetChapterInfo walks /data/zip/unrarfull, splitting every book file found
// there into chapters and writing them to the database, using one producer
// goroutine (the directory walker) and a fixed pool of worker goroutines.
func GetChapterInfo(db *sql.DB) {
	dbcon = db
	// Resolve the absolute path of the book directory.
	pathname, err := filepath.Abs("/data/zip/unrarfull")
	if err != nil {
		fmt.Println("path error")
		return
	}
	// Number of concurrent workers.
	concurrenc := 20
	wg := sync.WaitGroup{}
	wg.Add(concurrenc + 1) // workers plus the producer
	// Producer: walk the tree, push every file path, then close the channel
	// so the workers know no more work is coming.
	go func() {
		GetAllFile(pathname, filenamech)
		close(filenamech)
		wg.Done()
	}()
	// Workers: ranging over the channel replaces the original manual
	// "<-filenamech, ok" loop and exits when the channel is drained+closed.
	for i := 0; i < concurrenc; i++ {
		go func() {
			for filename := range filenamech {
				dosomewrork(filename)
			}
			wg.Done()
		}()
	}
	wg.Wait()
}
// dosomewrork processes one book file: it reads the file line by line,
// detects chapter-title lines with several regexp rules, de-duplicates
// consecutive titles, writes each detected chapter to the database, and
// finally marks the book done and records its chapter count.
// NOTE(review): Prepare errors are only checked after Exec, the per-line
// regexps are recompiled constantly, and an Open failure still falls through
// to ReadLine on a nil reader — cleanup candidates.
func dosomewrork(fp string) {
	// Open the text file.
	fi, err := os.Open(fp)
	if err != nil {
		fmt.Printf("Error: %s\n", err)
	}
	defer fi.Close()
	br := bufio.NewReader(fi)
	var lines int64    // current line number within the file
	var chapternum int // chapters found so far; used as chapter id
	var pre_chaptername string
	booksrt := Bookinfo{}
	for {
		a, _, c := br.ReadLine()
		if c == io.EOF {
			break
		}
		lines++
		// Build a chapter record for this line; Chaptername is empty when
		// the line does not look like a chapter title.
		res := getchapterinfo(string(a), fp, lines)
		// De-duplication and rule matching below.
		if res.Chaptername != "" && res.Bookid != 0 {
			// Skip when the normalized title equals the previous one.
			if quchong(pre_chaptername) == quchong(res.Chaptername) {
				continue
			}
			// Rule: "第...章" style titles. If the previous title matched a
			// rule but the current one does not, treat it as a false positive.
			preisok, err := regexp.Match("^\\s*第.{1,9}章.*", []byte(pre_chaptername))
			if err != nil {
				fmt.Println(err)
			}
			nowisok, err := regexp.Match("^\\s*第.{1,9}章.*", []byte(res.Chaptername))
			if err != nil {
				fmt.Println(err)
			}
			if preisok {
				if preisok != nowisok {
					continue
				}
			}
			// Rule: purely numeric titles.
			preisok1, err := regexp.Match("^\\s*\\d+\\s*", []byte(pre_chaptername))
			if err != nil {
				fmt.Println(err)
			}
			nowisok1, err := regexp.Match("^\\s*\\d+\\s*", []byte(res.Chaptername))
			if err != nil {
				fmt.Println(err)
			}
			if preisok1 {
				if preisok1 != nowisok1 {
					continue
				}
			}
			// Rule: "第...节" (section) style titles.
			preisok2, err := regexp.Match("^\\s*第.{1,9}节.*", []byte(pre_chaptername))
			if err != nil {
				fmt.Println(err)
			}
			nowisok2, err := regexp.Match("^\\s*第.{1,9}节.*", []byte(res.Chaptername))
			if err != nil {
				fmt.Println(err)
			}
			if preisok2 {
				if preisok2 != nowisok2 {
					continue
				}
			}
			// Rule: "卷..." (volume) style titles.
			preisok3, err := regexp.Match("^(\\s*)(卷.*)", []byte(pre_chaptername))
			if err != nil {
				fmt.Println(err)
			}
			nowisok3, err := regexp.Match("^(\\s*)(卷.*)", []byte(res.Chaptername))
			if err != nil {
				fmt.Println(err)
			}
			if preisok3 {
				if preisok3 != nowisok3 {
					continue
				}
			}
			// Assign the sequential chapter id and persist the chapter.
			chapternum++
			res.Chapterid = chapternum
			fmt.Println(res.Chaptername)
			res.writetodb()
			// Remember this title for the next iteration's de-duplication,
			// and track the book id / chapter count for the final updates.
			b := &pre_chaptername
			*b = res.Chaptername
			booksrt.Bookid = res.Bookid
			booksrt.Bookcahtpernum = res.Chapterid
		}
	}
	// Mark the book's chapter extraction as done.
	sqlupdate := fmt.Sprintf("UPDATE books set chapterdone=? WHERE id=?")
	stmt, err := dbcon.Prepare(sqlupdate)
	_, err = stmt.Exec(1, booksrt.Bookid)
	defer stmt.Close()
	if err != nil {
		fmt.Println(err)
	}
	// Record the book's chapter count.
	sqlupdate = fmt.Sprintf("UPDATE books set chapters=? WHERE id=?")
	stmt, err = dbcon.Prepare(sqlupdate)
	_, err = stmt.Exec(booksrt.Bookcahtpernum, booksrt.Bookid)
	defer stmt.Close()
	if err != nil {
		fmt.Println(err)
	}
	return
}
// quchong ("de-duplicate") normalizes a chapter-title line to its core title
// token — the capture group of the first matching rule, tried in order:
// "第...章", leading digits, "卷...", "第...节". It returns "" when no rule
// matches. The original repeated the Match-then-FindAll pattern four times;
// the loop below is behaviorally identical (same patterns, same order, same
// capture group).
func quchong(s string) string {
	patterns := []*regexp.Regexp{
		regexp.MustCompile(`^(\s*)(第.{1,9}章)(.*)$`),
		regexp.MustCompile(`^(\s*)(\d+)(\s*)`),
		regexp.MustCompile(`^(\s*)(卷.*)$`),
		regexp.MustCompile(`^(\s*)(第.{1,9}节)(.*)$`),
	}
	for _, re := range patterns {
		if m := re.FindStringSubmatch(s); m != nil {
			return m[2]
		}
	}
	return ""
}
// getchapterinfo builds a chapter record for one line of text: the book id
// is looked up from the file path, the chapter name is extracted from the
// line (empty when it is not a title), and the line number is recorded.
func getchapterinfo(s string, fp string, lines int64) *chapterinfo {
	return &chapterinfo{
		Bookid:       getbookid(fp),
		Chaptername:  getchaptername(s),
		Chapterlines: lines,
	}
}
// getchaptername returns the full matched title text when the line looks
// like a chapter heading, trying the same rules in the same order as the
// original: "第...章", leading digits, "卷...", "第...节". It returns "" when
// no rule matches. The four duplicated Match-then-FindAll branches were
// collapsed into one loop over the original find patterns; the returned
// value is still the entire match (result[0][0] previously).
func getchaptername(s string) string {
	patterns := []*regexp.Regexp{
		regexp.MustCompile(`^(\s*)(第.{1,9}章)(.*)$`),
		regexp.MustCompile(`^(\s*)(\d+)(\s*)`),
		regexp.MustCompile(`^(\s*)(卷.*)`),
		regexp.MustCompile(`^(\s*)(第.{1,9}节)(.*)`),
	}
	for _, re := range patterns {
		// Every pattern requires at least one non-space character, so an
		// empty FindString result means "no match" unambiguously.
		if m := re.FindString(s); m != "" {
			return m
		}
	}
	return ""
}
// getbookid looks up the book's id in the `books` table by the book name
// embedded in the file name.
// NOTE(review): bn[2] assumes file names of the form
// "<a>.<b>.<bookname>..." — anything with fewer dot-separated parts panics
// here; confirm the naming convention produced by the unrar step.
func getbookid(fp string) (bookid int) {
	db := dbcon
	bn := strings.Split(filepath.Base(fp), ".")
	bookname := bn[2]
	err := db.QueryRow("SELECT id FROM books WHERE booksName = ?", bookname).Scan(&bookid)
	check(err)
	return
}
// writetodb inserts the chapter into its shard table: chapters are spread
// across 100 tables named chapter_1 .. chapter_100 keyed by book id.
// check() panics on any database error.
func (c *chapterinfo) writetodb() {
	db := dbcon
	// Shard selection: chapter_<bookid % 100 + 1>.
	bookid := "chapter_" + strconv.Itoa(c.Bookid%100+1)
	insertsql := fmt.Sprintf("INSERT %v ( booksId, chapterName, content,chapterlines,chapterId) VALUES (?,?,?,?,?)", bookid)
	stmt, err := db.Prepare(insertsql)
	check(err)
	res, err := stmt.Exec(c.Bookid, c.Chaptername, c.Chapter, c.Chapterlines, c.Chapterid)
	check(err)
	defer stmt.Close()
	_, err = res.LastInsertId() // only returns correctly for auto-increment ids
	check(err)
}
// printcahpter dumps the chapter name and its line number, for debugging.
func (c *chapterinfo) printcahpter() {
	fmt.Println(c.Chaptername, c.Chapterlines)
}
func check(err error) {
if err != nil {
fmt.Println(err)
panic(err)
}
}
func GetAllFile(pathname string, fn_ch chan string) {
rd, err := ioutil.ReadDir(pathname)
if err != nil {
fmt.Println("read dir fail:", err)
}
for _, fi := range rd {
if fi.IsDir() {
fullDir := pathname + "/" + fi.Name()
GetAllFile(fullDir, fn_ch)
if err != nil {
fmt.Println("read dir fail:", err)
}
} else {
fullName := pathname + "/" + fi.Name()
fn_ch <- fullName
}
}
} |
/*
* Created on Mon Jan 21 2019 22:52:12
* Author: WuLC
* EMail: liangchaowu5@gmail.com
*/
// two pointers, O(n) time, O(n) space
// abs returns the absolute value of num.
func abs(num int) int {
	if num < 0 {
		return -num
	}
	return num
}
// sortedSquares returns the squares of the elements of the sorted slice A,
// in non-decreasing order, by merging outward from the first non-negative
// element with two pointers. O(n) time, O(n) space for the result.
// Changes from the original: the result slice is pre-sized, and the abs()
// helper call is replaced by a direct comparison (everything left of idx is
// negative, so abs(A[left]) == -A[left]), making the function self-contained.
func sortedSquares(A []int) []int {
	// idx is the first index with a non-negative value (len(A) if none).
	idx := len(A)
	for i, num := range A {
		if num >= 0 {
			idx = i
			break
		}
	}
	result := make([]int, 0, len(A))
	left, right := idx-1, idx
	// Merge the negative half (walking left) with the non-negative half
	// (walking right), always taking the smaller absolute value first.
	for left >= 0 && right < len(A) {
		if -A[left] > A[right] {
			result = append(result, A[right]*A[right])
			right++
		} else {
			result = append(result, A[left]*A[left])
			left--
		}
	}
	// Drain whichever side remains.
	for left >= 0 {
		result = append(result, A[left]*A[left])
		left--
	}
	for right < len(A) {
		result = append(result, A[right]*A[right])
		right++
	}
	return result
}
package ms
import (
"github.com/catorpilor/leetcode/utils"
)
// MaxSquare returns the area of the largest all-1s square inside matrix.
// Brute force: for every 1-cell, grow the candidate square diagonally,
// checking that each newly added bottom row and right column are all 1s.
// time complexity: O((mn)^2), space: O(1).
func MaxSquare(matrix [][]int) int {
	// Guard empty input: the original only checked for nil and then
	// dereferenced matrix[0], panicking on a non-nil empty matrix.
	if len(matrix) == 0 || len(matrix[0]) == 0 {
		return 0
	}
	row, col := len(matrix), len(matrix[0])
	var maxSqlLen int
	for i := 0; i < row; i++ {
		for j := 0; j < col; j++ {
			if matrix[i][j] != 1 {
				continue
			}
			tm, flag := 1, true
			for tm+i < row && tm+j < col && flag {
				// Check the new bottom row of the candidate square.
				for k := j; k <= tm+j; k++ {
					if matrix[i+tm][k] == 0 {
						flag = false
						break
					}
				}
				// Check the new right column of the candidate square.
				for k := i; k <= i+tm; k++ {
					if matrix[k][j+tm] == 0 {
						flag = false
						break
					}
				}
				if flag {
					tm += 1
				}
			}
			maxSqlLen = utils.Max(maxSqlLen, tm)
		}
	}
	return maxSqlLen * maxSqlLen
}
// MaxSquare2 returns the area of the largest all-1s square inside matrix
// using 2-D dynamic programming: dp[i][j] is the side of the largest square
// whose bottom-right corner is (i, j). O(mn) time, O(mn) space.
func MaxSquare2(matrix [][]int) int {
	if matrix == nil {
		return 0
	}
	row := len(matrix)
	var col int
	if row > 0 {
		col = len(matrix[0])
	}
	if row == 0 || col == 0 {
		return 0
	}
	var maxSqrLen int
	// allocate a matrix; first column and first row copy the input directly
	// (a lone 1 is a square of side 1).
	dp := make([][]int, row)
	for i := 0; i < row; i++ {
		dp[i] = make([]int, col)
		dp[i][0] = matrix[i][0]
		if matrix[i][0] == 1 {
			maxSqrLen = 1
		}
	}
	for j := 0; j < col; j++ {
		dp[0][j] = matrix[0][j]
		if matrix[0][j] == 1 {
			maxSqrLen = 1
		}
	}
	for i := 1; i < row; i++ {
		for j := 1; j < col; j++ {
			if matrix[i][j] == 1 {
				// Side grows only as far as the smallest of the three
				// neighboring squares (left, top, top-left) allows.
				dp[i][j] = utils.Min(dp[i-1][j], utils.Min(dp[i][j-1], dp[i-1][j-1])) + 1
			}
			maxSqrLen = utils.Max(maxSqrLen, dp[i][j])
		}
	}
	return maxSqrLen * maxSqrLen
}
// minOf returns the smaller of a and b.
func minOf(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// maxOf returns the larger of a and b.
func maxOf(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// MaxSquare3 returns the area of the largest all-1s square inside matrix,
// using the space-optimized 1-D DP: dp[j] is the square side ending at the
// current row and column j. O(mn) time, O(n) space.
//
// Bug fixes vs the original: dp[j] was never reset to 0 on a 0-cell, and
// prev (dp[i-1][j-1]) was carried across rows, so stale values from earlier
// rows inflated later results (e.g. [[1,1],[1,0],[1,1]] returned 4 instead
// of 1).
func MaxSquare3(matrix [][]int) int {
	if matrix == nil {
		return 0
	}
	row := len(matrix)
	var col int
	if row > 0 {
		col = len(matrix[0])
	}
	if row == 0 || col == 0 {
		return 0
	}
	dp := make([]int, col+1)
	var mx int
	for i := 1; i <= row; i++ {
		prev := 0 // dp[i-1][j-1]; must restart at each row's left edge
		for j := 1; j <= col; j++ {
			temp := dp[j]
			if matrix[i-1][j-1] == 1 {
				dp[j] = minOf(dp[j], minOf(dp[j-1], prev)) + 1
				mx = maxOf(mx, dp[j])
			} else {
				dp[j] = 0 // a 0-cell ends every square at this column
			}
			prev = temp
		}
	}
	return mx * mx
}
|
package errors
// SystemError builds a generic system-level error (code 1001).
func SystemError(options ...interface{}) *Error {
	return NewError(1001, "system error", options...)
}
// ParamError builds a request-parameter validation error (code 1002).
func ParamError(options ...interface{}) *Error {
	return NewError(1002, "param error", options...)
}
// DBError builds a database-operation error (code 1003).
func DBError(options ...interface{}) *Error {
	return NewError(1003, "db error", options...)
}
// AuthError builds an authentication/authorization error (code 1004).
func AuthError(options ...interface{}) *Error {
	return NewError(1004, "auth error", options...)
}
// NoDataError builds an empty-response error (code 1005).
func NoDataError(options ...interface{}) *Error {
	return NewError(1005, "no data error", options...)
}
|
package v1
import (
"github.com/openshift-knative/knative-openshift-ingress/pkg/apis"
routev1 "github.com/openshift/api/route/v1"
)
// init registers the OpenShift route/v1 types with the operator's list of
// scheme builders so they are added to the runtime scheme at startup.
func init() {
	apis.AddToSchemes = append(apis.AddToSchemes, routev1.SchemeBuilder.AddToScheme)
}
|
package main
import (
"fmt"
"os"
"github.com/marcozj/centrify-awstool/awstool"
log "github.com/marcozj/golang-sdk/logging"
)
// main configures debug logging to centrifyawstool.log and runs the AWS
// tool client, exiting with status 1 on failure.
func main() {
	log.SetLevel(log.LevelDebug)
	log.SetLogPath("centrifyawstool.log")
	if err := awstool.NewClient().Run(); err != nil {
		fmt.Println(err.Error())
		os.Exit(1)
	}
}
|
package ngx
import (
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
)
// NgxClientParams holds the parameters used to build the nginx status
// client (see InitHttpClient).
type NgxClientParams struct {
	EndPoint  *string       // stub_status URL to query
	UserAgent *string       // User-Agent header value
	Timeout   time.Duration // HTTP client timeout
}
// NgxClient queries an nginx stub_status endpoint.
type NgxClient struct {
	endPoint   string
	httpClient *http.Client
}
// NgxMetrics is the set of counters collected from nginx stub_status.
type NgxMetrics struct {
	Active   int64 // currently active connections
	Accepted int64 // total connections accepted
	Handled  int64 // total handshakes successfully handled
	Reading  int64 // connections currently reading the client request
	Writing  int64 // connections currently writing a response to the client
	Waiting  int64 // idle keep-alive connections: active - (reading + writing)
	Requests int64 // total client requests processed
}
// QueryNgxStatus fetches the nginx stub_status page and parses it into an
// NgxMetrics value.
func (n *NgxClient) QueryNgxStatus() (metrics *NgxMetrics, err error) {
	response, err := n.httpClient.Get(n.endPoint)
	if err != nil {
		return nil, err
	}
	// Close the body so the connection can be reused; the original leaked
	// the response body on every call.
	defer response.Body.Close()
	bodyBytes, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return nil, err
	}
	return parseStatusData(bodyBytes)
}
// parseStatusData parses the 4-line nginx stub_status payload, e.g.:
//
//	Active connections: 291
//	server accepts handled requests
//	 16630948 16630948 31070465
//	Reading: 6 Writing: 179 Waiting: 106
//
// Fixed vs the original: Reading/Writing/Waiting were all parsed from
// rww[1], so the Reading value was reported for all three gauges; missing
// bounds checks on the split results could also panic on malformed input.
func parseStatusData(data []byte) (metrics *NgxMetrics, err error) {
	dataStr := string(data)
	dataSlice := strings.Split(dataStr, "\n")
	dataErr := fmt.Errorf("invalid value: %v", dataStr)
	if len(dataSlice) != 5 {
		return nil, dataErr
	}
	acParts := strings.Split(dataSlice[0], ":")
	if len(acParts) < 2 {
		return nil, dataErr
	}
	ac := strings.TrimSpace(acParts[1])
	activeConn, err := strconv.ParseInt(ac, 10, 64)
	if err != nil {
		return nil, dataErr
	}
	// accepts / handled / requests
	ahr := strings.Split(strings.TrimSpace(dataSlice[2]), " ")
	if len(ahr) != 3 {
		return nil, dataErr
	}
	accepts, err := strconv.ParseInt(strings.TrimSpace(ahr[0]), 10, 64)
	if err != nil {
		return nil, dataErr
	}
	handled, err := strconv.ParseInt(strings.TrimSpace(ahr[1]), 10, 64)
	if err != nil {
		return nil, dataErr
	}
	requests, err := strconv.ParseInt(strings.TrimSpace(ahr[2]), 10, 64)
	if err != nil {
		return nil, dataErr
	}
	// "Reading: r Writing: w Waiting: q" splits into 6 tokens; the counter
	// values sit at indices 1, 3 and 5.
	rww := strings.Split(strings.TrimSpace(dataSlice[3]), " ")
	if len(rww) != 6 {
		return nil, dataErr
	}
	reading, err := strconv.ParseInt(strings.TrimSpace(rww[1]), 10, 64)
	if err != nil {
		return nil, dataErr
	}
	writing, err := strconv.ParseInt(strings.TrimSpace(rww[3]), 10, 64)
	if err != nil {
		return nil, dataErr
	}
	waiting, err := strconv.ParseInt(strings.TrimSpace(rww[5]), 10, 64)
	if err != nil {
		return nil, dataErr
	}
	metrics = &NgxMetrics{
		Active:   activeConn,
		Accepted: accepts,
		Handled:  handled,
		Reading:  reading,
		Writing:  writing,
		Waiting:  waiting,
		Requests: requests,
	}
	return metrics, nil
}
// userAgentRoundTripper wraps an http.RoundTripper and injects a User-Agent
// header into every outgoing request.
type userAgentRoundTripper struct {
	ua string // User-Agent value to send
	rt http.RoundTripper
}
// RoundTrip sets the configured User-Agent header and delegates to the
// wrapped transport. Fixed: the original hard-coded a Chrome UA string and
// ignored u.ua, even though InitHttpClient populates it from the caller's
// params.
func (u *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	req.Header.Set("User-Agent", u.ua)
	return u.rt.RoundTrip(req)
}
// InitHttpClient builds the NgxClient used to query nginx stub_status,
// wiring in the timeout and the User-Agent round tripper. Fixed: the
// original dereferenced p.EndPoint / p.UserAgent without a nil check and
// would panic on incomplete params.
func InitHttpClient(p NgxClientParams) (client *NgxClient, err error) {
	if p.EndPoint == nil || p.UserAgent == nil {
		return nil, fmt.Errorf("ngx: EndPoint and UserAgent are required")
	}
	transport := &http.Transport{}
	ut := &userAgentRoundTripper{
		ua: *p.UserAgent,
		rt: transport,
	}
	httpClient := &http.Client{
		Timeout:   p.Timeout,
		Transport: ut,
	}
	client = &NgxClient{
		endPoint:   *p.EndPoint,
		httpClient: httpClient,
	}
	return client, nil
}
|
package myArray
// transfer receives a slice argument. The slice header is copied by value
// while the backing array is shared with the caller. Body intentionally empty.
func transfer(array []int) {
}

// testAssign demonstrates passing a slice literal to a function.
func testAssign() {
	datas := []int{1, 2, 3, 4, 5}
	transfer(datas)
}

// Test_Array is the exported entry point that exercises testAssign.
func Test_Array() {
	testAssign()
}
|
package main
// Leetcode 509. (easy)
// fib returns the n-th Fibonacci number via fast matrix exponentiation,
// using the identity [[1,1],[1,0]]^(n-1) = [[F(n),F(n-1)],[F(n-1),F(n-2)]].
func fib(n int) int {
	if n < 2 {
		return n
	}
	base := [2][2]int{{1, 1}, {1, 0}}
	return matrixPow(base, n-1)[0][0]
}

// matrixPow raises a 2x2 matrix to the n-th power by binary exponentiation.
func matrixPow(a [2][2]int, n int) [2][2]int {
	// Start from the identity matrix.
	result := [2][2]int{{1, 0}, {0, 1}}
	for ; n > 0; n >>= 1 {
		if n%2 != 0 {
			result = matrixMultiply(result, a)
		}
		a = matrixMultiply(a, a)
	}
	return result
}

// matrixMultiply returns the product of two 2x2 matrices.
func matrixMultiply(a, b [2][2]int) [2][2]int {
	var c [2][2]int
	for row := 0; row < 2; row++ {
		for col := 0; col < 2; col++ {
			c[row][col] = a[row][0]*b[0][col] + a[row][1]*b[1][col]
		}
	}
	return c
}
|
package common
import (
"context"
"os"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/stretchr/testify/assert"
)
// TestCommonEnvironmentClient checks that repeated calls return an
// equivalent env-configured client and that the user agent string reflects
// the resource name from the context and the provider's Terraform version.
func TestCommonEnvironmentClient(t *testing.T) {
	ResetCommonEnvironmentClient()
	defer CleanupEnvironment()()
	// Minimal placeholder credentials so client construction succeeds.
	os.Setenv("DATABRICKS_TOKEN", ".")
	os.Setenv("DATABRICKS_HOST", ".")
	c := CommonEnvironmentClient()
	c2 := CommonEnvironmentClient()
	ctx := context.Background()
	// Both calls must produce the same user agent (same configuration).
	assert.Equal(t, c2.userAgent(ctx), c.userAgent(ctx))
	assert.Equal(t, "databricks-tf-provider/"+version+" (+unknown) terraform/unknown", c.userAgent(ctx))
	// Resource name and Terraform version are reflected in the UA string.
	ctx = context.WithValue(ctx, ResourceName, "cluster")
	c.Provider = &schema.Provider{
		TerraformVersion: "0.12",
	}
	assert.Equal(t, "databricks-tf-provider/"+version+" (+cluster) terraform/0.12", c.userAgent(ctx))
}
|
package Majority_Element
import (
"github.com/stretchr/testify/assert"
"testing"
)
// TestME verifies majorityElement against several fixed inputs.
func TestME(t *testing.T) {
	ast := assert.New(t)
	cases := []struct {
		input []int
		want  int
	}{
		{[]int{1, 1, 1, 1, 1, 3, 4, 5}, 1},
		{[]int{1, 2, 3, 4, 1, 1, 1, 1}, 1},
		{[]int{3, 3, 4}, 3},
		{[]int{2, 2, 1, 1, 1, 2, 2}, 2},
	}
	for _, tc := range cases {
		ast.Equal(majorityElement(tc.input), tc.want)
	}
}
|
package handlers
import (
"github.com/stretchr/testify/assert"
"testing"
)
// TestParseListParams verifies that parseCriteria enables exactly the
// include-flags corresponding to the requested object type names.
func TestParseListParams(t *testing.T) {
	namespace := "bookinfo"

	// An empty object list selects every supported type.
	criteria := parseCriteria(namespace, "")
	assert.Equal(t, "bookinfo", criteria.Namespace)
	assert.True(t, criteria.IncludeVirtualServices)
	assert.True(t, criteria.IncludeDestinationRules)
	assert.True(t, criteria.IncludeServiceEntries)
	assert.True(t, criteria.IncludeRules)
	assert.True(t, criteria.IncludeQuotaSpecs)
	assert.True(t, criteria.IncludeQuotaSpecBindings)
	assert.True(t, criteria.IncludeMeshPolicies)

	// For a non-empty list only the recognized, named types are included.
	cases := []struct {
		objects                                             string
		gateways, virtualServices, destinationRules        bool
		serviceEntries, rules, quotaSpecs, quotaSpecBindings bool
		meshPolicies                                        bool
	}{
		{objects: "gateways", gateways: true},
		{objects: "virtualservices", virtualServices: true},
		{objects: "destinationrules", destinationRules: true},
		{objects: "serviceentries", serviceEntries: true},
		{objects: "rules", rules: true},
		{objects: "quotaspecs", quotaSpecs: true},
		{objects: "quotaspecbindings", quotaSpecBindings: true},
		{objects: "virtualservices,rules", virtualServices: true, rules: true},
		{objects: "destinationrules,virtualservices", virtualServices: true, destinationRules: true},
		{objects: "meshpolicies", meshPolicies: true},
		{objects: "notsupported"},
		{objects: "notsupported,rules", rules: true},
	}
	for _, tc := range cases {
		criteria = parseCriteria(namespace, tc.objects)
		assert.Equal(t, tc.gateways, criteria.IncludeGateways, tc.objects)
		assert.Equal(t, tc.virtualServices, criteria.IncludeVirtualServices, tc.objects)
		assert.Equal(t, tc.destinationRules, criteria.IncludeDestinationRules, tc.objects)
		assert.Equal(t, tc.serviceEntries, criteria.IncludeServiceEntries, tc.objects)
		assert.Equal(t, tc.rules, criteria.IncludeRules, tc.objects)
		assert.Equal(t, tc.quotaSpecs, criteria.IncludeQuotaSpecs, tc.objects)
		assert.Equal(t, tc.quotaSpecBindings, criteria.IncludeQuotaSpecBindings, tc.objects)
		assert.Equal(t, tc.meshPolicies, criteria.IncludeMeshPolicies, tc.objects)
	}
}
|
package msg
import (
"github.com/name5566/leaf/network/protobuf"
)
var (
	// Processor serializes and routes protobuf messages for the leaf framework.
	Processor = protobuf.NewProcessor()
)

// init registers the protobuf message types handled by this processor.
func init() { // protobuf messages are registered here
	Processor.Register(&TocNotifyConnect{})
	Processor.Register(&TosChat{})
	Processor.Register(&TocChat{})
}
|
package svr
import (
"net/http"
"github.com/gin-gonic/gin"
)
// noRoute is the gin fallback handler for unmatched paths: it aborts the
// handler chain with a 404 status and an empty body.
func noRoute(c *gin.Context) {
	c.AbortWithStatus(http.StatusNotFound)
}
|
package main
import (
"fmt"
)
// lowerBound returns the index of the first element in nums that is greater
// than or equal to target (len(nums) when no such element exists).
func lowerBound(nums []int, target int) int {
	lo, hi := 0, len(nums)
	for lo < hi {
		mid := (lo + hi) / 2
		// Unlike upperBound, elements equal to target pull hi leftwards, so
		// hi settles on the first index whose value is >= target.
		if nums[mid] < target {
			lo = mid + 1
		} else {
			hi = mid
		}
	}
	return hi
}
// upperBound returns the index of the first element in nums that is strictly
// greater than target (len(nums) when no such element exists).
func upperBound(nums []int, target int) int {
	lo, hi := 0, len(nums)
	for lo < hi {
		mid := (lo + hi) / 2
		// <= keeps lo moving right past elements equal to target, so hi
		// settles on the first index whose value is > target.
		if nums[mid] <= target {
			lo = mid + 1
		} else {
			hi = mid
		}
	}
	return hi
}
// binarySearch returns an index of target in the sorted slice nums, or -1
// when target is absent.
func binarySearch(nums []int, target int) int {
	l, r := 0, len(nums) // half-open search window [l, r)
	for l < r {
		// Bug fix: the midpoint was computed as l+(l-r)/2, which moves m
		// below l (and can index out of range). l+(r-l)/2 is the
		// overflow-safe midpoint the original comment intended.
		m := l + (r-l)/2
		if nums[m] == target {
			return m
		} else if nums[m] < target {
			l = m + 1
		} else {
			r = m
		}
	}
	return -1
}
// lower_bound reports the smallest index i in the sorted slice arr with
// arr[i] >= x; it returns len(arr) when every element is smaller than x.
func lower_bound(arr []int, x int) int {
	left, right := 0, len(arr)
	for left < right {
		mid := left + (right-left)/2
		if arr[mid] >= x {
			right = mid
		} else {
			left = mid + 1
		}
	}
	// left == right here; either bound is the answer.
	return left
}
// main exercises the search helpers with a few ad-hoc inputs and prints
// the results.
func main() {
	fmt.Println(upperBound([]int{1,2,3,3,3,3,4,5},5))
	a := []int{1,2,3,4,5}
	// Shift elements left by one starting at index 2 (logically drops a[1];
	// the last element stays duplicated).
	copy(a[1:],a[2:])
	fmt.Println(a)
	fmt.Println(bslessOne([]int{1,2,3,4,6,7},5))
	fmt.Println(bslessOne([]int{1,2,3,4,6,7},6))
	fmt.Println(bslessOne([]int{1,2,3,4,6,7},8))
	fmt.Println(bslessOne([]int{1,2,3,4,6,7},0))
}
// bslessOne searches the sorted slice data for key. It returns the index of
// key when present; otherwise the index of the last element smaller than
// key, or -1 when every element is >= key.
func bslessOne(data []int, key int) int {
	lo, hi := 0, len(data)
	for lo < hi {
		mid := lo + (hi-lo)/2
		switch {
		case data[mid] == key:
			return mid
		case data[mid] < key:
			lo = mid + 1
		default:
			hi = mid
		}
	}
	// hi is the insertion point, so hi-1 is the last element below key.
	return hi - 1
}
package main
import (
"encoding/json"
"flag"
"log"
"net/http"
"os"
"github.com/stripe/stripe-go"
"github.com/stripe/stripe-go/client"
)
// the version string is injected during the build process
var version string
var (
dev bool // development mode?
keys map[string]string // public key -> secret key
)
// main parses flags, loads the Stripe key map from the key file, and serves
// the subscription endpoint on the configured address.
func main() {
	addr := flag.String("addr", ":8080", "server address")
	flag.BoolVar(&dev, "dev", false, "development mode")
	keyFile := flag.String("keys", "/etc/stripe-keys.json", "Stripe keys")
	printVersion := flag.Bool("version", false, "print version")
	flag.Parse()
	if *printVersion {
		println(version)
		return
	}
	// Populate the package-level keys map (public key -> secret key).
	f, err := os.Open(*keyFile)
	if err != nil {
		log.Fatalf("failed to open key file: %v", err)
	}
	if err := json.NewDecoder(f).Decode(&keys); err != nil {
		log.Fatalf("failed to parse key file: %v", err)
	}
	f.Close()
	http.HandleFunc("/", handle)
	log.Printf("listening on %s", *addr)
	if err := http.ListenAndServe(*addr, nil); err != nil {
		log.Fatal(err)
	}
}
// Request is the JSON payload accepted by the subscription endpoint.
type Request struct {
	Pubkey string // Stripe public key used to generate the token
	Token string // Stripe checkout.js generated token
	Email string // customer email
	Plan string // Stripe plan identifier to subscribe the customer to
	Quantity uint64 // How many of the plan to subscribe
	Once bool // Cancel the plan once subscribed
}
// handle processes a subscription request: it decodes the JSON body, looks
// up the secret key for the supplied public key, creates a Stripe customer
// subscribed to the requested plan, and — when Once is set — immediately
// schedules the subscription to cancel at period end.
func handle(w http.ResponseWriter, r *http.Request) {
	if dev { // Allow CORS in dev mode
		w.Header().Add("Access-Control-Allow-Origin", "*")
		w.Header().Add("Access-Control-Allow-Headers", "Content-Type")
	}
	switch r.Method {
	case "POST":
		break
	case "OPTIONS":
		// Allow CORS preflight requests
		return
	default:
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	var req Request
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "bad request", http.StatusBadRequest)
		return
	}
	if dev {
		log.Printf("%#v", req)
	}
	// The public key doubles as the account selector; unknown keys are 404s.
	seckey, ok := keys[req.Pubkey]
	if !ok {
		http.Error(w, "account not found", http.StatusNotFound)
		return
	}
	cli := &client.API{}
	cli.Init(seckey, nil)
	ps := &stripe.CustomerParams{
		Email:    req.Email,
		Plan:     req.Plan,
		Quantity: req.Quantity,
	}
	err := ps.SetSource(req.Token)
	if err != nil {
		log.Printf("unsupported source %v: %v", req.Token, err)
		http.Error(w, "server error", http.StatusInternalServerError)
		return
	}
	cus, err := cli.Customers.New(ps)
	if err != nil {
		log.Printf("failed to subscribe (email=%q token=%q plan=%q x %v): %s", req.Email, req.Token, req.Plan, req.Quantity, err)
		http.Error(w, "server error", http.StatusInternalServerError)
		return
	}
	if req.Once {
		// EndCancel lets the paid period run out instead of cancelling now.
		for _, sub := range cus.Subs.Values {
			if err := cli.Subs.Cancel(sub.ID, &stripe.SubParams{Customer: cus.ID, EndCancel: true}); err != nil {
				log.Printf("failed to unsubscribe customer ID = %v: %s sub ID = %v", cus.ID, sub.ID, err)
				http.Error(w, "server error", http.StatusInternalServerError)
				return
			}
		}
	}
	w.Write([]byte("OK"))
}
|
package cherryInterfaces
import "fmt"
// PacketDecoder decodes raw bytes from the wire into zero or more packets.
type PacketDecoder interface {
	Decode(data []byte) ([]*Packet, error)
}

// PacketEncoder encodes a typed payload into its wire representation.
type PacketEncoder interface {
	Encode(typ byte, buf []byte) ([]byte, error)
}
// Packet represents a network packet.
type Packet struct {
	Type   byte   // packet type identifier
	Length int    // payload length
	Data   []byte // raw payload bytes
}

// New creates an empty Packet instance.
func New() *Packet {
	p := Packet{}
	return &p
}

// String renders the Packet as human-readable text.
func (p *Packet) String() string {
	return fmt.Sprintf(
		"Type: %d, Length: %d, Settings: %s",
		p.Type,
		p.Length,
		string(p.Data),
	)
}
|
package microGin
import (
"todoList/app/options"
"github.com/gin-gonic/gin"
)
// MicroGin bundles a gin engine with the listen address it serves on.
type MicroGin struct {
	Engine *gin.Engine // underlying gin HTTP engine
	Listen string      // address to listen on, e.g. ":8080"
}
// NewMicroGin constructs a MicroGin with gin's default engine (logger and
// recovery middleware) listening on the address from the global options.
// (Reformatted to be gofmt-clean; behavior unchanged.)
func NewMicroGin() *MicroGin {
	return &MicroGin{
		Engine: gin.Default(),
		Listen: options.Options.GinService.Listen,
	}
}
// Run starts the gin engine on the configured listen address and blocks.
// NOTE(review): any error returned by Engine.Run (e.g. the port is already
// taken) is currently discarded — consider surfacing it to the caller.
// (Reformatted to be gofmt-clean; behavior unchanged.)
func (m *MicroGin) Run() {
	m.Engine.Run(m.Listen)
}
|
package user
import (
user "goto/logic/user"
"github.com/gin-gonic/gin"
)
// Hello responds 200 with the greeting produced by the user logic layer.
func Hello(c *gin.Context) {
	c.JSON(200, gin.H{
		"code":    0,
		"message": user.Hello(),
		"data":    "",
	})
}
// List responds 200 with the user list from the logic layer.
func List(c *gin.Context) {
	c.JSON(200, gin.H{
		"code":    0,
		"message": "",
		"data":    user.List(),
	})
}
// One responds 200 with the single-user result from the logic layer.
func One(c *gin.Context) {
	c.JSON(200, gin.H{
		"code":    0,
		"message": user.One(),
		"data":    "",
	})
}
// Add responds 200 with the result of creating a user in the logic layer.
func Add(c *gin.Context) {
	c.JSON(200, gin.H{
		"code":    0,
		"message": user.Add(),
		"data":    "",
	})
}
// Delete responds 200 with the result of deleting a user in the logic layer.
func Delete(c *gin.Context) {
	c.JSON(200, gin.H{
		"code":    0,
		"message": user.Delete(),
		"data":    "",
	})
}
|
package ut
import (
"../../datahub"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/stretchr/testify/assert"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
)
// TestJoinGroup verifies the joinGroup request (method, path, headers,
// JSON body) against a fake server and that the canned response fields are
// mapped onto the JoinGroup result.
func TestJoinGroup(t *testing.T) {
	// Fake DataHub endpoint: asserts on the incoming request, then returns
	// a canned consumer-group response.
	ts := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
		assert.Equal(t, requests.POST, request.Method)
		assert.Equal(t, "/projects/test_project/topics/test_topic/subscriptions/test_subId", request.URL.EscapedPath())
		assert.Equal(t, "application/json", request.Header.Get("Content-Type"))
		defer request.Body.Close()
		body, err := ioutil.ReadAll(request.Body)
		assert.Nil(t, err)
		assert.NotNil(t, body)
		str := string(body)
		assert.Equal(t, "{\"Action\":\"joinGroup\",\"SessionTimeout\":60000}", str)
		writer.Header().Set("x-datahub-request-id", "request_id")
		writer.WriteHeader(http.StatusOK)
		_, _ = writer.Write([]byte("{\"ConsumerId\": \"test_sub_id-1\",\"VersionId\":1,\"SessionTimeout\": 60000}"))
	}))
	defer ts.Close()
	dh := datahub.New("a", "a", ts.URL)
	jg, err := dh.JoinGroup("test_project", "test_topic", "test_subId", 60000)
	assert.Nil(t, err)
	assert.NotNil(t, jg)
	assert.Equal(t, http.StatusOK, jg.StatusCode)
	assert.Equal(t, "request_id", jg.RequestId)
	assert.Equal(t, "test_sub_id-1", jg.ConsumerId)
	assert.Equal(t, int64(1), jg.VersionId)
	assert.Equal(t, int64(60000), jg.SessionTimeout)
}
// TestHeartbeat verifies the heartbeat request body and that the shard
// plan in the canned response is mapped onto the Heartbeat result.
func TestHeartbeat(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
		assert.Equal(t, requests.POST, request.Method)
		assert.Equal(t, "/projects/test_project/topics/test_topic/subscriptions/test_subId", request.URL.EscapedPath())
		assert.Equal(t, "application/json", request.Header.Get("Content-Type"))
		defer request.Body.Close()
		body, err := ioutil.ReadAll(request.Body)
		assert.Nil(t, err)
		assert.NotNil(t, body)
		str := string(body)
		assert.Equal(t, "{\"Action\":\"heartbeat\",\"ConsumerId\":\"test_consumer_id\",\"VersionId\":1,\"HoldShardList\":[\"0\",\"1\"]}", str)
		writer.Header().Set("x-datahub-request-id", "request_id")
		writer.WriteHeader(http.StatusOK)
		_, _ = writer.Write([]byte("{\"ShardList\": [\"0\", \"1\"], \"TotalPlan\": \"xxx\", \"PlanVersion\": 1}"))
	}))
	defer ts.Close()
	dh := datahub.New("a", "a", ts.URL)
	holdShardIds := []string{"0", "1"}
	ret, err := dh.Heartbeat("test_project", "test_topic", "test_subId", "test_consumer_id", 1, holdShardIds, nil)
	assert.Nil(t, err)
	assert.NotNil(t, ret)
	assert.Equal(t, http.StatusOK, ret.StatusCode)
	assert.Equal(t, "request_id", ret.RequestId)
	assert.Equal(t, int64(1), ret.PlanVersion)
	assert.Equal(t, "0", ret.ShardList[0])
	assert.Equal(t, "1", ret.ShardList[1])
	assert.Equal(t, "xxx", ret.TotalPlan)
}
// TestSyncGroup verifies the syncGroup request body (release/read-end shard
// lists) and the basic response metadata.
func TestSyncGroup(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
		assert.Equal(t, requests.POST, request.Method)
		assert.Equal(t, "/projects/test_project/topics/test_topic/subscriptions/test_subId", request.URL.EscapedPath())
		assert.Equal(t, "application/json", request.Header.Get("Content-Type"))
		defer request.Body.Close()
		body, err := ioutil.ReadAll(request.Body)
		assert.Nil(t, err)
		assert.NotNil(t, body)
		str := string(body)
		assert.Equal(t, "{\"Action\":\"syncGroup\",\"ConsumerId\":\"test_consumer_id\",\"VersionId\":1,\"ReleaseShardList\":[\"0\",\"1\"],\"ReadEndShardList\":[\"2\",\"3\"]}", str)
		writer.Header().Set("x-datahub-request-id", "request_id")
		writer.WriteHeader(http.StatusOK)
	}))
	defer ts.Close()
	dh := datahub.New("a", "a", ts.URL)
	releaseShardList := []string{"0", "1"}
	readEndShardList := []string{"2", "3"}
	ret, err := dh.SyncGroup("test_project", "test_topic", "test_subId", "test_consumer_id", 1, releaseShardList, readEndShardList)
	assert.Nil(t, err)
	assert.NotNil(t, ret)
	assert.Equal(t, http.StatusOK, ret.StatusCode)
	assert.Equal(t, "request_id", ret.RequestId)
}
// TestLeaveGroups verifies the leaveGroup request body and the basic
// response metadata.
func TestLeaveGroups(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
		assert.Equal(t, requests.POST, request.Method)
		assert.Equal(t, "/projects/test_project/topics/test_topic/subscriptions/test_subId", request.URL.EscapedPath())
		assert.Equal(t, "application/json", request.Header.Get("Content-Type"))
		defer request.Body.Close()
		body, err := ioutil.ReadAll(request.Body)
		assert.Nil(t, err)
		assert.NotNil(t, body)
		str := string(body)
		assert.Equal(t, "{\"Action\":\"leaveGroup\",\"ConsumerId\":\"test_consumer_id\",\"VersionId\":1}", str)
		writer.Header().Set("x-datahub-request-id", "request_id")
		writer.WriteHeader(http.StatusOK)
	}))
	defer ts.Close()
	dh := datahub.New("a", "a", ts.URL)
	ret, err := dh.LeaveGroup("test_project", "test_topic", "test_subId", "test_consumer_id", 1)
	assert.Nil(t, err)
	assert.NotNil(t, ret)
	assert.Equal(t, http.StatusOK, ret.StatusCode)
	assert.Equal(t, "request_id", ret.RequestId)
}
|
/*
Copyright 2022 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package manifest
import (
"context"
"encoding/json"
"github.com/containerd/containerd/platforms"
spec "github.com/opencontainers/image-spec/specs-go/v1"
v1 "k8s.io/api/core/v1"
apimachinery "k8s.io/apimachinery/pkg/runtime/schema"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/output/log"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/yaml"
)
const (
	// tolerationField is the pod-spec key that holds the tolerations list.
	tolerationField = "tolerations"
)

// gkeARMToleration matches the taint GKE places on ARM64 nodes:
// kubernetes.io/arch="arm64":NoSchedule.
var gkeARMToleration = v1.Toleration{
	Key: "kubernetes.io/arch",
	Operator: v1.TolerationOpEqual,
	Value: "arm64",
	Effect: v1.TaintEffectNoSchedule,
}
// SetGKEARMToleration adds a toleration for GKE ARM node taint `kubernetes.io/arch="arm64":NoSchedule`.
// It walks every resource selected by rs and updates specs whose target
// platforms include linux/arm64 (see gkeTolerationSetter.Visit).
func (l *ManifestList) SetGKEARMToleration(ctx context.Context, rs ResourceSelector, platforms PodPlatforms) (ManifestList, error) {
	r := &gkeTolerationSetter{ctx: ctx, platforms: platforms}
	return l.Visit(r, rs)
}
// gkeTolerationSetter is the manifest visitor that injects the GKE ARM
// toleration into pod specs targeting linux/arm64.
type gkeTolerationSetter struct {
	ctx       context.Context // used for logging only
	platforms PodPlatforms    // target platforms keyed by manifest navpath
}
// Visit inspects the value v at navpath/k and, when the pod's target
// platforms include linux/arm64, injects the GKE ARM toleration into the
// spec's tolerations list in place.
//
// NOTE(review): the bool return appears to control whether traversal
// descends further into this subtree — confirm against ManifestList.Visit.
func (s *gkeTolerationSetter) Visit(gk apimachinery.GroupKind, navpath string, o map[string]interface{}, k string, v interface{}, rs ResourceSelector) bool {
	if _, ok := rs.allowByNavpath(gk, navpath, k); !ok {
		return true
	}
	// No platform information at all: nothing to do anywhere.
	if len(s.platforms) == 0 {
		return false
	}
	pls, ok := s.platforms[navpath]
	if !ok || len(pls) == 0 {
		return true
	}
	// Only specs that may schedule onto linux/arm64 need the toleration.
	matcher := platforms.Any(pls...)
	if !matcher.Match(spec.Platform{OS: "linux", Architecture: "arm64"}) {
		return false
	}
	spec, ok := v.(map[string]interface{})
	if !ok {
		return true
	}
	tolerations, err := addGKEARMToleration(spec[tolerationField])
	if err != nil {
		// Best effort: log and keep going rather than failing the deploy.
		log.Entry(s.ctx).Debugf("failed to update spec tolerations: %s", err.Error())
		return true
	}
	spec[tolerationField] = tolerations
	return false
}
// addGKEARMToleration appends the GKE ARM toleration to the given raw
// tolerations value unless an identical toleration is already present, and
// returns the result as a generic []interface{} suitable for writing back
// into the manifest map.
func addGKEARMToleration(data interface{}) ([]interface{}, error) {
	var tolerations []v1.Toleration
	if data != nil {
		// Round-trip through JSON to turn the untyped manifest value into
		// typed tolerations we can compare against.
		b, err := json.Marshal(data)
		if err != nil {
			return nil, err
		}
		if err = json.Unmarshal(b, &tolerations); err != nil {
			return nil, err
		}
	}
	exists := false
	for _, t := range tolerations {
		if t == gkeARMToleration {
			exists = true
			break
		}
	}
	if !exists {
		tolerations = append(tolerations, gkeARMToleration)
	}
	bytes, err := json.Marshal(tolerations)
	if err != nil {
		return nil, err
	}
	var sl []interface{}
	if err := yaml.Unmarshal(bytes, &sl); err != nil {
		return nil, err
	}
	// Fix: err is known to be nil here; previously the stale err variable
	// was returned, which obscured the success path.
	return sl, nil
}
|
package trigger
import (
"context"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"github.com/tilt-dev/tilt/internal/controllers/indexer"
"github.com/tilt-dev/tilt/internal/sliceutils"
"github.com/tilt-dev/tilt/internal/timecmp"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
)
// GroupVersionKinds of the trigger dependencies we index and watch.
var fwGVK = v1alpha1.SchemeGroupVersion.WithKind("FileWatch")
var btnGVK = v1alpha1.SchemeGroupVersion.WithKind("UIButton")
// SetupControllerRestartOn sets up watchers / indexers for a type with a RestartOnSpec.
// It indexes each object by the FileWatches and UIButtons its spec names,
// then registers watches so changes to those objects enqueue reconciles.
func SetupControllerRestartOn(builder *builder.Builder, idxer *indexer.Indexer, extract func(obj client.Object) *v1alpha1.RestartOnSpec) {
	idxer.AddKeyFunc(
		func(obj client.Object) []indexer.Key {
			spec := extract(obj)
			if spec == nil {
				return nil
			}
			var keys []indexer.Key
			keys = append(keys, indexerKeys(fwGVK, obj.GetNamespace(), spec.FileWatches)...)
			keys = append(keys, indexerKeys(btnGVK, obj.GetNamespace(), spec.UIButtons)...)
			return keys
		})
	registerWatches(builder, idxer, []client.Object{&v1alpha1.FileWatch{}, &v1alpha1.UIButton{}})
}
// SetupControllerStartOn sets up watchers / indexers for a type with a StartOnSpec.
// StartOn only depends on UIButtons, so only buttons are indexed and watched.
func SetupControllerStartOn(builder *builder.Builder, idxer *indexer.Indexer, extract func(obj client.Object) *v1alpha1.StartOnSpec) {
	idxer.AddKeyFunc(
		func(obj client.Object) []indexer.Key {
			spec := extract(obj)
			if spec == nil {
				return nil
			}
			return indexerKeys(btnGVK, obj.GetNamespace(), spec.UIButtons)
		})
	registerWatches(builder, idxer, []client.Object{&v1alpha1.UIButton{}})
}
// SetupControllerStopOn sets up watchers / indexers for a type with a StopOnSpec.
// StopOn only depends on UIButtons, so only buttons are indexed and watched.
func SetupControllerStopOn(builder *builder.Builder, idxer *indexer.Indexer, extract func(obj client.Object) *v1alpha1.StopOnSpec) {
	idxer.AddKeyFunc(
		func(obj client.Object) []indexer.Key {
			spec := extract(obj)
			if spec == nil {
				return nil
			}
			return indexerKeys(btnGVK, obj.GetNamespace(), spec.UIButtons)
		})
	registerWatches(builder, idxer, []client.Object{&v1alpha1.UIButton{}})
}
// indexerKeys builds one indexer key per name, all scoped to the given
// namespace and GroupVersionKind. Returns nil for an empty name list.
func indexerKeys(gvk schema.GroupVersionKind, namespace string, names []string) []indexer.Key {
	var keys []indexer.Key
	for _, name := range names {
		nn := types.NamespacedName{Namespace: namespace, Name: name}
		keys = append(keys, indexer.Key{Name: nn, GVK: gvk})
	}
	return keys
}
// registerWatches wires each type into the controller builder so that
// changes to objects of that type are enqueued through the indexer.
func registerWatches(builder *builder.Builder, idxer *indexer.Indexer, typesToWatch []client.Object) {
	for _, t := range typesToWatch {
		builder.Watches(t, handler.EnqueueRequestsFromMapFunc(idxer.Enqueue))
	}
}
// fetchButtons retrieves all the buttons that this object depends on.
//
// If a button isn't in the API server yet, it will simply be missing from the map.
//
// Other errors reaching the API server will be returned to the caller.
//
// TODO(nick): If the user typos a button name, there's currently no feedback
//
// that this is happening. This is probably the correct product behavior (in particular:
// resources should still run if one of their triggers has been deleted).
// We might eventually need trigger statuses to express errors in lookup.
func fetchButtons(ctx context.Context, client client.Reader, buttonNames []string) (map[string]*v1alpha1.UIButton, error) {
	buttons := make(map[string]*v1alpha1.UIButton, len(buttonNames))
	for _, n := range buttonNames {
		// Skip duplicate names: each button is fetched at most once.
		_, exists := buttons[n]
		if exists {
			continue
		}
		b := &v1alpha1.UIButton{}
		err := client.Get(ctx, types.NamespacedName{Name: n}, b)
		if err != nil {
			if apierrors.IsNotFound(err) {
				continue
			}
			return nil, err
		}
		buttons[n] = b
	}
	return buttons, nil
}
// fetchFileWatches retrieves all the filewatches that this object depends on.
//
// If a filewatch isn't in the API server yet, it will simply be missing from the slice.
//
// Other errors reaching the API server will be returned to the caller.
//
// TODO(nick): If the user typos a filewatch name, there's currently no feedback
//
// that this is happening. This is probably the correct product behavior (in particular:
// resources should still run if one of their triggers has been deleted).
// We might eventually need trigger statuses to express errors in lookup.
func fetchFileWatches(ctx context.Context, client client.Reader, fwNames []string) ([]*v1alpha1.FileWatch, error) {
	result := []*v1alpha1.FileWatch{}
	for _, n := range fwNames {
		fw := &v1alpha1.FileWatch{}
		err := client.Get(ctx, types.NamespacedName{Name: n}, fw)
		if err != nil {
			if apierrors.IsNotFound(err) {
				// Missing filewatches are silently skipped (see doc above).
				continue
			}
			return nil, err
		}
		result = append(result, fw)
	}
	return result, nil
}
// LastStartEvent determines the last time a start was requested from this target's dependencies.
//
// Returns the most recent start time. If the most recent start is from a button,
// return the button. Some consumers use the button for text inputs.
func LastStartEvent(ctx context.Context, cli client.Reader, startOn *v1alpha1.StartOnSpec) (metav1.MicroTime, *v1alpha1.UIButton, error) {
	if startOn == nil {
		return metav1.MicroTime{}, nil, nil
	}
	buttons, err := fetchButtons(ctx, cli, startOn.UIButtons)
	if err != nil {
		return metav1.MicroTime{}, nil, err
	}
	var latestTime metav1.MicroTime
	var latestButton *v1alpha1.UIButton
	// ensure predictable iteration order by using the list from the spec
	// (currently, missing buttons are simply ignored)
	for _, buttonName := range startOn.UIButtons {
		b := buttons[buttonName]
		if b == nil {
			continue
		}
		lastEventTime := b.Status.LastClickedAt
		// Clicks earlier than StartAfter are ignored entirely.
		if timecmp.AfterOrEqual(lastEventTime, startOn.StartAfter) && timecmp.After(lastEventTime, latestTime) {
			latestTime = lastEventTime
			latestButton = b
		}
	}
	return latestTime, latestButton, nil
}
// LastRestartEvent determines the last time a restart was requested from this target's dependencies.
//
// Returns the most recent restart time.
//
// If the most recent restart is from a button, return the button. Some consumers use the button for text inputs.
// If the most recent restart is from a FileWatch, return the FileWatch. Some consumers use the FileWatch to
// determine if they should re-run or not to avoid repeated failures.
func LastRestartEvent(ctx context.Context, cli client.Reader, restartOn *v1alpha1.RestartOnSpec) (metav1.MicroTime, *v1alpha1.UIButton, []*v1alpha1.FileWatch, error) {
	var fws []*v1alpha1.FileWatch
	if restartOn == nil {
		return metav1.MicroTime{}, nil, fws, nil
	}
	buttons, err := fetchButtons(ctx, cli, restartOn.UIButtons)
	if err != nil {
		return metav1.MicroTime{}, nil, fws, err
	}
	fws, err = fetchFileWatches(ctx, cli, restartOn.FileWatches)
	if err != nil {
		return metav1.MicroTime{}, nil, fws, err
	}
	var cur metav1.MicroTime
	var latestButton *v1alpha1.UIButton
	// First pass: newest file event across all filewatches.
	for _, fw := range fws {
		lastEventTime := fw.Status.LastEventTime
		if timecmp.After(lastEventTime, cur) {
			cur = lastEventTime
		}
	}
	// ensure predictable iteration order by using the list from the spec
	// (currently, missing buttons are simply ignored)
	for _, buttonName := range restartOn.UIButtons {
		b := buttons[buttonName]
		if b == nil {
			continue
		}
		lastEventTime := b.Status.LastClickedAt
		// latestButton is only set when a click beats every file event.
		if timecmp.After(lastEventTime, cur) {
			cur = lastEventTime
			latestButton = b
		}
	}
	return cur, latestButton, fws, nil
}
// FilesChanged determines the set of files that have changed since the given timestamp.
// We err on the side of undercounting (i.e., skipping files that may have triggered
// this build but are not sure).
func FilesChanged(restartOn *v1alpha1.RestartOnSpec, fileWatches []*v1alpha1.FileWatch, lastBuild time.Time) []string {
	filesChanged := []string{}
	// restartOn is only consulted as a nil guard; the events come from
	// the fileWatches argument.
	if restartOn == nil {
		return filesChanged
	}
	for _, fw := range fileWatches {
		// Add files so that the most recent files are first.
		for i := len(fw.Status.FileEvents) - 1; i >= 0; i-- {
			e := fw.Status.FileEvents[i]
			if timecmp.After(e.Time, lastBuild) {
				filesChanged = append(filesChanged, e.SeenFiles...)
			}
		}
	}
	return sliceutils.DedupedAndSorted(filesChanged)
}
// LastStopEvent determines the latest time a stop was requested from this target's dependencies.
//
// Returns the most recent stop time. If the most recent stop is from a button,
// return the button. Some consumers use the button for text inputs.
func LastStopEvent(ctx context.Context, cli client.Reader, stopOn *v1alpha1.StopOnSpec) (metav1.MicroTime, *v1alpha1.UIButton, error) {
	if stopOn == nil {
		return metav1.MicroTime{}, nil, nil
	}
	buttons, err := fetchButtons(ctx, cli, stopOn.UIButtons)
	if err != nil {
		return metav1.MicroTime{}, nil, err
	}
	var latestTime metav1.MicroTime
	var latestButton *v1alpha1.UIButton
	// ensure predictable iteration order by using the list from the spec
	// (currently, missing buttons are simply ignored)
	for _, buttonName := range stopOn.UIButtons {
		b := buttons[buttonName]
		if b == nil {
			continue
		}
		lastEventTime := b.Status.LastClickedAt
		if timecmp.After(lastEventTime, latestTime) {
			latestTime = lastEventTime
			latestButton = b
		}
	}
	return latestTime, latestButton, nil
}
|
package Algorithms
import (
GD "GoGraph/DataStructure"
GG "GoGraph/Graph"
GV "GoGraph/Vertex"
)
// dijkstraOnDense computes single-source shortest paths from s using the
// O(V^2) adjacency-matrix variant of Dijkstra, suited to dense graphs.
//
// Side effects: mutates every vertex's Distance field. Panics when the
// graph is disconnected.
//
// NOTE(review): 1000000 serves as "infinity"; distances at or beyond that
// value would be mishandled — consider math.MaxFloat32.
func dijkstraOnDense(s *GV.Vertex, graph *GG.Graph) {
	// Build the adjacency matrix lazily if it is not cached yet.
	if len(graph.AdjMatr) == 0 {
		graph.GetAdjMatrix()
	}
	var V []*GV.Vertex //A vertex set which has the shortest path from s.
	var U []*GV.Vertex //V(G)-V
	//initialize: every vertex starts unsettled at "infinite" distance.
	for _, val := range graph.AdjList {
		U = append(U, val)
	}
	for _, v := range U {
		v.Distance = 1000000
	}
	s.Distance = 0
	for {
		// Pick the unsettled vertex with the smallest tentative distance.
		var x *GV.Vertex
		min := float32(1000000)
		for _, val := range U {
			if val.Distance < min {
				x = val
				min = val.Distance
			}
		}
		if min == 1000000 {
			panic("the graph is disconnected")
		}
		V = append(V, x)
		U = remove(U, x)
		if len(U) == 0 {
			break
		}
		// Relax edges out of x. Id-1 converts what appear to be 1-based
		// vertex ids to matrix indices — assumed; confirm against GV.Vertex.
		for _, val := range U {
			if graph.AdjMatr[val.Id-1][x.Id-1] != 0 {
				val.Distance = getMin(val.Distance, x.Distance+graph.AdjMatr[val.Id-1][x.Id-1])
			}
		}
	}
}
// dijkstraOnSparse is a placeholder for a heap-based Dijkstra suited to
// sparse graphs. Not implemented yet.
//
// Bug fix: the previous body declared local variables V and U without using
// them, which is a compile error in Go ("declared and not used").
func dijkstraOnSparse(s *GV.Vertex, graph *GG.Graph) {
	// TODO: implement using a priority queue over (distance, vertex).
}
// remove returns a copy of list with every occurrence of v filtered out
// (comparison is by pointer identity).
func remove(list []*GV.Vertex, v *GV.Vertex) (res []*GV.Vertex) {
	for _, candidate := range list {
		if candidate == v {
			continue
		}
		res = append(res, candidate)
	}
	return
}
// getMin returns the smaller of a and b.
func getMin(a float32, b float32) float32 {
	if a > b {
		return b
	}
	return a
}
|
package smartling
// File represents a file tracked in the Smartling system.
// (The original comment referred to "FileStatus", which does not match the
// declared type name.)
type File struct {
	// FileURI is a unique path to the file in the Smartling system.
	FileURI string
	// FileType is a file type identifier.
	FileType FileType
	// LastUploaded is the time the file was last uploaded.
	LastUploaded UTC
	// HasInstructions specifies whether the file has instructions attached.
	HasInstructions bool
}
|
package credit_journal
import "cointhink/proto"
import "cointhink/model/account"
import "cointhink/db"
import "log"
// Columns lists the credit_journals table columns written by Insert.
var Columns = "id, account_id, schedule_id, status, stripe_tx, credit_adjustment, total_usd"

// Fields lists the named bind parameters matching Columns, in the same order.
var Fields = ":id, :account_id, :schedule_id, :status, :stripe_tx, :credit_adjustment, :total_usd"

// Table is the name of the backing database table.
var Table = "credit_journals"
// Insert assigns a fresh id to item and persists it as a new row in the
// credit_journals table. A database error is logged and returned.
func Insert(item *proto.CreditJournal) error {
	item.Id = db.NewId(Table)
	query := "insert into " + Table + " (" + Columns + ") " + "values (" + Fields + ")"
	if _, err := db.D.Handle.NamedExec(query, item); err != nil {
		log.Printf(Table+" Create err: %v", err)
		return err
	}
	return nil
}
// Credit writes a journal entry recording a credit adjustment for the account
// and, only if that insert succeeds, applies the same adjustment to the
// account's schedule credits.
func Credit(_account *proto.Account, stripeTx string, adjustment int32, value float32) error {
	entry := &proto.CreditJournal{
		AccountId:        _account.Id,
		CreditAdjustment: adjustment,
		StripeTx:         stripeTx,
		TotalUsd:         value}
	log.Printf("creditJournal Credit %+v", entry)
	if err := Insert(entry); err != nil {
		return err
	}
	return account.AdjustScheduleCredits(_account, adjustment)
}
// Debit writes a journal entry recording a debit (the adjustment is stored
// negated) tied to the given schedule and, only if that insert succeeds,
// applies the negated adjustment to the account's schedule credits.
func Debit(_account *proto.Account, schedule *proto.Schedule, adjustment int32) error {
	adjustment = -adjustment // debit
	entry := &proto.CreditJournal{
		AccountId:        _account.Id,
		CreditAdjustment: adjustment,
		ScheduleId:       schedule.Id}
	log.Printf("creditJournal Debit %+v", entry)
	if err := Insert(entry); err != nil {
		return err
	}
	return account.AdjustScheduleCredits(_account, adjustment)
}
|
package flexo
import (
"fmt"
"net/http"
"github.com/gin-gonic/gin"
"gorm.io/gorm"
"github.com/SECCDC/flexo/model"
)
// getCategories responds with the full category list as JSON, or a generic
// 500 message when the database query fails.
func (s *Server) getCategories(c *gin.Context) {
	cats, err := queryCategories(s.DB)
	if err == nil {
		c.JSON(http.StatusOK, cats)
		return
	}
	c.JSON(http.StatusInternalServerError, gin.H{
		"message": "Encountered an error while processing",
	})
}
// queryCategories loads every Category row from the database.
// Note: a nil slice is returned when there are no rows (kept deliberately so
// downstream JSON encoding is unchanged).
func queryCategories(db *gorm.DB) ([]model.Category, error) {
	var categories []model.Category
	err := db.Find(&categories).Error
	return categories, err
}
// postCategory creates a new category from the JSON request body.
//
// Responds 400 with the binding error when the body is invalid, 500 when the
// database insert fails, and 200 with the stored category (including any
// DB-populated fields such as its ID) on success.
func (s *Server) postCategory(c *gin.Context) {
	var category model.Category
	if err := c.ShouldBindJSON(&category); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{
			"error": err.Error(),
		})
		return
	}
	res := s.DB.Create(&category)
	if res.Error != nil {
		fmt.Println(res.Error)
		c.JSON(http.StatusInternalServerError, "Couldn't create category")
		return
	}
	// Bug fix: the original wrote no success response at all, so clients got
	// an empty 200 body. Return the created record, mirroring getCategories.
	c.JSON(http.StatusOK, category)
}
|
package parser
import (
"github.com/stephens2424/php/ast"
"github.com/stephens2424/php/token"
)
// parseTopStmt parses a statement at the top level of a PHP file, handling
// the constructs only legal there (namespace, use, declare) before falling
// back to the general statement parser.
func (p *Parser) parseTopStmt() ast.Statement {
	switch p.current.Typ {
	case token.Namespace:
		// TODO check that this comes before anything but a declare statement
		p.expect(token.Identifier)
		p.namespace = ast.NewNamespace(p.current.Val)
		p.file.Namespace = p.namespace
		p.expectStmtEnd()
		// Namespace declarations produce no AST node of their own.
		return nil
	case token.Use:
		p.expect(token.Identifier)
		// Optional alias: "use Foo as Bar".
		if p.peek().Typ == token.AsOperator {
			p.expect(token.AsOperator)
			p.expect(token.Identifier)
		}
		p.expectStmtEnd()
		// We are ignoring this for now
		return nil
	case token.Declare:
		return p.parseDeclareBlock()
	default:
		return p.parseStmt()
	}
}
// parseStmt parses a single PHP statement beginning at the current token and
// returns its AST node. Several branches return nil (constructs that produce
// no node) or delegate to specialized sub-parsers (if/while/class/...).
func (p *Parser) parseStmt() ast.Statement {
	switch p.current.Typ {
	case token.BlockBegin:
		// Rewind so parseBlock sees the opening brace itself.
		p.backup()
		return p.parseBlock()
	case token.Global:
		// "global $a, $b;" — a comma-separated list of plain variables.
		p.next()
		g := &ast.GlobalDeclaration{
			Identifiers: make([]*ast.Variable, 0, 1),
		}
		for p.current.Typ == token.VariableOperator {
			variable, ok := p.parseVariable().(*ast.Variable)
			if !ok {
				p.errorf("global declarations must be of standard variables")
				break
			}
			g.Identifiers = append(g.Identifiers, variable)
			if p.peek().Typ != token.Comma {
				break
			}
			p.expect(token.Comma)
			p.next()
		}
		p.expectStmtEnd()
		return g
	case token.Static:
		// "static::" outside a class is an error, but the expression is still
		// parsed so parsing can continue past it.
		if p.peek().Typ == token.ScopeResolutionOperator {
			p.errorf("static keyword outside of class context")
			expr := p.parseExpression()
			p.expectStmtEnd()
			return expr
		}
		// "static $a = <literal>, $b;" — function-scoped static variables.
		s := &ast.StaticVariableDeclaration{Declarations: make([]ast.Dynamic, 0)}
		for {
			p.next()
			v, ok := p.parseVariable().(*ast.Variable)
			if !ok {
				p.errorf("global static declaration must be a variable")
				return nil
			}
			if _, ok := v.Name.(*ast.Identifier); !ok {
				p.errorf("static variable declarations must not be dynamic")
			}
			// check if there's an initial assignment
			if p.peek().Typ == token.AssignmentOperator {
				p.expect(token.AssignmentOperator)
				op := p.current.Val
				// Initializers must be constant: a literal or an array.
				p.expect(token.Null, token.StringLiteral, token.BooleanLiteral, token.NumberLiteral, token.Array)
				switch p.current.Typ {
				case token.Array:
					s.Declarations = append(s.Declarations, &ast.AssignmentExpr{Assignee: v, Value: p.parseArrayDeclaration(), Operator: op})
				default:
					s.Declarations = append(s.Declarations, &ast.AssignmentExpr{Assignee: v, Value: p.parseLiteral(), Operator: op})
				}
			} else {
				s.Declarations = append(s.Declarations, v)
			}
			if p.peek().Typ != token.Comma {
				break
			}
			p.next()
		}
		p.expectStmtEnd()
		return s
	case token.VariableOperator, token.UnaryOperator:
		// An expression statement starting with "$..." or a unary operator.
		expr := ast.ExprStmt{Expr: p.parseExpression()}
		p.expectStmtEnd()
		return expr
	case token.Print:
		// "print expr" — parentheses are optional, but if opened must close.
		requireParen := false
		if p.peek().Typ == token.OpenParen {
			p.expect(token.OpenParen)
			requireParen = true
		}
		stmt := ast.Echo(p.parseNextExpression())
		if requireParen {
			p.expect(token.CloseParen)
		}
		p.expectStmtEnd()
		return stmt
	case token.Function:
		return p.parseFunctionStmt(false)
	case token.PHPEnd:
		// "?>" — leaving PHP mode. Raw HTML until the next "<?php" becomes
		// an echo of a string literal.
		if p.peek().Typ == token.EOF {
			return nil
		}
		var expr ast.Statement
		if p.accept(token.HTML) {
			expr = ast.Echo(&ast.Literal{Type: ast.String, Value: p.current.Val})
		}
		p.next()
		if p.current.Typ != token.EOF {
			p.expectCurrent(token.PHPBegin)
		}
		return expr
	case token.Echo:
		// "echo expr[, expr...];"
		exprs := []ast.Expr{
			p.parseNextExpression(),
		}
		for p.peek().Typ == token.Comma {
			p.expect(token.Comma)
			exprs = append(exprs, p.parseNextExpression())
		}
		p.expectStmtEnd()
		echo := ast.Echo(exprs...)
		return echo
	case token.If:
		return p.parseIf()
	case token.While:
		return p.parseWhile()
	case token.Do:
		return p.parseDo()
	case token.For:
		return p.parseFor()
	case token.Foreach:
		return p.parseForeach()
	case token.Switch:
		return p.parseSwitch()
	case token.Abstract, token.Final, token.Class:
		return p.parseClass()
	case token.Interface:
		return p.parseInterface()
	case token.Return:
		// "return [expr];"
		p.next()
		stmt := &ast.ReturnStmt{}
		if p.current.Typ != token.StatementEnd {
			stmt.Expr = p.parseExpression()
			p.expectStmtEnd()
		}
		return stmt
	case token.Break:
		// "break [expr];"
		p.next()
		stmt := &ast.BreakStmt{}
		if p.current.Typ != token.StatementEnd {
			stmt.Expr = p.parseExpression()
			p.expectStmtEnd()
		}
		return stmt
	case token.Continue:
		// "continue [expr];"
		p.next()
		stmt := &ast.ContinueStmt{}
		if p.current.Typ != token.StatementEnd {
			stmt.Expr = p.parseExpression()
			p.expectStmtEnd()
		}
		return stmt
	case token.Throw:
		stmt := ast.ThrowStmt{Expr: p.parseNextExpression()}
		p.expectStmtEnd()
		return stmt
	case token.Exit:
		// "exit", "exit()" or "exit(expr)".
		stmt := &ast.ExitStmt{}
		if p.peek().Typ == token.OpenParen {
			p.expect(token.OpenParen)
			if p.peek().Typ != token.CloseParen {
				stmt.Expr = p.parseNextExpression()
			}
			p.expect(token.CloseParen)
		}
		p.expectStmtEnd()
		return stmt
	case token.Try:
		// "try { ... } catch (Type $var) { ... }" with one or more catches.
		stmt := &ast.TryStmt{}
		stmt.TryBlock = p.parseBlock()
		for p.expect(token.Catch); p.current.Typ == token.Catch; p.next() {
			caught := &ast.CatchStmt{}
			p.expect(token.OpenParen)
			p.expect(token.Identifier)
			caught.CatchType = p.current.Val
			p.expect(token.VariableOperator)
			p.expect(token.Identifier)
			caught.CatchVar = ast.NewVariable(p.current.Val)
			p.expect(token.CloseParen)
			caught.CatchBlock = p.parseBlock()
			stmt.CatchStmts = append(stmt.CatchStmts, caught)
		}
		// The loop advanced one token past the last catch; put it back.
		p.backup()
		return stmt
	case token.IgnoreErrorOperator:
		// Ignore this operator ("@") and parse the statement it prefixes.
		p.next()
		return p.parseStmt()
	case token.StatementEnd:
		// this is an empty statement
		return &ast.EmptyStatement{}
	default:
		// Anything else is treated as an expression statement.
		expr := p.parseExpression()
		if expr != nil {
			p.expectStmtEnd()
			return ast.ExprStmt{Expr: expr}
		}
		p.errorf("Found %s, statement or expression", p.current)
		return nil
	}
}
// expectStmtEnd consumes a statement terminator: a statement-end token is
// required unless the next token closes PHP mode, which implicitly ends the
// statement.
func (p *Parser) expectStmtEnd() {
	if p.peek().Typ == token.PHPEnd {
		return
	}
	p.expect(token.StatementEnd)
}
|
package common
import "go.uber.org/dig"
// UpdateActionOut contributes a single update callback to the dig container's
// "update_actions" value group. dt is presumably an elapsed-time step —
// TODO confirm its unit against the caller that invokes these actions.
type UpdateActionOut struct {
	dig.Out
	Action func(dt int64) `group:"update_actions"`
}

// UpdateActionsIn collects every callback contributed to the "update_actions"
// group so a consumer can invoke them all.
type UpdateActionsIn struct {
	dig.In
	Actions []func(dt int64) `group:"update_actions"`
}

// Pos is a 2D position.
type Pos struct {
	X, Y float32
}
|
package delete
import (
"fmt"
"os"
"path/filepath"
"strings"
platformK8s "github.com/dolittle/platform-api/pkg/platform/k8s"
"github.com/spf13/cobra"
)
// customerCMD prints, but does not execute, the shell commands an operator
// should run to delete a customer: removing each of the customer's application
// namespaces from the cluster, the on-disk storage folders, and the terraform
// resources. The customer id is the first positional argument; --directory
// points at the Operations repository root.
var customerCMD = &cobra.Command{
	Use:   "customer",
	Short: "Shows commands to aid in deleting a customer from the cluster",
	Long: `
go run main.go tools helpers commands delete-customer --directory="/Users/freshteapot/dolittle/git/Operations" 6677c2f0-9e2f-4d2b-beb5-50014fc8ad0c
`,
	// Robustness fix: Run reads args[0]; without this guard a missing
	// customer id caused an index-out-of-range panic instead of a usage error.
	Args: cobra.MinimumNArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		rootDir, _ := cmd.Flags().GetString("directory")
		rootDir = strings.TrimSuffix(rootDir, string(os.PathSeparator))
		platformApiDir := filepath.Join(rootDir, "Source", "V3", "platform-api")
		azureDir := filepath.Join(rootDir, "Source", "V3", "Azure")
		customerID := args[0]
		// storageInfo identifies one application storage folder laid out as
		// <platformApiDir>/<platform-env>/<customer-id>/<application-id>.
		type storageInfo struct {
			CustomerID          string
			ApplicationID       string
			PlatformEnvironment string
			Path                string
		}
		items := make([]storageInfo, 0)
		err := filepath.Walk(platformApiDir, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if !info.IsDir() {
				return nil
			}
			_path := strings.TrimPrefix(path, platformApiDir)
			_path = strings.TrimPrefix(_path, string(os.PathSeparator))
			parts := strings.Split(_path, string(os.PathSeparator))
			// Don't descend deeper than environment/customer/application.
			if len(parts) > 3 {
				return filepath.SkipDir
			}
			if !strings.Contains(path, customerID) {
				return nil
			}
			// Only exact application-level folders count.
			if len(parts) != 3 {
				return nil
			}
			items = append(items, storageInfo{
				PlatformEnvironment: parts[0],
				CustomerID:          parts[1],
				ApplicationID:       parts[2],
				Path:                path,
			})
			return nil
		})
		if err != nil {
			panic(err)
		}
		for _, item := range items {
			namespace := platformK8s.GetApplicationNamespace(item.ApplicationID)
			tfPrefix := fmt.Sprintf("customer_%s_%s", item.CustomerID, item.ApplicationID)
			cmds := []string{
				// Delete from cluster
				fmt.Sprintf(`kubectl delete namespace %s`, namespace),
				// Delete from storage
				fmt.Sprintf(`rm -r %s`, item.Path),
				// Delete from terraform
				fmt.Sprintf("cd %s", azureDir),
				"TODO delete from terraform",
				fmt.Sprintf(`terraform destroy -target="module.%s"`, tfPrefix),
				// Bug fix: the original format string ended with a stray '"'
				// (`rm -r %s/%s.tf"`), printing a command with an unbalanced
				// shell quote.
				fmt.Sprintf(`rm -r %s/%s.tf`, azureDir, tfPrefix),
				"terraform apply",
			}
			output := strings.Join(cmds, "\n")
			fmt.Println(output)
		}
	},
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package quickanswers
import (
"context"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/event"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/quickanswers"
"chromiumos/tast/testing"
)
// init registers the UnitConversion test with the tast framework, with one
// parameterized variant per browser type (ash Chrome and lacros).
func init() {
	testing.AddTest(&testing.Test{
		Func:         UnitConversion,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Test Quick Answers unit conversion feature",
		Contacts: []string{
			"updowndota@google.com",
			"croissant-eng@google.com",
			"chromeos-sw-engprod@google.com",
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Params: []testing.Param{{
			// Default variant: ash Chrome.
			Fixture: "chromeLoggedIn",
			Val:     browser.TypeAsh,
		}, {
			// Lacros browser variant.
			Name:              "lacros",
			Fixture:           "lacros",
			ExtraSoftwareDeps: []string{"lacros"},
			Val:               browser.TypeLacros,
		}},
	})
}
// UnitConversion tests Quick Answers unit conversion feature.
func UnitConversion(ctx context.Context, s *testing.State) {
cleanupCtx := ctx
ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
defer cancel()
cr := s.FixtValue().(chrome.HasChrome).Chrome()
tconn, err := cr.TestAPIConn(ctx)
if err != nil {
s.Fatal("Failed to create Test API connection: ", err)
}
ui := uiauto.New(tconn)
if err := quickanswers.SetPrefValue(ctx, tconn, "settings.quick_answers.enabled", true); err != nil {
s.Fatal("Failed to enable Quick Answers: ", err)
}
// Setup a browser.
bt := s.Param().(browser.Type)
br, closeBrowser, err := browserfixt.SetUp(ctx, cr, bt)
if err != nil {
s.Fatal("Failed to open the browser: ", err)
}
defer closeBrowser(cleanupCtx)
// Open page with source units on it.
conn, err := br.NewConn(ctx, "https://google.com/search?q=50+kg")
if err != nil {
s.Fatal("Failed to create new Chrome connection: ", err)
}
defer conn.Close()
defer conn.CloseTarget(ctx)
// Wait for the source units to appear.
units := nodewith.Name("50 kg").Role(role.StaticText).First()
if err := ui.WaitUntilExists(units)(ctx); err != nil {
s.Fatal("Failed to wait for units to load: ", err)
}
// Select the units and setup watcher to wait for text selection event.
if err := ui.WaitForEvent(nodewith.Root(),
event.TextSelectionChanged,
ui.Select(units, 0 /*startOffset*/, units, 5 /*endOffset*/))(ctx); err != nil {
s.Fatal("Failed to select units: ", err)
}
// Right click the selected units and ensure the Quick Answers UI shows up with the conversion result in pounds.
quickAnswers := nodewith.ClassName("QuickAnswersView")
unitConversionResult := nodewith.NameContaining("110.231").ClassName("QuickAnswersTextLabel")
if err := uiauto.Combine("Show context menu",
ui.RightClick(units),
ui.WaitUntilExists(quickAnswers),
ui.WaitUntilExists(unitConversionResult))(ctx); err != nil {
s.Fatal("Quick Answers result not showing up: ", err)
}
// Dismiss the context menu and ensure the Quick Answers UI also dismiss.
if err := uiauto.Combine("Dismiss context menu",
ui.LeftClick(units),
ui.WaitUntilGone(quickAnswers))(ctx); err != nil {
s.Fatal("Quick Answers result not dismissed: ", err)
}
}
|
package color
import (
"fmt"
"github.com/bchadwic/gh-graph/pkg/stats"
lg "github.com/charmbracelet/lipgloss"
)
// Tuning constants for the contribution-graph color scale.
const (
	// Catagories is the number of color buckets. (Sic: historical
	// misspelling of "Categories", kept because the name is exported.)
	Catagories = 5
	// GroupFormRate controls how quickly successive bucket limits shrink:
	// each lower limit is roughly the one above divided by this rate.
	GroupFormRate = 2
	// DefaultBestDay is the upper cap applied to the top bucket limit.
	DefaultBestDay = 100
)
// ColorPalette maps contribution counts to display colors: Limits[i] is the
// exclusive upper bound of bucket i and Colors[i] is that bucket's color.
type ColorPalette struct {
	Colors []Color
	Limits []int
}

// Color is an 8-bit-per-channel RGB color.
type Color struct {
	R uint8
	G uint8
	B uint8
}
// Initialize fills in the palette's bucket limits and colors from the user's
// contribution stats and the terminal background, returning cp for chaining.
func (cp *ColorPalette) Initialize(stats *stats.Stats) *ColorPalette {
	cp.Limits = make([]int, Catagories)
	// Top bucket limit is the best day's count, capped at DefaultBestDay.
	max := DefaultBestDay
	if max > stats.BestDay {
		max = stats.BestDay
	}
	// Work downwards: each lower limit is the one above divided by
	// GroupFormRate, plus one (keeps limits positive for small maxima).
	curr := max
	for i := Catagories - 1; i >= 0; i-- {
		cp.Limits[i] = curr
		curr = (curr / GroupFormRate) + 1
	}
	// Build a green ramp (G = 50..250 in steps of 50). On light backgrounds
	// the same shades are appended in reverse order.
	// NOTE(review): both branches generate identical shades and only the
	// append order differs — confirm the inversion for light backgrounds is
	// intentional.
	if lg.HasDarkBackground() {
		for i := 0; i < Catagories; i++ {
			cp.Colors = append(cp.Colors, Color{
				R: 30,
				G: uint8(i+1) * 50,
				B: 30,
			})
		}
	} else {
		for i := Catagories - 1; i >= 0; i-- {
			cp.Colors = append(cp.Colors, Color{
				R: 30,
				G: uint8(i+1) * 50,
				B: 30,
			})
		}
	}
	return cp
}
// GetColor maps a contribution count to the hex color of the first bucket
// whose limit exceeds it; counts at or above every limit get the last color.
func (cp *ColorPalette) GetColor(colorIndex int) string {
	for bucket := 0; bucket < len(cp.Limits); bucket++ {
		if colorIndex < cp.Limits[bucket] {
			return cp.Colors[bucket].Hex()
		}
	}
	return cp.Colors[Catagories-1].Hex()
}
// Hex renders the color as a lowercase "#rrggbb" hex string.
func (c *Color) Hex() string {
	components := [3]uint8{c.R, c.G, c.B}
	out := "#"
	for _, v := range components {
		out += fmt.Sprintf("%02x", v)
	}
	return out
}
|
package main
import (
"io"
"os"
"github.com/yuyamada/atcoder/lib"
)
// main reads the problem input from stdin and writes the answer to stdout.
func main() {
	solve(os.Stdin, os.Stdout)
}
// solve reads a single integer n from r and writes solver(n) to w.
func solve(r io.Reader, w io.Writer) {
	// Idiom fix: the local was originally named "io", shadowing the imported
	// standard-library io package used in this signature; renamed for clarity.
	rw := lib.NewIo(r, w)
	defer rw.Flush()
	n := rw.NextInt()
	ans := solver(n)
	rw.Println(ans)
}
// solver computes the answer for n by raising a fixed 3x3 transition matrix
// to the (n-1)th power and applying it to an initial column vector, returning
// the top entry — presumably evaluating a linear recurrence over three states
// (TODO: confirm the recurrence against the problem statement).
func solver(n int) int {
	a := lib.Matrix([][]int{{2, 1, 0}, {2, 2, 1}, {0, 1, 2}})
	b := lib.Matrix([][]int{{2}, {2}, {0}})
	return a.Pow(n - 1).Mul(b)[0][0]
}
|
package rawdatalog_test
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/sirupsen/logrus"
logrusTest "github.com/sirupsen/logrus/hooks/test"
"github.com/dolittle/platform-api/pkg/dolittle/k8s"
"github.com/dolittle/platform-api/pkg/platform"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
netv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/rest"
"k8s.io/client-go/testing"
platformK8s "github.com/dolittle/platform-api/pkg/platform/k8s"
. "github.com/dolittle/platform-api/pkg/platform/microservice/rawdatalog"
)
var _ = Describe("Repo", func() {
var (
clientSet *fake.Clientset
config *rest.Config
k8sRepo platformK8s.K8sPlatformRepo
logger *logrus.Logger
rawDataLogRepo RawDataLogIngestorRepo
isProduction bool
)
BeforeEach(func() {
logger, _ = logrusTest.NewNullLogger()
clientSet = fake.NewSimpleClientset()
config = &rest.Config{}
k8sRepo = platformK8s.NewK8sRepo(clientSet, config, logger)
isProduction = false
rawDataLogRepo = NewRawDataLogIngestorRepo(isProduction, k8sRepo, clientSet, logger)
})
Describe("when creating RawDataLog", func() {
var (
namespace string
customer k8s.Tenant
application k8s.Application
input platform.HttpInputRawDataLogIngestorInfo
err error
)
BeforeEach(func() {
namespace = "application-6db1278e-da39-481a-8474-e0ef6bdc2f6e"
customer = k8s.Tenant{
Name: "LydiaBall",
ID: "c6c72dab-a770-47d5-b85d-2777d2ac0922",
}
application = k8s.Application{
Name: "CordeliaChavez",
ID: "6db1278e-da39-481a-8474-e0ef6bdc2f6e",
}
input = platform.HttpInputRawDataLogIngestorInfo{
MicroserviceBase: platform.MicroserviceBase{
Environment: "LoisMay",
Name: "ErnestBush",
Dolittle: platform.HttpInputDolittle{
ApplicationID: application.ID,
CustomerID: customer.ID,
MicroserviceID: "b9a9211e-f118-4ea0-9eb9-8d0d8f33c753",
},
Kind: platform.MicroserviceKindRawDataLogIngestor,
},
Extra: platform.HttpInputRawDataLogIngestorExtra{
Ingress: platform.HttpInputSimpleIngress{
Path: "/api/not-webhooks-just-to-be-sure",
Pathtype: "SpecialTypeNotActuallySupported",
},
},
}
})
Context("for an application that does not have any ingresses", func() {
BeforeEach(func() {
customerTenants := make([]platform.CustomerTenantInfo, 0)
err = rawDataLogRepo.Create(namespace, customer, application, customerTenants, input)
})
It("should fail with an error", func() {
Expect(err).ToNot(BeNil())
})
It("should not create any resources", func() {
Expect(getCreateActions(clientSet)).To(BeEmpty())
})
})
Context("and an application exists but no other resources", func() {
var (
natsConfigMap *corev1.ConfigMap
natsService *corev1.Service
natsStatefulSet *appsv1.StatefulSet
stanConfigMap *corev1.ConfigMap
stanService *corev1.Service
stanStatefulSet *appsv1.StatefulSet
rawDataLogIngestorIngress *netv1.Ingress
rawDataLogDeployment *appsv1.Deployment
customerTenantID string
)
BeforeEach(func() {
customerTenantID = "f4679b71-1215-4a60-8483-53b0d5f2bb47"
customerTenants := []platform.CustomerTenantInfo{
{
CustomerTenantID: customerTenantID,
Hosts: []platform.CustomerTenantHost{
{
Host: "some-fancy.domain.name",
SecretName: "some-fancy-certificate",
},
},
},
}
err = rawDataLogRepo.Create(namespace, customer, application, customerTenants, input)
})
// NATS ConfigMap
It("should create a configmap for nats named 'loismay-nats'", func() {
object := getCreatedObject(clientSet, "ConfigMap", "loismay-nats")
Expect(object).ToNot(BeNil())
natsConfigMap = object.(*corev1.ConfigMap)
})
It("should create a configmap for nats with the correct ApiVersion", func() {
Expect(natsConfigMap.APIVersion).To(Equal("v1"))
})
It("should create a configmap for nats with the correct Kind", func() {
Expect(natsConfigMap.Kind).To(Equal("ConfigMap"))
})
It("should create a configmap for nats with the correct tenant-id annotation", func() {
Expect(natsConfigMap.Annotations["dolittle.io/tenant-id"]).To(Equal(customer.ID))
})
It("should create a configmap for nats with the correct application-id annotation", func() {
Expect(natsConfigMap.Annotations["dolittle.io/application-id"]).To(Equal(application.ID))
})
It("should create a configmap for nats with the correct tenant label", func() {
Expect(natsConfigMap.Labels["tenant"]).To(Equal(customer.Name))
})
It("should create a configmap for nats with the correct application label", func() {
Expect(natsConfigMap.Labels["application"]).To(Equal(application.Name))
})
It("should create a configmap for nats with the correct environment label", func() {
Expect(natsConfigMap.Labels["environment"]).To(Equal(input.Environment))
})
It("should create a configmap for nats with the correct infrastructure label", func() {
Expect(natsConfigMap.Labels["infrastructure"]).To(Equal("Nats"))
})
It("should create a configmap for nats without a microservice label", func() {
Expect(natsConfigMap.Labels["microservice"]).To(Equal(""))
})
It("should create a configmap for nats with 'nats.conf'", func() {
Expect(natsConfigMap.Data["nats.conf"]).To(Equal(`
pid_file: "/var/run/nats/nats.pid"
http: 8222
`))
})
// NATS Service
It("should create a service for nats named 'loismay-nats'", func() {
object := getCreatedObject(clientSet, "Service", "loismay-nats")
Expect(object).ToNot(BeNil())
natsService = object.(*corev1.Service)
})
It("should create a service for nats with the correct ApiVersion", func() {
Expect(natsService.APIVersion).To(Equal("v1"))
})
It("should create a service for nats with the correct Kind", func() {
Expect(natsService.Kind).To(Equal("Service"))
})
It("should create a service for nats that is headless", func() {
Expect(natsService.Spec.ClusterIP).To(Equal("None"))
})
It("should create a service for nats with the correct tenant-id annotation", func() {
Expect(natsService.Annotations["dolittle.io/tenant-id"]).To(Equal(customer.ID))
})
It("should create a service for nats with the correct application-id annotation", func() {
Expect(natsService.Annotations["dolittle.io/application-id"]).To(Equal(application.ID))
})
It("should create a service for nats with the correct tenant label", func() {
Expect(natsService.Labels["tenant"]).To(Equal(customer.Name))
})
It("should create a service for nats with the correct application label", func() {
Expect(natsService.Labels["application"]).To(Equal(application.Name))
})
It("should create a service for nats with the correct environment label", func() {
Expect(natsService.Labels["environment"]).To(Equal(input.Environment))
})
It("should create a service for nats with the correct infrastructure label", func() {
Expect(natsService.Labels["infrastructure"]).To(Equal("Nats"))
})
It("should create a service for nats without a microservice label", func() {
Expect(natsService.Labels["microservice"]).To(Equal(""))
})
It("should create a service for nats with the correct tenant label selector", func() {
Expect(natsService.Spec.Selector["tenant"]).To(Equal(customer.Name))
})
It("should create a service for nats with the correct application label selector", func() {
Expect(natsService.Spec.Selector["application"]).To(Equal(application.Name))
})
It("should create a service for nats with the correct environment label selector", func() {
Expect(natsService.Spec.Selector["environment"]).To(Equal(input.Environment))
})
It("should create a service for nats with the correct infrastructure label selector", func() {
Expect(natsService.Spec.Selector["infrastructure"]).To(Equal("Nats"))
})
It("should create a service for nats without a microservice label selector", func() {
Expect(natsService.Spec.Selector["microservice"]).To(Equal(""))
})
It("should create a service for nats with the 'client' port exposed", func() {
Expect(natsService.Spec.Ports[0].Name).To(Equal("client"))
Expect(natsService.Spec.Ports[0].Port).To(Equal(int32(4222)))
Expect(natsService.Spec.Ports[0].TargetPort.StrVal).To(Equal("client"))
})
It("should create a service for nats with the 'cluster' port exposed", func() {
Expect(natsService.Spec.Ports[1].Name).To(Equal("cluster"))
Expect(natsService.Spec.Ports[1].Port).To(Equal(int32(6222)))
Expect(natsService.Spec.Ports[1].TargetPort.StrVal).To(Equal("cluster"))
})
It("should create a service for nats with the 'monitor' port exposed", func() {
Expect(natsService.Spec.Ports[2].Name).To(Equal("monitor"))
Expect(natsService.Spec.Ports[2].Port).To(Equal(int32(8222)))
Expect(natsService.Spec.Ports[2].TargetPort.StrVal).To(Equal("monitor"))
})
It("should create a service for nats with the 'metrics' port exposed", func() {
Expect(natsService.Spec.Ports[3].Name).To(Equal("metrics"))
Expect(natsService.Spec.Ports[3].Port).To(Equal(int32(7777)))
Expect(natsService.Spec.Ports[3].TargetPort.StrVal).To(Equal("metrics"))
})
It("should create a service for nats with the 'leafnodes' port exposed", func() {
Expect(natsService.Spec.Ports[4].Name).To(Equal("leafnodes"))
Expect(natsService.Spec.Ports[4].Port).To(Equal(int32(7422)))
Expect(natsService.Spec.Ports[4].TargetPort.StrVal).To(Equal("leafnodes"))
})
It("should create a service for nats with the 'gateways' port exposed", func() {
Expect(natsService.Spec.Ports[5].Name).To(Equal("gateways"))
Expect(natsService.Spec.Ports[5].Port).To(Equal(int32(7522)))
})
// NATS StatefulSet
It("should create a statefulset for nats named 'loismay-nats'", func() {
object := getCreatedObject(clientSet, "StatefulSet", "loismay-nats")
Expect(object).ToNot(BeNil())
natsStatefulSet = object.(*appsv1.StatefulSet)
})
It("should create a statefulset for nats with the correct ApiVersion", func() {
Expect(natsStatefulSet.APIVersion).To(Equal("apps/v1"))
})
It("should create a statefulset for nats with the correct Kind", func() {
Expect(natsStatefulSet.Kind).To(Equal("StatefulSet"))
})
It("should create a statefulset for nats with the correct tenant-id annotation", func() {
Expect(natsStatefulSet.Annotations["dolittle.io/tenant-id"]).To(Equal(customer.ID))
})
It("should create a statefulset for nats with the correct application-id annotation", func() {
Expect(natsStatefulSet.Annotations["dolittle.io/application-id"]).To(Equal(application.ID))
})
It("should create a statefulset for nats with the correct tenant label", func() {
Expect(natsStatefulSet.Labels["tenant"]).To(Equal(customer.Name))
})
It("should create a statefulset for nats with the correct application label", func() {
Expect(natsStatefulSet.Labels["application"]).To(Equal(application.Name))
})
It("should create a statefulset for nats with the correct environment label", func() {
Expect(natsStatefulSet.Labels["environment"]).To(Equal(input.Environment))
})
It("should create a statefulset for nats with the correct infrastructure label", func() {
Expect(natsStatefulSet.Labels["infrastructure"]).To(Equal("Nats"))
})
It("should create a statefulset for nats without a microservice label", func() {
Expect(natsStatefulSet.Labels["microservice"]).To(Equal(""))
})
It("should create a statefulset for nats with the correct tenant label selector", func() {
Expect(natsStatefulSet.Spec.Selector.MatchLabels["tenant"]).To(Equal(customer.Name))
})
It("should create a statefulset for nats with the correct application label selector", func() {
Expect(natsStatefulSet.Spec.Selector.MatchLabels["application"]).To(Equal(application.Name))
})
It("should create a statefulset for nats with the correct environment label selector", func() {
Expect(natsStatefulSet.Spec.Selector.MatchLabels["environment"]).To(Equal(input.Environment))
})
It("should create a statefulset for nats with the correct environment label selector", func() {
Expect(natsStatefulSet.Spec.Selector.MatchLabels["infrastructure"]).To(Equal("Nats"))
})
It("should create a statefulset for nats without a microservice label selector", func() {
Expect(natsStatefulSet.Spec.Selector.MatchLabels["microservice"]).To(Equal(""))
})
It("should create a statefulset for nats with one replica", func() {
Expect(*natsStatefulSet.Spec.Replicas).To(Equal(int32(1)))
})
It("should create a pod template for nats with the correct tenant-id annotation", func() {
Expect(natsStatefulSet.Spec.Template.Annotations["dolittle.io/tenant-id"]).To(Equal(customer.ID))
})
It("should create a pod template for nats with the correct application-id annotation", func() {
Expect(natsStatefulSet.Spec.Template.Annotations["dolittle.io/application-id"]).To(Equal(application.ID))
})
It("should create a pod template for nats with the correct tenant label", func() {
Expect(natsStatefulSet.Spec.Template.Labels["tenant"]).To(Equal(customer.Name))
})
It("should create a pod template for nats with the correct application label", func() {
Expect(natsStatefulSet.Spec.Template.Labels["application"]).To(Equal(application.Name))
})
It("should create a pod template for nats with the correct environment label", func() {
Expect(natsStatefulSet.Spec.Template.Labels["environment"]).To(Equal(input.Environment))
})
It("should create a pod template for nats with the correct infrastructure label", func() {
Expect(natsStatefulSet.Spec.Template.Labels["infrastructure"]).To(Equal("Nats"))
})
It("should create a pod template for nats without a microservice label", func() {
Expect(natsStatefulSet.Spec.Template.Labels["microservice"]).To(Equal(""))
})
It("should create a pod template for nats that shares the process namespace", func() {
Expect(*natsStatefulSet.Spec.Template.Spec.ShareProcessNamespace).To(Equal(true))
})
It("should create a pod template for nats with the nats config map as a volume", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Volumes[0].Name).To(Equal("config-volume"))
Expect(natsStatefulSet.Spec.Template.Spec.Volumes[0].ConfigMap.Name).To(Equal("loismay-nats"))
})
It("should create a pod template for nats with a pid volume", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Volumes[1].Name).To(Equal("pid"))
Expect(natsStatefulSet.Spec.Template.Spec.Volumes[1].EmptyDir).ToNot(Equal(""))
})
It("should create a container for nats named 'nats'", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Name).To(Equal("nats"))
})
It("should create a container for nats with the correct image", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Image).To(Equal("nats:2.1.7-alpine3.11"))
})
It("should create a container for nats with the correct command", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Command[0]).To(Equal("nats-server"))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Command[1]).To(Equal("--config"))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Command[2]).To(Equal("/etc/nats-config/nats.conf"))
})
It("should create a container for nats with the 'client' port exposed", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort).To(Equal(int32(4222)))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Ports[0].Name).To(Equal("client"))
})
It("should create a container for nats with the 'cluster' port exposed", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Ports[1].ContainerPort).To(Equal(int32(6222)))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Ports[1].Name).To(Equal("cluster"))
})
It("should create a container for nats with the 'monitor' port exposed", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Ports[2].ContainerPort).To(Equal(int32(8222)))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Ports[2].Name).To(Equal("monitor"))
})
It("should create a container for nats with the 'metrics' port exposed", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Ports[3].ContainerPort).To(Equal(int32(7777)))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Ports[3].Name).To(Equal("metrics"))
})
It("should create a container for nats with the 'leafnodes' port exposed", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Ports[4].ContainerPort).To(Equal(int32(7422)))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Ports[4].Name).To(Equal("leafnodes"))
})
It("should create a container for nats with the 'POD_NAME' environmental variable set", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Env[0].Name).To(Equal("POD_NAME"))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Env[0].ValueFrom.FieldRef.FieldPath).To(Equal("metadata.name"))
})
It("should create a container for nats with the 'POD_NAMESPACE' environmental variable set", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Env[1].Name).To(Equal("POD_NAMESPACE"))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath).To(Equal("metadata.namespace"))
})
It("should create a container for nats with the 'CLUSTER_ADVERTISE' environmental variable set", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Env[2].Name).To(Equal("CLUSTER_ADVERTISE"))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Env[2].Value).To(Equal("$(POD_NAME).loismay-nats.$(POD_NAMESPACE).svc.cluster.local"))
})
It("should create a container for nats with '/etc/nats-config' mounted", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath).To(Equal("/etc/nats-config"))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name).To(Equal("config-volume"))
})
It("should create a container for nats with '/var/run/nats' mounted", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath).To(Equal("/var/run/nats"))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name).To(Equal("pid"))
})
It("should create a container for nats with a liveness probe", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].LivenessProbe.HTTPGet.Path).To(Equal("/"))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].LivenessProbe.HTTPGet.Port.StrVal).To(Equal("monitor"))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].LivenessProbe.InitialDelaySeconds).To(Equal(int32(10)))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].LivenessProbe.TimeoutSeconds).To(Equal(int32(5)))
})
It("should create a container for nats with a readiness probe", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].ReadinessProbe.HTTPGet.Path).To(Equal("/"))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].ReadinessProbe.HTTPGet.Port.StrVal).To(Equal("monitor"))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].ReadinessProbe.InitialDelaySeconds).To(Equal(int32(10)))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].ReadinessProbe.TimeoutSeconds).To(Equal(int32(5)))
})
It("should create a container for nats with a prestop lifecycle command to shut it down gracefully", func() {
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Lifecycle.PreStop.Exec.Command[0]).To(Equal("/bin/sh"))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Lifecycle.PreStop.Exec.Command[1]).To(Equal("-c"))
Expect(natsStatefulSet.Spec.Template.Spec.Containers[0].Lifecycle.PreStop.Exec.Command[2]).To(Equal("/nats-server -sl=ldm=/var/run/nats/nats.pid && /bin/sleep 60"))
})
// STAN ConfigMap
It("should create a configmap for stan named 'loismay-stan'", func() {
object := getCreatedObject(clientSet, "ConfigMap", "loismay-stan")
Expect(object).ToNot(BeNil())
stanConfigMap = object.(*corev1.ConfigMap)
})
It("should create a configmap for stan with the correct ApiVersion", func() {
Expect(stanConfigMap.APIVersion).To(Equal("v1"))
})
It("should create a configmap for stan with the correct Kind", func() {
Expect(stanConfigMap.Kind).To(Equal("ConfigMap"))
})
It("should create a configmap for stan with the correct tenant-id annotation", func() {
Expect(stanConfigMap.Annotations["dolittle.io/tenant-id"]).To(Equal(customer.ID))
})
It("should create a configmap for stan with the correct application-id annotation", func() {
Expect(stanConfigMap.Annotations["dolittle.io/application-id"]).To(Equal(application.ID))
})
It("should create a configmap for stan with the correct tenant label", func() {
Expect(stanConfigMap.Labels["tenant"]).To(Equal(customer.Name))
})
It("should create a configmap for stan with the correct application label", func() {
Expect(stanConfigMap.Labels["application"]).To(Equal(application.Name))
})
It("should create a configmap for stan with the correct environment label", func() {
Expect(stanConfigMap.Labels["environment"]).To(Equal(input.Environment))
})
It("should create a configmap for stan with the correct infrastructure label", func() {
Expect(stanConfigMap.Labels["infrastructure"]).To(Equal("Stan"))
})
It("should create a configmap for stan without a microservice label", func() {
Expect(stanConfigMap.Labels["microservice"]).To(Equal(""))
})
It("should create a configmap for stan with 'stan.conf'", func() {
Expect(stanConfigMap.Data["stan.conf"]).To(Equal(`
port: 4222
http: 8222
streaming {
ns: "nats://loismay-nats:4222"
id: stan
store: file
dir: datastore
}
`))
})
// STAN Service
// Fixed: description previously said 'loismay-nats' while the assertion
// fetches the Service named 'loismay-stan'.
It("should create a service for stan named 'loismay-stan'", func() {
	object := getCreatedObject(clientSet, "Service", "loismay-stan")
	Expect(object).ToNot(BeNil())
	stanService = object.(*corev1.Service)
})
It("should create a service for stan with the correct ApiVersion", func() {
Expect(stanService.APIVersion).To(Equal("v1"))
})
It("should create a service for stan with the correct Kind", func() {
Expect(stanService.Kind).To(Equal("Service"))
})
It("should create a service for stan that is headless", func() {
Expect(stanService.Spec.ClusterIP).To(Equal("None"))
})
It("should create a service for stan with the correct tenant-id annotation", func() {
Expect(stanService.Annotations["dolittle.io/tenant-id"]).To(Equal(customer.ID))
})
It("should create a service for stan with the correct application-id annotation", func() {
Expect(stanService.Annotations["dolittle.io/application-id"]).To(Equal(application.ID))
})
It("should create a service for stan with the correct tenant label", func() {
Expect(stanService.Labels["tenant"]).To(Equal(customer.Name))
})
It("should create a service for stan with the correct application label", func() {
Expect(stanService.Labels["application"]).To(Equal(application.Name))
})
It("should create a service for stan with the correct environment label", func() {
Expect(stanService.Labels["environment"]).To(Equal(input.Environment))
})
It("should create a service for stan with the correct infrastructure label", func() {
Expect(stanService.Labels["infrastructure"]).To(Equal("Stan"))
})
It("should create a service for stan without a microservice label", func() {
Expect(stanService.Labels["microservice"]).To(Equal(""))
})
It("should create a service for stan with the correct tenant label selector", func() {
Expect(stanService.Spec.Selector["tenant"]).To(Equal(customer.Name))
})
It("should create a service for stan with the correct application label selector", func() {
Expect(stanService.Spec.Selector["application"]).To(Equal(application.Name))
})
It("should create a service for stan with the correct environment label selector", func() {
Expect(stanService.Spec.Selector["environment"]).To(Equal(input.Environment))
})
It("should create a service for stan with the correct infrastructure label selector", func() {
Expect(stanService.Spec.Selector["infrastructure"]).To(Equal("Stan"))
})
It("should create a service for stan without a microservice label selector", func() {
Expect(stanService.Spec.Selector["microservice"]).To(Equal(""))
})
It("should create a service for stan with the 'metrics' port exposed", func() {
Expect(stanService.Spec.Ports[0].Name).To(Equal("metrics"))
Expect(stanService.Spec.Ports[0].Port).To(Equal(int32(7777)))
Expect(stanService.Spec.Ports[0].TargetPort.StrVal).To(Equal("metrics"))
})
// STAN StatefulSet
It("should create a statefulset for stan named 'loismay-stan'", func() {
object := getCreatedObject(clientSet, "StatefulSet", "loismay-stan")
Expect(object).ToNot(BeNil())
stanStatefulSet = object.(*appsv1.StatefulSet)
})
It("should create a statefulset for stan with the correct ApiVersion", func() {
Expect(stanStatefulSet.APIVersion).To(Equal("apps/v1"))
})
It("should create a statefulset for stan with the correct Kind", func() {
Expect(stanStatefulSet.Kind).To(Equal("StatefulSet"))
})
It("should create a statefulset for stan with the correct tenant-id annotation", func() {
Expect(stanStatefulSet.Annotations["dolittle.io/tenant-id"]).To(Equal(customer.ID))
})
It("should create a statefulset for stan with the correct application-id annotation", func() {
Expect(stanStatefulSet.Annotations["dolittle.io/application-id"]).To(Equal(application.ID))
})
It("should create a statefulset for stan with the correct tenant label", func() {
Expect(stanStatefulSet.Labels["tenant"]).To(Equal(customer.Name))
})
It("should create a statefulset for stan with the correct application label", func() {
Expect(stanStatefulSet.Labels["application"]).To(Equal(application.Name))
})
It("should create a statefulset for stan with the correct environment label", func() {
Expect(stanStatefulSet.Labels["environment"]).To(Equal(input.Environment))
})
It("should create a statefulset for stan with the correct infrastructure label", func() {
Expect(stanStatefulSet.Labels["infrastructure"]).To(Equal("Stan"))
})
It("should create a statefulset for stan without a microservice label", func() {
Expect(stanStatefulSet.Labels["microservice"]).To(Equal(""))
})
It("should create a statefulset for stan with the correct tenant label selector", func() {
Expect(stanStatefulSet.Spec.Selector.MatchLabels["tenant"]).To(Equal(customer.Name))
})
It("should create a statefulset for stan with the correct application label selector", func() {
Expect(stanStatefulSet.Spec.Selector.MatchLabels["application"]).To(Equal(application.Name))
})
It("should create a statefulset for stan with the correct environment label selector", func() {
Expect(stanStatefulSet.Spec.Selector.MatchLabels["environment"]).To(Equal(input.Environment))
})
It("should create a statefulset for stan with the correct infrastructure label selector", func() {
Expect(stanStatefulSet.Spec.Selector.MatchLabels["infrastructure"]).To(Equal("Stan"))
})
It("should create a statefulset for stan without a microservice label selector", func() {
Expect(stanStatefulSet.Spec.Selector.MatchLabels["microservice"]).To(Equal(""))
})
It("should create a statefulset for stan with one replica", func() {
Expect(*stanStatefulSet.Spec.Replicas).To(Equal(int32(1)))
})
It("should create a pod template for stan with the correct tenant-id annotation", func() {
Expect(stanStatefulSet.Spec.Template.Annotations["dolittle.io/tenant-id"]).To(Equal(customer.ID))
})
It("should create a pod template for stan with the correct application-id annotation", func() {
Expect(stanStatefulSet.Spec.Template.Annotations["dolittle.io/application-id"]).To(Equal(application.ID))
})
It("should create a pod template for stan with the correct tenant label", func() {
Expect(stanStatefulSet.Spec.Template.Labels["tenant"]).To(Equal(customer.Name))
})
It("should create a pod template for stan with the correct application label", func() {
Expect(stanStatefulSet.Spec.Template.Labels["application"]).To(Equal(application.Name))
})
It("should create a pod template for stan with the correct environment label", func() {
Expect(stanStatefulSet.Spec.Template.Labels["environment"]).To(Equal(input.Environment))
})
It("should create a pod template for stan with the correct infrastructure label", func() {
Expect(stanStatefulSet.Spec.Template.Labels["infrastructure"]).To(Equal("Stan"))
})
It("should create a pod template for stan without a microservice label", func() {
Expect(stanStatefulSet.Spec.Template.Labels["microservice"]).To(Equal(""))
})
It("should create a pod template for stan with the stan config map as a volume", func() {
Expect(stanStatefulSet.Spec.Template.Spec.Volumes[0].Name).To(Equal("config-volume"))
Expect(stanStatefulSet.Spec.Template.Spec.Volumes[0].ConfigMap.Name).To(Equal("loismay-stan"))
})
It("should create a container for stan named 'stan'", func() {
Expect(stanStatefulSet.Spec.Template.Spec.Containers[0].Name).To(Equal("stan"))
})
It("should create a container for stan with the correct image", func() {
Expect(stanStatefulSet.Spec.Template.Spec.Containers[0].Image).To(Equal("nats-streaming:0.22.0"))
})
It("should create a container for stan with the correct arguments", func() {
Expect(stanStatefulSet.Spec.Template.Spec.Containers[0].Args[0]).To(Equal("-sc"))
Expect(stanStatefulSet.Spec.Template.Spec.Containers[0].Args[1]).To(Equal("/etc/stan-config/stan.conf"))
})
It("should create a container for stan with the 'monitor' port exposed", func() {
Expect(stanStatefulSet.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort).To(Equal(int32(8222)))
Expect(stanStatefulSet.Spec.Template.Spec.Containers[0].Ports[0].Name).To(Equal("monitor"))
})
It("should create a container for stan with the 'metrics' port exposed", func() {
Expect(stanStatefulSet.Spec.Template.Spec.Containers[0].Ports[1].ContainerPort).To(Equal(int32(7777)))
Expect(stanStatefulSet.Spec.Template.Spec.Containers[0].Ports[1].Name).To(Equal("metrics"))
})
It("should create a container for stan with the 'POD_NAME' environmental variable set", func() {
Expect(stanStatefulSet.Spec.Template.Spec.Containers[0].Env[0].Name).To(Equal("POD_NAME"))
Expect(stanStatefulSet.Spec.Template.Spec.Containers[0].Env[0].ValueFrom.FieldRef.FieldPath).To(Equal("metadata.name"))
})
It("should create a container for stan with the 'POD_NAMESPACE' environmental variable set", func() {
Expect(stanStatefulSet.Spec.Template.Spec.Containers[0].Env[1].Name).To(Equal("POD_NAMESPACE"))
Expect(stanStatefulSet.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath).To(Equal("metadata.namespace"))
})
It("should create a container for stan with '/etc/stan-config' mounted", func() {
Expect(stanStatefulSet.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath).To(Equal("/etc/stan-config"))
Expect(stanStatefulSet.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name).To(Equal("config-volume"))
})
// RawDataLogIngestor Ingress
It("should create an ingress for rawdatalogingestor named 'ernestbush'", func() {
object := getCreatedObject(clientSet, "Ingress", fmt.Sprintf("loismay-ernestbush-%s", customerTenantID[0:7]))
Expect(object).ToNot(BeNil())
rawDataLogIngestorIngress = object.(*netv1.Ingress)
})
It("should create an ingress for rawdatalogingestor with the production certmanager issuer", func() {
Expect(rawDataLogIngestorIngress.Annotations["cert-manager.io/cluster-issuer"]).To(Equal("letsencrypt-staging"))
})
It("should create an ingress for rawdatalogingestor with the correct tenant id", func() {
// TODO this is broken now, as we get a dynamic id
Expect(rawDataLogIngestorIngress.Annotations["nginx.ingress.kubernetes.io/configuration-snippet"]).To(Equal("proxy_set_header Tenant-ID \"f4679b71-1215-4a60-8483-53b0d5f2bb47\";\n"))
})
It("should create an ingress for rawdatalogingestor with one rule", func() {
Expect(len(rawDataLogIngestorIngress.Spec.Rules)).To(Equal(1))
})
It("should create an ingress for rawdatalogingestor with the correct rule host", func() {
Expect(rawDataLogIngestorIngress.Spec.Rules[0].Host).To(Equal("some-fancy.domain.name"))
})
It("should create an ingress for rawdatalogingestor with one rule path", func() {
Expect(len(rawDataLogIngestorIngress.Spec.Rules[0].HTTP.Paths)).To(Equal(1))
})
It("should create an ingress for rawdatalogingestor with the correct rule path", func() {
Expect(rawDataLogIngestorIngress.Spec.Rules[0].HTTP.Paths[0].Path).To(Equal("/api/not-webhooks-just-to-be-sure"))
})
It("should create an ingress for rawdatalogingestor with the correct rule pathtype", func() {
Expect(string(*rawDataLogIngestorIngress.Spec.Rules[0].HTTP.Paths[0].PathType)).To(Equal("SpecialTypeNotActuallySupported"))
})
It("should create an ingress for rawdatalogingestor with one TLS", func() {
Expect(len(rawDataLogIngestorIngress.Spec.TLS)).To(Equal(1))
})
It("should create an ingress for rawdatalogingestor with the correct TLS host", func() {
Expect(rawDataLogIngestorIngress.Spec.TLS[0].Hosts[0]).To(Equal("some-fancy.domain.name"))
})
It("should create an ingress for rawdatalogingestor with the correct TLS secret name", func() {
Expect(rawDataLogIngestorIngress.Spec.TLS[0].SecretName).To(Equal("some-fancy-certificate"))
})
// RawDataLogIngestor Deployment
It("should create a deployment for rawdatalog named 'loismay-ernestbush'", func() {
object := getCreatedObject(clientSet, "Deployment", "loismay-ernestbush")
Expect(object).ToNot(BeNil())
rawDataLogDeployment = object.(*appsv1.Deployment)
})
It("should create a deployment for raw data log with the correct Kind", func() {
Expect(rawDataLogDeployment.Kind).To(Equal("Deployment"))
})
It("should create a deployment for raw data log with the correct tenant-id annotation", func() {
Expect(rawDataLogDeployment.Annotations["dolittle.io/tenant-id"]).To(Equal(input.Dolittle.CustomerID))
})
It("should create a deployment for raw data log with the correct application-id annotation", func() {
Expect(rawDataLogDeployment.Annotations["dolittle.io/application-id"]).To(Equal(input.Dolittle.ApplicationID))
})
It("should create a deployment for raw data log with the correct microservice-id annotation", func() {
Expect(rawDataLogDeployment.Annotations["dolittle.io/microservice-id"]).To(Equal(input.Dolittle.MicroserviceID))
})
It("should create a deployment for raw data log with the correct microservice-kind annotation", func() {
Expect(rawDataLogDeployment.Annotations["dolittle.io/microservice-kind"]).To(Equal(string(input.Kind)))
})
It("should create a deployment for raw data log with the correct tenant label", func() {
Expect(rawDataLogDeployment.Labels["tenant"]).To(Equal(customer.Name))
})
It("should create a deployment for raw data log with the correct application label", func() {
Expect(rawDataLogDeployment.Labels["application"]).To(Equal(application.Name))
})
It("should create a deployment for raw data log with the correct environment label", func() {
Expect(rawDataLogDeployment.Labels["environment"]).To(Equal(input.Environment))
})
It("should create a deployment for raw data log with the correct microservice label", func() {
Expect(rawDataLogDeployment.Labels["microservice"]).To(Equal(input.Name))
})
})
})
})
// getCreatedObject scans the create actions recorded by the fake clientset and
// returns the first created object whose Kind and name match, or nil when no
// such object was created. Only the resource types exercised by these tests
// are considered, mirroring the original explicit type switch.
func getCreatedObject(clientSet *fake.Clientset, kind, name string) runtime.Object {
	for _, create := range getCreateActions(clientSet) {
		object := create.GetObject()
		if object.GetObjectKind().GroupVersionKind().Kind != kind {
			continue
		}
		switch object.(type) {
		case *corev1.ConfigMap, *corev1.Service, *appsv1.StatefulSet, *netv1.Ingress, *appsv1.Deployment:
			// All of these concrete types expose GetName via their embedded
			// ObjectMeta; the single assertion replaces five identical branches.
			if named, ok := object.(interface{ GetName() string }); ok && named.GetName() == name {
				return object
			}
		}
	}
	return nil
}
// getCreateActions returns every action recorded by the fake clientset that is
// a create action, in the order the actions were recorded.
func getCreateActions(clientSet *fake.Clientset) []testing.CreateAction {
	var creates []testing.CreateAction
	for _, recorded := range clientSet.Actions() {
		create, ok := recorded.(testing.CreateAction)
		if !ok {
			continue
		}
		creates = append(creates, create)
	}
	return creates
}
|
/*
Copyright 2021 The KodeRover Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"log"
"os"
"os/signal"
"syscall"
"github.com/koderover/zadig/lib/microservice/cron/server"
)
// main wires SIGTERM/SIGINT into a stop channel and runs the cron server
// until it returns or a signal requests shutdown.
func main() {
	stop := make(chan struct{})

	sigCh := make(chan os.Signal, 1)
	defer close(sigCh)
	signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT)

	go func() {
		// First signal wins: stop relaying further signals and tell the
		// server to shut down.
		<-sigCh
		signal.Stop(sigCh)
		close(stop)
	}()

	if err := server.Serve(stop); err != nil {
		log.Fatal(err)
	}
}
|
package connector
import (
"common"
// "logger"
)
const PLAYERLEVEL_MAX = 200
// //return (*cfg)[0].Deco
// return reflect.ValueOf(&(*cfg)[0]).Elem().FieldByName(fmt.Sprintf("Deco%d", level)).Interface().(uint32)
// case rpc.BuildingId_Bomb:
// GetGlobalCfg returns the global configuration value for key by delegating
// to the common package.
func GetGlobalCfg(key string) uint32 {
	return common.GetGlobalConfig(key)
}
// GetTaskCfg looks up the task configuration for id. It returns a pointer to
// the first entry of the configured slice, or nil when id is unknown.
func GetTaskCfg(id string) *TaskCfg {
	if cfg, ok := gTaskCfg[id]; ok {
		return &(*cfg)[0]
	}
	return nil
}
// GetUplevelCfg looks up the level-up configuration for id. It returns a
// pointer to the first entry of the configured slice, or nil when id is
// unknown.
func GetUplevelCfg(id uint32) *UplevelCfg {
	if cfg, ok := gUplevelCfg[id]; ok {
		return &(*cfg)[0]
	}
	return nil
}
// CheckItemId reports whether an item configuration exists for the given id.
func CheckItemId(id string) bool {
	// Return the lookup result directly instead of the if-true/false pattern.
	_, ok := gItemCfg[id]
	return ok
}
// GetItemCfg looks up the item configuration for id. It returns a pointer to
// the first entry of the configured slice, or nil when id is unknown.
func GetItemCfg(id string) *ItemCfg {
	if cfg, ok := gItemCfg[id]; ok {
		return &(*cfg)[0]
	}
	return nil
}
// GetAllItemIds returns the id of every configured item. Map iteration order
// is random, so the order of the result is unspecified.
func GetAllItemIds() []string {
	// Pre-size to avoid repeated growth; `for id := range` replaces the
	// unidiomatic `for id, _ := range`.
	ids := make([]string, 0, len(gItemCfg))
	for id := range gItemCfg {
		ids = append(ids, id)
	}
	return ids
}
|
package BuddySimulator
import (
"sort"
"fmt"
)
// nextPid is the id assigned to the next created Process.
// NOTE(review): not goroutine-safe — assumes single-threaded use; confirm.
var nextPid int = 0

// Process ties a requested allocation to its backing memory block in the
// buddy-allocator simulation.
type Process struct {
	pid int // unique id, taken from nextPid at creation
	memoryUsage uint // requested allocation size
	memoryBlock *Block // backing block; nil after extractBlock
}
// NewProcess builds a Process of the given size with a fresh pid and no
// memory block attached yet.
func NewProcess(memoryUsage uint) *Process {
	p := &Process{pid: nextPid, memoryUsage: memoryUsage}
	nextPid++
	return p
}
// GetPid returns the process id.
func (p Process)GetPid() int {
	return p.pid
}
// GetBlock returns a copy of the process's memory block.
// NOTE(review): dereferences memoryBlock, so this panics when the block is
// nil (e.g. after extractBlock) — confirm callers only use it while allocated.
func (p Process)GetBlock() Block {
	return *p.memoryBlock
}
// extractBlock detaches the process's memory block and returns it, leaving
// the process with no block.
func (p *Process)extractBlock() *Block {
	block := p.memoryBlock
	p.memoryBlock = nil
	return block
}
// String renders the process as "{pid:N, memoryUsage:M, <block>}", matching
// the original piecewise concatenation output exactly.
func (p Process)String() string {
	return fmt.Sprintf("{pid:%d, memoryUsage:%d, %+v}", p.GetPid(), p.memoryUsage, p.GetBlock())
}
// processList is a pid-sortable collection of processes.
type processList []*Process

// String renders the list as "[p1, p2, ...]".
func (p processList)String() string {
	out := "["
	for i, proc := range p {
		if i > 0 {
			out += ", "
		}
		out += fmt.Sprint(*proc)
	}
	return out + "]"
}
// Len implements sort.Interface.
func (p processList)Len() int {
	return len(p)
}

// Less implements sort.Interface; processes are ordered by ascending pid.
func (p processList)Less(i, j int) bool {
	return p[i].GetPid() < p[j].GetPid()
}

// Swap implements sort.Interface.
func (p processList)Swap(i, j int) {
	p[i], p[j] = p[j], p[i]
}
// OS simulates an operating system that owns a buddy-allocated memory and
// the list of live processes, kept sorted by pid.
type OS struct {
	memory *Memory
	pList processList
}
// NewOs creates an OS simulator backed by a buddy memory of the given total
// size. It returns an error when the memory cannot be created.
func NewOs(memory uint) (*OS, error) {
	mem, err := NewMemory(memory)
	if err != nil {
		return nil, err
	}
	return &OS{memory: mem, pList: make(processList, 0)}, nil
}
// AllocNewProcess allocates size bytes from memory, wraps the allocation in a
// new process, records it, and returns the new pid. On allocation failure it
// returns -1 and the allocator's error.
func (os *OS)AllocNewProcess(size uint) (int, error) {
	block, err := os.memory.AllocateMemory(size)
	if err != nil {
		return -1, err
	}
	proc := NewProcess(size)
	proc.memoryBlock = block
	// Keep pList ordered by pid for the binary search in DeallocProcess.
	os.pList = append(os.pList, proc)
	sort.Sort(os.pList)
	return proc.GetPid(), nil
}
// DeallocProcess removes the process with the given pid from the OS and
// returns its memory block to the allocator. It returns an error when pid is
// not present or when the block cannot be freed.
func (os *OS)DeallocProcess(pid int) error {
	// pList is kept sorted by pid, so a binary search locates the process.
	index := sort.Search(os.pList.Len(), func(n int) bool {
		return os.pList[n].GetPid() >= pid
	})
	if index == os.pList.Len() || os.pList[index].GetPid() != pid {
		// Fixed typo: "pressent" -> "present".
		return fmt.Errorf("pid %d is not present in OS", pid)
	}
	proc := os.pList[index]
	// Removing one element from a sorted slice keeps it sorted, so the
	// original re-sort after removal was redundant and has been dropped.
	os.pList = append(os.pList[:index], os.pList[index+1:]...)
	return os.memory.FreeBlock(proc.extractBlock())
}
// PrintState dumps the allocator's current free blocks and the live process
// list to stdout.
func (os *OS)PrintState() {
	freeBlocks := []Block{}
	for _, list := range os.memory.freeLists {
		for _, block := range list {
			freeBlocks = append(freeBlocks, *block)
		}
	}
	fmt.Printf("freeBlocks:%+v, processes:%+v\n", freeBlocks, os.pList)
}
|
// Copyright 2015-2016 Cocoon Labs Ltd.
//
// See LICENSE file for terms and conditions.
package libflac
import (
"io"
"os"
"testing"
"github.com/cocoonlife/testify/assert"
)
// TestDecode exercises the file-based decoder: a missing file must fail, and
// decoding testdata/sine24-00.flac must yield 200000 mono 24-bit samples.
func TestDecode(t *testing.T) {
	a := assert.New(t)
	// Opening a nonexistent file must return a nil decoder and an error.
	d, err := NewDecoder("testdata/nonexistent.flac")
	a.Equal(d, (*Decoder)(nil), "decoder is nil")
	a.NotNil(err, "err is not nil")
	d, err = NewDecoder("testdata/sine24-00.flac")
	a.Equal(err, nil, "err is nil")
	a.Equal(d.Channels, 1, "channels is 1")
	a.Equal(d.Depth, 24, "depth is 24")
	// Fixed assertion message: this checks the sample rate, not the depth.
	a.Equal(d.Rate, 48000, "rate is 48000")
	samples := 0
	// Read the first frame explicitly so its metadata can be asserted.
	f, err := d.ReadFrame()
	a.Equal(err, nil, "err is nil")
	a.Equal(f.Channels, 1, "channels is 1")
	a.Equal(f.Depth, 24, "depth is 24")
	a.Equal(f.Rate, 48000, "rate is 48000")
	samples = samples + len(f.Buffer)
	// Drain the remaining frames. io.EOF may arrive together with a final
	// (possibly nil) frame, so samples are accumulated before breaking.
	for {
		f, err := d.ReadFrame()
		if err == nil || err == io.EOF {
			if f != nil {
				samples = samples + len(f.Buffer)
			}
		} else {
			a.Equal(err, nil, "error reported")
			break
		}
		if err == io.EOF {
			break
		}
	}
	a.Equal(samples, 200000, "all samples read")
	d.Close()
}
// TestDecodeReader mirrors TestDecode but drives the decoder through an
// io.Reader instead of a file path.
func TestDecodeReader(t *testing.T) {
	a := assert.New(t)
	// The open error is deliberately ignored here: a nil/invalid reader must
	// make NewDecoderReader fail.
	reader, _ := os.Open("testdata/nonexistent.flac")
	d, err := NewDecoderReader(reader)
	a.Equal(d, (*Decoder)(nil), "decoder is nil")
	a.Error(err)
	// Fixed: the second open's error was silently discarded; check it so a
	// missing fixture fails here rather than inside the decoder.
	reader, err = os.Open("testdata/sine24-00.flac")
	a.Equal(err, nil, "err is nil")
	d, err = NewDecoderReader(reader)
	a.Equal(err, nil, "err is nil")
	a.Equal(d.Channels, 1, "channels is 1")
	a.Equal(d.Depth, 24, "depth is 24")
	// Fixed assertion message: this checks the sample rate, not the depth.
	a.Equal(d.Rate, 48000, "rate is 48000")
	samples := 0
	// Read the first frame explicitly so its metadata can be asserted.
	f, err := d.ReadFrame()
	a.Equal(err, nil, "err is nil")
	a.Equal(f.Channels, 1, "channels is 1")
	a.Equal(f.Depth, 24, "depth is 24")
	a.Equal(f.Rate, 48000, "rate is 48000")
	samples = samples + len(f.Buffer)
	// Drain the remaining frames. io.EOF may arrive together with a final
	// (possibly nil) frame, so samples are accumulated before breaking.
	for {
		f, err := d.ReadFrame()
		if err == nil || err == io.EOF {
			if f != nil {
				samples = samples + len(f.Buffer)
			}
		} else {
			a.Equal(err, nil, "error reported")
			break
		}
		if err == io.EOF {
			break
		}
	}
	a.Equal(samples, 200000, "all samples read")
	d.Close()
}
// TestEncode exercises the file-based encoder: creating the output inside a
// missing directory must fail, a channel-count mismatch must be rejected, and
// a matching stereo frame must encode cleanly.
func TestEncode(t *testing.T) {
	a := assert.New(t)
	e, err := NewEncoder("not-existdir/foo.flac", 2, 24, 48000)
	a.Equal(e, (*Encoder)(nil), "encoder is nil")
	a.NotNil(err, "err is not nil")
	fileName := "testdata/test.flac"
	e, err = NewEncoder(fileName, 2, 24, 48000)
	a.Equal(err, nil, "err is nil")
	// A mono frame written to a stereo encoder must be rejected.
	f := Frame{Channels: 1, Depth: 24, Rate: 48000}
	err = e.WriteFrame(f)
	a.Error(err, "channels mismatch")
	f.Channels = 2
	f.Buffer = make([]int32, 2*100)
	err = e.WriteFrame(f)
	a.Equal(err, nil, "frame encoded")
	e.Close()
	os.Remove(fileName)
}
// TestRoundTrip decodes a known mono FLAC file and re-encodes every frame,
// verifying that all 200000 samples pass through.
func TestRoundTrip(t *testing.T) {
	a := assert.New(t)
	inputFile := "testdata/sine24-00.flac"
	outputFile := "testdata/test.flac"
	d, err := NewDecoder(inputFile)
	a.Equal(err, nil, "err is nil")
	e, err := NewEncoder(outputFile, d.Channels, d.Depth, d.Rate)
	// Fixed: this error was previously unchecked; a failed encoder would
	// nil-panic in the loop below instead of failing the test cleanly.
	a.Equal(err, nil, "err is nil")
	samples := 0
	// Copy frames until EOF; io.EOF may arrive together with a final frame.
	for {
		f, err := d.ReadFrame()
		if err == nil || err == io.EOF {
			if f != nil {
				_ = e.WriteFrame(*f)
				samples = samples + len(f.Buffer)
			}
		} else {
			a.Equal(err, nil, "error reported")
			break
		}
		if err == io.EOF {
			break
		}
	}
	a.Equal(samples, 200000, "all samples read")
	d.Close()
	e.Close()
	os.Remove(outputFile)
}
// TestRoundTripStereo decodes a known stereo FLAC file and re-encodes every
// frame, verifying that all 400000 samples pass through.
func TestRoundTripStereo(t *testing.T) {
	a := assert.New(t)
	inputFile := "testdata/sine16-12.flac"
	outputFile := "testdata/test.flac"
	d, err := NewDecoder(inputFile)
	a.Equal(err, nil, "err is nil")
	e, err := NewEncoder(outputFile, d.Channels, d.Depth, d.Rate)
	// Fixed: this error was previously unchecked; a failed encoder would
	// nil-panic in the loop below instead of failing the test cleanly.
	a.Equal(err, nil, "err is nil")
	samples := 0
	// Copy frames until EOF; io.EOF may arrive together with a final frame.
	for {
		f, err := d.ReadFrame()
		if err == nil || err == io.EOF {
			if f != nil {
				_ = e.WriteFrame(*f)
				samples = samples + len(f.Buffer)
			}
		} else {
			a.Equal(err, nil, "error reported")
			break
		}
		if err == io.EOF {
			break
		}
	}
	a.Equal(samples, 400000, "all samples read")
	d.Close()
	e.Close()
	os.Remove(outputFile)
}
// TestRoundTripReaderWriter round-trips a mono FLAC file through the
// io.Reader/io.Writer decoder and encoder constructors.
func TestRoundTripReaderWriter(t *testing.T) {
	a := assert.New(t)
	inputFile := "testdata/sine24-00.flac"
	outputFile := "testdata/test.flac"
	// Fixed: os.Open, os.Create, and NewEncoderWriter errors were previously
	// discarded; any failure would nil-panic later instead of failing here.
	reader, err := os.Open(inputFile)
	a.Equal(err, nil, "err is nil")
	d, err := NewDecoderReader(reader)
	a.Equal(err, nil, "err is nil")
	writer, err := os.Create(outputFile)
	a.Equal(err, nil, "err is nil")
	e, err := NewEncoderWriter(writer, d.Channels, d.Depth, d.Rate)
	a.Equal(err, nil, "err is nil")
	samples := 0
	// Copy frames until EOF; io.EOF may arrive together with a final frame.
	for {
		f, err := d.ReadFrame()
		if err == nil || err == io.EOF {
			if f != nil {
				_ = e.WriteFrame(*f)
				samples = samples + len(f.Buffer)
			}
		} else {
			a.Equal(err, nil, "error reported")
			break
		}
		if err == io.EOF {
			break
		}
	}
	a.Equal(samples, 200000, "all samples read")
	d.Close()
	e.Close()
	os.Remove(outputFile)
}
|
package handlers
import (
"net/http"
"github.com/abhinavdwivedi440/microservices/data"
)
// swagger:route GET /products products listProducts
// Returns a list of products
// responses:
//	200: productsResponse

// GetProducts handles GET requests by serializing the product list from the
// data store to the response as JSON.
func (p *Product) GetProducts(w http.ResponseWriter, r *http.Request) {
	p.l.Println("Handle GET")
	w.Header().Set("Content-Type", "application/json")
	products := data.GetProducts()
	if err := products.ToJSON(w); err != nil {
		http.Error(w, "Unable to encode JSON", http.StatusInternalServerError)
	}
}
|
// SPDX-License-Identifier: MIT
// apidoc 是一个 RESTful API 文档生成工具
//
// 大致的使用方法为:
//
// apidoc cmd [args]
//
// 其中的 cmd 为子命令,args 代码传递给该子命令的参数。
// 可以使用 help 查看每个子命令的具体说明:
//
// apidoc help [cmd]
package main
import (
"fmt"
"os"
"github.com/issue9/localeutil"
"golang.org/x/text/language"
"github.com/caixw/apidoc/v7"
"github.com/caixw/apidoc/v7/internal/cmd"
"github.com/caixw/apidoc/v7/internal/locale"
)
// main detects the user's locale (falling back to the default when detection
// fails), configures apidoc with it, and executes the requested sub-command.
func main() {
	tag, err := localeutil.DetectUserLanguageTag()
	if err != nil {
		// Could not detect the system language; report it and fall back to
		// the default locale.
		fmt.Fprintln(os.Stderr, err, tag)
		tag = language.MustParse(locale.DefaultLocaleID)
	}
	apidoc.SetLocale(tag)

	err = cmd.Init(os.Stdout).Exec(os.Args[1:])
	if err == nil {
		return
	}
	if _, printErr := fmt.Fprintln(os.Stderr, err); printErr != nil {
		panic(printErr)
	}
	os.Exit(2)
}
|
package undocker
import (
"encoding/json"
"io"
"github.com/pkg/errors"
"github.com/pepabo/undocker/internal/untar"
)
// Source abstracts where image data comes from (for example a registry or a
// local archive).
type Source interface {
	Config(repository, tag string) ([]byte, error)
	Exists(repository, tag string) bool
	LayerBlobs(repository, tag string) ([]io.Reader, error)
	Image(repository, tag string) Image
	CleanUp() error
}

// Image identifies a single repository:tag within a Source.
type Image struct {
	Source Source
	Repository string
	Tag string
}
// Extract extracts docker image as rootfs to the specified directory
func (i Image) Extract(dir string, overwriteSymlink bool) error {
	if !i.Exists() {
		return errors.New("Image not found")
	}
	blobs, err := i.LayerBlobs()
	if err != nil {
		return err
	}
	// Untar each layer in order; lower layers come first.
	opts := untar.Options{OverwriteSymlinkRefs: overwriteSymlink}
	for _, blob := range blobs {
		if err := untar.Untar(blob, dir, opts); err != nil {
			return err
		}
	}
	return nil
}
// Unpack is an alias for Extract(); it extracts the image rootfs into dir.
func (i Image) Unpack(dir string, overwriteSymlink bool) error {
	return i.Extract(dir, overwriteSymlink)
}
// Config fetches the image's config JSON from its source and decodes it into
// an ImageConfig.
func (i Image) Config() (*ImageConfig, error) {
	raw, err := i.Source.Config(i.Repository, i.Tag)
	if err != nil {
		return nil, err
	}
	var config ImageConfig
	if err := json.Unmarshal(raw, &config); err != nil {
		return nil, err
	}
	return &config, nil
}
// Exists reports whether the image exists in its source.
func (i Image) Exists() bool {
	// Return the source's answer directly instead of the if-true/false pattern.
	return i.Source.Exists(i.Repository, i.Tag)
}
// LayerBlobs return the layers of the image in order from the lower
func (i Image) LayerBlobs() ([]io.Reader, error) {
	return i.Source.LayerBlobs(i.Repository, i.Tag)
}
|
package log
import (
"fmt"
"io"
stdlib_log "log"
"os"
log_api "github.com/cyberark/secretless-broker/pkg/secretless/log"
)
// defaultOutputBuffer is the destination for loggers created through New and
// NewForService.
var defaultOutputBuffer = os.Stdout

// Logger is the main logging object that can be used to log messages to stdout
// or any other io.Writer. Delegates to `log.Logger` for writing to the buffer.
type Logger struct {
	BackingLogger *stdlib_log.Logger // underlying writer plus timestamp flags
	IsDebug bool // when false, debug-severity output is suppressed
	prefix string // optional service name prepended to each message
}
// severity is an integer representation of the severity level associated with
// a logging message. Higher values indicate more severe messages.
type severity uint8

const (
	// DebugSeverity indicates a debug logging message
	DebugSeverity severity = iota
	// InfoSeverity indicates an informational logging message
	InfoSeverity
	// WarnSeverity indicates a warning logging message
	WarnSeverity
	// ErrorSeverity indicates a critical severity logging message
	ErrorSeverity
	// PanicSeverity indicates a severity logging message that is unlikely to be
	// recovered from
	PanicSeverity
)
// severityLevels is a mapping of all available severity levels to their printed
// values, as used by severityPrefix.
var severityLevels = map[severity]string{
	DebugSeverity: "DEBUG",
	InfoSeverity: "INFO",
	WarnSeverity: "WARN",
	ErrorSeverity: "ERROR",
	PanicSeverity: "PANIC",
}
// New method instantiates a new logger that we can write things to. Output
// goes to the default buffer (stdout) with no prefix.
func New(isDebug bool) log_api.Logger {
	return NewWithOptions(defaultOutputBuffer, "", isDebug)
}
// NewForService method instantiates a new logger that includes information about
// the service itself: serviceName is used as the message prefix.
func NewForService(serviceName string, isDebug bool) log_api.Logger {
	return NewWithOptions(defaultOutputBuffer, serviceName, isDebug)
}
// NewWithOptions method instantiates a new logger with all configurable options:
// the output writer, the message prefix, and the debug flag.
// This specific constructor is not intended to be used directly by clients.
func NewWithOptions(outputBuffer io.Writer, prefix string, isDebug bool) log_api.Logger {
	return &Logger{
		BackingLogger: stdlib_log.New(outputBuffer, "", stdlib_log.LstdFlags),
		IsDebug: isDebug,
		prefix: prefix,
	}
}
// shouldPrint reports whether a message of the given severity should be
// emitted: debug messages are only printed when IsDebug is set.
func (logger *Logger) shouldPrint(severityLevel severity) bool {
	return logger.IsDebug || (severityLevel != DebugSeverity)
}
// prependString returns a new argument slice with value inserted before args.
// It is used to push prefixes and severity tags onto variadic log arguments.
func prependString(value string, args ...interface{}) []interface{} {
	// Fixed: the parameter previously shadowed the function's own name.
	// Pre-size so the append below never reallocates.
	result := make([]interface{}, 0, len(args)+1)
	result = append(result, value)
	return append(result, args...)
}
// DebugEnabled returns if the debug logging should be displayed for a particular
// logger instance.
func (logger *Logger) DebugEnabled() bool {
	return logger.IsDebug
}
// CopyWith creates a copy of the logger with the prefix and debug values
// overridden by the arguments. The copy writes to the same underlying writer.
func (logger *Logger) CopyWith(prefix string, isDebug bool) log_api.Logger {
	return NewWithOptions(
		logger.BackingLogger.Writer(),
		prefix,
		isDebug,
	)
}
// Prefix returns the prefix that will be prepended to all output messages.
func (logger *Logger) Prefix() string {
	return logger.prefix
}
// severityPrefix renders a severity as a bracketed tag left-padded to a fixed
// width of 7, e.g. "[INFO]  " vs "[DEBUG]", so messages line up.
func severityPrefix(sev severity) string {
	tag := "[" + severityLevels[sev] + "]"
	return fmt.Sprintf("%-7s", tag)
}
// ---------------------------
// Main logging methods that funnel all the info here
// logf is the funnel for all formatted logging. It drops messages below the
// configured verbosity, then builds the final format as
// "<severity> [<prefix>: ]<format>" by prepending pieces in reverse order
// (prefix first, then severity, so severity ends up leftmost).
func (logger *Logger) logf(sev severity, format string, args ...interface{}) {
	if !logger.shouldPrint(sev) {
		return
	}
	if logger.prefix != "" {
		format = "%s: " + format
		args = prependString(logger.prefix, args...)
	}
	format = "%s " + format
	args = prependString(severityPrefix(sev), args...)
	logger.BackingLogger.Printf(format, args...)
}
// logln is the funnel for all line-based logging. It drops messages below the
// configured verbosity, then prepends the "prefix:" token (if any) and the
// severity tag before delegating to the backing logger's Println.
func (logger *Logger) logln(sev severity, args ...interface{}) {
	if !logger.shouldPrint(sev) {
		return
	}
	if logger.prefix != "" {
		args = prependString(logger.prefix+":", args...)
	}
	args = prependString(severityPrefix(sev), args...)
	logger.BackingLogger.Println(args...)
}
// log is a thin alias for logln, used by the unsuffixed public methods
// (Debug, Info, ...).
func (logger *Logger) log(sev severity, args ...interface{}) {
	logger.logln(sev, args...)
}
// TODO: This duplication is quite hideous, and should be cleaned up by
// delegating everything to stdlib logger in a more straightforward way.

// panicf mirrors logf but delegates to the backing logger's Panicf, which
// prints the message and then panics with it.
func (logger *Logger) panicf(sev severity, format string, args ...interface{}) {
	if !logger.shouldPrint(sev) {
		return
	}
	if logger.prefix != "" {
		format = "%s: " + format
		args = prependString(logger.prefix, args...)
	}
	format = "%s " + format
	args = prependString(severityPrefix(sev), args...)
	logger.BackingLogger.Panicf(format, args...)
}
// panicln mirrors logln but delegates to the backing logger's Panicln, which
// prints the message and then panics with it.
func (logger *Logger) panicln(sev severity, args ...interface{}) {
	if !logger.shouldPrint(sev) {
		return
	}
	if logger.prefix != "" {
		args = prependString(logger.prefix+":", args...)
	}
	args = prependString(severityPrefix(sev), args...)
	logger.BackingLogger.Panicln(args...)
}
// panic is a thin alias for panicln, used by the Panic method below.
func (logger *Logger) panic(sev severity, args ...interface{}) {
	logger.panicln(sev, args...)
}
// ---------------------------
// Specific API implementation

// Debugf prints to stdout a formatted debug-level logging message.
func (logger *Logger) Debugf(format string, args ...interface{}) {
	logger.logf(DebugSeverity, format, args...)
}
// Infof prints to stdout a formatted info-level logging message.
func (logger *Logger) Infof(format string, args ...interface{}) {
	logger.logf(InfoSeverity, format, args...)
}
// Warnf prints to stdout a formatted warning-level logging message.
func (logger *Logger) Warnf(format string, args ...interface{}) {
	logger.logf(WarnSeverity, format, args...)
}
// Errorf prints to stdout a formatted error-level logging message.
func (logger *Logger) Errorf(format string, args ...interface{}) {
	logger.logf(ErrorSeverity, format, args...)
}
// Panicf prints to stdout a formatted panic-level logging message, then
// panics with the formatted message.
func (logger *Logger) Panicf(format string, args ...interface{}) {
	logger.panicf(PanicSeverity, format, args...)
}
// Debugln prints to stdout a debug-level logging message.
func (logger *Logger) Debugln(args ...interface{}) {
	logger.logln(DebugSeverity, args...)
}
// Infoln prints to stdout an info-level logging message.
func (logger *Logger) Infoln(args ...interface{}) {
	logger.logln(InfoSeverity, args...)
}
// Warnln prints to stdout a warning-level logging message.
func (logger *Logger) Warnln(args ...interface{}) {
	logger.logln(WarnSeverity, args...)
}
// Errorln prints to stdout an error-level logging message.
func (logger *Logger) Errorln(args ...interface{}) {
	logger.logln(ErrorSeverity, args...)
}
// Panicln prints to stdout a panic-level logging message, then panics.
func (logger *Logger) Panicln(args ...interface{}) {
	logger.panicln(PanicSeverity, args...)
}
// Debug prints to stdout a debug-level logging message. Alias of
// the Debugln method.
func (logger *Logger) Debug(args ...interface{}) {
	logger.log(DebugSeverity, args...)
}
// Info prints to stdout an info-level logging message. Alias of
// the Infoln method.
func (logger *Logger) Info(args ...interface{}) {
	logger.log(InfoSeverity, args...)
}
// Warn prints to stdout a warning-level logging message. Alias of
// the Warnln method.
func (logger *Logger) Warn(args ...interface{}) {
	logger.log(WarnSeverity, args...)
}
// Error prints to stdout an error-level logging message. Alias of
// the Errorln method.
func (logger *Logger) Error(args ...interface{}) {
	logger.log(ErrorSeverity, args...)
}
// Panic prints to stdout a panic-level logging message, then panics.
// Alias of the Panicln method.
func (logger *Logger) Panic(args ...interface{}) {
	logger.panic(PanicSeverity, args...)
}
|
package html5_test
import (
. "github.com/bytesparadise/libasciidoc/testsupport"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// End-to-end rendering tests for AsciiDoc ordered lists with the HTML5
// backend: numbering styles, block attributes (start, %reversed, roles),
// list-element continuations, nesting, and mixing with unordered lists.
// Each case feeds an AsciiDoc fixture through RenderHTML and compares the
// result against the exact expected HTML.
var _ = Describe("ordered lists", func() {

	It("with implicit numbering style on a single line", func() {
		source := `. item on a single line`
		expected := `<div class="olist arabic">
<ol class="arabic">
<li>
<p>item on a single line</p>
</li>
</ol>
</div>
`
		Expect(RenderHTML(source)).To(MatchHTML(expected))
	})

	It("with implicit numbering style on multiple lines with leading tabs", func() {
		// leading tabs should be trimmed
		// NOTE(review): the continuation lines of this fixture presumably
		// carry leading tabs in the original file — confirm when merging.
		source := `. item
on
multiple
lines
`
		expected := `<div class="olist arabic">
<ol class="arabic">
<li>
<p>item
on
multiple
lines</p>
</li>
</ol>
</div>
`
		Expect(RenderHTML(source)).To(MatchHTML(expected))
	})

	It("with title and role", func() {
		source := `.title
[#myid]
[.myrole]
. item 1`
		expected := `<div id="myid" class="olist arabic myrole">
<div class="title">title</div>
<ol class="arabic">
<li>
<p>item 1</p>
</li>
</ol>
</div>
`
		Expect(RenderHTML(source)).To(MatchHTML(expected))
	})

	It("with explicit start only", func() {
		source := `[start=5]
. item`
		expected := `<div class="olist arabic">
<ol class="arabic" start="5">
<li>
<p>item</p>
</li>
</ol>
</div>
`
		Expect(RenderHTML(source)).To(MatchHTML(expected))
	})

	It("with explicit quoted numbering and start", func() {
		source := `["lowerroman", start="5"]
. item`
		expected := `<div class="olist lowerroman">
<ol class="lowerroman" type="i" start="5">
<li>
<p>item</p>
</li>
</ol>
</div>
`
		Expect(RenderHTML(source)).To(MatchHTML(expected))
	})

	It("item reversed with explicit quoted numbering and start", func() {
		source := `[lowerroman%reversed, start="5"]
. item 1
. item 2`
		expected := `<div class="olist lowerroman">
<ol class="lowerroman" type="i" start="5" reversed>
<li>
<p>item 1</p>
</li>
<li>
<p>item 2</p>
</li>
</ol>
</div>
`
		Expect(RenderHTML(source)).To(MatchHTML(expected))
	})

	// The "+" continuation attaches an extra block to the preceding item.
	Context("with list element continuation", func() {

		It("case 1", func() {
			source := `. item 1
+
foo`
			expected := `<div class="olist arabic">
<ol class="arabic">
<li>
<p>item 1</p>
<div class="paragraph">
<p>foo</p>
</div>
</li>
</ol>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})

		It("case 2", func() {
			source := `. item 1
+
----
foo
----`
			expected := `<div class="olist arabic">
<ol class="arabic">
<li>
<p>item 1</p>
<div class="listingblock">
<div class="content">
<pre>foo</pre>
</div>
</div>
</li>
</ol>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})

		It("case 3", func() {
			source := `. cookie
+
image::cookie.png[]
+
. chocolate
+
image::chocolate.png[]`
			expected := `<div class="olist arabic">
<ol class="arabic">
<li>
<p>cookie</p>
<div class="imageblock">
<div class="content">
<img src="cookie.png" alt="cookie">
</div>
</div>
</li>
<li>
<p>chocolate</p>
<div class="imageblock">
<div class="content">
<img src="chocolate.png" alt="chocolate">
</div>
</div>
</li>
</ol>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})

		It("case 4", func() {
			source := `. In the table, enter the data shown in <<non-uniform-mesh>>
+
[#non-uniform-mesh]
.Non-Uniform Mesh Parameters
[cols="3*^",options="header"]
|===
|Dir (X,Y,Z) |Num Cells |Size
|X |10 |0.1
|Y |10 |0.1
|Y |5 |0.2
|Z |10 |0.1
|===
+
. Click *OK*`
			expected := `<div class="olist arabic">
<ol class="arabic">
<li>
<p>In the table, enter the data shown in <a href="#non-uniform-mesh">Non-Uniform Mesh Parameters</a></p>
<table id="non-uniform-mesh" class="tableblock frame-all grid-all stretch">
<caption class="title">Table 1. Non-Uniform Mesh Parameters</caption>
<colgroup>
<col style="width: 33.3333%;">
<col style="width: 33.3333%;">
<col style="width: 33.3334%;">
</colgroup>
<thead>
<tr>
<th class="tableblock halign-center valign-top">Dir (X,Y,Z)</th>
<th class="tableblock halign-center valign-top">Num Cells</th>
<th class="tableblock halign-center valign-top">Size</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tableblock halign-center valign-top"><p class="tableblock">X</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">10</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">0.1</p></td>
</tr>
<tr>
<td class="tableblock halign-center valign-top"><p class="tableblock">Y</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">10</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">0.1</p></td>
</tr>
<tr>
<td class="tableblock halign-center valign-top"><p class="tableblock">Y</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">5</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">0.2</p></td>
</tr>
<tr>
<td class="tableblock halign-center valign-top"><p class="tableblock">Z</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">10</p></td>
<td class="tableblock halign-center valign-top"><p class="tableblock">0.1</p></td>
</tr>
</tbody>
</table>
</li>
<li>
<p>Click <strong>OK</strong></p>
</li>
</ol>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
	})

	It("with unnumbered items", func() {
		source := `. item 1
.. item 1.1
... item 1.1.1
... item 1.1.2
.. item 1.2
. item 2
.. item 2.1`
		expected := `<div class="olist arabic">
<ol class="arabic">
<li>
<p>item 1</p>
<div class="olist loweralpha">
<ol class="loweralpha" type="a">
<li>
<p>item 1.1</p>
<div class="olist lowerroman">
<ol class="lowerroman" type="i">
<li>
<p>item 1.1.1</p>
</li>
<li>
<p>item 1.1.2</p>
</li>
</ol>
</div>
</li>
<li>
<p>item 1.2</p>
</li>
</ol>
</div>
</li>
<li>
<p>item 2</p>
<div class="olist loweralpha">
<ol class="loweralpha" type="a">
<li>
<p>item 2.1</p>
</li>
</ol>
</div>
</li>
</ol>
</div>
`
		Expect(RenderHTML(source)).To(MatchHTML(expected))
	})

	It("mixed with unordered list - simple case", func() {
		source := `. Linux
* Fedora
* Ubuntu
* Slackware
. BSD
* FreeBSD
* NetBSD`
		expected := `<div class="olist arabic">
<ol class="arabic">
<li>
<p>Linux</p>
<div class="ulist">
<ul>
<li>
<p>Fedora</p>
</li>
<li>
<p>Ubuntu</p>
</li>
<li>
<p>Slackware</p>
</li>
</ul>
</div>
</li>
<li>
<p>BSD</p>
<div class="ulist">
<ul>
<li>
<p>FreeBSD</p>
</li>
<li>
<p>NetBSD</p>
</li>
</ul>
</div>
</li>
</ol>
</div>
`
		Expect(RenderHTML(source)).To(MatchHTML(expected))
	})

	It("mixed with unordered list - complex case", func() {
		source := `- unordered 1
1. ordered 1.1
a. ordered 1.1.a
b. ordered 1.1.b
c. ordered 1.1.c
2. ordered 1.2
i) ordered 1.2.i
ii) ordered 1.2.ii
3. ordered 1.3
4. ordered 1.4
- unordered 2
* unordered 2.1
** unordered 2.1.1
with some
extra lines.
** unordered 2.1.2
* unordered 2.2
- unordered 3
. ordered 3.1
. ordered 3.2
[upperroman]
.. ordered 3.2.I
.. ordered 3.2.II
. ordered 3.3`
		expected := `<div class="ulist">
<ul>
<li>
<p>unordered 1</p>
<div class="olist arabic">
<ol class="arabic">
<li>
<p>ordered 1.1</p>
<div class="olist loweralpha">
<ol class="loweralpha" type="a">
<li>
<p>ordered 1.1.a</p>
</li>
<li>
<p>ordered 1.1.b</p>
</li>
<li>
<p>ordered 1.1.c</p>
</li>
</ol>
</div>
</li>
<li>
<p>ordered 1.2</p>
<div class="olist lowerroman">
<ol class="lowerroman" type="i">
<li>
<p>ordered 1.2.i</p>
</li>
<li>
<p>ordered 1.2.ii</p>
</li>
</ol>
</div>
</li>
<li>
<p>ordered 1.3</p>
</li>
<li>
<p>ordered 1.4</p>
</li>
</ol>
</div>
</li>
<li>
<p>unordered 2</p>
<div class="ulist">
<ul>
<li>
<p>unordered 2.1</p>
<div class="ulist">
<ul>
<li>
<p>unordered 2.1.1
with some
extra lines.</p>
</li>
<li>
<p>unordered 2.1.2</p>
</li>
</ul>
</div>
</li>
<li>
<p>unordered 2.2</p>
</li>
</ul>
</div>
</li>
<li>
<p>unordered 3</p>
<div class="olist arabic">
<ol class="arabic">
<li>
<p>ordered 3.1</p>
</li>
<li>
<p>ordered 3.2</p>
<div class="olist upperroman">
<ol class="upperroman" type="I">
<li>
<p>ordered 3.2.I</p>
</li>
<li>
<p>ordered 3.2.II</p>
</li>
</ol>
</div>
</li>
<li>
<p>ordered 3.3</p>
</li>
</ol>
</div>
</li>
</ul>
</div>
`
		Expect(RenderHTML(source)).To(MatchHTML(expected))
	})

	It("all kinds of lists - complex case 3", func() {
		source := `* foo
1. bar
a. foo
2. baz
* foo2
- bar2`
		expected := `<div class="ulist">
<ul>
<li>
<p>foo</p>
<div class="olist arabic">
<ol class="arabic">
<li>
<p>bar</p>
<div class="olist loweralpha">
<ol class="loweralpha" type="a">
<li>
<p>foo</p>
</li>
</ol>
</div>
</li>
<li>
<p>baz</p>
</li>
</ol>
</div>
</li>
<li>
<p>foo2</p>
<div class="ulist">
<ul>
<li>
<p>bar2</p>
</li>
</ul>
</div>
</li>
</ul>
</div>
`
		Expect(RenderHTML(source)).To(MatchHTML(expected))
	})

	It("drop principal text in list item", func() {
		source := `. {blank}
+
----
print("one")
----
. {blank}
+
----
print("one")
----`
		expected := `<div class="olist arabic">
<ol class="arabic">
<li>
<p></p>
<div class="listingblock">
<div class="content">
<pre>print("one")</pre>
</div>
</div>
</li>
<li>
<p></p>
<div class="listingblock">
<div class="content">
<pre>print("one")</pre>
</div>
</div>
</li>
</ol>
</div>
`
		Expect(RenderHTML(source)).To(MatchHTML(expected))
	})

	// A continuation after a nested item can attach its block to any
	// ancestor item depending on the continuation's indentation level.
	Context("attach to ordered list item ancestor", func() {

		It("attach to grandparent ordered list item", func() {
			source := `. grandparent list item
.. parent list item
... child list item
+
paragraph attached to grandparent list item`
			expected := `<div class="olist arabic">
<ol class="arabic">
<li>
<p>grandparent list item</p>
<div class="olist loweralpha">
<ol class="loweralpha" type="a">
<li>
<p>parent list item</p>
<div class="olist lowerroman">
<ol class="lowerroman" type="i">
<li>
<p>child list item</p>
</li>
</ol>
</div>
</li>
</ol>
</div>
<div class="paragraph">
<p>paragraph attached to grandparent list item</p>
</div>
</li>
</ol>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})

		It("attach to parent ordered list item", func() {
			source := `. grandparent list item
.. parent list item
... child list item
+
paragraph attached to parent list item`
			expected := `<div class="olist arabic">
<ol class="arabic">
<li>
<p>grandparent list item</p>
<div class="olist loweralpha">
<ol class="loweralpha" type="a">
<li>
<p>parent list item</p>
<div class="olist lowerroman">
<ol class="lowerroman" type="i">
<li>
<p>child list item</p>
</li>
</ol>
</div>
<div class="paragraph">
<p>paragraph attached to parent list item</p>
</div>
</li>
</ol>
</div>
</li>
</ol>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})

		It("attach to child ordered list item", func() {
			source := `. grandparent list item
.. parent list item
... child list item
+
paragraph attached to child list item`
			expected := `<div class="olist arabic">
<ol class="arabic">
<li>
<p>grandparent list item</p>
<div class="olist loweralpha">
<ol class="loweralpha" type="a">
<li>
<p>parent list item</p>
<div class="olist lowerroman">
<ol class="lowerroman" type="i">
<li>
<p>child list item</p>
<div class="paragraph">
<p>paragraph attached to child list item</p>
</div>
</li>
</ol>
</div>
</li>
</ol>
</div>
</li>
</ol>
</div>
`
			Expect(RenderHTML(source)).To(MatchHTML(expected))
		})
	})
})
|
// Copyright (c) 2011 Mateusz Czapliński (Go port)
// Copyright (c) 2011 Mahir Iqbal (as3 version)
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// based on http://code.google.com/p/as3polyclip/ (MIT licensed)
// and code by Martínez et al: http://wwwdi.ujaen.es/~fmartin/bool_op.html (public domain)
package polyclip
import (
"fmt"
"math"
)
//func _DBG(f func()) { f() }

// _DBG is a compile-time debug hook: the release version discards the
// closure; swap in the commented-out variant above to execute the debug
// dumps scattered through this file.
func _DBG(f func()) {}
// polygonType tags an edge with which operand polygon it came from.
type polygonType int

const (
	_SUBJECT  polygonType = iota // edge belongs to the subject polygon
	_CLIPPING                    // edge belongs to the clipping polygon
)
// edgeType classifies how an edge contributes to the boolean-operation
// result once overlaps between the two polygons have been analysed.
type edgeType int

const (
	_EDGE_NORMAL               edgeType = iota // ordinary, possibly contributing edge
	_EDGE_NON_CONTRIBUTING                     // overlapped edge excluded from the result
	_EDGE_SAME_TRANSITION                      // overlap where both polygons transition alike
	_EDGE_DIFFERENT_TRANSITION                 // overlap with opposite transitions
)
// This class contains methods for computing clipping operations on polygons.
// It implements the algorithm for polygon intersection given by Francisco Martínez del Río.
// See http://wwwdi.ujaen.es/~fmartin/bool_op.html
type clipper struct {
	subject, clipping Polygon
	// eventQueue holds the segment endpoints, ordered left to right for
	// the sweep performed in compute.
	eventQueue
}
// compute runs the Martínez sweep-line algorithm and returns the result of
// applying the boolean operation (INTERSECTION, UNION, DIFFERENCE or XOR)
// to c.subject and c.clipping.
func (c *clipper) compute(operation Op) Polygon {
	// Test 1 for trivial result case
	if len(c.subject)*len(c.clipping) == 0 {
		// At least one operand is empty.
		switch operation {
		case DIFFERENCE:
			return c.subject.Clone()
		case UNION:
			if len(c.subject) == 0 {
				return c.clipping.Clone()
			}
			return c.subject.Clone()
		}
		return Polygon{}
	}
	// Test 2 for trivial result case
	subjectbb := c.subject.BoundingBox()
	clippingbb := c.clipping.BoundingBox()
	if !subjectbb.Overlaps(clippingbb) {
		// The operands cannot intersect; combine them without sweeping.
		switch operation {
		case DIFFERENCE:
			return c.subject.Clone()
		case UNION:
			result := c.subject.Clone()
			for _, cont := range c.clipping {
				result.Add(cont.Clone())
			}
			return result
		}
		return Polygon{}
	}
	numSegments := 0
	// Add each segment to the eventQueue, sorted from left to right.
	numSegments += addPolygonToQueue(&c.eventQueue, c.subject, _SUBJECT)
	numSegments += addPolygonToQueue(&c.eventQueue, c.clipping, _CLIPPING)
	// NOTE: this local shadows the package's connector type.
	connector := connector{} // to connect the edge solutions
	// This is the sweepline. That is, we go through all the polygon edges
	// by sweeping from left to right.
	S := sweepline{}
	// Right of this X no further INTERSECTION output is possible.
	MINMAX_X := math.Min(subjectbb.Max.X, clippingbb.Max.X)
	_DBG(func() {
		e := c.eventQueue.dequeue()
		c.eventQueue.enqueue(e)
		fmt.Print("\nInitial queue:\n")
		for i, e := range c.eventQueue.elements {
			fmt.Println(i, "=", *e)
		}
	})
	i := 0
	// From the manuscript, the cycle is executed
	// n + 4k times, where n is the number of segments and k is the number of
	// intersections. I believe the maximum k would be about n^2.
	maxPossibleEvents := numSegments + 4*numSegments*numSegments
	for !c.eventQueue.IsEmpty() {
		if i > maxPossibleEvents {
			// NOTE(review): empty body — the event budget is computed but
			// exceeding it does nothing. Presumably a bail-out (or panic)
			// was intended here; confirm before changing behavior.
		}
		i++
		var prev, next *endpoint
		e := c.eventQueue.dequeue()
		_DBG(func() { fmt.Printf("\nProcess event: (%d of %d)\n%v\n", i, len(c.eventQueue.elements)+1, *e) })
		// optimization 1
		switch {
		case operation == INTERSECTION && e.p.X > MINMAX_X:
			fallthrough
		case operation == DIFFERENCE && e.p.X > subjectbb.Max.X:
			// Every remaining event lies beyond any possible output.
			return connector.toPolygon()
		//case operation == UNION && e.p.X > MINMAX_X:
		//	_DBG(func() { fmt.Print("\nUNION optimization, fast quit\n") })
		//	// add all the non-processed line segments to the result
		//	if !e.left {
		//		connector.add(e.segment())
		//	}
		//
		//	for !c.eventQueue.IsEmpty() {
		//		e = c.eventQueue.dequeue()
		//		if !e.left {
		//			connector.add(e.segment())
		//		}
		//	}
		//	return connector.toPolygon()
		}
		if e.left { // the line segment must be inserted into S
			pos := S.insert(e)
			//e.PosInS = pos
			prev = nil
			if pos > 0 {
				prev = S[pos-1]
			}
			next = nil
			if pos < len(S)-1 {
				next = S[pos+1]
			}
			// Compute the inside and inOut flags
			switch {
			case prev == nil: // there is not a previous line segment in S?
				e.inside, e.inout = false, false
			case prev.edgeType != _EDGE_NORMAL:
				if pos-2 < 0 { // e overlaps with prev
					// Not sure how to handle the case when pos - 2 < 0, but judging
					// from the C++ implementation this looks like how it should be handled.
					e.inside, e.inout = false, false
					if prev.polygonType != e.polygonType { // [MC: where does this come from?]
						e.inside = true
					} else {
						e.inout = true
					}
				} else { // the previous two line segments in S are overlapping line segments
					prevTwo := S[pos-2]
					if prev.polygonType == e.polygonType {
						e.inout = !prev.inout
						e.inside = !prevTwo.inout
					} else {
						e.inout = !prevTwo.inout
						e.inside = !prev.inout
					}
				}
			case e.polygonType == prev.polygonType: // previous line segment in S belongs to the same polygon that "e" belongs to
				e.inside = prev.inside
				e.inout = !prev.inout
			default: // previous line segment in S belongs to a different polygon that "e" belongs to
				e.inside = !prev.inout
				e.inout = prev.inside
			}
			_DBG(func() {
				fmt.Println("Status line after left insertion: ")
				for _, e := range S {
					fmt.Println(*e)
				}
			})
			// Process a possible intersection between "e" and its next neighbor in S
			if next != nil {
				c.possibleIntersection(e, next)
			}
			// Process a possible intersection between "e" and its previous neighbor in S
			if prev != nil {
				c.possibleIntersection(prev, e)
				//c.possibleIntersection(&e, prev)
			}
		} else { // the line segment must be removed from S
			otherPos := -1
			for i := range S {
				if S[i].equals(e.other) {
					otherPos = i
					break
				}
			}
			// otherPos := S.IndexOf(e.other)
			// [or:] otherPos := e.other.PosInS
			if otherPos != -1 {
				prev = nil
				if otherPos > 0 {
					prev = S[otherPos-1]
				}
				next = nil
				if otherPos < len(S)-1 {
					next = S[otherPos+1]
				}
			}
			// Check if the line segment belongs to the Boolean operation
			switch e.edgeType {
			case _EDGE_NORMAL:
				switch operation {
				case INTERSECTION:
					if e.other.inside {
						connector.add(e.segment())
					}
				case UNION:
					if !e.other.inside {
						connector.add(e.segment())
					}
				case DIFFERENCE:
					if (e.polygonType == _SUBJECT && !e.other.inside) ||
						(e.polygonType == _CLIPPING && e.other.inside) {
						connector.add(e.segment())
					}
				case XOR:
					connector.add(e.segment())
				}
			case _EDGE_SAME_TRANSITION:
				if operation == INTERSECTION || operation == UNION {
					connector.add(e.segment())
				}
			case _EDGE_DIFFERENT_TRANSITION:
				if operation == DIFFERENCE {
					connector.add(e.segment())
				}
			}
			// delete line segment associated to e from S and check for intersection between the neighbors of "e" in S
			if otherPos != -1 {
				S.remove(S[otherPos])
			}
			if next != nil && prev != nil {
				c.possibleIntersection(next, prev)
			}
			_DBG(func() { fmt.Print("Connector:\n", connector, "\n") })
		}
		_DBG(func() {
			fmt.Println("Status line after processing intersections: ")
			for _, e := range S {
				fmt.Println(*e)
			}
		})
	}
	return connector.toPolygon()
}
// nanPoint is the sentinel "no intersection point" value returned by
// findIntersection when fewer than two intersection points exist.
// Initialized with a direct var initializer instead of an init()
// function — same effect, no package-init plumbing (Effective Go
// recommends init() only for work an initializer expression can't do).
var nanPoint = Point{X: math.NaN(), Y: math.NaN()}
// findIntersection intersects two segments and returns the number of
// intersection points (0, 1 or 2) together with the points themselves.
// Two points are returned only when the segments are collinear and
// overlap; unused points are the nanPoint sentinel.
//
// Fix: the early "parallel s out of range" exit previously returned
// zero-value Points instead of nanPoint, unlike every other
// no-intersection path in this function. Callers must still check the
// count before using the points.
func findIntersection(seg0, seg1 segment) (int, Point, Point) {
	pi0 := nanPoint
	pi1 := nanPoint
	p0 := seg0.start
	d0 := Point{seg0.end.X - p0.X, seg0.end.Y - p0.Y}
	p1 := seg1.start
	d1 := Point{seg1.end.X - p1.X, seg1.end.Y - p1.Y}
	sqrEpsilon := 0. // was 1e-3 earlier
	E := Point{p1.X - p0.X, p1.Y - p0.Y}
	// kross is the 2D cross product of the direction vectors; zero means
	// the segments are parallel.
	kross := d0.X*d1.Y - d0.Y*d1.X
	sqrKross := kross * kross
	// NOTE(review): the names say "sqr" but Length() is used directly —
	// confirm whether Length returns the squared length.
	sqrLen0 := d0.Length()
	sqrLen1 := d1.Length()
	if sqrKross > sqrEpsilon*sqrLen0*sqrLen1 {
		// lines of the segments are not parallel
		s := (E.X*d1.Y - E.Y*d1.X) / kross
		if s < 0 || s > 1 {
			// Intersection lies outside seg0 (was Point{}, Point{} —
			// now the nanPoint sentinel for consistency).
			return 0, nanPoint, nanPoint
		}
		t := (E.X*d0.Y - E.Y*d0.X) / kross
		if t < 0 || t > 1 {
			return 0, nanPoint, nanPoint
		}
		// intersection of lines is a point an each segment [MC: ?]
		pi0.X = p0.X + s*d0.X
		pi0.Y = p0.Y + s*d0.Y
		// [MC: commented fragment removed]
		return 1, pi0, nanPoint
	}
	// lines of the segments are parallel
	sqrLenE := E.Length()
	kross = E.X*d0.Y - E.Y*d0.X
	sqrKross = kross * kross
	if sqrKross > sqrEpsilon*sqrLen0*sqrLenE {
		// lines of the segment are different
		return 0, nanPoint, nanPoint
	}
	// Lines of the segment are the same. Need to test for overlap of segments.
	// s0 = Dot (D0, E) * sqrLen0
	s0 := (d0.X*E.X + d0.Y*E.Y) / sqrLen0
	// s1 = s0 + Dot (D0, D1) * sqrLen0
	s1 := s0 + (d0.X*d1.X+d0.Y*d1.Y)/sqrLen0
	smin := math.Min(s0, s1)
	smax := math.Max(s0, s1)
	// Project the overlap of parameter intervals back onto seg0.
	w := make([]float64, 0, 2)
	imax := findIntersection2(0.0, 1.0, smin, smax, &w)
	if imax > 0 {
		pi0.X = p0.X + w[0]*d0.X
		pi0.Y = p0.Y + w[0]*d0.Y
		// [MC: commented fragment removed]
		if imax > 1 {
			pi1.X = p0.X + w[1]*d0.X
			pi1.Y = p0.Y + w[1]*d0.Y
		}
	}
	return imax, pi0, pi1
}
// findIntersection2 intersects the closed intervals [u0,u1] and [v0,v1].
// It appends the resulting endpoints to *w and returns how many were
// produced: 0 for disjoint intervals, 1 when they touch at a single
// point, 2 for a proper overlap (low endpoint appended first).
func findIntersection2(u0, u1, v0, v1 float64, w *[]float64) int {
	switch {
	case u1 < v0 || u0 > v1:
		// Completely disjoint.
		return 0
	case u1 == v0:
		// Touching: right end of u meets left end of v.
		*w = append(*w, u1)
		return 1
	case u0 == v1:
		// Touching: left end of u meets right end of v.
		*w = append(*w, u0)
		return 1
	}
	// Proper overlap: [max(u0,v0), min(u1,v1)].
	lo := u0
	if u0 < v0 {
		lo = v0
	}
	hi := u1
	if u1 > v1 {
		hi = v1
	}
	*w = append(*w, lo, hi)
	return 2
}
// possibleIntersection tests two sweep-line neighbors for intersection.
// Single-point crossings split the crossed segment(s) via divideSegment;
// collinear overlaps between different polygons additionally mark the
// overlapped pieces with the appropriate edgeType so compute can decide
// whether they contribute to the result.
func (c *clipper) possibleIntersection(e1, e2 *endpoint) {
	// [MC]: commented fragment removed
	numIntersections, ip1, _ := findIntersection(e1.segment(), e2.segment())
	if numIntersections == 0 {
		return
	}
	if numIntersections == 1 && (e1.p.Equals(e2.p) || e1.other.p.Equals(e2.other.p)) {
		return // the line segments intersect at an endpoint of both line segments
	}
	//if numIntersections == 2 && e1.p.Equals(e2.p) {
	if numIntersections == 2 && e1.polygonType == e2.polygonType {
		return // the line segments overlap, but they belong to the same polygon
	}
	if numIntersections == 1 {
		if !e1.p.Equals(ip1) && !e1.other.p.Equals(ip1) {
			// if ip1 is not an endpoint of the line segment associated to e1 then divide "e1"
			c.divideSegment(e1, ip1)
		}
		if !e2.p.Equals(ip1) && !e2.other.p.Equals(ip1) {
			// if ip1 is not an endpoint of the line segment associated to e2 then divide "e2"
			c.divideSegment(e2, ip1)
		}
		return
	}
	// The line segments overlap and belong to different polygons
	// sortedEvents collects the four endpoints in sweep order; a nil entry
	// stands for a coincident endpoint pair.
	sortedEvents := make([]*endpoint, 0, 4)
	switch {
	case e1.p.Equals(e2.p) || e1.p.Equals(e2.other.p):
		sortedEvents = append(sortedEvents, nil) // WTF [MC: WTF]
	case endpointLess(e1, e2):
		sortedEvents = append(sortedEvents, e2, e1)
	default:
		sortedEvents = append(sortedEvents, e1, e2)
	}
	switch {
	case e1.other.p.Equals(e2.other.p) || e1.other.p.Equals(e2.p):
		sortedEvents = append(sortedEvents, nil)
	case endpointLess(e1.other, e2.other):
		sortedEvents = append(sortedEvents, e2.other, e1.other)
	default:
		sortedEvents = append(sortedEvents, e1.other, e2.other)
	}
	if len(sortedEvents) == 2 { // are both line segments equal?
		e1.edgeType, e1.other.edgeType = _EDGE_NON_CONTRIBUTING, _EDGE_NON_CONTRIBUTING
		if e1.inout == e2.inout {
			e2.edgeType, e2.other.edgeType = _EDGE_SAME_TRANSITION, _EDGE_SAME_TRANSITION
		} else {
			e2.edgeType, e2.other.edgeType = _EDGE_DIFFERENT_TRANSITION, _EDGE_DIFFERENT_TRANSITION
		}
		return
	}
	if len(sortedEvents) == 3 { // the line segments share an endpoint
		sortedEvents[1].edgeType, sortedEvents[1].other.edgeType = _EDGE_NON_CONTRIBUTING, _EDGE_NON_CONTRIBUTING
		var idx int
		// is the right endpoint the shared point?
		if sortedEvents[0] != nil {
			idx = 0
		} else { // the shared point is the left endpoint
			idx = 2
		}
		if e1.inout == e2.inout {
			sortedEvents[idx].other.edgeType = _EDGE_SAME_TRANSITION
		} else {
			sortedEvents[idx].other.edgeType = _EDGE_DIFFERENT_TRANSITION
		}
		if sortedEvents[0] != nil {
			c.divideSegment(sortedEvents[0], sortedEvents[1].p)
		} else {
			c.divideSegment(sortedEvents[2].other, sortedEvents[1].p)
		}
		return
	}
	if sortedEvents[0] != sortedEvents[3].other {
		// no line segment includes totally the OtherEnd one
		sortedEvents[1].edgeType = _EDGE_NON_CONTRIBUTING
		if e1.inout == e2.inout {
			sortedEvents[2].edgeType = _EDGE_SAME_TRANSITION
		} else {
			sortedEvents[2].edgeType = _EDGE_DIFFERENT_TRANSITION
		}
		c.divideSegment(sortedEvents[0], sortedEvents[1].p)
		c.divideSegment(sortedEvents[1], sortedEvents[2].p)
		return
	}
	// one line segment includes the other one
	sortedEvents[1].edgeType, sortedEvents[1].other.edgeType = _EDGE_NON_CONTRIBUTING, _EDGE_NON_CONTRIBUTING
	c.divideSegment(sortedEvents[0], sortedEvents[1].p)
	if e1.inout == e2.inout {
		sortedEvents[3].other.edgeType = _EDGE_SAME_TRANSITION
	} else {
		sortedEvents[3].other.edgeType = _EDGE_DIFFERENT_TRANSITION
	}
	c.divideSegment(sortedEvents[3].other, sortedEvents[2].p)
}
// divideSegment splits the segment associated with event e at point p,
// producing two sub-segments, and enqueues the two new events created at
// the split point.
func (c *clipper) divideSegment(e *endpoint, p Point) {
	// "Right event" of the "left line segment" resulting from dividing e (the line segment associated to e)
	r := &endpoint{p: p, left: false, polygonType: e.polygonType, other: e, edgeType: e.edgeType}
	// "Left event" of the "right line segment" resulting from dividing e (the line segment associated to e)
	l := &endpoint{p: p, left: true, polygonType: e.polygonType, other: e.other, edgeType: e.other.edgeType}
	if endpointLess(l, e.other) { // avoid a rounding error. The left event would be processed after the right event
		// println("Oops")
		e.other.left = true
		e.left = false
	}
	// Rewire the original pair so e..r and l..e.other form the two halves.
	e.other.other = l
	e.other = r
	c.eventQueue.enqueue(l)
	c.eventQueue.enqueue(r)
}
// empty is a zero-width set-membership marker.
type empty struct{}

// a polygonGraph holds the points of a polygon in a graph struct.
// The index of the first map is the starting point of each segment
// in the polygon and the index of the second map is the ending point
// of each segment.
type polygonGraph map[Point]map[Point]empty
// addToGraph records seg in the graph while keeping the polygon
// non-degenerate: zero-length segments are ignored, and a segment that
// duplicates an existing one (in either direction) cancels it instead of
// being added.
func addToGraph(g *polygonGraph, seg segment) {
	graph := *g // maps share storage, so mutating graph mutates *g

	// A zero-length "segment" contributes nothing.
	if seg.start.Equals(seg.end) {
		return
	}

	// start -> end already present: adding it again would degenerate the
	// polygon, so cancel the pair instead.
	if _, dup := graph[seg.start][seg.end]; dup {
		delete(graph[seg.start], seg.end)
		return
	}

	// Likewise for the reversed segment end -> start.
	if _, rev := graph[seg.end][seg.start]; rev {
		delete(graph[seg.end], seg.start)
		return
	}

	// First segment leaving seg.start: allocate its adjacency set.
	if graph[seg.start] == nil {
		graph[seg.start] = make(map[Point]empty)
	}
	graph[seg.start][seg.end] = empty{}
}
// addPolygonToQueue adds p to the event queue, returning the number of
// segments that were added.
func addPolygonToQueue(q *eventQueue, p Polygon, polyType polygonType) int {
	// First deduplicate the polygon's segments through a polygonGraph so
	// degenerate/cancelling pairs never reach the queue.
	graph := make(polygonGraph)
	for _, contour := range p {
		for i := range contour {
			addToGraph(&graph, contour.segment(i))
		}
	}

	// Then enqueue every surviving edge.
	count := 0
	for from, ends := range graph {
		for to := range ends {
			addProcessedSegment(q, segment{start: from, end: to}, polyType)
			count++
		}
	}
	return count
}
// addProcessedSegment queues both endpoints of seg, marking whichever
// endpoint sorts first (smaller X; for vertical segments the bottom one)
// as the "left" event.
func addProcessedSegment(q *eventQueue, seg segment, polyType polygonType) {
	e1 := &endpoint{p: seg.start, left: true, polygonType: polyType}
	e2 := &endpoint{p: seg.end, left: true, polygonType: polyType, other: e1}
	e1.other = e2

	if e1.p.X != e2.p.X {
		// Non-vertical: the endpoint with the larger X becomes the right event.
		if e1.p.X < e2.p.X {
			e2.left = false
		} else {
			e1.left = false
		}
	} else if e1.p.Y < e2.p.Y {
		// the line segment is vertical. The bottom endpoint is the left endpoint
		e2.left = false
	} else {
		e1.left = false
	}

	// Pushing it so the queue is sorted from left to right, with object on the left having the highest priority
	q.enqueue(e1)
	q.enqueue(e2)
}
|
package verify
import "testing"
var (
	// threeCharacterNames are strings of exactly length 3, used by the
	// length-validation tests.
	threeCharacterNames = []string{
		"bob",
		"mat",
		"jim",
		"sue",
	}
	// validIntegers should all pass the Int() check.
	validIntegers = []string{
		"123",
		"4",
		"9993",
	}
	// invalidIntegers should all fail the Int() check (whitespace,
	// separators, or non-digit characters).
	invalidIntegers = []string{
		"12 3",
		" 123",
		" 1",
		"1,200",
		"fm",
		"dskq",
		" a",
	}
)
// Test_Length exercises MinLength, MaxLength, Length and IsntLength
// against names whose length is exactly 3.
func Test_Length(t *testing.T) {
	for _, name := range threeCharacterNames {
		// Minimum bounds at or below the actual length pass.
		ok := Verify(name).MinLength(2).IsVerified()
		expect(t, ok, true)
		ok = Verify(name).MinLength(3).IsVerified()
		expect(t, ok, true)
		// Combined min/max windows that include 3 pass.
		ok = Verify(name).MinLength(3).MaxLength(8).IsVerified()
		expect(t, ok, true)
		ok = Verify(name).MinLength(3).MaxLength(3).IsVerified()
		expect(t, ok, true)
		// A window that excludes 3 fails.
		ok = Verify(name).MinLength(4).MaxLength(8).IsVerified()
		expect(t, ok, false)
		// Exact-length checks.
		ok = Verify(name).Length(3).IsVerified()
		expect(t, ok, true)
		ok = Verify(name).Length(2).IsVerified()
		expect(t, ok, false)
		ok = Verify(name).Length(4).IsVerified()
		expect(t, ok, false)
		// Negated exact-length check.
		ok = Verify(name).IsntLength(123).IsVerified()
		expect(t, ok, true)
	}
}
// Test_Integers checks Int and IsntInt against known valid and invalid
// integer strings.
func Test_Integers(t *testing.T) {
	for _, candidate := range validIntegers {
		ok := Verify(candidate).Int().IsVerified()
		expect(t, ok, true)
		ok = Verify(candidate).IsntInt().IsVerified()
		refute(t, ok, true)
	}
	for _, candidate := range invalidIntegers {
		ok := Verify(candidate).Int().IsVerified()
		expect(t, ok, false)
		ok = Verify(candidate).IsntInt().IsVerified()
		refute(t, ok, false)
	}
}
// Test_Is checks exact, case-sensitive string equality.
func Test_Is(t *testing.T) {
	got := Verify("seafood").Is("seafood").IsVerified()
	expect(t, got, true)
	// Case differences must fail.
	got = Verify("seafood").Is("Seafood").IsVerified()
	expect(t, got, false)
	got = Verify("seafood").Is("Poultry").IsVerified()
	expect(t, got, false)
}
// Test_Isnt checks the negated equality matcher.
func Test_Isnt(t *testing.T) {
	got := Verify("seafood").Isnt("poultry").IsVerified()
	expect(t, got, true)
	got = Verify("seafood").Isnt("seafood").IsVerified()
	expect(t, got, false)
}
// Test_IsntEmpty verifies the non-empty check.
func Test_IsntEmpty(t *testing.T) {
	got := Verify("seafood").IsntEmpty().IsVerified()
	expect(t, got, true)
	got = Verify("").IsntEmpty().IsVerified()
	expect(t, got, false)
}
// Test_IsEmpty verifies the empty check.
func Test_IsEmpty(t *testing.T) {
	got := Verify("").IsEmpty().IsVerified()
	expect(t, got, true)
	got = Verify("seafood").IsEmpty().IsVerified()
	expect(t, got, false)
}
// Test_Contains checks substring matching, chained matchers, and the
// case-insensitive IContains variant.
func Test_Contains(t *testing.T) {
	got := Verify("team").Contains("i").IsVerified()
	expect(t, got, false)
	// Chained Contains calls must all hold.
	got = Verify("team").Contains("e").Contains("a").IsVerified()
	expect(t, got, true)
	got = Verify("team").Length(4).Contains("e").IsVerified()
	expect(t, got, true)
	// One failing link (MinLength 5) fails the whole chain.
	got = Verify("team").MaxLength(20).MinLength(5).Contains("e").IsVerified()
	expect(t, got, false)
	// IContains is case-insensitive.
	got = Verify("team").MaxLength(8).MinLength(2).IContains("E").IsVerified()
	expect(t, got, true)
}
// Test_DoesntContain checks the negated substring matchers.
func Test_DoesntContain(t *testing.T) {
	got := Verify("team").DoesntContain("i").IsVerified()
	expect(t, got, true)
	got = Verify("team").DoesntContain("e").IsVerified()
	expect(t, got, false)
	// Case-insensitive negated variant.
	got = Verify("team").MaxLength(8).IDoesntContain("yy").IsVerified()
	expect(t, got, true)
}
// Test_IsIn checks set membership (IsIn) and its negation (IsntIn).
func Test_IsIn(t *testing.T) {
	words := []string{
		"There",
		"is",
		"no",
		"I",
		"in",
		"team",
	}
	got := Verify("team").IsIn(words).IsVerified()
	expect(t, got, true)
	got = Verify("eye").IsIn(words).IsVerified()
	expect(t, got, false)
	got = Verify("eye").IsntIn(words).IsVerified()
	expect(t, got, true)
	got = Verify("team").IsntIn(words).IsVerified()
	expect(t, got, false)
}
|
package elf
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"os"
"strconv"
)
// formatterFunc renders one header field as a human-readable string.
// a is the ELF class byte read from offset 0x04 (1 = 32-bit, 2 = 64-bit);
// b is the header buffer starting at the field's offset.
type formatterFunc func(a byte, b ...byte) string

// field ties a header byte offset to its JSON field name and an optional
// formatter (nil means defaultFormatter is used).
type field struct {
	offset byte
	name   string
	fn     formatterFunc
}

const (
	// bufSize is how many leading bytes of the file are read; 64 bytes
	// covers the full 64-bit ELF header.
	bufSize = 64
)

// HeaderInfo is the decoded, human-readable ELF header as produced by
// ReadHeaderInfo.
type HeaderInfo struct {
	Magic      string `json:"magic"`
	Class      string `json:"class"`
	Endianness string `json:"endianness"`
	Version    string `json:"version"`
	ABI        string `json:"abi"`
	ABIVersion string `json:"abi_version"`
	Type       string `json:"type"`
	Machine    string `json:"machine"`
	Entry      string `json:"entry"`
}
// fields lists the ELF header slots decoded by ReadHeaderInfo: the byte
// offset of each field, its JSON name, and an optional formatter (nil falls
// back to defaultFormatter).
var fields = []field{
	{0x00, "magic", fmtMagic},
	{0x04, "class", func(a byte, b ...byte) string {
		switch b[0] {
		case 1:
			return "32-Bit"
		case 2:
			return "64-Bit"
		default:
			return "Unknown"
		}
	}},
	{0x05, "endianness", func(a byte, b ...byte) string {
		switch b[0] {
		case 1:
			return "Little Endian"
		case 2:
			return "Big Endian"
		default:
			return "Unknown"
		}
	}},
	{0x06, "version", func(a byte, b ...byte) string {
		if b[0] == 1 {
			return "1 (Original)"
		}
		return strconv.Itoa(int(b[0]))
	}},
	{0x07, "abi", fmtABI},
	{0x08, "abi_version", nil},
	{0x10, "type", fmtObjectType},
	{0x12, "machine", fmtMachineType},
	{0x18, "entry", fmtEntry},
}
// defaultFormatter renders the first data byte as a decimal string; it is
// used for fields without a dedicated formatter. The class byte a is unused.
// An empty byte slice yields "".
func defaultFormatter(a byte, b ...byte) string {
	if len(b) > 0 {
		return strconv.Itoa(int(b[0]))
	}
	return ""
}
// fmtMagic returns the three ASCII magic characters ("ELF" for a valid file)
// when the leading byte is 0x7f, and "" otherwise.
func fmtMagic(a byte, b ...byte) string {
	if b[0] != 0x7f {
		return ""
	}
	return string(b[1:4])
}
// fmtEntry decodes the little-endian entry-point address and renders it as a
// hex string ("%#x"). The class byte a selects the field width: 4 bytes for
// 32-bit, 8 bytes for 64-bit; anything else (or too few data bytes) yields
// "0x0".
func fmtEntry(a byte, b ...byte) string {
	width := 0
	switch a {
	case 1: // ELFCLASS32
		width = 4
	case 2: // ELFCLASS64
		width = 8
	}
	var address uint64
	if len(b) >= width {
		// b[0] is the least-significant byte; fold in from the top down.
		for i := width - 1; i >= 0; i-- {
			address = address<<8 | uint64(b[i])
		}
	}
	return fmt.Sprintf("%#x", address)
}
// fmtABI maps the EI_OSABI byte (header offset 0x07) to a human-readable
// OS/ABI name. Unrecognized values yield "Unknown ABI".
func fmtABI(a byte, b ...byte) string {
	switch b[0] {
	case 0x00:
		return "System V"
	case 0x01:
		return "HP-UX"
	case 0x02:
		return "NetBSD"
	case 0x03:
		return "Linux"
	case 0x06:
		return "Solaris"
	case 0x07:
		return "AIX"
	case 0x08:
		return "IRIX"
	case 0x09:
		return "FreeBSD"
	case 0x0C:
		return "OpenBSD"
	case 0x0D:
		return "OpenVMS"
	}
	// Bug fix: this previously said "Unknown API"; the field is the ABI.
	return "Unknown ABI"
}
// fmtObjectType maps the e_type byte (header offset 0x10) to an object-file
// kind; unrecognized values yield "Unknown".
func fmtObjectType(a byte, b ...byte) string {
	types := map[byte]string{
		1: "Relocatable",
		2: "Executable",
		3: "Shared",
		4: "Core",
	}
	if name, ok := types[b[0]]; ok {
		return name
	}
	return "Unknown"
}
// fmtMachineType maps the low byte of e_machine (header offset 0x12) to an
// architecture name; unrecognized values yield "Unknown".
func fmtMachineType(a byte, b ...byte) string {
	machines := map[byte]string{
		0x02: "SPARC",
		0x03: "x86",
		0x08: "MIPS",
		0x14: "PowerPC",
		0x28: "ARM",
		0x2A: "SuperH",
		0x32: "IA-64",
		0x3E: "x86-64",
		0xB7: "AArch64",
	}
	if name, ok := machines[b[0]]; ok {
		return name
	}
	return "Unknown"
}
// ReadHeaderInfo decodes the ELF header of the file at path into a HeaderInfo.
//
// It reads the first bufSize (64) bytes, runs each entry of fields through its
// formatter, and maps the resulting name->string pairs onto the struct via
// JSON. It returns an error when the file cannot be opened or fully read, or
// when the magic bytes do not identify an ELF file.
func ReadHeaderInfo(path string) (*HeaderInfo, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	reader := bufio.NewReader(f)
	var buf = [bufSize]byte{}
	// Bug fix: a single Read is not guaranteed to fill the buffer; the old
	// code treated any short read as "Number of bytes not matches". Keep
	// reading until all bufSize bytes are in; EOF before that still errors.
	for read := 0; read < bufSize; {
		n, err := reader.Read(buf[read:])
		if err != nil {
			return nil, err
		}
		if n == 0 {
			return nil, errors.New("Number of bytes not matches")
		}
		read += n
	}
	m := map[string]interface{}{}
	// Offset 0x04 holds the class byte (1 = 32-bit, 2 = 64-bit); formatters
	// need it to know how wide multi-byte fields are.
	archCode := buf[0x04]
	for _, t := range fields {
		b := buf[t.offset:]
		fn := t.fn
		if fn == nil {
			fn = defaultFormatter
		}
		m[t.name] = fn(archCode, b...)
	}
	// Round-trip the map through JSON so the struct's json tags do the
	// field mapping.
	b, err := json.Marshal(m)
	if err != nil {
		return nil, err
	}
	h := HeaderInfo{}
	if err := json.Unmarshal(b, &h); err != nil {
		return nil, err
	}
	if h.Magic != "ELF" {
		return nil, errors.New("Not valid ELF file")
	}
	return &h, nil
}
|
package main
import (
"log"
"time"
)
// main logs four ticks, one second apart, then exits.
func main() {
	// time.NewTicker instead of time.Tick so the ticker can be stopped and
	// its resources released when main returns.
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()
	for i := 0; i < 4; i++ {
		log.Println(i, ":", <-ticker.C)
	}
}
|
package smaato
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/buger/jsonparser"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/adapters"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/errortypes"
"github.com/prebid/prebid-server/metrics"
"github.com/prebid/prebid-server/openrtb_ext"
"github.com/prebid/prebid-server/util/timeutil"
)
// clientVersion is reported to Smaato in BidRequest.Ext (see setExt).
const clientVersion = "prebid_server_0.6"

// adMarkupType identifies how a bid's AdM payload is encoded; it is taken
// from the X-Smt-Adtype response header or sniffed from the markup prefix
// (see getAdMarkupType).
type adMarkupType string

const (
	smtAdTypeImg       adMarkupType = "Img"
	smtAdTypeRichmedia adMarkupType = "Richmedia"
	smtAdTypeVideo     adMarkupType = "Video"
	smtAdTypeNative    adMarkupType = "Native"
)

// adapter describes a Smaato prebid server adapter.
type adapter struct {
	clock    timeutil.Time // injectable clock, used for bid-TTL computation
	endpoint string        // Smaato bid endpoint URL
}

// userExtData defines User.Ext.Data object for Smaato.
type userExtData struct {
	Keywords string `json:"keywords"`
	Gender   string `json:"gender"`
	Yob      int64  `json:"yob"`
}

// siteExt defines Site.Ext object for Smaato.
type siteExt struct {
	Data siteExtData `json:"data"`
}

type siteExtData struct {
	Keywords string `json:"keywords"`
}

// bidRequestExt defines BidRequest.Ext object for Smaato.
type bidRequestExt struct {
	Client string `json:"client"`
}

// bidExt defines Bid.Ext object for Smaato.
type bidExt struct {
	Duration int `json:"duration"`
}

// videoExt defines Video.Ext object for Smaato.
type videoExt struct {
	Context string `json:"context,omitempty"`
}
// Builder builds a new instance of the Smaato adapter for the given bidder
// with the given config. The bidderName and server arguments are accepted for
// interface compatibility but unused; the adapter only needs the endpoint and
// a real-time clock.
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {
	bidder := &adapter{
		clock:    &timeutil.RealTime{},
		endpoint: config.Endpoint,
	}
	return bidder, nil
}
// MakeRequests makes the HTTP requests which should be made to fetch bids.
// Video (AdPod) entry points produce one request per pod; all other entry
// points produce one request per impression and media type.
func (adapter *adapter) MakeRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) {
	if len(request.Imp) == 0 {
		return nil, []error{&errortypes.BadInput{Message: "No impressions in bid request."}}
	}
	// Set data in the request that is common to all outgoing requests.
	if err := prepareCommonRequest(request); err != nil {
		return nil, []error{err}
	}
	if reqInfo.PbsEntryPoint == metrics.ReqTypeVideo {
		return adapter.makePodRequests(request)
	}
	return adapter.makeIndividualRequests(request)
}
// MakeBids unpacks the server's response into Bids.
//
// A 204 means "no bid" and yields (nil, nil); any status other than 200 is
// reported as a server error. For each bid, the ad markup is re-rendered
// according to its markup type (from the X-Smt-Adtype header or sniffed from
// the markup), the media type is derived, and the expiry is taken from the
// X-Smt-Expires header (300s default). Bids failing any step are skipped and
// the error collected.
func (adapter *adapter) MakeBids(internalRequest *openrtb2.BidRequest, externalRequest *adapters.RequestData, response *adapters.ResponseData) (*adapters.BidderResponse, []error) {
	if response.StatusCode == http.StatusNoContent {
		return nil, nil
	}
	if response.StatusCode != http.StatusOK {
		return nil, []error{&errortypes.BadServerResponse{
			Message: fmt.Sprintf("Unexpected status code: %d. Run with request.debug = 1 for more info.", response.StatusCode),
		}}
	}
	var bidResp openrtb2.BidResponse
	if err := json.Unmarshal(response.Body, &bidResp); err != nil {
		return nil, []error{err}
	}
	bidResponse := adapters.NewBidderResponseWithBidsCapacity(5)
	var errors []error
	for _, seatBid := range bidResp.SeatBid {
		for i := 0; i < len(seatBid.Bid); i++ {
			// Work on a copy so the pointer appended below does not alias
			// the slice storage.
			bid := seatBid.Bid[i]
			adMarkupType, err := getAdMarkupType(response, bid.AdM)
			if err != nil {
				errors = append(errors, err)
				continue
			}
			bid.AdM, err = renderAdMarkup(adMarkupType, bid.AdM)
			if err != nil {
				errors = append(errors, err)
				continue
			}
			bidType, err := convertAdMarkupTypeToMediaType(adMarkupType)
			if err != nil {
				errors = append(errors, err)
				continue
			}
			bidVideo, err := buildBidVideo(&bid, bidType)
			if err != nil {
				errors = append(errors, err)
				continue
			}
			// TTL in seconds, from the response header or the 300s default.
			bid.Exp = adapter.getTTLFromHeaderOrDefault(response)
			bidResponse.Bids = append(bidResponse.Bids, &adapters.TypedBid{
				Bid:      &bid,
				BidType:  bidType,
				BidVideo: bidVideo,
			})
		}
	}
	return bidResponse, errors
}
// makeIndividualRequests builds one outgoing request per impression and media
// type: each impression is split into single-media-type copies, and each copy
// becomes its own request. Failures are collected per impression so one bad
// imp does not abort the rest.
func (adapter *adapter) makeIndividualRequests(request *openrtb2.BidRequest) ([]*adapters.RequestData, []error) {
	imps := request.Imp
	requests := make([]*adapters.RequestData, 0, len(imps))
	errors := make([]error, 0, len(imps))
	for _, imp := range imps {
		impsByMediaType, err := splitImpressionsByMediaType(&imp)
		if err != nil {
			errors = append(errors, err)
			continue
		}
		for _, impByMediaType := range impsByMediaType {
			// The shared request object is reused; only Imp is swapped per
			// outgoing request.
			request.Imp = []openrtb2.Imp{impByMediaType}
			if err := prepareIndividualRequest(request); err != nil {
				errors = append(errors, err)
				continue
			}
			requestData, err := adapter.makeRequest(request)
			if err != nil {
				errors = append(errors, err)
				continue
			}
			requests = append(requests, requestData)
		}
	}
	return requests, errors
}
// splitImpressionsByMediaType fans an impression out into up to three copies,
// each carrying exactly one media type (Banner, Video, or Native). Returns
// BadInput when the imp has none of the three.
//
// Bug fix: the Native branch previously nilled out fields on the caller's imp
// instead of a copy, unlike the Banner/Video branches; all branches now work
// on copies so the input is never mutated.
func splitImpressionsByMediaType(imp *openrtb2.Imp) ([]openrtb2.Imp, error) {
	if imp.Banner == nil && imp.Video == nil && imp.Native == nil {
		return nil, &errortypes.BadInput{Message: "Invalid MediaType. Smaato only supports Banner, Video and Native."}
	}
	imps := make([]openrtb2.Imp, 0, 3)
	if imp.Banner != nil {
		impCopy := *imp
		impCopy.Video = nil
		impCopy.Native = nil
		imps = append(imps, impCopy)
	}
	if imp.Video != nil {
		impCopy := *imp
		impCopy.Banner = nil
		impCopy.Native = nil
		imps = append(imps, impCopy)
	}
	if imp.Native != nil {
		impCopy := *imp
		impCopy.Banner = nil
		impCopy.Video = nil
		imps = append(imps, impCopy)
	}
	return imps, nil
}
// makePodRequests builds one outgoing request per ad pod. Impressions are
// grouped by the pod key encoded in their IDs (the part before "_"), and pods
// are emitted in order of first appearance in the incoming request.
func (adapter *adapter) makePodRequests(request *openrtb2.BidRequest) ([]*adapters.RequestData, []error) {
	pods, orderedKeys, errors := groupImpressionsByPod(request.Imp)
	requests := make([]*adapters.RequestData, 0, len(pods))
	for _, key := range orderedKeys {
		request.Imp = pods[key]
		if err := preparePodRequest(request); err != nil {
			errors = append(errors, err)
			continue
		}
		requestData, err := adapter.makeRequest(request)
		if err != nil {
			errors = append(errors, err)
			continue
		}
		requests = append(requests, requestData)
	}
	return requests, errors
}
// makeRequest serializes the bid request and wraps it as a JSON POST to the
// configured Smaato endpoint.
func (adapter *adapter) makeRequest(request *openrtb2.BidRequest) (*adapters.RequestData, error) {
	body, err := json.Marshal(request)
	if err != nil {
		return nil, err
	}
	headers := http.Header{}
	headers.Add("Content-Type", "application/json;charset=utf-8")
	headers.Add("Accept", "application/json")
	return &adapters.RequestData{
		Method:  http.MethodPost,
		Uri:     adapter.endpoint,
		Body:    body,
		Headers: headers,
	}, nil
}
// getAdMarkupType determines how the ad markup is encoded: the X-Smt-Adtype
// response header wins when present; otherwise the markup prefix is sniffed.
// Unrecognizable markup yields a BadServerResponse error.
func getAdMarkupType(response *adapters.ResponseData, adMarkup string) (adMarkupType, error) {
	if headerType := adMarkupType(response.Headers.Get("X-Smt-Adtype")); headerType != "" {
		return headerType, nil
	}
	switch {
	case strings.HasPrefix(adMarkup, `{"image":`):
		return smtAdTypeImg, nil
	case strings.HasPrefix(adMarkup, `{"richmedia":`):
		return smtAdTypeRichmedia, nil
	case strings.HasPrefix(adMarkup, `<?xml`):
		return smtAdTypeVideo, nil
	case strings.HasPrefix(adMarkup, `{"native":`):
		return smtAdTypeNative, nil
	}
	return "", &errortypes.BadServerResponse{
		Message: fmt.Sprintf("Invalid ad markup %s.", adMarkup),
	}
}
// getTTLFromHeaderOrDefault returns the bid expiry in seconds. When the
// X-Smt-Expires header carries a parseable epoch-milliseconds value, the
// remaining lifetime relative to the adapter clock is used (clamped at 0);
// otherwise a default of 300 seconds applies.
func (adapter *adapter) getTTLFromHeaderOrDefault(response *adapters.ResponseData) int64 {
	ttl := int64(300)
	if expiresAtMillis, err := strconv.ParseInt(response.Headers.Get("X-Smt-Expires"), 10, 64); err == nil {
		nowMillis := adapter.clock.Now().UnixNano() / 1000000
		ttl = (expiresAtMillis - nowMillis) / 1000
		if ttl < 0 {
			ttl = 0
		}
	}
	return ttl
}
// renderAdMarkup converts Smaato's ad markup into the form prebid expects:
// image and rich-media payloads are unwrapped from their JSON envelopes,
// native markup is extracted, and video (VAST XML) passes through unchanged.
// An unknown markup type yields a BadServerResponse error.
func renderAdMarkup(adMarkupType adMarkupType, adMarkup string) (string, error) {
	switch adMarkupType {
	case smtAdTypeImg:
		return extractAdmImage(adMarkup)
	case smtAdTypeRichmedia:
		return extractAdmRichMedia(adMarkup)
	case smtAdTypeVideo:
		return adMarkup, nil
	case smtAdTypeNative:
		return extractAdmNative(adMarkup)
	default:
		return "", &errortypes.BadServerResponse{
			Message: fmt.Sprintf("Unknown markup type %s.", adMarkupType),
		}
	}
}
// convertAdMarkupTypeToMediaType maps a Smaato ad-markup type onto the prebid
// media type: images and rich media are banner, video is video, native is
// native. An unknown markup type yields a BadServerResponse error.
func convertAdMarkupTypeToMediaType(adMarkupType adMarkupType) (openrtb_ext.BidType, error) {
	switch adMarkupType {
	case smtAdTypeImg, smtAdTypeRichmedia:
		return openrtb_ext.BidTypeBanner, nil
	case smtAdTypeVideo:
		return openrtb_ext.BidTypeVideo, nil
	case smtAdTypeNative:
		return openrtb_ext.BidTypeNative, nil
	}
	return "", &errortypes.BadServerResponse{
		Message: fmt.Sprintf("Unknown markup type %s.", adMarkupType),
	}
}
// prepareCommonRequest applies the mutations shared by all outgoing requests:
// flatten user.ext.data into first-class User fields, hoist site keywords,
// shallow-copy App, and stamp the client version into request.ext.
func prepareCommonRequest(request *openrtb2.BidRequest) error {
	if err := setUser(request); err != nil {
		return err
	}
	if err := setSite(request); err != nil {
		return err
	}
	setApp(request)
	return setExt(request)
}
// prepareIndividualRequest finishes a single-impression request: it resolves
// the publisher ID onto Site/App and rewrites the imp for its adspace.
// Assumes request.Imp holds exactly one element.
func prepareIndividualRequest(request *openrtb2.BidRequest) error {
	imp := &request.Imp[0]
	if err := setPublisherId(request, imp); err != nil {
		return err
	}
	return setImpForAdspace(imp)
}
// preparePodRequest finishes an ad-pod request: it resolves the publisher ID
// from the pod's first impression, then rewrites all impressions for the ad
// break.
func preparePodRequest(request *openrtb2.BidRequest) error {
	if len(request.Imp) < 1 {
		return &errortypes.BadInput{Message: "No impressions in bid request."}
	}
	if err := setPublisherId(request, &request.Imp[0]); err != nil {
		return err
	}
	return setImpForAdBreak(request.Imp)
}
// setUser flattens user.ext.data into the first-class User fields Smaato
// expects (gender, yob, keywords), then strips the "data" key out of user.ext
// and re-marshals the remainder. The User struct is shallow-copied so the
// caller's original request.User is never mutated. No-op when user or
// user.ext is absent; returns BadInput for malformed user.ext / user.ext.data.
func setUser(request *openrtb2.BidRequest) error {
	if request.User != nil && request.User.Ext != nil {
		var userExtRaw map[string]json.RawMessage
		if err := json.Unmarshal(request.User.Ext, &userExtRaw); err != nil {
			return &errortypes.BadInput{Message: "Invalid user.ext."}
		}
		if userExtDataRaw, present := userExtRaw["data"]; present {
			var err error
			var userExtData userExtData
			if err = json.Unmarshal(userExtDataRaw, &userExtData); err != nil {
				return &errortypes.BadInput{Message: "Invalid user.ext.data."}
			}
			userCopy := *request.User
			// Only overwrite User fields that are actually set in ext.data.
			if userExtData.Gender != "" {
				userCopy.Gender = userExtData.Gender
			}
			if userExtData.Yob != 0 {
				userCopy.Yob = userExtData.Yob
			}
			if userExtData.Keywords != "" {
				userCopy.Keywords = userExtData.Keywords
			}
			delete(userExtRaw, "data")
			if userCopy.Ext, err = json.Marshal(userExtRaw); err != nil {
				return err
			}
			request.User = &userCopy
		}
	}
	return nil
}
// setExt stamps the outgoing request's ext with the Smaato prebid-server
// client version.
func setExt(request *openrtb2.BidRequest) error {
	ext, err := json.Marshal(bidRequestExt{Client: clientVersion})
	request.Ext = ext
	return err
}
// setSite shallow-copies request.Site, promotes site.ext.data.keywords into
// Site.Keywords, and drops site.ext from the outgoing request. Returns
// BadInput when site.ext is not valid JSON; no-op when Site is absent.
func setSite(request *openrtb2.BidRequest) error {
	if request.Site != nil {
		siteCopy := *request.Site
		if request.Site.Ext != nil {
			var siteExt siteExt
			if err := json.Unmarshal(request.Site.Ext, &siteExt); err != nil {
				return &errortypes.BadInput{Message: "Invalid site.ext."}
			}
			siteCopy.Keywords = siteExt.Data.Keywords
			siteCopy.Ext = nil
		}
		request.Site = &siteCopy
	}
	return nil
}
// setApp replaces request.App with a shallow copy so later per-request
// mutations (e.g. setting the publisher) do not leak into the caller's
// original App object. No-op when App is absent.
func setApp(request *openrtb2.BidRequest) {
	if request.App == nil {
		return
	}
	appCopy := *request.App
	request.App = &appCopy
}
// setPublisherId reads the bidder's publisherId out of imp.ext and attaches
// it as the Publisher of Site (preferred) or App. Returns BadInput when the
// parameter is missing or when neither Site nor App is present.
func setPublisherId(request *openrtb2.BidRequest, imp *openrtb2.Imp) error {
	publisherID, err := jsonparser.GetString(imp.Ext, "bidder", "publisherId")
	if err != nil {
		return &errortypes.BadInput{Message: "Missing publisherId parameter."}
	}
	publisher := &openrtb2.Publisher{ID: publisherID}
	switch {
	case request.Site != nil:
		// Site is already a copy (see setSite).
		request.Site.Publisher = publisher
	case request.App != nil:
		// App is already a copy (see setApp).
		request.App.Publisher = publisher
	default:
		return &errortypes.BadInput{Message: "Missing Site/App."}
	}
	return nil
}
// setImpForAdspace rewrites a single-media-type imp for an individual
// (non-pod) request: TagID becomes the bidder's adspaceId and imp.ext is
// reduced to the whitelisted extra data (see makeImpExt). For banners,
// missing W/H are filled in from the first Format entry.
func setImpForAdspace(imp *openrtb2.Imp) error {
	adSpaceID, err := jsonparser.GetString(imp.Ext, "bidder", "adspaceId")
	if err != nil {
		return &errortypes.BadInput{Message: "Missing adspaceId parameter."}
	}
	impExt, err := makeImpExt(&imp.Ext)
	if err != nil {
		return err
	}
	if imp.Banner != nil {
		bannerCopy, err := setBannerDimension(imp.Banner)
		if err != nil {
			return err
		}
		imp.Banner = bannerCopy
		imp.TagID = adSpaceID
		imp.Ext = impExt
		return nil
	}
	if imp.Video != nil || imp.Native != nil {
		imp.TagID = adSpaceID
		imp.Ext = impExt
		return nil
	}
	// No supported media type: leave the imp untouched. Callers split imps
	// by media type beforehand, so this branch should be unreachable.
	return nil
}
// setImpForAdBreak rewrites a pod's impressions for an ad-break request: each
// imp gets the shared adbreakId as TagID and a copy of its Video carrying a
// 1-based Sequence plus an "adpod" context; the filtered imp.ext survives only
// on the first imp. Assumes every imp has non-nil Video, which
// groupImpressionsByPod guarantees.
func setImpForAdBreak(imps []openrtb2.Imp) error {
	if len(imps) < 1 {
		return &errortypes.BadInput{Message: "No impressions in bid request."}
	}
	adBreakID, err := jsonparser.GetString(imps[0].Ext, "bidder", "adbreakId")
	if err != nil {
		return &errortypes.BadInput{Message: "Missing adbreakId parameter."}
	}
	impExt, err := makeImpExt(&imps[0].Ext)
	if err != nil {
		return err
	}
	for i := range imps {
		imps[i].TagID = adBreakID
		imps[i].Ext = nil
		// Copy the video so the caller's original Video is not mutated.
		videoCopy := *(imps[i].Video)
		videoCopy.Sequence = int8(i + 1)
		// Marshaling this fixed struct should not fail; error intentionally
		// dropped.
		videoCopy.Ext, _ = json.Marshal(&videoExt{Context: "adpod"})
		imps[i].Video = &videoCopy
	}
	imps[0].Ext = impExt
	return nil
}
// makeImpExt filters the raw imp.ext down to the extra fields Smaato passes
// through (e.g. skadn, after validating it is a JSON object). Returns nil
// when nothing remains, so an empty ext is dropped from the outgoing imp.
func makeImpExt(impExtRaw *json.RawMessage) (json.RawMessage, error) {
	var impExt openrtb_ext.ExtImpExtraDataSmaato
	if err := json.Unmarshal(*impExtRaw, &impExt); err != nil {
		return nil, &errortypes.BadInput{Message: "Invalid imp.ext."}
	}
	if skadnRaw := impExt.Skadn; skadnRaw != nil {
		var skadn map[string]json.RawMessage
		if err := json.Unmarshal(skadnRaw, &skadn); err != nil {
			return nil, &errortypes.BadInput{Message: "Invalid imp.ext.skadn."}
		}
	}
	filtered, err := json.Marshal(impExt)
	if err != nil {
		return nil, err
	}
	if string(filtered) == "{}" {
		return nil, nil
	}
	return filtered, nil
}
// setBannerDimension guarantees the banner carries explicit W/H: banners that
// already have both are returned unchanged; otherwise a copy sized from the
// first Format entry is returned. With neither W/H nor Format entries, a
// BadInput error is returned alongside the original banner.
func setBannerDimension(banner *openrtb2.Banner) (*openrtb2.Banner, error) {
	if banner.W != nil && banner.H != nil {
		return banner, nil
	}
	if len(banner.Format) == 0 {
		return banner, &errortypes.BadInput{Message: "No sizes provided for Banner."}
	}
	sized := *banner
	sized.W = openrtb2.Int64Ptr(banner.Format[0].W)
	sized.H = openrtb2.Int64Ptr(banner.Format[0].H)
	return &sized, nil
}
// groupImpressionsByPod buckets video impressions by the pod key encoded in
// their IDs (the substring before the first "_"). It returns the buckets, the
// pod keys in first-appearance order, and one error per non-video imp.
func groupImpressionsByPod(imps []openrtb2.Imp) (map[string]([]openrtb2.Imp), []string, []error) {
	pods := make(map[string][]openrtb2.Imp)
	orderKeys := make([]string, 0)
	errs := make([]error, 0, len(imps))
	for _, imp := range imps {
		if imp.Video == nil {
			errs = append(errs, &errortypes.BadInput{Message: "Invalid MediaType. Smaato only supports Video for AdPod."})
			continue
		}
		podKey, _, _ := strings.Cut(imp.ID, "_")
		if _, seen := pods[podKey]; !seen {
			orderKeys = append(orderKeys, podKey)
		}
		pods[podKey] = append(pods[podKey], imp)
	}
	return pods, orderKeys, errs
}
// buildBidVideo assembles the prebid video metadata for a video bid: the
// duration comes from bid.ext and the first IAB category becomes the primary
// category. Non-video bids and bids without ext yield (nil, nil); malformed
// bid.ext yields a BadServerResponse error.
func buildBidVideo(bid *openrtb2.Bid, bidType openrtb_ext.BidType) (*openrtb_ext.ExtBidPrebidVideo, error) {
	if bidType != openrtb_ext.BidTypeVideo {
		return nil, nil
	}
	if bid.Ext == nil {
		return nil, nil
	}
	var primaryCategory string
	if len(bid.Cat) > 0 {
		primaryCategory = bid.Cat[0]
	}
	var bidExt bidExt
	if err := json.Unmarshal(bid.Ext, &bidExt); err != nil {
		return nil, &errortypes.BadServerResponse{Message: "Invalid bid.ext."}
	}
	return &openrtb_ext.ExtBidPrebidVideo{
		Duration:        bidExt.Duration,
		PrimaryCategory: primaryCategory,
	}, nil
}
|
package assert
import "testing"
// TestNotNilValue exercises the NotNilValue matcher: non-nil values
// (including zero values) must pass, nil and typed-nil pointers must fail,
// and failure messages must render with and without a custom prefix.
func TestNotNilValue(t *testing.T) {
	{
		err := AssertThat(t, NotNilValue())
		if err != nil {
			t.Fatal("expect nil")
		}
	}
	{
		err := AssertThat(0, NotNilValue())
		if err != nil {
			t.Fatal("expect nil")
		}
	}
	{
		err := AssertThat("", NotNilValue())
		if err != nil {
			t.Fatal("expect nil")
		}
	}
	{
		err := AssertThat(nil, NotNilValue())
		if err == nil {
			// Bug fix: this branch previously said "expect nil" although it
			// fires when the expected error is missing.
			t.Fatal("expect not nil")
		}
	}
	{
		var foo *string
		err := AssertThat(foo, NotNilValue())
		if err == nil {
			t.Fatal("expect not nil")
		}
	}
	{
		err := AssertThat(nil, NotNilValue())
		expectedValue := "expected not nil value"
		if err.Error() != expectedValue {
			t.Fatalf("error message mismatch, expected '%v' but was '%v'", expectedValue, err.Error())
		}
	}
	{
		err := AssertThat(nil, NotNilValue().Message("msg"))
		expectedValue := "msg, expected not nil value"
		if err.Error() != expectedValue {
			t.Fatalf("error message mismatch, expected '%v' but was '%v'", expectedValue, err.Error())
		}
	}
}
|
package config
import (
"fmt"
"github.com/spf13/viper"
"os"
)
type (
	// MongodbConfig holds MongoDB connection settings.
	MongodbConfig struct{
		Database string `mapstructure:"database"`
		Host     string `mapstructure:"host"`
	}
	// SwaggerConfig holds swagger document settings.
	SwaggerConfig struct{
		Host     string `mapstructure:"host"`
		Version  string `mapstructure:"version"`
		BasePath string `mapstructure:"base_path"`
	}
	// LogConfig holds logging settings.
	LogConfig struct {
		LogFilePath   string `mapstructure:"log_file"`
		LogLevel      string `mapstructure:"log_level"`
		JsonLogFormat bool   `mapstructure:"json_log_format"`
		LogRotation   bool   `mapstructure:"log_rotation"`
	}
	// GeneralConfig is the root configuration object produced by Loadconfig.
	GeneralConfig struct{
		Log     LogConfig     `mapstructure:"log"`
		Mongodb MongodbConfig `mapstructure:"mongodb"`
		Swagger SwaggerConfig `mapstructure:"swagger"`
	}
)
// Loadconfig reads the YAML config file at filepaths[0], merges any further
// files over it in order, and unmarshals the result into a GeneralConfig.
// It panics on any read/merge/unmarshal failure, since the application cannot
// run without configuration.
func Loadconfig(filepaths ...string) *GeneralConfig {
	if len(filepaths) == 0 {
		panic(fmt.Errorf("Empty config file"))
	}
	viper.SetConfigFile(filepaths[0])
	viper.SetConfigType("yaml")
	if err := viper.ReadInConfig(); err != nil {
		panic(fmt.Errorf("Fatal error config file: %s \n", err))
	}
	for _, filepath := range filepaths[1:] {
		// Wrapped in a function so each file is closed at the end of its own
		// iteration instead of when Loadconfig returns.
		func(filepath string) {
			f, err := os.Open(filepath)
			if err != nil {
				panic(fmt.Errorf("Fatal error read config file: %s \n", err))
			}
			defer f.Close()
			// Bug fix: message previously read "mergeing".
			if err := viper.MergeConfig(f); err != nil {
				panic(fmt.Errorf("Fatal error merging config file: %s \n", err))
			}
		}(filepath)
	}
	var config GeneralConfig
	// Bug fix: message previously said "marshal"; this step unmarshals.
	if err := viper.Unmarshal(&config); err != nil {
		panic(fmt.Errorf("Fatal error unmarshal config file: %s \n", err))
	}
	return &config
}
|
package action
import (
"github.com/mylxsw/adanos-alert/configs"
"github.com/mylxsw/adanos-alert/internal/queue"
"github.com/mylxsw/adanos-alert/internal/repository"
"github.com/mylxsw/asteria/log"
"github.com/mylxsw/glacier/infra"
"github.com/pkg/errors"
)
// Provider wires the alert-action subsystem into the DI container.
type Provider struct{}

// Register binds the action Manager constructor as a singleton.
func (s Provider) Register(app infra.Binder) {
	app.MustSingleton(NewManager)
}
// Boot registers every supported action type with the Manager and hooks the
// "action" queue so queued jobs are decoded and dispatched to the matching
// action handler.
func (s Provider) Boot(app infra.Resolver) {
	app.MustResolve(func(manager Manager, queueManager queue.Manager, conf *configs.Config) {
		manager.Register("http", NewHTTPAction(manager))
		manager.Register("dingding", NewDingdingAction(manager))
		manager.Register("email", NewEmailAction(manager, conf))
		manager.Register("wechat", NewWechatAction(manager))
		manager.Register("phone_call_aliyun", NewPhoneCallAliyunAction(manager))
		manager.Register("sms_aliyun", NewSmsAliyunAction(manager))
		manager.Register("sms_yunxin", NewSmsYunxinAction(manager))
		manager.Register("jira", NewJiraAction(manager))
		// Queue consumer: decode the serialized Payload and hand it to the
		// handler registered for payload.Action.
		queueManager.RegisterHandler("action", func(item repository.QueueJob) error {
			var payload Payload
			if err := payload.Decode([]byte(item.Payload)); err != nil {
				log.WithFields(log.Fields{
					"item": item,
					"err":  err.Error(),
				}).Errorf("can not decode payload: %s", err)
				return errors.Wrap(err, "can not decode payload")
			}
			return manager.Run(payload.Action).Handle(payload.Rule, payload.Trigger, payload.Group)
		})
	})
}
|
/*
Copyright 2014 Jiang Le
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package models
import (
"fmt"
"github.com/astaxie/beego/orm"
"github.com/naokij/gotalk/setting"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
"time"
)
// Topic is a forum topic/post. Relational data lives in the SQL database via
// beego orm; the body text is stored in MongoDB and referenced via ContentHex.
type Topic struct {
	Id                int
	Title             string    `orm:"size(255)"`
	ContentHex        string    `orm:"size(24)"` // hex MongoDB ObjectId of the body document
	Content           *Content  `orm:"-"`
	User              *User     `orm:"rel(fk);index"`
	Username          string    `orm:"size(30)"` // bug fix: tag was missing its closing quote, so it was ignored
	Category          *Category `orm:"rel(fk);index"`
	PvCount           int       `orm:"index"`
	CommentCount      int       `orm:"index"`
	BookmarkCount     int       `orm:"index"`
	IsExcellent       bool      `orm:"index"`
	IsClosed          bool      `orm:""`
	LastReplyUsername string    `orm:"size(30)"` // bug fix: tag was missing its closing quote
	LastReplyAt       time.Time `orm:""`
	Created           time.Time `orm:"auto_now_add"`
	Updated           time.Time `orm:"auto_now"`
	Ip                string    `orm:"size(39)"`
}
// Insert persists the topic. An attached Content body is written to MongoDB
// first so its hex ObjectId can be recorded in ContentHex on the row.
func (m *Topic) Insert() error {
	var err error
	if m.Content != nil {
		m.ContentHex, err = m.Content.Insert()
		if err != nil {
			return err
		}
	}
	if _, err := orm.NewOrm().Insert(m); err != nil {
		return err
	}
	return nil
}
// Read loads the topic row (optionally restricted to the given fields) and,
// when a MongoDB content reference is present ("" and "0" mean none), loads
// the body document as well.
func (m *Topic) Read(fields ...string) error {
	if err := orm.NewOrm().Read(m, fields...); err != nil {
		return err
	}
	if m.ContentHex != "" && m.ContentHex != "0" {
		// NOTE(review): leftover debug output — consider removing or moving
		// behind a logger.
		fmt.Println("ContentHex", m.ContentHex)
		content := Content{}
		m.Content = &content
		m.Content.Id = bson.ObjectIdHex(m.ContentHex)
		err := m.Content.Read()
		if err != nil {
			return err
		}
	}
	return nil
}
// Update writes the MongoDB body first (refreshing ContentHex), then updates
// the relational row restricted to the given fields.
func (m *Topic) Update(fields ...string) error {
	if m.Content != nil {
		err := m.Content.Update()
		if err != nil {
			return err
		}
		m.ContentHex = m.Content.Id.Hex()
	}
	if _, err := orm.NewOrm().Update(m, fields...); err != nil {
		return err
	}
	return nil
}
// Delete removes the topic row, then best-effort deletes the MongoDB body.
// The Content.Delete error is ignored — the relational row is already gone.
// NOTE(review): confirm orphaned content documents on failure are acceptable.
func (m *Topic) Delete() error {
	if _, err := orm.NewOrm().Delete(m); err != nil {
		return err
	}
	if m.Content != nil {
		m.Content.Delete()
	}
	return nil
}
// TableEngine tells beego orm to create this table with the InnoDB engine.
func (m *Topic) TableEngine() string {
	return "INNODB"
}
// Comment is a reply to a Topic. Like Topic, the relational data lives in SQL
// via beego orm while the body text is stored in MongoDB via ContentHex.
type Comment struct {
	Id         int
	Topic      *Topic    `orm:"rel(fk);index"`
	ContentHex string    `orm:"size(24)"` // hex MongoDB ObjectId of the body document
	Content    *Content  `orm:"-"`
	User       *User     `orm:"rel(fk);index"`
	Username   string    `orm:"size(30)"` // bug fix: tag was missing its closing quote, so it was ignored
	Ip         string    `orm:"size(39)"`
	Created    time.Time `orm:"auto_now_add;index"` // bug fix: ";index" sat outside the quoted tag value
	Updated    time.Time `orm:"auto_now"`
}
// Insert persists the comment. An attached Content body is written to MongoDB
// first so its hex ObjectId can be recorded in ContentHex on the row.
func (m *Comment) Insert() error {
	var err error
	if m.Content != nil {
		m.ContentHex, err = m.Content.Insert()
		if err != nil {
			return err
		}
	}
	if _, err := orm.NewOrm().Insert(m); err != nil {
		return err
	}
	return nil
}
// Read loads the comment row (optionally restricted to the given fields) and,
// when a MongoDB content reference is present ("" and "0" mean none), loads
// the body document as well.
func (m *Comment) Read(fields ...string) error {
	if err := orm.NewOrm().Read(m, fields...); err != nil {
		return err
	}
	if m.ContentHex != "" && m.ContentHex != "0" {
		content := Content{}
		m.Content = &content
		m.Content.Id = bson.ObjectIdHex(m.ContentHex)
		err := m.Content.Read()
		if err != nil {
			return err
		}
	}
	return nil
}
// SyncContent (re)loads the MongoDB body document referenced by ContentHex
// without touching the relational row. No-op when there is no reference.
func (m *Comment) SyncContent() error {
	if m.ContentHex != "" && m.ContentHex != "0" {
		content := Content{}
		m.Content = &content
		m.Content.Id = bson.ObjectIdHex(m.ContentHex)
		err := m.Content.Read()
		if err != nil {
			return err
		}
	}
	return nil
}
// Update writes the MongoDB body first (refreshing ContentHex), then updates
// the relational row restricted to the given fields.
func (m *Comment) Update(fields ...string) error {
	if m.Content != nil {
		err := m.Content.Update()
		if err != nil {
			return err
		}
		m.ContentHex = m.Content.Id.Hex()
	}
	if _, err := orm.NewOrm().Update(m, fields...); err != nil {
		return err
	}
	return nil
}
// Delete removes the comment row, then best-effort deletes the MongoDB body.
// The Content.Delete error is ignored — the relational row is already gone.
// NOTE(review): confirm orphaned content documents on failure are acceptable.
func (m *Comment) Delete() error {
	if _, err := orm.NewOrm().Delete(m); err != nil {
		return err
	}
	if m.Content != nil {
		m.Content.Delete()
	}
	return nil
}
// TableEngine tells beego orm to create this table with the InnoDB engine.
func (m *Comment) TableEngine() string {
	return "INNODB"
}
// Content is the MongoDB document holding the body text for a Topic or a
// Comment; TopicId/CommentId point back at the relational row.
type Content struct {
	Id        bson.ObjectId `bson:"_id,omitempty"`
	TopicId   int
	CommentId int
	Message   string
}
// Session clones the shared MongoDB session; callers must Close the clone.
func (m *Content) Session() *mgo.Session {
	return setting.MongodbSession.Clone()
}
// Insert stores the document in the Content collection under a fresh
// ObjectId and returns the id's hex form along with any insert error.
func (m *Content) Insert() (string, error) {
	m.Id = bson.NewObjectId()
	session := m.Session()
	defer session.Close()
	err := session.DB(setting.MongodbName).C("Content").Insert(m)
	return m.Id.Hex(), err
}
// Read loads the document with the receiver's ObjectId into the receiver.
// NOTE(review): One(&m) passes a **Content; One(m) would be the conventional
// call — confirm before changing, as it is preserved here verbatim.
func (m *Content) Read() error {
	session := m.Session()
	defer session.Close()
	return session.DB(setting.MongodbName).C("Content").FindId(m.Id).One(&m)
}
// Update rewrites the stored document under the receiver's existing ObjectId.
func (m *Content) Update() error {
	session := m.Session()
	defer session.Close()
	return session.DB(setting.MongodbName).C("Content").UpdateId(m.Id, m)
}
// Delete removes the stored document by the receiver's ObjectId.
func (m *Content) Delete() error {
	session := m.Session()
	defer session.Close()
	return session.DB(setting.MongodbName).C("Content").RemoveId(m.Id)
}
// initContentIndex ensures the index on the Content collection exists.
// NOTE(review): the "firstname" key matches no Content field and looks
// copy-pasted from the mgo docs — confirm the intended index keys.
func initContentIndex() {
	session := setting.MongodbSession.Clone()
	// Bug fix: the cloned session was never closed, leaking a connection.
	defer session.Close()
	c := session.DB(setting.MongodbName).C("Content")
	index := mgo.Index{
		Key:        []string{"_id", "firstname"},
		Unique:     true,
		DropDups:   true,
		Background: true, // See notes.
		Sparse:     true,
	}
	// Index creation stays best-effort as before; the error is deliberately
	// discarded. NOTE(review): consider logging it.
	_ = c.EnsureIndex(index)
}
// init registers the SQL-backed models with beego orm.
func init() {
	orm.RegisterModel(new(Topic), new(Comment))
}
|
package cmd
import (
"bytes"
"context"
"fmt"
"net"
"os"
"strconv"
"time"
"github.com/google/uuid"
"github.com/gridscale/gsclient-go/v3"
"github.com/gridscale/gscloud/render"
"github.com/sethvargo/go-password/password"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// serverCmdFlags collects the command-line flag values shared by the server
// subcommands: forceShutdown drives "off", includeRelated/force drive "rm",
// and the remaining fields parameterize "create".
type serverCmdFlags struct {
	forceShutdown    bool // hard stop instead of ACPI shutdown
	memory           int
	cores            int
	storageSize      int
	serverName       string
	template         string // template name or UUID
	hostName         string
	profile          string
	availabilityZone string
	autoRecovery     bool
	includeRelated   bool // rm: also delete storages and IP addresses
	force            bool // rm: stop a running server and skip the dry run
	userDataBase64   string
}
var (
	// serverFlags holds the parsed flag values for all server subcommands.
	serverFlags serverCmdFlags
)

// serverCmd is the parent "server" command; the subcommands attach to it.
var serverCmd = &cobra.Command{
	Use:   "server",
	Short: "Operations on servers",
	Long:  `List, create, or remove servers.`,
}
// serverLsCmdRun prints all servers of the project: as a table
// (id/name/core/mem/changed/power) by default, as bare IDs with --quiet, or
// as JSON with --json.
func serverLsCmdRun(cmd *cobra.Command, args []string) error {
	serverOp := rt.ServerOperator()
	ctx := context.Background()
	out := new(bytes.Buffer)
	servers, err := serverOp.GetServerList(ctx)
	if err != nil {
		return NewError(cmd, "Could not get list of servers", err)
	}
	var rows [][]string
	if !rootFlags.json {
		heading := []string{"id", "name", "core", "mem", "changed", "power"}
		for _, server := range servers {
			power := "off"
			if server.Properties.Power {
				power = "on"
			}
			fill := [][]string{
				{
					server.Properties.ObjectUUID,
					server.Properties.Name,
					strconv.FormatInt(int64(server.Properties.Cores), 10),
					strconv.FormatInt(int64(server.Properties.Memory), 10),
					server.Properties.ChangeTime.Local().Format(time.RFC3339),
					power,
				},
			}
			rows = append(rows, fill...)
		}
		if rootFlags.quiet {
			// --quiet: IDs only, one per line.
			for _, info := range rows {
				fmt.Println(info[0])
			}
		} else {
			render.AsTable(out, heading, rows, renderOpts)
		}
	} else {
		render.AsJSON(out, servers)
	}
	fmt.Print(out)
	return nil
}
// serverLsCmd wires serverLsCmdRun into the CLI as "server ls" (alias "list").
var serverLsCmd = &cobra.Command{
	Use:     "ls [flags]",
	Aliases: []string{"list"},
	Short:   "List servers",
	Long:    `List server objects.`,
	RunE:    serverLsCmdRun,
}
// serverOnCmdRun powers on the server whose ID is the single argument.
func serverOnCmdRun(cmd *cobra.Command, args []string) error {
	ctx := context.Background()
	if err := rt.ServerOperator().StartServer(ctx, args[0]); err != nil {
		return NewError(cmd, "Failed starting server", err)
	}
	return nil
}
// serverOnCmd wires serverOnCmdRun into the CLI as "server on".
var serverOnCmd = &cobra.Command{
	Use:   "on ID",
	Short: "Turn server on",
	Args:  cobra.ExactArgs(1),
	RunE:  serverOnCmdRun,
}
// serverOffCmdRun powers a server off: a hard stop with --force, an ACPI
// shutdown otherwise.
func serverOffCmdRun(cmd *cobra.Command, args []string) error {
	ctx := context.Background()
	serverOp := rt.ServerOperator()
	powerOff, failMsg := serverOp.ShutdownServer, "Failed shutting down server"
	if serverFlags.forceShutdown {
		powerOff, failMsg = serverOp.StopServer, "Failed stopping server"
	}
	if err := powerOff(ctx, args[0]); err != nil {
		return NewError(cmd, failMsg, err)
	}
	return nil
}
// serverOffCmd wires serverOffCmdRun into the CLI as "server off".
var serverOffCmd = &cobra.Command{
	Use:   "off [flags] ID",
	Short: "Turn server off via ACPI",
	Args:  cobra.ExactArgs(1),
	RunE:  serverOffCmdRun,
}
// serverRmCmdRun removes a server. With --force a powered-on server is hard
// stopped first. With --include-related, attached storages and assigned IP
// addresses are listed and deleted as well; in that mode, omitting --force
// turns the call into a dry run that only prints what would be removed.
func serverRmCmdRun(cmd *cobra.Command, args []string) error {
	serverOp := rt.ServerOperator()
	ctx := context.Background()
	id := args[0]
	s, err := serverOp.GetServer(ctx, id)
	if err != nil {
		return NewError(cmd, "Look up server failed", err)
	}
	if serverFlags.force {
		// Hard stop a powered-on server before deletion.
		if s.Properties.Power {
			err := serverOp.StopServer(ctx, args[0])
			if err != nil {
				return NewError(cmd, "Failed stopping server", err)
			}
		}
	}
	var storages []gsclient.ServerStorageRelationProperties
	var ipAddrs []gsclient.ServerIPRelationProperties
	if serverFlags.includeRelated {
		out := new(bytes.Buffer)
		storages, err = rt.ServerStorageRelationOperator().GetServerStorageList(ctx, id)
		if err != nil {
			return NewError(cmd, "Could not get related storages", err)
		}
		ipAddrs, err = rt.ServerIPRelationOperator().GetServerIPList(ctx, id)
		if err != nil {
			return NewError(cmd, "Could not get assigned IP addresses", err)
		}
		if !rootFlags.quiet {
			// Show the server and every related object slated for removal.
			var rows [][]string
			heading := []string{"id", "type", "name"}
			rows = append(rows, []string{
				id,
				"Server",
				s.Properties.Name,
			})
			for _, storage := range storages {
				fill := [][]string{
					{
						storage.ObjectUUID,
						"Storage",
						storage.ObjectName,
					},
				}
				rows = append(rows, fill...)
			}
			for _, addr := range ipAddrs {
				fill := [][]string{
					{
						addr.ObjectUUID,
						fmt.Sprintf("IPv%d address", addr.Family),
						addr.IP,
					},
				}
				rows = append(rows, fill...)
			}
			render.AsTable(out, heading, rows, renderOpts)
			fmt.Print(out)
		}
		if !serverFlags.force {
			// Dry run: nothing is deleted without --force.
			msg := "This can destroy your data. "
			if rootFlags.quiet {
				msg += "Re-run with --force to remove"
			} else {
				msg += "Re-run with --force to remove above objects"
			}
			log.Println(msg)
			return nil
		}
	}
	err = serverOp.DeleteServer(ctx, id)
	if err != nil {
		return NewError(cmd, "Deleting server failed", err)
	}
	fmt.Fprintf(os.Stderr, "Removed %s\n", id)
	if serverFlags.includeRelated {
		// Related objects are removed only after the server itself is gone.
		storageOp := rt.StorageOperator()
		for _, storage := range storages {
			err = storageOp.DeleteStorage(ctx, storage.ObjectUUID)
			if err != nil {
				return NewError(cmd, "Failed removing storage", err)
			}
			fmt.Fprintf(os.Stderr, "Removed %s\n", storage.ObjectUUID)
		}
		ipOp := rt.IPOperator()
		for _, addr := range ipAddrs {
			err = ipOp.DeleteIP(ctx, addr.ObjectUUID)
			if err != nil {
				return NewError(cmd, "Failed removing IP address", err)
			}
			fmt.Fprintf(os.Stderr, "Removed %s\n", addr.ObjectUUID)
		}
	}
	return nil
}
// serverRmCmd wires serverRmCmdRun into the CLI as "server rm" (alias
// "remove").
var serverRmCmd = &cobra.Command{
	Use:     "rm [flags] ID",
	Aliases: []string{"remove"},
	Short:   "Remove server",
	Long: `**gscloud server rm** removes an existing server from a project.
With the **--include-related** option, you can delete all referenced storages and assigned IP addresses, if any. By default, storages and IP addresses are not removed to prevent important data from being deleted.
# EXAMPLES
Remove a server including storages and IP addresses:
    $ gscloud server rm --include-related --force 37d53278-8e5f-47e1-a63f-54513e4b4d53
`,
	Args: cobra.ExactArgs(1),
	RunE: serverRmCmdRun,
}
// serverCreateCmd implements "gscloud server create". It creates a server
// and -- when --with-template is given -- a boot storage initialized from
// that template, which is then linked to the server. If a step after server
// creation fails, the deferred rollback deletes the server again.
var serverCreateCmd = &cobra.Command{
	Use:     "create [flags]",
	Example: `gscloud server create --name "My machine" --cores 2 --mem 4 --with-template "My template" --hostname myhost`,
	Short:   "Create server",
	Long: `Create a new server.
# EXAMPLES
Create a server with 25 GB storage from the CentOS 8 template:
$ gscloud server create \
--name worker-1 \
--cores=2 \
--mem=4 \
--with-template="CentOS 8 (x86_64)" \
--storage-size=25 \
--hostname worker-1
To create a server without any storage just omit --with-template flag:
$ gscloud server create --name worker-2 --cores=1 --mem=1
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		// output is the JSON document printed when --json is set.
		type output struct {
			Server   string `json:"server"`
			Storage  string `json:"storage"`
			Password string `json:"password"`
		}
		var templateID string
		serverOp := rt.ServerOperator()
		ctx := context.Background()
		profile, err := toHardwareProfile(serverFlags.profile)
		if err != nil {
			return NewError(cmd, "Cannot create server", err)
		}
		if serverFlags.template != "" {
			// Might be an ID or a name
			id, err := uuid.Parse(serverFlags.template)
			if err == nil {
				templateID = id.String()
			} else {
				// Not a UUID: resolve the template by its display name.
				templateOp := rt.TemplateOperator()
				template, err := templateOp.GetTemplateByName(ctx, serverFlags.template)
				if err != nil {
					return NewError(cmd, "Cannot create server", err)
				}
				templateID = template.Properties.ObjectUUID
			}
		}
		// cleanupServer arms the deferred rollback below. It is set to true
		// right after the server exists and disarmed once every follow-up
		// step has succeeded.
		cleanupServer := false
		serverCreateRequest := gsclient.ServerCreateRequest{
			Name:            serverFlags.serverName,
			Cores:           serverFlags.cores,
			Memory:          serverFlags.memory,
			HardwareProfile: profile,
			AvailablityZone: serverFlags.availabilityZone,
			AutoRecovery:    &serverFlags.autoRecovery,
		}
		if serverFlags.userDataBase64 != "" {
			serverCreateRequest.UserData = &serverFlags.userDataBase64
		}
		server, err := serverOp.CreateServer(ctx, serverCreateRequest)
		if err != nil {
			return NewError(cmd, "Creating server failed", err)
		}
		cleanupServer = true
		defer func() {
			// Roll back a half-created server when a later step failed.
			// NOTE(review): a failed rollback panics -- confirm this is
			// intended over logging the error for a CLI tool.
			if cleanupServer {
				err = serverOp.DeleteServer(ctx, server.ObjectUUID)
				if err != nil {
					panic(err)
				}
			}
		}()
		if serverFlags.template != "" {
			password := generatePassword()
			storageOp := rt.StorageOperator()
			storage, err := storageOp.CreateStorage(ctx, gsclient.StorageCreateRequest{
				Name:        string(serverFlags.serverName),
				Capacity:    serverFlags.storageSize,
				StorageType: gsclient.DefaultStorageType,
				Template: &gsclient.StorageTemplate{
					TemplateUUID: templateID,
					Password:     password,
					PasswordType: gsclient.PlainPasswordType,
					Hostname:     serverFlags.hostName,
				},
			})
			if err != nil {
				return NewError(cmd, "Creating storage failed", err)
			}
			serverStorageOp := rt.ServerStorageRelationOperator()
			err = serverStorageOp.CreateServerStorage(
				ctx,
				server.ObjectUUID,
				gsclient.ServerStorageRelationCreateRequest{
					ObjectUUID: storage.ObjectUUID,
					BootDevice: true,
				})
			if err != nil {
				return NewError(cmd, "Linking storage to server failed", err)
			}
			// Storage created and linked: keep the server.
			cleanupServer = false
			if !rootFlags.json {
				fmt.Println("Server created:", server.ObjectUUID)
				fmt.Println("Storage created:", storage.ObjectUUID)
				fmt.Println("Password:", password)
			} else {
				jsonOutput := output{
					Server:   server.ObjectUUID,
					Storage:  storage.ObjectUUID,
					Password: password,
				}
				render.AsJSON(os.Stdout, jsonOutput)
			}
		}
		// No-template path (and a harmless repeat on the template path):
		// creation is complete, disarm the rollback.
		cleanupServer = false
		return nil
	},
}
// serverSetCmd implements "gscloud server set": update mutable properties
// (cores, memory, name, user data) of an existing server given by ID.
var serverSetCmd = &cobra.Command{
	Use:     "set [flags] ID",
	Example: `gscloud server set 37d53278-8e5f-47e1-a63f-54513e4b4d53 --cores 4`,
	Short:   "Update server",
	Long:    `Update properties of an existing server.`,
	Args:    cobra.ExactArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		ctx := context.Background()
		req := gsclient.ServerUpdateRequest{
			Cores:  serverFlags.cores,
			Memory: serverFlags.memory,
			Name:   serverFlags.serverName,
		}
		// Only forward user data when the flag was actually supplied.
		if serverFlags.userDataBase64 != "" {
			req.UserData = &serverFlags.userDataBase64
		}
		if err := rt.ServerOperator().UpdateServer(ctx, args[0], req); err != nil {
			return NewError(cmd, "Failed setting property", err)
		}
		return nil
	},
}
// serverAssignCmd implements "gscloud server assign": link an existing IP
// address -- given either as a literal address or as an object ID -- to a
// server.
var serverAssignCmd = &cobra.Command{
	Use:     "assign ID ADDR",
	Example: `gscloud server assign 37d53278-8e5f-47e1-a63f-54513e4b4d53 2001:db8:0:1::1c8`,
	Short:   "Assign an IP address",
	Long:    `Assign an existing IP address to a server.`,
	Args:    cobra.ExactArgs(2),
	RunE: func(cmd *cobra.Command, args []string) error {
		ctx := context.Background()
		serverID := args[0]
		ipOp := rt.IPOperator()
		// ADDR may be a literal IP; if so, resolve it to its object ID.
		// Otherwise treat the argument as an ID already.
		addrID := args[1]
		if parsed := net.ParseIP(args[1]); parsed != nil {
			resolved, err := idForAddress(ctx, parsed, ipOp)
			if err != nil {
				return err
			}
			addrID = resolved
		}
		if err := rt.Client().LinkIP(ctx, serverID, addrID); err != nil {
			return NewError(cmd, "Could not assign IP address", err)
		}
		return nil
	},
}
// serverEventsCmd implements "gscloud server events". It prints the event
// log of a server as JSON (--json), as a table, or -- with --quiet -- as a
// bare list of request IDs.
//
// Fix: corrected the user-facing typo "suport" -> "support" in the help text.
var serverEventsCmd = &cobra.Command{
	Use:     "events ID",
	Example: `gscloud server events 37d53278-8e5f-47e1-a63f-54513e4b4d53`,
	Short:   "List events",
	Long: `Retrieve event log for given server.
# EXAMPLES
List all events of a server:
$ gscloud server events 37d53278-8e5f-47e1-a63f-54513e4b4d53
Only list request IDs of a server (in case you need to tell support what happened):
$ gscloud server events --quiet 37d53278-8e5f-47e1-a63f-54513e4b4d53
`,
	Args: cobra.ExactArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		serverID := args[0]
		ctx := context.Background()
		serverOp := rt.ServerOperator()
		events, err := serverOp.GetServerEventList(ctx, serverID)
		if err != nil {
			return NewError(cmd, "Could not get list of events", err)
		}
		out := new(bytes.Buffer)
		if rootFlags.json {
			render.AsJSON(out, events)
		} else {
			if rootFlags.quiet {
				// --quiet: one request ID per line, straight to stdout.
				for _, event := range events {
					fmt.Println(event.Properties.RequestUUID)
				}
			} else {
				var rows [][]string
				heading := []string{
					"time", "request id", "request type", "details", "initiator",
				}
				for _, event := range events {
					fill := [][]string{
						{
							event.Properties.Timestamp.Local().Format(time.RFC3339),
							event.Properties.RequestUUID,
							event.Properties.RequestType,
							event.Properties.Change,
							event.Properties.Initiator,
						},
					}
					rows = append(rows, fill...)
				}
				render.AsTable(out, heading, rows, renderOpts)
			}
		}
		fmt.Print(out)
		return nil
	},
}
// init registers all "server" sub-commands and their flags with the CLI.
func init() {
	// "server off": optionally force a hard power-off.
	serverOffCmd.Flags().BoolVarP(&serverFlags.forceShutdown, "force", "f", false, "Force shutdown (no ACPI)")
	// "server create" flags.
	serverCreateCmd.Flags().IntVar(&serverFlags.memory, "mem", 1, "Memory (GB)")
	serverCreateCmd.Flags().IntVar(&serverFlags.cores, "cores", 1, "No. of cores")
	serverCreateCmd.Flags().IntVar(&serverFlags.storageSize, "storage-size", 10, "Storage capacity (GB)")
	serverCreateCmd.Flags().StringVarP(&serverFlags.serverName, "name", "n", "", "Name of the server")
	serverCreateCmd.Flags().StringVar(&serverFlags.template, "with-template", "", "Name or ID of template to use")
	serverCreateCmd.Flags().StringVar(&serverFlags.hostName, "hostname", "", "Hostname")
	serverCreateCmd.Flags().StringVar(&serverFlags.profile, "profile", "q35", "Hardware profile")
	serverCreateCmd.Flags().StringVar(&serverFlags.availabilityZone, "availability-zone", "", "Availability zone. One of \"a\", \"b\", \"c\" (default \"\")")
	serverCreateCmd.Flags().BoolVar(&serverFlags.autoRecovery, "auto-recovery", true, "Whether to restart in case of errors")
	serverCreateCmd.Flags().StringVar(&serverFlags.userDataBase64, "user-data-base64", "", "For system configuration on first boot. May contain cloud-config data or shell scripting, encoded as base64 string. Supported tools are cloud-init, Cloudbase-init, and Ignition.")
	// "server set" flags. NOTE(review): defaults are zero/empty here,
	// presumably meaning "leave unchanged" -- confirm against gsclient's
	// ServerUpdateRequest semantics.
	serverSetCmd.Flags().IntVar(&serverFlags.memory, "mem", 0, "Memory (GB)")
	serverSetCmd.Flags().IntVar(&serverFlags.cores, "cores", 0, "No. of cores")
	serverSetCmd.Flags().StringVarP(&serverFlags.serverName, "name", "n", "", "Name of the server")
	serverSetCmd.Flags().StringVar(&serverFlags.userDataBase64, "user-data-base64", "", "For system configuration on first boot. May contain cloud-config data or shell scripting, encoded as base64 string. Supported tools are cloud-init, Cloudbase-init, and Ignition.")
	// "server rm" flags.
	serverRmCmd.Flags().BoolVarP(&serverFlags.includeRelated, "include-related", "i", false, "Remove all objects currently related to this server, not just the server")
	serverRmCmd.Flags().BoolVarP(&serverFlags.force, "force", "f", false, "Force a destructive operation")
	serverCmd.AddCommand(serverLsCmd, serverOnCmd, serverOffCmd, serverRmCmd, serverCreateCmd, serverSetCmd, serverAssignCmd, serverEventsCmd)
	rootCmd.AddCommand(serverCmd)
}
// generatePassword returns a freshly generated 12-character password with
// 6 digits and 2 symbols (see password.Generate for the boolean flag
// semantics). A generation failure is treated as a programming error.
func generatePassword() string {
	pw, err := password.Generate(12, 6, 2, false, false)
	if err != nil {
		panic(err)
	}
	return pw
}
// toHardwareProfile maps a profile name given on the command line to the
// corresponding gsclient hardware profile. Unknown names yield an error.
func toHardwareProfile(val string) (gsclient.ServerHardwareProfile, error) {
	profiles := map[string]gsclient.ServerHardwareProfile{
		"default":    gsclient.DefaultServerHardware,
		"nested":     gsclient.NestedServerHardware,
		"legacy":     gsclient.LegacyServerHardware,
		"cisco_csr":  gsclient.CiscoCSRServerHardware,
		"sophos_utm": gsclient.SophosUTMServerHardware,
		"f5_bigip":   gsclient.F5BigipServerHardware,
		"q35":        gsclient.Q35ServerHardware,
	}
	prof, ok := profiles[val]
	if !ok {
		return "", fmt.Errorf("not a valid profile: %s", val)
	}
	return prof, nil
}
|
package log
import (
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// newZapLogger builds the package's underlying zap logger. With _debugMode
// set it produces a human-readable console logger at DebugLevel with
// development behavior; otherwise it produces a sampled JSON logger at
// InfoLevel. Both variants write log output and internal errors to stderr.
func newZapLogger() *zap.SugaredLogger {
	var cfg *zap.Config
	if _debugMode {
		// Console encoding with short keys, ISO8601 timestamps and
		// capitalized level names -- optimized for reading in a terminal.
		cfg = &zap.Config{
			Level:       zap.NewAtomicLevelAt(zap.DebugLevel),
			Development: true,
			Encoding:    "console",
			EncoderConfig: zapcore.EncoderConfig{
				MessageKey:       "M",
				LevelKey:         "L",
				TimeKey:          "T",
				NameKey:          "N",
				CallerKey:        "C",
				FunctionKey:      zapcore.OmitKey,
				StacktraceKey:    "S",
				LineEnding:       zapcore.DefaultLineEnding,
				EncodeLevel:      zapcore.CapitalLevelEncoder,
				EncodeTime:       zapcore.ISO8601TimeEncoder,
				EncodeDuration:   zapcore.StringDurationEncoder,
				EncodeCaller:     zapcore.ShortCallerEncoder,
				EncodeName:       zapcore.FullNameEncoder,
				ConsoleSeparator: " ",
			},
			OutputPaths:      []string{"stderr"},
			ErrorOutputPaths: []string{"stderr"},
		}
	} else {
		// Production: machine-readable JSON with epoch timestamps, plus
		// sampling (Initial/Thereafter = 100) to bound log volume.
		cfg = &zap.Config{
			Level:       zap.NewAtomicLevelAt(zap.InfoLevel),
			Development: false,
			Sampling: &zap.SamplingConfig{
				Initial:    100,
				Thereafter: 100,
			},
			Encoding: "json",
			EncoderConfig: zapcore.EncoderConfig{
				MessageKey:     "msg",
				LevelKey:       "level",
				TimeKey:        "ts",
				NameKey:        "logger",
				CallerKey:      "caller",
				FunctionKey:    "func",
				StacktraceKey:  "stacktrace",
				LineEnding:     zapcore.DefaultLineEnding,
				EncodeLevel:    zapcore.LowercaseLevelEncoder,
				EncodeTime:     zapcore.EpochTimeEncoder,
				EncodeDuration: zapcore.SecondsDurationEncoder,
				EncodeCaller:   zapcore.ShortCallerEncoder,
				EncodeName:     zapcore.FullNameEncoder,
			},
			OutputPaths:      []string{"stderr"},
			ErrorOutputPaths: []string{"stderr"},
		}
	}
	l, err := cfg.Build()
	if err != nil {
		// The config is fully static, so a build failure is a programming
		// error rather than a runtime condition.
		panic(err)
	}
	return l.Sugar()
}
|
// ===================================== //
// author: gavingqf //
// == Please don'g change me by hand == //
//====================================== //
/*you have defined the following interface:
type IConfig interface {
// load interface
Load(path string) bool
// clear interface
Clear()
}
*/
package base
import (
"shared/utility/glog"
)
// CfgGraveyardProduceBuff is one row of the graveyard-produce-buff config
// table. NOTE: this file is code-generated ("Please don't change me by
// hand"); only comments are added here.
type CfgGraveyardProduceBuff struct {
	Id          int32 // primary key, unique per row
	Type        int32
	TypeContent int32
	BuildId     int32
	Stage0drop  int32
	Stage1drop  int32
	Stage2drop  int32
	Stage3drop  int32
	Stage4drop  int32
	Stage5drop  int32
}

// CfgGraveyardProduceBuffConfig holds all rows of the table keyed by Id.
// It implements the IConfig interface described in the file header.
type CfgGraveyardProduceBuffConfig struct {
	data map[int32]*CfgGraveyardProduceBuff
}

// NewCfgGraveyardProduceBuffConfig returns an empty, ready-to-Load config.
func NewCfgGraveyardProduceBuffConfig() *CfgGraveyardProduceBuffConfig {
	return &CfgGraveyardProduceBuffConfig{
		data: make(map[int32]*CfgGraveyardProduceBuff),
	}
}
// Load implements the IConfig Load interface. It parses the table at
// filePath into c.data keyed by Id, returning false (after logging) on any
// parse failure or duplicated Id. NOTE: generated code -- only comments are
// added here; the repetitive per-field blocks are emitted by the generator.
func (c *CfgGraveyardProduceBuffConfig) Load(filePath string) bool {
	parse := NewParser()
	if err := parse.Load(filePath, true); err != nil {
		glog.Info("Load", filePath, "err: ", err)
		return false
	}
	// Iterate all data rows. Rows 0 and 1 are skipped -- presumably header
	// and type rows (TODO confirm with the table format).
	for i := 2; i < parse.GetAllCount(); i++ {
		data := new(CfgGraveyardProduceBuff)
		/* parse Id field */
		// All fields below follow the same pattern: read the named column
		// of row i as a string, convert to int32, and bail out on failure.
		vId, _ := parse.GetFieldByName(uint32(i), "id")
		var IdRet bool
		data.Id, IdRet = String2Int32(vId)
		if !IdRet {
			glog.Error("Parse CfgGraveyardProduceBuff.Id field error,value:", vId)
			return false
		}
		/* parse Type field */
		vType, _ := parse.GetFieldByName(uint32(i), "type")
		var TypeRet bool
		data.Type, TypeRet = String2Int32(vType)
		if !TypeRet {
			glog.Error("Parse CfgGraveyardProduceBuff.Type field error,value:", vType)
			return false
		}
		/* parse TypeContent field */
		vTypeContent, _ := parse.GetFieldByName(uint32(i), "typeContent")
		var TypeContentRet bool
		data.TypeContent, TypeContentRet = String2Int32(vTypeContent)
		if !TypeContentRet {
			glog.Error("Parse CfgGraveyardProduceBuff.TypeContent field error,value:", vTypeContent)
			return false
		}
		/* parse BuildId field */
		vBuildId, _ := parse.GetFieldByName(uint32(i), "buildId")
		var BuildIdRet bool
		data.BuildId, BuildIdRet = String2Int32(vBuildId)
		if !BuildIdRet {
			glog.Error("Parse CfgGraveyardProduceBuff.BuildId field error,value:", vBuildId)
			return false
		}
		/* parse Stage0drop field */
		vStage0drop, _ := parse.GetFieldByName(uint32(i), "stage0drop")
		var Stage0dropRet bool
		data.Stage0drop, Stage0dropRet = String2Int32(vStage0drop)
		if !Stage0dropRet {
			glog.Error("Parse CfgGraveyardProduceBuff.Stage0drop field error,value:", vStage0drop)
			return false
		}
		/* parse Stage1drop field */
		vStage1drop, _ := parse.GetFieldByName(uint32(i), "stage1drop")
		var Stage1dropRet bool
		data.Stage1drop, Stage1dropRet = String2Int32(vStage1drop)
		if !Stage1dropRet {
			glog.Error("Parse CfgGraveyardProduceBuff.Stage1drop field error,value:", vStage1drop)
			return false
		}
		/* parse Stage2drop field */
		vStage2drop, _ := parse.GetFieldByName(uint32(i), "stage2drop")
		var Stage2dropRet bool
		data.Stage2drop, Stage2dropRet = String2Int32(vStage2drop)
		if !Stage2dropRet {
			glog.Error("Parse CfgGraveyardProduceBuff.Stage2drop field error,value:", vStage2drop)
			return false
		}
		/* parse Stage3drop field */
		vStage3drop, _ := parse.GetFieldByName(uint32(i), "stage3drop")
		var Stage3dropRet bool
		data.Stage3drop, Stage3dropRet = String2Int32(vStage3drop)
		if !Stage3dropRet {
			glog.Error("Parse CfgGraveyardProduceBuff.Stage3drop field error,value:", vStage3drop)
			return false
		}
		/* parse Stage4drop field */
		vStage4drop, _ := parse.GetFieldByName(uint32(i), "stage4drop")
		var Stage4dropRet bool
		data.Stage4drop, Stage4dropRet = String2Int32(vStage4drop)
		if !Stage4dropRet {
			glog.Error("Parse CfgGraveyardProduceBuff.Stage4drop field error,value:", vStage4drop)
			return false
		}
		/* parse Stage5drop field */
		vStage5drop, _ := parse.GetFieldByName(uint32(i), "stage5drop")
		var Stage5dropRet bool
		data.Stage5drop, Stage5dropRet = String2Int32(vStage5drop)
		if !Stage5dropRet {
			glog.Error("Parse CfgGraveyardProduceBuff.Stage5drop field error,value:", vStage5drop)
			return false
		}
		// Reject duplicated primary keys rather than silently overwriting.
		if _, ok := c.data[data.Id]; ok {
			glog.Errorf("Find %d repeated", data.Id)
			return false
		}
		c.data[data.Id] = data
	}
	return true
}
// Clear implements the IConfig Clear interface. The generator emits it as a
// no-op for this table.
func (c *CfgGraveyardProduceBuffConfig) Clear() {
}

// Find returns the row with the given id and whether it exists.
func (c *CfgGraveyardProduceBuffConfig) Find(id int32) (*CfgGraveyardProduceBuff, bool) {
	v, ok := c.data[id]
	return v, ok
}

// GetAllData returns the internal id-keyed row map (not a copy; callers
// must not mutate it).
func (c *CfgGraveyardProduceBuffConfig) GetAllData() map[int32]*CfgGraveyardProduceBuff {
	return c.data
}

// Traverse logs every loaded row, primarily for debugging.
func (c *CfgGraveyardProduceBuffConfig) Traverse() {
	for _, v := range c.data {
		glog.Info(v.Id, ",", v.Type, ",", v.TypeContent, ",", v.BuildId, ",", v.Stage0drop, ",", v.Stage1drop, ",", v.Stage2drop, ",", v.Stage3drop, ",", v.Stage4drop, ",", v.Stage5drop)
	}
}
|
package value
import (
"fmt"
"go.starlark.net/starlark"
)
// Stringable unpacks any starlark value convertible to a string (including
// ImplicitStringer implementations) and records whether a value was set.
type Stringable struct {
	Value string
	IsSet bool
}

// Unpack implements starlark.Unpacker for Stringable.
func (s *Stringable) Unpack(v starlark.Value) error {
	if str, ok := AsString(v); ok {
		s.Value = str
		s.IsSet = true
		return nil
	}
	return fmt.Errorf("Value should be convertible to string, but is type %s", v.Type())
}
// ImplicitStringer is implemented by values carrying an implicit string
// form that should take precedence over starlark's own string conversion.
type ImplicitStringer interface {
	ImplicitString() string
}

// AsString wraps starlark.AsString, consulting ImplicitStringer first.
func AsString(x starlark.Value) (string, bool) {
	if is, ok := x.(ImplicitStringer); ok {
		return is.ImplicitString(), true
	}
	return starlark.AsString(x)
}
// StringList unpacks a starlark List or Tuple of strings into a []string.
type StringList []string

var _ starlark.Unpacker = &StringList{}

// Unpack implements starlark.Unpacker. nil and None both yield an empty
// (nil) list.
func (s *StringList) Unpack(v starlark.Value) error {
	*s = nil
	if v == nil {
		return nil
	}
	var it starlark.Iterator
	switch seq := v.(type) {
	case *starlark.List:
		it = seq.Iterate()
	case starlark.Tuple:
		it = seq.Iterate()
	case starlark.NoneType:
		return nil
	default:
		return fmt.Errorf("value should be a List or Tuple of strings, but is of type %s", v.Type())
	}
	defer it.Done()
	var elem starlark.Value
	for it.Next(&elem) {
		str, ok := AsString(elem)
		if !ok {
			return fmt.Errorf("value should contain only strings, but element %q was of type %s", elem.String(), elem.Type())
		}
		*s = append(*s, str)
	}
	return nil
}
// StringOrStringList unpacks a starlark value that is either a single
// string or a List/Tuple of strings into a flat slice.
type StringOrStringList struct {
	Values []string
}

var _ starlark.Unpacker = &StringOrStringList{}

// Unpack implements starlark.Unpacker. nil and None both yield an empty
// value; a bare string becomes a one-element slice.
func (s *StringOrStringList) Unpack(v starlark.Value) error {
	s.Values = nil
	if v == nil {
		return nil
	}
	// Single-string shortcut.
	if single, ok := AsString(v); ok {
		s.Values = []string{single}
		return nil
	}
	var it starlark.Iterator
	switch seq := v.(type) {
	case *starlark.List:
		it = seq.Iterate()
	case starlark.Tuple:
		it = seq.Iterate()
	case starlark.NoneType:
		return nil
	default:
		return fmt.Errorf("value should be a string or List or Tuple of strings, but is of type %s", v.Type())
	}
	defer it.Done()
	var elem starlark.Value
	for it.Next(&elem) {
		str, ok := AsString(elem)
		if !ok {
			return fmt.Errorf("list should contain only strings, but element %q was of type %s", elem.String(), elem.Type())
		}
		s.Values = append(s.Values, str)
	}
	return nil
}
|
package kubectl
import (
devspacecontext "github.com/loft-sh/devspace/pkg/devspace/context"
"github.com/loft-sh/devspace/pkg/devspace/kubectl"
)
// Delete removes every object recorded for deploymentName in the remote
// deployment cache by issuing generic "delete" requests. Individual delete
// failures are logged and skipped; once the cache lookup succeeds the
// function always returns nil.
func Delete(ctx devspacecontext.Context, deploymentName string) error {
	cache, found := ctx.Config().RemoteCache().GetDeployment(deploymentName)
	if !found || cache.Kubectl == nil || len(cache.Kubectl.Objects) == 0 {
		// Nothing was deployed via kubectl for this deployment.
		return nil
	}
	for _, obj := range cache.Kubectl.Objects {
		opts := &kubectl.GenericRequestOptions{
			Kind:       obj.Kind,
			APIVersion: obj.APIVersion,
			Name:       obj.Name,
			Namespace:  obj.Namespace,
			Method:     "delete",
		}
		if _, err := ctx.KubeClient().GenericRequest(ctx.Context(), opts); err != nil {
			// Best effort: keep deleting the remaining objects.
			ctx.Log().Errorf("error deleting %s %s: %v", obj.Kind, obj.Name, err)
		}
	}
	return nil
}
|
package cron
import "time"
import "log"
import "encoding/json"
import "fmt"
import "strconv"
import "cointhink/config"
import "cointhink/proto"
import "cointhink/common"
import "cointhink/constants"
import "cointhink/httpclients"
import "cointhink/q"
import "cointhink/lxd"
import "cointhink/mailer"
import "cointhink/container"
import "cointhink/model/schedule"
import "cointhink/model/account"
import "cointhink/model/algorun"
var (
	// day remembers the day last observed by the cron loop; CronMinute
	// compares against it to fire CronDay exactly once per day rollover.
	day time.Time
)
// Setup initializes the package's day-rollover tracking with the current
// time and logs the starting year-day.
func Setup() {
	now := time.Now()
	day = now
	log.Printf("Current year-day is %d", now.YearDay())
}
func DoEvery(d time.Duration, f func(time.Time)) {
for x := range time.Tick(d) {
f(x)
}
}
// CronMinute runs once per minute. It launches the periodic jobs whose
// cadence (in minutes) comes from configuration, and fires CronDay when the
// calendar day rolls over.
//
// Fix: the parameter was named "time", shadowing the time package; renamed
// to now (parameter names are not part of the call interface in Go).
func CronMinute(now time.Time) {
	if now.Minute()%int(config.C.QueryNumber("cron.websocket_ping_minutes")) == 0 {
		go websocketPinger()
	}
	if now.Minute()%int(config.C.QueryNumber("cron.market_prices_minutes")) == 0 {
		go marketPrices(now)
	}
	reaperMinutes := int(config.C.QueryNumber("cron.schedule_reaper_minutes"))
	if now.Minute()%reaperMinutes == 0 {
		go scheduleWarn(now, reaperMinutes)
		go scheduleReaper(now)
	}
	// Day rollover: remember the new day, then run the daily jobs.
	if day.YearDay() != now.YearDay() {
		day = now
		CronDay(now)
	}
}
// CronDay runs once per calendar day (triggered from CronMinute on day
// rollover) and broadcasts a TickTock message to all connected clients.
//
// Fix: parameter renamed from "time" to now to stop shadowing the time
// package.
func CronDay(now time.Time) {
	common.RespondAll(&proto.TickTock{Time: now.UTC().Format(constants.ISO8601)})
}
// marketPrices fetches current bitcoin and ethereum quotes from
// coinmarketcap and broadcasts them to all clients and lambdas. A fetch
// failure for one coin is logged and the remaining coins are still pumped.
//
// Fix: parameter renamed from "time" to now to stop shadowing the time
// package; the per-coin error branch now uses continue instead of else.
func marketPrices(now time.Time) {
	fmt.Printf("coin price fetch.\n")
	pricePing := &proto.MarketPrices{Prices: []*proto.MarketPrice{}}
	coinNames := []string{"bitcoin", "ethereum"}
	for _, coinName := range coinNames {
		coin, err := coinFetch(coinName)
		if err != nil {
			fmt.Printf("priceFetch error for %s %+v\n", coinName, err)
			continue
		}
		price := &proto.MarketPrice{
			Exchange:   "coinmarketcap",
			Market:     coin.Symbol + "/USD",
			Amount:     coin.PriceUsd,
			Currency:   "USD",
			ReceivedAt: now.UTC().Format(constants.ISO8601)}
		pricePing.Prices = append(pricePing.Prices, price)
	}
	fmt.Printf("coin price pump of %d prices.\n", len(pricePing.Prices))
	common.RespondAll(pricePing)
	common.LambdaAll(pricePing)
}
// coinFetch retrieves the coinmarketcap v1 ticker entry for the named coin.
// The request latency is reported to Influx in milliseconds.
//
// Fixes: the previous version ignored the json.Unmarshal error and indexed
// list[0] unconditionally, panicking on an empty or malformed response;
// both cases now return an error. Also uses time.Since for the latency.
func coinFetch(name string) (CoinMarketCap, error) {
	coin := CoinMarketCap{}
	quoteAPI := "https://api.coinmarketcap.com/v1/ticker/" + name + "/"
	fmt.Println(quoteAPI)
	start := time.Now()
	bodyJson, err := common.Httpget(quoteAPI)
	if err != nil {
		fmt.Printf("price fetch error %+v", err)
		return coin, err
	}
	list := []CoinMarketCap{}
	if err = json.Unmarshal([]byte(bodyJson), &list); err != nil {
		return coin, err
	}
	if len(list) == 0 {
		return coin, fmt.Errorf("empty ticker response for %s", name)
	}
	delay := time.Since(start).Nanoseconds() / 1000000 // millisec
	go common.InfluxWrite("marketdata", "exchange", "coinmarketcap", strconv.FormatInt(delay, 10))
	return list[0], nil
}
// CoinMarketCap mirrors one entry of the coinmarketcap.com v1 ticker API
// response. All numeric quantities arrive as strings in the JSON payload,
// so they are kept as strings here.
type CoinMarketCap struct {
	ID               string `json:"id"`
	Name             string `json:"name"`
	Symbol           string `json:"symbol"`
	Rank             string `json:"rank"`
	PriceUsd         string `json:"price_usd"`
	PriceBtc         string `json:"price_btc"`
	Two4HVolumeUsd   string `json:"24h_volume_usd"`
	MarketCapUsd     string `json:"market_cap_usd"`
	AvailableSupply  string `json:"available_supply"`
	TotalSupply      string `json:"total_supply"`
	PercentChange1H  string `json:"percent_change_1h"`
	PercentChange24H string `json:"percent_change_24h"`
	PercentChange7D  string `json:"percent_change_7d"`
	LastUpdated      string `json:"last_updated"`
	PriceEur         string `json:"price_eur"`
	Two4HVolumeEur   string `json:"24h_volume_eur"`
	MarketCapEur     string `json:"market_cap_eur"`
}
// scheduleWarn is a placeholder for warning owners of schedules that are
// about to expire. The implementation is commented out, so the function is
// currently a no-op; it is still launched from CronMinute on the reaper
// cadence.
func scheduleWarn(_time time.Time, windowMinutes int) {
	//halfWindow := 30 //time.Duration(windowMinutes / 2)
	//start := _time.Add(halfWindow * time.Minute)
	//end := _time.Sub(halfWindow * time.Minute)
	//nearlyExpiredSchedules := schedule.RunningExpireds(_time, _time)
}
// scheduleReaper disables schedules that have run past their expiry. For
// each expired schedule it attempts to debit a credit for another month;
// when no credits remain, the schedule is disabled, its owner is mailed,
// and all ready algoruns are marked deleted and their containers stopped.
//
// Fixes: the account.Find error was silently swallowed by an empty branch
// -- it is now logged and the schedule skipped; the "time" parameter no
// longer shadows the time package.
func scheduleReaper(now time.Time) {
	expired := schedule.RunningExpireds(now)
	log.Printf("** cron.scheduleReaper found %d expired schedules.", len(expired))
	for _, sched := range expired {
		acct, err := account.Find(sched.AccountId)
		if err != nil {
			log.Printf("scheduleReaper: account %s lookup failed: %v", sched.AccountId, err)
			continue
		}
		log.Printf("Schedule %s expired. Account %s.", sched.Id, acct.Id)
		if err := schedule.EnableUntilNextMonth(sched, &acct); err != nil {
			log.Printf("No credits left. Stopping %s", sched.Id)
			schedule.UpdateStatus(sched, proto.Schedule_disabled)
			mailer.MailScheduleStopped(acct.Email, sched.AlgorithmId)
			boxes, _ := algorun.FindReady(acct.Id, sched.Id)
			for _, box := range boxes {
				algorun.UpdateStatus(box, proto.Algorun_deleted)
				// Only stop the container if it still exists on the host.
				stat, _ := lxd.Status(box.Id)
				if stat.ErrorCode != 404 {
					container.Stop(box)
				}
			}
		} else {
			log.Printf("Debiting 1 credit for %s", sched.Id)
			mailer.MailCreditDebit(acct.Email, sched.AlgorithmId)
		}
	}
}
// websocketPinger queues a ping for every connected websocket client and
// rebuilds Pinglist with the addresses pinged this round. Entries still on
// the previous Pinglist never answered and are counted in the log line.
func websocketPinger() {
	log.Printf("ws ping for %+v clients. (%d unresponsive)\n",
		len(httpclients.Clients), len(httpclients.Pinglist))
	httpclients.Pinglist = []string{}
	for client := range httpclients.Clients {
		q.OUTq <- q.RpcOut{Socket: client}
		httpclients.Pinglist = append(httpclients.Pinglist, client.RemoteAddr().String())
	}
}
|
package main
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"github.com/sensu/sensu-go/types"
"github.com/spf13/cobra"
pushbullet "github.com/xconstruct/go-pushbullet"
)
var (
	// token is the Pushbullet API token (--token / PUSHBULLET_APP_TOKEN);
	// device names a single target device (--device / DEVICE).
	token, device string
	// stdin may be pre-set by tests; run() falls back to os.Stdin when nil.
	stdin *os.File
	// allDevices selects broadcasting to every registered device.
	allDevices bool
)
// main builds the root command and runs it, exiting fatally on error.
func main() {
	if err := configureRootCommand().Execute(); err != nil {
		log.Fatal(err.Error())
	}
}
// configureRootCommand builds the root cobra command and wires up the
// --token, --device and --alldevices flags.
func configureRootCommand() *cobra.Command {
	root := &cobra.Command{
		Use:   "sensu-pushbullet-handler",
		Short: "The Sensu Go handler plugin for pushbullet",
		RunE:  run,
	}
	flags := root.Flags()
	flags.StringVarP(&token, "token", "t",
		os.Getenv("PUSHBULLET_APP_TOKEN"),
		"Pushbullet API app token, use default from PUSHBULLET_APP_TOKEN env var")
	flags.StringVarP(&device, "device", "d",
		os.Getenv("DEVICE"),
		"A device registered with Pushbullet")
	flags.BoolVar(&allDevices, "alldevices", false,
		"Bool for sending notifications to all devices")
	return root
}
// run is the handler entry point: it reads a Sensu event from stdin,
// validates it, and sends a Pushbullet notification either to all devices
// (--alldevices) or to one named device (--device). Exactly one of the two
// modes must be selected; otherwise a diagnostic is printed.
//
// Fixes: errors are returned directly instead of re-wrapped with
// errors.New(err.Error()) (which discarded the original error value), and
// the user-facing messages are corrected ("chose" -> "choose").
func run(cmd *cobra.Command, args []string) error {
	if len(args) != 0 {
		_ = cmd.Help()
		return fmt.Errorf("invalid argument(s) received")
	}
	if token == "" {
		_ = cmd.Help()
		return fmt.Errorf("api token is empty")
	}
	// stdin may be pre-set by tests; default to the real standard input.
	if stdin == nil {
		stdin = os.Stdin
	}
	eventJSON, err := ioutil.ReadAll(stdin)
	if err != nil {
		return fmt.Errorf("failed to read stdin: %s", err.Error())
	}
	event := &types.Event{}
	if err = json.Unmarshal(eventJSON, event); err != nil {
		return fmt.Errorf("failed to unmarshal stdin data: %s", err.Error())
	}
	if err = validateEvent(event); err != nil {
		return err
	}
	switch {
	case allDevices && device == "":
		if err = notifyPushbulletAll(event); err != nil {
			return err
		}
	case !allDevices && device != "":
		if err = notifyPushbulletOne(event); err != nil {
			return err
		}
	case !allDevices && device == "":
		fmt.Println("Must choose to send notification to all devices or one")
	default: // allDevices && device != ""
		fmt.Println("Cannot send to all devices and one device")
	}
	return nil
}
// notifyPushbulletAll sends the event notification to every device
// registered with the Pushbullet account.
//
// Fix: a Devices() failure previously panicked; it is now returned as an
// error so the caller can surface it.
func notifyPushbulletAll(event *types.Event) error {
	pb := pushbullet.New(token)
	devs, err := pb.Devices()
	if err != nil {
		return fmt.Errorf("querying pushbullet devices: %v", err)
	}
	title := fmt.Sprintf("%s/%s", event.Entity.Name, event.Check.Name)
	message := event.Check.Output
	for i := 0; i < len(devs); i++ {
		// Best effort per device: Pushbullet is known to hold onto stale
		// device identities, so individual push failures are ignored.
		_ = pb.PushNote(devs[i].Iden, title, message)
	}
	return nil
}
// notifyPushbulletOne sends the event notification to the single device
// named by the --device flag.
//
// Fix: a Device() lookup failure previously panicked; it is now returned
// as an error so the caller can surface it.
func notifyPushbulletOne(event *types.Event) error {
	pb := pushbullet.New(token)
	dev, err := pb.Device(device)
	if err != nil {
		return fmt.Errorf("looking up pushbullet device %q: %v", device, err)
	}
	title := fmt.Sprintf("%s/%s", event.Entity.Name, event.Check.Name)
	message := event.Check.Output
	// Best effort: Pushbullet is known to hold onto stale device
	// identities, so the push failure itself is ignored.
	_ = dev.PushNote(title, message)
	return nil
}
// validateEvent checks that the event carries the entity and check data the
// notifiers rely on, returning the underlying validation error unchanged.
//
// Fix: the Check.Validate error was re-wrapped with errors.New(err.Error()),
// discarding the original error value; it is now returned directly.
func validateEvent(event *types.Event) error {
	// Timestamp validation stays disabled: known sample events carry a zero
	// timestamp and would fail the check.
	// if event.Timestamp <= 0 {
	// 	return errors.New("timestamp is missing or must be greater than zero")
	// }
	if event.Entity == nil {
		return errors.New("entity is missing from event")
	}
	if !event.HasCheck() {
		return errors.New("check is missing from event")
	}
	if err := event.Entity.Validate(); err != nil {
		return err
	}
	if err := event.Check.Validate(); err != nil {
		return err
	}
	return nil
}
|
package entity
import "fmt"
// GraphDataElements is a collection of traffic-graph edges.
type GraphDataElements []GraphDataElement

// MinSize returns the smallest SizeBytes among the elements, or 0 for an
// empty collection.
//
// Fix: the previous implementation used 0 as an "unset" sentinel inside the
// loop, so a legitimate 0-byte element reset the running minimum on the
// following iteration (e.g. sizes [5, 0, 7] yielded 7 instead of 0).
func (g GraphDataElements) MinSize() int {
	if len(g) == 0 {
		return 0
	}
	min := g[0].SizeBytes
	for _, v := range g[1:] {
		if v.SizeBytes < min {
			min = v.SizeBytes
		}
	}
	return min
}

// MaxSize returns the largest SizeBytes among the elements, or 0 for an
// empty collection. Same sentinel fix as MinSize.
func (g GraphDataElements) MaxSize() int {
	if len(g) == 0 {
		return 0
	}
	max := g[0].SizeBytes
	for _, v := range g[1:] {
		if v.SizeBytes > max {
			max = v.SizeBytes
		}
	}
	return max
}

// GraphDataElement describes one directed edge of the service traffic graph
// with its packet and byte counters.
type GraphDataElement struct {
	SourceHost         string
	SourceService      string
	TargetHost         string
	TargetService      string
	PacketsCnt         int
	SizeBytes          int
	SizeBytesPerMinute int
}

// SourceServiceID returns "host-service" for the edge's source endpoint.
func (g GraphDataElement) SourceServiceID() string {
	return fmt.Sprintf("%s-%s", g.SourceHost, g.SourceService)
}

// TargetServiceID returns "host-service" for the edge's target endpoint.
func (g GraphDataElement) TargetServiceID() string {
	return fmt.Sprintf("%s-%s", g.TargetHost, g.TargetService)
}

// EdgeId returns an identifier combining both endpoints of the edge.
func (g GraphDataElement) EdgeId() string {
	return fmt.Sprintf("%s-%s-%s-%s", g.SourceHost, g.SourceService, g.TargetHost, g.TargetService)
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvserver
import (
"bytes"
"context"
"fmt"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/stretchr/testify/require"
)
// TestTxnRecoveryFromStaging tests the recovery process for a transaction that
// stages its transaction record immediately before its coordinator dies. It
// tests that concurrent transactions are able to recover from the indeterminate
// commit state after it becomes clear that the original transaction is no
// longer live. The test checks both the case where the parallel commit was
// successful and the case where the parallel commit failed.
func TestTxnRecoveryFromStaging(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	keyA, keyB := roachpb.Key("a"), roachpb.Key("b")
	for i, tc := range []struct {
		// implicitCommit says whether we expect the transaction to satisfy the
		// implicit-commit condition.
		implicitCommit bool
		// If implicitCommit is false, writeTooOld dictates what kind of push will
		// be experienced by one of the txn's intents. An intent being pushed is the
		// reason why the implicit-commit condition is expected to fail. We simulate
		// both pushes by the timestamp cache, and by deferred write-too-old
		// conditions.
		writeTooOld bool
		// futureWrites dictates whether the transaction has been writing at the
		// present time or whether it has been writing into the future with a
		// synthetic timestamp.
		futureWrites bool
	}{
		{
			implicitCommit: true,
		},
		{
			implicitCommit: false,
			writeTooOld:    false,
		},
		{
			implicitCommit: false,
			writeTooOld:    true,
		},
		{
			implicitCommit: true,
			futureWrites:   true,
		},
		{
			implicitCommit: false,
			writeTooOld:    false,
			futureWrites:   true,
		},
		{
			implicitCommit: false,
			writeTooOld:    true,
			futureWrites:   true,
		},
	} {
		name := fmt.Sprintf("%d-commit:%t,writeTooOld:%t,futureWrites:%t", i, tc.implicitCommit, tc.writeTooOld, tc.futureWrites)
		t.Run(name, func(t *testing.T) {
			stopper := stop.NewStopper()
			defer stopper.Stop(ctx)
			manual := hlc.NewManualClock(123)
			cfg := TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
			// Set the RecoverIndeterminateCommitsOnFailedPushes flag to true so
			// that a push on a STAGING transaction record immediately launches
			// the transaction recovery process.
			cfg.TestingKnobs.EvalKnobs.RecoverIndeterminateCommitsOnFailedPushes = true
			store := createTestStoreWithConfig(t, stopper, testStoreOpts{createSystemRanges: true}, &cfg)
			// Create a transaction that will get stuck performing a parallel commit.
			txn := newTransaction("txn", keyA, 1, store.Clock())
			// If the transaction is writing into the future, bump its write
			// timestamp. Also, bump its read timestamp to simulate a situation
			// where it has refreshed up to its write timestamp in preparation
			// to commit.
			if tc.futureWrites {
				txn.WriteTimestamp = txn.ReadTimestamp.Add(50, 0).WithSynthetic(true)
				txn.ReadTimestamp = txn.WriteTimestamp // simulate refresh
			}
			// Issue two writes, which will be considered in-flight at the time of
			// the transaction's EndTxn request.
			keyAVal := []byte("value")
			pArgs := putArgs(keyA, keyAVal)
			pArgs.Sequence = 1
			h := roachpb.Header{Txn: txn}
			if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), h, &pArgs); pErr != nil {
				t.Fatal(pErr)
			}
			// If we don't want this transaction to commit successfully, perform a
			// conflicting operation on keyB to prevent the transaction's write to
			// keyB from writing at its desired timestamp. This prevents an implicit
			// commit state.
			conflictH := roachpb.Header{Timestamp: txn.WriteTimestamp.Next()}
			if !tc.implicitCommit {
				if !tc.writeTooOld {
					// A read at a higher timestamp bumps the timestamp cache,
					// pushing the txn's later write on keyB.
					gArgs := getArgs(keyB)
					if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), conflictH, &gArgs); pErr != nil {
						t.Fatal(pErr)
					}
				} else {
					// A conflicting write at a higher timestamp produces a
					// deferred write-too-old condition on keyB.
					pArgs = putArgs(keyB, []byte("pusher val"))
					if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), conflictH, &pArgs); pErr != nil {
						t.Fatal(pErr)
					}
				}
			}
			pArgs = putArgs(keyB, []byte("value2"))
			pArgs.Sequence = 2
			if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), h, &pArgs); pErr != nil {
				t.Fatal(pErr)
			}
			// Issue a parallel commit, which will put the transaction into a
			// STAGING state. Include both writes as the EndTxn's in-flight writes.
			et, etH := endTxnArgs(txn, true)
			et.InFlightWrites = []roachpb.SequencedWrite{
				{Key: keyA, Sequence: 1},
				{Key: keyB, Sequence: 2},
			}
			etReply, pErr := kv.SendWrappedWith(ctx, store.TestSender(), etH, &et)
			if pErr != nil {
				t.Fatal(pErr)
			}
			if replyTxn := etReply.Header().Txn; replyTxn.Status != roachpb.STAGING {
				t.Fatalf("expected STAGING txn, found %v", replyTxn)
			}
			// Pretend the transaction coordinator for the parallel commit died at this
			// point. Typically, we would have to wait out the TxnLivenessThreshold. But
			// since we set RecoverIndeterminateCommitsOnFailedPushes, we don't need to
			// wait. So issue a read on one of the keys that the transaction wrote. This
			// will result in a transaction push and eventually a full transaction
			// recovery in order to resolve the indeterminate commit.
			gArgs := getArgs(keyA)
			gReply, pErr := kv.SendWrappedWith(ctx, store.TestSender(), conflictH, &gArgs)
			if pErr != nil {
				t.Fatal(pErr)
			}
			// After recovery: a committed txn's write must be visible; an
			// aborted txn's write must not.
			if tc.implicitCommit {
				if val := gReply.(*roachpb.GetResponse).Value; val == nil {
					t.Fatalf("expected non-nil value when reading key %v", keyA)
				} else if valBytes, err := val.GetBytes(); err != nil {
					t.Fatal(err)
				} else if !bytes.Equal(valBytes, keyAVal) {
					t.Fatalf("actual value %q did not match expected value %q", valBytes, keyAVal)
				}
			} else {
				if val := gReply.(*roachpb.GetResponse).Value; val != nil {
					t.Fatalf("expected nil value when reading key %v; found %v", keyA, val)
				}
			}
			// Query the transaction and verify that it has the right status.
			qtArgs := queryTxnArgs(txn.TxnMeta, false /* waitForUpdate */)
			qtReply, pErr := kv.SendWrapped(ctx, store.TestSender(), &qtArgs)
			if pErr != nil {
				t.Fatal(pErr)
			}
			status := qtReply.(*roachpb.QueryTxnResponse).QueriedTxn.Status
			expStatus := roachpb.ABORTED
			if tc.implicitCommit {
				expStatus = roachpb.COMMITTED
			}
			if status != expStatus {
				t.Fatalf("expected transaction status %v; found %v", expStatus, status)
			}
		})
	}
}
// TestTxnRecoveryFromStagingWithHighPriority tests the transaction recovery
// process initiated by a high-priority operation which encounters a staging
// transaction. The test contains a subtest for each of the combinations of the
// following boolean options:
//
// - pushAbort: configures whether or not the high-priority operation is a
// read (false) or a write (true), which dictates the kind of push
// operation dispatched against the staging transaction.
//
// - newEpoch: configures whether or not the staging transaction wrote the
// intent which the high-priority operation conflicts with at a higher
// epoch than it is staged at. If true, the staging transaction is not
// implicitly committed.
//
// - newTimestamp: configures whether or not the staging transaction wrote the
// intent which the high-priority operation conflicts with at a higher
// timestamp than it is staged at. If true, the staging transaction is not
// implicitly committed.
//
func TestTxnRecoveryFromStagingWithHighPriority(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	run := func(t *testing.T, pushAbort, newEpoch, newTimestamp bool) {
		stopper := stop.NewStopper()
		defer stopper.Stop(ctx)
		// A manual clock lets the test advance time deterministically below.
		manual := hlc.NewManualClock(123)
		cfg := TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
		store := createTestStoreWithConfig(t, stopper, testStoreOpts{createSystemRanges: true}, &cfg)
		// Create a transaction that will get stuck performing a parallel
		// commit.
		keyA, keyB := roachpb.Key("a"), roachpb.Key("b")
		txn := newTransaction("txn", keyA, 1, store.Clock())
		// Issue two writes, which will be considered in-flight at the time of
		// the transaction's EndTxn request.
		keyAVal := []byte("value")
		pArgs := putArgs(keyA, keyAVal)
		pArgs.Sequence = 1
		h := roachpb.Header{Txn: txn}
		_, pErr := kv.SendWrappedWith(ctx, store.TestSender(), h, &pArgs)
		require.Nil(t, pErr, "error: %s", pErr)
		// The second write may or may not be bumped. A cloned txn proto is
		// used so the bump is only visible to this one request.
		pArgs = putArgs(keyB, []byte("value2"))
		pArgs.Sequence = 2
		h2 := roachpb.Header{Txn: txn.Clone()}
		if newEpoch {
			h2.Txn.BumpEpoch()
		}
		if newTimestamp {
			manual.Increment(100)
			h2.Txn.WriteTimestamp = store.Clock().Now()
		}
		_, pErr = kv.SendWrappedWith(ctx, store.TestSender(), h2, &pArgs)
		require.Nil(t, pErr, "error: %s", pErr)
		// Issue a parallel commit, which will put the transaction into a
		// STAGING state. Include both writes as the EndTxn's in-flight writes.
		et, etH := endTxnArgs(txn, true)
		et.InFlightWrites = []roachpb.SequencedWrite{
			{Key: keyA, Sequence: 1},
			{Key: keyB, Sequence: 2},
		}
		etReply, pErr := kv.SendWrappedWith(ctx, store.TestSender(), etH, &et)
		require.Nil(t, pErr, "error: %s", pErr)
		require.Equal(t, roachpb.STAGING, etReply.Header().Txn.Status)
		// Issue a conflicting, high-priority operation.
		var conflictArgs roachpb.Request
		if pushAbort {
			pArgs = putArgs(keyB, []byte("value3"))
			conflictArgs = &pArgs
		} else {
			gArgs := getArgs(keyB)
			conflictArgs = &gArgs
		}
		manual.Increment(100)
		conflictH := roachpb.Header{
			UserPriority: roachpb.MaxUserPriority,
			Timestamp:    store.Clock().Now(),
		}
		_, pErr = kv.SendWrappedWith(ctx, store.TestSender(), conflictH, conflictArgs)
		require.Nil(t, pErr, "error: %s", pErr)
		// Query the transaction and verify that it has the right state.
		qtArgs := queryTxnArgs(txn.TxnMeta, false /* waitForUpdate */)
		qtReply, pErr := kv.SendWrapped(ctx, store.TestSender(), &qtArgs)
		require.Nil(t, pErr, "error: %s", pErr)
		qtTxn := qtReply.(*roachpb.QueryTxnResponse).QueriedTxn
		if !newEpoch && !newTimestamp {
			// The transaction was implicitly committed at its initial epoch and
			// timestamp.
			require.Equal(t, roachpb.COMMITTED, qtTxn.Status)
			require.Equal(t, txn.Epoch, qtTxn.Epoch)
			require.Equal(t, txn.WriteTimestamp, qtTxn.WriteTimestamp)
		} else if newEpoch {
			// The transaction is aborted if that's what the high-priority
			// request wants. Otherwise, the transaction's record is bumped to
			// the new epoch pulled from its intent and pushed above the
			// high-priority request's timestamp.
			if pushAbort {
				require.Equal(t, roachpb.ABORTED, qtTxn.Status)
			} else /* pushTimestamp */ {
				require.Equal(t, roachpb.PENDING, qtTxn.Status)
				require.Equal(t, txn.Epoch+1, qtTxn.Epoch)
				require.Equal(t, conflictH.Timestamp.Next(), qtTxn.WriteTimestamp)
			}
		} else /* if newTimestamp */ {
			// The transaction is aborted, even if the high-priority request
			// only needed it to be pushed to a higher timestamp. This is
			// because we don't allow a STAGING transaction record to move back
			// to PENDING in the same epoch.
			require.Equal(t, roachpb.ABORTED, qtTxn.Status)
		}
	}
	// Exercise all eight combinations of the three options.
	testutils.RunTrueAndFalse(t, "push_abort", func(t *testing.T, pushAbort bool) {
		testutils.RunTrueAndFalse(t, "new_epoch", func(t *testing.T, newEpoch bool) {
			testutils.RunTrueAndFalse(t, "new_timestamp", func(t *testing.T, newTimestamp bool) {
				run(t, pushAbort, newEpoch, newTimestamp)
			})
		})
	})
}
// TestTxnClearRangeIntents tests whether a ClearRange call blindly removes
// write intents. This can cause it to remove an intent from an implicitly
// committed STAGING txn. When txn recovery kicks in, it will fail to find the
// expected intent, causing it to roll back a committed txn (including any
// values outside of the cleared range).
//
// Because the fix for this relies on separated intents, the bug will continue
// to be present until the planned migration in 21.2. Since tests currently
// enable separated intents at random, we assert the buggy behavior when these
// are disabled. See also: https://github.com/cockroachdb/cockroach/issues/46764
func TestTxnClearRangeIntents(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	stopper := stop.NewStopper()
	defer stopper.Stop(ctx)
	cfg := TestStoreConfig(nil)
	// Immediately launch transaction recovery when pushing a STAGING txn.
	cfg.TestingKnobs.EvalKnobs.RecoverIndeterminateCommitsOnFailedPushes = true
	store := createTestStoreWithConfig(t, stopper, testStoreOpts{createSystemRanges: true}, &cfg)
	// Set up a couple of keys to write, and a range to clear that covers
	// B and its intent (but not A).
	keyA, valueA := roachpb.Key("a"), []byte("value1")
	keyB, valueB := roachpb.Key("b"), []byte("value2")
	clearFrom, clearTo := roachpb.Key("aa"), roachpb.Key("x")
	// Create a transaction that will get stuck performing a parallel commit.
	txn := newTransaction("txn", keyA, 1, store.Clock())
	txnHeader := roachpb.Header{Txn: txn}
	// Issue two writes, which will be considered in-flight at the time of the
	// transaction's EndTxn request.
	put := putArgs(keyA, valueA)
	put.Sequence = 1
	_, pErr := kv.SendWrappedWith(ctx, store.TestSender(), txnHeader, &put)
	require.Nil(t, pErr, "error: %s", pErr)
	put = putArgs(keyB, valueB)
	put.Sequence = 2
	_, pErr = kv.SendWrappedWith(ctx, store.TestSender(), txnHeader, &put)
	require.Nil(t, pErr, "error: %s", pErr)
	// Issue a parallel commit, which will put the transaction into a STAGING
	// state. Include both writes as the EndTxn's in-flight writes.
	endTxn, endTxnHeader := endTxnArgs(txn, true)
	endTxn.InFlightWrites = []roachpb.SequencedWrite{
		{Key: keyA, Sequence: 1},
		{Key: keyB, Sequence: 2},
	}
	reply, pErr := kv.SendWrappedWith(ctx, store.TestSender(), endTxnHeader, &endTxn)
	require.Nil(t, pErr, pErr)
	require.Equal(t, roachpb.STAGING, reply.Header().Txn.Status, "expected STAGING txn")
	// Make sure intents exists for keys A and B.
	queryIntent := queryIntentArgs(keyA, txn.TxnMeta, false)
	reply, pErr = kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{}, &queryIntent)
	require.Nil(t, pErr, "error: %s", pErr)
	require.True(t, reply.(*roachpb.QueryIntentResponse).FoundIntent, "intent missing for %q", keyA)
	queryIntent = queryIntentArgs(keyB, txn.TxnMeta, false)
	reply, pErr = kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{}, &queryIntent)
	require.Nil(t, pErr, "error: %s", pErr)
	require.True(t, reply.(*roachpb.QueryIntentResponse).FoundIntent, "intent missing for %q", keyB)
	// Call ClearRange covering key B and its intent.
	clearRange := clearRangeArgs(clearFrom, clearTo)
	_, pErr = kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{}, &clearRange)
	require.Nil(t, pErr, "error: %s", pErr)
	// If separated intents are enabled, all should be well.
	if store.engine.IsSeparatedIntentsEnabledForTesting(ctx) {
		// Reading A should succeed, but B should be gone.
		get := getArgs(keyA)
		reply, pErr = kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{}, &get)
		require.Nil(t, pErr, "error: %s", pErr)
		require.NotNil(t, reply.(*roachpb.GetResponse).Value, "expected value for A")
		value, err := reply.(*roachpb.GetResponse).Value.GetBytes()
		require.NoError(t, err)
		require.Equal(t, value, valueA)
		get = getArgs(keyB)
		reply, pErr = kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{}, &get)
		require.Nil(t, pErr, "error: %s", pErr)
		require.Nil(t, reply.(*roachpb.GetResponse).Value, "unexpected value for B")
		// Query the original transaction, which should now be committed.
		queryTxn := queryTxnArgs(txn.TxnMeta, false)
		reply, pErr = kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{}, &queryTxn)
		require.Nil(t, pErr, "error: %s", pErr)
		require.Equal(t, roachpb.COMMITTED, reply.(*roachpb.QueryTxnResponse).QueriedTxn.Status)
	} else {
		// If separated intents are disabled, ClearRange will have removed B's
		// intent without resolving it. When we read A, txn recovery will expect
		// to find B's intent, but when missing it assumes the txn did not
		// complete and aborts it, rolling back all writes (including A).
		get := getArgs(keyA)
		reply, pErr = kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{}, &get)
		require.Nil(t, pErr, "error: %s", pErr)
		require.Nil(t, reply.(*roachpb.GetResponse).Value, "unexpected value for A")
		get = getArgs(keyB)
		reply, pErr = kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{}, &get)
		require.Nil(t, pErr, "error: %s", pErr)
		require.Nil(t, reply.(*roachpb.GetResponse).Value, "unexpected value for B")
		// Query the original transaction, which should now be aborted.
		queryTxn := queryTxnArgs(txn.TxnMeta, false)
		reply, pErr = kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{}, &queryTxn)
		require.Nil(t, pErr, "error: %s", pErr)
		require.Equal(t, roachpb.ABORTED, reply.(*roachpb.QueryTxnResponse).QueriedTxn.Status)
	}
}
|
//115. Returning a func
// The declared return type of bar is itself a type: func() int.
// Because the signature fixes the return type to func() int, bar must
// return a value of exactly that form (an anonymous func() int).
// Since bar() returns a function, calling that result requires an extra
// pair of parentheses: bar()().
// See diagram 115.
package main
import "fmt"
// main demonstrates that bar's result is itself a function: it prints the
// dynamic type of the returned value, then invokes it and prints the result.
func main() {
	returned := bar()
	fmt.Printf("%T\n", returned)
	fmt.Println(bar()())
}
// bar returns a function which, when called, yields 451.
func bar() func() int {
	inner := func() int {
		return 451
	}
	return inner
}
|
package main
import (
"io/ioutil"
"log"
"os"
"github.com/alecthomas/chroma/lexers"
"github.com/ktnyt/carrera"
carreraBuffer "github.com/ktnyt/carrera/buffer"
termbox "github.com/ktnyt/termbox-go"
)
// openFile reads filename, picks a syntax lexer based on the file name and
// returns a buffer service initialized with the file's contents. Any read
// error is fatal (the process exits).
func openFile(filename string) carrera.BufferService {
	// ReadFile replaces the previous Open/ReadAll/Close sequence: one call,
	// and the file handle is always closed regardless of read errors.
	p, err := ioutil.ReadFile(filename)
	if err != nil {
		log.Fatal(err)
	}
	lexer := lexers.Match(filename)
	syntax := carrera.NewSyntax(lexer)
	buffer := carrera.Buffer([]rune(string(p)))
	return carreraBuffer.NewBufferService(buffer, syntax.Highlight)
}
// run opens filename in the editor UI and processes terminal events until
// the user quits with Esc or 'q'.
func run(filename string) error {
	service := openFile(filename)
	// Previously the error from termbox.Init was ignored, so a failed
	// terminal setup would surface only as confusing downstream behavior.
	if err := termbox.Init(); err != nil {
		return err
	}
	defer termbox.Close()
	termbox.SetOutputMode(termbox.OutputXterm)
	presenter := carreraBuffer.NewBufferPresenter(service)
	width, height := termbox.Size()
	view := carreraBuffer.NewBufferView(presenter, width, height)
	for {
		view.Draw()
		termbox.Flush()
		switch e := termbox.PollEvent(); e.Type {
		case termbox.EventKey:
			// Esc or the 'q' rune exits the event loop; everything else is
			// ignored (as are non-key events, matching the old behavior
			// where EventError was a no-op).
			if e.Key == termbox.KeyEsc || e.Ch == 'q' {
				return nil
			}
		}
	}
}
// main opens the file named by the first command-line argument in the editor.
func main() {
	// Guard against a missing argument; previously os.Args[1] would panic
	// with an index-out-of-range instead of printing a usable message.
	if len(os.Args) < 2 {
		log.Fatalf("usage: %s <file>", os.Args[0])
	}
	if err := run(os.Args[1]); err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"fmt"
"os"
"runtime"
)
// operator sanity-checks Go's boolean operators (&&, ||, !) against their
// truth tables, terminating the program on any mismatch.
func operator() {
	type truthCase struct {
		got  bool
		want bool
	}
	cases := []truthCase{
		// AND
		{true && true, true},
		{true && false, false},
		{false && true, false},
		{false && false, false},
		// OR
		{true || true, true},
		{true || false, true},
		{false || true, true},
		{false || false, false},
		// NOT
		{!true, false},
		{!false, true},
	}
	for _, c := range cases {
		assert(c.got == c.want, "Wrong logic")
	}
}
// assert terminates the program when cond is false, printing the failure
// site and msg to stderr.
func assert(cond bool, msg string) {
	// Caller(1) reports the file and line of the assertion call site,
	// not of this helper.
	_, file, line, _ := runtime.Caller(1)
	if cond {
		return
	}
	fmt.Fprintf(os.Stderr, "Failed on (%s:%d):%s", file, line, msg)
	os.Exit(1)
}
|
package cmd
import (
"github.com/spf13/cobra"
)
// azureCmd groups the Azure-specific subcommands under the "azure" verb.
var azureCmd = &cobra.Command{
	Use:   "azure",
	Short: "Azure subcommands",
}
// init wires the azure command group beneath the top-level create command
// (createCmd is declared elsewhere in this package).
func init() {
	createCmd.AddCommand(azureCmd)
}
|
package Binary_Tree_Right_Side_View
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestRightSideView exercises rightSideView on the tree
//
//	      1
//	     / \
//	    2   3
//	     \   \
//	      5   4
//	     /
//	    7
//
// whose right-side view, top to bottom, is [1, 3, 4, 7] (node 7 is visible
// because level 3 has no node on the right subtree's side).
func TestRightSideView(t *testing.T) {
	ast := assert.New(t)
	ast.Equal([]int{1, 3, 4, 7}, rightSideView(&TreeNode{
		Val: 1,
		Left: &TreeNode{
			Val: 2,
			Right: &TreeNode{
				Val: 5,
				Left: &TreeNode{
					Val: 7,
				},
			},
		},
		Right: &TreeNode{
			Val: 3,
			Right: &TreeNode{
				Val: 4,
			},
		},
	}))
}
|
package bittrex
import (
"crypto/hmac"
"crypto/sha512"
"encoding/hex"
"encoding/json"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strconv"
"time"
"github.com/pkg/errors"
)
// restCall carries the state of a single REST request/response cycle
// against the Bittrex API: the outgoing request, the raw and decoded
// response, and the credentials/configuration used to build the call.
type restCall struct {
	req     *http.Request // outgoing request, built by doGenericCall
	res     *restResponse // decoded response envelope
	resBody []byte        // raw response body bytes

	httpClient    *http.Client    // optional custom client; nil selects a default
	httpTransport *http.Transport // optional custom transport; nil selects a default

	apiKey    string // Bittrex API key, sent as the "apiKey" query parameter
	apiSecret string // Bittrex API secret, used to HMAC-sign the URL

	// The host address of the Bittrex service. We split this out to simplify
	// testing.
	hostAddr string

	params map[string]string // extra query parameters appended to every call

	// A function that returns a value representing "now". We can replace this
	// value to simplify testing.
	nower nower
}
// addAuthData decorates the pending request with Bittrex authentication:
// the API key and a Unix-seconds nonce are appended to the query string,
// and an HMAC-SHA512 signature of the complete URL is attached as the
// "apisign" header.
func (rc *restCall) addAuthData() {
	nonce := strconv.FormatInt(rc.nower.Now().Unix(), 10)

	// Append the key and nonce to whatever query is already present.
	query := rc.req.URL.Query()
	query.Add("apiKey", rc.apiKey)
	query.Add("nonce", nonce)
	rc.req.URL.RawQuery = query.Encode()

	// Sign the full URL with the API secret.
	mac := hmac.New(sha512.New, []byte(rc.apiSecret))
	// Hash writes never fail per https://golang.org/pkg/hash/#Hash, so the
	// error is deliberately discarded.
	_, _ = mac.Write([]byte(rc.req.URL.String())) // nolint: gas, gosec
	rc.req.Header.Add("apisign", hex.EncodeToString(mac.Sum(nil)))
}
// doV1_1 performs a call against the v1.1 API surface, optionally signing
// the request when requiresAuth is true.
func (rc *restCall) doV1_1(api string, requiresAuth bool) error {
	if err := rc.doGenericCall(rc.hostAddr+"/api/v1.1/"+api, requiresAuth); err != nil {
		return errors.Wrap(err, "generic call failed")
	}
	return nil
}
// doV2_0 performs an unauthenticated call against the v2.0 API surface.
func (rc *restCall) doV2_0(api string) error {
	if err := rc.doGenericCall(rc.hostAddr+"/Api/v2.0/"+api, false); err != nil {
		return errors.Wrap(err, "generic call failed")
	}
	return nil
}
// restResponse is the JSON envelope Bittrex wraps around every REST reply.
type restResponse struct {
	Success bool             `json:"success"` // false signals an API-level failure
	Message string           `json:"message"` // diagnostic text from the API
	Result  *json.RawMessage `json:"result"`  // payload, decoded later by the caller
}
// nower is the interface that wraps Now.
//
// Now gets a time that represents the current time. Tests substitute a
// fake implementation so nonces become deterministic.
type nower interface {
	Now() time.Time
}

// defaultNower is the default value of the Connector.Nower interface, which is
// used to obtain a time that represents "now".
type defaultNower struct{}

// Now returns the result of time.Now().
func (dn defaultNower) Now() time.Time {
	return time.Now()
}
// TODO(carter): add some type of retry capability here
// doGenericCall performs a GET against uri with rc.params appended to the
// query string, optionally signing the request, then stores the raw body
// and decoded envelope on rc.
func (rc *restCall) doGenericCall(uri string, requiresAuth bool) error {
	// Pick the client. Previously a nil rc.httpClient fell back to
	// http.DefaultClient and then overwrote its Transport — and even set
	// TLSHandshakeTimeout on the shared http.DefaultTransport — leaking
	// configuration into every other user of those package globals. Use a
	// private client instead and never mutate the defaults.
	c := rc.httpClient
	if c == nil {
		c = &http.Client{}
	}
	// Choose the transport without touching package-level defaults, and
	// without stomping a transport the caller already configured.
	if rc.httpTransport != nil {
		c.Transport = rc.httpTransport
	} else if c.Transport == nil {
		c.Transport = &http.Transport{
			Proxy:               http.ProxyFromEnvironment,
			TLSHandshakeTimeout: 20 * time.Second,
		}
	}
	// Create a new request.
	var err error
	rc.req, err = http.NewRequest("GET", uri, nil)
	if err != nil {
		return errors.Wrap(err, "get request creation failed")
	}
	// Add the URL parameters to the request.
	rc.req.URL = addParamsToURL(rc.params, rc.req.URL)
	// Prepare authorization parameters and headers.
	if requiresAuth {
		rc.addAuthData()
	}
	// Execute the API call.
	var respRaw *http.Response
	respRaw, err = c.Do(rc.req)
	if err != nil {
		return errors.Wrap(err, "failed to execute the api call")
	}
	// Get the response body (getBody also closes it).
	rc.resBody, err = getBody(respRaw.Body)
	if err != nil {
		return errors.Wrap(err, "failed to get body")
	}
	// Convert the results.
	rc.res, err = getRestResponse(rc.resBody, respRaw.Status)
	if err != nil {
		return errors.Wrap(err, "failed to get the rest response")
	}
	return nil
}
// newRestCall assembles a restCall for the given credentials and host.
// A real wall-clock nower is installed; tests may swap it out afterwards.
func newRestCall(httpClient *http.Client, httpTransport *http.Transport, apiKey, apiSecret, hostAddr string) *restCall {
	rc := &restCall{nower: defaultNower{}}
	rc.httpClient = httpClient
	rc.httpTransport = httpTransport
	rc.apiKey = apiKey
	rc.apiSecret = apiSecret
	rc.hostAddr = hostAddr
	return rc
}
// getBody reads readCloser to EOF and then closes it. If both the read and
// the close fail, the close error is folded into the returned read error.
func getBody(readCloser io.ReadCloser) (body []byte, err error) {
	defer func() {
		derr := readCloser.Close()
		if derr == nil {
			return
		}
		if err != nil {
			// Previously derr.Error() was passed to Wrapf as the format
			// string, which corrupts messages containing '%' (flagged by
			// go vet's printf check); pass it through an explicit verb.
			err = errors.Wrapf(err, "error in defer: %v", derr)
		} else {
			err = errors.Wrap(derr, "error in defer")
		}
	}()
	body, err = ioutil.ReadAll(readCloser)
	if err != nil {
		return []byte{}, errors.Wrap(err, "read failed")
	}
	return body, nil
}
// getRestResponse decodes a Bittrex REST envelope from body, returning an
// error when decoding fails, when the API reports failure, or when no
// result payload is present. status is only used for debug logging.
func getRestResponse(body []byte, status string) (*restResponse, error) {
	// Unmarshal the response body directly into rr (the previous &rr passed
	// a **restResponse, which works but is needlessly indirect).
	rr := &restResponse{}
	if err := json.Unmarshal(body, rr); err != nil {
		// %s renders the body as text; the previous %v printed a []byte as a
		// list of decimal numbers, making the debug output unreadable.
		debugMessage("status: %v, body: %s", status, body)
		return nil, errors.Wrap(err, "json unmarshal failed")
	}
	// Verify the response was marked as a success.
	if !rr.Success {
		return nil, errors.Errorf("success is non-existent or false: %s", string(body))
	}
	// Make sure a response was set.
	if rr.Result == nil {
		return nil, errors.Errorf("no result exists: %s", string(body))
	}
	return rr, nil
}
func addParamsToURL(params map[string]string, u *url.URL) *url.URL {
newURL := *u
// Extract the original query.
q := newURL.Query()
// Add each of the parameters.
for k, v := range params {
q.Add(k, v)
}
// Save the new query.
newURL.RawQuery = q.Encode()
return &newURL
}
func debugEnabled() bool {
v := os.Getenv("DEBUG")
return v != ""
}
// debugMessage logs msg (printf-style) only when debug logging is enabled.
func debugMessage(msg string, v ...interface{}) {
	if !debugEnabled() {
		return
	}
	log.Printf(msg, v...)
}
|
package routes
import (
"net/http"
"github.com/jesperkha/SuperSurveys/app/data"
)
// setEncodedCookie encodes key/value with CookieHandler and stores the
// result on the response as a cookie named name, valid for the whole site.
func setEncodedCookie(res http.ResponseWriter, name string, key string, value interface{}) {
	encoded, err := CookieHandler.Encode(key, value)
	if err != nil {
		// NOTE(review): encode failures are silently dropped and the
		// response simply carries no cookie — confirm this is intentional.
		return
	}
	http.SetCookie(res, &http.Cookie{
		Name:  name,
		Value: encoded,
		Path:  "/",
	})
}
// getUserIdFromCookie extracts the user ID from the "token" cookie.
// It returns the empty string when the cookie is absent or fails to decode.
func getUserIdFromCookie(req *http.Request) (id string) {
	cookie, err := req.Cookie("token")
	if err != nil {
		return ""
	}
	var userID string
	if err := CookieHandler.Decode("userId", cookie.Value, &userID); err != nil {
		return ""
	}
	return userID
}
// Authorize resolves the requesting user from the session cookie. The
// second return value is false (with a zero-value user) when no valid
// session exists.
func Authorize(req *http.Request) (user data.User, authorized bool) {
	found, err := data.GetUserById(getUserIdFromCookie(req))
	if err != nil {
		return user, false // zero-value user
	}
	return found, true
}
// LoginHandler serves the login page on GET and authenticates the posted
// credentials on POST. It returns an HTTP error code for the caller to
// render, or 0 when the request has been fully handled.
func LoginHandler(res http.ResponseWriter, req *http.Request) (errorCode int) {
	if req.URL.Path != "/login" {
		return 404
	}
	switch req.Method {
	case "GET":
		http.ServeFile(res, req, "./client/auth/login.html")
		return 0
	case "POST":
		username := req.FormValue("username")
		password := req.FormValue("password")
		user, err := data.GetUser(username, password)
		if err != nil {
			// Previously a failed credential lookup fell through silently
			// and the client received an empty 200 response; report the
			// failure explicitly instead.
			return 401
		}
		setEncodedCookie(res, "token", "userId", user.UserId)
		http.Redirect(res, req, "/users/dashboard", http.StatusFound)
		return 0
	}
	return 400
}
// LogoutHandler ends the user's session.
// TODO: not implemented yet — currently a no-op that reports success (0).
func LogoutHandler(res http.ResponseWriter, req *http.Request) (errorCode int) {
	// remove cookie
	return 0
}
// SignupHandler registers a new user.
// TODO: not implemented yet — currently a no-op that reports success (0).
func SignupHandler(res http.ResponseWriter, req *http.Request) (errorCode int) {
	// create user account
	return 0
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package main
import (
"bytes"
"htrace/common"
"testing"
)
// TestSpansToDot renders a three-span trace (newDFSInputStream ->
// getBlockLocations -> ClientNamenodeProtocol#getBlockLocations) as a
// Graphviz digraph and compares the output byte-for-byte against the
// expected dot text: one node declaration per span plus one edge per
// parent/child relationship.
func TestSpansToDot(t *testing.T) {
	TEST_SPANS := common.SpanSlice{
		// Root span: no parents.
		&common.Span{
			Id: common.TestId("814c8ee0e7984be3a8af00ac64adccb6"),
			SpanData: common.SpanData{
				Begin:       1424813349020,
				End:         1424813349134,
				Description: "newDFSInputStream",
				TracerId:    "FsShell",
				Parents:     []common.SpanId{},
				Info: common.TraceInfoMap{
					"path": "/",
				},
			},
		},
		// Child of the root span.
		&common.Span{
			Id: common.TestId("cf2d5de696454548bc055d1e6024054c"),
			SpanData: common.SpanData{
				Begin:       1424813349025,
				End:         1424813349133,
				Description: "getBlockLocations",
				TracerId:    "FsShell",
				Parents:     []common.SpanId{common.TestId("814c8ee0e7984be3a8af00ac64adccb6")},
			},
		},
		// Grandchild: child of getBlockLocations.
		&common.Span{
			Id: common.TestId("37623806f9c64483b834b8ea5d6b4827"),
			SpanData: common.SpanData{
				Begin:       1424813349027,
				End:         1424813349073,
				Description: "ClientNamenodeProtocol#getBlockLocations",
				TracerId:    "FsShell",
				Parents:     []common.SpanId{common.TestId("cf2d5de696454548bc055d1e6024054c")},
			},
		},
	}
	w := bytes.NewBuffer(make([]byte, 0, 2048))
	err := spansToDot(TEST_SPANS, w)
	if err != nil {
		t.Fatalf("spansToDot failed: error %s\n", err.Error())
	}
	// The expected output is compared verbatim, so the raw string below is
	// whitespace-sensitive.
	EXPECTED_STR := `digraph spans {
"37623806f9c64483b834b8ea5d6b4827" [label="ClientNamenodeProtocol#getBlockLocations"];
"814c8ee0e7984be3a8af00ac64adccb6" [label="newDFSInputStream"];
"cf2d5de696454548bc055d1e6024054c" [label="getBlockLocations"];
"814c8ee0e7984be3a8af00ac64adccb6" -> "cf2d5de696454548bc055d1e6024054c";
"cf2d5de696454548bc055d1e6024054c" -> "37623806f9c64483b834b8ea5d6b4827";
}
`
	if w.String() != EXPECTED_STR {
		t.Fatalf("Expected to get:\n%s\nGot:\n%s\n", EXPECTED_STR, w.String())
	}
}
|
package e2e
import (
"context"
framework "github.com/operator-framework/operator-sdk/pkg/test"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"kubevirt-image-service/pkg/apis"
"kubevirt-image-service/pkg/apis/hypercloud/v1alpha1"
"kubevirt-image-service/pkg/util"
"testing"
)
const (
storageClassName = "rook-ceph-block"
snapshotClassName = "csi-rbdplugin-snapclass"
)
// virtualMachineImageTest registers the VirtualMachineImage type with the
// test framework scheme and runs the three VMI e2e scenarios in the test
// context's watch namespace, stopping at the first failure.
func virtualMachineImageTest(t *testing.T, ctx *framework.Context) error {
	ns, err := ctx.GetWatchNamespace()
	if err != nil {
		return err
	}
	if err := framework.AddToFrameworkScheme(apis.AddToScheme, &v1alpha1.VirtualMachineImage{}); err != nil {
		return err
	}
	scenarios := []func(*testing.T, string) error{
		testVmi,
		testVmiWithInvalidSnapshotClassName,
		testVmiWithPvcRwx,
	}
	for _, scenario := range scenarios {
		if err := scenario(t, ns); err != nil {
			return err
		}
	}
	return nil
}
func testVmi(t *testing.T, namespace string) error {
vmiName := "availvmi"
vmi := newVmi(namespace, vmiName)
if err := framework.Global.Client.Create(context.Background(), vmi, &cleanupOptions); err != nil {
return err
}
return waitForVmi(t, namespace, vmiName)
}
func testVmiWithInvalidSnapshotClassName(t *testing.T, namespace string) error {
vmiName := "errorvmi"
vmi := newVmi(namespace, vmiName)
vmi.Spec.SnapshotClassName = "wrongSnapshotClassname"
if err := framework.Global.Client.Create(context.Background(), vmi, &cleanupOptions); err != nil {
return err
}
return waitForVmiStateError(t, namespace, vmiName)
}
func testVmiWithPvcRwx(t *testing.T, namespace string) error {
vmiName := "rwxvmi"
vmi := newVmi(namespace, vmiName)
vmi.Spec.PVC.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}
if err := framework.Global.Client.Create(context.Background(), vmi, &cleanupOptions); err != nil {
return err
}
return waitForVmi(t, namespace, vmiName)
}
// waitForVmi polls until the named VMI reports the ConditionReadyToUse
// condition as true, the poll times out, or an unexpected error occurs.
// A not-found VMI simply keeps the poll going.
func waitForVmi(t *testing.T, namespace, name string) error {
	return wait.Poll(retryInterval, timeout, func() (bool, error) {
		t.Logf("Waiting for creating vmi: %s in Namespace: %s \n", name, namespace)
		vmi := &v1alpha1.VirtualMachineImage{}
		err := framework.Global.Client.Get(context.Background(), types.NamespacedName{Namespace: namespace, Name: name}, vmi)
		switch {
		case err == nil:
			// fall through to the condition check below
		case errors.IsNotFound(err):
			return false, nil
		default:
			return false, err
		}
		found, cond := util.GetConditionByType(vmi.Status.Conditions, v1alpha1.ConditionReadyToUse)
		if !found {
			return false, nil
		}
		// TODO: check error condition
		return cond.Status == corev1.ConditionTrue, nil
	})
}

// waitForVmiStateError polls until the named VMI enters the Error state,
// the poll times out, or an unexpected error occurs.
func waitForVmiStateError(t *testing.T, namespace, name string) error {
	return wait.Poll(retryInterval, timeout, func() (bool, error) {
		t.Logf("Waiting for creating vmi: %s in Namespace: %s \n", name, namespace)
		vmi := &v1alpha1.VirtualMachineImage{}
		err := framework.Global.Client.Get(context.Background(), types.NamespacedName{Namespace: namespace, Name: name}, vmi)
		if err != nil {
			if errors.IsNotFound(err) {
				return false, nil
			}
			return false, err
		}
		return vmi.Status.State == v1alpha1.VirtualMachineImageStateError, nil
	})
}
// newVmi builds the baseline VirtualMachineImage fixture used by the e2e
// scenarios: a 3Gi block-mode RWO PVC on the rook-ceph storage class,
// sourced from a public cirros image, with the default snapshot class.
// Individual tests mutate the returned object before creating it.
func newVmi(ns, name string) *v1alpha1.VirtualMachineImage {
	scName := storageClassName
	volumeMode := corev1.PersistentVolumeBlock
	return &v1alpha1.VirtualMachineImage{
		ObjectMeta: v1.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
		Spec: v1alpha1.VirtualMachineImageSpec{
			Source: v1alpha1.VirtualMachineImageSource{
				HTTP: "https://download.cirros-cloud.net/contrib/0.3.0/cirros-0.3.0-i386-disk.img",
			},
			PVC: corev1.PersistentVolumeClaimSpec{
				VolumeMode:  &volumeMode,
				AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
				Resources: corev1.ResourceRequirements{
					Requests: map[corev1.ResourceName]resource.Quantity{
						corev1.ResourceStorage: resource.MustParse("3Gi"),
					},
				},
				StorageClassName: &scName,
			},
			SnapshotClassName: snapshotClassName,
		},
	}
}
|
package wasi
import (
"os"
"sync/atomic"
)
// unknownDevice is the device ID reported when the underlying platform
// exposes none: all 64 bits set.
const unknownDevice = (1 << 64) - 1

// fileCookie is a process-wide counter used to synthesize unique inode
// numbers; it is only ever incremented atomically.
var fileCookie uint64
// fileStatUnknown synthesizes a FileStat when no real device/inode identity
// is available: the device is the unknownDevice sentinel and the inode is a
// fresh process-unique counter value, so no two calls report the same
// identity. Access, modification and change times all mirror
// info.ModTime(), since that is the only timestamp os.FileInfo exposes.
func fileStatUnknown(info os.FileInfo) FileStat {
	modTime := info.ModTime()
	return FileStat{
		Dev:        unknownDevice,
		Inode:      atomic.AddUint64(&fileCookie, 1),
		Mode:       info.Mode(),
		LinkCount:  1,
		Size:       uint64(info.Size()),
		AccessTime: modTime,
		ModTime:    modTime,
		ChangeTime: modTime,
	}
}
|
package identity
import "context"
type contextKey string
func (k contextKey) String() string {
return "context key: " + string(k)
}
var (
userKey = contextKey("user")
)
// WithUser adds the user to the request context.
func WithUser(ctx context.Context, user string) context.Context {
return context.WithValue(ctx, userKey, user)
}
// FromContext retrieves the user from the context.
func FromContext(ctx context.Context) string {
if val, ok := ctx.Value(userKey).(string); ok {
return val
}
return ""
}
|
package socket
import (
"errors"
protoutil "github.com/gogo/protobuf/proto"
"net"
"xj_web_server/httpserver/wss/proto"
"xj_web_server/util"
"strings"
"sync"
"time"
)
const (
	// writeWait is the per-message write deadline applied to the TCP socket.
	writeWait = 10 * time.Second
	// msgCont is the size in bytes of each read buffer.
	msgCont = 1024
)
// Connection wraps a raw TCP connection with channel-based read/write
// pumps (readLoop/writeLoop) and an idempotent close protocol.
type Connection struct {
	tcpConn   net.Conn      // underlying socket
	inChan    chan []byte   // inbound messages, filled by readLoop
	outChan   chan []byte   // outbound messages, drained by writeLoop
	closeChan chan struct{} // closed exactly once to signal shutdown
	mutex     sync.Mutex    // guards isClosed / the close of closeChan
	isClosed  bool          // whether closeChan has already been closed
}
// InitConn listens on address and, for every accepted TCP connection,
// wraps it in a Connection (starting its read/write pumps) and hands it to
// connFunc on a fresh goroutine. It blocks until the listener is closed,
// returning nil on a clean shutdown or the listen error otherwise.
func InitConn(address string, connFunc func(conn *Connection)) error {
	listener, err := net.Listen("tcp", address)
	if err != nil {
		return err
	}
	for {
		sock, acceptErr := listener.Accept()
		if acceptErr != nil {
			// A closed listener is the normal shutdown signal.
			if strings.Contains(acceptErr.Error(), "use of closed network connection") {
				return nil
			}
			util.Logger.Errorf("tcp accept err:%v", acceptErr)
			continue
		}
		conn := &Connection{
			tcpConn:   sock,
			inChan:    make(chan []byte),
			outChan:   make(chan []byte),
			closeChan: make(chan struct{}, 1),
		}
		go conn.readLoop()
		go conn.writeLoop()
		go connFunc(conn)
	}
}
// TODO: revise how messages are created.
// createMsg wraps data into a command packet for command cmd addressed to
// user uid.
func (conn *Connection) createMsg(cmd byte, data []byte, uid int32) ([]byte, error) {
	// TODO: look up the key pair for uid (payload signing/encryption is not
	// implemented yet, which is why this never returns an error today).
	// 1. Assemble the command packet.
	createCmd := util.CreateCmd(cmd, data, uid)
	return createCmd, nil
}
// ReadMessage blocks until a message arrives from the peer, or returns an
// error once the connection has been closed.
func (conn *Connection) ReadMessage() ([]byte, error) {
	select {
	case data := <-conn.inChan:
		return data, nil
	case <-conn.closeChan:
		return nil, errors.New("connection is closed")
	}
}
// WriteMessage packages (cmd, data, uid) into a command packet and queues
// it for the write pump. It returns an error if the packet cannot be built
// or if the connection has been closed.
func (conn *Connection) WriteMessage(cmd byte, data []byte, uid int32) (err error) {
	msg, err := conn.createMsg(cmd, data, uid)
	if err != nil {
		// Previously the error was recorded but execution fell through and
		// queued the nil message anyway; bail out instead.
		return errors.New("msg create err" + err.Error())
	}
	select {
	case conn.outChan <- msg:
	case <-conn.closeChan:
		err = errors.New("connection is closed")
	}
	return
}
// WriteErr reports a protocol failure to the peer by sending a
// Hall_S_Fail frame (command 0x09) carrying code and errorMsg.
func (conn *Connection) WriteErr(code int32, errorMsg string) (err error) {
	msg := &proto.Hall_S_Fail{
		ErrorCode: code,
		ErrorMsg:  errorMsg,
	}
	dataMsg, err := protoutil.Marshal(msg)
	if err != nil {
		// Previously a marshal failure was discarded with `_` and an empty
		// payload was sent; surface it to the caller instead.
		return err
	}
	return conn.WriteMessage(0x09, dataMsg, 0)
}
// GetIP returns the remote (client) address of the connection in
// "host:port" form.
func (conn *Connection) GetIP() string {
	return conn.tcpConn.RemoteAddr().String()
}
// Close shuts the connection down: it closes closeChan (waking any blocked
// readers/writers) and then closes the underlying socket. It is safe to
// call multiple times.
func (conn *Connection) Close() error {
	// NOTE(review): presumably this sleep gives in-flight writes a moment
	// to drain before the socket closes — confirm; it is not a guarantee.
	time.Sleep(50 * time.Millisecond)
	// closeChan may only be closed once; the mutex guards that transition.
	conn.mutex.Lock()
	if !conn.isClosed {
		close(conn.closeChan)
		conn.isClosed = true
	}
	conn.mutex.Unlock()
	// net.Conn.Close is itself safe to call repeatedly.
	err := conn.tcpConn.Close()
	return err
}
// readLoop is the read pump: it copies bytes from the TCP socket into
// inChan until the socket errors or the connection is closed, then tears
// the connection down.
func (conn *Connection) readLoop() {
	for {
		var data = make([]byte, msgCont)
		n, err := conn.tcpConn.Read(data)
		if err != nil {
			break
		}
		// Block until a consumer drains inChan — but also watch closeChan.
		// Previously a Close while no reader was waiting left this
		// goroutine blocked on the send forever (goroutine leak).
		select {
		case conn.inChan <- data[:n]:
		case <-conn.closeChan:
			conn.Close()
			return
		}
	}
	conn.Close()
}
// writeLoop is the write pump: it drains outChan and writes each message
// to the TCP socket under a write deadline, tearing the connection down on
// any error or on close.
func (conn *Connection) writeLoop() {
	for {
		select {
		case data := <-conn.outChan:
			// Apply the per-message write deadline.
			if err := conn.tcpConn.SetWriteDeadline(time.Now().Add(writeWait)); err != nil {
				util.Logger.Errorf("write err:%v", err)
				conn.Close()
				return
			}
			if _, err := conn.tcpConn.Write(data); err != nil {
				util.Logger.Errorf("write err:%v", err)
				conn.Close()
				return
			}
		case <-conn.closeChan:
			// Previously writeLoop never watched closeChan, so after a Close
			// with no pending writes it blocked on outChan forever
			// (goroutine leak).
			conn.Close()
			return
		}
	}
}
|
package email
import (
"bytes"
"fmt"
"html/template"
"log"
"mta_app/config"
"net/smtp"
)
// emailUser holds the SMTP account used to send notifications and the list
// of recipient addresses.
type emailUser struct {
	Username    string   // SMTP login, used for auth and as the sender
	Password    string   // SMTP password
	EmailServer string   // SMTP server hostname
	Port        int      // SMTP server port
	SendTo      []string // recipient addresses
}
// NewEmailUser adapts the configuration options into an emailUser.
func NewEmailUser(opts config.EmailUser) emailUser {
	var user emailUser
	user.Username = opts.Username
	user.Password = opts.Password
	user.EmailServer = opts.Server
	user.Port = opts.Port
	user.SendTo = opts.SendTo
	return user
}
// SendEmail renders the MTA notification and sends it through the
// configured SMTP server to all configured recipients.
func (user emailUser) SendEmail(mtaInfo config.MTAInfo) error {
	auth := smtp.PlainAuth("", user.Username, user.Password, user.EmailServer)
	email, err := user.createEmail(mtaInfo)
	if err != nil {
		return err
	}
	// Bug fix: the server address and From address were hard-coded to
	// "smtp.gmail.com:587" / "benraskin92@gmail.com", silently ignoring
	// the configured EmailServer/Port/Username.
	addr := fmt.Sprintf("%s:%d", user.EmailServer, user.Port)
	if err := smtp.SendMail(addr, auth, user.Username, user.SendTo, email); err != nil {
		return err
	}
	log.Printf("sending email to %s", user.SendTo)
	return nil
}
// SmtpTemplateData is the context rendered into emailTemplate.
type SmtpTemplateData struct {
	From string // sender display name
	// To string
	Subject string // subject header line
	Body    string // message body text
}
const emailTemplate = `From: {{.From}}
Subject: {{.Subject}}
{{.Body}}
{{.From}}
`
// createEmail renders the notification message for mtaInfo and returns
// it as raw message bytes ready for smtp.SendMail.
func (user emailUser) createEmail(mtaInfo config.MTAInfo) ([]byte, error) {
	data := &SmtpTemplateData{
		From:    "Ben",
		Subject: "Bus is 2 stops away!",
		Body:    fmt.Sprintf("M11 is currently at %s", mtaInfo.StopCheck),
	}
	tpl, err := template.New("emailTemplate").Parse(emailTemplate)
	if err != nil {
		return nil, fmt.Errorf("error trying to parse mail template: %v", err)
	}
	var rendered bytes.Buffer
	if err := tpl.Execute(&rendered, data); err != nil {
		return nil, fmt.Errorf("error trying to execute mail template: %v", err)
	}
	return rendered.Bytes(), nil
}
|
package virtualmachine
import (
"fmt"
"testing"
"../ast"
"../compiler"
"../lexer"
"../object"
"../parser"
)
// virtualMachineTestCase pairs a source program with the value expected
// on top of the VM stack after running it. expected may be an int,
// bool, string, []int, *object.Null or *object.Error (see
// testExpectedObject).
type virtualMachineTestCase struct {
	input    string
	expected interface{}
}
// parse lexes and parses input, returning its AST.
func parse(input string) *ast.Program {
	return parser.InitializeParser(lexer.InitializeLexer(input)).ParseProgram()
}
// testIntegerObject asserts actual is an *object.Integer holding expected.
func testIntegerObject(expected int64, actual object.Object) error {
	integer, ok := actual.(*object.Integer)
	if !ok {
		return fmt.Errorf("object is not Integer, got=%T (%+v)", actual, actual)
	}
	if integer.Value != expected {
		return fmt.Errorf("object has wrong value, got=%d, want=%d", integer.Value, expected)
	}
	return nil
}
// testBooleanObject asserts actual is an *object.Boolean holding expected.
func testBooleanObject(expected bool, actual object.Object) error {
	boolean, ok := actual.(*object.Boolean)
	if !ok {
		return fmt.Errorf("object is not Boolean, got=%T (%+v)", actual, actual)
	}
	if boolean.Value != expected {
		return fmt.Errorf("object has wrong value, got=%t, want=%t", boolean.Value, expected)
	}
	return nil
}
// testStringObject asserts actual is an *object.String holding expected.
func testStringObject(expected string, actual object.Object) error {
	result, ok := actual.(*object.String)
	if !ok {
		return fmt.Errorf("object is not String, got=%T (%+v)", actual, actual)
	}
	if result.Value != expected {
		// Bug fix: the format string read "got =%q, want+%q" — stray
		// space and '+' — now consistent with the sibling helpers.
		return fmt.Errorf("object has wrong value, got=%q, want=%q", result.Value, expected)
	}
	return nil
}
// testExpectedObject dispatches on the expected value's Go type and
// asserts the VM produced a matching object. Supported expectations:
// int, bool, string, []int (integer array), *object.Null (must be the
// NULL singleton) and *object.Error (compared by message).
func testExpectedObject(t *testing.T, expected interface{}, actual object.Object) {
	t.Helper()
	switch expected := expected.(type) {
	case int:
		err := testIntegerObject(int64(expected), actual)
		if nil != err {
			t.Errorf("testIntegerObject failed: %s", err)
		}
	case bool:
		err := testBooleanObject(bool(expected), actual)
		if nil != err {
			t.Errorf("testBooleanObject failed: %s", err)
		}
	case *object.Null:
		// Identity comparison against the VM's shared null singleton.
		if actual != NULL {
			t.Errorf("object is not NULL: %T (%+v)", actual, actual)
		}
	case string:
		err := testStringObject(expected, actual)
		if nil != err {
			t.Errorf("testStringObject failed: %s", err)
		}
	case []int:
		// Arrays are compared element-wise as integers.
		array, ok := actual.(*object.Array)
		if !ok {
			t.Errorf("object not Array: %T (%+v)", actual, actual)
			return
		}
		if len(array.Elements) != len(expected) {
			t.Errorf("wrong number of elements, want=%d, got=%d", len(expected), len(array.Elements))
			return
		}
		for index, expectedElement := range expected {
			err := testIntegerObject(int64(expectedElement), array.Elements[index])
			if nil != err {
				t.Errorf("testIntegerObject failed: %s", err)
			}
		}
	case *object.Error:
		// Runtime errors are matched on their message text only.
		errorObject, ok := actual.(*object.Error)
		if !ok {
			t.Errorf("object is not Error: %T (%+v)", actual, actual)
			return
		}
		if errorObject.Message != expected.Message {
			t.Errorf("wrong error message, expected=%q, got=%q", expected.Message, errorObject.Message)
		}
	default:
		t.Errorf("object not defined: %T (%+v)", actual, actual)
	}
}
// runVirtualMachineTests compiles and runs every case, then checks the
// value the VM last popped off its stack against the expectation.
func runVirtualMachineTests(t *testing.T, tests []virtualMachineTestCase) {
	t.Helper()
	for _, testCase := range tests {
		program := parse(testCase.input)
		comp := compiler.InitializeCompiler()
		if err := comp.Compile(program); nil != err {
			t.Fatalf("compiler error: %s", err)
		}
		// Debugging tip: iterate over comp.Bytecode().Constants here to
		// dump compiled functions/integers when diagnosing failures.
		virtualMachine := InitializeVirtualMachine(comp.Bytecode())
		if err := virtualMachine.Run(); nil != err {
			t.Fatalf("Virtual Machine error: %s", err)
		}
		stackElement := virtualMachine.LastPoppedStackElement()
		testExpectedObject(t, testCase.expected, stackElement)
	}
}
// TestIntegerArithmetic : literals, unary minus and operator precedence.
func TestIntegerArithmetic(t *testing.T) {
	cases := []virtualMachineTestCase{
		{"1", 1},
		{"2", 2},
		{"1 + 2", 3},
		{"-5", -5},
		{"-10", -10},
		{"-50 + 100 + -50", 0},
		{"(5 + 10 * 2 + 15 / 3) * 2 + -10", 50},
	}
	runVirtualMachineTests(t, cases)
}
// TestBooleanExpressions : comparisons, equality and the ! operator.
func TestBooleanExpressions(t *testing.T) {
	cases := []virtualMachineTestCase{
		{"TRUE", true},
		{"FALSE", false},
		{"1 < 2", true},
		{"1 > 2", false},
		{"1 < 1", false},
		{"1 > 1", false},
		{"1 == 1", true},
		{"1 != 1", false},
		{"1 == 2", false},
		{"1 != 2", true},
		{"TRUE == TRUE", true},
		{"FALSE == FALSE", true},
		{"TRUE == FALSE", false},
		{"TRUE != FALSE", true},
		{"FALSE != TRUE", true},
		{"(1 < 2) == TRUE", true},
		{"(1 < 2) == FALSE", false},
		{"(1 > 2) == TRUE", false},
		{"(1 > 2) == FALSE", true},
		{"!TRUE", false},
		{"!FALSE", true},
		{"!5", false},
		{"!!TRUE", true},
		{"!!FALSE", false},
		{"!!5", true},
		{"!(if (FALSE) { 5 })", true},
	}
	runVirtualMachineTests(t, cases)
}
// TestConditionals : if/else evaluation, truthiness and NULL for
// conditionals whose branch is not taken.
func TestConditionals(t *testing.T) {
	cases := []virtualMachineTestCase{
		{"if (TRUE) { 10 }", 10},
		{"if (TRUE) { 10 } else { 20 }", 10},
		{"if (FALSE) { 10 } else { 20 }", 20},
		{"if (1) { 10 }", 10},
		{"if (1 < 2) { 10 }", 10},
		{"if (1 < 2) { 10 } else { 20 }", 10},
		{"if (1 > 2) { 10 } else { 20 }", 20},
		{"if (1 > 2) { 10 }", NULL},
		{"if (FALSE) { 10 }", NULL},
		{"if ((if (FALSE) { 10 })) { 10 } else { 20 }", 20},
	}
	runVirtualMachineTests(t, cases)
}
// TestGlobalLetStatements : global bindings and their reuse.
func TestGlobalLetStatements(t *testing.T) {
	cases := []virtualMachineTestCase{
		{"let one <- 1; one", 1},
		{"let one <- 1; let two <- 2; one + two", 3},
		{"let one <- 1; let two <- one + one; one + two", 3},
	}
	runVirtualMachineTests(t, cases)
}
// TestStringExpressions : string literals and concatenation.
func TestStringExpressions(t *testing.T) {
	cases := []virtualMachineTestCase{
		{`"foo"`, "foo"},
		{`"foo" + " bar"`, "foo bar"},
		{`"foo" + " bar" + " baz"`, "foo bar baz"},
	}
	runVirtualMachineTests(t, cases)
}
// TestArrayLiterals : empty, literal and computed-element arrays.
func TestArrayLiterals(t *testing.T) {
	cases := []virtualMachineTestCase{
		{"[]", []int{}},
		{"[1, 2, 3]", []int{1, 2, 3}},
		{"[1 + 2, 3 * 4, 5 + 6]", []int{3, 12, 11}},
	}
	runVirtualMachineTests(t, cases)
}
// TestIndexExpressions : array indexing, including out-of-range and
// negative indexes which yield NULL.
func TestIndexExpressions(t *testing.T) {
	cases := []virtualMachineTestCase{
		{"[1, 2, 3][1]", 2},
		{"[1, 2, 3][0 + 2]", 3},
		{"[[1, 1, 1]][0][0]", 1},
		{"[][0]", NULL},
		{"[1, 2, 3][99]", NULL},
		{"[1][-1]", NULL},
	}
	runVirtualMachineTests(t, cases)
}
// TestCallingFunctionsWithoutParameters : zero-argument calls, implicit
// returns and functions calling other functions.
func TestCallingFunctionsWithoutParameters(t *testing.T) {
	cases := []virtualMachineTestCase{
		{
			input: `
let fivePlusTen <- function() { 5 + 10 }
fivePlusTen()
`,
			expected: 15,
		},
		{
			input: `
let first <- function() { 1 }
let second <- function() { 2 }
first() + second()
`,
			expected: 3,
		},
		{
			input: `
let a <- function() { 1 }
let b <- function() { a() + 1 }
let c <- function() { b() + 1 }
c()
`,
			expected: 3,
		},
	}
	runVirtualMachineTests(t, cases)
}
// TestFunctionsWithReturnStatement : explicit return short-circuits the
// rest of the body.
func TestFunctionsWithReturnStatement(t *testing.T) {
	cases := []virtualMachineTestCase{
		{
			input: `
let earlyExit <- function() { return 99; 100; }
earlyExit()
`,
			expected: 99,
		},
		{
			input: `
let earlyExit <- function() { return 99; return 100; }
earlyExit()
`,
			expected: 99,
		},
	}
	runVirtualMachineTests(t, cases)
}
// TestFunctionsWithoutReturnValue : empty bodies evaluate to NULL.
func TestFunctionsWithoutReturnValue(t *testing.T) {
	cases := []virtualMachineTestCase{
		{
			input: `
let noReturn <- function() {}
noReturn()
`,
			expected: NULL,
		},
		{
			input: `
let noReturn <- function() {}
let noReturnAgain <- function() { noReturn() }
noReturn()
noReturnAgain()
`,
			expected: NULL,
		},
	}
	runVirtualMachineTests(t, cases)
}
// TestFirstClassFunctions : a function returned from another function
// can itself be called.
func TestFirstClassFunctions(t *testing.T) {
	cases := []virtualMachineTestCase{
		{
			input: `
let returnOne <- function() { 1 }
let returnOneReturner <- function() { returnOne }
returnOneReturner()()
`,
			expected: 1,
		},
	}
	runVirtualMachineTests(t, cases)
}
// TestCallingFunctionsWithBindings : local let-bindings are scoped per
// function and shadow nothing outside it.
// Fix: the original table contained the firstFoo/secondFoo case twice
// verbatim; the duplicate has been removed.
func TestCallingFunctionsWithBindings(t *testing.T) {
	cases := []virtualMachineTestCase{
		{
			input: `
let one <- function() { let one <- 1; one }
one()
`,
			expected: 1,
		},
		{
			input: `
let oneAndTwo <- function() { let one <- 1; let two <- 2; one + two }
oneAndTwo()
`,
			expected: 3,
		},
		{
			input: `
let oneAndTwo <- function() { let one <- 1; let two <- 2; one + two }
let threeAndFour <- function() { let three <- 3; let four <- 4; three + four }
oneAndTwo() + threeAndFour()
`,
			expected: 10,
		},
		{
			input: `
let firstFoo <- function() { let foo <- 50; foo }
let secondFoo <- function() { let foo <- 100; foo }
firstFoo() + secondFoo()
`,
			expected: 150,
		},
	}
	runVirtualMachineTests(t, cases)
}
// TestNestedFirstClassFunctions : a function defined inside another
// function can be returned and called. (Renamed from the typo'd
// "TestFirsClassFunctions".)
func TestNestedFirstClassFunctions(t *testing.T) {
	cases := []virtualMachineTestCase{
		{
			input: `
let returnOneReturns <- function() {
	let returnOne <- function() {
		1
	}
	returnOne
}
returnOneReturns()()
`,
			expected: 1,
		},
	}
	runVirtualMachineTests(t, cases)
}
// TestCallingFunctionsWithParametersAndBindings : parameters, locals
// and globals interacting across call frames.
func TestCallingFunctionsWithParametersAndBindings(t *testing.T) {
	cases := []virtualMachineTestCase{
		{
			input: `
let identity <- function(a) { a }
identity(4)
`,
			expected: 4,
		},
		{
			input: `
let sum <- function(a, b) { a + b }
sum(1, 2)
`,
			expected: 3,
		},
		{
			input: `
let sum <- function(a, b) {
	let c <- a + b;
	c;
}
sum(1, 2)
`,
			expected: 3,
		},
		{
			input: `
let sum <- function(a, b) {
	let c <- a + b;
	c;
}
sum(1, 2) + sum(3, 4);
`,
			expected: 10,
		},
		{
			input: `
let sum <- function(a, b) {
	let c <- a + b;
	c;
}
let outer <- function() {
	sum(1, 2) + sum(3, 4)
}
outer();
`,
			expected: 10,
		},
		{
			input: `
let globalNumber <- 10;
let sum <- function(a, b) {
	let c <- a + b;
	c + globalNumber;
}
let outer <- function() {
	sum(1, 2) + sum(3, 4) + globalNumber
}
outer() + globalNumber;
`,
			expected: 50,
		},
	}
	runVirtualMachineTests(t, cases)
}
// TestCallingFunctionsWithWrongParameters : arity mismatches are
// reported as VM runtime errors, checked by exact message.
func TestCallingFunctionsWithWrongParameters(t *testing.T) {
	cases := []virtualMachineTestCase{
		{input: `function() { 1 }(1)`, expected: `wrong number of parameters: want=0, got=1`},
		{input: `function(a) { a }()`, expected: `wrong number of parameters: want=1, got=0`},
		{input: `function(a, b) { a + b }(1)`, expected: `wrong number of parameters: want=2, got=1`},
	}
	for _, tc := range cases {
		comp := compiler.InitializeCompiler()
		if err := comp.Compile(parse(tc.input)); nil != err {
			t.Fatalf("compiler error: %s", err)
		}
		vm := InitializeVirtualMachine(comp.Bytecode())
		err := vm.Run()
		if nil == err {
			t.Fatalf("expected Virtual Machine error but resulted in none.")
		}
		if err.Error() != tc.expected {
			t.Fatalf("wrong Virtual Machine error: want=%q, got=%q", tc.expected, err)
		}
	}
}
// TestBuiltinFunctions : len/puts/head/last/tail/push on valid and
// invalid arguments (errors matched by message).
func TestBuiltinFunctions(t *testing.T) {
	cases := []virtualMachineTestCase{
		{`len("")`, 0},
		{`len("four")`, 4},
		{`len("hello, world")`, 12},
		{`len(1)`, &object.Error{Message: "parameters to `len` not supported, got=INTEGER"}},
		{`len("one", "two")`, &object.Error{Message: "wrong number of parameters, got=2, want=1"}},
		{`len([])`, 0},
		{`len([1, 2, 3])`, 3},
		{`puts("hello", "world!")`, NULL},
		{`head([1, 2, 3])`, 1},
		{`head([])`, NULL},
		{`head(1)`, &object.Error{Message: "parameter to `head` must be ARRAY, got INTEGER"}},
		{`last([1, 2, 3])`, 3},
		{`last([])`, NULL},
		{`last(1)`, &object.Error{Message: "parameter to `last` must be ARRAY, got INTEGER"}},
		{`tail([1, 2, 3])`, []int{2, 3}},
		{`tail([])`, NULL},
		{`tail(1)`, &object.Error{Message: "parameter to `tail` must be ARRAY, got INTEGER"}},
		{`push([1, 2], 3)`, []int{1, 2, 3}},
		{`push([], 1)`, []int{1}},
		{`push(1, 1)`, &object.Error{Message: "parameter to `push` must be ARRAY, got INTEGER"}},
	}
	runVirtualMachineTests(t, cases)
}
// TestClosures : captured parameters/locals survive the defining call,
// including multiple nesting levels and sibling closures.
func TestClosures(t *testing.T) {
	cases := []virtualMachineTestCase{
		{
			input: `
let newClosure <- function(a) {
	function() {
		a;
	};
};
let closure <- newClosure(99);
closure();
`,
			expected: 99,
		},
		{
			input: `
let newAdder <- function(a, b) {
	function(c) {
		a + b + c;
	};
};
let adder <- newAdder(1, 2);
adder(3);
`,
			expected: 6,
		},
		{
			input: `
let newAdder <- function(a, b) {
	let c <- a + b
	function(d) {
		c + d;
	};
};
let adder <- newAdder(1, 2);
adder(3);
`,
			expected: 6,
		},
		{
			input: `
let newAdderOuter <- function(a, b) {
	let c <- a + b;
	function(d) {
		let e <- c + d;
		function(f) {
			e + f;
		};
	};
};
let newAdderInner <- newAdderOuter(1, 2);
let adder <- newAdderInner(3);
adder(4);
`,
			expected: 10,
		},
		{
			input: `
let a <- 1;
let newAdderOuter <- function(b) {
	function(c) {
		function(d) {
			a + b + c + d;
		};
	};
};
let newAdderInner <- newAdderOuter(2);
let adder <- newAdderInner(3);
adder(4);
`,
			expected: 10,
		},
		{
			input: `
let newClosure <- function(a, b) {
	let one <- function() {
		a;
	}
	let two <- function() {
		b;
	}
	function() {
		one() + two();
	}
}
let closure <- newClosure(1, 9)
closure()
`,
			expected: 10,
		},
	}
	runVirtualMachineTests(t, cases)
}
// TestRecursiveClosures : a closure may call itself, directly and when
// wrapped or defined inside another function.
func TestRecursiveClosures(t *testing.T) {
	cases := []virtualMachineTestCase{
		{
			input: `
let countDown <- function(x) {
	if (0 == x) {
		return 0;
	}
	countDown(x - 1);
};
countDown(1);
`,
			expected: 0,
		},
		{
			input: `
let countDown <- function(x) {
	if (0 == x) {
		return 0;
	}
	countDown(x - 1);
};
let wrapper <- function() {
	countDown(1);
}
wrapper();
`,
			expected: 0,
		},
		{
			input: `
let wrapper <- function() {
	let countDown <- function(x) {
		if (0 == x) {
			return 0;
		}
		countDown(x - 1);
	};
	countDown(1);
}
wrapper();
`,
			expected: 0,
		},
	}
	runVirtualMachineTests(t, cases)
}
// TestRecursiveFibonacci : deep, branching recursion (fib(15) = 610).
func TestRecursiveFibonacci(t *testing.T) {
	cases := []virtualMachineTestCase{
		{
			input: `
let fibonacci <- function(x) {
	if (0 == x) {
		return 0
	} else {
		if (1 == x) {
			return 1
		} else {
			fibonacci(x - 1) + fibonacci(x - 2)
		}
	}
}
fibonacci(15)
`,
			expected: 610,
		},
	}
	runVirtualMachineTests(t, cases)
}
|
package scan
import (
"io"
"github.com/bobappleyard/readline"
// "gitlab.com/Scheming/interpreter/config" //// config not yet implemented
)
// gnuReadline adapts GNU readline's line-at-a-time input to an
// io.ByteReader: it buffers the current line and hands it out one byte
// at a time, prompting for the next line when exhausted.
type gnuReadline struct {
	// config *config.T //// config not yet implemented
	line string // current input line, '\n'-terminated by ReadByte
	next int    // index of the next byte of line to return
}
// NewConsoleReader returns an io.ByteReader serving bytes typed at an
// interactive GNU-readline prompt.
func NewConsoleReader( /* config *config.T */ ) io.ByteReader {
	reader := &gnuReadline{line: "", next: 0}
	return reader
}
// historyLoaded records whether the readline history file has been
// loaded yet; set on the first ReadByte call.
var historyLoaded = false
// ReadByte returns the next byte of console input, prompting the user
// for a fresh line (via GNU readline) whenever the buffered line has
// been fully consumed. On EOF/interrupt it propagates readline's error.
func (r *gnuReadline) ReadByte() (byte, error) {
	var err error
	// Refill the buffer when exhausted; a loop (rather than an if)
	// guards against readline handing back an empty line.
	for r.next >= len(r.line) {
		if !historyLoaded {
			// Best effort: a missing history file is not fatal, so the
			// returned error is deliberately ignored here.
			readline.LoadHistory("./LiSP.history")
			historyLoaded = true
		}
		//// prompt := r.config.Prompt
		//// if prompt == "" {
		//// prompt = "scm> "
		//// }
		prompt := "scm> "
		r.line, err = readline.String(prompt)
		if err != nil {
			// NOTE(review): presumably io.EOF on Ctrl-D — confirm
			// against the readline package's contract.
			return 0, err
		}
		// fmt.Printf("Got %q\n", r.line)
		readline.AddHistory(r.line)
		r.next = 0
		// Re-append the newline readline strips, so callers see proper
		// line boundaries.
		r.line += "\n"
	}
	var b byte = r.line[r.next]
	r.next++
	// fmt.Printf("Returning %c (%#c)\n", b, b)
	return b, nil
}
// func splitAtRuneBoundary(b []byte, s string) (byteCount int, remaining string) {
// if len(s) <= len(b) {
// copy(b, s)
// return len(s), ""
// }
// i := len(b)
// for i > 0 && i >= len(b)-utf8.UTFMax && !utf8.RuneStart(s[i]) {
// i--
// }
// copy(b, s[:i])
// return i, s[i:]
// }
|
package logging
import (
"fmt"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"os"
"strconv"
"time"
)
// NewLogger builds a development zap logger writing to stdout and to a
// per-day file under ./logs (e.g. logs/2024-1-2.log). On any setup
// failure it logs the problem and falls back to a no-op logger.
func NewLogger() *zap.Logger {
	year, month, day := time.Now().Date()
	filename := "logs/" + strconv.Itoa(year) + "-" + strconv.Itoa(int(month)) + "-" + strconv.Itoa(day) + ".log"
	// Bug fix: the old code called os.Create, truncating the current
	// day's log on every construction, and ignored the Mkdir error.
	// zap's file sink creates/appends the file itself.
	if err := os.MkdirAll("logs", 0755); err != nil {
		fmt.Println("error creating zap logging - returning NewNop: ", err)
		return zap.NewNop()
	}
	config := zap.NewDevelopmentConfig()
	config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	config.OutputPaths = []string{
		"stdout",
		filename,
	}
	logger, err := config.Build(zap.AddCaller())
	if err != nil {
		fmt.Println("error creating zap logging - returning NewNop: ", err)
		return zap.NewNop()
	}
	return logger
}
|
package noolite
import (
"errors"
"reflect"
"testing"
)
// FakeUart is a scripted test double for the Connection's uart.
type FakeUart struct {
	awaitReq    []byte // exact request bytes Write must receive to succeed
	successRead bool   // when false, Read fails outright
	needFail    bool   // when true, Read leaves d[15] unset (test expects a CRC-style failure)
}
// Read fills d with a fake 17-byte response frame, or fails when the
// double is configured to. When needFail is set, byte 15 stays zero so
// the caller's validation rejects the frame.
func (fu FakeUart) Read(d []byte) (count int, err error) {
	if !fu.successRead {
		return 0, errors.New("need error")
	}
	d[0] = 173
	d[16] = 174
	if !fu.needFail {
		d[15] = 173
	}
	return 17, nil
}
// Write succeeds only when d equals the expected request bytes.
func (fu FakeUart) Write(d []byte) (count int, err error) {
	if reflect.DeepEqual(d, fu.awaitReq) {
		return len(d), nil
	}
	return 0, errors.New("not equal")
}
// Close satisfies the uart interface; the fake holds no resources.
func (fu FakeUart) Close() error {
	return nil
}
// TestWrite checks Connection.Write succeeds when the uart sees the
// expected bytes and fails otherwise.
func TestWrite(t *testing.T) {
	conn := new(Connection)
	uart := new(FakeUart)
	conn.uart = uart
	req := new(Request)

	uart.awaitReq = req.toBytes()
	if err := conn.Write(req); err != nil {
		t.Error("Fail on write!")
	}

	uart.awaitReq = []byte{}
	if err := conn.Write(req); err == nil {
		t.Error("Await error, but it's nil")
	}
}
// TestRead exercises Connection.Read for the success, bad-frame and
// hard-failure paths of the fake uart.
func TestRead(t *testing.T) {
	c := new(Connection)
	fu := new(FakeUart)
	c.uart = fu

	fu.successRead = true
	fu.needFail = false
	_, err := c.Read()
	if err != nil {
		t.Error("fail on read")
	}

	fu.needFail = true
	_, err = c.Read()
	if err == nil {
		t.Error("want crc error, but nil")
	}

	fu.successRead = false
	// Bug fix: the original re-checked the stale error from the
	// previous Read instead of performing a read in this mode.
	_, err = c.Read()
	if err == nil {
		t.Error("want read error, but nil")
	}
}
|
package leetcode
// missingNumber returns the single value in [0, len(nums)] that is
// absent from nums, via the arithmetic-series sum formula.
func missingNumber(nums []int) int {
	n := len(nums)
	want := n * (n + 1) / 2
	got := 0
	for _, v := range nums {
		got += v
	}
	return want - got
}
// Test site: bit operation
// missingNumberXor solves the same problem by XOR-folding the indexes
// 1..n together with all array values; the unpaired value remains.
// Renamed from missingNumber: the duplicate declaration in this package
// did not compile.
func missingNumberXor(nums []int) int {
	length := len(nums)
	var xor int
	for i := 1; i <= length; i++ {
		xor = xor ^ i
	}
	for i := 0; i < length; i++ {
		xor = xor ^ nums[i]
	}
	return xor
}
|
package main
// main demonstrates a trivial constant computation. Fix: the original
// expression ended with a dangling "+" and did not compile, and x was
// otherwise unused (also a compile error).
func main() {
	var x int
	x = 1 + 2
	_ = x // silence "declared and not used"
}
|
package main
import "fmt"
// matrix bundles the dimensions and backing storage of an integer matrix.
type matrix struct {
	row    int
	column int
	mat    [][]int
}

// create_mat returns a fresh r-by-c zero matrix. The receiver is not
// read; the method form is kept for call-site compatibility.
func (m matrix) create_mat(r int, c int) matrix {
	out := matrix{row: r, column: c, mat: make([][]int, r)}
	for i := range out.mat {
		out.mat[i] = make([]int, c)
	}
	return out
}

// sum adds every element of m onto previousSum and returns the total.
func (m matrix) sum(previousSum int) int {
	total := previousSum
	for i := 0; i < m.row; i++ {
		for j := 0; j < m.column; j++ {
			total += m.mat[i][j]
		}
	}
	return total
}
// main reads m matrices of r rows by c columns from stdin and prints
// the grand total of all their elements.
func main() {
	var m, r, c, previousSum int
	fmt.Print("Enter no of matrix:")
	fmt.Scan(&m)
	fmt.Print("Enter no of rows:")
	fmt.Scan(&r)
	fmt.Print("Enter no of columns:") // typo fix: was "cloumns"
	fmt.Scan(&c)
	previousSum = 0
	for k := 0; k < m; k++ {
		mm := matrix{}
		mm = mm.create_mat(r, c)
		for i := 0; i < r; i++ {
			for j := 0; j < c; j++ {
				fmt.Print("Enter each element:")
				fmt.Scan(&mm.mat[i][j])
			}
		}
		// Accumulate across matrices by threading the running total.
		previousSum = mm.sum(previousSum)
	}
	fmt.Println("sum of all matrix:", previousSum)
}
|
package main
// f is a placeholder taking three ints. Fix: the original parameter
// list "(a, b, int)" mixed identifiers with a bare type name and did
// not compile; three int parameters is the most likely intent.
func f(a, b, c int) {
	_, _, _ = a, b, c
}
|
// Package array implements a dynamically growing (auto-expanding) array.
package array
// DynamicArray is an int array that doubles its capacity on demand.
type DynamicArray struct {
	elements []int // backing storage
	length   int   // number of slots currently in use
	capacity int   // size of the backing storage
}

// NewDynamicArray creates an array with the given initial capacity;
// it panics when cnt is not positive.
func NewDynamicArray(cnt int) *DynamicArray {
	if cnt <= 0 {
		panic("array capacity must be gt 0.")
	}
	return &DynamicArray{
		elements: make([]int, cnt),
		length:   0,
		capacity: cnt,
	}
}

// Add inserts value x at index m, shifting later elements one slot to
// the right. It panics when m would leave a gap (m > length). The
// backing array is doubled first when full.
func (da *DynamicArray) Add(m, x int) {
	if m > da.length {
		panic("couldn't add, hollow between elements.")
	}
	if da.length+1 > da.capacity {
		// Grow: move everything into a buffer twice as large.
		da.capacity = da.capacity << 1
		grown := make([]int, da.capacity)
		copy(grown, da.elements[:da.length])
		da.elements = grown
	}
	// Open up position m (no-op when appending at the end).
	for i := da.length - 1; i >= m; i-- {
		da.elements[i+1] = da.elements[i]
	}
	da.elements[m] = x
	da.length++
}
|
// Copyright (C) 2015-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under
// the terms of the under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package instanceiterator_test
import (
"log"
"github.com/pivotal-cf/on-demand-service-broker/instanceiterator"
"github.com/pivotal-cf/on-demand-service-broker/instanceiterator/fakes"
"github.com/pivotal-cf/on-demand-service-broker/loggerfactory"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
)
// Exercises CanUpgradeUsingCF over the combinations of: CF client
// present/absent, minimum OSBAPI version supported, and maintenance
// info configured. Only the first entry's combination permits upgrade.
var _ = Describe("CF querier", func() {
	var logger *log.Logger
	BeforeEach(func() {
		loggerFactory := loggerfactory.New(gbytes.NewBuffer(), "process-all-service-instances", loggerfactory.Flags)
		logger = loggerFactory.New()
	})
	DescribeTable("CanUpgradeUsingCF",
		func(expect, maintenanceInfoPresent bool, cfClient instanceiterator.CFClient) {
			Expect(instanceiterator.CanUpgradeUsingCF(cfClient, maintenanceInfoPresent, logger)).To(Equal(expect))
		},
		// Fake reports the minimum OSBAPI version as satisfied.
		Entry("MaintenanceInfo is configured and supported by CF", true, true, func() instanceiterator.CFClient {
			fake := fakes.FakeCFClient{}
			fake.CheckMinimumOSBAPIVersionReturns(true)
			return &fake
		}()),
		Entry("CF not configured", false, true, nil),
		Entry("MaintenanceInfo not configured for the adapter", false, false, &fakes.FakeCFClient{}),
		// Zero-value fake: CheckMinimumOSBAPIVersion returns false.
		Entry("CF does not support MaintenanceInfo", false, true, &fakes.FakeCFClient{}),
	)
})
|
package controller
import (
"github.com/gin-gonic/gin"
"gopetstore_v2/src/domain"
"gopetstore_v2/src/global"
"gopetstore_v2/src/service"
"gopetstore_v2/src/util"
"log"
"net/http"
)
// Template file names rendered by the account controller.
const (
	signInFormFile      = "signInForm.html"
	registerFormFile    = "registerForm.html"
	editAccountFormFile = "editAccountForm.html"
	mainFile            = "main.html"
)
// Values for the registration/edit form select boxes.
var (
	// languages a user may choose as language preference.
	languages = []string{
		"english",
		"japanese",
	}
	// categories a user may mark as favourite pet category.
	categories = []string{
		"FISH",
		"DOGS",
		"REPTILES",
		"CATS",
		"BIRDS",
	}
)
// view handlers
// ViewLogin renders the sign-in form.
func ViewLogin(c *gin.Context) {
	c.HTML(http.StatusOK, signInFormFile, gin.H{})
}
// ViewRegister renders the registration form with its select-box values.
func ViewRegister(c *gin.Context) {
	data := gin.H{
		"Languages":  languages,
		"Categories": categories,
	}
	c.HTML(http.StatusOK, registerFormFile, data)
}
// ViewEditAccount renders the edit form pre-filled with the account
// stored in the session.
func ViewEditAccount(c *gin.Context) {
	account := util.GetAccountFromSession(c.Request)
	c.HTML(http.StatusOK, editAccountFormFile, gin.H{
		"Account":    account,
		"Languages":  languages,
		"Categories": categories,
	})
}
// action handlers
// Login authenticates the posted credentials, stores the account in the
// session on success, and re-renders the sign-in form on failure.
func Login(c *gin.Context) {
	userName := c.PostForm("username")
	password := c.PostForm("password")
	a, err := service.GetAccountByUserNameAndPassword(userName, password)
	if err != nil {
		util.ViewError(c, err)
		return
	}
	if a == nil {
		c.HTML(http.StatusOK, signInFormFile, gin.H{
			"Message": "登录失败,账号或密码错误",
		})
		return
	}
	s, err := util.GetSession(c.Request)
	if err != nil {
		util.ViewError(c, err)
		return
	}
	if s != nil {
		if err := s.Save(global.AccountKey, a, c.Writer, c.Request); err != nil {
			util.ViewError(c, err)
			return
		}
	}
	c.HTML(http.StatusOK, mainFile, gin.H{
		"Account": a,
	})
}
// SignOut clears the account, cart and order entries from the session
// and returns to the main page.
func SignOut(c *gin.Context) {
	s, err := util.GetSession(c.Request)
	if err != nil {
		util.ViewError(c, err)
		return
	}
	if s != nil {
		// Bug fix: the first two Del errors were overwritten and only
		// the last one was ever checked; now each is handled.
		if err := s.Del(global.AccountKey, c.Writer, c.Request); err != nil {
			util.ViewError(c, err)
			return
		}
		if err := s.Del(global.CartKey, c.Writer, c.Request); err != nil {
			util.ViewError(c, err)
			return
		}
		if err := s.Del(global.OrderKey, c.Writer, c.Request); err != nil {
			util.ViewError(c, err)
			return
		}
	}
	c.HTML(http.StatusOK, mainFile, gin.H{})
}
// NewAccount handles registration: it checks the repeated password,
// rejects duplicate user names, and inserts the new account.
func NewAccount(c *gin.Context) {
	accountInfo := getAccountFromInfoForm(c)
	repeatedPassword := c.PostForm("repeatedPassword")
	if accountInfo.Password != repeatedPassword {
		c.HTML(http.StatusOK, registerFormFile, gin.H{
			"Message":    "密码和重复密码不一致",
			"Languages":  languages,
			"Categories": categories,
		})
		return
	}
	existing, err := service.GetAccountByUserName(accountInfo.UserName)
	if err != nil {
		util.ViewError(c, err)
		return
	}
	if existing != nil {
		c.HTML(http.StatusOK, registerFormFile, gin.H{
			"Message":    "该用户名已存在",
			"Languages":  languages,
			"Categories": categories,
		})
		return
	}
	if err := service.InsertAccount(accountInfo); err != nil {
		util.ViewError(c, err)
		return
	}
	c.HTML(http.StatusOK, signInFormFile, gin.H{
		"Message": "注册成功",
	})
}
// ConfirmEdit persists the edited account and refreshes the session
// copy so later pages see the new values.
func ConfirmEdit(c *gin.Context) {
	account := getAccountFromInfoForm(c)
	if err := service.UpdateAccount(account); err != nil {
		util.ViewError(c, err)
		return
	}
	// Session refresh problems are logged but not fatal: the database
	// update already succeeded.
	s, err := util.GetSession(c.Request)
	if err != nil {
		log.Printf("ConfirmEdit GetSession error: %v", err.Error())
	}
	if s != nil {
		if err := s.Save(global.AccountKey, account, c.Writer, c.Request); err != nil {
			log.Printf("ConfirmEdit Save error: %v", err.Error())
		}
	}
	c.HTML(http.StatusOK, editAccountFormFile, gin.H{
		"Message": "修改成功",
		"Account": account,
	})
}
// getAccountFromInfoForm builds a domain.Account from the posted form
// fields. Status is always "OK"; the list/banner options are checkbox
// fields, present in the form data only when ticked.
func getAccountFromInfoForm(c *gin.Context) *domain.Account {
	return &domain.Account{
		UserName:            c.PostForm("username"),
		Email:               c.PostForm("email"),
		FirstName:           c.PostForm("firstName"),
		LastName:            c.PostForm("lastName"),
		Status:              "OK",
		Address1:            c.PostForm("address1"),
		Address2:            c.PostForm("address2"),
		City:                c.PostForm("city"),
		State:               c.PostForm("state"),
		Zip:                 c.PostForm("zip"),
		Country:             c.PostForm("country"),
		Phone:               c.PostForm("phone"),
		Password:            c.PostForm("password"),
		FavouriteCategoryId: c.PostForm("favouriteCategoryId"),
		LanguagePreference:  c.PostForm("languagePreference"),
		ListOption:          len(c.PostForm("listOption")) > 0,
		BannerOption:        len(c.PostForm("bannerOption")) > 0,
	}
}
|
package main
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// prev is the head of the already-flattened suffix, threaded between
// recursive _flatten calls.
var prev *TreeNode

// _flatten visits nodes in reverse preorder (right, then left, then
// root) and rewires each node's Right to the previously visited node,
// building a preorder-ordered right-spine in place.
func _flatten(root *TreeNode) {
	if root == nil {
		return
	}
	_flatten(root.Right)
	_flatten(root.Left)
	root.Right = prev
	root.Left = nil
	prev = root
}

// flatten converts the tree into a right-skewed preorder list, in place
// (LeetCode 114 "Flatten Binary Tree to Linked List").
func flatten(root *TreeNode) {
	prev = nil
	_flatten(root)
}
func main() {} // unused entry point; this file exists for the flatten solution
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rowexec
import (
"bytes"
"sort"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/inverted"
"github.com/cockroachdb/errors"
)
// The abstractions in this file help with evaluating (batches of)
// invertedexpr.SpanExpression. The spans in a SpanExpression represent spans
// of an inverted index, which consists of an inverted column followed by the
// primary key of the table. The set expressions involve union and
// intersection over operands. The operands are sets of primary keys contained
// in the corresponding span. Callers should use batchedInvertedExprEvaluator.
// This evaluator does not do the actual scan -- it is fed the set elements as
// the inverted index is scanned, and routes a set element to all the sets to
// which it belongs (since spans can be overlapping). Once the scan is
// complete, the expressions are evaluated.
// KeyIndex is used as a set element. It is already de-duped.
type KeyIndex = int

// setContainer is a set of key indexes in increasing order.
type setContainer []KeyIndex

// Len, Less and Swap implement sort.Interface so containers can be
// sorted before the merge-based set operations below.
func (s setContainer) Len() int           { return len(s) }
func (s setContainer) Less(i, j int) bool { return s[i] < s[j] }
func (s setContainer) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// unionSetContainers merges two sorted, de-duplicated containers into
// their sorted, de-duplicated union.
func unionSetContainers(a, b setContainer) setContainer {
	if len(a) == 0 {
		return b
	}
	if len(b) == 0 {
		return a
	}
	var out setContainer
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] < b[j]:
			out = append(out, a[i])
			i++
		case a[i] > b[j]:
			out = append(out, b[j])
			j++
		default: // equal: emit once, advance both
			out = append(out, a[i])
			i++
			j++
		}
	}
	out = append(out, a[i:]...)
	out = append(out, b[j:]...)
	return out
}

// intersectSetContainers returns the sorted intersection of two sorted
// containers.
// TODO(sumeer): when one set is much larger than the other it is more
// efficient to iterate over the smaller set and seek into the larger set.
func intersectSetContainers(a, b setContainer) setContainer {
	var out setContainer
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] < b[j]:
			i++
		case a[i] > b[j]:
			j++
		default:
			out = append(out, a[i])
			i++
			j++
		}
	}
	return out
}
// setExpression follows the structure of SpanExpression.
type setExpression struct {
	op inverted.SetOperator // how left and right are combined
	// The index in invertedExprEvaluator.sets
	unionSetIndex int
	left          *setExpression // nil for leaf nodes
	right         *setExpression // nil for leaf nodes
}
// Aliases shortening the inverted package's proto type names.
type invertedSpan = inverted.SpanExpressionProto_Span
type invertedSpans = inverted.SpanExpressionProtoSpans
type spanExpression = inverted.SpanExpressionProto_Node

// The spans in a SpanExpression.FactoredUnionSpans and the corresponding index
// in invertedExprEvaluator.sets. Only populated when FactoredUnionsSpans is
// non-empty.
type spansAndSetIndex struct {
	spans    []invertedSpan // spans whose rows feed the set below
	setIndex int            // destination set in invertedExprEvaluator.sets
}
// invertedExprEvaluator evaluates a single expression. It should not be directly
// used -- see batchedInvertedExprEvaluator.
type invertedExprEvaluator struct {
	setExpr *setExpression // root of the mirrored set-expression tree
	// These are initially populated by calls to addIndexRow() as
	// the inverted index is scanned.
	sets       []setContainer     // one container per setExpression node
	spansIndex []spansAndSetIndex // which scanned spans feed which set
}
// newInvertedExprEvaluator builds an evaluator for one span-expression
// tree, mirroring it into setExpression nodes.
func newInvertedExprEvaluator(expr *spanExpression) *invertedExprEvaluator {
	ev := &invertedExprEvaluator{}
	ev.setExpr = ev.initSetExpr(expr)
	return ev
}
// initSetExpr recursively mirrors expr into a setExpression tree,
// allocating one (initially nil) set per node and recording which nodes
// carry FactoredUnionSpans so scanned rows can be routed to them.
func (ev *invertedExprEvaluator) initSetExpr(expr *spanExpression) *setExpression {
	// Assign it an index even if FactoredUnionSpans is empty, since we will
	// need it when evaluating.
	i := len(ev.sets)
	ev.sets = append(ev.sets, nil)
	sx := &setExpression{op: expr.Operator, unionSetIndex: i}
	if len(expr.FactoredUnionSpans) > 0 {
		ev.spansIndex = append(ev.spansIndex,
			spansAndSetIndex{spans: expr.FactoredUnionSpans, setIndex: i})
	}
	if expr.Left != nil {
		sx.left = ev.initSetExpr(expr.Left)
	}
	if expr.Right != nil {
		sx.right = ev.initSetExpr(expr.Right)
	}
	return sx
}
// getSpansAndSetIndex returns the spans and corresponding set indexes for
// this expression. The spans are not in sorted order and can be overlapping.
func (ev *invertedExprEvaluator) getSpansAndSetIndex() []spansAndSetIndex {
	return ev.spansIndex
}
// addIndexRow adds a row to the given set. KeyIndexes are not added in
// increasing order, nor do they represent any ordering of the primary key of
// the table whose inverted index is being read. Also, the same KeyIndex could
// be added repeatedly to a set.
func (ev *invertedExprEvaluator) addIndexRow(setIndex int, keyIndex KeyIndex) {
	// If duplicates in a set become a memory problem in this build phase, we
	// could do periodic de-duplication as we go. For now, we simply append to
	// the slice and de-dup at the start of evaluate().
	ev.sets[setIndex] = append(ev.sets[setIndex], keyIndex)
}
// evaluate computes the result of the expression. The return value is in
// increasing order of KeyIndex.
func (ev *invertedExprEvaluator) evaluate() []KeyIndex {
	// The set operations below require sorted, duplicate-free sets, so
	// normalize every set that was populated during the scan.
	for i := range ev.sets {
		c := ev.sets[i]
		if len(c) == 0 {
			continue
		}
		sort.Sort(c)
		// Compact in place: the first element is always kept; every later
		// element is kept only if it differs from the previously kept one.
		deduped := c[:1]
		for j := 1; j < len(c); j++ {
			if c[j] != deduped[len(deduped)-1] {
				deduped = append(deduped, c[j])
			}
		}
		ev.sets[i] = deduped
	}
	return ev.evaluateSetExpr(ev.setExpr)
}
// evaluateSetExpr recursively evaluates the sub-tree rooted at sx and
// returns its sorted result set.
func (ev *invertedExprEvaluator) evaluateSetExpr(sx *setExpression) setContainer {
	// Evaluate the children first.
	var left, right setContainer
	if sx.left != nil {
		left = ev.evaluateSetExpr(sx.left)
	}
	if sx.right != nil {
		right = ev.evaluateSetExpr(sx.right)
	}
	// Combine the children according to this node's operator.
	var combined setContainer
	switch sx.op {
	case inverted.SetUnion:
		combined = unionSetContainers(left, right)
	case inverted.SetIntersection:
		combined = intersectSetContainers(left, right)
	}
	// The rows routed to this node's factored-union set are always unioned in.
	return unionSetContainers(ev.sets[sx.unionSetIndex], combined)
}
// exprAndSetIndex identifies one set within one expression. Supporting
// struct for invertedSpanRoutingInfo.
type exprAndSetIndex struct {
	// An index into batchedInvertedExprEvaluator.exprEvals.
	exprIndex int
	// An index into batchedInvertedExprEvaluator.exprEvals[exprIndex].sets.
	setIndex int
}
// exprAndSetIndexSorter sorts a []exprAndSetIndex in increasing order of
// exprIndex. The relative order of entries with equal exprIndex is
// unspecified.
type exprAndSetIndexSorter []exprAndSetIndex

// Implement sort.Interface. Sorts in increasing order of exprIndex.
func (esis exprAndSetIndexSorter) Len() int      { return len(esis) }
func (esis exprAndSetIndexSorter) Swap(i, j int) { esis[i], esis[j] = esis[j], esis[i] }
func (esis exprAndSetIndexSorter) Less(i, j int) bool {
	return esis[i].exprIndex < esis[j].exprIndex
}
// invertedSpanRoutingInfo contains the list of exprAndSetIndex pairs that
// need rows from the inverted index span. A []invertedSpanRoutingInfo with
// spans that are sorted and non-overlapping is used to route an added row to
// all the expressions and sets that need that row.
type invertedSpanRoutingInfo struct {
	// span is the (fragmented, non-overlapping) inverted index span this
	// routing entry covers.
	span invertedSpan
	// Sorted in increasing order of exprIndex.
	exprAndSetIndexList []exprAndSetIndex
	// A de-duped and sorted list of exprIndex values from exprAndSetIndexList.
	// Used for pre-filtering, since the pre-filter is applied on a per
	// exprIndex basis.
	exprIndexList []int
}
// invertedSpanRoutingInfosByEndKey is a slice of invertedSpanRoutingInfo that
// implements the sort.Interface interface by sorting infos by their span's end
// key. The (unchecked) assumption is that spans in a slice all have the same
// start key.
type invertedSpanRoutingInfosByEndKey []invertedSpanRoutingInfo

// Implement sort.Interface.
func (s invertedSpanRoutingInfosByEndKey) Len() int      { return len(s) }
func (s invertedSpanRoutingInfosByEndKey) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s invertedSpanRoutingInfosByEndKey) Less(i, j int) bool {
	return bytes.Compare(s[i].span.End, s[j].span.End) < 0
}
// preFilterer is the single method from DatumsToInvertedExpr that is relevant
// here: it evaluates the per-expression pre-filters against an encoded
// inverted-index value, writing one verdict per pre-filter into result.
type preFilterer interface {
	PreFilter(enc inverted.EncVal, preFilters []interface{}, result []bool) (bool, error)
}
// batchedInvertedExprEvaluator is for evaluating one or more expressions. The
// batched evaluator can be reused by calling reset(). In the build phase,
// append expressions directly to exprs. A nil expression is permitted, and is
// just a placeholder that will result in a nil []KeyIndex in evaluate().
// init() must be called before calls to {prepare}addIndexRow() -- it builds the
// fragmentedSpans used for routing the added rows.
type batchedInvertedExprEvaluator struct {
	// filterer, when non-nil, enables per-row pre-filtering in
	// prepareAddIndexRow.
	filterer preFilterer
	exprs    []*inverted.SpanExpressionProto
	// The pre-filtering state for each expression. When pre-filtering, this
	// is the same length as exprs.
	preFilterState []interface{}
	// The parameters and result of pre-filtering for an inverted row are
	// kept in this temporary state.
	tempPreFilters      []interface{}
	tempPreFilterResult []bool
	// The evaluators for all the exprs.
	exprEvals []*invertedExprEvaluator
	// The keys that constrain the non-inverted prefix columns, if the index is
	// a multi-column inverted index. For multi-column inverted indexes, these
	// keys are in one-to-one correspondence with exprEvals.
	nonInvertedPrefixes []roachpb.Key
	// Spans here are in sorted order and non-overlapping.
	fragmentedSpans []invertedSpanRoutingInfo
	// The routing index computed by prepareAddIndexRow.
	routingIndex int
	// Temporary state used during initialization.
	routingSpans  []invertedSpanRoutingInfo
	coveringSpans []invertedSpan
	// pendingSpansToSort is only a scratch field; fragmentPendingSpans assigns
	// into it before sorting to avoid a heap allocation.
	pendingSpansToSort invertedSpanRoutingInfosByEndKey
}
// Helper used in building fragmentedSpans using pendingSpans. pendingSpans
// contains spans with the same start key. This fragments and removes all
// spans up to end key fragmentUntil (or all spans if fragmentUntil == nil).
// It then returns the remaining pendingSpans.
//
// Example 1:
// pendingSpans contains
//    c---g
//    c-----i
//    c--e
//
// And fragmentUntil = i. Since end keys are exclusive we can fragment and
// remove all spans in pendingSpans. These will be:
//    c-e-g
//    c-e-g-i
//    c-e
//
// For the c-e span, all the exprAndSetIndexList slices for these spans are
// appended since any row in that span needs to be routed to all these
// expressions and sets. For the e-g span only the exprAndSetIndexList slices
// for the top two spans are unioned.
//
// Example 2:
//
// Same pendingSpans, and fragmentUntil = f. The fragments that are generated
// for fragmentedSpans and the remaining spans in pendingSpans are:
//
//    fragments        remaining
//    c-e-f            f-g
//    c-e-f            f-i
//    c-e
func (b *batchedInvertedExprEvaluator) fragmentPendingSpans(
	pendingSpans []invertedSpanRoutingInfo, fragmentUntil inverted.EncVal,
) []invertedSpanRoutingInfo {
	// The start keys are the same, so this only sorts in increasing order of
	// end keys. Assign slice to a field on the receiver before sorting to avoid
	// a heap allocation when the slice header passes through an interface.
	b.pendingSpansToSort = invertedSpanRoutingInfosByEndKey(pendingSpans)
	sort.Sort(&b.pendingSpansToSort)
	for len(pendingSpans) > 0 {
		if fragmentUntil != nil && bytes.Compare(fragmentUntil, pendingSpans[0].span.Start) <= 0 {
			// All remaining pending spans start at or after fragmentUntil, so
			// there is nothing left to fragment.
			break
		}
		// The prefix of pendingSpans that will be completely consumed when
		// the next fragment is constructed.
		var removeSize int
		// The end of the next fragment.
		var end inverted.EncVal
		// The start of the fragment after the next fragment.
		var nextStart inverted.EncVal
		if fragmentUntil != nil && bytes.Compare(fragmentUntil, pendingSpans[0].span.End) < 0 {
			// Can't completely remove any spans from pendingSpans, but a prefix
			// of these spans will be removed
			removeSize = 0
			end = fragmentUntil
			nextStart = end
		} else {
			// We can remove all spans whose end key is the same as span[0].
			// The end of span[0] is also the end key of this fragment.
			removeSize = b.pendingLenWithSameEnd(pendingSpans)
			end = pendingSpans[0].span.End
			nextStart = end
		}
		// The next span to be added to fragmentedSpans.
		nextSpan := invertedSpanRoutingInfo{
			span: invertedSpan{
				Start: pendingSpans[0].span.Start,
				End:   end,
			},
		}
		for i := 0; i < len(pendingSpans); i++ {
			if i >= removeSize {
				// This span is not completely removed so adjust its start.
				pendingSpans[i].span.Start = nextStart
			}
			// All spans in pendingSpans contribute to exprAndSetIndexList.
			nextSpan.exprAndSetIndexList =
				append(nextSpan.exprAndSetIndexList, pendingSpans[i].exprAndSetIndexList...)
		}
		// Sort the exprAndSetIndexList, since we need to use it to initialize the
		// exprIndexList before we push nextSpan onto b.fragmentedSpans.
		sort.Sort(exprAndSetIndexSorter(nextSpan.exprAndSetIndexList))
		nextSpan.exprIndexList = make([]int, 0, len(nextSpan.exprAndSetIndexList))
		// Build exprIndexList as the de-dup of the (now sorted) exprIndex values.
		for i := range nextSpan.exprAndSetIndexList {
			length := len(nextSpan.exprIndexList)
			exprIndex := nextSpan.exprAndSetIndexList[i].exprIndex
			if length == 0 || nextSpan.exprIndexList[length-1] != exprIndex {
				nextSpan.exprIndexList = append(nextSpan.exprIndexList, exprIndex)
			}
		}
		b.fragmentedSpans = append(b.fragmentedSpans, nextSpan)
		pendingSpans = pendingSpans[removeSize:]
		if removeSize == 0 {
			// fragmentUntil was earlier than the smallest End key in the pending
			// spans, so cannot fragment any more.
			break
		}
	}
	return pendingSpans
}
// pendingLenWithSameEnd returns the length of the prefix of pendingSpans
// whose spans all share the end key of pendingSpans[0].
func (b *batchedInvertedExprEvaluator) pendingLenWithSameEnd(
	pendingSpans []invertedSpanRoutingInfo,
) int {
	end := pendingSpans[0].span.End
	n := 1
	for _, p := range pendingSpans[1:] {
		if !bytes.Equal(end, p.span.End) {
			break
		}
		n++
	}
	return n
}
// init fragments the spans for later routing of rows and returns spans
// representing a union of all the spans (for executing the scan). The
// returned slice is only valid until the next call to reset.
func (b *batchedInvertedExprEvaluator) init() (invertedSpans, error) {
	if len(b.nonInvertedPrefixes) > 0 && len(b.nonInvertedPrefixes) != len(b.exprs) {
		return nil, errors.AssertionFailedf("length of non-empty nonInvertedPrefixes must equal length of exprs")
	}
	// Reuse the exprEvals slice across calls when its capacity allows.
	if cap(b.exprEvals) < len(b.exprs) {
		b.exprEvals = make([]*invertedExprEvaluator, len(b.exprs))
	} else {
		b.exprEvals = b.exprEvals[:len(b.exprs)]
	}
	// Initial spans fetched from all expressions.
	for i, expr := range b.exprs {
		if expr == nil {
			// Placeholder expression: produces a nil result in evaluate().
			b.exprEvals[i] = nil
			continue
		}
		var prefixKey roachpb.Key
		if len(b.nonInvertedPrefixes) > 0 {
			prefixKey = b.nonInvertedPrefixes[i]
		}
		b.exprEvals[i] = newInvertedExprEvaluator(&expr.Node)
		exprSpans := b.exprEvals[i].getSpansAndSetIndex()
		for _, spans := range exprSpans {
			for _, span := range spans.spans {
				if len(prefixKey) > 0 {
					// TODO(mgartner/sumeer): It may be possible to reduce
					// allocations and memory usage by adding a level of
					// indirection for prefix keys (like a map of prefixes to
					// routingSpans), rather than prepending prefix keys to each
					// span.
					span = prefixInvertedSpan(prefixKey, span)
				}
				b.routingSpans = append(b.routingSpans,
					invertedSpanRoutingInfo{
						span:                span,
						exprAndSetIndexList: []exprAndSetIndex{{exprIndex: i, setIndex: spans.setIndex}},
					},
				)
			}
		}
	}
	if len(b.routingSpans) == 0 {
		return nil, nil
	}
	// Sort the routingSpans in increasing order of start key, and for equal
	// start keys in increasing order of end key.
	sort.Slice(b.routingSpans, func(i, j int) bool {
		cmp := bytes.Compare(b.routingSpans[i].span.Start, b.routingSpans[j].span.Start)
		if cmp == 0 {
			cmp = bytes.Compare(b.routingSpans[i].span.End, b.routingSpans[j].span.End)
		}
		return cmp < 0
	})
	// The union of the spans, which is returned from this function.
	currentCoveringSpan := b.routingSpans[0].span
	// Create a slice of pendingSpans to be fragmented by windowing over the
	// full collection of routingSpans. All spans in a given window have the
	// same start key. They are not sorted by end key.
	pendingSpans := b.routingSpans[:1]
	// This loop does both the union of the routingSpans and fragments the
	// routingSpans. The pendingSpans slice contains a subsequence of the
	// routingSpans slice, that when passed to fragmentPendingSpans will be
	// mutated by it.
	for i := 1; i < len(b.routingSpans); i++ {
		span := b.routingSpans[i]
		if bytes.Compare(pendingSpans[0].span.Start, span.span.Start) < 0 {
			// A new start key: flush the current window up to this start key.
			pendingSpans = b.fragmentPendingSpans(pendingSpans, span.span.Start)
			if bytes.Compare(currentCoveringSpan.End, span.span.Start) < 0 {
				// Disjoint from the current covering span; emit it and start a
				// new one.
				b.coveringSpans = append(b.coveringSpans, currentCoveringSpan)
				currentCoveringSpan = span.span
			} else if bytes.Compare(currentCoveringSpan.End, span.span.End) < 0 {
				currentCoveringSpan.End = span.span.End
			}
		} else if bytes.Compare(currentCoveringSpan.End, span.span.End) < 0 {
			currentCoveringSpan.End = span.span.End
		}
		// Add this span to the pending list by expanding the window over
		// b.routingSpans.
		pendingSpans = pendingSpans[:len(pendingSpans)+1]
	}
	// Flush the final window and the final covering span.
	b.fragmentPendingSpans(pendingSpans, nil)
	b.coveringSpans = append(b.coveringSpans, currentCoveringSpan)
	return b.coveringSpans, nil
}
// prepareAddIndexRow must be called prior to addIndexRow to do any
// pre-filtering. The return value indicates whether addIndexRow should be
// called. encFull should include the entire index key, including non-inverted
// prefix columns. It should be nil if the index is not a multi-column inverted
// index.
// TODO(sumeer): if this will be called in non-decreasing order of enc,
// use that to optimize the binary search.
func (b *batchedInvertedExprEvaluator) prepareAddIndexRow(
	enc inverted.EncVal, encFull inverted.EncVal,
) (bool, error) {
	// Route on the full key when present (multi-column inverted index);
	// otherwise on the inverted value alone.
	routingEnc := enc
	if encFull != nil {
		routingEnc = encFull
	}
	// Find the first span that comes after the encoded routing value.
	i := sort.Search(len(b.fragmentedSpans), func(i int) bool {
		return bytes.Compare(b.fragmentedSpans[i].span.Start, routingEnc) > 0
	})
	// Decrement by 1 so that now i tracks the index of the span that might
	// contain the encoded routing value.
	i--
	if i < 0 {
		// Negative index indicates that some assumptions are violated, return
		// an assertion error in this case.
		return false, errors.AssertionFailedf("unexpectedly negative routing index %d", i)
	}
	// fragmentedSpans are sorted and non-overlapping, so the value must fall
	// before this span's (exclusive) End to be contained.
	if bytes.Compare(b.fragmentedSpans[i].span.End, routingEnc) <= 0 {
		return false, errors.AssertionFailedf(
			"unexpectedly the end of the routing span %d is not greater "+
				"than encoded routing value", i,
		)
	}
	b.routingIndex = i
	return b.prefilter(enc)
}
// prefilter applies b.filterer, if it exists, returning true if addIndexRow
// should be called for the row corresponding to the encoded value.
// prepareAddIndexRow or prepareAddMultiColumnIndexRow must be called first.
func (b *batchedInvertedExprEvaluator) prefilter(enc inverted.EncVal) (bool, error) {
	if b.filterer == nil {
		// No pre-filtering configured; the row is always added.
		return true, nil
	}
	exprIndexList := b.fragmentedSpans[b.routingIndex].exprIndexList
	n := len(exprIndexList)
	// Size the temporary slices, reusing their capacity when possible.
	if n > cap(b.tempPreFilters) {
		b.tempPreFilters = make([]interface{}, n)
		b.tempPreFilterResult = make([]bool, n)
	} else {
		b.tempPreFilters = b.tempPreFilters[:n]
		b.tempPreFilterResult = b.tempPreFilterResult[:n]
	}
	// Gather the pre-filtering state for each expression that needs this row.
	for j, exprIndex := range exprIndexList {
		b.tempPreFilters[j] = b.preFilterState[exprIndex]
	}
	return b.filterer.PreFilter(enc, b.tempPreFilters, b.tempPreFilterResult)
}
// addIndexRow must be called iff prepareAddIndexRow returned true. It routes
// keyIndex to every (expression, set) pair listed for the span selected by
// prepareAddIndexRow, honoring the pre-filter verdicts when a filterer is
// configured.
func (b *batchedInvertedExprEvaluator) addIndexRow(keyIndex KeyIndex) error {
	i := b.routingIndex
	if b.filterer != nil {
		exprIndexes := b.fragmentedSpans[i].exprIndexList
		exprSetIndexes := b.fragmentedSpans[i].exprAndSetIndexList
		if len(exprIndexes) != len(b.tempPreFilterResult) {
			return errors.Errorf("non-matching lengths of tempPreFilterResult and exprIndexes")
		}
		// Coordinated iteration over exprIndexes and exprSetIndexes.
		// exprSetIndexes is sorted by exprIndex and exprIndexes is its sorted
		// de-dup, so each distinct exprIndex value advances j by exactly one
		// position; a larger jump means the two lists are inconsistent.
		j := 0
		for k := range exprSetIndexes {
			elem := exprSetIndexes[k]
			if elem.exprIndex > exprIndexes[j] {
				j++
				if exprIndexes[j] != elem.exprIndex {
					return errors.Errorf("non-matching expr indexes")
				}
			}
			// tempPreFilterResult[j] is the pre-filter verdict for this
			// expression, computed in prefilter().
			if b.tempPreFilterResult[j] {
				b.exprEvals[elem.exprIndex].addIndexRow(elem.setIndex, keyIndex)
			}
		}
	} else {
		// No pre-filtering: route the row to every set listed for this span.
		for _, elem := range b.fragmentedSpans[i].exprAndSetIndexList {
			b.exprEvals[elem.exprIndex].addIndexRow(elem.setIndex, keyIndex)
		}
	}
	return nil
}
// evaluate computes the per-expression results. result[i] is nil when
// exprs[i] was a nil placeholder.
func (b *batchedInvertedExprEvaluator) evaluate() [][]KeyIndex {
	result := make([][]KeyIndex, len(b.exprs))
	for i, eval := range b.exprEvals {
		if eval != nil {
			result[i] = eval.evaluate()
		}
	}
	return result
}
// reset prepares the evaluator for reuse with a new batch of expressions.
// Slices are truncated rather than released so their backing storage is
// retained across batches. b.filterer, b.routingIndex and the temporary
// pre-filter slices are left as-is; they are overwritten on the next use.
func (b *batchedInvertedExprEvaluator) reset() {
	b.exprs = b.exprs[:0]
	b.preFilterState = b.preFilterState[:0]
	b.exprEvals = b.exprEvals[:0]
	b.fragmentedSpans = b.fragmentedSpans[:0]
	b.routingSpans = b.routingSpans[:0]
	b.coveringSpans = b.coveringSpans[:0]
	b.nonInvertedPrefixes = b.nonInvertedPrefixes[:0]
}
// prefixInvertedSpan returns a new invertedSpan whose Start and End keys are
// prefix followed by the input span's Start and End keys, respectively. The
// keys use freshly allocated storage. This is similar to the internals of
// rowenc.appendEncDatumsToKey.
func prefixInvertedSpan(prefix roachpb.Key, span invertedSpan) invertedSpan {
	// Allocate each key at its exact final size, then append both parts.
	start := make(roachpb.Key, 0, len(prefix)+len(span.Start))
	start = append(append(start, prefix...), span.Start...)
	end := make(roachpb.Key, 0, len(prefix)+len(span.End))
	end = append(append(end, prefix...), span.End...)
	return invertedSpan{Start: start, End: end}
}
|
package hub
import (
"testing"
"github.com/pkg/errors"
"h12.io/sej"
)
// stackTracer matches errors created by github.com/pkg/errors, allowing a
// stack trace to be extracted from an error value in tests.
type stackTracer interface {
	StackTrace() errors.StackTrace
}
// TestMain delegates package test setup and teardown to the sej test
// harness.
func TestMain(m *testing.M) {
	sej.Test{}.Main(m)
}
|
package main
import (
"flag"
"fmt"
"log"
"bytes"
"time"
"github.com/live-dash/live-dash/templates"
"github.com/live-dash/live-dash/sse"
"github.com/valyala/fasthttp"
)
// MyHandler maps exact request paths to fasthttp handlers. Register routes
// with AddRoute and dispatch with RouteHandler. The map is not guarded by a
// lock, so all routes should be registered before serving begins.
type MyHandler struct {
	routes map[string]fasthttp.RequestHandler
}
var (
	// addr is the TCP address the HTTP server listens on.
	addr = flag.String("addr", ":80", "TCP address to listen to")
	// compress enables transparent response compression when set.
	compress = flag.Bool("compress", false, "Whether to enable transparent response compression")
)
// newHandler returns a MyHandler with an empty, ready-to-use route table.
// (gofmt: removed the redundant parentheses around the return type.)
func newHandler() *MyHandler {
	return &MyHandler{
		routes: make(map[string]fasthttp.RequestHandler),
	}
}
// AddRoute registers handler to serve the exact path route, replacing any
// previous registration for that path.
func (h *MyHandler) AddRoute(route string, handler fasthttp.RequestHandler) {
	h.routes[route] = handler
}
// RouteHandler dispatches the request to the handler registered for the
// exact request path, or responds 404 when no route matches.
//
// Bug fix: the content type was previously set unconditionally AFTER
// dispatch, which clobbered the plain-text content type set by ctx.Error on
// the 404 path and any content type chosen by a route handler (e.g. the SSE
// endpoint, which must not emit text/html). It is now set as a default
// before dispatch so handlers can override it.
func (h *MyHandler) RouteHandler(ctx *fasthttp.RequestCtx) {
	handler, ok := h.routes[string(ctx.Path())]
	if !ok {
		ctx.Error("Unsupported path", fasthttp.StatusNotFound)
		return
	}
	ctx.SetContentType("text/html; charset=utf-8")
	handler(ctx)
}
// indexHandler renders the index page with a hard-coded set of sample talk
// presences.
func indexHandler(ctx *fasthttp.RequestCtx) {
	// gofmt -s: the element type is implied by the slice literal, so the
	// repeated templates.TalkPresences is omitted.
	presences := &[]templates.TalkPresences{
		{
			Title:         "Teste",
			OwnerUsername: "Fulano",
			ShortUrl:      "https://www.google.com",
			Presences:     10,
		},
		{
			Title:         "Juca",
			OwnerUsername: "Malaquias",
			ShortUrl:      "https://www.google.com",
			Presences:     235235235325,
		},
		{
			Title:         ":)",
			OwnerUsername: "Thaís",
			ShortUrl:      "https://www.google.com",
			Presences:     9034634,
		},
	}
	templates.WriteIndex(ctx, presences)
}
// fooHanlder writes the rendered Hello template for "Foo" followed by a
// newline.
// NOTE(review): the name contains a typo ("Hanlder"); renaming it would
// require updating the registration in main, so it is left unchanged here.
func fooHanlder(ctx *fasthttp.RequestCtx) {
	greeting := templates.Hello("Foo")
	fmt.Fprintf(ctx, "%s\n", greeting)
}
// barHandler writes the rendered Hello template for "Bar" followed by a
// newline.
func barHandler(ctx *fasthttp.RequestCtx) {
	greeting := templates.Hello("Bar")
	fmt.Fprintf(ctx, "%s\n", greeting)
}
// greetingsHandler renders the Greetings template for a fixed list of names
// into a buffer and writes the result to the response.
func greetingsHandler(ctx *fasthttp.RequestCtx) {
	names := []string{"Kate", "Go", "John", "Brad"}
	// qtc creates Write* function for each template function.
	// Such functions accept io.Writer as first parameter:
	var buf bytes.Buffer
	templates.WriteGreetings(&buf, names)
	// Write the rendered bytes directly; the previous
	// fmt.Fprintf(ctx, "%s", buf.Bytes()) routed the same bytes through the
	// formatter (boxing the argument and re-copying) for no benefit.
	ctx.Write(buf.Bytes())
}
//go:generate qtc -dir=./templates
func main() {
flag.Parse()
fmt.Printf("Listen on port: %s\n", *addr)
broker := sse.NewSSE()
handler := newHandler()
handler.AddRoute("/", indexHandler)
handler.AddRoute("/foo", fooHanlder)
handler.AddRoute("/bar", barHandler)
handler.AddRoute("/greetings", greetingsHandler)
handler.AddRoute("/sse", broker.ServeHTTP)
h := handler.RouteHandler
if *compress {
h = fasthttp.CompressHandler(h)
}
go func(){
i := 0
for {
i++
time.Sleep(600 * time.Millisecond)
fmt.Printf("Sending to broker: %d", i)
broker.Notifier <- []byte(fmt.Sprintf("Number %d\n", i))
}
}()
if err := fasthttp.ListenAndServe(*addr, h); err != nil {
log.Fatalf("Error in ListenAndServe: %s", err)
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.