text stringlengths 11 4.05M |
|---|
package Dogcat
import "fmt"
// Animal is satisfied by any type that provides a Bark method.
type Animal interface {
	Bark()
}

// Dog implements Bark with a value receiver, so both Dog and *Dog
// satisfy Animal.
type Dog struct {
}

// Bark prints "dog".
func (d Dog) Bark() {
	fmt.Println("dog")
}

// Cat implements Bark with a pointer receiver, so only *Cat (not Cat)
// satisfies Animal.
type Cat struct {
}

// Bark prints "cat".
func (c *Cat) Bark() {
	fmt.Println("cat")
}

// Bark invokes the Bark method through the Animal interface.
func Bark(a Animal) {
	a.Bark()
}

// getDog returns a Dog value; the call result is not addressable.
func getDog() Dog {
	return Dog{}
}

// getCat returns a Cat value; the call result is not addressable, so
// pointer-receiver methods cannot be called on it directly.
func getCat() Cat {
	return Cat{}
}
// main demonstrates Go method sets: which receiver forms satisfy an
// interface, and which method calls are legal on addressable versus
// non-addressable values. (Comments translated from Chinese.)
func main() {
	dp := &Dog{}
	d := Dog{}
	dp.Bark() // (1) compiles
	d.Bark()  // (2) compiles
	Bark(dp)
	// (3) compiles: as noted above, the method set of *Dog contains methods
	// with both *Dog and Dog receivers
	Bark(d) // (4) compiles
	cp := &Cat{}
	c := Cat{}
	cp.Bark() // (5) compiles
	c.Bark()  // (6) compiles: c is addressable, so the compiler rewrites this as (&c).Bark()
	Bark(cp) // (7) compiles
	//Bark(c)
	// (8) compile error: the method set of the value type Cat contains only
	// methods with receiver Cat, so Cat does not implement Animal — implementing
	// Animal requires a Bark method, but only the pointer-receiver
	// (c *Cat) Bark() exists.
	getDog().Bark() // (9) compiles
	//getCat().Bark()
	// (10) compile error:
	// as noted above, the return value of getCat() is not addressable,
	// so methods with receiver *Cat cannot be called on it
}
package main
import (
"fmt"
"github.com/brianseitel/charlatan"
)
// Product ...
//
// Product is a sample struct whose charlatan tags tell the fake-data
// generator which kind of value to produce for each field.
type Product struct {
	UUID string `charlatan:"uuid"`
	Name string `charlatan:"name"`
	// NOTE(review): Brand reuses the "name" generator — presumably intended
	// (a fake brand name), but confirm it was not meant to be a distinct tag.
	Brand string  `charlatan:"name"`
	Price float64 `charlatan:"price"`
	// Nested struct fields are populated recursively via the "struct" tag.
	Categories struct {
		Name string `charlatan:"word"`
	} `charlatan:"struct"`
	Image     string `charlatan:"url"`
	OnSale    bool   `charlatan:"boolean"`
	UpdatedAt string `charlatan:"datetime"`
}
// main generates one fake Product and prints its Go-syntax representation.
func main() {
	c := charlatan.New()
	foo, err := c.Generate(&Product{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v", foo)
}
|
package gopipe_test
import (
"io/ioutil"
"testing"
"github.com/bingoohuang/golog"
"github.com/bingoohuang/gopipe/pkg/gopipe"
"github.com/stretchr/testify/assert"
)
// TestParsePipelineConfig parses testdata/a.yaml, verifies the resulting
// PipelineConfig matches the expected stages and jobs, then runs the
// pipeline end to end (Run's outcome is not asserted here).
func TestParsePipelineConfig(t *testing.T) {
	config, err := ioutil.ReadFile("testdata/a.yaml")
	assert.Nil(t, err)
	c := &gopipe.PipelineConfig{}
	assert.Nil(t, c.Parse(config))
	assert.Equal(t, &gopipe.PipelineConfig{
		Stages: []string{"build", "test", "deploy"},
		Jobs: []gopipe.Job{
			{
				Name:  "job 1",
				Stage: "build",
				Scripts: []string{
					"mkdir .public",
					"cp -r * .public",
					"mv .public public",
				},
			},
			{
				Name:  "job 2",
				Stage: "test",
				Scripts: []string{
					"make test",
				},
			},
			{
				Name:  "job 4",
				Stage: "deploy",
				Scripts: []string{
					"make deploy",
				},
			},
		},
	}, c)
	// Configure logging before executing so Run's output is formatted.
	golog.SetupLogrus(nil, "")
	c.Run()
}
|
// Copyright 2021 Comcast Cable Communications Management, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package docs
// swagger:route PUT /v1/orgs/{orgId}/applications/{appId}/routes/{routeId} routes putRoute
// Adds a new route to the routing table or updates an existing route. Route ID can be given in the body. If it is omitted a hash will be calculated and used instead.
// responses:
// 200: RouteResponse
// 500: RouteErrorResponse
// Item response containing a complete route configuration including sender, receiver and optional filter chain.
// NOTE(review): the swagger:route annotation above lists "200: RouteResponse"
// while this response is registered as "routeResponse" — verify the names
// agree in the generated spec.
// swagger:response routeResponse
type routeResponseWrapper struct {
	// in: body
	Body RouteResponse
}
// Item response containing a message.
// swagger:response postRouteEventResponse
type successResponseWrapper struct {
	// in: body
	Body SuccessResponse
}
// Item response containing an error message.
//
// Fix: the original annotation duplicated "postRouteEventResponse", the
// name already registered by successResponseWrapper, making the two
// responses collide in the generated swagger spec.
// swagger:response errorResponse
type errorResponseWrapper struct {
	// in: body
	Body ErrorResponse
}
// Item response containing a route error.
// swagger:response routeErrorResponse
type routeErrorResponseWrapper struct {
	// in: body
	Body RouteErrorResponse
}
// routeParamWrapper documents the request body shared by the putRoute and
// postRoute operations.
// swagger:parameters putRoute postRoute
type routeParamWrapper struct {
	// Route configuration including sender, receiver and optional filter chain.
	// in: body
	// required: true
	Body RouteConfig
}
// routeIdParamWrapper documents the {routeId} path parameter used by the
// route CRUD and event operations.
// swagger:parameters putRoute getRoute deleteRoute postRouteEvent
type routeIdParamWrapper struct {
	// Route ID
	// in: path
	// required: true
	RouteId string `json:"routeId"`
}
// RouteResponse is the payload returned for successful route reads/writes.
type RouteResponse struct {
	Status responseStatus `json:"status"`
	Item   RouteConfig    `json:"item"`
}

// SuccessResponse carries a status plus a human-readable message.
type SuccessResponse struct {
	Status responseStatus `json:"status"`
	Item   string         `json:"item"`
}

// ErrorResponse carries a status plus an error message.
type ErrorResponse struct {
	Status responseStatus `json:"status"`
	Item   string         `json:"item"`
}

// RouteErrorResponse carries a status plus a route-specific error message.
type RouteErrorResponse struct {
	Status responseStatus `json:"status"`
	Item   string         `json:"item"`
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package externaldata
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"net"
"net/http"
"net/url"
"sync"
"chromiumos/tast/errors"
"chromiumos/tast/testing"
)
// httpHandler serves registered policy blobs over HTTP, keyed by URL path.
type httpHandler struct {
	logger   *testing.Logger // nil when running outside a tast test context
	policies map[string][]byte
	mu       sync.Mutex // Protects policies.
}
// ServeHTTP replies with the policy blob registered for the request path,
// or a plain 404 for unknown paths.
func (h *httpHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	if h.logger != nil {
		h.logger.Printf("ExternalDataServer received HTTP request for %q", req.URL.Path)
	}
	h.mu.Lock()
	policy, ok := h.policies[req.URL.Path]
	h.mu.Unlock()
	if !ok {
		if h.logger != nil {
			h.logger.Print("Failed to find: ", req.URL.Path)
		}
		// Use the named constant instead of the magic number 404.
		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
		return
	}
	// The original discarded the Write error; surface it to the test log.
	if _, err := w.Write(policy); err != nil && h.logger != nil {
		h.logger.Print("Failed to write policy: ", err)
	}
}
// Server is a http server that helps serve data for policies that load their data from an external source.
type Server struct {
	httpServer *http.Server
	handler    httpHandler
}
// NewServer creates a new external data server.
// It listens on an OS-assigned port on 127.0.0.1 and serves requests from a
// background goroutine until Stop is called.
func NewServer(ctx context.Context) (*Server, error) {
	logger, ok := testing.ContextLogger(ctx)
	if !ok {
		// To allow golang testing (no tast logger in the context).
		logger = nil
	}
	srv := Server{
		httpServer: &http.Server{},
		handler: httpHandler{
			policies: make(map[string][]byte),
			logger:   logger,
		},
	}
	srv.httpServer.Handler = &srv.handler
	// Port 0 asks the kernel for any free port; the chosen port is read back
	// from the listener below.
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, errors.Wrap(err, "failed to create listener for http server")
	}
	port := listener.Addr().(*net.TCPAddr).Port
	srv.httpServer.Addr = fmt.Sprintf("127.0.0.1:%d", port)
	go func() {
		// ErrServerClosed is the expected result of Stop; anything else is a
		// real failure worth logging.
		if err := srv.httpServer.Serve(listener); err != http.ErrServerClosed {
			testing.ContextLog(ctx, "ExternalDataServer HTTP server failed: ", err)
		}
	}()
	return &srv, nil
}
// ServePolicyData starts serving content and returns the URL and hash to be set in the policy.
func (s *Server) ServePolicyData(data []byte) (address, hash string) {
	sum := sha256.Sum256(data)
	hash = hex.EncodeToString(sum[:])
	// Using len(policies) as a prefix ensures a unique URL; part of the hash
	// lengthens the path to be more realistic.
	// Fix: the original read len(s.handler.policies) BEFORE taking the mutex,
	// racing with concurrent map access in ServeHTTP and other callers. Both
	// the length read and the insert now happen under the lock.
	s.handler.mu.Lock()
	path := fmt.Sprintf("/%d-%s", len(s.handler.policies), hash[:5])
	s.handler.policies[path] = append([]byte(nil), data...)
	s.handler.mu.Unlock()
	// Named u rather than url to avoid shadowing the net/url package.
	u := url.URL{
		Host:   s.httpServer.Addr,
		Path:   path,
		Scheme: "http",
	}
	return u.String(), hash
}
// Stop shuts down the server.
// In-flight requests are given until ctx is done to complete.
func (s *Server) Stop(ctx context.Context) error {
	if err := s.httpServer.Shutdown(ctx); err != nil {
		return errors.Wrap(err, "failed to shutdown HTTP server")
	}
	return nil
}
|
package html5
const (
	// preambleTmpl renders a document preamble. When .Wrapper is set, the
	// content (and optional .ToC) is wrapped in the standard
	// #preamble/.sectionbody divs; otherwise the bare .Content is emitted.
	// The raw string's exact whitespace is part of the rendered output.
	preambleTmpl = `{{ if .Wrapper }}<div id="preamble">
<div class="sectionbody">
{{ end }}{{ .Content }}{{ if .Wrapper }}</div>
{{ if .ToC }}{{ .ToC }}{{ end }}</div>
{{ end }}`
)
|
package main
import (
"fmt"
"log"
"net/http"
"path/filepath"
"time"
"github.com/docopt/docopt-go"
"github.com/gin-gonic/gin"
ginprometheus "github.com/mcuadros/go-gin-prometheus"
"github.com/rmrf/robo/cli"
"github.com/rmrf/robo/config"
)
// version is reported for the --version flag.
var version = "0.5.7"

// usage is the docopt grammar for the CLI; docopt derives the argument
// parser directly from this text, so its exact formatting is significant.
const usage = `
Usage:
robo [--config file]
robo <task> [<arg>...] [--config file]
robo help [<task>] [--config file]
robo variables [--config file]
robo startweb [--config file]
robo -h | --help
robo --version
Options:
-c, --config file config file to load [default: robo.yml]
-h, --help output help information
-v, --version output version
Examples:
output tasks
$ robo
output task help
$ robo help mytask
`
// main parses CLI arguments with docopt, loads the YAML task configuration,
// and dispatches to the requested subcommand (help, variables, startweb, or
// running / listing tasks by default).
func main() {
	args, err := docopt.Parse(usage, nil, true, version, true)
	if err != nil {
		cli.Fatalf("error parsing arguments: %s", err)
	}
	abs, err := filepath.Abs(args["--config"].(string))
	if err != nil {
		cli.Fatalf("cannot resolve --config: %s", err)
	}
	c, err := config.New(abs)
	if err != nil {
		cli.Fatalf("error loading configuration: %s", err)
	}
	switch {
	case args["help"].(bool):
		if name, ok := args["<task>"].(string); ok {
			cli.Help(c, name)
		} else {
			cli.List(c)
		}
	case args["variables"].(bool):
		cli.ListVariables(c)
	case args["startweb"].(bool):
		// NOTE(review): these type assertions panic if the "robo" variable
		// block or its web-addr/token keys are missing from the config file —
		// consider validating and calling cli.Fatalf instead.
		roboV := c.Variables["robo"].(map[interface{}]interface{})
		startWeb(c, roboV["web-addr"].(string), roboV["token"].(string))
	default:
		if name, ok := args["<task>"].(string); ok {
			cli.Run(c, name, args["<arg>"].([]string))
		} else {
			cli.List(c)
		}
	}
}
// startWeb serves robo tasks over HTTP on addr. POST /task/:taskname with a
// JSON body {"token": "...", "args": ["..."]} runs the named task after
// checking the token. Prometheus metrics are exposed by the ginprometheus
// middleware.
func startWeb(conf *config.Config, addr, token string) {
	type PostBody struct {
		// Fix: the original tags were `json:"token"" binding:"required"` —
		// the stray extra quote made the tag string malformed, silently
		// disabling the "required" binding validation.
		Token string   `json:"token" binding:"required"`
		Args  []string `json:"args" binding:"required"`
	}
	r := gin.New()
	p := ginprometheus.NewPrometheus("robo")
	p.Use(r)
	r.Use(gin.LoggerWithFormatter(func(param gin.LogFormatterParams) string {
		// Custom access-log format.
		return fmt.Sprintf("%s - [%s] \"%s %s %s %d %s \"%s\" %s\"\n",
			param.ClientIP,
			param.TimeStamp.Format(time.RFC1123),
			param.Method,
			param.Path,
			param.Request.Proto,
			param.StatusCode,
			param.Latency,
			param.Request.UserAgent(),
			param.ErrorMessage,
		)
	}))
	r.Use(gin.Recovery())
	r.POST("/task/:taskname", func(gc *gin.Context) {
		var pBody PostBody
		if err := gc.ShouldBindJSON(&pBody); err != nil {
			gc.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		if pBody.Token != token {
			// Fix: the original logged the *expected* token here, leaking
			// the shared secret into the logs on every failed attempt.
			log.Print("Wrong token received")
			gc.JSON(http.StatusForbidden, gin.H{"message": "bad token"})
			return
		}
		taskName := gc.Param("taskname")
		info := fmt.Sprintf("%s: %s", taskName, pBody.Args)
		log.Println(info)
		err := cli.Run(conf, taskName, pBody.Args)
		if err != nil {
			log.Printf("client run failed: %s", err.Error())
			gc.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
			return
		}
		gc.JSON(http.StatusOK, gin.H{"message": info})
	})
	s := &http.Server{Addr: addr,
		Handler:      r,
		ReadTimeout:  6 * time.Second,
		WriteTimeout: 6 * time.Second}
	// Fix: the ListenAndServe error was silently discarded.
	if err := s.ListenAndServe(); err != nil && err != http.ErrServerClosed {
		log.Fatalf("web server failed: %s", err)
	}
}
|
package main
import (
"flag"
"net/http"
"os"
"strconv"
"github.com/fils/goobjectweb/internal/api/graph"
"github.com/fils/goobjectweb/internal/api/sitemaps"
"github.com/fils/goobjectweb/internal/api/tika"
"github.com/fils/goobjectweb/internal/digitalobjects"
"github.com/fils/goobjectweb/internal/fileobjects"
"github.com/gorilla/mux"
minio "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
log "github.com/sirupsen/logrus"
)
// Configuration values, settable via flags (registered in init) or
// environment variables (read in main).
var s3addressVal, s3bucketVal, s3prefixVal, domainVal, keyVal, secretVal string
var localVal, s3SSLVal bool

// MyServer is the Gorilla mux router structure
type MyServer struct {
	r *mux.Router
}
// init registers the command line flags and configures logrus to emit JSON
// with caller information to stdout.
func init() {
	// name the log file with the date and time
	//const layout = "2006-01-02-15-04-05"
	//t := time.Now()
	//lf := fmt.Sprintf("grow-%s.log", t.Format(layout))
	//logFile, err := os.OpenFile(lf, os.O_APPEND|os.O_RDWR|os.O_CREATE, 0644)
	//if err != nil {
	//log.Panic(err)
	//return
	//}
	log.SetFormatter(&log.JSONFormatter{}) // Log as JSON instead of the default ASCII formatter.
	log.SetReportCaller(true)              // include file name and line number
	//log.SetOutput(logFile)
	log.SetOutput(os.Stdout)
	//log.SetOutput(io.MultiWriter(logFile, os.Stdout))
	flag.BoolVar(&localVal, "local", false, "Serve file local over object store, false by default")
	flag.BoolVar(&s3SSLVal, "ssl", false, "S3 access is SSL, false by default for docker network backend")
	flag.StringVar(&s3addressVal, "server", "0.0.0.0:0000", "Address of the object server with port")
	flag.StringVar(&s3bucketVal, "bucket", "website", "bucket which holds the web site objects")
	flag.StringVar(&s3prefixVal, "prefix", "website", "bucket prefix for the objects")
	flag.StringVar(&domainVal, "domain", "example.org", "domain of our served web site")
	flag.StringVar(&keyVal, "key", "config", "Object server key")
	flag.StringVar(&secretVal, "secret", "config", "Object server secret")
}
// main wires the object-store-backed web site together: it reads
// configuration from environment variables (falling back to the flag
// defaults registered in init), connects to the minio object store, and
// serves the /api/, /id/ and / routes on :8080.
func main() {
	// Fix: the original overwrote every value with os.Getenv unconditionally,
	// wiping the flag defaults with empty strings whenever a variable was
	// unset. Only override when the variable is actually present.
	if v, ok := os.LookupEnv("S3ADDRESS"); ok {
		s3addressVal = v
	}
	if v, ok := os.LookupEnv("S3BUCKET"); ok {
		s3bucketVal = v
	}
	if v, ok := os.LookupEnv("S3PREFIX"); ok {
		s3prefixVal = v
	}
	if v, ok := os.LookupEnv("DOMAIN"); ok {
		domainVal = v
	}
	if v, ok := os.LookupEnv("S3KEY"); ok {
		keyVal = v
	}
	if v, ok := os.LookupEnv("S3SECRET"); ok {
		secretVal = v
	}
	if v, ok := os.LookupEnv("S3SSL"); ok {
		// Fix: `s3SSLVal, err := strconv.ParseBool(...)` shadowed the
		// package-level s3SSLVal, so the -ssl flag never reached the code
		// below. Assign to the package variable instead.
		ssl, err := strconv.ParseBool(v)
		if err != nil {
			log.Println("Error reading SSL bool flag")
		} else {
			s3SSLVal = ssl
		}
	}
	// TODO move to viper config for this app (pass tika URL)
	// Parse the flags if any, will override the environment vars
	flag.Parse()
	log.Printf("a: %s b %s p %s d %s k %s s %s ssl %v \n", s3addressVal, s3bucketVal, s3prefixVal, domainVal, keyVal, secretVal, s3SSLVal)
	// Need to convert this to gocloud.dev blob (https://gocloud.dev/howto/blob/)
	mc, err := minio.New(s3addressVal,
		&minio.Options{Creds: credentials.NewStaticV4(keyVal, secretVal, ""),
			Secure: s3SSLVal})
	if err != nil {
		// NOTE(review): execution continues with an unusable client here;
		// consider log.Fatal. Kept non-fatal to preserve existing behavior.
		log.Println(err)
	}
	// Handler sm: sitemap, graph and full-text API endpoints under /api/.
	sm := mux.NewRouter()
	sm.PathPrefix("/api/sitemap").Handler(http.StripPrefix("/api/", minioHandler(mc, s3bucketVal, s3prefixVal, domainVal, sitemaps.Build)))
	sm.PathPrefix("/api/graph").Handler(http.StripPrefix("/api/", minioHandler(mc, s3bucketVal, s3prefixVal, domainVal, graph.Build)))
	sm.PathPrefix("/api/fulltext").Handler(http.StripPrefix("/api/", minioHandler(mc, s3bucketVal, s3prefixVal, domainVal, tika.Build)))
	sm.NotFoundHandler = http.HandlerFunc(notFound)
	http.Handle("/api/", &MyServer{sm})
	// Handler doc: addresses the /id/* request path
	doc := mux.NewRouter()
	doc.PathPrefix("/id/").Handler(http.StripPrefix("/id/", minioHandler(mc, s3bucketVal, s3prefixVal, domainVal, digitalobjects.DO)))
	doc.NotFoundHandler = http.HandlerFunc(notFound)
	http.Handle("/id/", &MyServer{doc})
	// Handler dr: addresses the / request path; -local serves from disk.
	dr := mux.NewRouter()
	if localVal {
		dr.PathPrefix("/").Handler(http.StripPrefix("/", http.FileServer(http.Dir("./local"))))
	} else {
		dr.PathPrefix("/").Handler(http.StripPrefix("/", minioHandler(mc, s3bucketVal, s3prefixVal, domainVal, fileobjects.FileObjects)))
	}
	dr.NotFoundHandler = http.HandlerFunc(notFound)
	http.Handle("/", &MyServer{dr})
	// Start the server...
	log.Printf("About to listen on 8080. Go to http://127.0.0.1:8080/")
	err = http.ListenAndServe(":8080", nil)
	if err != nil {
		log.Fatal(err)
	}
}
// notFound redirects any unmatched request to the static 404 page using
// 303 See Other (named constant instead of the magic number 303).
func notFound(w http.ResponseWriter, r *http.Request) {
	http.Redirect(w, r, "/404.html", http.StatusSeeOther)
}
// minioHandler adapts f — a page builder needing the minio client and site
// parameters — into a standard http.Handler.
func minioHandler(minioClient *minio.Client, bucket, prefix, domain string, f func(minioClient *minio.Client, bucket, prefix, domain string, w http.ResponseWriter, r *http.Request)) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { f(minioClient, bucket, prefix, domain, w, r) })
}
// ServeHTTP adds permissive CORS headers to every response, short-circuits
// CORS preflight (OPTIONS) requests, and delegates everything else to the
// wrapped Gorilla mux router.
func (s *MyServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	rw.Header().Set("Access-Control-Allow-Origin", "*")
	rw.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
	rw.Header().Set("Access-Control-Allow-Headers",
		"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
	// Stop here if it is a preflighted OPTIONS request (use the named
	// http.MethodOptions constant instead of a string literal).
	if req.Method == http.MethodOptions {
		return
	}
	// Let Gorilla work
	s.r.ServeHTTP(rw, req)
}
// addDefaultHeaders wraps fn so every response carries the permissive CORS
// Access-Control-Allow-Origin header before the wrapped handler runs.
func addDefaultHeaders(fn http.HandlerFunc) http.HandlerFunc {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		h := w.Header()
		h.Set("Access-Control-Allow-Origin", "*")
		fn.ServeHTTP(w, r)
	})
}
|
package validate
import (
"errors"
"fmt"
"regexp"
"github.com/jrapoport/gothic/config"
)
// Username checks username against the configured validation regex.
// Validation is a no-op when no regex is configured; otherwise an empty
// username is rejected outright before the pattern is consulted.
func Username(c *config.Config, username string) error {
	pattern := c.Security.Validation.UsernameRegex
	if pattern == "" {
		return nil
	}
	if username == "" {
		return errors.New("invalid username")
	}
	rx, err := regexp.Compile(pattern)
	if err != nil {
		return err
	}
	if rx.MatchString(username) {
		return nil
	}
	return fmt.Errorf("invalid username: %s", username)
}
|
package lc
// Time: O(n^2)
// Benchmark: 0ms 2mb | 100%
func maxLengthBetweenEqualCharacters(s string) int {
max := -1
for i := 0; i < len(s)/2; i++ {
for j := len(s) - 1; j >= len(s)/2; j-- {
if s[i] == s[j] {
if j-i-1 > max {
max = j - i - 1
}
break
}
}
}
return max
}
|
package sqlite
import (
"database/sql"
"encoding/json"
"log"
"github.com/edznux/wonderxss/config"
"github.com/edznux/wonderxss/storage/models"
sqlite3 "github.com/mattn/go-sqlite3"
)
// Sqlite is a storage backend persisting to a single SQLite database file.
type Sqlite struct {
	file string
	db   *sql.DB
}

// New builds a Sqlite storage from the global configuration. The database
// file is not opened until Init is called.
func New() (*Sqlite, error) {
	cfg := config.Current
	file := cfg.Storages["sqlite"].File
	log.Printf("Setup SQLite, using file: %+v\n", file)
	s := Sqlite{file: file}
	return &s, nil
}
// Init opens the SQLite database handle.
// Note that sql.Open does not touch the file; the first query does.
func (s *Sqlite) Init() error {
	log.Println("Init sqlite")
	var err error
	s.db, err = sql.Open("sqlite3", s.file)
	if err != nil {
		return err
	}
	return nil
}
// Setup creates every table the storage backend needs. It keeps executing
// all CREATE statements even after a failure and returns the last error
// encountered (nil when everything succeeded), matching the original
// best-effort behavior. The six copy-pasted exec blocks are folded into a
// table-driven loop; all log messages are preserved verbatim.
func (s *Sqlite) Setup() error {
	//return last error, but keep executing all instruction
	var lastErr error
	ddl := []struct {
		msg  string
		stmt string
	}{
		{"Creating users' table", CREATE_TABLE_USERS},
		{"Creating payloads' table", CREATE_TABLE_PAYLOADS},
		{"Creating aliases' table", CREATE_TABLE_ALIASES},
		{"Creating Executions' table", CREATE_TABLE_EXECUTIONS},
		{"Creating Injections' table", CREATE_TABLE_INJECTIONS},
		{"Creating Collectors' table", CREATE_TABLE_COLLECTORS},
	}
	for _, d := range ddl {
		log.Println(d.msg)
		if _, err := s.db.Exec(d.stmt); err != nil {
			log.Println(err)
			lastErr = err
		}
	}
	return lastErr
}
//Create

// CreatePayload inserts payload and reads it back from the database.
func (s *Sqlite) CreatePayload(payload models.Payload) (models.Payload, error) {
	_, err := s.db.Exec(INSERT_PAYLOAD, payload.ID, payload.Name, payload.Hashes.String(), payload.Content)
	if err != nil {
		log.Println(err)
		return models.Payload{}, err
	}
	return s.GetPayload(payload.ID)
}

// CreateUser inserts user and reads it back from the database.
func (s *Sqlite) CreateUser(user models.User) (models.User, error) {
	_, err := s.db.Exec(INSERT_USER, user.ID, user.Username, user.Password)
	if err != nil {
		log.Println(err)
		return models.User{}, err
	}
	return s.GetUser(user.ID)
}

// CreateOTP enables two-factor auth for user with the given TOTP secret.
func (s *Sqlite) CreateOTP(user models.User, TOTPSecret string) (models.User, error) {
	_, err := s.db.Exec(UPDATE_ADD_TOTP, 1, TOTPSecret, user.ID)
	if err != nil {
		log.Println(err)
		return models.User{}, err
	}
	return s.GetUser(user.ID)
}

// RemoveOTP disables two-factor auth and clears the stored TOTP secret.
func (s *Sqlite) RemoveOTP(user models.User) (models.User, error) {
	_, err := s.db.Exec(UPDATE_ADD_TOTP, 0, "", user.ID)
	if err != nil {
		log.Println(err)
		return models.User{}, err
	}
	return s.GetUser(user.ID)
}

// CreateAlias inserts alias; a unique-constraint violation is mapped to
// models.AlreadyExist.
// NOTE(review): the final GetAlias(alias.ID) call passes an ID to a lookup
// that queries by short name (SELECT_ALIAS_BY_SHORTNAME) — this looks like
// it should be GetAliasByID; confirm against the schema.
func (s *Sqlite) CreateAlias(alias models.Alias) (models.Alias, error) {
	_, err := s.db.Exec(INSERT_ALIAS, alias.ID, alias.PayloadID, alias.Short)
	if sqliteErr, ok := err.(sqlite3.Error); ok {
		if sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
			log.Println(err)
			return models.Alias{}, models.AlreadyExist
		}
	}
	if err != nil {
		log.Println(err)
		return models.Alias{}, err
	}
	return s.GetAlias(alias.ID)
}

// CreateCollector inserts collector and reads it back.
func (s *Sqlite) CreateCollector(collector models.Collector) (models.Collector, error) {
	_, err := s.db.Exec(INSERT_COLLECTOR, collector.ID, collector.Data)
	if err != nil {
		log.Println(err)
		return models.Collector{}, err
	}
	return s.GetCollector(collector.ID)
}

// CreateInjection inserts injection and reads it back.
func (s *Sqlite) CreateInjection(injection models.Injection) (models.Injection, error) {
	_, err := s.db.Exec(INSERT_INJECTION, injection.ID, injection.Name, injection.Content)
	if err != nil {
		log.Println("CreateInjection failed:", err)
		return models.Injection{}, err
	}
	return s.GetInjection(injection.ID)
}

// CreateExecution records an execution and reads it back.
func (s *Sqlite) CreateExecution(execution models.Execution, payloadIDOrAlias string) (models.Execution, error) {
	// id, payload_id, alias_id
	// TODO : store the alias_ID and not the alias directly
	_, err := s.db.Exec(INSERT_EXECUTION, execution.ID, execution.PayloadID, payloadIDOrAlias)
	if err != nil {
		log.Println(err)
		return models.Execution{}, err
	}
	return s.GetExecution(execution.ID)
}
// Read

// GetPayloads returns every stored payload, unmarshalling each row's hash
// JSON into the Hashes field.
//
// Fixes over the original: rows is closed via defer (the original leaked it
// on early return), Scan errors are no longer silently ignored, rows.Err()
// is checked after iteration, and the dead `err == sql.ErrNoRows` check was
// removed (Query never returns ErrNoRows; that sentinel is QueryRow-only).
func (s *Sqlite) GetPayloads() ([]models.Payload, error) {
	log.Println("sqlite.GetPayloads")
	res := []models.Payload{}
	rows, err := s.db.Query(SELECT_ALL_PAYLOADS)
	if err != nil {
		log.Println(err)
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		var tmpRes models.Payload
		var hashes string
		if err := rows.Scan(&tmpRes.ID, &tmpRes.Name, &hashes, &tmpRes.Content, &tmpRes.CreatedAt, &tmpRes.ModifiedAt); err != nil {
			log.Println(err)
			return nil, err
		}
		// A bad hashes blob is logged but does not abort the listing,
		// matching the original behavior.
		if err := json.Unmarshal([]byte(hashes), &tmpRes.Hashes); err != nil {
			log.Println(err)
		}
		res = append(res, tmpRes)
	}
	if err := rows.Err(); err != nil {
		log.Println(err)
		return nil, err
	}
	log.Println(res)
	return res, nil
}
// GetPayload fetches a payload by primary ID; the stored hash JSON is
// unmarshalled into res.Hashes. A missing row maps to models.NoSuchItem.
func (s *Sqlite) GetPayload(id string) (models.Payload, error) {
	row := s.db.QueryRow(SELECT_PAYLOAD_BY_ID, id)
	var res models.Payload
	var hashes string
	err := row.Scan(&res.ID, &res.Name, &hashes, &res.Content, &res.CreatedAt, &res.ModifiedAt)
	if err == sql.ErrNoRows {
		return models.Payload{}, models.NoSuchItem
	}
	if err != nil {
		log.Println(err)
		return models.Payload{}, err
	}
	err = json.Unmarshal([]byte(hashes), &res.Hashes)
	if err != nil {
		log.Println(err)
		return models.Payload{}, err
	}
	return res, nil
}

// GetPayloadByAlias fetches the payload referenced by an alias short name.
func (s *Sqlite) GetPayloadByAlias(short string) (models.Payload, error) {
	row := s.db.QueryRow(SELECT_PAYLOAD_BY_ALIAS, short)
	var res models.Payload
	var hashes string
	err := row.Scan(&res.ID, &res.Name, &hashes, &res.Content, &res.CreatedAt, &res.ModifiedAt)
	if err == sql.ErrNoRows {
		return models.Payload{}, models.NoSuchItem
	}
	if err != nil {
		log.Println(err)
		return models.Payload{}, err
	}
	err = json.Unmarshal([]byte(hashes), &res.Hashes)
	if err != nil {
		log.Println(err)
		return models.Payload{}, err
	}
	return res, nil
}
// GetInjection fetches an injection by ID; a missing row maps to
// models.NoSuchItem.
func (s *Sqlite) GetInjection(id string) (models.Injection, error) {
	row := s.db.QueryRow(SELECT_INJECTION, id)
	var res models.Injection
	err := row.Scan(&res.ID, &res.Name, &res.Content, &res.CreatedAt, &res.ModifiedAt)
	if err == sql.ErrNoRows {
		return models.Injection{}, models.NoSuchItem
	}
	if err != nil {
		log.Println(err)
		return models.Injection{}, err
	}
	return res, nil
}

// GetInjectionByName is GetInjection keyed by name instead of ID.
func (s *Sqlite) GetInjectionByName(name string) (models.Injection, error) {
	row := s.db.QueryRow(SELECT_INJECTION_BY_NAME, name)
	var res models.Injection
	err := row.Scan(&res.ID, &res.Name, &res.Content, &res.CreatedAt, &res.ModifiedAt)
	if err == sql.ErrNoRows {
		return models.Injection{}, models.NoSuchItem
	}
	if err != nil {
		log.Println(err)
		return models.Injection{}, err
	}
	return res, nil
}

// GetInjections lists all stored injections.
// NOTE(review): the Scan error is ignored, rows is never closed explicitly,
// and the sql.ErrNoRows check after the loop is dead code (Query never
// returns it) — same pattern as the other list helpers here.
func (s *Sqlite) GetInjections() ([]models.Injection, error) {
	res := []models.Injection{}
	rows, err := s.db.Query(SELECT_ALL_INJECTION)
	if err != nil {
		log.Println("Error querying the db (Injection):", err)
		return nil, err
	}
	var tmpRes models.Injection
	for rows.Next() {
		rows.Scan(&tmpRes.ID, &tmpRes.Name, &tmpRes.Content, &tmpRes.CreatedAt, &tmpRes.ModifiedAt)
		res = append(res, tmpRes)
	}
	if err == sql.ErrNoRows {
		return nil, models.NoSuchItem
	}
	if err != nil {
		log.Println(err)
		return nil, err
	}
	log.Println(res)
	return res, nil
}
// GetAlias fetches an alias by its SHORT NAME (SELECT_ALIAS_BY_SHORTNAME),
// despite the generic parameter name. A missing row maps to
// models.NoSuchItem.
func (s *Sqlite) GetAlias(alias string) (models.Alias, error) {
	row := s.db.QueryRow(SELECT_ALIAS_BY_SHORTNAME, alias)
	var res models.Alias
	err := row.Scan(&res.ID, &res.PayloadID, &res.Short, &res.CreatedAt, &res.ModifiedAt)
	if err == sql.ErrNoRows {
		return models.Alias{}, models.NoSuchItem
	}
	if err != nil {
		log.Println(err)
		return models.Alias{}, err
	}
	return res, nil
}

// GetAliasByID fetches an alias by its primary ID.
func (s *Sqlite) GetAliasByID(id string) (models.Alias, error) {
	row := s.db.QueryRow(SELECT_ALIAS_BY_ID, id)
	var res models.Alias
	err := row.Scan(&res.ID, &res.PayloadID, &res.Short, &res.CreatedAt, &res.ModifiedAt)
	if err == sql.ErrNoRows {
		return models.Alias{}, models.NoSuchItem
	}
	if err != nil {
		log.Println(err)
		return models.Alias{}, err
	}
	return res, nil
}

// GetAliasByPayloadID fetches the alias that points at the given payload.
func (s *Sqlite) GetAliasByPayloadID(id string) (models.Alias, error) {
	row := s.db.QueryRow(SELECT_ALIAS_BY_PAYLOAD_ID, id)
	var res models.Alias
	err := row.Scan(&res.ID, &res.PayloadID, &res.Short, &res.CreatedAt, &res.ModifiedAt)
	if err == sql.ErrNoRows {
		return models.Alias{}, models.NoSuchItem
	}
	if err != nil {
		log.Println(err)
		return models.Alias{}, err
	}
	return res, nil
}
// GetExecution fetches a single execution record by ID.
func (s *Sqlite) GetExecution(id string) (models.Execution, error) {
	log.Println("GetExecution(", id, ")")
	row := s.db.QueryRow(SELECT_EXECUTION, id)
	var res models.Execution
	err := row.Scan(&res.ID, &res.PayloadID, &res.AliasID, &res.TriggeredAt)
	if err == sql.ErrNoRows {
		return models.Execution{}, models.NoSuchItem
	}
	if err != nil {
		log.Println(err)
		return models.Execution{}, err
	}
	return res, nil
}

// GetExecutions lists all execution records.
// NOTE(review): Scan error ignored and dead sql.ErrNoRows check after the
// loop — same pattern as the other list helpers in this file.
func (s *Sqlite) GetExecutions() ([]models.Execution, error) {
	log.Println("GetExecutions")
	res := []models.Execution{}
	rows, err := s.db.Query(SELECT_ALL_EXECUTIONS)
	if err != nil {
		log.Println("Error querying the db (Execution):", err)
		return nil, err
	}
	var tmpRes models.Execution
	for rows.Next() {
		rows.Scan(&tmpRes.ID, &tmpRes.PayloadID, &tmpRes.AliasID, &tmpRes.TriggeredAt)
		res = append(res, tmpRes)
	}
	if err == sql.ErrNoRows {
		return nil, models.NoSuchItem
	}
	if err != nil {
		log.Println(err)
		return nil, err
	}
	log.Println(res)
	return res, nil
}
// GetCollector fetches a single collector record by ID.
func (s *Sqlite) GetCollector(id string) (models.Collector, error) {
	log.Println("GetCollector(", id, ")")
	row := s.db.QueryRow(SELECT_COLLECTOR, id)
	var res models.Collector
	err := row.Scan(&res.ID, &res.Data, &res.CreatedAt)
	if err == sql.ErrNoRows {
		return models.Collector{}, models.NoSuchItem
	}
	if err != nil {
		log.Println(err)
		return models.Collector{}, err
	}
	return res, nil
}

// GetCollectors lists all collector records.
// NOTE(review): Scan error ignored and dead sql.ErrNoRows check — same
// pattern as the other list helpers in this file.
func (s *Sqlite) GetCollectors() ([]models.Collector, error) {
	log.Println("GetCollectors")
	res := []models.Collector{}
	rows, err := s.db.Query(SELECT_ALL_COLLECTOR)
	if err != nil {
		log.Println("Error querying the db (Collector):", err)
		return nil, err
	}
	var tmpRes models.Collector
	for rows.Next() {
		rows.Scan(&tmpRes.ID, &tmpRes.Data, &tmpRes.CreatedAt)
		res = append(res, tmpRes)
	}
	if err == sql.ErrNoRows {
		return nil, models.NoSuchItem
	}
	if err != nil {
		log.Println(err)
		return nil, err
	}
	log.Println(res)
	return res, nil
}
// GetAliases lists all alias records.
// NOTE(review): Scan error ignored and dead sql.ErrNoRows check — same
// pattern as the other list helpers in this file.
func (s *Sqlite) GetAliases() ([]models.Alias, error) {
	log.Println("GetAliases")
	res := []models.Alias{}
	rows, err := s.db.Query(SELECT_ALL_ALIASES)
	if err != nil {
		log.Println(err)
		return nil, err
	}
	var tmpRes models.Alias
	for rows.Next() {
		rows.Scan(&tmpRes.ID, &tmpRes.PayloadID, &tmpRes.Short, &tmpRes.CreatedAt, &tmpRes.ModifiedAt)
		res = append(res, tmpRes)
	}
	if err == sql.ErrNoRows {
		return nil, models.NoSuchItem
	}
	if err != nil {
		log.Println(err)
		return nil, err
	}
	log.Println(res)
	return res, nil
}
// GetUser fetches a user by ID, decoding the two-factor flag (stored as an
// int) and the nullable TOTP secret into the model.
func (s *Sqlite) GetUser(id string) (models.User, error) {
	var user models.User
	var TOTPSecret sql.NullString
	var TFEnabled int
	row := s.db.QueryRow(SELECT_USER, id)
	err := row.Scan(&user.ID, &user.Username, &user.Password, &TFEnabled, &TOTPSecret, &user.CreatedAt, &user.ModifiedAt)
	if err == sql.ErrNoRows {
		return user, models.NoSuchItem
	}
	// NOTE(review): the scanned values are inspected before the generic err
	// check below; on error they are zero values so this is harmless, but
	// checking err first would read better.
	if TFEnabled == 1 {
		user.TwoFactorEnabled = true
	}
	if TOTPSecret.Valid {
		user.TOTPSecret = TOTPSecret.String
	}
	if err != nil {
		log.Println(err)
		return user, err
	}
	return user, nil
}

// GetUserByName is GetUser keyed by username instead of ID.
func (s *Sqlite) GetUserByName(name string) (models.User, error) {
	var user models.User
	var TOTPSecret sql.NullString
	var TFEnabled int
	row := s.db.QueryRow(SELECT_USER_BY_NAME, name)
	err := row.Scan(&user.ID, &user.Username, &user.Password, &TFEnabled, &TOTPSecret, &user.CreatedAt, &user.ModifiedAt)
	if err == sql.ErrNoRows {
		return user, models.NoSuchItem
	}
	if TFEnabled == 1 {
		user.TwoFactorEnabled = true
	}
	if TOTPSecret.Valid {
		user.TOTPSecret = TOTPSecret.String
	}
	if err != nil {
		log.Println(err)
		return user, err
	}
	return user, nil
}
//Update

// UpdatePayload is a not-yet-implemented stub; it silently reports success.
func (s *Sqlite) UpdatePayload(models.Payload) error {
	return nil
}

// UpdateUser is a not-yet-implemented stub; it silently reports success.
func (s *Sqlite) UpdateUser(models.User) error {
	return nil
}
//Delete

// DeletePayload removes the payload row matching p.ID.
func (s *Sqlite) DeletePayload(p models.Payload) error {
	_, err := s.db.Exec(DELETE_PAYLOAD, p.ID)
	return err
}

// DeleteUser removes the user row matching u.ID.
func (s *Sqlite) DeleteUser(u models.User) error {
	_, err := s.db.Exec(DELETE_USER, u.ID)
	return err
}

// DeleteAlias removes the alias row matching a.ID.
func (s *Sqlite) DeleteAlias(a models.Alias) error {
	_, err := s.db.Exec(DELETE_ALIAS, a.ID)
	return err
}

// DeleteExecution removes the execution row matching e.ID.
func (s *Sqlite) DeleteExecution(e models.Execution) error {
	_, err := s.db.Exec(DELETE_EXECUTION, e.ID)
	return err
}

// DeleteCollector removes the collector row matching c.ID.
func (s *Sqlite) DeleteCollector(c models.Collector) error {
	_, err := s.db.Exec(DELETE_COLLECTOR, c.ID)
	return err
}

// DeleteInjection removes the injection row matching i.ID.
func (s *Sqlite) DeleteInjection(i models.Injection) error {
	_, err := s.db.Exec(DELETE_INJECTION, i.ID)
	return err
}
|
package domain
import "github.com/traPtitech/trap-collection-server/src/domain/values"
// Seat represents a seat and its current occupancy status.
type Seat struct {
	id     values.SeatID
	status values.SeatStatus
}

// NewSeat creates a Seat with the given id and initial status.
func NewSeat(id values.SeatID, status values.SeatStatus) *Seat {
	return &Seat{
		id:     id,
		status: status,
	}
}

// ID returns the seat's identifier.
func (s *Seat) ID() values.SeatID {
	return s.id
}

// Status returns the seat's current status.
func (s *Seat) Status() values.SeatStatus {
	return s.status
}

// SetStatus updates the seat's status.
func (s *Seat) SetStatus(status values.SeatStatus) {
	s.status = status
}
|
package main
import (
"12306.com/12306/common/middleware"
"12306.com/12306/stations"
"12306.com/12306/trains"
"12306.com/12306/users"
"github.com/gin-gonic/gin"
)
// CollectRoute registers every HTTP route on r and returns the engine.
// (Route comments translated from Chinese.)
func CollectRoute(r *gin.Engine) *gin.Engine {
	//users
	// register a new account
	r.POST("/user/api/v1/register/", users.Register)
	// log in
	r.POST("/user/api/v1/login/", users.Login)
	// add a passenger
	r.POST("/user/api/v1/passenger/", middleware.AuthMiddleware(), users.AddPassenger)
	// update a passenger
	r.PUT("/user/api/v1/passenger/", middleware.AuthMiddleware(), users.UpdatePassenger)
	// query passengers
	r.GET("/user/api/v1/passenger/", middleware.AuthMiddleware(), users.QueryPassenger)
	//stations
	// list all stations
	r.GET("/search/api/v1/queryAllStations/", stations.AllStationsList)
	//trains
	// stations a given train passes through
	r.GET("/search/api/v1/queryStation/", trains.TrainStationList)
	// query remaining tickets
	r.POST("/search/api/v1/remainder/", trains.TicketList)
	// buy a ticket
	r.GET("/buy/ticket/", middleware.AuthMiddleware(), trains.TicketBuy)
	// r.GET("/buy/ticket/", middleware.AuthMiddleware(), trains.TicketBuy)
	// refund a ticket
	r.POST("/reticket/api/v1/", middleware.AuthMiddleware(), trains.TicketCancel)
	// change a ticket
	// NOTE(review): unlike the other ticket operations this route has no
	// AuthMiddleware — confirm whether that is intentional.
	r.PUT("/change/order/", trains.TicketChange)
	return r
}
|
package controller
import "time"
// Websocket keepalive and framing limits — presumably consumed by the
// connection read/write pumps defined elsewhere in this package (not
// visible in this file).
const (
	// Time allowed to write a message to the peer.
	writeWait = 10 * time.Second
	// Time allowed to read the next pong message from the peer.
	pongWait = 60 * time.Second
	// Send pings to peer with this period. Must be less than pongWait.
	pingPeriod = (pongWait * 9) / 10
	// Maximum message size allowed from peer.
	maxMessageSize = 512
)
// hub routes messages between websocket connections grouped into rooms.
// All state is owned by the Run goroutine; other goroutines communicate
// with it exclusively through the channels below.
type hub struct {
	// rooms maps a room name to the set of its member connections.
	rooms      map[string]map[*connection]bool
	broadcast  chan message
	register   chan subscription
	unregister chan subscription
}

// message is a payload destined for every connection in one room.
type message struct {
	data []byte
	room string
}
// Run is the hub's event loop: it owns the rooms map and serially applies
// register, unregister and broadcast events, so no locking is needed.
// It never returns; start it in its own goroutine.
func (h *hub) Run() {
	for {
		select {
		case s := <-h.register:
			// Lazily create the room on first join.
			connections := h.rooms[s.room]
			if connections == nil {
				connections = make(map[*connection]bool)
				h.rooms[s.room] = connections
			}
			h.rooms[s.room][s.conn] = true
		case s := <-h.unregister:
			connections := h.rooms[s.room]
			if connections != nil {
				if _, ok := connections[s.conn]; ok {
					delete(connections, s.conn)
					close(s.conn.send)
					// Drop empty rooms so the map does not grow unbounded.
					if len(connections) == 0 {
						delete(h.rooms, s.room)
					}
				}
			}
		case m := <-h.broadcast:
			connections := h.rooms[m.room]
			for c := range connections {
				select {
				case c.send <- m.data:
				default:
					// Receiver is not keeping up: evict it rather than
					// block the whole hub.
					close(c.send)
					delete(connections, c)
					if len(connections) == 0 {
						delete(h.rooms, m.room)
					}
				}
			}
		}
	}
}
// H is the process-wide hub instance; start it with `go H.Run()`.
var H = hub{
	broadcast:  make(chan message),
	register:   make(chan subscription),
	unregister: make(chan subscription),
	rooms:      make(map[string]map[*connection]bool),
}
|
// Generate SDK from Example Doc site.
//
//go:build ignore
// +build ignore

package main
import (
"bytes"
"flag"
"fmt"
"go/format"
"io/ioutil"
"log"
"os"
"path"
"strings"
"text/template"
"github.com/PuerkitoBio/goquery"
)
var (
	// docIndex points at the scraped HTML documentation index to generate from.
	docIndex = flag.String("doc", "./api/index.html", "Original doc from")
	// apis accumulates every API description extracted from the doc.
	apis []*API
)

// Elem describes one request parameter or response element parsed from the
// doc tables: its name, declared type, description and whether required.
type Elem struct {
	Name     string
	Type     string
	Doc      string
	Required bool
}
// apiRequestTpl is the text/template used to emit, per API: the request
// struct, its GenURL/Do methods, the response struct and any extra structs.
// The generated code relies on a custom "convertElemType" template function
// registered by the generator. The raw string below is emitted verbatim.
const apiRequestTpl = `
type {{.Name}} struct {
{{range $elem := .Parameters}}
{{ $elem.Name }} {{ $elem | convertElemType}} //{{$elem.Doc}}
{{end}}
}
func (r *{{.Name}}) GenURL() (v *url.Values) {
setAuth(v)
{{range $elem := .Parameters}}
{{if eq $elem.Type "Integer" }}
v.Set("{{ $elem.Name }}", intToString(r.{{$elem.Name}}))
{{else}}
v.Set("{{ $elem.Name }}", r.{{$elem.Name}})
{{end}}
{{end}}
v.Set("Action", "{{.Name}}")
return v
}
func (r *{{.Name}}) Do() (response *{{.Name}}Response, err error) {
v := r.GenURL()
resp, err := http.PostForm(Endpoint, *v)
if err != nil {
return
}
dec := json.NewDecoder(resp.Body)
dec.Decode(response)
return
}
type {{ .Name}}Response struct {
{{range $elem := .ResponseElements}}
{{ $elem.Name }} {{ $elem | convertElemType}} //{{$elem.Doc}}
{{end}}
}
{{if .ExtStructs}}
// Extra struct
{{range $key, $elem := .ExtStructs}}
type {{ $key }} struct {
{{range $i := $elem}}
{{ $i.Name }} {{ $i | convertElemType}} //{{$i.Doc}}
{{end}}
}
{{end}}
{{end}}
`
/*
TODO
	Default Value
	Validation
	more type
*/

// API describes one scraped endpoint and everything needed to generate its
// Go source file.
type API struct {
	Name             string
	URL              string // relative link to the API's own doc page
	Doc              string // raw HTML description from the index table
	Parameters       []*Elem
	ResponseElements []*Elem
	ExtStructs       map[string][]*Elem // extra structs referenced by Array fields
}
// extractAPI parses one row of the index table (#api tbody tr) into an API
// record and appends it to the package-level apis slice.
// Row layout: first cell = description, second cell = anchor whose href and
// text are the API page URL and name.
func extractAPI(i int, sel *goquery.Selection) {
	api := &API{}
	doc := sel.Children()
	// First cell: the description, kept as raw HTML.
	api.Doc, _ = doc.Html()
	// Second cell: the anchor element.
	href := doc.Next().Children()
	api.URL, _ = href.Attr("href")
	api.Name, _ = href.Html()
	apis = append(apis, api)
}
// genRequest scrapes the request-parameter table (#request tbody tr) of an
// API page and fills api.Parameters.
// Each row carries four cells in order: name, type, doc, required ("Yes"/"No").
func genRequest(api *API, doc *goquery.Document) {
	// Request start
	requestNodes := doc.Find("#request tbody tr")
	requestParams := make([]*Elem, 0)
	requestNodes.Each(func(i int, sel *goquery.Selection) {
		p := &Elem{}
		sel = sel.Children()
		// Walk the row's four cells in order.
		for i := 0; i < 4; i++ {
			s, err := sel.Html()
			if err != nil {
				log.Fatal(err)
			}
			switch i {
			case 0:
				p.Name = s
			case 1:
				p.Type = s
			case 2:
				p.Doc = s
			case 3:
				// Fourth cell holds the literal string "Yes" or "No".
				if s == "Yes" {
					p.Required = true
				} else {
					p.Required = false
				}
			}
			sel = sel.Next()
		}
		log.Print("Request:", p)
		requestParams = append(requestParams, p)
	})
	api.Parameters = requestParams
}
// genResponse scrapes the response-field table (#response tbody tr) of an
// API page and fills api.ResponseElements.
// It reports whether any field has type "Array", in which case the caller
// must also scrape the extra struct tables (see genExtra).
func genResponse(api *API, doc *goquery.Document) bool {
	// Response
	responseNodes := doc.Find("#response tbody tr")
	responseElements := make([]*Elem, 0)
	hasExt := false
	responseNodes.Each(func(i int, sel *goquery.Selection) {
		p := &Elem{}
		sel = sel.Children()
		// Walk the row's three cells: name, type, doc.
		for i := 0; i < 3; i++ {
			s, err := sel.Html()
			if err != nil {
				log.Fatal(err)
			}
			switch i {
			case 0:
				p.Name = s
			case 1:
				if s == "Array" {
					hasExt = true
				}
				p.Type = s
			case 2:
				p.Doc = s
			}
			sel = sel.Next()
		}
		// Was "Request:" — a copy-paste from genRequest that made the logs
		// ambiguous; response rows are now labeled correctly.
		log.Print("Response:", p)
		responseElements = append(responseElements, p)
	})
	api.ResponseElements = responseElements
	return hasExt
}
// genExtra scrapes every ".extra" table on the page into api.ExtStructs,
// keyed by the table's id attribute (which becomes the generated struct's
// name). These are the structs referenced by "Array"-typed response fields.
func genExtra(api *API, doc *goquery.Document) {
	// Extra
	extra := doc.Find(".extra")
	api.ExtStructs = make(map[string][]*Elem)
	extra.Each(func(i int, sel *goquery.Selection) {
		log.Print(i, sel)
		// Skip tables without an id: there is no struct name to give them.
		name, ok := sel.Attr("id")
		if !ok {
			return
		}
		Elements := make([]*Elem, 0)
		sel.Find("tbody tr").Each(func(i int, sel *goquery.Selection) {
			p := &Elem{}
			sel = sel.Children()
			// Three cells per row: name, type, doc.
			for i := 0; i < 3; i++ {
				s, err := sel.Html()
				if err != nil {
					log.Fatal(err)
				}
				switch i {
				case 0:
					p.Name = s
				case 1:
					p.Type = s
				case 2:
					p.Doc = s
				}
				sel = sel.Next()
			}
			log.Print("Extra:", p)
			Elements = append(Elements, p)
		})
		api.ExtStructs[name] = Elements
	})
}
// genAPI renders one <name>_gen.go file for the given API: it opens the
// API's own doc page, scrapes the request/response tables (plus extra
// structs when needed), executes the template and writes the gofmt-ed
// result next to the current directory.
//
// Fixes: the os.Open error was previously unchecked (a missing page fed a
// nil reader to goquery), the file handle was never closed, and the
// template Parse/Execute and WriteFile errors were silently ignored.
func genAPI(api *API) (err error) {
	log.Print("Generating ", api.Name)
	buf := bytes.NewBuffer([]byte{})
	buf.WriteString(header)
	// The index links pages as "/api/..."; they live next to the index file.
	file, err := os.Open(path.Join(path.Dir(*docIndex), strings.Replace(api.URL, "/api", "", 1)))
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	doc, err := goquery.NewDocumentFromReader(file)
	if err != nil {
		log.Fatal(err)
	}
	genRequest(api, doc)
	extra := genResponse(api, doc)
	if extra {
		genExtra(api, doc)
	}
	t := template.New("request")
	maps := make(template.FuncMap)
	maps["convertElemType"] = convertElemType
	t, err = t.Funcs(maps).Parse(apiRequestTpl)
	if err != nil {
		log.Fatal(err)
	}
	tmp := bytes.NewBuffer([]byte{})
	if err = t.Execute(tmp, api); err != nil {
		log.Fatal(err)
	}
	buf.ReadFrom(tmp)
	// Format; a failure here means the template produced invalid Go.
	formated, err := format.Source(buf.Bytes())
	if err != nil {
		log.Fatal(err)
	}
	err = ioutil.WriteFile(fmt.Sprintf("%s_gen.go", strings.ToLower(api.Name)), formated, 0644)
	return
}
// main scrapes the doc index, then generates one Go file per discovered API.
// Fixes: the goquery parse error and per-API generation errors were
// previously discarded.
func main() {
	log.SetFlags(0)
	log.SetPrefix("doc gen: ")
	flag.Parse()
	file, err := os.Open(*docIndex)
	if err != nil {
		panic(err)
	}
	defer file.Close()
	doc, err := goquery.NewDocumentFromReader(file)
	if err != nil {
		log.Fatal(err)
	}
	doc.Find("#api tbody tr").Each(extractAPI)
	log.Printf("Total %d api", len(apis))
	for _, a := range apis {
		if err := genAPI(a); err != nil {
			log.Fatal(err)
		}
	}
}
// header is the fixed preamble written at the top of every generated file;
// its import list must cover everything apiRequestTpl's output references.
const header = `
// DO NOT EDIT
// This file is automatically generated by gen.go
// go run gen.go
//
package xc
import (
"net/url"
"encoding/json"
"net/http"
)
`
// convertElemType maps a doc-side type name to the Go type emitted in the
// generated code. Unknown type names yield the empty string.
func convertElemType(p *Elem) (dst string) {
	if p.Type == "String" {
		return "string"
	}
	if p.Type == "Integer" {
		return "int"
	}
	if p.Type == "Array" {
		// Arrays become slices of pointers to a struct named after the field;
		// the struct itself comes from the ExtStructs section of the template.
		return fmt.Sprintf("[]*%s", p.Name)
	}
	return ""
}
|
package requests
import (
"fmt"
"net/url"
"strings"
"github.com/google/go-querystring/query"
"github.com/atomicjolt/canvasapi"
)
// GetFormattedStudentNumericalAnswer Matches the intended behavior of the UI when a numerical answer is entered
// and returns the resulting formatted number
// https://canvas.instructure.com/doc/api/quiz_submission_questions.html
//
// Path Parameters:
// # Path.QuizSubmissionID (Required) ID
// # Path.ID (Required) ID
//
// Query Parameters:
// # Query.Answer (Required) no description
//
// GetFormattedStudentNumericalAnswer carries the path and query parameters
// for the "formatted student numerical answer" endpoint (see the doc
// comment above for the Canvas API reference).
type GetFormattedStudentNumericalAnswer struct {
	Path struct {
		QuizSubmissionID string `json:"quiz_submission_id" url:"quiz_submission_id,omitempty"` // (Required)
		ID               string `json:"id" url:"id,omitempty"`                                 // (Required)
	} `json:"path"`

	Query struct {
		Answer float64 `json:"answer" url:"answer,omitempty"` // (Required)
	} `json:"query"`
}
// GetMethod returns the HTTP verb used for this request.
func (t *GetFormattedStudentNumericalAnswer) GetMethod() string {
	const method = "GET"
	return method
}
// GetURLPath builds the endpoint path by substituting both path parameters
// into the route template.
func (t *GetFormattedStudentNumericalAnswer) GetURLPath() string {
	replacer := strings.NewReplacer(
		"{quiz_submission_id}", fmt.Sprintf("%v", t.Path.QuizSubmissionID),
		"{id}", fmt.Sprintf("%v", t.Path.ID),
	)
	return replacer.Replace("quiz_submissions/{quiz_submission_id}/questions/{id}/formatted_answer")
}
// GetQuery URL-encodes t.Query (via its `url` struct tags) into a query string.
func (t *GetFormattedStudentNumericalAnswer) GetQuery() (string, error) {
	values, err := query.Values(t.Query)
	if err != nil {
		return "", err
	}
	encoded := values.Encode()
	return encoded, nil
}
// GetBody returns the form body for this request; this GET endpoint has none.
func (t *GetFormattedStudentNumericalAnswer) GetBody() (url.Values, error) {
	return nil, nil
}

// GetJSON returns the JSON body for this request; this GET endpoint has none.
func (t *GetFormattedStudentNumericalAnswer) GetJSON() ([]byte, error) {
	return nil, nil
}
// HasErrors validates that every required path parameter is set, returning
// a single error naming all missing fields, or nil when the request is valid.
func (t *GetFormattedStudentNumericalAnswer) HasErrors() error {
	errs := []string{}
	if t.Path.QuizSubmissionID == "" {
		errs = append(errs, "'Path.QuizSubmissionID' is required")
	}
	if t.Path.ID == "" {
		errs = append(errs, "'Path.ID' is required")
	}
	if len(errs) > 0 {
		// Use an explicit "%s" verb: passing the joined string as the format
		// argument would misinterpret any '%' in it (go vet printf warning).
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do sends the request through the given Canvas client, discarding the
// response body and reporting only the transport/validation error.
func (t *GetFormattedStudentNumericalAnswer) Do(c *canvasapi.Canvas) error {
	_, err := c.SendRequest(t)
	return err
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package wifiutil
import (
"bytes"
"context"
"net"
"time"
"github.com/golang/protobuf/ptypes/empty"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"chromiumos/tast/common/network/iw"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/remote/wificell"
"chromiumos/tast/remote/wificell/hostapd"
"chromiumos/tast/remote/wificell/pcap"
"chromiumos/tast/remote/wificell/router/common/support"
"chromiumos/tast/services/cros/wifi"
"chromiumos/tast/testing"
)
// VerifyMACUsedForScan forces Scan, collects the pcap and checks for
// MAC address used in Probe Requests. If the randomize is turned on
// none of the macs should be used, and if it is turned off then all
// of the Probes should be using MAC from the first element.
func VerifyMACUsedForScan(ctx context.Context, tf *wificell.TestFixture, ap *wificell.APIface,
	name string, randomize bool, macs []net.HardwareAddr) (retErr error) {
	resp, err := tf.WifiClient().SetMACRandomize(ctx, &wifi.SetMACRandomizeRequest{Enable: randomize})
	if err != nil {
		return errors.Wrapf(err, "failed to set MAC randomization to: %t", randomize)
	}
	// Only restore on exit if the setting actually changed.
	if resp.OldSetting != randomize {
		testing.ContextLog(ctx, "Switched MAC randomization for scans to: ", randomize)
		// Always restore the setting on leaving.
		defer func(ctx context.Context, restore bool) {
			if _, err := tf.WifiClient().SetMACRandomize(ctx, &wifi.SetMACRandomizeRequest{Enable: restore}); err != nil {
				retErr = errors.Wrapf(err, "failed to restore MAC randomization setting back to %t", restore)
			}
		}(ctx, resp.OldSetting)
	}
	// Reserve one second of the deadline for the deferred restore above.
	ctx, cancel := ctxutil.Shorten(ctx, time.Second)
	defer cancel()
	// Wait for the current scan to be done (if in progress) to avoid
	// possible scan started before our setting.
	if _, err := tf.WifiClient().WaitScanIdle(ctx, &empty.Empty{}); err != nil {
		return errors.Wrap(err, "failed to wait for current scan to be done")
	}
	timeoutCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
	defer cancel()
	pcapPath, err := ScanAndCollectPcap(timeoutCtx, tf, name, 5, ap.Config().Channel)
	if err != nil {
		return errors.Wrap(err, "failed to collect pcap")
	}
	testing.ContextLog(ctx, "Start analyzing pcap")
	// Keep only FCS-valid, strong-signal probe requests addressed to the
	// wildcard SSID or this AP's SSID.
	filters := []pcap.Filter{
		pcap.RejectLowSignal(),
		pcap.Dot11FCSValid(),
		pcap.TypeFilter(
			layers.LayerTypeDot11MgmtProbeReq,
			func(layer gopacket.Layer) bool {
				ssid, err := pcap.ParseProbeReqSSID(layer.(*layers.Dot11MgmtProbeReq))
				if err != nil {
					testing.ContextLogf(ctx, "Skipped malformed probe request %v: %v", layer, err)
					return false
				}
				// Take the ones with wildcard SSID or SSID of the AP.
				if ssid == "" || ssid == ap.Config().SSID {
					return true
				}
				return false
			},
		),
	}
	packets, err := pcap.ReadPackets(pcapPath, filters...)
	if err != nil {
		return errors.Wrap(err, "failed to read packets")
	}
	if len(packets) == 0 {
		return errors.New("no probe request found in pcap")
	}
	testing.ContextLogf(ctx, "Total %d probe requests found", len(packets))
	for _, p := range packets {
		// Get sender address.
		layer := p.Layer(layers.LayerTypeDot11)
		if layer == nil {
			return errors.Errorf("ProbeReq packet %v does not have Dot11 layer", p)
		}
		dot11, ok := layer.(*layers.Dot11)
		if !ok {
			return errors.Errorf("Dot11 layer output %v not *layers.Dot11", p)
		}
		sender := dot11.Address2
		if randomize {
			// In this case we are checking if MAC from probe does not
			// match any previously known (given in `macs` argument).
			for _, mac := range macs {
				if bytes.Equal(sender, mac) {
					return errors.New("Found a probe request with a known MAC: " + mac.String())
				}
			}
		} else if !bytes.Equal(sender, macs[0]) {
			return errors.Errorf("found a probe request with a different MAC: got %s, want %s", sender, macs[0])
		}
	}
	return nil
}
// ConnectAndCollectPcap sets up a WiFi AP and then asks DUT to connect.
// The path to the packet file and the config of the AP is returned.
// Note: This function assumes that TestFixture spawns Capturer for us.
func ConnectAndCollectPcap(ctx context.Context, tf *wificell.TestFixture, apOps []hostapd.Option) (pcapPath string, apConf *hostapd.Config, err error) {
	// As we'll collect pcap file after APIface and Capturer closed, run it
	// in an inner function so that we can clean up easier with defer.
	capturer, conf, err := func(ctx context.Context) (ret *pcap.Capturer, retConf *hostapd.Config, retErr error) {
		// collectFirstErr keeps only the first cleanup error as the result,
		// while logging every error it is given.
		collectFirstErr := func(err error) {
			if retErr == nil {
				ret = nil
				retConf = nil
				retErr = err
			}
			testing.ContextLog(ctx, "Error in connectAndCollectPcap: ", err)
		}
		testing.ContextLog(ctx, "Configuring WiFi to connect")
		ap, err := tf.ConfigureAP(ctx, apOps, nil)
		if err != nil {
			return nil, nil, errors.Wrap(err, "failed to configure AP")
		}
		defer func(ctx context.Context) {
			if err := tf.DeconfigAP(ctx, ap); err != nil {
				collectFirstErr(errors.Wrap(err, "failed to deconfig AP"))
			}
		}(ctx)
		// Shorten ctx so the deferred DeconfigAP above has time to run.
		ctx, cancel := tf.ReserveForDeconfigAP(ctx, ap)
		defer cancel()
		testing.ContextLog(ctx, "Connecting to WiFi")
		if _, err := tf.ConnectWifiAP(ctx, ap); err != nil {
			return nil, nil, err
		}
		defer func(ctx context.Context) {
			if err := tf.CleanDisconnectWifi(ctx); err != nil {
				collectFirstErr(errors.Wrap(err, "failed to disconnect"))
			}
		}(ctx)
		ctx, cancel = tf.ReserveForDisconnect(ctx)
		defer cancel()
		capturer, ok := tf.Capturer(ap)
		if !ok {
			return nil, nil, errors.New("cannot get the capturer from TestFixture")
		}
		return capturer, ap.Config(), nil
	}(ctx)
	if err != nil {
		return "", nil, err
	}
	// Query the pcap path only after the inner function's defers have torn
	// down the AP and capturer.
	pcapPath, err = capturer.PacketPath(ctx)
	if err != nil {
		return "", nil, err
	}
	return pcapPath, conf, nil
}
// ScanAndCollectPcap requests active scans and collect pcap file on channel ch.
// Path to the pcap file is returned.
func ScanAndCollectPcap(fullCtx context.Context, tf *wificell.TestFixture, name string, scanCount, ch int) (string, error) {
	// The action to perform while the capture is running: trigger
	// scanCount active scans on the DUT.
	action := func(ctx context.Context) error {
		testing.ContextLog(ctx, "Request active scans")
		req := &wifi.RequestScansRequest{Count: int32(scanCount)}
		if _, err := tf.WifiClient().RequestScans(ctx, req); err != nil {
			return errors.Wrap(err, "failed to trigger active scans")
		}
		return nil
	}
	p, err := tf.StandardPcap()
	if err != nil {
		return "", errors.Wrap(err, "unable to get standard pcap device")
	}
	return CollectPcapForAction(fullCtx, p, name, ch, nil, action)
}
// CollectPcapForAction starts a capture on the specified channel, performs a
// custom action, and then stops the capture. The path to the pcap file is
// returned.
func CollectPcapForAction(fullCtx context.Context, rt support.Capture, name string, ch int, freqOps []iw.SetFreqOption, action func(context.Context) error) (string, error) {
	// Start/stop in an inner function so the capturer is fully stopped
	// before we ask for the pcap path.
	capturer, err := func() (ret *pcap.Capturer, retErr error) {
		capturer, err := rt.StartCapture(fullCtx, name, ch, freqOps)
		if err != nil {
			return nil, errors.Wrap(err, "failed to start capturer")
		}
		defer func() {
			if err := rt.StopCapture(fullCtx, capturer); err != nil {
				if retErr == nil {
					ret = nil
					retErr = errors.Wrap(err, "failed to stop capturer")
				} else {
					// Keep the earlier error as the result; log this one.
					testing.ContextLog(fullCtx, "Failed to stop capturer: ", err)
				}
			}
		}()
		// Shorten the context given to action so StopCapture has time to run.
		ctx, cancel := rt.ReserveForStopCapture(fullCtx, capturer)
		defer cancel()
		if err := action(ctx); err != nil {
			return nil, err
		}
		return capturer, nil
	}()
	if err != nil {
		return "", err
	}
	// Return the path where capturer saves the pcap.
	return capturer.PacketPath(fullCtx)
}
|
package v1alpha1
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"code.cloudfoundry.org/quarks-operator/pkg/kube/apis"
)
// This file is safe to edit.
// It's used as input for the Kube code generator.
// Run "make generate" after modifying this file.

// DefaultZoneNodeLabel is the default node label for available zones.
const DefaultZoneNodeLabel = "failure-domain.beta.kubernetes.io/zone"

// Annotation and label keys, all namespaced under the operator's API group.
var (
	// AnnotationVersion is the annotation key for the StatefulSet version
	AnnotationVersion = fmt.Sprintf("%s/version", apis.GroupName)
	// AnnotationZones is an array of all zones
	AnnotationZones = fmt.Sprintf("%s/zones", apis.GroupName)
	// LabelAZIndex is the index of available zone
	LabelAZIndex = fmt.Sprintf("%s/az-index", apis.GroupName)
	// LabelAZName is the name of available zone
	LabelAZName = fmt.Sprintf("%s/az-name", apis.GroupName)
	// LabelPodOrdinal is the index of pod ordinal
	LabelPodOrdinal = fmt.Sprintf("%s/pod-ordinal", apis.GroupName)
	// LabelQStsName is the name of the QuarksStatefulSet that owns this resource
	LabelQStsName = fmt.Sprintf("%s/quarks-statefulset-name", apis.GroupName)
	// LabelActivePod is the active pod on an active/passive setup
	LabelActivePod = fmt.Sprintf("%s/pod-active", apis.GroupName)
)
// QuarksStatefulSetSpec defines the desired state of QuarksStatefulSet.
type QuarksStatefulSetSpec struct {
	// Indicates whether to update Pods in the StatefulSet when an env value or mount changes
	UpdateOnConfigChange bool `json:"updateOnConfigChange"`

	// Indicates the node label that a node locates
	ZoneNodeLabel string `json:"zoneNodeLabel,omitempty"`

	// Indicates the availability zones that the QuarksStatefulSet needs to span
	Zones []string `json:"zones,omitempty"`

	// Defines a regular StatefulSet template
	Template appsv1.StatefulSet `json:"template"`

	// Periodic probe for active/passive containers
	// Only an active container will process request from a service
	ActivePassiveProbes map[string]corev1.Probe `json:"activePassiveProbes,omitempty"`
}

// QuarksStatefulSetStatus defines the observed state of QuarksStatefulSet.
type QuarksStatefulSetStatus struct {
	// Timestamp for the last reconcile
	LastReconcile *metav1.Time `json:"lastReconcile"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// QuarksStatefulSet is the Schema for the QuarksStatefulSet API
// +k8s:openapi-gen=true
type QuarksStatefulSet struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   QuarksStatefulSetSpec   `json:"spec,omitempty"`
	Status QuarksStatefulSetStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// QuarksStatefulSetList contains a list of QuarksStatefulSet
type QuarksStatefulSetList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

	Items []QuarksStatefulSet `json:"items"`
}
// GetMaxAvailableVersion returns the greatest version number in versions
// that is marked available, or 0 when none is.
func (q *QuarksStatefulSet) GetMaxAvailableVersion(versions map[int]bool) int {
	best := 0
	for candidate, available := range versions {
		if available && candidate > best {
			best = candidate
		}
	}
	return best
}
// GetNamespacedName returns the resource identifier in "namespace/name" form.
func (q *QuarksStatefulSet) GetNamespacedName() string {
	return q.Namespace + "/" + q.Name
}
|
// Copyright 2021 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package componentactor
import (
"errors"
"fmt"
"time"
cfgpb "go.chromium.org/luci/cv/api/config/v2"
"go.chromium.org/luci/cv/internal/changelist"
"go.chromium.org/luci/cv/internal/prjmanager/prjpb"
"go.chromium.org/luci/cv/internal/run"
)
// triageDeps triages deps of a PCL. See triagedDeps for documentation.
func (a *Actor) triageDeps(pcl *prjpb.PCL, cgIndex int32) *triagedDeps {
	cg := a.s.ConfigGroup(cgIndex).Content
	res := &triagedDeps{}
	for _, dep := range pcl.GetDeps() {
		dPCL := a.s.PCL(dep.GetClid())
		res.categorize(pcl, cgIndex, cg, dPCL, dep)
		// Track the latest trigger time across *all* triggered deps,
		// irrespective of the category each dep landed in.
		if tPB := dPCL.GetTrigger().GetTime(); tPB != nil {
			if t := tPB.AsTime(); res.lastTriggered.IsZero() || res.lastTriggered.Before(t) {
				res.lastTriggered = t
			}
		}
	}
	return res
}
// triagedDeps categorizes deps of a CL, referred to below as the "dependent" CL.
//
// Categories are exclusive. Non-submitted OK deps are not recorded here to
// avoid unnecesary allocations in the most common case, but they do affect
// lastTriggered time.
type triagedDeps struct {
	// lastTriggered among *all* deps which are triggered. Can be Zero time if no
	// dep is triggered.
	lastTriggered time.Time

	// submitted are already submitted deps watched by this project, though not
	// necessarily the same config group as the dependent CL. These deps are OK.
	submitted []*changelist.Dep
	// notYetLoaded means that more specific category isn't yet known.
	notYetLoaded []*changelist.Dep

	// Not OK deps, see also OK() function.

	// unwatched deps are not watched by the same project as the dependent CL.
	unwatched []*changelist.Dep
	// wrongConfigGroup deps is watched by at least 1 different config group.
	wrongConfigGroup []*changelist.Dep
	// incompatMode are deps, possibly not even triggered, whose mode is not
	// compatible with the dependent CL.
	incompatMode []*changelist.Dep
}
// OK is true if triagedDeps doesn't have any not-OK deps.
func (t *triagedDeps) OK() bool {
	return len(t.unwatched) == 0 &&
		len(t.wrongConfigGroup) == 0 &&
		len(t.incompatMode) == 0
}
// makePurgeReason packages the not-OK dep categories into a CLError suitable
// for purging the dependent CL. Panics when called on an OK triagedDeps.
func (t *triagedDeps) makePurgeReason() *changelist.CLError {
	if t.OK() {
		panic("makePurgeReason must be called only iff !OK")
	}
	invalid := &changelist.CLError_InvalidDeps{
		Unwatched:        t.unwatched,
		IncompatMode:     t.incompatMode,
		WrongConfigGroup: t.wrongConfigGroup,
	}
	return &changelist.CLError{
		Kind: &changelist.CLError_InvalidDeps_{InvalidDeps: invalid},
	}
}
// categorize adds dep to the applicable slice (if any).
//
// pcl is dependent PCL, which must be triggered.
// Its dep is represented by dPCL.
func (t *triagedDeps) categorize(pcl *prjpb.PCL, cgIndex int32, cg *cfgpb.ConfigGroup, dPCL *prjpb.PCL, dep *changelist.Dep) {
	// A nil dPCL means the dep's PCL hasn't been loaded yet at all.
	if dPCL == nil {
		t.notYetLoaded = append(t.notYetLoaded, dep)
		return
	}
	switch s := dPCL.GetStatus(); s {
	case prjpb.PCL_UNKNOWN:
		t.notYetLoaded = append(t.notYetLoaded, dep)
		return
	case prjpb.PCL_UNWATCHED, prjpb.PCL_DELETED:
		// PCL deleted from Datastore should not happen outside of project
		// re-enablement, so it's OK to treat the same as PCL_UNWATCHED for
		// simplicity.
		t.unwatched = append(t.unwatched, dep)
		return
	case prjpb.PCL_OK:
		// Happy path; continue after the switch.
	default:
		panic(fmt.Errorf("unrecognized CL %d dep %d status %s", pcl.GetClid(), dPCL.GetClid(), s))
	}
	// CL is watched by this LUCI project.
	if dPCL.GetSubmitted() {
		// Submitted CL may no longer be in the expected ConfigGroup,
		// but since it's in the same project, it's OK to refer to it as it doesn't
		// create an information leak.
		t.submitted = append(t.submitted, dep)
		return
	}
	switch cgIndexes := dPCL.GetConfigGroupIndexes(); len(cgIndexes) {
	case 0:
		panic(fmt.Errorf("At least one ConfigGroup index required for watched dep PCL %d", dPCL.GetClid()))
	case 1:
		if cgIndexes[0] != cgIndex {
			t.wrongConfigGroup = append(t.wrongConfigGroup, dep)
			return
		}
		// Happy path; continue after the switch.
	default:
		// Strictly speaking, it may be OK iff dependentCGIndex is matched among
		// other config groups. However, there is no compelling use-case for
		// depending on a CL which matches several config groups. So, for
		// compatibility with CQDaemon, be strict.
		t.wrongConfigGroup = append(t.wrongConfigGroup, dep)
		return
	}
	// Trigger-mode compatibility depends on whether this config group
	// combines CLs into multi-CL Runs.
	tr := pcl.GetTrigger()
	dtr := dPCL.GetTrigger()
	if cg.GetCombineCls() == nil {
		t.categorizeSingle(tr, dtr, dep)
	} else {
		t.categorizeCombinable(tr, dtr, dep)
	}
}
// categorizeCombinable checks trigger-mode compatibility of a dep when the
// config group has combine_cls enabled: only an identical mode is OK.
func (t *triagedDeps) categorizeCombinable(tr, dtr *run.Trigger, dep *changelist.Dep) {
	// During the `combine_cls.stablization_delay` since the last triggered CL in
	// a group, a user can change their mind. Since the full group of CLs isn't
	// known here, categorization decision may or may not be final.
	switch {
	case dtr.GetMode() == tr.GetMode():
		return // Happy path.
	case dtr == nil:
		// NOTE(review): this case is checked after the GetMode comparison
		// above; it only behaves as written because proto getters are
		// nil-safe. A nil dtr with a differing mode ends up here or in
		// default — both append to incompatMode, so the result is the same.
		t.incompatMode = append(t.incompatMode, dep)
		return
	default:
		// TODO(tandrii): support dry run on dependent and full Run on dep.
		// For example, on a CL stack:
		//      CL | Mode
		//       D   CQ+1
		//       C   CQ+1
		//       B   CQ+2
		//       A   CQ+2
		//  (base)    -
		// D+C+B+A are can be dry-run-ed and B+A can be CQ+2ed at the same time
		t.incompatMode = append(t.incompatMode, dep)
		return
	}
}
// categorizeSingle checks trigger-mode compatibility of a dep when the
// config group runs single-CL Runs (no combine_cls): dry runs tolerate any
// dep, full runs currently tolerate none.
func (t *triagedDeps) categorizeSingle(tr, dtr *run.Trigger, dep *changelist.Dep) {
	// dependent is guaranteed non-nil.
	switch mode := run.Mode(tr.GetMode()); mode {
	case run.DryRun, run.QuickDryRun:
		return // OK.
	case run.FullRun:
		// TODO(tandrii): find bug about better handling of stacks in single-CL Run case.
		// TODO(tandrii): allow this if dep's mode is also FullRun.
		t.incompatMode = append(t.incompatMode, dep)
		return
	default:
		panic(fmt.Errorf("unknown dependent mode %v", tr))
	}
}
// iterateNotSubmitted calls clbk per each dep which isn't submitted.
//
// Must be called only if all deps are OK (submitted or notYetLoaded is fine)
// and with the same PCL as was used to construct the triagedDeps.
func (t *triagedDeps) iterateNotSubmitted(pcl *prjpb.PCL, clbk func(dep *changelist.Dep)) {
	if !t.OK() {
		panic(fmt.Errorf("iterateNotSubmitted called on non-OK triagedDeps (PCL %d)", pcl.GetClid()))
	}
	// Because construction of triagedDeps is in order of PCL's Deps, the
	// submitted must be a sub-sequence of Deps and we can compare just Dep
	// pointers.
	all, subs := pcl.GetDeps(), t.submitted
	for {
		switch {
		case len(subs) == 0:
			// No submitted deps remain: everything left is not submitted.
			for _, dep := range all {
				clbk(dep)
			}
			return
		case len(all) == 0:
			panic(errors.New("must not happen because submitted must be a subset of all deps (wrong PCL?)"))
		default:
			if all[0] == subs[0] {
				// Matched the next submitted dep; skip it.
				subs = subs[1:]
			} else {
				clbk(all[0])
			}
			all = all[1:]
		}
	}
}
|
package main
import (
"bufio"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
)
// Platform-sized integer limits.
// NOTE(review): unused in this file's main — possibly left over from an
// earlier part of the puzzle.
const MaxUint = ^uint(0)
const MaxInt = int(MaxUint >> 1)

// Point2d is one input coordinate, tagged with a sequential Id.
type Point2d struct {
	Id int
	X  int
	Y  int
}

// String renders the point as "{Id, X, Y}" for debugging output.
func (p Point2d) String() string {
	return fmt.Sprintf("{%d, %d, %d}", p.Id, p.X, p.Y)
}
// main reads "X, Y" coordinate lines from ./testInput and counts the grid
// cells whose total Manhattan distance to all points is under 10000
// (Advent of Code 2018 day 6, part 2 style).
//
// Fixes: the program previously kept running after a failed os.Open
// (operating on a nil file), and never checked scanner.Err.
func main() {
	// parse input into array of Point2d
	inputFile, _ := filepath.Abs("./testInput")
	file, err := os.Open(inputFile)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer file.Close()
	currentId := 0
	var points []*Point2d
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		points = append(points, parsePoint(scanner.Text(), currentId))
		currentId++
	}
	if err := scanner.Err(); err != nil {
		fmt.Println(err)
		return
	}
	// Bounding box of the input; the safe region lies within it.
	maxX := 0
	maxY := 0
	for _, point := range points {
		if point.X >= maxX {
			maxX = point.X
		}
		if point.Y >= maxY {
			maxY = point.Y
		}
	}
	resultRegion := 0
	// NOTE(review): bounds are exclusive (< maxX / < maxY), so cells on the
	// far edge of the box are skipped — confirm against expected answer.
	for x := 0; x < maxX; x++ {
		for y := 0; y < maxY; y++ {
			if getDistanceFromAllPoints(Point2d{-1, x, y}, points) < 10000 {
				resultRegion++
			}
		}
	}
	fmt.Printf("result region size: %d\n", resultRegion)
}
// getDistanceFromAllPoints sums the Manhattan distance from start to every
// point in points.
func getDistanceFromAllPoints(start Point2d, points []*Point2d) int {
	total := 0
	for _, p := range points {
		total += p.taxiDistanceFrom(start)
	}
	return total
}
// taxiDistanceFrom returns the Manhattan (taxicab) distance between p and q.
func (p Point2d) taxiDistanceFrom(q Point2d) int {
	abs := func(n int) int {
		if n < 0 {
			return -n
		}
		return n
	}
	return abs(p.X-q.X) + abs(p.Y-q.Y)
}
// parsePoint parses one input line of the form "X, Y" into a Point2d with
// the given id.
// NOTE(review): Atoi errors are ignored, so a malformed line silently
// becomes coordinate 0 — acceptable for trusted puzzle input, but confirm.
func parsePoint(rawPoint string, currentId int) *Point2d {
	rawStrings := strings.Split(rawPoint, ", ")
	convX, _ := strconv.Atoi(rawStrings[0])
	convY, _ := strconv.Atoi(rawStrings[1])
	return &Point2d{
		Id: currentId,
		X:  convX,
		Y:  convY,
	}
}
// isInfinite reports whether point lies on the bounding box edge.
// NOTE(review): unused by this file's main — likely a part-1 leftover.
func isInfinite(point *Point2d, minX int, maxX int, minY int, maxY int) bool {
	return (point.X == minX || point.X == maxX || point.Y == minY || point.Y == maxY)
}
|
// Copyright (c) 2018-present, MultiVAC Foundation.
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
package wire
import (
"fmt"
"io"
"github.com/multivactech/MultiVAC/model/shard"
"github.com/multivactech/MultiVAC/base/rlp"
)
// MsgBinaryBA indicates binary Byzantine agreement message.
type MsgBinaryBA struct {
	// InShardProof is the in-shard proof attached to this message
	// (see GetInShardProofs).
	InShardProof []byte
	// SignedCredentialWithBA carries the signed credential plus BA payload.
	SignedCredentialWithBA *SignedMsg `rlp:"nil"`
}
// BtcDecode decode the message. The wire format is RLP, matching BtcEncode.
func (m *MsgBinaryBA) BtcDecode(r io.Reader, _ uint32, _ MessageEncoding) error {
	return rlp.Decode(r, m)
}

// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// Here that is a plain RLP encoding of the whole struct.
func (m *MsgBinaryBA) BtcEncode(w io.Writer, _ uint32, _ MessageEncoding) error {
	return rlp.Encode(w, m)
}

// Command returns the protocol command string for the message.
func (m *MsgBinaryBA) Command() string {
	return CmdBinaryBA
}

// MaxPayloadLength returns the maximum length the payload can be for the
// receiver.
func (m *MsgBinaryBA) MaxPayloadLength(uint32) uint32 {
	// TODO Change it to a proper value.
	return LargeMaxPayloadLength
}
// GetRawB get B of credentialwithBA, wrapped as a one-byte slice.
func (m *MsgBinaryBA) GetRawB() []byte {
	// TODO: decode m.B.Message to get a boolean value
	return []byte{m.SignedCredentialWithBA.Message.CredentialWithBA.B}
}

// GetRawV get V of credentialwithBA.
// NOTE(review): this boxes V into an interface{} and type-asserts []byte;
// unless V's dynamic type is actually []byte this panics at runtime —
// confirm intended usage.
func (m *MsgBinaryBA) GetRawV() []byte {
	return (interface{})(m.SignedCredentialWithBA.Message.CredentialWithBA.V).([]byte)
}

// GetBValue returns the B value from credentialWithBA.
func (m *MsgBinaryBA) GetBValue() byte {
	return m.SignedCredentialWithBA.Message.CredentialWithBA.B
}

// GetByzAgreementValue get the agreement value from credential.
func (m *MsgBinaryBA) GetByzAgreementValue() ByzAgreementValue {
	return m.SignedCredentialWithBA.Message.CredentialWithBA.V
}

// GetStep returns step of the credential.
func (m *MsgBinaryBA) GetStep() int {
	return int(m.SignedCredentialWithBA.Message.CredentialWithBA.Step)
}

// GetRound returns the credential's round.
func (m *MsgBinaryBA) GetRound() int {
	return int(m.SignedCredentialWithBA.Message.CredentialWithBA.Round)
}

// GetShardIndex returns the shardIndex.
func (m *MsgBinaryBA) GetShardIndex() shard.Index {
	return m.SignedCredentialWithBA.Message.CredentialWithBA.ShardIndex
}
// String returns a human-readable summary of the message.
func (m MsgBinaryBA) String() string {
	return fmt.Sprintf("MsgBinaryBA {shardIndex:%v, Round:%v, Step:%v, B:%v, v:%v, SignedCredentialWithBA:%v}",
		m.GetShardIndex(), m.GetRound(), m.GetStep(), m.GetBValue(), m.GetByzAgreementValue(), m.SignedCredentialWithBA.Message)
}

// NewMessageBinaryBA create a MsgBinaryBA with given params.
func NewMessageBinaryBA(inshardProof []byte, signedMsg *SignedMsg) *MsgBinaryBA {
	return &MsgBinaryBA{
		InShardProof:           inshardProof,
		SignedCredentialWithBA: signedMsg,
	}
}

// GetSignedCredential returns the SignedCredential.
func (m *MsgBinaryBA) GetSignedCredential() *SignedMsg {
	return m.SignedCredentialWithBA
}

// GetInShardProofs returns message's InShardProof wrapped in a slice.
func (m *MsgBinaryBA) GetInShardProofs() [][]byte {
	return [][]byte{m.InShardProof}
}

// IsValidated Verify the signature of ByzantineValue.
func (m *MsgBinaryBA) IsValidated() error {
	return isValidSignedCredential(m.SignedCredentialWithBA.Message.CredentialWithBA, m.SignedCredentialWithBA)
}

// Sign Message with the secret key sk.
func (m *MsgBinaryBA) Sign(sk []byte) (err error) {
	return m.SignedCredentialWithBA.sign(m.SignedCredentialWithBA.Message.CredentialWithBA, sk)
}
// MsgBinaryBAFin indicates last step binary Byzantine agreement message.
// It embeds MsgBinaryBA, overriding only the codec and command methods.
type MsgBinaryBAFin struct {
	*MsgBinaryBA
}

// BtcDecode decode the message (RLP).
func (m *MsgBinaryBAFin) BtcDecode(r io.Reader, _ uint32, _ MessageEncoding) error {
	return rlp.Decode(r, m)
}

// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
func (m *MsgBinaryBAFin) BtcEncode(w io.Writer, _ uint32, _ MessageEncoding) error {
	return rlp.Encode(w, m)
}

// Command returns the protocol command string for the message.
func (m *MsgBinaryBAFin) Command() string {
	return CmdBinaryBAFin
}

// NewMessageBinaryBAFin create a MsgBinaryBAFin with given MsgBinaryBA with steps less than 0.
func NewMessageBinaryBAFin(message *MsgBinaryBA) *MsgBinaryBAFin {
	return &MsgBinaryBAFin{MsgBinaryBA: message}
}
|
package sqrtx
import (
"testing"
)
// TestMySqrt checks mySqrt against known floor-integer square roots,
// including 0 and non-perfect squares.
func TestMySqrt(t *testing.T) {
	cases := []struct {
		in   int
		want int
	}{
		{1000001, 1000},
		{8, 2},
		{144, 12},
		{0, 0},
		{4, 2},
	}
	for _, c := range cases {
		if got := mySqrt(c.in); got != c.want {
			t.Errorf("sqrtx(%d)=%d, want %d", c.in, got, c.want)
		}
	}
}
|
package consistent
import (
"bytes"
"encoding/binary"
"fmt"
"github.com/cespare/xxhash"
"sort"
"sync"
)
// Hashing is a consistent-hashing ring: nodes can be added and removed,
// and keys are mapped onto node IDs.
type Hashing interface {
	AddNode(nodeID uint64)
	RemoveNode(nodeID uint64)
	GetNode(key interface{}) uint64
	GetNodes(key interface{}, num int) []uint64
}

// uint64Slice is a sortable slice of virtual-node hashes; it implements
// sort.Interface (see Len/Less/Swap below).
type uint64Slice []uint64

// Consistent implements consistent hashing with virtual nodes.
type Consistent struct {
	mu sync.RWMutex
	// ring holds all virtual-node IDs in ascending order.
	ring uint64Slice
	// numVNodes is the number of virtual nodes created per real node.
	numVNodes int
	// vNodeToNode maps each virtual-node ID back to its real node.
	vNodeToNode map[uint64]uint64
	// nodeToVNodes maps each real node to all of its virtual-node IDs.
	nodeToVNodes map[uint64][]uint64
	// hash is the 64-bit hash function used throughout.
	hash func(key []byte) uint64
}
// Len, Less and Swap implement sort.Interface for uint64Slice.
func (r uint64Slice) Len() int           { return len(r) }
func (r uint64Slice) Less(i, j int) bool { return r[i] < r[j] }
func (r uint64Slice) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }

// defaultHash hashes key with 64-bit xxhash.
func defaultHash(key []byte) uint64 {
	return xxhash.Sum64(key)
}
// NewConsistent returns an empty ring using the default (xxhash) hash,
// with numVNodes virtual nodes per added node.
func NewConsistent(numVNodes int) *Consistent {
	return NewConsistentWithHash(numVNodes, defaultHash)
}

// NewConsistentWithHash is like NewConsistent but uses the caller-supplied
// 64-bit hash function.
func NewConsistentWithHash(numVNodes int, hash func(key []byte) uint64) *Consistent {
	return &Consistent{
		ring:         make(uint64Slice, 0),
		numVNodes:    numVNodes,
		vNodeToNode:  make(map[uint64]uint64),
		nodeToVNodes: make(map[uint64][]uint64),
		hash:         hash,
	}
}
// insert returns slice with value placed at position index, shifting the
// tail one slot to the right. The backing array is reused when it has spare
// capacity.
func insert(slice []uint64, index int, value uint64) []uint64 {
	if index == len(slice) {
		// Appending at the end needs no shifting.
		return append(slice, value)
	}
	slice = append(slice, 0) // grow by one slot
	copy(slice[index+1:], slice[index:])
	slice[index] = value
	return slice
}
// hashUInt64 hashes a uint64 key by running its little-endian byte
// representation through the ring's hash function.
func (r *Consistent) hashUInt64(key uint64) uint64 {
	bs := make([]byte, 8)
	binary.LittleEndian.PutUint64(bs, key)
	return r.hash(bs)
}

// hashSupported hashes an arbitrary key: int, uint and string are handled
// directly; any other type is serialized via binary.Write first, which only
// supports fixed-size values (or slices/pointers of such).
func (r *Consistent) hashSupported(key interface{}) uint64 {
	switch v := key.(type) {
	case int:
		return r.hashUInt64(uint64(v))
	case uint:
		return r.hashUInt64(uint64(v))
	case string:
		return r.hash([]byte(v))
	}
	// binary.Write() can handle a fixed-size value,
	// or a slice of fixed-size values,
	// or a pointer to such data.
	buf := new(bytes.Buffer)
	err := binary.Write(buf, binary.LittleEndian, key)
	if err != nil {
		// binary.Write to a Buffer should never fail
		// with supported type, so error should not
		// be considered as a runtime error
		panic(fmt.Sprint("binary.Write failed: ", err))
	}
	return r.hash(buf.Bytes())
}
// findVNode returns the index of the first virtual node with ID >= key, or
// len(r.ring) if no such node exists (binary search over the sorted ring).
func (r *Consistent) findVNode(key uint64) int {
	return sort.Search(len(r.ring), func(i int) bool { return r.ring[i] >= key })
}

// addVNode inserts a virtual-node ID into the ring, keeping it sorted.
func (r *Consistent) addVNode(vNodeID uint64) {
	index := r.findVNode(vNodeID)
	r.ring = insert(r.ring, index, vNodeID)
}
// AddNode inserts nodeID into the ring together with r.numVNodes virtual
// nodes. Virtual-node IDs are derived by repeatedly hashing the node ID, so
// the layout is deterministic. Adding an already-present node is a no-op.
func (r *Consistent) AddNode(nodeID uint64) {
	r.mu.Lock()
	defer r.mu.Unlock()
	// do nothing if node already exists
	if _, exists := r.nodeToVNodes[nodeID]; exists {
		return
	}
	// generate a list of virtual nodes by chained hashing:
	// vNodeID_0 = hash(nodeID), vNodeID_{k+1} = hash(vNodeID_k)
	vNodes := make([]uint64, r.numVNodes)
	vNodeID := nodeID
	for i := range vNodes {
		vNodeID = r.hashUInt64(vNodeID)
		if _, exists := r.vNodeToNode[vNodeID]; exists {
			// This Case Should NOT Happen in Practice
			//
			// Since we are using 64-bit hash (assume uniformed),
			// even with 6100 virtual nodes,
			// the probability of collision is < 10^(-12).
			// see Birthday Attack for more details
			//
			// And the consistent hashing is only controlled by the system,
			// and only modified when changing cluster configuration,
			// so it is not exposed to external hash collision attack.
			//
			// Due to the low possibility, just let it CRASH.
			// Because handling it may cause more problems,
			// for example, if rehashing is used, adding nodes in different
			// order may result in inconsistent hash function
			panic("duplicated vNode")
		}
		vNodes[i] = vNodeID
		// save mapping from virtual node to node
		r.vNodeToNode[vNodeID] = nodeID
		// add virtual node to Consistent
		r.addVNode(vNodeID)
	}
	// save mapping from node to virtual nodes
	r.nodeToVNodes[nodeID] = vNodes
}
// remove deletes the element at index, shifting the tail left in place and
// shrinking the slice length by one (the backing array is reused).
func remove(slice []uint64, index int) []uint64 {
	copy(slice[index:], slice[index+1:])
	return slice[:len(slice)-1]
}
// removeVNode deletes a virtual-node ID from the sorted ring. The ID must be
// present; otherwise the entry at the search position would be removed
// instead (callers only pass IDs recorded in nodeToVNodes).
func (r *Consistent) removeVNode(vNodeID uint64) {
	index := r.findVNode(vNodeID)
	r.ring = remove(r.ring, index)
}

// RemoveNode deletes nodeID and all of its virtual nodes from the ring.
// Removing an unknown node is a no-op.
func (r *Consistent) RemoveNode(nodeID uint64) {
	r.mu.Lock()
	defer r.mu.Unlock()
	vNodes, ok := r.nodeToVNodes[nodeID]
	if !ok {
		return
	}
	for _, vNodeID := range vNodes {
		// remove node from ring
		r.removeVNode(vNodeID)
		// delete vNode to Node mapping
		delete(r.vNodeToNode, vNodeID)
	}
	// delete Node to VNodes mapping
	delete(r.nodeToVNodes, nodeID)
}
// GetNode returns the single node responsible for key. It panics when the
// ring is empty, since there is no valid node to return.
//
// Locking is delegated to GetNodes: taking the read lock here and again in
// GetNodes would recursively RLock the same RWMutex, which the sync package
// documents as a potential deadlock when a writer (AddNode/RemoveNode)
// queues between the two acquisitions.
func (r *Consistent) GetNode(key interface{}) uint64 {
	nodes := r.GetNodes(key, 1)
	if len(nodes) != 1 {
		panic("getting node on an empty consistent hashing")
	}
	return nodes[0]
}
// GetNodes returns up to num distinct physical nodes responsible for key,
// walking clockwise around the ring from the key's hash position. An empty
// ring yields an empty slice; fewer than num nodes are returned only when
// fewer distinct nodes exist.
func (r *Consistent) GetNodes(key interface{}, num int) []uint64 {
	r.mu.RLock()
	defer r.mu.RUnlock()
	// return empty list if empty
	numNodes := len(r.nodeToVNodes)
	if numNodes == 0 {
		return make([]uint64, 0)
	}
	// Clamp num so the walk below always terminates: at most numNodes
	// distinct physical nodes can ever be collected.
	if numNodes < num {
		num = numNodes
	}
	// get key hash in uint64
	keyHash := r.hashSupported(key)
	// find the first vNode at or after the key's position (wrapping around)
	numVNodes := len(r.vNodeToNode)
	firstVNodeIndex := r.findVNode(keyHash) % numVNodes
	// walk the ring, collecting distinct physical nodes
	nodes := make([]uint64, 0, num)
	nodeSet := make(map[uint64]interface{})
	for i := 0; len(nodes) < num; i++ {
		vNodeIndex := (firstVNodeIndex + i) % numVNodes
		node := r.vNodeToNode[r.ring[vNodeIndex]]
		// prevent duplicate physical node
		if _, exists := nodeSet[node]; exists {
			continue
		}
		nodes = append(nodes, node)
		nodeSet[node] = nil
	}
	return nodes
}
// ExportVirtualNodes returns a snapshot of the node -> virtual-node mapping.
// Both the map and the contained slices are copies, so callers cannot mutate
// the ring's internal state through the result.
func (r *Consistent) ExportVirtualNodes() map[uint64][]uint64 {
	r.mu.RLock()
	defer r.mu.RUnlock()
	snapshot := make(map[uint64][]uint64)
	for nodeID, vNodes := range r.nodeToVNodes {
		dup := make([]uint64, len(vNodes))
		copy(dup, vNodes)
		snapshot[nodeID] = dup
	}
	return snapshot
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
)
// main reads input.csv, where each line is "<timestamp>,<memberSeq>", and
// prints, for every date (the first 10 bytes of the timestamp), the number
// of distinct member sequence numbers seen on that date.
func main() {
	f, err := os.Open("input.csv")
	if err != nil {
		panic(err)
	}
	// Close the file handle when done (it was previously leaked).
	defer f.Close()
	r := bufio.NewReader(f)
	// m maps date -> set of member sequence numbers seen on that date.
	m := make(map[string]map[string]int)
	for {
		date, err := r.ReadString(',')
		if err == io.EOF {
			break
		}
		// Check the error BEFORE slicing: on a failed read, date may hold
		// fewer than 10 bytes and date[:10] would panic.
		if err != nil {
			panic(err)
		}
		date = date[:10]
		memberSeq, err := r.ReadString('\n')
		// Tolerate a final line without a trailing newline: ReadString
		// returns the data read so far together with io.EOF.
		if err != nil && err != io.EOF {
			panic(err)
		}
		if n := len(memberSeq); n > 0 && memberSeq[n-1] == '\n' {
			memberSeq = memberSeq[:n-1]
		}
		if _, ok := m[date]; !ok {
			m[date] = make(map[string]int)
		}
		m[date][memberSeq] = 1
	}
	for k, v := range m {
		fmt.Println(k, len(v))
	}
}
|
import "strconv"
/*
* @lc app=leetcode id=150 lang=golang
*
* [150] Evaluate Reverse Polish Notation
*
* https://leetcode.com/problems/evaluate-reverse-polish-notation/description/
*
* algorithms
* Medium (35.80%)
* Likes: 1031
* Dislikes: 471
* Total Accepted: 229.5K
* Total Submissions: 635.5K
* Testcase Example: '["2","1","+","3","*"]'
*
* Evaluate the value of an arithmetic expression in Reverse Polish Notation.
*
* Valid operators are +, -, *, /. Each operand may be an integer or another
* expression.
*
* Note:
*
*
* Division between two integers should truncate toward zero.
* The given RPN expression is always valid. That means the expression would
* always evaluate to a result and there won't be any divide by zero
* operation.
*
*
* Example 1:
*
*
* Input: ["2", "1", "+", "3", "*"]
* Output: 9
* Explanation: ((2 + 1) * 3) = 9
*
*
* Example 2:
*
*
* Input: ["4", "13", "5", "/", "+"]
* Output: 6
* Explanation: (4 + (13 / 5)) = 6
*
*
* Example 3:
*
*
* Input: ["10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"]
* Output: 22
* Explanation:
* ((10 * (6 / ((9 + 3) * -11))) + 17) + 5
* = ((10 * (6 / (12 * -11))) + 17) + 5
* = ((10 * (6 / -132)) + 17) + 5
* = ((10 * 0) + 17) + 5
* = (0 + 17) + 5
* = 17 + 5
* = 22
*
*
*/
// @lc code=start
// evalRPN evaluates an arithmetic expression in Reverse Polish Notation
// using a stack: operands are pushed, and each operator pops its two
// operands and pushes the result. Integer division truncates toward zero.
// The expression is assumed valid (no divide by zero, well-formed).
func evalRPN(tokens []string) int {
	var stack []int
	for _, tok := range tokens {
		switch tok {
		case "+", "-", "*", "/":
			n := len(stack)
			left, right := stack[n-2], stack[n-1]
			var result int
			switch tok {
			case "+":
				result = left + right
			case "-":
				result = left - right
			case "*":
				result = left * right
			case "/":
				result = left / right
			}
			// Replace the two operands with their result.
			stack = append(stack[:n-2], result)
		default:
			// An operand: parse and push (input is assumed valid).
			value, _ := strconv.Atoi(tok)
			stack = append(stack, value)
		}
	}
	return stack[0]
}
// @lc code=end |
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package workload
import (
"context"
"sync"
"sync/atomic"
"github.com/cockroachdb/cockroach-go/crdb"
"github.com/jackc/pgx"
"golang.org/x/sync/errgroup"
)
// MultiConnPool maintains a set of pgx ConnPools (to different servers).
type MultiConnPool struct {
	// Pools holds one or more pools per URL; see NewMultiConnPool.
	Pools []*pgx.ConnPool
	// Atomic counter used by Get().
	counter uint32
}

// MultiConnPoolCfg encapsulates the knobs passed to NewMultiConnPool.
type MultiConnPoolCfg struct {
	// MaxTotalConnections is the total maximum number of connections across all
	// pools.
	MaxTotalConnections int

	// MaxConnsPerPool is the maximum number of connections in any single pool.
	// Limiting this is useful especially for prepared statements, which are
	// prepared on each connection inside a pool (serially).
	// If 0, there is no per-pool maximum (other than the total maximum number of
	// connections which still applies).
	MaxConnsPerPool int
}
// NewMultiConnPool creates a new MultiConnPool.
//
// Each URL gets one or more pools, and each pool has at most MaxConnsPerPool
// connections.
//
// The pools have approximately the same number of max connections, adding up to
// MaxTotalConnections.
func NewMultiConnPool(cfg MultiConnPoolCfg, urls ...string) (*MultiConnPool, error) {
	m := &MultiConnPool{}
	// Split the total connection budget evenly across URLs.
	connsPerURL := distribute(cfg.MaxTotalConnections, len(urls))
	maxConnsPerPool := cfg.MaxConnsPerPool
	if maxConnsPerPool == 0 {
		maxConnsPerPool = cfg.MaxTotalConnections
	}
	var warmupConns [][]*pgx.Conn
	for i := range urls {
		connCfg, err := pgx.ParseConnectionString(urls[i])
		if err != nil {
			return nil, err
		}
		// A URL whose share exceeds maxConnsPerPool is served by several pools.
		connsPerPool := distributeMax(connsPerURL[i], maxConnsPerPool)
		for _, numConns := range connsPerPool {
			p, err := pgx.NewConnPool(pgx.ConnPoolConfig{
				ConnConfig:     connCfg,
				MaxConnections: numConns,
			})
			if err != nil {
				return nil, err
			}
			warmupConns = append(warmupConns, make([]*pgx.Conn, numConns))
			m.Pools = append(m.Pools, p)
		}
	}
	// "Warm up" the pools so we don't have to establish connections later (which
	// would affect the observed latencies of the first requests, especially when
	// prepared statements are used). We do this by
	// acquiring connections (in parallel), then releasing them back to the
	// pool.
	var g errgroup.Group
	// Limit concurrent connection establishment. Allowing this to run
	// at maximum parallelism would trigger syn flood protection on the
	// host, which combined with any packet loss could cause Acquire to
	// return an error and fail the whole function. The value 100 is
	// chosen because it is less than the default value for SOMAXCONN
	// (128).
	sem := make(chan struct{}, 100)
	for i, p := range m.Pools {
		p := p // capture per-iteration copies for the closures below
		conns := warmupConns[i]
		for j := range conns {
			j := j
			sem <- struct{}{}
			g.Go(func() error {
				var err error
				conns[j], err = p.Acquire()
				<-sem
				return err
			})
		}
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}
	// All connections are established; hand them back to their pools.
	for i, p := range m.Pools {
		for _, c := range warmupConns[i] {
			p.Release(c)
		}
	}
	return m, nil
}
// Get returns one of the pools, in round-robin manner.
func (m *MultiConnPool) Get() *pgx.ConnPool {
	numPools := uint32(len(m.Pools))
	if numPools == 1 {
		// Fast path: no counter bump needed for a single pool.
		return m.Pools[0]
	}
	next := atomic.AddUint32(&m.counter, 1) - 1
	return m.Pools[next%numPools]
}
// PrepareEx prepares the given statement on all the pools (in parallel),
// returning one of the resulting prepared statements; they all contain the
// same information. The first error, if any, is returned.
func (m *MultiConnPool) PrepareEx(
	ctx context.Context, name, sql string, opts *pgx.PrepareExOptions,
) (*pgx.PreparedStatement, error) {
	var res *pgx.PreparedStatement
	var once sync.Once
	var g errgroup.Group
	for _, p := range m.Pools {
		p := p // capture a per-iteration copy for the closure
		g.Go(func() error {
			ps, err := p.PrepareEx(ctx, name, sql, opts)
			if err == nil {
				// It doesn't matter which PreparedStatement we return, they should
				// contain the same information.
				once.Do(func() { res = ps })
			}
			return err
		})
	}
	err := g.Wait()
	return res, err
}

// Close closes all the pools.
func (m *MultiConnPool) Close() {
	for _, p := range m.Pools {
		p.Close()
	}
}
// PgxTx is a thin wrapper that implements the crdb.Tx interface, allowing pgx
// transactions to be used with ExecuteInTx. The cockroach-go library has native
// support for pgx in crdb/pgx, but only for pgx v4. CRDB is stuck for now using
// pgx v3, as v4 needs Go modules.
type PgxTx pgx.Tx

// Compile-time check that *PgxTx satisfies crdb.Tx.
var _ crdb.Tx = &PgxTx{}

// Exec is part of the crdb.Tx interface.
func (tx *PgxTx) Exec(ctx context.Context, sql string, args ...interface{}) error {
	_, err := (*pgx.Tx)(tx).ExecEx(ctx, sql, nil /* QueryExOptions */, args...)
	return err
}

// Commit is part of the crdb.Tx interface.
func (tx *PgxTx) Commit(context.Context) error {
	return (*pgx.Tx)(tx).Commit()
}

// Rollback is part of the crdb.Tx interface.
func (tx *PgxTx) Rollback(context.Context) error {
	return (*pgx.Tx)(tx).Rollback()
}
// distribute returns a slice of num integers that add up to total and are
// within +/-1 of each other (round-robin style even split).
func distribute(total, num int) []int {
	parts := make([]int, num)
	remaining := total
	for i := range parts {
		// Give this slot the rounded average of what is still left.
		slots := len(parts) - i
		parts[i] = (remaining + slots/2) / slots
		remaining -= parts[i]
	}
	return parts
}
// distributeMax returns a slice of integers that are at most `max` and add up
// to <total>. The slice is as short as possible and the values are within +/-1
// of each other. max must be positive.
func distributeMax(total, max int) []int {
	// ceil(total/max) parts are needed so that no part exceeds max.
	return distribute(total, (total+max-1)/max)
}
|
package string
import (
"fmt"
"github.com/project-flogo/core/data/coerce"
"strings"
"github.com/project-flogo/core/data"
"github.com/project-flogo/core/data/expression/function"
)
// init registers the replace function with the expression engine so that it
// can be invoked as string.replace() in expressions.
func init() {
	function.Register(&fnReplace{})
}

// fnReplace implements the expression function string.replace(str, old, new, n),
// a thin wrapper around strings.Replace.
type fnReplace struct {
}

// Name returns the function name used in expressions.
func (fnReplace) Name() string {
	return "replace"
}

// Sig declares the signature: (string, string, string, int), not variadic.
func (fnReplace) Sig() (paramTypes []data.Type, isVariadic bool) {
	return []data.Type{data.TypeString, data.TypeString, data.TypeString, data.TypeInt}, false
}
// Eval coerces the four arguments and delegates to strings.Replace:
// params[0] is the source string, params[1] the substring to replace,
// params[2] the replacement, and params[3] the maximum number of
// replacements (strings.Replace treats a negative count as "replace all").
func (fnReplace) Eval(params ...interface{}) (interface{}, error) {
	source, err := coerce.ToString(params[0])
	if err != nil {
		return nil, fmt.Errorf("string.replace function first parameter [%+v] must be string", params[0])
	}
	oldSub, err := coerce.ToString(params[1])
	if err != nil {
		return nil, fmt.Errorf("string.replace function second parameter [%+v] must be string", params[1])
	}
	newSub, err := coerce.ToString(params[2])
	if err != nil {
		return nil, fmt.Errorf("string.replace function third parameter [%+v] must be string", params[2])
	}
	count, err := coerce.ToInt(params[3])
	if err != nil {
		return nil, fmt.Errorf("string.replace function last parameter [%+v] must be int", params[3])
	}
	return strings.Replace(source, oldSub, newSub, count), nil
}
|
// Copyright (c) 2018, Sylabs Inc. All rights reserved.
// This software is licensed under a 3-clause BSD license. Please consult the
// LICENSE.md file distributed with the sources of this project regarding your
// rights to use or distribute this software.
package client
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
jsonresp "github.com/sylabs/json-resp"
"golang.org/x/sync/errgroup"
)
// UploadCallback defines an interface used to perform a call-out to
// set up the source file Reader.
type UploadCallback interface {
	// InitUpload initializes the callback given a file size and source file Reader.
	InitUpload(int64, io.Reader)
	// GetReader (optionally) can return a proxied Reader.
	GetReader() io.Reader
	// Finish is called when the upload operation is complete.
	Finish()
}

// defaultUploadCallback is the no-op callback used when the caller does not
// supply one: it hands back the original Reader unchanged.
type defaultUploadCallback struct {
	r io.Reader
}

// InitUpload stores the source Reader; the size argument is ignored.
func (c *defaultUploadCallback) InitUpload(s int64, r io.Reader) {
	c.r = r
}

// GetReader returns the Reader captured by InitUpload.
func (c *defaultUploadCallback) GetReader() io.Reader {
	return c.r
}

// Finish is a no-op.
func (c *defaultUploadCallback) Finish() {
}
// calculateChecksums uses a TeeReader to calculate MD5 and SHA256
// checksums concurrently, reading the input only once. It returns the MD5
// digest, the SHA256 digest, and the number of bytes read from r.
func calculateChecksums(r io.Reader) (string, string, int64, error) {
	// Bytes read from tr by the MD5 goroutine are duplicated into pw, which
	// feeds pr for the SHA256 goroutine.
	pr, pw := io.Pipe()
	tr := io.TeeReader(r, pw)
	var g errgroup.Group
	var md5checksum string
	var sha256checksum string
	var fileSize int64
	// compute MD5 checksum for comparison with S3 checksum
	g.Go(func() error {
		// The pipe writer must be closed so sha256 computation gets EOF and will
		// complete.
		defer pw.Close()
		var err error
		md5checksum, fileSize, err = md5sum(tr)
		if err != nil {
			return fmt.Errorf("error calculating MD5 checksum: %v", err)
		}
		return nil
	})
	// Compute sha256
	g.Go(func() error {
		var err error
		sha256checksum, _, err = sha256sum(pr)
		if err != nil {
			return fmt.Errorf("error calculating SHA checksum: %v", err)
		}
		return nil
	})
	err := g.Wait()
	return md5checksum, sha256checksum, fileSize, err
}
// UploadImage will push a specified image from an io.ReadSeeker up to the
// Container Library, The timeout value for this operation is set within
// the context. It is recommended to use a large value (ie. 1800 seconds) to
// prevent timeout when uploading large images.
//
// The entity, collection, container and image records named by path are
// created on demand. The image data is only uploaded when the server does
// not already have it, and the given tags are applied afterwards.
func (c *Client) UploadImage(ctx context.Context, r io.ReadSeeker, path, arch string, tags []string, description string, callback UploadCallback) error {
	if !IsLibraryPushRef(path) {
		return fmt.Errorf("malformed image path: %s", path)
	}
	entityName, collectionName, containerName, parsedTags := ParseLibraryPath(path)
	// A push ref with tags embedded in the path is rejected here.
	if len(parsedTags) != 0 {
		return fmt.Errorf("malformed image path: %s", path)
	}
	// calculate sha256 and md5 checksums for Reader; the sha256 doubles as
	// the image's content-addressed identity below
	md5Checksum, imageHash, fileSize, err := calculateChecksums(r)
	if err != nil {
		return fmt.Errorf("error calculating checksums: %v", err)
	}
	// rollback to top of file
	if _, err = r.Seek(0, io.SeekStart); err != nil {
		return fmt.Errorf("error seeking to start stream: %v", err)
	}
	c.Logger.Logf("Image hash computed as %s", imageHash)
	// Find or create entity
	entity, err := c.getEntity(ctx, entityName)
	if err != nil {
		if err != ErrNotFound {
			return err
		}
		c.Logger.Logf("Entity %s does not exist in library - creating it.", entityName)
		entity, err = c.createEntity(ctx, entityName)
		if err != nil {
			return err
		}
	}
	// Find or create collection
	qualifiedCollectionName := fmt.Sprintf("%s/%s", entityName, collectionName)
	collection, err := c.getCollection(ctx, qualifiedCollectionName)
	if err != nil {
		if err != ErrNotFound {
			return err
		}
		// create collection
		c.Logger.Logf("Collection %s does not exist in library - creating it.", collectionName)
		collection, err = c.createCollection(ctx, collectionName, entity.ID)
		if err != nil {
			return err
		}
	}
	// Find or create container
	computedName := fmt.Sprintf("%s/%s", qualifiedCollectionName, containerName)
	container, err := c.GetContainer(ctx, computedName)
	if err != nil {
		if err != ErrNotFound {
			return err
		}
		// Create container
		c.Logger.Logf("Container %s does not exist in library - creating it.", containerName)
		container, err = c.createContainer(ctx, containerName, collection.ID)
		if err != nil {
			return err
		}
	}
	// Find or create image
	image, err := c.GetImage(ctx, arch, computedName+":"+imageHash)
	if err != nil {
		if err != ErrNotFound {
			return err
		}
		// Create image
		c.Logger.Logf("Image %s does not exist in library - creating it.", imageHash)
		image, err = c.createImage(ctx, imageHash, container.ID, description)
		if err != nil {
			return err
		}
	}
	// Only transfer the data when the server does not already hold it.
	if !image.Uploaded {
		c.Logger.Log("Now uploading to the library")
		if c.apiAtLeast(ctx, APIVersionV2Upload) {
			// use v2 post file api
			metadata := map[string]string{
				"md5sum": md5Checksum,
			}
			if err := c.postFileV2(ctx, r, fileSize, image.ID, callback, metadata); err != nil {
				return err
			}
		} else if err := c.postFile(ctx, r, fileSize, image.ID, callback); err != nil {
			return err
		}
		c.Logger.Logf("Upload completed OK")
	} else {
		c.Logger.Logf("Image is already present in the library - not uploading.")
	}
	c.Logger.Logf("Setting tags against uploaded image")
	if c.apiAtLeast(ctx, APIVersionV2ArchTags) {
		return c.setTagsV2(ctx, container.ID, arch, image.ID, append(tags, parsedTags...))
	}
	c.Logger.Logf("This library does not support multiple architecture per tag.")
	c.Logger.Logf("This tag will replace any already uploaded with the same name.")
	return c.setTags(ctx, container.ID, image.ID, append(tags, parsedTags...))
}
// postFile uploads the image data via the v1 single-POST API endpoint.
// fileSize is required by the API and is set as the request Content-Length.
func (c *Client) postFile(ctx context.Context, r io.Reader, fileSize int64, imageID string, callback UploadCallback) error {
	postURL := "/v1/imagefile/" + imageID
	c.Logger.Logf("postFile calling %s", postURL)
	if callback == nil {
		// fallback to default upload callback
		callback = &defaultUploadCallback{}
	}
	// use callback to set up source file reader
	callback.InitUpload(fileSize, r)
	defer callback.Finish()
	// Make an upload request. The request-creation error was previously
	// discarded, which would dereference a nil request below on failure.
	req, err := c.newRequest(http.MethodPost, postURL, "", callback.GetReader())
	if err != nil {
		return fmt.Errorf("error creating request: %v", err)
	}
	// Content length is required by the API
	req.ContentLength = fileSize
	res, err := c.HTTPClient.Do(req.WithContext(ctx))
	if err != nil {
		return fmt.Errorf("error uploading file to server: %s", err.Error())
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		err := jsonresp.ReadError(res.Body)
		if err != nil {
			return fmt.Errorf("sending file did not succeed: %v", err)
		}
		return fmt.Errorf("sending file did not succeed: http status code %d", res.StatusCode)
	}
	return nil
}
// postFileV2 uses V2 API to upload images to SCS library server. This is
// a three step operation: "create" upload image request, which returns a
// URL to issue an http PUT operation against, and then finally calls the
// completion endpoint once upload is complete.
func (c *Client) postFileV2(ctx context.Context, r io.Reader, fileSize int64, imageID string, callback UploadCallback, metadata map[string]string) error {
	if callback == nil {
		// fallback to default upload callback
		callback = &defaultUploadCallback{}
	}
	postURL := "/v2/imagefile/" + imageID
	c.Logger.Logf("postFileV2 calling %s", postURL)
	// issue upload request (POST) to obtain presigned S3 URL
	body := UploadImageRequest{
		Size:        fileSize,
		MD5Checksum: metadata["md5sum"],
	}
	objJSON, err := c.apiCreate(ctx, postURL, body)
	if err != nil {
		return err
	}
	var res UploadImageResponse
	if err := json.Unmarshal(objJSON, &res); err != nil {
		// Previously returned nil here, silently reporting success on a
		// malformed response and skipping the upload entirely.
		return fmt.Errorf("error decoding upload response: %v", err)
	}
	// set up source file reader
	callback.InitUpload(fileSize, r)
	// upload (PUT) directly to S3 presigned URL provided above
	presignedURL := res.Data.UploadURL
	if presignedURL == "" {
		return fmt.Errorf("error getting presigned URL")
	}
	req, err := http.NewRequest(http.MethodPut, presignedURL, callback.GetReader())
	if err != nil {
		return fmt.Errorf("error creating request: %v", err)
	}
	req.ContentLength = fileSize
	req.Header.Set("Content-Type", "application/octet-stream")
	resp, err := http.DefaultClient.Do(req.WithContext(ctx))
	callback.Finish()
	if err != nil {
		return fmt.Errorf("error uploading image: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("error uploading image: HTTP status %d", resp.StatusCode)
	}
	// send (PUT) image upload completion
	_, err = c.apiUpdate(ctx, postURL+"/_complete", UploadImageCompleteRequest{})
	return err
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package text_test
import (
"bytes"
"strings"
"testing"
"github.com/google/gapid/core/text"
)
// TestLimit verifies that a LimitWriter truncates output at the configured
// byte limit and appends the "abc" tail marker once the limit is exceeded.
// A '|' in the writes column marks a split between successive Write calls,
// exercising truncation across write boundaries.
func TestLimit(t *testing.T) {
	buf := &bytes.Buffer{}
	for _, test := range []struct {
		limit  int
		writes string
		expect string
	}{
		{8, "123", "123"},
		{8, "1234", "1234"},
		{8, "12345", "12345"},
		{8, "123456", "123456"},
		{8, "1234567", "1234567"},
		{8, "12345678", "12345678"},
		{8, "123456789", "12345abc"},
		{8, "1234567890123", "12345abc"},
		{8, "12|34", "1234"},
		{8, "123|45", "12345"},
		{8, "1234|56", "123456"},
		{8, "12345|67", "1234567"},
		{8, "12345|", "12345"},
		{8, "123456|78", "12345678"},
		{8, "1234567|89", "12345abc"},
		{8, "12345678|90123", "12345abc"},
		{8, "123456789|0123", "12345abc"},
	} {
		buf.Reset()
		// Use the per-case limit; it was previously hard-coded to 8, so the
		// table's limit column was silently ignored.
		writer := text.NewLimitWriter(buf, test.limit, "abc")
		for _, v := range strings.Split(test.writes, "|") {
			writer.Write(([]byte)(v))
		}
		writer.Flush()
		got := buf.String()
		if got != test.expect {
			t.Errorf("Expected %q got %q", test.expect, got)
		}
	}
}
|
package proc
import (
"github.com/MagalixCorp/magalix-agent/v2/watcher"
karma "github.com/reconquest/karma-go"
)
// GetPodStatus a helper function to get the status of a pod. It derives the
// pod-level status from the per-container states: any errored or unknown
// container yields Error, otherwise a pending container yields Pending, then
// all-completed yields Completed, all-running (or a running/completed mix)
// yields Running. A pod already marked Terminated keeps that status.
func GetPodStatus(pod Pod) watcher.Status {
	context := karma.
		Describe("application_id", pod.ApplicationID).
		Describe("service_id", pod.ServiceID).
		Describe("kubernetes/status", pod.Status.String())
	// Terminated is final: skip the container scan entirely.
	if pod.Status == watcher.StatusTerminated {
		debugf(
			context,
			"pod: %s (%s) status: %s",
			pod.ID,
			pod.Name,
			pod.Status.String(),
		)
		return pod.Status
	}
	// Tally containers by state category.
	var running int
	var pending int
	var completed int
	var errors int
	for container, state := range pod.Containers {
		// handle case when all container terminated
		status, _ := GetContainerStateStatus(state)
		switch {
		case status == watcher.StatusRunning:
			running++
		case status == watcher.StatusPending:
			pending++
		case status == watcher.StatusCompleted:
			completed++
		case status == watcher.StatusUnknown:
			warningf(
				nil,
				"container: %s unknown status, proceeding as error anyway",
				container,
			)
			fallthrough
		default:
			errors++
		}
	}
	total := len(pod.Containers)
	context = context.
		Describe("containers/running", running).
		Describe("containers/pending", pending).
		Describe("containers/completed", completed).
		Describe("containers/errors", errors).
		Describe("containers/total", total)
	// Precedence matters: errors dominate, then pending, then the
	// all-completed / all-running / mixed cases. If no case matches (e.g. a
	// pod with zero containers), the pod keeps its current status.
	newStatus := pod.Status
	switch {
	case errors > 0:
		newStatus = watcher.StatusError
	case pending > 0:
		newStatus = watcher.StatusPending
	case completed == total && total > 0:
		newStatus = watcher.StatusCompleted
	case running == total && total > 0:
		newStatus = watcher.StatusRunning
	case running > 0 && completed > 0 && running+completed == total:
		newStatus = watcher.StatusRunning
	}
	debugf(
		context,
		"pod: %s (%s) status: %s",
		pod.ID, pod.Name, newStatus.String(),
	)
	return newStatus
}
|
package main
import (
"log"
"net/http"
"github.com/SamuelRamond/xauth"
"github.com/SamuelRamond/xauth/store/boltdb"
"github.com/SamuelRamond/xauth/web"
"github.com/gorilla/mux"
"github.com/rs/cors"
)
// makeHandler adapts a plain handler function to the http.Handler interface.
func makeHandler(h func(w http.ResponseWriter, r *http.Request)) http.Handler {
	return http.HandlerFunc(h)
}
// main wires up the Xauth demo server: JWT-backed auth endpoints
// (register/login/whois) stored in BoltDB, permissive CORS, and a websocket
// game endpoint. It listens on :8089 and blocks until the server exits.
func main() {
	log.Println("Starting Xauth server")
	// Load the RSA key pair used to sign/verify JWTs, backed by BoltDB.
	ak, err := xauth.New(
		"./jwt/test/xauth_test.rsa",
		"./jwt/test/xauth_test.rsa.pub",
		boltdb.New(),
	)
	if err != nil {
		log.Fatal(err)
	}
	// Web
	hds := web.New(ak)
	c := cors.New(cors.Options{
		AllowedOrigins:   []string{"*"},
		Debug:            true,
		AllowCredentials: true,
		AllowedMethods:   []string{"GET", "POST", "OPTIONS"},
		AllowedHeaders:   []string{"*"},
	})
	// Game
	g := &SpaceCorsair{make(chan []byte), []*WSocketClient{}}
	go g.Run()
	router := mux.NewRouter()
	// HTTP
	// NOTE(review): "whoaim" looks like a typo for "whoami", but the path is
	// part of the public API (existing clients use it) — do not rename
	// without coordinating a migration.
	router.Methods("POST", "OPTIONS").Path("/auth/v1/register").Name("Register").Handler(c.Handler(makeHandler(hds.Register)))
	router.Methods("POST", "OPTIONS").Path("/auth/v1/login").Name("login").Handler(c.Handler(makeHandler(hds.Login)))
	router.Methods("GET", "OPTIONS").Path("/auth/v1/whoaim").Name("Whoaim").Handler(c.Handler(makeHandler(hds.Whois)))
	// WS
	wsh := &SocketBrokerWsHandler{}
	wsh.Init(SbWsHReadWrite, g)
	router.Methods("GET").Path("/ws/game").Name("GameOn").Handler(makeHandler(wsh.Handle))
	log.Println("Xauth server: Ready @ :8089")
	log.Fatal(http.ListenAndServe(":8089", router))
}
/*
> CURL Test (server listens on :8089, see ListenAndServe above):
curl 127.0.0.1:8089/auth/v1/register -X POST -d'{"username":"sr@alk","password":"toast"}'
curl 127.0.0.1:8089/auth/v1/login -X POST -d'{"username":"sr@alk","password":"toast"}'
curl 127.0.0.1:8089/auth/v1/whoaim -H 'Authorization: Bearer XXX'
*/
|
package main
import (
"fmt"
"unsafe"
)
// The unsafe package provides operations that step outside Go's type-safety
// rules. This demo prints size, offset and alignment information for a
// sample struct.
func main() {
	var hello = Hello{}
	// Sizeof returns the number of bytes the value itself occupies — only
	// the "top level" of the data. For a slice, for example, it returns the
	// size of the slice header, not the size of the memory the slice refers
	// to.
	s := unsafe.Sizeof(hello)
	fmt.Println(s)
	// Offsetof returns the offset of a struct field within its struct; the
	// argument must have the form structValue.field. In other words, it is
	// the number of bytes between the start of the struct and the start of
	// that field.
	f := unsafe.Offsetof(hello.b)
	fmt.Println(f)
	// Alignof returns the required alignment of the value's type; for a
	// struct-field expression it reports the alignment of that field within
	// the struct.
	a := unsafe.Alignof(hello)
	fmt.Println(a)
}

// Hello is a sample struct used to demonstrate size/offset/alignment.
type Hello struct {
	a bool
	b string
	c int
	d []float64
}
|
package pgpmail
import (
"bytes"
"io"
"io/ioutil"
"strings"
"testing"
"github.com/ProtonMail/go-crypto/openpgp"
pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors"
)
// checkSignature asserts that md describes a message validly signed by the
// test private key's primary key.
func checkSignature(t *testing.T, md *openpgp.MessageDetails) {
	primaryKeyId := testPrivateKey.PrimaryKey.KeyId
	if md.SignatureError != nil {
		t.Errorf("MessageDetails.SignatureError = %v", md.SignatureError)
	}
	if !md.IsSigned {
		t.Errorf("MessageDetails.IsSigned != true")
	}
	if md.SignedBy == nil {
		t.Errorf("MessageDetails.SignedBy == nil")
	}
	if md.SignedByKeyId != primaryKeyId {
		t.Errorf("MessageDetails.SignedByKeyId = %v, want %v", md.SignedByKeyId, primaryKeyId)
	}
}

// checkEncryption asserts that md describes a message encrypted to exactly
// one key: the test private key's encryption subkey.
func checkEncryption(t *testing.T, md *openpgp.MessageDetails) {
	encryptedTo := testPrivateKey.Subkeys[0].PublicKey.KeyId
	if !md.IsEncrypted {
		t.Errorf("MessageDetails.IsEncrypted != true")
	}
	if len(md.EncryptedToKeyIds) != 1 {
		t.Errorf("MessageDetails.EncryptedToKeyIds = %v, want exactly one key", md.EncryptedToKeyIds)
	} else if md.EncryptedToKeyIds[0] != encryptedTo {
		t.Errorf("MessageDetails.EncryptedToKeyIds = %v, want key %v", md.EncryptedToKeyIds, encryptedTo)
	}
}
// TestReader_encryptedSignedPGPMIME decrypts a PGP/MIME message whose
// signature is carried inside the encrypted part and verifies both the
// signature and the decrypted body.
func TestReader_encryptedSignedPGPMIME(t *testing.T) {
	sr := strings.NewReader(testPGPMIMEEncryptedSigned)
	r, err := Read(sr, openpgp.EntityList{testPrivateKey}, nil, nil)
	if err != nil {
		t.Fatalf("pgpmail.Read() = %v", err)
	}
	var buf bytes.Buffer
	// Draining UnverifiedBody is what actually performs decryption and
	// signature verification; check results only afterwards.
	if _, err := io.Copy(&buf, r.MessageDetails.UnverifiedBody); err != nil {
		t.Fatalf("io.Copy() = %v", err)
	}
	checkSignature(t, r.MessageDetails)
	checkEncryption(t, r.MessageDetails)
	if s := buf.String(); s != testEncryptedBody {
		t.Errorf("MessagesDetails.UnverifiedBody = \n%v\n but want \n%v", s, testEncryptedBody)
	}
}

// TestReader_encryptedSignedEncapsulatedPGPMIME decrypts a PGP/MIME message
// whose signed content is a separately encapsulated entity inside the
// encrypted part, and verifies both the signature and the body.
func TestReader_encryptedSignedEncapsulatedPGPMIME(t *testing.T) {
	sr := strings.NewReader(testPGPMIMEEncryptedSignedEncapsulated)
	r, err := Read(sr, openpgp.EntityList{testPrivateKey}, nil, nil)
	if err != nil {
		t.Fatalf("pgpmail.Read() = %v", err)
	}
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, r.MessageDetails.UnverifiedBody); err != nil {
		t.Fatalf("io.Copy() = %v", err)
	}
	checkSignature(t, r.MessageDetails)
	checkEncryption(t, r.MessageDetails)
	if s := buf.String(); s != testSignedBody {
		t.Errorf("MessagesDetails.UnverifiedBody = \n%v\n but want \n%v", s, testSignedBody)
	}
}
// TestReader_signedPGPMIME verifies a clearsigned (not encrypted) PGP/MIME
// message against the test public key.
func TestReader_signedPGPMIME(t *testing.T) {
	sr := strings.NewReader(testPGPMIMESigned)
	r, err := Read(sr, openpgp.EntityList{testPublicKey}, nil, nil)
	if err != nil {
		t.Fatalf("pgpmail.Read() = %v", err)
	}
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, r.MessageDetails.UnverifiedBody); err != nil {
		t.Fatalf("io.Copy() = %v", err)
	}
	if r.MessageDetails.IsEncrypted {
		t.Errorf("MessageDetails.IsEncrypted != false")
	}
	checkSignature(t, r.MessageDetails)
	if s := buf.String(); s != testSignedBody {
		t.Errorf("MessagesDetails.UnverifiedBody = \n%v\n but want \n%v", s, testSignedBody)
	}
}

// TestReader_signedPGPMIMEInvalid checks that a tampered signed message
// surfaces a SignatureError after the body has been drained.
func TestReader_signedPGPMIMEInvalid(t *testing.T) {
	sr := strings.NewReader(testPGPMIMESignedInvalid)
	r, err := Read(sr, openpgp.EntityList{testPrivateKey}, nil, nil)
	if err != nil {
		t.Fatalf("pgpmail.Read() = %v", err)
	}
	// The body must be fully read before SignatureError is populated.
	if _, err := io.Copy(ioutil.Discard, r.MessageDetails.UnverifiedBody); err != nil {
		t.Fatalf("io.Copy() = %v", err)
	}
	if err := r.MessageDetails.SignatureError; err == nil {
		t.Errorf("MessageDetails.SignatureError = nil")
	}
}

// TestReader_signedPGPMIMEUnknownIssuer checks that verifying with an empty
// keyring reports ErrUnknownIssuer.
func TestReader_signedPGPMIMEUnknownIssuer(t *testing.T) {
	sr := strings.NewReader(testPGPMIMESigned)
	r, err := Read(sr, openpgp.EntityList{}, nil, nil)
	if err != nil {
		t.Fatalf("pgpmail.Read() = %v", err)
	}
	if _, err := io.Copy(ioutil.Discard, r.MessageDetails.UnverifiedBody); err != nil {
		t.Fatalf("io.Copy() = %v", err)
	}
	if err := r.MessageDetails.SignatureError; err != pgperrors.ErrUnknownIssuer {
		t.Errorf("MessageDetails.SignatureError = %v, want ErrUnknownIssuer", err)
	}
}
// TestReader_plaintext checks that a message with no PGP content passes
// through unchanged, flagged neither encrypted nor signed.
func TestReader_plaintext(t *testing.T) {
	sr := strings.NewReader(testPlaintext)
	r, err := Read(sr, openpgp.EntityList(nil), nil, nil)
	if err != nil {
		t.Fatalf("pgpmail.Read() = %v", err)
	}
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, r.MessageDetails.UnverifiedBody); err != nil {
		t.Fatalf("io.Copy() = %v", err)
	}
	if r.MessageDetails.IsEncrypted {
		t.Errorf("MessageDetails.IsEncrypted != false")
	}
	if r.MessageDetails.IsSigned {
		t.Errorf("MessageDetails.IsSigned != false")
	}
	if s := buf.String(); s != testPlaintext {
		t.Errorf("MessagesDetails.UnverifiedBody = \n%v\n but want \n%v", s, testPlaintext)
	}
}
var testEncryptedBody = toCRLF(`Content-Type: text/plain
This is an encrypted message!
`)
var testSignedBody = toCRLF(`Content-Type: text/plain
This is a signed message!
`)
// testPGPMIMEEncryptedSigned is a PGP/MIME multipart/encrypted fixture
// (protocol application/pgp-encrypted) carrying an armored PGP message.
var testPGPMIMEEncryptedSigned = toCRLF(`From: John Doe <john.doe@example.org>
To: John Doe <john.doe@example.org>
Mime-Version: 1.0
Content-Type: multipart/encrypted; boundary=foo;
protocol="application/pgp-encrypted"
--foo
Content-Type: application/pgp-encrypted
Version: 1
--foo
Content-Type: application/octet-stream
-----BEGIN PGP MESSAGE-----
hQEMAxF0jxulHQ8+AQf/SBK2FIIgMA4OkCvlqty/1GmAumWq6J0T+pRLppXHvYFb
jbXRzz2h3pE/OoouI6vWzBwb8xU/5f8neen+fvdsF1N6PyLjZcHRB91oPvP8TuHA
0vEpiQDbP+0wlQ8BmMnnV06HokWJoKXGmIle0L4QszT/QCbrT80UgKrqXNVHKQtN
DUcytFsUCmolZRj074FEpEetjH6QGEX5hAYNBUJziXmOv7vdd4AFgNbbgC5j5ezz
h8tCAKUqeUiproYaAMrI0lfqh/t8bacJNkljI2LOxYfdJ/2317Npwly0OqpCM3YT
Q4dHuuGM6IuZHtIc9sneIBRhKf8WnWt14hLkHUT80dLA/AHKl0jGYqO34Dxd9JNB
EEwQ4j6rxauOEbKLAuYYaEqCzNYBasBrPmpNb4Fx2syWkCoYzwvzv7nj4I8vIBmm
FGsAQLX4c18qtZI4XaG4FPUvFQ01Y0rjTxAV3u51lrYjCxFuI5ZEtiT0J/Tv2Unw
R6xwtARkEf3W0agegmohEjjkAexKNxGrlulLiPk2j9/dnlAxeGpOuhYuYU2kYbKq
x3TkcVYRs1FkmCX0YHNJ2zVWLfDYd2f3UVkXINe7mODGx2A2BxvK9Ig7NMuNmWZE
ELiLSIvQk9jlgqWUMwSGPQKaHPrac02EjcBHef2zCoFbTg0TXQeDr5SV7yguX8jB
zZnoNs+6+GR1gA6poKzFdiG4NRr0SNgEHazPPkXp3P2KyOINyFJ7SA+HX8iegTqL
CTPYPK7UNRmb5s2u5B4e9NiQB9L85W4p7p7uemCSu9bxjs8rkCJpvx9Kb8jzPW17
wnEUe10A4JNDBhxiMg+Fm5oM2VxQVy+eDVFOOq7pDYVcSmZc36wO+EwAKph9shby
O4sDS4l/8eQTEYUxTavdtQ9O9ZMXvf/L3Rl1uFJXw1lFwPReXwtpA485e031/A==
=P0jf
-----END PGP MESSAGE-----
--foo--
`)
// testPGPMIMEEncryptedSignedEncapsulated is a PGP/MIME multipart/encrypted
// fixture; per its name the signature is encapsulated inside the encrypted
// payload rather than carried as a separate MIME part.
var testPGPMIMEEncryptedSignedEncapsulated = toCRLF(`From: John Doe <john.doe@example.org>
To: John Doe <john.doe@example.org>
Mime-Version: 1.0
Content-Type: multipart/encrypted; boundary=foo;
protocol="application/pgp-encrypted"
--foo
Content-Type: application/pgp-encrypted
Version: 1
--foo
Content-Type: application/octet-stream
-----BEGIN PGP MESSAGE-----
hQEMAxF0jxulHQ8+AQf9FCth8p+17rzWL0AtKP+aWndvVUYmaKiUZd+Ya8D9cRnc
FAP//JnRvTPhdOyl8x1FQkVxyuKcgpjaClb6/OLgD0lGYLC15p43G4QyU+jtOOQW
FFjZj2z8wUuiev8ejNd7DMiOQRSm4d+IIK+Qa2BJ10Y9AuLQtMI8D+joP1D11NeX
4FO3SYFEuwH5VWlXGo3bRjg8fKFVG/r/xCwBibqRpfjVnS4EgI04XCsnhqdaCRvE
Bw2XEaF62m2MUNbaan410WajzVSbSIqIHw8U7vpR/1nisS+SZmScuCXWFa6W9YgR
0nSWi1io2Ratf4F9ORCy0o7QPh7FlpsIUGmp4paF39LpAQ2q0OUnFhkIdLVQscQT
JJXLbZwp0CYTAgqwdRWFwY7rEPm2k/Oe4cHKJLEn0hS+X7wch9FAYEMifeqa0FcZ
GjxocAlyhmlM0sXIDYP8xx49t4O8JIQU1ep/SX2+rUAKIh2WRdYDy8GrrHba8V8U
aBCU9zIMhmOtu7r+FE1djMUhcaSbbvC9zLDMLV8QxogGhxrqaUM8Pj+q1H6myaAr
o1xd65b6r2Bph6GUmcMwl28i78u9bKoM0mI+EdUuLwS9EbmjtIwEgxNv4LqK8xw2
/tjCe9JSqg+HDaBYnO4QTM29Y+PltRIe6RxpnBcYULTLcSt1UK3YV1KvhqfXMjoZ
THsvtxLbmPYFv+g0hiUpuKtyG9NGidKCxrjvNq30KCSUWzNFkh+qv6CPm26sXr5F
DTsVpFTM/lomg4Po8sE20BZsk/9IzEh4ERSOu3k0m3mI4QAyJmrOpVGUjd//4cqz
Zhhc3tV78BtEYNh0a+78fAHGtdLocLj5IfOCYQWW//EtOY93TnVAtP0puaiNOc8q
Vvb5WMamiRJZ9nQXP3paDoqD14B9X6bvNWsDQDkkrWls2sYg7KzqpOM/nlXLBKQd
Ok4EJfOpd0hICPwo6tJ6sK2meRcDLxtGJybADE7UHJ4t0SrQBfn/sQhRytQtg2wr
U1Thy6RujlrrrdUryo3Mi+xc9Ot1o35JszCjNQGL6BCFsGi9fx5pjWM+lLiJ15aJ
jh02mSd/8j7IaJCGgTuyq6uK45EoVqWd1WRSYl4s5tg1g1jckigYYjJdAKNnU/rZ
iTk5F8GSyv30EXnqvrs=
=Ibxd
-----END PGP MESSAGE-----
--foo--
`)
// testPGPMIMESigned is a PGP/MIME multipart/signed fixture (RFC 1847 layout:
// cleartext body part followed by a detached application/pgp-signature part).
var testPGPMIMESigned = toCRLF(`From: John Doe <john.doe@example.org>
To: John Doe <john.doe@example.org>
Mime-Version: 1.0
Content-Type: multipart/signed; boundary=bar; micalg=pgp-SHA256;
protocol="application/pgp-signature"
--bar
Content-Type: text/plain
This is a signed message!
--bar
Content-Type: application/pgp-signature
-----BEGIN PGP SIGNATURE-----
iQEzBAABCAAdFiEEsahmk1QVO3mfIhe/MHIVwT33qWQFAl5FRLgACgkQMHIVwT33
qWSEQQf/YgRlKlQzSyvm6A52lGIRU3F/z9EGjhCryxj+hSdPlk8O7iZFIjnco4Ea
7QIlsOj6D4AlLdhyK6c8IZV7rZoTNE5rc6I5UZjM4Qa0XoyLjao28zR252TtwwWJ
e4+wrTQKcVhCyHO6rkvcCpru4qF5CU+Mi8+sf8CNJJyBgw1Pri35rJWMdoTPTqqz
kcIGN1JySaI8bbVitJQmnm0FtFTiB7zznv94rMBCiPmPUWd9BSpSBJteJoBLZ+K7
Y7ws2Dzp2sBo/RLUM18oXd0N9PLXvFGI3IuF8ey1SPzQH3QbBdJSTmLzRlPjK7A1
HVHFb3vTjd71z9j5IGQQ3Awdw30zMg==
=gOul
-----END PGP SIGNATURE-----
--bar--
`)
// testPGPMIMESignedInvalid reuses the signature from testPGPMIMESigned over a
// different body, so verification must fail.
var testPGPMIMESignedInvalid = toCRLF(`From: John Doe <john.doe@example.org>
To: John Doe <john.doe@example.org>
Mime-Version: 1.0
Content-Type: multipart/signed; boundary=bar; micalg=pgp-sha256;
protocol="application/pgp-signature"
--bar
Content-Type: text/plain
This is a signed message, but the signature is invalid.
--bar
Content-Type: application/pgp-signature
-----BEGIN PGP SIGNATURE-----
iQEzBAABCAAdFiEEsahmk1QVO3mfIhe/MHIVwT33qWQFAl5FRLgACgkQMHIVwT33
qWSEQQf/YgRlKlQzSyvm6A52lGIRU3F/z9EGjhCryxj+hSdPlk8O7iZFIjnco4Ea
7QIlsOj6D4AlLdhyK6c8IZV7rZoTNE5rc6I5UZjM4Qa0XoyLjao28zR252TtwwWJ
e4+wrTQKcVhCyHO6rkvcCpru4qF5CU+Mi8+sf8CNJJyBgw1Pri35rJWMdoTPTqqz
kcIGN1JySaI8bbVitJQmnm0FtFTiB7zznv94rMBCiPmPUWd9BSpSBJteJoBLZ+K7
Y7ws2Dzp2sBo/RLUM18oXd0N9PLXvFGI3IuF8ey1SPzQH3QbBdJSTmLzRlPjK7A1
HVHFb3vTjd71z9j5IGQQ3Awdw30zMg==
=gOul
-----END PGP SIGNATURE-----
--bar--
`)
// testPlaintext is a plain message with no PGP content at all.
var testPlaintext = toCRLF(`From: John Doe <john.doe@example.org>
To: John Doe <john.doe@example.org>
Mime-Version: 1.0
Content-Type: text/plain
This is a plaintext message!
`)
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package executor
import (
"bytes"
"fmt"
"os"
"sync"
"time"
"code.google.com/p/go-uuid/uuid"
"code.google.com/p/gogoprotobuf/proto"
log "github.com/golang/glog"
"github.com/yifan-gu/go-mesos/mesosproto"
"github.com/yifan-gu/go-mesos/messenger"
"github.com/yifan-gu/go-mesos/upid"
)
const (
	// TODO(yifan): Make them as flags.
	// defaultRecoveryTimeout bounds how long the driver waits for the slave
	// to reconnect when checkpointing is enabled (see slaveExited).
	defaultRecoveryTimeout = time.Minute * 15
	// defaultHealthCheckDuration is the interval between slave health checks
	// (see monitorSlave).
	defaultHealthCheckDuration = time.Second * 1
	// defaultHealthCheckThreshold is the number of failed health checks after
	// which the slave is assumed to have exited.
	defaultHealthCheckThreshold = 10
	// MesosVersion indicates the supported mesos version.
	MesosVersion = "0.20.0"
)
// Executor interface defines all the functions that are needed to implement
// a mesos executor. The driver invokes these callbacks from its message
// handlers; see the handler methods on MesosExecutorDriver.
type Executor interface {
	// Registered is invoked when the executor registers with the slave.
	Registered(ExecutorDriver, *mesosproto.ExecutorInfo, *mesosproto.FrameworkInfo, *mesosproto.SlaveInfo)
	// Reregistered is invoked when the executor re-registers with the slave.
	Reregistered(ExecutorDriver, *mesosproto.SlaveInfo)
	// Disconnected — NOTE(review): not invoked anywhere in this file;
	// presumably called on slave disconnect. Confirm against callers.
	Disconnected(ExecutorDriver)
	// LaunchTask is invoked for each task received from the slave.
	LaunchTask(ExecutorDriver, *mesosproto.TaskInfo)
	// KillTask is invoked when the slave asks to kill a task.
	KillTask(ExecutorDriver, *mesosproto.TaskID)
	// FrameworkMessage delivers a framework-to-executor payload.
	FrameworkMessage(ExecutorDriver, string)
	// Shutdown is invoked on a shutdown request or when the slave exits.
	Shutdown(ExecutorDriver)
	// Error reports driver-level errors (e.g. an illegal status update).
	Error(ExecutorDriver, string)
}
// ExecutorDriver interface defines the functions that are needed to implement
// a mesos executor driver.
type ExecutorDriver interface {
	// Start launches the driver and registers with the slave.
	Start() (mesosproto.Status, error)
	// Stop stops a running (or aborted) driver.
	Stop() (mesosproto.Status, error)
	// Abort aborts a running driver without stopping it.
	Abort() (mesosproto.Status, error)
	// Join blocks until the driver is stopped or aborted.
	Join() (mesosproto.Status, error)
	// SendStatusUpdate reports a task status change to the slave.
	SendStatusUpdate(*mesosproto.TaskStatus) (mesosproto.Status, error)
	// SendFrameworkMessage sends an opaque payload to the framework.
	SendFrameworkMessage(string) (mesosproto.Status, error)
	// Destroy releases driver resources (currently a no-op).
	Destroy() error
}
// MesosExecutorDriver is a implementation of the ExecutorDriver.
type MesosExecutorDriver struct {
	// self is this executor's UPID, set from the messenger in Start.
	self *upid.UPID
	// Executor holds the user-supplied callbacks.
	Executor Executor
	// mutex guards the driver state; cond is bound to mutex and wakes
	// goroutines blocked in Join when Stop/Abort signal it.
	mutex *sync.Mutex
	cond  *sync.Cond
	// status is the driver lifecycle state (NOT_STARTED/RUNNING/ABORTED/STOPPED).
	status    mesosproto.Status
	messenger messenger.Messenger
	// Slave/framework/executor identity, parsed from the environment in
	// parseEnviroments (slaveUPID may later be replaced in reconnect).
	slaveUPID   *upid.UPID
	slaveID     *mesosproto.SlaveID
	frameworkID *mesosproto.FrameworkID
	executorID  *mesosproto.ExecutorID
	workDir     string
	// connected reports whether the executor is registered with the slave;
	// connection identifies the current registration (see recoveryTimeouts).
	connected  bool
	connection uuid.UUID
	// local is set from MESOS_LOCAL; directory is not used in this file.
	local     bool
	directory string
	// checkpoint and recoveryTimeout control reconnect behavior in slaveExited.
	checkpoint         bool
	recoveryTimeout    time.Duration
	slaveHealthChecker *SlaveHealthChecker
	updates map[string]*mesosproto.StatusUpdate // Key is a UUID string.
	tasks   map[string]*mesosproto.TaskInfo     // Key is the TaskID string (see runTask), not a UUID.
}
// NewMesosExecutorDriver creates a new mesos executor driver.
// The returned driver is in the DRIVER_NOT_STARTED state; call Start to run it.
func NewMesosExecutorDriver() *MesosExecutorDriver {
	m := new(sync.Mutex)
	d := &MesosExecutorDriver{
		status:  mesosproto.Status_DRIVER_NOT_STARTED,
		mutex:   m,
		cond:    sync.NewCond(m),
		updates: make(map[string]*mesosproto.StatusUpdate),
		tasks:   make(map[string]*mesosproto.TaskInfo),
	}
	// TODO(yifan): Set executor cnt.
	d.messenger = messenger.NewMesosMessenger(&upid.UPID{ID: "executor(1)"})
	return d
}
// init initializes the driver.
// It wires each slave-to-executor protobuf message type to its handler
// method on the messenger. Always returns nil at present.
func (driver *MesosExecutorDriver) init() error {
	log.Infof("Init mesos executor driver\n")
	log.Infof("Version: %v\n", MesosVersion)
	// Install handlers.
	// TODO(yifan): Check errors.
	driver.messenger.Install(driver.registered, &mesosproto.ExecutorRegisteredMessage{})
	driver.messenger.Install(driver.reregistered, &mesosproto.ExecutorReregisteredMessage{})
	driver.messenger.Install(driver.reconnect, &mesosproto.ReconnectExecutorMessage{})
	driver.messenger.Install(driver.runTask, &mesosproto.RunTaskMessage{})
	driver.messenger.Install(driver.killTask, &mesosproto.KillTaskMessage{})
	driver.messenger.Install(driver.statusUpdateAcknowledgement, &mesosproto.StatusUpdateAcknowledgementMessage{})
	driver.messenger.Install(driver.frameworkMessage, &mesosproto.FrameworkToExecutorMessage{})
	driver.messenger.Install(driver.shutdown, &mesosproto.ShutdownExecutorMessage{})
	return nil
}
// Start starts the driver.
// It parses the slave configuration from the environment, installs the
// message handlers, starts the messenger, and registers this executor with
// the slave. On success the driver transitions to DRIVER_RUNNING; on any
// failure it stays DRIVER_NOT_STARTED and the error is returned.
func (driver *MesosExecutorDriver) Start() (mesosproto.Status, error) {
	log.Infoln("Start mesos executor driver")
	driver.mutex.Lock()
	defer driver.mutex.Unlock()
	// Starting twice is a no-op; only a NOT_STARTED driver may start.
	if driver.status != mesosproto.Status_DRIVER_NOT_STARTED {
		return driver.status, nil
	}
	if err := driver.parseEnviroments(); err != nil {
		log.Errorf("Failed to parse environments: %v\n", err)
		return mesosproto.Status_DRIVER_NOT_STARTED, err
	}
	if err := driver.init(); err != nil {
		log.Errorf("Failed to initialize the driver: %v\n", err)
		return mesosproto.Status_DRIVER_NOT_STARTED, err
	}
	// Start monitoring the slave.
	// NOTE(review): this goroutine starts before the messenger; if
	// messenger.Start fails below it keeps running — confirm intended.
	go driver.monitorSlave()
	// Start the messenger.
	if err := driver.messenger.Start(); err != nil {
		log.Errorf("Failed to start the messenger: %v\n", err)
		return mesosproto.Status_DRIVER_NOT_STARTED, err
	}
	driver.self = driver.messenger.UPID()
	// Register with slave.
	message := &mesosproto.RegisterExecutorMessage{
		FrameworkId: driver.frameworkID,
		ExecutorId:  driver.executorID,
	}
	if err := driver.messenger.Send(driver.slaveUPID, message); err != nil {
		log.Errorf("Failed to send %v: %v\n", message, err)
		return mesosproto.Status_DRIVER_NOT_STARTED, err
	}
	// Set status.
	driver.status = mesosproto.Status_DRIVER_RUNNING
	log.Infoln("Mesos executor is running")
	return driver.status, nil
}
// Stop stops the driver.
// A driver that is neither running nor aborted is left untouched. When an
// aborted driver is stopped, DRIVER_ABORTED is returned so callers (and
// Join'ers woken by the signal) can observe the abort.
func (driver *MesosExecutorDriver) Stop() (mesosproto.Status, error) {
	log.Infoln("Stop mesos executor driver")
	driver.mutex.Lock()
	// Wake anyone blocked in Join before releasing the lock.
	defer func() {
		driver.cond.Signal()
		driver.mutex.Unlock()
	}()
	switch driver.status {
	case mesosproto.Status_DRIVER_RUNNING, mesosproto.Status_DRIVER_ABORTED:
		// Stoppable states; fall through.
	default:
		return driver.status, nil
	}
	driver.messenger.Stop()
	wasAborted := driver.status == mesosproto.Status_DRIVER_ABORTED
	driver.status = mesosproto.Status_DRIVER_STOPPED
	if wasAborted {
		return mesosproto.Status_DRIVER_ABORTED, nil
	}
	return mesosproto.Status_DRIVER_STOPPED, nil
}
// Abort aborts the driver.
// Only a running driver can be aborted; otherwise the current status is
// returned unchanged. The messenger is stopped and Join'ers are woken via
// cond.Signal in the deferred unlock.
func (driver *MesosExecutorDriver) Abort() (mesosproto.Status, error) {
	log.Infoln("Abort mesos executor driver")
	driver.mutex.Lock()
	defer func() {
		driver.cond.Signal()
		driver.mutex.Unlock()
	}()
	if driver.status != mesosproto.Status_DRIVER_RUNNING {
		return driver.status, nil
	}
	driver.messenger.Stop()
	driver.status = mesosproto.Status_DRIVER_ABORTED
	return driver.status, nil
}
// Join blocks the driver until it's either stopped or aborted.
// Returns immediately with the current status when the driver is not
// running; otherwise waits on cond, which Stop and Abort signal.
func (driver *MesosExecutorDriver) Join() (mesosproto.Status, error) {
	log.Infoln("Join is called for mesos executor driver")
	driver.mutex.Lock()
	defer driver.mutex.Unlock()
	if driver.status != mesosproto.Status_DRIVER_RUNNING {
		return driver.status, nil
	}
	// cond.Wait atomically releases the mutex while blocked and
	// reacquires it on wakeup, so status reads here are safe.
	for driver.status == mesosproto.Status_DRIVER_RUNNING {
		driver.cond.Wait()
	}
	return driver.status, nil
}
// SendStatusUpdate sends a StatusUpdate message to the slave.
// TASK_STAGING updates are illegal from an executor: the driver aborts and
// reports the problem through Executor.Error. Each update is remembered in
// driver.updates until the slave acknowledges it (see
// statusUpdateAcknowledgement) so it can be re-sent on reconnect.
func (driver *MesosExecutorDriver) SendStatusUpdate(taskStatus *mesosproto.TaskStatus) (mesosproto.Status, error) {
	log.Infoln("Sending status update")
	driver.mutex.Lock()
	defer driver.mutex.Unlock()
	if taskStatus.GetState() == mesosproto.TaskState_TASK_STAGING {
		log.Errorf("Executor is not allowed to send TASK_STAGING status update. Aborting!\n")
		// Fix: calling driver.Abort() here deadlocked, because Abort
		// re-acquires driver.mutex which this method already holds
		// (sync.Mutex is not reentrant). Abort inline instead.
		if driver.status == mesosproto.Status_DRIVER_RUNNING {
			driver.messenger.Stop()
			driver.status = mesosproto.Status_DRIVER_ABORTED
			driver.cond.Signal()
		}
		err := fmt.Errorf("Attempted to send TASK_STAGING status update")
		driver.Executor.Error(driver, err.Error())
		return driver.status, err
	}
	// Set up status update.
	update := driver.makeStatusUpdate(taskStatus)
	log.Infof("Executor sending status update %v\n", update.String())
	// Capture the status update until it is acknowledged.
	driver.updates[uuid.UUID(update.GetUuid()).String()] = update
	// Put the status update in the message.
	message := &mesosproto.StatusUpdateMessage{
		Update: update,
		Pid:    proto.String(driver.self.String()),
	}
	// Send the message.
	if err := driver.messenger.Send(driver.slaveUPID, message); err != nil {
		// Fix: the format string previously had no arguments.
		log.Errorf("Failed to send %v: %v\n", message, err)
		return driver.status, err
	}
	return driver.status, nil
}
// SendFrameworkMessage sends a FrameworkMessage to the slave.
// It is a no-op (returning the current status) unless the driver is running.
func (driver *MesosExecutorDriver) SendFrameworkMessage(data string) (mesosproto.Status, error) {
	log.Infoln("Send framework message")
	driver.mutex.Lock()
	defer driver.mutex.Unlock()
	if driver.status != mesosproto.Status_DRIVER_RUNNING {
		return driver.status, nil
	}
	message := &mesosproto.ExecutorToFrameworkMessage{
		SlaveId:     driver.slaveID,
		FrameworkId: driver.frameworkID,
		ExecutorId:  driver.executorID,
		Data:        []byte(data),
	}
	// Send the message.
	if err := driver.messenger.Send(driver.slaveUPID, message); err != nil {
		// Fix: the format string previously had no arguments.
		log.Errorf("Failed to send %v: %v\n", message, err)
		return driver.status, err
	}
	return driver.status, nil
}
// Destroy destroys the driver. No-op for now.
// Kept to satisfy the ExecutorDriver interface.
func (driver *MesosExecutorDriver) Destroy() error {
	return nil
}
// parseEnviroments reads the configuration that the mesos slave passes down
// via environment variables and stores it on the driver. An error is
// returned when a required variable is missing or malformed.
// (The misspelled name is kept because Start() calls it.)
func (driver *MesosExecutorDriver) parseEnviroments() error {
	var value string
	value = os.Getenv("MESOS_LOCAL")
	if len(value) > 0 {
		driver.local = true
	}
	value = os.Getenv("MESOS_SLAVE_PID")
	if len(value) == 0 {
		return fmt.Errorf("Cannot find MESOS_SLAVE_PID in the environment")
	}
	// Fix: the local was named "upid", shadowing the upid package.
	pid, err := upid.Parse(value)
	if err != nil {
		log.Errorf("Cannot parse UPID %v\n", err)
		return err
	}
	driver.slaveUPID = pid
	value = os.Getenv("MESOS_SLAVE_ID")
	if len(value) == 0 {
		return fmt.Errorf("Cannot find MESOS_SLAVE_ID in the environment")
	}
	driver.slaveID = &mesosproto.SlaveID{Value: proto.String(value)}
	value = os.Getenv("MESOS_FRAMEWORK_ID")
	if len(value) == 0 {
		return fmt.Errorf("Cannot find MESOS_FRAMEWORK_ID in the environment")
	}
	driver.frameworkID = &mesosproto.FrameworkID{Value: proto.String(value)}
	value = os.Getenv("MESOS_EXECUTOR_ID")
	if len(value) == 0 {
		return fmt.Errorf("Cannot find MESOS_EXECUTOR_ID in the environment")
	}
	driver.executorID = &mesosproto.ExecutorID{Value: proto.String(value)}
	value = os.Getenv("MESOS_DIRECTORY")
	// TODO(yifan): Check if the value exists?
	driver.workDir = value
	value = os.Getenv("MESOS_CHECKPOINT")
	if value == "1" {
		driver.checkpoint = true
	}
	// TODO(yifan): Parse the duration from the environment.
	// Fix: recoveryTimeout was never initialized anywhere, so slaveExited
	// scheduled its recovery timer with a zero duration (firing at once).
	// Per the TODO above, use the default for now.
	driver.recoveryTimeout = defaultRecoveryTimeout
	return nil
}
// registered handles the slave's ExecutorRegisteredMessage: it marks the
// driver connected, tags the connection with a fresh UUID (compared later in
// recoveryTimeouts) and invokes the user's Registered callback.
func (driver *MesosExecutorDriver) registered(from *upid.UPID, pbMsg proto.Message) {
	// Lock is still needed to avoid health check race. Thought it will rarely happen.
	// TODO(yifan): serialize these function calls.
	driver.mutex.Lock()
	defer driver.mutex.Unlock()
	msg := pbMsg.(*mesosproto.ExecutorRegisteredMessage)
	slaveID := msg.GetSlaveId()
	executorInfo := msg.GetExecutorInfo()
	frameworkInfo := msg.GetFrameworkInfo()
	slaveInfo := msg.GetSlaveInfo()
	if driver.status == mesosproto.Status_DRIVER_ABORTED {
		log.Infof("Ignoring registered message from slave %v, because the driver is aborted!\n", slaveID)
		return
	}
	log.Infof("Executor registered on slave %v\n", slaveID)
	driver.connected = true
	driver.connection = uuid.NewUUID()
	driver.Executor.Registered(driver, executorInfo, frameworkInfo, slaveInfo)
}
// reregistered handles the slave's ExecutorReregisteredMessage after a
// reconnect: it marks the driver connected again, refreshes the connection
// UUID and invokes the user's Reregistered callback.
func (driver *MesosExecutorDriver) reregistered(from *upid.UPID, pbMsg proto.Message) {
	// Lock is still needed to avoid health check race. Thought it will rarely happen.
	// TODO(yifan): serialize these function calls.
	driver.mutex.Lock()
	defer driver.mutex.Unlock()
	msg := pbMsg.(*mesosproto.ExecutorReregisteredMessage)
	slaveID := msg.GetSlaveId()
	slaveInfo := msg.GetSlaveInfo()
	if driver.status == mesosproto.Status_DRIVER_ABORTED {
		log.Infof("Ignoring re-registered message from slave %v, because the driver is aborted!\n", slaveID)
		return
	}
	log.Infof("Executor re-registered on slave %v\n", slaveID)
	driver.connected = true
	driver.connection = uuid.NewUUID()
	driver.Executor.Reregistered(driver, slaveInfo)
}
// reconnect handles a ReconnectExecutorMessage from a restarted slave: it
// re-registers with the slave's new UPID, re-sends every unacknowledged
// status update and task, and restarts slave health monitoring.
func (driver *MesosExecutorDriver) reconnect(from *upid.UPID, pbMsg proto.Message) {
	msg := pbMsg.(*mesosproto.ReconnectExecutorMessage)
	slaveID := msg.GetSlaveId()
	if driver.status == mesosproto.Status_DRIVER_ABORTED {
		log.Infof("Ignoring reconnect message from slave %v, because the driver is aborted!\n", slaveID)
		return
	}
	log.Infof("Received reconnect request from slave %v\n", slaveID)
	// The restarted slave has a new UPID.
	driver.slaveUPID = from
	message := &mesosproto.ReregisterExecutorMessage{
		ExecutorId:  driver.executorID,
		FrameworkId: driver.frameworkID,
	}
	// Send all unacknowledged updates.
	for _, u := range driver.updates {
		message.Updates = append(message.Updates, u)
	}
	// Send all unacknowledged tasks.
	for _, t := range driver.tasks {
		message.Tasks = append(message.Tasks, t)
	}
	// Send the message.
	if err := driver.messenger.Send(driver.slaveUPID, message); err != nil {
		// Fix: the format string previously had no arguments.
		log.Errorf("Failed to send %v: %v\n", message, err)
	}
	// Start monitoring the slave again.
	go driver.monitorSlave()
}
// runTask handles a RunTaskMessage from the slave: it records the task as
// unacknowledged (re-sent in reconnect) and hands it to the user's
// Executor.LaunchTask callback.
func (driver *MesosExecutorDriver) runTask(from *upid.UPID, pbMsg proto.Message) {
	msg := pbMsg.(*mesosproto.RunTaskMessage)
	task := msg.GetTask()
	taskID := task.GetTaskId()
	if driver.status == mesosproto.Status_DRIVER_ABORTED {
		log.Infof("Ignoring run task message for task %v because the driver is aborted!\n", taskID)
		return
	}
	// A duplicate launch is a protocol violation; log.Fatalf exits the
	// whole process.
	if _, ok := driver.tasks[taskID.String()]; ok {
		log.Fatalf("Unexpected duplicate task %v\n", taskID)
	}
	log.Infof("Executor asked to run task '%v'\n", taskID)
	driver.tasks[taskID.String()] = task
	driver.Executor.LaunchTask(driver, task)
}
// killTask handles a KillTaskMessage from the slave by forwarding the task
// id to the user's Executor.KillTask callback. Ignored when aborted.
func (driver *MesosExecutorDriver) killTask(from *upid.UPID, pbMsg proto.Message) {
	taskID := pbMsg.(*mesosproto.KillTaskMessage).GetTaskId()
	if driver.status == mesosproto.Status_DRIVER_ABORTED {
		log.Infof("Ignoring kill task message for task %v, because the driver is aborted!\n", taskID)
		return
	}
	log.Infof("Executor asked to kill task '%v'\n", taskID)
	driver.Executor.KillTask(driver, taskID)
}
// statusUpdateAcknowledgement handles the slave's acknowledgement of a
// status update: the corresponding entries in driver.updates and
// driver.tasks are forgotten so they are no longer re-sent on reconnect.
func (driver *MesosExecutorDriver) statusUpdateAcknowledgement(from *upid.UPID, pbMsg proto.Message) {
	msg := pbMsg.(*mesosproto.StatusUpdateAcknowledgementMessage)
	log.Infof("Receiving status update acknowledgement %v", msg)
	frameworkID := msg.GetFrameworkId()
	taskID := msg.GetTaskId()
	// Fix: renamed from "uuid", which shadowed the uuid package.
	ackUUID := uuid.UUID(msg.GetUuid())
	if driver.status == mesosproto.Status_DRIVER_ABORTED {
		log.Infof("Ignoring status update acknowledgement %v for task %v of framework %v because the driver is aborted!\n",
			ackUUID, taskID, frameworkID)
		// Fix: previously fell through and deleted the entries anyway,
		// contradicting the "Ignoring" log above.
		return
	}
	// Remove the corresponding update.
	delete(driver.updates, ackUUID.String())
	// Remove the corresponding task.
	delete(driver.tasks, taskID.String())
}
// frameworkMessage handles a FrameworkToExecutorMessage by delivering its
// payload to the user's Executor.FrameworkMessage callback as a string.
func (driver *MesosExecutorDriver) frameworkMessage(from *upid.UPID, pbMsg proto.Message) {
	msg := pbMsg.(*mesosproto.FrameworkToExecutorMessage)
	if driver.status == mesosproto.Status_DRIVER_ABORTED {
		log.Infof("Ignoring framework message because the driver is aborted!\n")
		return
	}
	log.Infof("Executor received framework message\n")
	driver.Executor.FrameworkMessage(driver, string(msg.GetData()))
}
// shutdown handles a ShutdownExecutorMessage: it notifies the user's
// Executor.Shutdown callback, marks the driver aborted and stops it.
func (driver *MesosExecutorDriver) shutdown(from *upid.UPID, pbMsg proto.Message) {
	_, ok := pbMsg.(*mesosproto.ShutdownExecutorMessage)
	if !ok {
		// The messenger routes by message type, so this should be unreachable.
		panic("Not a ShutdownExecutorMessage! This should not happen")
	}
	if driver.status == mesosproto.Status_DRIVER_ABORTED {
		log.Infof("Ignoring shutdown message because the driver is aborted!\n")
		return
	}
	log.Infof("Executor asked to shutdown\n")
	if !driver.local {
		// TODO(yifan): go kill.
	}
	driver.Executor.Shutdown(driver)
	// Marking aborted first makes Stop() report DRIVER_ABORTED to Join'ers.
	driver.status = mesosproto.Status_DRIVER_ABORTED
	driver.Stop()
}
// TODO(yifan): There is some race condition here because when slaveExited is called
// the reregistered may be also running. We cannot use a lock here because driver.Stop
// also acquires a lock.
// One way to fix this is to serialize these racy functions.
//
// slaveExited reacts to the health checker deciding the slave is gone.
// With checkpointing enabled it waits recoveryTimeout for a reconnect
// (recoveryTimeouts fires if none happens); otherwise it shuts down.
func (driver *MesosExecutorDriver) slaveExited() {
	if driver.status == mesosproto.Status_DRIVER_ABORTED {
		log.Infof("Ignoring slave exited event because the driver is aborted!\n")
		return
	}
	if driver.checkpoint && driver.connected {
		driver.connected = false
		log.Infof("Slave exited, but framework has checkpointing enabled. Waiting %v to reconnect with slave %v",
			driver.recoveryTimeout, driver.slaveID)
		// The current connection UUID is captured so a successful
		// re-registration (which replaces it) cancels the shutdown.
		time.AfterFunc(driver.recoveryTimeout, func() { driver.recoveryTimeouts(driver.connection) })
		return
	}
	log.Infof("Slave exited ... shutting down\n")
	driver.connected = false
	// Clean up
	driver.Executor.Shutdown(driver)
	driver.status = mesosproto.Status_DRIVER_ABORTED
	driver.Stop()
}
// monitorSlave blocks until the slave health checker reports that the
// unhealthy-check threshold was exceeded, then treats the slave as exited.
// It is run as a goroutine from Start and reconnect.
func (driver *MesosExecutorDriver) monitorSlave() {
	checker := NewSlaveHealthChecker(driver.slaveUPID, defaultHealthCheckThreshold, defaultHealthCheckDuration)
	driver.slaveHealthChecker = checker
	<-checker.C
	log.Warningf("Slave unhealthy count exceeds the threshold, assuming it has exited\n")
	checker.Stop()
	driver.slaveExited()
}
// TODO(yifan): There is some race condition here because when recoveryTimeouts is called
// the reregistered may be also running. We cannot use a lock here because driver.Stop
// also acquires a lock.
// One way to fix this is to serialize these racy functions.
//
// recoveryTimeouts fires after recoveryTimeout when checkpointing was
// enabled and the slave exited (see slaveExited). If the executor has not
// reconnected in the meantime — i.e. the connection UUID is unchanged —
// the executor is shut down.
func (driver *MesosExecutorDriver) recoveryTimeouts(connection uuid.UUID) {
	if driver.connected {
		return
	}
	// A re-registration replaces driver.connection, so equality means no
	// reconnect happened since this timer was armed.
	if bytes.Equal(connection, driver.connection) {
		log.Infof("Recovery timeout of %v exceeded; Shutting down\n", driver.recoveryTimeout)
		// Clean up
		driver.Executor.Shutdown(driver)
		driver.status = mesosproto.Status_DRIVER_ABORTED
		driver.Stop()
	}
}
// makeStatusUpdate stamps the given task status with the current time and
// this slave's id, then wraps it in a StatusUpdate carrying a fresh UUID.
func (driver *MesosExecutorDriver) makeStatusUpdate(taskStatus *mesosproto.TaskStatus) *mesosproto.StatusUpdate {
	timestamp := float64(time.Now().Unix())
	// Fill in all the fields on the status itself first.
	taskStatus.Timestamp = proto.Float64(timestamp)
	taskStatus.SlaveId = driver.slaveID
	return &mesosproto.StatusUpdate{
		FrameworkId: driver.frameworkID,
		ExecutorId:  driver.executorID,
		SlaveId:     driver.slaveID,
		Status:      taskStatus,
		Timestamp:   proto.Float64(timestamp),
		Uuid:        uuid.NewUUID(),
	}
}
|
package gotojs
import (
"net/http"
"time"
)
// ExampleContainer_handlerbinding shows how plain net/http handlers are
// exposed through a gotojs Container and served under /gotojs/<iface>/<name>.
func ExampleContainer_handlerbinding() {
	// Initialize the container.
	container := NewContainer()
	// Declare a Hello World handler function.
	container.ExposeHandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("Hello World! This data is not transformed into a JS object."))
	}, "main", "hello")
	// Declare a fake handler that always returns "404 page not found".
	container.ExposeHandler(http.NotFoundHandler(), "main", "notfound")
	// Start the server is separate go routine in parallel.
	go func() { container.Start(":8792", "/gotojs") }()
	time.Sleep(1 * time.Second) // Wait for the other go routine having the server up and running.
	dump(http.Get("http://localhost:8792/gotojs/main/hello"))
	dump(http.Get("http://localhost:8792/gotojs/main/notfound"))
	// Output:
	// Hello World! This data is not transformed into a JS object.
	// 404 page not found
}
|
package httpexpect
import (
"math"
"testing"
"github.com/stretchr/testify/assert"
)
// TestNumberFailed builds a Number on an already-failed chain and verifies
// that every subsequent operation keeps the chain failed.
func TestNumberFailed(t *testing.T) {
	chain := makeChain(newMockReporter(t))
	chain.fail("fail")
	num := &Number{chain, 0}
	num.chain.assertFailed(t)
	num.Path("$").chain.assertFailed(t)
	// None of these can succeed on a failed chain.
	num.Schema("")
	num.Equal(0)
	num.NotEqual(0)
	num.Gt(0)
	num.Ge(0)
	num.Lt(0)
	num.Le(0)
	num.InRange(0, 0)
}
// TestNumberGetters verifies Raw, Path and Schema on a fresh Number.
func TestNumberGetters(t *testing.T) {
	reporter := newMockReporter(t)
	num := NewNumber(reporter, 123.0)
	// Raw returns the wrapped float without failing the chain.
	assert.Equal(t, 123.0, num.Raw())
	num.chain.assertOK(t)
	num.chain.reset()
	// Path("$") selects the value itself.
	assert.Equal(t, 123.0, num.Path("$").Raw())
	num.chain.assertOK(t)
	num.chain.reset()
	// A matching JSON schema passes; a mismatching one fails.
	num.Schema(`{"type": "number"}`)
	num.chain.assertOK(t)
	num.chain.reset()
	num.Schema(`{"type": "object"}`)
	num.chain.assertFailed(t)
	num.chain.reset()
}
// TestNumberEqual exercises Equal and NotEqual, table-driven: each step
// applies one assertion and states whether the chain should stay OK.
func TestNumberEqual(t *testing.T) {
	reporter := newMockReporter(t)
	num := NewNumber(reporter, 1234)
	assert.Equal(t, 1234, int(num.Raw()))
	steps := []struct {
		apply func()
		ok    bool
	}{
		{func() { num.Equal(1234) }, true},
		{func() { num.Equal(4321) }, false},
		{func() { num.NotEqual(4321) }, true},
		{func() { num.NotEqual(1234) }, false},
	}
	for _, s := range steps {
		s.apply()
		if s.ok {
			num.chain.assertOK(t)
		} else {
			num.chain.assertFailed(t)
		}
		num.chain.reset()
	}
}
// TestNumberEqualDelta exercises EqualDelta/NotEqualDelta around 1234.5,
// table-driven: a delta of 0.3 covers both targets, a delta of 0.1 neither.
func TestNumberEqualDelta(t *testing.T) {
	reporter := newMockReporter(t)
	num := NewNumber(reporter, 1234.5)
	steps := []struct {
		apply func()
		ok    bool
	}{
		{func() { num.EqualDelta(1234.3, 0.3) }, true},
		{func() { num.EqualDelta(1234.7, 0.3) }, true},
		{func() { num.EqualDelta(1234.3, 0.1) }, false},
		{func() { num.EqualDelta(1234.7, 0.1) }, false},
		{func() { num.NotEqualDelta(1234.3, 0.3) }, false},
		{func() { num.NotEqualDelta(1234.7, 0.3) }, false},
		{func() { num.NotEqualDelta(1234.3, 0.1) }, true},
		{func() { num.NotEqualDelta(1234.7, 0.1) }, true},
	}
	for _, s := range steps {
		s.apply()
		if s.ok {
			num.chain.assertOK(t)
		} else {
			num.chain.assertFailed(t)
		}
		num.chain.reset()
	}
}
// TestNumberEqualNaN verifies that NaN — as the stored value, the target,
// or the delta — always fails equality comparisons rather than passing
// vacuously.
func TestNumberEqualNaN(t *testing.T) {
	reporter := newMockReporter(t)
	nanValueEq := NewNumber(reporter, math.NaN())
	nanValueEq.Equal(1234.5)
	nanValueEq.chain.assertFailed(t)
	nanTargetEq := NewNumber(reporter, 1234.5)
	nanTargetEq.Equal(math.NaN())
	nanTargetEq.chain.assertFailed(t)
	nanValueDelta := NewNumber(reporter, math.NaN())
	nanValueDelta.EqualDelta(1234.0, 0.1)
	nanValueDelta.chain.assertFailed(t)
	nanTargetDelta := NewNumber(reporter, 1234.5)
	nanTargetDelta.EqualDelta(math.NaN(), 0.1)
	nanTargetDelta.chain.assertFailed(t)
	nanDelta := NewNumber(reporter, 1234.5)
	nanDelta.EqualDelta(1234.5, math.NaN())
	nanDelta.chain.assertFailed(t)
	nanValueNotDelta := NewNumber(reporter, math.NaN())
	nanValueNotDelta.NotEqualDelta(1234.0, 0.1)
	nanValueNotDelta.chain.assertFailed(t)
	nanTargetNotDelta := NewNumber(reporter, 1234.5)
	nanTargetNotDelta.NotEqualDelta(math.NaN(), 0.1)
	nanTargetNotDelta.chain.assertFailed(t)
	nanDeltaNot := NewNumber(reporter, 1234.5)
	nanDeltaNot.NotEqualDelta(1234.5, math.NaN())
	nanDeltaNot.chain.assertFailed(t)
}
// TestNumberGreater exercises Gt (strict) and Ge (inclusive), table-driven.
func TestNumberGreater(t *testing.T) {
	reporter := newMockReporter(t)
	num := NewNumber(reporter, 1234)
	steps := []struct {
		apply func()
		ok    bool
	}{
		{func() { num.Gt(1234 - 1) }, true},
		{func() { num.Gt(1234) }, false},
		{func() { num.Ge(1234 - 1) }, true},
		{func() { num.Ge(1234) }, true},
		{func() { num.Ge(1234 + 1) }, false},
	}
	for _, s := range steps {
		s.apply()
		if s.ok {
			num.chain.assertOK(t)
		} else {
			num.chain.assertFailed(t)
		}
		num.chain.reset()
	}
}
// TestNumberLesser exercises Lt (strict) and Le (inclusive), table-driven.
func TestNumberLesser(t *testing.T) {
	reporter := newMockReporter(t)
	num := NewNumber(reporter, 1234)
	steps := []struct {
		apply func()
		ok    bool
	}{
		{func() { num.Lt(1234 + 1) }, true},
		{func() { num.Lt(1234) }, false},
		{func() { num.Le(1234 + 1) }, true},
		{func() { num.Le(1234) }, true},
		{func() { num.Le(1234 - 1) }, false},
	}
	for _, s := range steps {
		s.apply()
		if s.ok {
			num.chain.assertOK(t)
		} else {
			num.chain.assertFailed(t)
		}
		num.chain.reset()
	}
}
// TestNumberInRange checks that InRange is inclusive at both ends and fails
// for ranges that exclude the value or are inverted.
func TestNumberInRange(t *testing.T) {
	reporter := newMockReporter(t)
	num := NewNumber(reporter, 1234)
	steps := []struct {
		apply func()
		ok    bool
	}{
		{func() { num.InRange(1234, 1234) }, true},
		{func() { num.InRange(1234-1, 1234) }, true},
		{func() { num.InRange(1234, 1234+1) }, true},
		{func() { num.InRange(1234+1, 1234+2) }, false},
		{func() { num.InRange(1234-2, 1234-1) }, false},
		{func() { num.InRange(1234+1, 1234-1) }, false},
	}
	for _, s := range steps {
		s.apply()
		if s.ok {
			num.chain.assertOK(t)
		} else {
			num.chain.assertFailed(t)
		}
		num.chain.reset()
	}
}
// TestNumberConvertEqual checks that Equal/NotEqual accept any numeric type
// but reject non-numeric arguments such as strings.
func TestNumberConvertEqual(t *testing.T) {
	reporter := newMockReporter(t)
	num := NewNumber(reporter, 1234)
	steps := []struct {
		apply func()
		ok    bool
	}{
		{func() { num.Equal(int64(1234)) }, true},
		{func() { num.Equal(float32(1234)) }, true},
		{func() { num.Equal("1234") }, false},
		{func() { num.NotEqual(int64(4321)) }, true},
		{func() { num.NotEqual(float32(4321)) }, true},
		{func() { num.NotEqual("4321") }, false},
	}
	for _, s := range steps {
		s.apply()
		if s.ok {
			num.chain.assertOK(t)
		} else {
			num.chain.assertFailed(t)
		}
		num.chain.reset()
	}
}
// TestNumberConvertGreater checks that Gt/Ge accept any numeric type but
// reject non-numeric arguments such as strings.
func TestNumberConvertGreater(t *testing.T) {
	reporter := newMockReporter(t)
	num := NewNumber(reporter, 1234)
	steps := []struct {
		apply func()
		ok    bool
	}{
		{func() { num.Gt(int64(1233)) }, true},
		{func() { num.Gt(float32(1233)) }, true},
		{func() { num.Gt("1233") }, false},
		{func() { num.Ge(int64(1233)) }, true},
		{func() { num.Ge(float32(1233)) }, true},
		{func() { num.Ge("1233") }, false},
	}
	for _, s := range steps {
		s.apply()
		if s.ok {
			num.chain.assertOK(t)
		} else {
			num.chain.assertFailed(t)
		}
		num.chain.reset()
	}
}
// TestNumberConvertLesser checks that Lt/Le accept any numeric type but
// reject non-numeric arguments such as strings.
func TestNumberConvertLesser(t *testing.T) {
	reporter := newMockReporter(t)
	num := NewNumber(reporter, 1234)
	steps := []struct {
		apply func()
		ok    bool
	}{
		{func() { num.Lt(int64(1235)) }, true},
		{func() { num.Lt(float32(1235)) }, true},
		{func() { num.Lt("1235") }, false},
		{func() { num.Le(int64(1235)) }, true},
		{func() { num.Le(float32(1235)) }, true},
		{func() { num.Le("1235") }, false},
	}
	for _, s := range steps {
		s.apply()
		if s.ok {
			num.chain.assertOK(t)
		} else {
			num.chain.assertFailed(t)
		}
		num.chain.reset()
	}
}
// TestNumberConvertInRange checks that InRange accepts mixed numeric bound
// types but rejects strings and nil.
func TestNumberConvertInRange(t *testing.T) {
	reporter := newMockReporter(t)
	num := NewNumber(reporter, 1234)
	steps := []struct {
		apply func()
		ok    bool
	}{
		{func() { num.InRange(int64(1233), float32(1235)) }, true},
		{func() { num.InRange(int64(1233), "1235") }, false},
		{func() { num.InRange(nil, 1235) }, false},
	}
	for _, s := range steps {
		s.apply()
		if s.ok {
			num.chain.assertOK(t)
		} else {
			num.chain.assertFailed(t)
		}
		num.chain.reset()
	}
}
|
package pixivapi
import (
"os"
"testing"
)
// TestClient_IllustDetail logs in with credentials from the environment and
// fetches two known illustrations, checking that neither call errors.
// NOTE(review): requires network access plus PIXIV_USERNAME/PIXIV_PASSWORD.
func TestClient_IllustDetail(t *testing.T) {
	c := New()
	_, err := c.Login(os.Getenv("PIXIV_USERNAME"), os.Getenv("PIXIV_PASSWORD"))
	if err != nil {
		// Without a session the detail calls below cannot succeed.
		t.Fatalf("Client.Login() experienced error %v", err)
	}
	x, err := c.IllustDetail(54642357)
	if err != nil {
		t.Errorf("Client.Error() experienced error %v", err)
	} else {
		// Fix: x was previously dereferenced before the error check,
		// which panics with a nil pointer when the request fails.
		t.Log(x.Illust.PageCount)
	}
	x, err = c.IllustDetail(62783959)
	if err != nil {
		t.Errorf("Client.Error() experienced error %v", err)
	} else {
		t.Log(x.Illust.PageCount)
	}
}
// TestIllustDetail duplicates TestClient_IllustDetail.
// NOTE(review): consider removing one of the two identical tests.
func TestIllustDetail(t *testing.T) {
	c := New()
	_, err := c.Login(os.Getenv("PIXIV_USERNAME"), os.Getenv("PIXIV_PASSWORD"))
	if err != nil {
		// Without a session the detail calls below cannot succeed.
		t.Fatalf("Client.Login() experienced error %v", err)
	}
	x, err := c.IllustDetail(54642357)
	if err != nil {
		t.Errorf("Client.Error() experienced error %v", err)
	} else {
		// Fix: x was previously dereferenced before the error check,
		// which panics with a nil pointer when the request fails.
		t.Log(x.Illust.PageCount)
	}
	x, err = c.IllustDetail(62783959)
	if err != nil {
		t.Errorf("Client.Error() experienced error %v", err)
	} else {
		t.Log(x.Illust.PageCount)
	}
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package analysis_test
import (
"strings"
"testing"
"github.com/google/gapid/core/assert"
"github.com/google/gapid/core/log"
"github.com/google/gapid/gapil/analysis"
)
// TestReferenceGlobalAnalysis checks the static analysis of the possible
// values of a global reference (G) after various command/subroutine bodies.
func TestReferenceGlobalAnalysis(t *testing.T) {
	ctx := log.Testing(t)
	// Shared preamble for every case: a global ref!X whose fields cover a
	// scalar, an embedded value class and a nested reference.
	common := `
class X {
u32 a
Y b
ref!Y c
}
class Y {
u32 p
}
ref!X G
`
	for _, test := range []struct {
		source   string // api source appended to common before compiling
		expected string // whitespace-normalized print of G's possible values
	}{
		{
			``,
			`<nil>`,
		}, {
			`cmd void c() { G = new!X(1, Y(2), null) }`,
			`ref!X{ a: [0x1] b: Y{ p: [0x2] } c: <nil> }`,
		}, {
			`cmd void c() { G = new!X() }`,
			`ref!X{ a: [0x0] b: Y{ p: [0x0] } c: <nil> }`,
		}, {
			`cmd void c() { x := new!X(1, Y(2), null) G = x x.a = 2 x.c = new!Y(3)}`,
			`ref!X{ a: [0x2] b: Y{ p: [0x2] } c: ref!Y{ p: [0x3] } }`,
		}, {
			`cmd void c() { G = new!X(1, Y(2), new!Y(3)) }
cmd void d() { G = new!X(2, Y(3), new!Y(4)) }`,
			`ref!X{ a: [0x1-0x2] b: Y{ p: [0x2-0x3] } c: ref!Y{ p: [0x3-0x4] } }`,
		}, {
			`cmd void c() { G = new!X(1, Y(2), new!Y(3)) }
cmd void d() { p := G q := p r := q r.a = 3 }`,
			`ref!X{ a: [0x1] [0x3] b: Y{ p: [0x2] } c: ref!Y{ p: [0x3] } }`,
		}, {
			`cmd void c() { G = null }`,
			`<nil>`,
		}, {
			`cmd void c() { if G == null { G = new!X(1, Y(2), null) } }`,
			`ref!X{ a: [0x1] b: Y{ p: [0x2] } c: <nil> }`,
		}, {
			`cmd void c() { if G != null { G = new!X(1, Y(2), null) } }`,
			`<nil>`,
		}, {
			`sub void uncalled(ref!X x) { G = x }`,
			`<nil>`,
		}, {
			`sub void s(ref!X x) { G = x }
cmd void c() { s(new!X(1, Y(2), null)) }`,
			`ref!X{ a: [0x1] b: Y{ p: [0x2] } c: <nil> }`,
		}, {
			`sub ref!X s() { return new!X(1, Y(2), null) }
cmd void c() { G = s() }`,
			`ref!X{ a: [0x1] b: Y{ p: [0x2] } c: <nil> }`,
		},
	} {
		ctx := log.V{"source": test.source}.Bind(ctx)
		api, mappings, err := compile(ctx, common+" "+test.source)
		assert.For(ctx, "err").ThatError(err).Succeeded()
		res := analysis.Analyze(api, mappings)
		got := res.Globals[api.Globals[0]].(*analysis.ReferenceValue)
		// Collapse whitespace so the comparison ignores formatting.
		s := strings.Join(strings.Fields(got.Print(res)), " ")
		assert.For(ctx, "s").ThatString(s).Equals(test.expected)
	}
}
|
package main
import (
"fmt"
"github.com/BTBurke/gitkit"
"log"
"os"
)
// main starts an unauthenticated demo gitkit server exposing both HTTP and
// SSH endpoints (ports 8080 and 2222 per the printed banner).
func main() {
	fmt.Println("This example shows a basic dual HTTP and SSH server running on ports 8080 and 2222 respectively.\n\n**Warning** Don't use this model in production as nothing is secured. You should look at the other examples for how to configure authorization functions, TLS, and other security measures.\n")
	// NOTE(review): this logger is constructed and immediately discarded, so
	// the call has no effect. Presumably it was meant to be installed or
	// passed to the server — confirm intent before removing.
	log.New(os.Stdout, "", 0)
	server := gitkit.New()
	server.Run()
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package channel provides the implemention of channel-based data-link layer
// endpoints. Such endpoints allow injection of inbound packets and store
// outbound packets in a channel.
package channel
import (
"context"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/stack"
)
// Notification is the interface for receiving notification from the packet
// queue.
type Notification interface {
	// WriteNotify will be called when a write happens to the queue.
	WriteNotify()
}

// NotificationHandle is an opaque handle to the registered notification target.
// It can be used to unregister the notification when no longer interested.
//
// +stateify savable
type NotificationHandle struct {
	// n is the wrapped notification target; only the queue touches it.
	n Notification
}

// queue is the outbound packet queue: a bounded channel plus the set of
// write-notification targets and a closed flag, both guarded by mu.
type queue struct {
	// c is the outbound packet channel.
	c chan stack.PacketBufferPtr
	// mu guards notify and closed.
	mu sync.RWMutex
	// +checklocks:mu
	notify []*NotificationHandle
	// +checklocks:mu
	closed bool
}
// Close closes the queue for further writes: the channel is closed and the
// closed flag is set under the write lock, so concurrent Write calls (which
// hold the read lock while checking closed and sending) cannot race a send
// onto the closed channel.
func (q *queue) Close() {
	q.mu.Lock()
	defer q.mu.Unlock()
	close(q.c)
	q.closed = true
}

// Read pops one packet from the outbound channel without blocking; it
// returns nil when the queue is empty.
func (q *queue) Read() stack.PacketBufferPtr {
	select {
	case p := <-q.c:
		return p
	default:
		return nil
	}
}

// ReadContext blocks until a packet is available or ctx is cancelled, in
// which case it returns nil.
func (q *queue) ReadContext(ctx context.Context) stack.PacketBufferPtr {
	select {
	case pkt := <-q.c:
		return pkt
	case <-ctx.Done():
		return nil
	}
}
// Write enqueues pkt on the outbound channel, taking a reference for the
// queue. It returns ErrClosedForSend after Close, or ErrNoBufferSpace when
// the channel is full.
func (q *queue) Write(pkt stack.PacketBufferPtr) tcpip.Error {
	// q holds the PacketBuffer.
	q.mu.RLock()
	if q.closed {
		q.mu.RUnlock()
		return &tcpip.ErrClosedForSend{}
	}
	wrote := false
	select {
	case q.c <- pkt.IncRef():
		wrote = true
	default:
		// Channel full: IncRef above was evaluated on select entry, so
		// balance it by dropping the reference.
		pkt.DecRef()
	}
	// Snapshot the notify list under the read lock so WriteNotify can run
	// unlocked below.
	notify := q.notify
	q.mu.RUnlock()
	if wrote {
		// Send notification outside of lock.
		for _, h := range notify {
			h.n.WriteNotify()
		}
		return nil
	}
	return &tcpip.ErrNoBufferSpace{}
}
// Num returns the number of packets currently sitting in the outbound
// channel.
func (q *queue) Num() int {
	return len(q.c)
}

// AddNotify registers notify as a write-notification target and returns a
// handle that can later be passed to RemoveNotify.
func (q *queue) AddNotify(notify Notification) *NotificationHandle {
	handle := &NotificationHandle{n: notify}
	q.mu.Lock()
	q.notify = append(q.notify, handle)
	q.mu.Unlock()
	return handle
}
// RemoveNotify unregisters the notification target identified by handle.
func (q *queue) RemoveNotify(handle *NotificationHandle) {
	q.mu.Lock()
	defer q.mu.Unlock()
	// Build a fresh slice rather than filtering in place: the previous
	// slice may still be read outside the lock by a concurrent Write
	// notification pass.
	kept := make([]*NotificationHandle, 0, len(q.notify))
	for _, candidate := range q.notify {
		if candidate == handle {
			continue
		}
		kept = append(kept, candidate)
	}
	q.notify = kept
}
// Compile-time checks that *Endpoint implements the link-layer interfaces.
var _ stack.LinkEndpoint = (*Endpoint)(nil)
var _ stack.GSOEndpoint = (*Endpoint)(nil)

// Endpoint is link layer endpoint that stores outbound packets in a channel
// and allows injection of inbound packets.
type Endpoint struct {
	// mtu and linkAddr are fixed at construction time (see New).
	mtu                uint32
	linkAddr           tcpip.LinkAddress
	LinkEPCapabilities stack.LinkEndpointCapabilities
	SupportedGSOKind   stack.SupportedGSO

	// mu guards dispatcher.
	mu sync.RWMutex
	// +checklocks:mu
	dispatcher stack.NetworkDispatcher

	// Outbound packet queue.
	q *queue
}
// New creates a new channel endpoint.
//
// size is the capacity of the outbound packet queue, mtu is the reported
// link MTU, and linkAddr is the endpoint's link-layer address.
func New(size int, mtu uint32, linkAddr tcpip.LinkAddress) *Endpoint {
	outbound := &queue{c: make(chan stack.PacketBufferPtr, size)}
	return &Endpoint{
		mtu:      mtu,
		linkAddr: linkAddr,
		q:        outbound,
	}
}
// Close closes e. Further packet injections will return an error, and all pending
// packets are discarded. Close may be called concurrently with WritePackets.
func (e *Endpoint) Close() {
	e.q.Close()
	// Drop any packets that were already queued so their references are
	// released.
	e.Drain()
}

// Read does non-blocking read one packet from the outbound packet queue.
func (e *Endpoint) Read() stack.PacketBufferPtr {
	return e.q.Read()
}

// ReadContext does blocking read for one packet from the outbound packet queue.
// It can be cancelled by ctx, and in this case, it returns nil.
func (e *Endpoint) ReadContext(ctx context.Context) stack.PacketBufferPtr {
	return e.q.ReadContext(ctx)
}
// Drain removes all outbound packets from the channel, releasing each one's
// reference, and returns how many were discarded.
func (e *Endpoint) Drain() int {
	drained := 0
	for {
		pkt := e.Read()
		if pkt.IsNil() {
			return drained
		}
		pkt.DecRef()
		drained++
	}
}

// NumQueued returns the number of packets queued for outbound.
func (e *Endpoint) NumQueued() int {
	return e.q.Num()
}
// InjectInbound injects an inbound packet. If the endpoint is not attached, the
// packet is not delivered.
func (e *Endpoint) InjectInbound(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {
	e.mu.RLock()
	dispatcher := e.dispatcher
	e.mu.RUnlock()
	if dispatcher == nil {
		return
	}
	dispatcher.DeliverNetworkPacket(protocol, pkt)
}
// Attach saves the stack network-layer dispatcher for use later when packets
// are injected.
func (e *Endpoint) Attach(dispatcher stack.NetworkDispatcher) {
	e.mu.Lock()
	e.dispatcher = dispatcher
	e.mu.Unlock()
}

// IsAttached implements stack.LinkEndpoint.IsAttached.
func (e *Endpoint) IsAttached() bool {
	e.mu.RLock()
	defer e.mu.RUnlock()
	attached := e.dispatcher != nil
	return attached
}
// MTU implements stack.LinkEndpoint.MTU. It returns the value initialized
// during construction.
func (e *Endpoint) MTU() uint32 {
	return e.mtu
}

// Capabilities implements stack.LinkEndpoint.Capabilities.
func (e *Endpoint) Capabilities() stack.LinkEndpointCapabilities {
	return e.LinkEPCapabilities
}

// GSOMaxSize implements stack.GSOEndpoint.
// The value is a fixed 1<<15 (32 KiB).
func (*Endpoint) GSOMaxSize() uint32 {
	return 1 << 15
}

// SupportedGSO implements stack.GSOEndpoint.
func (e *Endpoint) SupportedGSO() stack.SupportedGSO {
	return e.SupportedGSOKind
}

// MaxHeaderLength returns the maximum size of the link layer header. Given it
// doesn't have a header, it just returns 0.
func (*Endpoint) MaxHeaderLength() uint16 {
	return 0
}

// LinkAddress returns the link address of this endpoint.
func (e *Endpoint) LinkAddress() tcpip.LinkAddress {
	return e.linkAddr
}
// WritePackets stores outbound packets into the channel.
// Multiple concurrent calls are permitted.
//
// It returns the number of packets enqueued. An error other than
// ErrNoBufferSpace is surfaced only when it occurs on the very first
// packet; otherwise the loop stops early and reports the count written so
// far with a nil error.
func (e *Endpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) {
	n := 0
	for _, pkt := range pkts.AsSlice() {
		if err := e.q.Write(pkt); err != nil {
			if _, ok := err.(*tcpip.ErrNoBufferSpace); !ok && n == 0 {
				return 0, err
			}
			break
		}
		n++
	}
	return n, nil
}
// Wait implements stack.LinkEndpoint.Wait.
// The channel endpoint has no worker goroutines, so there is nothing to
// wait for.
func (*Endpoint) Wait() {}

// AddNotify adds a notification target for receiving event about outgoing
// packets.
func (e *Endpoint) AddNotify(notify Notification) *NotificationHandle {
	return e.q.AddNotify(notify)
}

// RemoveNotify removes handle from the list of notification targets.
func (e *Endpoint) RemoveNotify(handle *NotificationHandle) {
	e.q.RemoveNotify(handle)
}

// ARPHardwareType implements stack.LinkEndpoint.ARPHardwareType.
func (*Endpoint) ARPHardwareType() header.ARPHardwareType {
	return header.ARPHardwareNone
}

// AddHeader implements stack.LinkEndpoint.AddHeader.
// The channel endpoint adds no link-layer header.
func (*Endpoint) AddHeader(stack.PacketBufferPtr) {}

// ParseHeader implements stack.LinkEndpoint.ParseHeader.
// There is no header to parse, so it always succeeds.
func (*Endpoint) ParseHeader(stack.PacketBufferPtr) bool { return true }
|
package main
import "fmt"
// func IsEqual(x, y int) bool {
// return x == y
// }
// func IsEqual(x, y interface{}) bool {
// func IsEqual(x, y any) bool {
// return x == y
// }
// IsEqual reports whether x and y compare equal. The comparable constraint
// allows any type supporting ==.
func IsEqual[T comparable](x, y T) bool {
	equal := x == y
	return equal
}
// main demonstrates the generic IsEqual with int and string arguments.
func main() {
	intsEqual := IsEqual(10, 12)
	fmt.Println(intsEqual)
	stringsEqual := IsEqual("hello", "hello")
	fmt.Println(stringsEqual)
	// Mixing types does not compile: IsEqual("hello", 12) has no common T.
}
|
package tezos
import (
"fmt"
"net/url"
"github.com/trustwallet/blockatlas/pkg/blockatlas"
)
// Client is a Tezos explorer API client; it embeds blockatlas.Request for
// the HTTP GET plumbing used by its methods.
type Client struct {
	blockatlas.Request
}
// GetTxsOfAddress fetches up to 1000 operations for the given address and
// returns the transactions contained in them.
func (c *Client) GetTxsOfAddress(address string) ([]Tx, error) {
	var ops Op
	query := url.Values{"limit": {"1000"}, "offset": {"0"}}
	err := c.Get(&ops, fmt.Sprintf("account/%s/op", address), query)
	return ops.Txs, err
}

// GetCurrentBlock returns the height of the current chain head.
func (c *Client) GetCurrentBlock() (int64, error) {
	var head Head
	query := url.Values{"limit": {"1000"}, "offset": {"0"}}
	err := c.Get(&head, "block/head", query)
	return head.Height, err
}

// GetBlockByNumber returns the transactions of the block at height num.
func (c *Client) GetBlockByNumber(num int64) ([]Tx, error) {
	var block Op
	query := url.Values{"limit": {"1000"}, "offset": {"0"}}
	err := c.Get(&block, fmt.Sprintf("block/%d/op", num), query)
	return block.Txs, err
}

// GetAccount fetches the account record for the given address.
func (c *Client) GetAccount(address string) (Account, error) {
	var result Account
	err := c.Get(&result, fmt.Sprintf("account/%s", address), nil)
	return result, err
}
|
package main
import (
"flag"
"fmt"
"log"
"os"
"runtime"
"runtime/pprof"
"sort"
"strconv"
"time"
)
// cpuprofile and memprofile name the files CPU and heap profiles are written
// to; profiling is skipped when the corresponding flag is left empty.
// Bug fix: the cpuprofile usage string was missing its closing backtick,
// which the flag package uses to mark the argument placeholder.
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`")
var memprofile = flag.String("memprofile", "", "write memory profile to `file`")
// ProcessFiles reads a dictionary from dictPath and, for every word in
// wordPath, finds the 10 closest dictionary words by Levenshtein distance,
// appending the results to outputPath.
//
// It returns true on completion; fatal I/O errors terminate the process via
// log.Fatalf.
func ProcessFiles(dictPath, wordPath, outputPath string) bool {
	dictLines, err := ReadLinesFromFile(dictPath)
	if err != nil {
		log.Fatalf("ReadLinesFromFile: %s", err)
	}
	mainwordLines, err := ReadLinesFromFile(wordPath)
	if err != nil {
		log.Fatalf("ReadLinesFromFile: %s", err)
	}
	for index, mainword := range mainwordLines {
		// Progress indicator every 100 words.
		if index%100 == 0 {
			fmt.Println("Processed " + strconv.Itoa(index) + " words")
		}
		var sWords = CreateWordsWithDistanceFromLines(dictLines, mainword, LevenshteinDistance)
		var outStrings []string
		outStrings = append(outStrings, "\nWord # "+strconv.Itoa(index)+"\nList of top 10 nearest words by Levenstein Distance to word "+mainword)
		sort.Sort(sWords)
		// Bug fix: slicing sWords[:10] panics when the dictionary holds
		// fewer than 10 entries, so clamp the cut-off first.
		top := 10
		if len(sWords) < top {
			top = len(sWords)
		}
		for i, sWord := range sWords[:top] {
			outStrings = append(outStrings, "\n#"+strconv.Itoa(i)+"\t"+sWord.name+"\t"+strconv.Itoa(sWord.distance))
		}
		err := WriteLinesToFile(outputPath, outStrings)
		if err != nil {
			log.Fatalf("WriteLinesToFile: %s", err)
		}
	}
	return true
}
// main optionally records CPU/heap profiles while running the Levenshtein
// nearest-word search over the hard-coded input files.
func main() {
	var dictPath = "/home/max/go/src/lab1_arch/20k.txt"
	var wordPath = "/home/max/go/src/lab1_arch/1-1000.txt"
	var outputPath = "/home/max/go/src/lab1_arch/find_lev_" + time.Now().String() + ".txt"
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal("could not create CPU profile: ", err)
		}
		// Bug fix: the profile file was never closed. The deferred Close
		// registered before StopCPUProfile runs after it (LIFO), so the
		// file is closed only once profiling has been flushed.
		defer f.Close()
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal("could not start CPU profile: ", err)
		}
		defer pprof.StopCPUProfile()
	}
	var isProcessed = ProcessFiles(dictPath, wordPath, outputPath)
	if isProcessed {
		fmt.Println("Work finished!!")
	}
	if *memprofile != "" {
		f1, err := os.Create(*memprofile)
		if err != nil {
			log.Fatal("could not create memory profile: ", err)
		}
		runtime.GC() // get up-to-date statistics
		if err := pprof.WriteHeapProfile(f1); err != nil {
			log.Fatal("could not write memory profile: ", err)
		}
		f1.Close()
	}
}
|
package controllers
import (
"github.com/revel/revel"
"github.com/MoonBabyLabs/kekcontact"
"encoding/json"
"github.com/MoonBabyLabs/kekaccess"
"github.com/MoonBabyLabs/kekspace"
)
// App is the root revel controller for the application.
type App struct {
	*revel.Controller
}
// Index renders the default landing template for the app.
func (c App) Index() revel.Result {
	return c.Render()
}
// Token exchanges an access secret for a signed JWT.
//
// The JSON body must contain an "access_token" field holding the shared
// secret; all remaining fields are passed through as claims in the issued
// token. On success it renders {"token": <jwt>}.
func (c App) Token() revel.Result {
	content := make(map[string]string)
	// NOTE(review): the unmarshal error is still ignored here; a malformed
	// body silently yields an empty claim map — confirm that is acceptable.
	json.Unmarshal(c.Params.JSON, &content)
	passedSecret := content["access_token"]
	delete(content, "access_token")
	token, err := kekaccess.Access{}.NewJwt(passedSecret, content)
	if err != nil {
		// Bug fix: the error result was previously discarded and execution
		// fell through to render a bogus token response. Return it.
		return c.RenderError(err)
	}
	jsonString := map[string]string{}
	jsonString["token"] = token
	return c.RenderJSON(jsonString)
}
// Install renders the installation page, but only while the workspace has
// not yet been configured (a configured space has a non-empty name).
func (c App) Install() revel.Result {
	space, _ := kekspace.Kekspace{}.Load()
	if space.Name == "" {
		return c.Render()
	}
	return c.NotFound("page not found")
}
// SaveInstall processes the installation form: it builds the owner contact
// (personal plus company details) from the posted fields, creates the
// keks pace, generates an access secret scoped to the new space, and renders
// the resulting all-permissions access token for the operator to store.
func (c App) SaveInstall() revel.Result {
	data := c.Params.Form
	// Personal contact details of the workspace owner.
	owner := kekcontact.Contact{}
	owner.Name = data.Get("name")
	owner.Email = data.Get("email")
	owner.Phone = data.Get("phone")
	owner.Address = data.Get("address")
	owner.City = data.Get("city")
	owner.CountryCode = data.Get("country_code")
	owner.PostalCode = data.Get("postal_code")
	owner.Region = data.Get("region")
	// Company details use the "company_" prefixed form fields.
	owner.Company = kekcontact.Company{
		Name:        data.Get("company_name"),
		Phone:       data.Get("company_phone"),
		Email:       data.Get("company_email"),
		Address:     data.Get("company_address"),
		City:        data.Get("company_city"),
		CountryCode: data.Get("company_country_code"),
		PostalCode:  data.Get("company_postal_code"),
		Region:      data.Get("company_region"),
	}
	// The owner is also registered as the space's first contact.
	ks, ksErr := kekspace.Kekspace{}.New(data.Get("kekspace"), "", owner, []kekcontact.Contact{owner})
	if ksErr != nil {
		return c.RenderError(ksErr)
	}
	ka := kekaccess.Access{}
	// NOTE(review): GenerateSecret's outcome is not checked — confirm it
	// cannot fail or that failure is surfaced elsewhere.
	ka.GenerateSecret(ks.KekId + "/")
	// All five permission flags enabled for the bootstrap token.
	token := ka.AddAccessToken(true, true, true, true, true)
	return c.RenderHTML("<p>Your KekBoom Installation was a success. Store this Access code in a safe place. Your apps will need to pass it through via different requests: <br />" + token + "</p>")
}
|
package model
// Type describes a reflected model type: its identity (name, value, package
// path), pointer-ness, and helpers for instantiating values and walking
// element types.
type Type interface {
	// GetName returns the type name.
	GetName() string
	// GetValue returns the type value.
	GetValue() int
	// GetPkgPath returns the package path the type is declared in.
	GetPkgPath() string
	// IsPtrType reports whether this is a pointer type.
	IsPtrType() bool
	// Interface instantiates a data value of this type.
	Interface() (Value, error)
	// Elem returns the element type: for a non-slice it returns the type
	// itself; for a slice it returns the slice's element type.
	Elem() Type
	// IsBasic reports whether this is a basic type (neither a struct nor a
	// slice of structs).
	IsBasic() bool
}
// CompareType reports whether l and r describe the same type: equal name,
// value, package path and pointer-ness.
func CompareType(l, r Type) bool {
	if l.GetName() != r.GetName() {
		return false
	}
	if l.GetValue() != r.GetValue() {
		return false
	}
	if l.GetPkgPath() != r.GetPkgPath() {
		return false
	}
	return l.IsPtrType() == r.IsPtrType()
}
|
package server
import (
"github.com/gin-gonic/gin"
"go.rock.com/rock-platform/rock/server/database"
"go.rock.com/rock-platform/rock/server/log"
middleware "go.rock.com/rock-platform/rock/server/middleware"
"go.rock.com/rock-platform/rock/server/routerEngine"
)
// Server aggregates the application's top-level components: logger, HTTP
// router engine, and database engine.
type Server struct {
	Logger       *log.Logger           `json:"logger"`
	RouterEngine *routerEngine.Routers `json:"router_engine"`
	DBEngine     *database.DBEngine    `json:"db_engine"`
}

// SingleServer is the process-wide Server singleton managed by GetServer.
var SingleServer *Server

// skipLogPath lists request paths excluded from access logging (health
// check and swagger assets).
var skipLogPath = []string{"/health", "/swagger/index.html", "/swagger/swagger-ui.css",
	"/swagger/swagger-ui-standalone-preset.js", "/swagger/swagger-ui-bundle.js", "/swagger/swagger-ui.css.map",
	"/swagger/doc.json", "/swagger/swagger-ui-standalone-preset.js.map", "/swagger/swagger-ui-bundle.js.map",
	"/swagger/favicon-32x32.png", "/swagger/favicon-16x16.png"}

// skipAuthPath lists request paths excluded from authentication (health
// check, auth endpoints, and swagger assets).
var skipAuthPath = []string{"/health", "/v1/auth/login", "/v1/auth/reset", "/v1/auth/pwd", "/swagger/index.html", "/swagger/swagger-ui.css",
	"/swagger/swagger-ui-standalone-preset.js", "/swagger/swagger-ui-bundle.js", "/swagger/swagger-ui.css.map",
	"/swagger/doc.json", "/swagger/swagger-ui-standalone-preset.js.map", "/swagger/swagger-ui-bundle.js.map",
	"/swagger/favicon-32x32.png", "/swagger/favicon-16x16.png"}
// GetServer lazily builds and returns the process-wide Server singleton.
//
// NOTE: construction is not synchronized; concurrent first calls could race.
func GetServer() *Server {
	if SingleServer != nil {
		return SingleServer
	}
	SingleServer = &Server{
		Logger:       log.GetLogger(),                // logrus-backed logger instance
		RouterEngine: routerEngine.GetRouterEngine(), // blank router (gin.New instead of gin.Default)
		DBEngine:     database.GetDBEngine(),         // gorm database engine
	}
	return SingleServer
}
// InitServer wires up the whole server: log configuration, middleware,
// routes, database schema sync, validator, and seed data.
func (s *Server) InitServer() {
	s.Logger.InitLogger() // initialize logging (level, file, rotation, format)
	s.addMiddleWare(
		middleware.AccessLog(skipLogPath...),
		middleware.Auth(skipAuthPath...),
		middleware.NoCache, // prevent response caching
		middleware.ErrorHandler(),
	)
	s.InitRouters()     // register all routes (every URL)
	s.DBEngine.InitDB() // sync database tables
	s.initDBData()      // seed the admin user and roles (administrator, developer)
}
// use middleware
// addMiddleWare attaches the given gin middleware handlers to the router
// engine in registration order.
func (s *Server) addMiddleWare(mds ...gin.HandlerFunc) {
	s.RouterEngine.Use(mds...)
}
// initDBData seeds the database with the built-in roles and users; each
// record set is inserted only when it does not already exist.
func (s *Server) initDBData() {
	engine := database.GetDBEngine()
	existOrInsert(engine, GetRolesInitData())
	existOrInsert(engine, GetUsersInitData())
}
|
package main
import (
	"html/template"
	"io/ioutil"
	"log"
	"net/http"
	"regexp"

	"./handlers.go"
)
// Page represents a wiki page: its title and raw body bytes.
type Page struct {
	Title string
	Body  []byte
}

// Global cache variable.
// templates holds the edit/view templates, parsed once at startup; Must
// panics if either file is missing.
var templates = template.Must(template.ParseFiles("edit.html", "view.html"))

// validPath accepts exactly /edit/, /save/ or /view/ followed by an
// alphanumeric page title (captured in group 2).
var validPath = regexp.MustCompile("^/(edit|save|view)/([a-zA-Z0-9]+)$")
// make Handlers.
// renderTemplate executes the cached template for tmpl into w with page p.
//
// Bug fix: templates parsed via template.ParseFiles are registered under
// their base file names (e.g. "edit.html"), so looking them up with an
// "html/" prefix could never match and every render failed.
func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) {
	err := templates.ExecuteTemplate(w, tmpl+".html", p)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// makeHandler wraps a page handler: it extracts and validates the page
// title from the request path, replying 404 on malformed paths, and passes
// the title through to fn.
func makeHandler(fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		match := validPath.FindStringSubmatch(r.URL.Path)
		if match == nil {
			http.NotFound(w, r)
			return
		}
		title := match[2]
		fn(w, r, title)
	}
}
// main registers the wiki handlers and serves HTTP on :8080.
func main() {
	http.HandleFunc("/view/", makeHandler(handlers.ViewHandler))
	http.HandleFunc("/edit/", makeHandler(handlers.EditHandler))
	http.HandleFunc("/save/", makeHandler(handlers.SaveHandler))
	// Bug fix: the server error (e.g. port already in use) was silently
	// discarded; surface it and exit non-zero.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
|
package setting
import (
"os"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// InitLogger builds the app's custom zap logger: JSON-encoded records on
// stderr at debug level, annotated with the calling site.
func InitLogger() *zap.Logger {
	encCfg := zapcore.EncoderConfig{
		MessageKey:     "msg",
		LevelKey:       "level",
		EncodeLevel:    zapcore.CapitalLevelEncoder,
		TimeKey:        "time",
		EncodeTime:     zapcore.ISO8601TimeEncoder,
		EncodeDuration: zapcore.StringDurationEncoder,
		CallerKey:      "caller",
		EncodeCaller:   zapcore.ShortCallerEncoder,
	}
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(encCfg),
		zapcore.AddSync(os.Stderr),
		zapcore.DebugLevel,
	)
	return zap.New(core, zap.AddCaller())
}
|
package docker_test
import (
"bytes"
"io"
"io/ioutil"
"os"
"os/exec"
"github.com/cloudcredo/cloudfocker/config"
"github.com/cloudcredo/cloudfocker/docker"
. "github.com/cloudcredo/cloudfocker/Godeps/_workspace/src/github.com/onsi/ginkgo"
. "github.com/cloudcredo/cloudfocker/Godeps/_workspace/src/github.com/onsi/gomega"
"github.com/cloudcredo/cloudfocker/Godeps/_workspace/src/github.com/onsi/gomega/gbytes"
"github.com/cloudcredo/cloudfocker/Godeps/_workspace/src/github.com/onsi/gomega/gexec"
)
// FakeDockerClient records the arguments of every Docker CLI-style call so
// the tests can assert on how the docker package drove the client.
type FakeDockerClient struct {
	cmdVersionCalled bool
	cmdImportArgs    []string
	cmdRunArgs       []string
	cmdStopArgs      []string
	cmdRmArgs        []string
	cmdKillArgs      []string
	cmdBuildArgs     []string
	cmdPsCalled      bool
}

// CmdVersion records that the version command was invoked.
func (fake *FakeDockerClient) CmdVersion(_ ...string) error {
	fake.cmdVersionCalled = true
	return nil
}

// CmdImport captures the arguments of an import call.
func (fake *FakeDockerClient) CmdImport(args ...string) error {
	fake.cmdImportArgs = args
	return nil
}

// CmdRun captures the arguments of a run call.
func (fake *FakeDockerClient) CmdRun(args ...string) error {
	fake.cmdRunArgs = args
	return nil
}

// CmdStop captures the arguments of a stop call.
func (fake *FakeDockerClient) CmdStop(args ...string) error {
	fake.cmdStopArgs = args
	return nil
}

// CmdRm captures the arguments of an rm call.
func (fake *FakeDockerClient) CmdRm(args ...string) error {
	fake.cmdRmArgs = args
	return nil
}

// CmdKill captures the arguments of a kill call.
func (fake *FakeDockerClient) CmdKill(args ...string) error {
	fake.cmdKillArgs = args
	return nil
}

// CmdBuild captures the arguments of a build call.
func (fake *FakeDockerClient) CmdBuild(args ...string) error {
	fake.cmdBuildArgs = args
	return nil
}

// CmdPs records that the ps command was invoked.
func (fake *FakeDockerClient) CmdPs(_ ...string) error {
	fake.cmdPsCalled = true
	return nil
}
// Ginkgo suite exercising the docker package's wrappers around the fake
// Docker client: version printing, rootfs import, container lifecycle,
// runtime-image building, container-ID lookup, client construction, and
// pipe plumbing.
var _ = Describe("Docker", func() {
	var (
		fakeDockerClient *FakeDockerClient
		buffer           *gbytes.Buffer
	)
	BeforeEach(func() {
		buffer = gbytes.NewBuffer()
	})
	Describe("Displaying the Docker version", func() {
		It("should tell Docker to output its version", func() {
			fakeDockerClient = new(FakeDockerClient)
			stdout, stdoutPipe := io.Pipe()
			docker.PrintVersion(fakeDockerClient, stdout, stdoutPipe, buffer)
			Expect(fakeDockerClient.cmdVersionCalled).To(Equal(true))
		})
	})
	Describe("Bootstrapping the Docker environment", func() {
		It("should tell Docker to import the rootfs from the supplied URL", func() {
			url := "http://test.com/test-img"
			fakeDockerClient = new(FakeDockerClient)
			stdout, stdoutPipe := io.Pipe()
			docker.ImportRootfsImage(fakeDockerClient, stdout, stdoutPipe, buffer, url)
			Expect(len(fakeDockerClient.cmdImportArgs)).To(Equal(2))
			Expect(fakeDockerClient.cmdImportArgs[0]).To(Equal("http://test.com/test-img"))
			Expect(fakeDockerClient.cmdImportArgs[1]).To(Equal("cloudfocker-base"))
		})
	})
	Describe("Running a configured container", func() {
		It("should tell Docker to run the container with the correct arguments", func() {
			fakeDockerClient = new(FakeDockerClient)
			stdout, stdoutPipe := io.Pipe()
			docker.RunConfiguredContainer(fakeDockerClient, stdout, stdoutPipe, buffer, config.NewStageContainerConfig(config.NewDirectories("test")))
			Expect(len(fakeDockerClient.cmdRunArgs)).To(Equal(10))
			Expect(fakeDockerClient.cmdRunArgs[9]).To(Equal("internal"))
		})
	})
	Describe("Stopping the docker container", func() {
		It("should tell Docker to stop the container", func() {
			fakeDockerClient = new(FakeDockerClient)
			stdout, stdoutPipe := io.Pipe()
			docker.StopContainer(fakeDockerClient, stdout, stdoutPipe, buffer, "cloudfocker-container")
			Expect(len(fakeDockerClient.cmdStopArgs)).To(Equal(1))
			Expect(fakeDockerClient.cmdStopArgs[0]).To(Equal("cloudfocker-container"))
		})
	})
	Describe("Killing the docker container", func() {
		It("should tell Docker to kill the container", func() {
			fakeDockerClient = new(FakeDockerClient)
			stdout, stdoutPipe := io.Pipe()
			docker.KillContainer(fakeDockerClient, stdout, stdoutPipe, buffer, "cloudfocker-container")
			Expect(len(fakeDockerClient.cmdKillArgs)).To(Equal(1))
			Expect(fakeDockerClient.cmdKillArgs[0]).To(Equal("cloudfocker-container"))
		})
	})
	Describe("Deleting the docker container", func() {
		It("should tell Docker to delete the container", func() {
			fakeDockerClient = new(FakeDockerClient)
			stdout, stdoutPipe := io.Pipe()
			docker.DeleteContainer(fakeDockerClient, stdout, stdoutPipe, buffer, "cloudfocker-container")
			Expect(len(fakeDockerClient.cmdRmArgs)).To(Equal(1))
			Expect(fakeDockerClient.cmdRmArgs[0]).To(Equal("cloudfocker-container"))
		})
	})
	Describe("Building a runtime image", func() {
		var (
			dropletDir       string
			fakeDockerClient *FakeDockerClient
		)
		BeforeEach(func() {
			fakeDockerClient = new(FakeDockerClient)
			stdout, stdoutPipe := io.Pipe()
			// Work on a copy of the fixture droplet in a temp dir.
			tmpDir, _ := ioutil.TempDir(os.TempDir(), "docker-runtime-image-test")
			cp("fixtures/build/droplet", tmpDir)
			dropletDir = tmpDir + "/droplet"
			docker.BuildRuntimeImage(fakeDockerClient, stdout, stdoutPipe, buffer, config.NewRuntimeContainerConfig(dropletDir))
		})
		It("should create a tarred version of the droplet mount, for extraction in the container, so as to not have AUFS permissions issues in https://github.com/docker/docker/issues/783", func() {
			dropletDirFile, err := os.Open(dropletDir)
			Expect(err).ShouldNot(HaveOccurred())
			dropletDirContents, err := dropletDirFile.Readdirnames(0)
			Expect(err).ShouldNot(HaveOccurred())
			Expect(dropletDirContents, err).Should(ContainElement("droplet.tgz"))
		})
		It("should write a valid Dockerfile to the filesystem", func() {
			result, err := ioutil.ReadFile(dropletDir + "/Dockerfile")
			Expect(err).ShouldNot(HaveOccurred())
			expected, err := ioutil.ReadFile("fixtures/build/Dockerfile")
			Expect(err).ShouldNot(HaveOccurred())
			Expect(result).To(Equal(expected))
		})
		It("should tell Docker to build the container from the Dockerfile", func() {
			Expect(len(fakeDockerClient.cmdBuildArgs)).To(Equal(1))
			Expect(fakeDockerClient.cmdBuildArgs[0]).To(Equal(dropletDir))
		})
	})
	Describe("Getting a cloudfocker runtime container ID", func() {
		Context("with no cloudfocker runtime container running", func() {
			It("should return empty string", func() {
				fakeDockerClient = new(FakeDockerClient)
				stdout, stdoutPipe := io.Pipe()
				containerId := make(chan string)
				go func() {
					containerId <- docker.GetContainerId(fakeDockerClient, stdout, stdoutPipe, "cloudfocker-runtime")
				}()
				// Simulate `docker ps` output with only the header row.
				io.Copy(stdoutPipe, bytes.NewBufferString("CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES\n"))
				// NOTE(review): Eventually is handed the bool's current
				// value, not a func() bool, so it polls a constant —
				// confirm this is intended.
				Eventually(fakeDockerClient.cmdPsCalled).Should(Equal(true))
				Eventually(containerId).Should(Receive(Equal("")))
			})
		})
		Context("with a cloudfocker runtime container running", func() {
			It("should return the container ID", func() {
				fakeDockerClient = new(FakeDockerClient)
				stdout, stdoutPipe := io.Pipe()
				containerId := make(chan string)
				go func() {
					containerId <- docker.GetContainerId(fakeDockerClient, stdout, stdoutPipe, "cloudfocker-runtime")
				}()
				// Simulate `docker ps` output that includes the runtime row.
				io.Copy(stdoutPipe, bytes.NewBufferString("CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES\n180e16d9ef28 cloudfocker:latest /usr/sbin/nginx -c / 13 minutes ago Up 13 minutes 0.0.0.0:8080->8080/tcp cloudfocker-runtime\n"))
				// NOTE(review): same Eventually-on-a-value pattern as above.
				Eventually(fakeDockerClient.cmdPsCalled).Should(Equal(true))
				Eventually(containerId).Should(Receive(Equal("180e16d9ef28")))
			})
		})
	})
	Describe("Getting a Docker client", func() {
		It("should return a usable docker client on unix", func() {
			cli, stdout, stdoutpipe := docker.GetNewClient()
			docker.PrintVersion(cli, stdout, stdoutpipe, buffer)
			Eventually(buffer).Should(gbytes.Say(`Client API version: `))
		})
	})
	Describe("Container I/O plumbing", func() {
		It("Copies from a pipe to a writer without waiting for the pipe to close", func() {
			stdout, stdoutPipe := io.Pipe()
			go func() {
				docker.CopyFromPipeToPipe(buffer, stdout)
			}()
			io.Copy(stdoutPipe, bytes.NewBufferString("THIS IS A TEST STRING\n"))
			Eventually(buffer).Should(gbytes.Say(`THIS IS A TEST STRING`))
			io.Copy(stdoutPipe, bytes.NewBufferString("THIS IS ANOTHER TEST STRING\n"))
			stdoutPipe.Close()
			Eventually(buffer).Should(gbytes.Say(`THIS IS ANOTHER TEST STRING`))
		})
	})
})
// cp shells out to `cp -a src dst` and waits for the copy to exit
// successfully, failing the current spec otherwise.
func cp(src string, dst string) {
	cmd := exec.Command("cp", "-a", src, dst)
	session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
	Ω(err).ShouldNot(HaveOccurred())
	Eventually(session).Should(gexec.Exit(0))
}
|
package bigger
import (
"bytes"
"compress/gzip"
"crypto/md5"
"encoding/hex"
"github.com/labstack/echo/v4"
"github.com/sxueck/k8sodep/model"
"io"
"log"
"net/http"
"os"
"path"
"strconv"
)
func DecompressData(compressedData []byte) ([]byte, error) {
reader, err := gzip.NewReader(bytes.NewReader(compressedData))
if err != nil {
return nil, err
}
decompressedData, err := io.ReadAll(reader)
if err != nil {
return nil, err
}
defer func(r *gzip.Reader) {
err := r.Close()
if err != nil {
log.Println("reader.Close err:", err)
}
}(reader)
return decompressedData, nil
}
// ComputeMD5HashString returns the hex-encoded MD5 digest of data; used to
// verify upload-slice integrity.
func ComputeMD5HashString(data []byte) string {
	sum := md5.Sum(data)
	return hex.EncodeToString(sum[:])
}
// WriteBytesToFile debug
func WriteBytesToFile(filename string, data []byte) error {
err := os.WriteFile(filename, data, 0644)
return err
}
// StartRecvUploadHandle returns echo middleware that receives chunked image
// uploads: each request carries one gzip-compressed, MD5-checked slice that
// is written at its offset in the cache file named by the File-Name header;
// when the Last-Part header is set, the assembled file is imported into the
// cluster and (unless debugging) removed.
func StartRecvUploadHandle() echo.MiddlewareFunc {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Extract file name and slice metadata from the headers.
		// NOTE(review): the ParseInt/Atoi errors are ignored, so malformed
		// headers silently become zero values — confirm that is acceptable.
		log.Println("r.Header:", r.Header)
		fn := path.Base(r.Header.Get("File-Name"))
		// NOTE(review): Content-Range is parsed as a plain integer here,
		// not the standard "bytes a-b/total" form — confirm sender format.
		fileSize, _ := strconv.ParseInt(r.Header.Get("Content-Range"), 10, 64)
		partNumber, _ := strconv.Atoi(r.Header.Get("Part-Number"))
		svcName := r.Header.Get("Service-Name")
		log.Println(imageUploadDaemon[svcName])
		isEnd := r.Header.Get("Last-Part")
		chunkSize, _ := strconv.ParseInt(r.Header.Get("Origin-Size"), 10, 64)
		// Open the cache file in read-write mode, creating it if needed.
		file, err := os.OpenFile(fn, os.O_CREATE|os.O_RDWR, 0666)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		if partNumber == 0 {
			log.Println("the first slice")
			// First slice: pre-size the file to the full upload size.
			err := os.Truncate(fn, fileSize)
			if err != nil {
				log.Println(err)
				return
			}
		}
		defer file.Close()
		// Move the file pointer to this slice's position.
		offset := int64(partNumber) * chunkSize
		// The final slice may be smaller than the standard chunk size, so
		// partNumber*chunkSize would compute a wrong offset and cause an
		// incorrect overlapping write; seek relative to the end instead.
		if len(isEnd) != 0 {
			log.Println("TaskCachePath:", fn)
			_, err = file.Seek(func() int64 { // adaptation for small files
				if offset == 0 {
					return 0
				}
				return fileSize - chunkSize
			}(), io.SeekStart)
		} else {
			_, err = file.Seek(offset, io.SeekStart)
		}
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		var bs []byte
		bs, err = io.ReadAll(r.Body)
		if err != nil {
			log.Println("io.ReadAll err:", err)
			return
		}
		// Verify the slice's integrity before writing anything.
		m5 := r.Header.Get("Md5")
		if ComputeMD5HashString(bs) != m5 {
			log.Printf("Share MD5 %s not match,it could be a network anomaly", m5)
			return
		}
		dbs, err := DecompressData(bs)
		if err != nil {
			log.Println("Share decompressData err:", err)
			return
		}
		// Write the slice contents at the current offset.
		_, err = io.Copy(file, bytes.NewReader(dbs))
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		log.Printf("slice write to file successful : %s", m5)
		if len(isEnd) != 0 {
			// Last slice: hand the assembled file to the cluster importer.
			err = ImportImageToCluster(fn, imageUploadDaemon[svcName])
			if err != nil {
				log.Println(err)
				w.WriteHeader(http.StatusInternalServerError)
				w.Write([]byte(err.Error()))
				return
			}
			defer func() {
				delete(imageUploadDaemon, svcName)
			}()
			// Remove the cache file unless the task is in debug mode.
			if !imageUploadDaemon[svcName].Debug {
				err = os.Remove(fn)
				if err != nil {
					log.Printf("cache file cleaning exception : %s", err)
					return
				}
			}
		}
	})
	m := echo.WrapMiddleware(func(handler http.Handler) http.Handler {
		return h
	})
	return m
}
// RegisterUploadTaskToDaemon binds the deploy info from the request body and
// registers it in the in-memory upload table, keyed by resource name.
func RegisterUploadTaskToDaemon(c echo.Context) error {
	task := &model.ReCallDeployInfo{}
	if err := c.Bind(task); err != nil {
		return c.String(http.StatusBadRequest, err.Error())
	}
	//if task.AccessToken != os.Getenv("ACCESS_TOKEN") || task.AccessToken == "" {
	//	return c.String(http.StatusForbidden, "forbidden")
	//}
	imageUploadDaemon[task.Resource] = *task
	log.Println(imageUploadDaemon)
	return c.String(http.StatusOK, "ok")
}
|
package main
import (
"bytes"
"flag"
"fmt"
"github.com/0xjbb/scyllago"
"github.com/bwmarrin/discordgo"
)
// ScyllaCfg carries the Discord context and paging defaults for one scylla
// search command.
type ScyllaCfg struct {
	session *discordgo.Session       // session used to send replies
	message *discordgo.MessageCreate // message that triggered the command
	size    int                      // default number of results to request
	start   int                      // default result offset
	maxSize int                      // upper bound on results — TODO confirm: not read in the visible code
}
// $scylla -username Joe Blogs -password test -size 5 -start 0
// ScyllaNew builds a ScyllaCfg bound to the triggering Discord session and
// message, with the given paging defaults.
func ScyllaNew(session *discordgo.Session, message *discordgo.MessageCreate, size int, start int, maxSize int) ScyllaCfg {
	cfg := ScyllaCfg{
		session: session,
		message: message,
		size:    size,
		start:   start,
		maxSize: maxSize,
	}
	return cfg
}
// maybe break this function up into a few smaller funcs
func (sc *ScyllaCfg) Handle(command []string){
sFlag := flag.NewFlagSet("scylla", flag.ContinueOnError)
username := sFlag.String("user", "", "Username you wish to search")
password := sFlag.String("password", "", "Password you wish to search")
name := sFlag.String("name","", "First/Last name you wish to search")
email := sFlag.String("email", "", "Email you wish to search")
domain := sFlag.String("url", "", "Domain you wish to search")
ip := sFlag.String("ip", "", "IP address you wish to search")
passhash := sFlag.String("passh", "", "Password hash you wish to search")
size := sFlag.Int("size", sc.size, "Number of results to return (max 10)")
start := sFlag.Int("start", sc.start, "Result starting position.")
sFlag.Usage = sc.usage(sFlag)
err := sFlag.Parse(command)
// find out why the flag library doesn't already do this
if *username == "" && *password == "" && *email == "" && *ip == "" && *domain == "" && *passhash == "" && *name == ""{
sFlag.Usage()
return
}
if err != nil {
fmt.Println("Parse error: ", err)
return
}
qVars := make(map[string]string, 6)
qVars["username"] = *username
qVars["password"] = *password
qVars["name"] = *name
qVars["email"] = *email
qVars["domain"] = *domain
qVars["ip"] = *ip
qVars["passhash"] = *passhash
query := ""
for key, val := range qVars{
if val == ""{ // Skip any that don't have a string
continue
}
if query == ""{
query = fmt.Sprintf("%s:%s", key, val)
continue
}
query = fmt.Sprintf("%s+%%26+%s:%s",query, key, val)
}
if query == ""{
sc.usage(sFlag)
return
}
result, err := scyllago.Query(query, *size, *start)
if err != nil {
fmt.Println("ScyllaGo Error: ",err) // do this better
// send message bask to user.
return
}
if len(result) == 0{
sc.SendEmbed("Error", "No results found!")
return
}
// @TODO convert to function
messageEmbed := discordgo.MessageEmbed{
Title: "__ScyllaBot__",
Fields: func() []*discordgo.MessageEmbedField {
var embedFields []*discordgo.MessageEmbedField
for _, values := range result {
currentEmbed := discordgo.MessageEmbedField{
Name: "------------------------------------",
// @TODO fix this shit.
Value: fmt.Sprintf("IP: %s\nName: %s\nUsername: %s\nPassword: %s\nPasshash: %s\nEmail: %s\nDomain: %s",
values.Fields.Ip,
values.Fields.Name,
values.Fields.Username,
values.Fields.Password,
values.Fields.Passhash,
values.Fields.Email,
values.Fields.Domain),
}
embedFields = append(embedFields, ¤tEmbed)
}
return embedFields
}(),
}
sc.session.ChannelMessageSendEmbed(sc.message.ChannelID, &messageEmbed)
}
// usage returns a closure suitable for assignment to flag.FlagSet.Usage that
// sends the flag defaults to the Discord channel instead of stdout/stderr.
func (sc *ScyllaCfg) usage(fs *flag.FlagSet) func() {
	var buf bytes.Buffer
	fs.SetOutput(&buf) // capture PrintDefaults output instead of printing it
	return func() {
		fs.PrintDefaults()
		sc.SendEmbed("Usage:", fmt.Sprintf("```\n%s ```", buf.String()))
	}
}
// @TODO rewrite.
func (sc *ScyllaCfg) SendEmbed(name string, value string){
messageEmbed := discordgo.MessageEmbed{
Title: "__ScyllaBot__",
Fields: func() []*discordgo.MessageEmbedField {
var embedFields []*discordgo.MessageEmbedField
currentEmbed := discordgo.MessageEmbedField{
Name: name,
Value: value,
}
embedFields = append(embedFields, ¤tEmbed)
return embedFields
}(),
}
sc.session.ChannelMessageSendEmbed(sc.message.ChannelID, &messageEmbed)
}
|
package cmd
import (
"fmt"
"testing"
"github.com/klauspost/reedsolomon"
)
//
/*https://golangcode.com/mocking-s3-upload/ */
// Test performs a simple test to demonstrate some reedsolomon stuff. Go make a better test after RS has been incorporated into the slab properly
func Test(t *testing.T) {
	slab1 := NewSlabWithSize(2500000)
	slab1.RandomFill() // fill with random data

	// 7 data + 4 parity shards (11 total); the previous version discarded
	// every error from the reedsolomon calls, which could mask real failures.
	enc, err := reedsolomon.New(7, 4)
	if err != nil {
		t.Fatalf("SAR: reedsolomon.New failed: %v", err)
	}
	shards, err := enc.Split(slab1.GetSlabBytes())
	if err != nil {
		t.Fatalf("SAR: Split failed: %v", err)
	}
	if err := enc.Encode(shards); err != nil {
		t.Fatalf("SAR: Encode failed: %v", err)
	}
	ok, err := enc.Verify(shards)
	if err != nil {
		t.Fatalf("SAR: Verify failed: %v", err)
	}
	if ok {
		fmt.Println("SAR: Verify ok")
	} else {
		t.Error("SAR: Failed to Verify stage 1 during Shard Test w/ RS")
	}

	// Destroy 4 shards — the maximum the 4 parity shards can recover.
	shards[4], shards[6], shards[7], shards[3] = nil, nil, nil, nil
	fmt.Println("SAR: Destroyed shards 4,6,7,3 (7 shards, 4 destroyed)")
	if err := enc.Reconstruct(shards); err != nil {
		t.Fatalf("SAR: Reconstruct failed: %v", err)
	}
	ok, err = enc.Verify(shards)
	if err != nil {
		t.Fatalf("SAR: Verify after Reconstruct failed: %v", err)
	}
	if ok {
		fmt.Println("SAR: Reconstruct ok")
	} else {
		t.Error("SAR: Failed to verify reconstruction, stage 2, during Shard Test w/ RS")
	}
}
//todo: Build a "target" that contains multiple endpoints
/*
*/
|
package main
import (
"flag"
"log"
"github.com/awslabs/aws-virtual-gpu-device-plugin/pkg/gpu/nvidia"
)
var (
	// vGPU is the number of virtual GPUs to expose per physical GPU.
	vGPU = flag.Int("vgpu", 10, "Number of virtual GPUs")
)

// VOLTA_MAXIMUM_MPS_CLIENT is the maximum number of MPS clients (48 on
// Volta), which bounds how many virtual GPUs can be configured.
const VOLTA_MAXIMUM_MPS_CLIENT = 48
// main validates the requested virtual-GPU count against the MPS client
// limit and runs the virtual GPU device plugin manager.
func main() {
	flag.Parse()
	log.Println("Start virtual GPU device plugin")

	if *vGPU > VOLTA_MAXIMUM_MPS_CLIENT {
		log.Fatal("Number of virtual GPUs can not exceed maximum number of MPS clients")
	}

	vgm := nvidia.NewVirtualGPUManager(*vGPU)
	if err := vgm.Run(); err != nil {
		log.Fatalf("Failed due to %v", err)
	}
}
|
package ip
import (
"errors"
"fmt"
"github.com/cenkalti/backoff"
"io/ioutil"
"net"
"net/http"
"reflect"
"strconv"
"strings"
"time"
)
// getIPBy fetches the caller's public IP from the given URL, retrying up to
// MaxTries times with exponential backoff on connection errors. Read errors,
// non-200 responses, and unparseable bodies abort immediately.
func getIPBy(dest string) (net.IP, error) {
	b := backoff.NewExponentialBackOff()
	b.InitialInterval = 100 * time.Millisecond
	b.MaxElapsedTime = 10 * time.Second
	b.Multiplier = 2

	client := &http.Client{}
	req, err := http.NewRequest("GET", dest, nil)
	if err != nil {
		return nil, err
	}

	for tries := 0; tries < MaxTries; tries++ {
		resp, err := client.Do(req)
		if err != nil {
			time.Sleep(b.NextBackOff())
			continue
		}

		// Read and close the body within the iteration. The previous version
		// used `defer resp.Body.Close()` inside the loop, which kept one body
		// open per retry until the function returned.
		body, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			return nil, err
		}
		if resp.StatusCode != 200 {
			return nil, errors.New(dest + " status code " + strconv.Itoa(resp.StatusCode) + ", body: " + string(body))
		}
		tb := strings.TrimSpace(string(body))
		ip := net.ParseIP(tb)
		if ip == nil {
			return nil, errors.New("IP address not valid: " + tb)
		}
		return ip, nil
	}
	return nil, errors.New("Failed to reach " + dest)
}
func detailErr(err error, errs []error) error {
errStrs := []string{err.Error()}
for _, e := range errs {
errStrs = append(errStrs, e.Error())
}
j := strings.Join(errStrs, "\n")
return errors.New(j)
}
// validate checks the IPs collected from the workers: it requires a non-nil
// result set with at least 3 entries, all byte-identical, and returns that
// common IP.
func validate(rs []net.IP) (net.IP, error) {
	if rs == nil {
		return nil, fmt.Errorf("Failed to get any result from %d APIs", len(APIURIs))
	}
	if len(rs) < 3 {
		return nil, fmt.Errorf("Less than %d results from %d APIs", 3, len(APIURIs))
	}
	first := rs[0]
	for _, r := range rs[1:] {
		// DeepEqual compares the raw byte representation (a 4-byte and a
		// 16-byte encoding of the same IPv4 address are NOT equal here).
		if !reflect.DeepEqual(first, r) {
			return nil, fmt.Errorf("Results are not identical: %s", rs)
		}
	}
	return first, nil
}
// worker queries a single API endpoint and delivers either the resulting IP
// or the error on the corresponding channel.
func worker(d string, r chan<- net.IP, e chan<- error) {
	ip, err := getIPBy(d)
	if err != nil {
		e <- err
	} else {
		r <- ip
	}
}
|
package moby
// Adapted from
// https://github.com/moby/moby/blob/ecb898dcb9065c8e9bcf7bb79fd160dea1c859b8/pkg/archive/archive_windows.go
/*
Copyright 2013-2018 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"os"
)
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func ChmodTarEntry(perm os.FileMode) os.FileMode {
// perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
permPart := perm & os.ModePerm
noPermPart := perm &^ os.ModePerm
// Add the x bit: make everything +x from windows
permPart |= 0111
permPart &= 0755
return noPermPart | permPart
}
|
// Copyright © 2020 The Knative Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package flags
import (
"fmt"
"strconv"
"strings"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/client/pkg/util"
)
// VolumeSourceType is a type standing for enumeration of ConfigMap and Secret
type VolumeSourceType int

// Enumeration of volume source types: ConfigMap or Secret
const (
	ConfigMapVolumeSourceType VolumeSourceType = iota
	SecretVolumeSourceType

	PortFormatErr = "the port specification '%s' is not valid. Please provide in the format 'NAME:PORT', where 'NAME' is optional. Examples: '--port h2c:8080' , '--port 8080'."
)

// String returns the human-readable name of the volume source type, or
// "unknown" for any out-of-range value.
func (vt VolumeSourceType) String() string {
	switch vt {
	case ConfigMapVolumeSourceType:
		return "config-map"
	case SecretVolumeSourceType:
		return "secret"
	default:
		return "unknown"
	}
}
// containerOfPodSpec returns a pointer to the first container of the given
// pod spec, appending a new empty container first if the spec has none.
// The returned pointer aliases spec.Containers[0]; mutations through it are
// visible in the spec, which is what every Update* helper in this file
// relies on.
func containerOfPodSpec(spec *corev1.PodSpec) *corev1.Container {
	if len(spec.Containers) == 0 {
		newContainer := corev1.Container{}
		spec.Containers = append(spec.Containers, newContainer)
	}
	return &spec.Containers[0]
}
// UpdateEnvVars gives the configuration all the env var values listed in the given map of
// vars. Does not touch any environment variables not mentioned, but it can add
// new env vars and change the values of existing ones.
//
// The two ordered maps (plain NAME=value entries and NAME=type:name:key
// "valueFrom" entries) are merged into a single ordered map whose order
// follows allArgs — the raw command-line arguments — so that env vars are
// applied in the order the user specified them, regardless of which flag
// they came from.
func UpdateEnvVars(spec *corev1.PodSpec,
	allArgs []string, envToUpdate *util.OrderedMap, envToRemove []string, envValueFromToUpdate *util.OrderedMap, envValueFromToRemove []string) error {
	container := containerOfPodSpec(spec)
	allEnvsToUpdate := util.NewOrderedMap()
	// Walk allArgs once; each arg is matched against the next pending entry
	// of whichever iterator it belongs to. This assumes the iterators yield
	// entries in the same relative order they appeared in allArgs.
	envIterator := envToUpdate.Iterator()
	envValueFromIterator := envValueFromToUpdate.Iterator()

	envKey, envValue, envExists := envIterator.NextString()
	envValueFromKey, envValueFromValue, envValueFromExists := envValueFromIterator.NextString()
	for _, arg := range allArgs {
		// envs are stored as NAME=value
		if envExists && isValidEnvArg(arg, envKey, envValue) {
			allEnvsToUpdate.Set(envKey, corev1.EnvVar{
				Name:  envKey,
				Value: envValue,
			})
			envKey, envValue, envExists = envIterator.NextString()
		} else if envValueFromExists && isValidEnvValueFromArg(arg, envValueFromKey, envValueFromValue) {
			// envs are stored as NAME=secret:sercretName:key or NAME=config-map:cmName:key
			envVarSource, err := createEnvVarSource(envValueFromValue)
			if err != nil {
				return err
			}
			allEnvsToUpdate.Set(envValueFromKey, corev1.EnvVar{
				Name:      envValueFromKey,
				ValueFrom: envVarSource,
			})
			envValueFromKey, envValueFromValue, envValueFromExists = envValueFromIterator.NextString()
		}
	}
	// Apply updates first, then removals from both removal lists.
	updated := updateEnvVarsFromMap(container.Env, allEnvsToUpdate)
	updated = removeEnvVars(updated, append(envToRemove, envValueFromToRemove...))
	container.Env = updated
	return nil
}
// isValidEnvArg checks that the input arg is a valid argument for specifying
// an env value, i.e. stored as NAME=value, optionally carried by the -e= or
// --env= flag forms.
func isValidEnvArg(arg, envKey, envValue string) bool {
	pair := envKey + "=" + envValue
	for _, candidate := range []string{pair, "-e=" + pair, "--env=" + pair} {
		if strings.HasPrefix(arg, candidate) {
			return true
		}
	}
	return false
}
// isValidEnvValueFromArg checks that the input arg is a valid argument for
// specifying an env-from value, i.e. stored as NAME=secret:secretName:key or
// NAME=config-map:cmName:key, optionally carried by --env-value-from=.
func isValidEnvValueFromArg(arg, envValueFromKey, envValueFromValue string) bool {
	pair := envValueFromKey + "=" + envValueFromValue
	return strings.HasPrefix(arg, pair) || strings.HasPrefix(arg, "--env-value-from="+pair)
}
// UpdateEnvFrom updates the envFrom sources of the pod spec's first
// container: specs in toUpdate are added when not already present, specs in
// toRemove are deleted.
func UpdateEnvFrom(spec *corev1.PodSpec, toUpdate []string, toRemove []string) error {
	container := containerOfPodSpec(spec)
	updated, err := updateEnvFrom(container.EnvFrom, toUpdate)
	if err != nil {
		return err
	}
	container.EnvFrom, err = removeEnvFrom(updated, toRemove)
	return err
}
// UpdateVolumeMountsAndVolumes updates the configuration for volume mounts and volumes.
// The steps are order-dependent: mount specs are normalized first (inline
// "type:name" values get auto-generated volumes), then volumes are
// updated/added, then mounts, then removals are applied.
func UpdateVolumeMountsAndVolumes(spec *corev1.PodSpec,
	mountsToUpdate *util.OrderedMap, mountsToRemove []string, volumesToUpdate *util.OrderedMap, volumesToRemove []string) error {
	container := containerOfPodSpec(spec)
	// Note: mountsToUpdate is deliberately shadowed with the revised map that
	// references the generated volume names.
	volumeSourceInfoByName, mountsToUpdate, err := reviseVolumeInfoAndMountsToUpdate(mountsToUpdate, volumesToUpdate)
	if err != nil {
		return err
	}
	volumes, err := updateVolumesFromMap(spec.Volumes, volumeSourceInfoByName)
	if err != nil {
		return err
	}
	volumeMounts, err := updateVolumeMountsFromMap(container.VolumeMounts, mountsToUpdate, volumes)
	if err != nil {
		return err
	}
	// Auto-generated volumes that back removed mounts are removed as well.
	volumesToRemove = reviseVolumesToRemove(container.VolumeMounts, volumesToRemove, mountsToRemove)
	container.VolumeMounts = removeVolumeMounts(volumeMounts, mountsToRemove)
	// removeVolumes fails if a surviving mount still references a removed volume.
	spec.Volumes, err = removeVolumes(volumes, volumesToRemove, container.VolumeMounts)
	return err
}
// UpdateImage sets the image of the pod spec's first container.
func UpdateImage(spec *corev1.PodSpec, image string) error {
	// When not setting the image to a digest, add the user image annotation.
	containerOfPodSpec(spec).Image = image
	return nil
}
// UpdateContainerCommand sets the command (entrypoint) of the pod spec's
// first container.
func UpdateContainerCommand(spec *corev1.PodSpec, command []string) error {
	containerOfPodSpec(spec).Command = command
	return nil
}
// UpdateContainerArg sets the arguments of the pod spec's first container.
func UpdateContainerArg(spec *corev1.PodSpec, arg []string) error {
	containerOfPodSpec(spec).Args = arg
	return nil
}
// UpdateContainerPort updates the first container with the given port spec,
// accepted as either "NAME:PORT" or a bare "PORT". Returns an error built
// from PortFormatErr when the port is not a valid integer.
func UpdateContainerPort(spec *corev1.PodSpec, port string) error {
	container := containerOfPodSpec(spec)

	// Fold the two previously duplicated parse branches into one: split off
	// an optional name, then parse the remaining port number once.
	var name, portStr string
	elements := strings.SplitN(port, ":", 2)
	if len(elements) == 2 {
		name, portStr = elements[0], elements[1]
	} else {
		name, portStr = "", elements[0]
	}
	containerPort, err := strconv.ParseInt(portStr, 10, 32)
	if err != nil {
		return fmt.Errorf(PortFormatErr, port)
	}

	container.Ports = []corev1.ContainerPort{{
		ContainerPort: int32(containerPort),
		Name:          name,
	}}
	return nil
}
// UpdateUser sets the security context of the pod spec's first container to
// run as the given user id.
func UpdateUser(spec *corev1.PodSpec, user int64) error {
	containerOfPodSpec(spec).SecurityContext = &corev1.SecurityContext{
		RunAsUser: &user,
	}
	return nil
}
// UpdateResources merges the given resource requests and limits into the
// first container, then deletes the entries named in requestsToRemove and
// limitsToRemove from the respective lists.
func UpdateResources(spec *corev1.PodSpec, resources corev1.ResourceRequirements, requestsToRemove, limitsToRemove []string) error {
	container := containerOfPodSpec(spec)

	if container.Resources.Requests == nil {
		container.Resources.Requests = corev1.ResourceList{}
	}
	for name, qty := range resources.Requests {
		container.Resources.Requests[name] = qty
	}
	for _, name := range requestsToRemove {
		delete(container.Resources.Requests, corev1.ResourceName(name))
	}

	if container.Resources.Limits == nil {
		container.Resources.Limits = corev1.ResourceList{}
	}
	for name, qty := range resources.Limits {
		container.Resources.Limits[name] = qty
	}
	for _, name := range limitsToRemove {
		delete(container.Resources.Limits, corev1.ResourceName(name))
	}
	return nil
}
// UpdateServiceAccountName sets the (whitespace-trimmed) service account
// name used by the corresponding knative service.
func UpdateServiceAccountName(spec *corev1.PodSpec, serviceAccountName string) {
	spec.ServiceAccountName = strings.TrimSpace(serviceAccountName)
}
// UpdateImagePullSecrets sets the image pull secret for the corresponding
// knative service; an empty (or all-whitespace) name clears the list.
func UpdateImagePullSecrets(spec *corev1.PodSpec, pullsecrets string) {
	name := strings.TrimSpace(pullsecrets)
	if name == "" {
		spec.ImagePullSecrets = nil
		return
	}
	spec.ImagePullSecrets = []corev1.LocalObjectReference{{
		Name: name,
	}}
}
// =======================================================================================
// updateEnvVarsFromMap overwrites, in place, the env entries whose name
// appears in toUpdate, then appends (preserving toUpdate's order) the
// entries that were not already present.
func updateEnvVarsFromMap(env []corev1.EnvVar, toUpdate *util.OrderedMap) []corev1.EnvVar {
	updated := sets.NewString() // names that were overwritten in place
	for i := range env {
		object, present := toUpdate.Get(env[i].Name)
		if present {
			env[i] = object.(corev1.EnvVar)
			updated.Insert(env[i].Name)
		}
	}
	// Second pass: append the genuinely new vars in map order.
	it := toUpdate.Iterator()
	for name, envVar, ok := it.Next(); ok; name, envVar, ok = it.Next() {
		if !updated.Has(name) {
			env = append(env, envVar.(corev1.EnvVar))
		}
	}
	return env
}
// removeEnvVars deletes from env the first variable matching each name in
// toRemove; names without a match are silently ignored.
func removeEnvVars(env []corev1.EnvVar, toRemove []string) []corev1.EnvVar {
	for _, name := range toRemove {
		for i := range env {
			if env[i].Name == name {
				env = append(env[:i], env[i+1:]...)
				break
			}
		}
	}
	return env
}
// createEnvVarSource parses a "resourceType:name:key" spec into an
// EnvVarSource referencing either a ConfigMap key or a Secret key.
// Accepted resource types: "config-map"/"cm" and "secret"/"sc".
func createEnvVarSource(spec string) (*corev1.EnvVarSource, error) {
	slices := strings.SplitN(spec, ":", 3)
	if len(slices) != 3 {
		return nil, fmt.Errorf("argument requires a value in form \"resourceType:name:key\" where \"resourceType\" can be one of \"config-map\" (\"cm\") or \"secret\" (\"sc\"); got %q", spec)
	}

	typeString := strings.TrimSpace(slices[0])
	sourceName := strings.TrimSpace(slices[1])
	sourceKey := strings.TrimSpace(slices[2])

	var sourceType string
	envVarSource := corev1.EnvVarSource{}

	switch typeString {
	case "config-map", "cm":
		sourceType = "ConfigMap"
		envVarSource.ConfigMapKeyRef = &corev1.ConfigMapKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{
				Name: sourceName,
			},
			Key: sourceKey}
	case "secret", "sc":
		sourceType = "Secret"
		envVarSource.SecretKeyRef = &corev1.SecretKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{
				Name: sourceName,
			},
			Key: sourceKey}
	default:
		// Fix: %q already quotes its argument; the previous "\"%q\"" format
		// rendered doubled quotes like ""secret"".
		return nil, fmt.Errorf("unsupported env source type %q; supported source types are \"config-map\" (\"cm\") and \"secret\" (\"sc\")", slices[0])
	}

	if len(sourceName) == 0 {
		return nil, fmt.Errorf("the name of %s cannot be an empty string", sourceType)
	}
	if len(sourceKey) == 0 {
		return nil, fmt.Errorf("the key referenced by resource %s \"%s\" cannot be an empty string", sourceType, sourceName)
	}

	return &envVarSource, nil
}
// =======================================================================================
// updateEnvFrom appends an EnvFromSource for each "type:name" spec in
// toUpdate that is not already present, where presence is determined by the
// canonical "<type>:<name>" form. Existing sources are never modified.
func updateEnvFrom(envFromSources []corev1.EnvFromSource, toUpdate []string) ([]corev1.EnvFromSource, error) {
	// Index the existing sources by canonical name for duplicate detection;
	// sources that are neither ConfigMap- nor Secret-backed are skipped.
	existingNameSet := make(map[string]bool)

	for i := range envFromSources {
		envSrc := &envFromSources[i]
		if canonicalName, err := getCanonicalNameFromEnvFromSource(envSrc); err == nil {
			existingNameSet[canonicalName] = true
		}
	}

	for _, s := range toUpdate {
		info, err := newVolumeSourceInfoWithSpecString(s)
		if err != nil {
			return nil, err
		}

		if _, ok := existingNameSet[info.getCanonicalName()]; !ok {
			envFromSources = append(envFromSources, *info.createEnvFromSource())
		}
	}

	return envFromSources, nil
}
// removeEnvFrom deletes the first EnvFromSource matching each "type:name"
// spec in toRemove, and returns nil (rather than an empty slice) when no
// sources remain. Specs that fail to parse abort with an error.
func removeEnvFrom(envFromSources []corev1.EnvFromSource, toRemove []string) ([]corev1.EnvFromSource, error) {
	for _, name := range toRemove {
		info, err := newVolumeSourceInfoWithSpecString(name)
		if err != nil {
			return nil, err
		}
		// Only the first match per spec is removed (break), mirroring the
		// other remove* helpers in this file.
		for i, envSrc := range envFromSources {
			if (info.volumeSourceType == ConfigMapVolumeSourceType && envSrc.ConfigMapRef != nil && info.volumeSourceName == envSrc.ConfigMapRef.Name) ||
				(info.volumeSourceType == SecretVolumeSourceType && envSrc.SecretRef != nil && info.volumeSourceName == envSrc.SecretRef.Name) {
				envFromSources = append(envFromSources[:i], envFromSources[i+1:]...)
				break
			}
		}
	}

	if len(envFromSources) == 0 {
		envFromSources = nil
	}

	return envFromSources, nil
}
// updateVolume points the given volume at the config map or secret described
// by info, clearing whichever source field is not used so the two can never
// be set simultaneously.
func updateVolume(volume *corev1.Volume, info *volumeSourceInfo) error {
	switch info.volumeSourceType {
	case ConfigMapVolumeSourceType:
		volume.Secret = nil
		volume.ConfigMap = &corev1.ConfigMapVolumeSource{LocalObjectReference: corev1.LocalObjectReference{Name: info.volumeSourceName}}
	case SecretVolumeSourceType:
		volume.ConfigMap = nil
		volume.Secret = &corev1.SecretVolumeSource{SecretName: info.volumeSourceName}
	default:
		// Fix: error strings should be lowercase (staticcheck ST1005) and
		// report the offending value.
		return fmt.Errorf("invalid volume source type %v", info.volumeSourceType)
	}
	return nil
}
// updateVolumeMountsFromMap updates or adds volume mounts. If a given name of a volume is not existing, it returns an error
// toUpdate maps mount path -> volume name; every mount touched or created
// here is forced read-only.
func updateVolumeMountsFromMap(volumeMounts []corev1.VolumeMount, toUpdate *util.OrderedMap, volumes []corev1.Volume) ([]corev1.VolumeMount, error) {
	set := make(map[string]bool) // mount paths already updated in place
	for i := range volumeMounts {
		volumeMount := &volumeMounts[i]
		name, present := toUpdate.GetString(volumeMount.MountPath)

		if present {
			// Reject updates referencing a volume that does not exist.
			if !existsVolumeNameInVolumes(name, volumes) {
				return nil, fmt.Errorf("There is no volume matched with %q", name)
			}

			volumeMount.ReadOnly = true
			volumeMount.Name = name
			set[volumeMount.MountPath] = true
		}
	}

	// Append new mounts (in map order) for paths not updated above. Note:
	// this path does not re-check volume existence — the in-place branch
	// above does.
	it := toUpdate.Iterator()
	for mountPath, name, ok := it.NextString(); ok; mountPath, name, ok = it.NextString() {
		if !set[mountPath] {
			volumeMounts = append(volumeMounts, corev1.VolumeMount{
				Name:      name,
				ReadOnly:  true,
				MountPath: mountPath,
			})
		}
	}

	return volumeMounts, nil
}
// removeVolumeMounts deletes the first mount matching each mount path in
// toRemove, returning nil when no mounts remain.
func removeVolumeMounts(volumeMounts []corev1.VolumeMount, toRemove []string) []corev1.VolumeMount {
	for _, mountPath := range toRemove {
		for i := range volumeMounts {
			if volumeMounts[i].MountPath == mountPath {
				volumeMounts = append(volumeMounts[:i], volumeMounts[i+1:]...)
				break
			}
		}
	}
	if len(volumeMounts) == 0 {
		return nil
	}
	return volumeMounts
}
// updateVolumesFromMap updates or adds volumes regardless whether the volume is used or not
func updateVolumesFromMap(volumes []corev1.Volume, toUpdate *util.OrderedMap) ([]corev1.Volume, error) {
	set := make(map[string]bool) // names updated in place
	for i := range volumes {
		volume := &volumes[i]
		info, present := toUpdate.Get(volume.Name)
		if present {
			if err := updateVolume(volume, info.(*volumeSourceInfo)); err != nil {
				return nil, err
			}
			set[volume.Name] = true
		}
	}

	// Append (in map order) the volumes that were not updated above.
	it := toUpdate.Iterator()
	for name, info, ok := it.Next(); ok; name, info, ok = it.Next() {
		if !set[name] {
			volumes = append(volumes, corev1.Volume{Name: name})
			// Fix: this error was previously discarded, silently producing a
			// volume with no source on bad input.
			if err := updateVolume(&volumes[len(volumes)-1], info.(*volumeSourceInfo)); err != nil {
				return nil, err
			}
		}
	}

	return volumes, nil
}
// removeVolumes removes volumes. If there is a volume mount referencing the volume, it causes an error
// Returns nil (rather than an empty slice) when no volumes remain; names
// without a match are silently ignored.
func removeVolumes(volumes []corev1.Volume, toRemove []string, volumeMounts []corev1.VolumeMount) ([]corev1.Volume, error) {
	for _, name := range toRemove {
		for i, volume := range volumes {
			if volume.Name == name {
				// A volume still referenced by a mount must not be removed.
				if existsVolumeNameInVolumeMounts(name, volumeMounts) {
					return nil, fmt.Errorf("The volume %q cannot be removed because it is mounted", name)
				}
				volumes = append(volumes[:i], volumes[i+1:]...)
				break
			}
		}
	}
	if len(volumes) == 0 {
		return nil, nil
	}
	return volumes, nil
}
// =======================================================================================
// volumeSourceInfo describes the backing source of a volume: its kind
// (config-map or secret) and the name of that Kubernetes resource.
type volumeSourceInfo struct {
	volumeSourceType VolumeSourceType
	volumeSourceName string
}
// newVolumeSourceInfoWithSpecString parses a "type:name" spec (type being
// "config-map"/"cm" or "secret"/"sc") into a volumeSourceInfo.
func newVolumeSourceInfoWithSpecString(spec string) (*volumeSourceInfo, error) {
	slices := strings.SplitN(spec, ":", 2)
	if len(slices) != 2 {
		return nil, fmt.Errorf("argument requires a value that contains the : character; got %q", spec)
	}

	var volumeSourceType VolumeSourceType

	typeString := strings.TrimSpace(slices[0])
	volumeSourceName := strings.TrimSpace(slices[1])

	switch typeString {
	case "config-map", "cm":
		volumeSourceType = ConfigMapVolumeSourceType
	case "secret", "sc":
		volumeSourceType = SecretVolumeSourceType
	default:
		// Fix: %q already quotes its argument; the previous "\"%q\"" format
		// rendered doubled quotes like ""secret"".
		return nil, fmt.Errorf("unsupported volume source type %q; supported volume source types are \"config-map\" and \"secret\"", slices[0])
	}

	if len(volumeSourceName) == 0 {
		return nil, fmt.Errorf("the name of %s cannot be an empty string", volumeSourceType)
	}

	return &volumeSourceInfo{
		volumeSourceType: volumeSourceType,
		volumeSourceName: volumeSourceName,
	}, nil
}
// getCanonicalName renders the source as "<type>:<name>", e.g. "secret:foo",
// matching the form produced by getCanonicalNameFromEnvFromSource.
func (vol *volumeSourceInfo) getCanonicalName() string {
	return fmt.Sprintf("%s:%s", vol.volumeSourceType.String(), vol.volumeSourceName)
}
// getCanonicalNameFromEnvFromSource renders the source as "<type>:<name>"
// for a ConfigMap- or Secret-backed EnvFromSource, or returns an error when
// neither reference is set.
func getCanonicalNameFromEnvFromSource(envSrc *corev1.EnvFromSource) (string, error) {
	if envSrc.ConfigMapRef != nil {
		return fmt.Sprintf("%s:%s", ConfigMapVolumeSourceType, envSrc.ConfigMapRef.Name), nil
	}
	if envSrc.SecretRef != nil {
		return fmt.Sprintf("%s:%s", SecretVolumeSourceType, envSrc.SecretRef.Name), nil
	}
	// Fix: "a EnvFromSource" -> "an EnvFromSource".
	return "", fmt.Errorf("there is no ConfigMapRef or SecretRef in an EnvFromSource")
}
// createEnvFromSource builds the EnvFromSource for the config map or secret
// this volumeSourceInfo refers to; returns nil for any other source type.
func (vol *volumeSourceInfo) createEnvFromSource() *corev1.EnvFromSource {
	ref := corev1.LocalObjectReference{Name: vol.volumeSourceName}
	switch vol.volumeSourceType {
	case ConfigMapVolumeSourceType:
		return &corev1.EnvFromSource{
			ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: ref},
		}
	case SecretVolumeSourceType:
		return &corev1.EnvFromSource{
			SecretRef: &corev1.SecretEnvSource{LocalObjectReference: ref},
		}
	}
	return nil
}
// =======================================================================================
// existsVolumeNameInVolumes reports whether a volume with the given name is
// present in volumes.
func existsVolumeNameInVolumes(volumeName string, volumes []corev1.Volume) bool {
	for i := range volumes {
		if volumes[i].Name == volumeName {
			return true
		}
	}
	return false
}
// existsVolumeNameInVolumeMounts reports whether any mount references the
// volume with the given name.
func existsVolumeNameInVolumeMounts(volumeName string, volumeMounts []corev1.VolumeMount) bool {
	for i := range volumeMounts {
		if volumeMounts[i].Name == volumeName {
			return true
		}
	}
	return false
}
// =======================================================================================
// reviseVolumeInfoAndMountsToUpdate normalizes the mount specs: a plain
// value is kept as a volume-name reference, while an inline "type:name"
// value gets an auto-generated volume (named from the mount path) recorded
// in the returned source-info map. Explicit volumesToUpdate entries are
// parsed and merged into the same map.
func reviseVolumeInfoAndMountsToUpdate(mountsToUpdate *util.OrderedMap, volumesToUpdate *util.OrderedMap) (*util.OrderedMap, *util.OrderedMap, error) {
	volumeSourceInfoByName := util.NewOrderedMap() //make(map[string]*volumeSourceInfo)
	mountsToUpdateRevised := util.NewOrderedMap()  //make(map[string]string)

	it := mountsToUpdate.Iterator()
	for path, value, ok := it.NextString(); ok; path, value, ok = it.NextString() {
		// slices[0] -> config-map, cm, secret, sc, volume, or vo
		// slices[1] -> secret, config-map, or volume name
		slices := strings.SplitN(value, ":", 2)
		if len(slices) == 1 {
			mountsToUpdateRevised.Set(path, slices[0])
			continue
		}
		// The two type branches differed only in the VolumeSourceType value;
		// fold them into one.
		var sourceType VolumeSourceType
		switch slices[0] {
		case "config-map", "cm":
			sourceType = ConfigMapVolumeSourceType
		case "secret", "sc":
			sourceType = SecretVolumeSourceType
		default:
			// Fix: %q already quotes its argument; the previous "\"%q\""
			// format rendered doubled quotes.
			// NOTE(review): the message advertises "volume or vo" but no such
			// case is handled above — confirm whether that type is planned.
			return nil, nil, fmt.Errorf("unsupported volume type %q; supported volume types are \"config-map or cm\", \"secret or sc\", and \"volume or vo\"", slices[0])
		}
		generatedName := util.GenerateVolumeName(path)
		volumeSourceInfoByName.Set(generatedName, &volumeSourceInfo{
			volumeSourceType: sourceType,
			volumeSourceName: slices[1],
		})
		mountsToUpdateRevised.Set(path, generatedName)
	}

	it = volumesToUpdate.Iterator()
	for name, value, ok := it.NextString(); ok; name, value, ok = it.NextString() {
		info, err := newVolumeSourceInfoWithSpecString(value)
		if err != nil {
			return nil, nil, err
		}
		volumeSourceInfoByName.Set(name, info)
	}

	return volumeSourceInfoByName, mountsToUpdateRevised, nil
}
// reviseVolumesToRemove also schedules for removal the auto-generated
// volumes (named via util.GenerateVolumeName from the mount path) that back
// mounts being removed; user-named volumes are left alone.
func reviseVolumesToRemove(volumeMounts []corev1.VolumeMount, volumesToRemove []string, mountsToRemove []string) []string {
	for _, pathToRemove := range mountsToRemove {
		generated := util.GenerateVolumeName(pathToRemove)
		for _, vm := range volumeMounts {
			if vm.MountPath == pathToRemove && vm.Name == generated {
				volumesToRemove = append(volumesToRemove, vm.Name)
			}
		}
	}
	return volumesToRemove
}
|
package search_test
import (
"fmt"
"testing"
"github.com/carolove/Golang/algorithms/datageneration"
"github.com/carolove/Golang/algorithms/search"
)
// TestChecksumSearch exercises ChecksumSearch against a generated vector for
// several target sums, printing the matching pair or "not found" for each.
func TestChecksumSearch(t *testing.T) {
	vec := datageneration.GenerationVector()
	for _, target := range []int{52, 578, 582} {
		va, vb, isFound := search.ChecksumSearch(vec, target)
		if isFound {
			fmt.Println(va, vb)
		} else {
			fmt.Println("not found")
		}
	}
}
|
package models
import (
"context"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
)
// ProductDetail is the product detail document stored in MongoDB, keyed by
// company ID and product ID.
// NOTE(review): the exported field "Discribe" is a typo for "Describe", but
// it is part of the JSON/BSON schema (tag "discribe") and the Go API, so it
// cannot be renamed without a migration.
type ProductDetail struct {
	ComID       int64  `json:"com_id" bson:"com_id"`
	ProductID   int64  `json:"product_id" bson:"product_id"`
	ProductName string `json:"product_name" bson:"product_name"`
	Attr        string `json:"attr" bson:"attr"`         // product attributes, stored as JSON
	Discribe    string `json:"discribe" bson:"discribe"` // product description
	/*	DetailID    int64   `json:"detail_id" bson:"detail_id"`
		ProductName string  `json:"product_name" bson:"product_name"`
		Sales       int64   `json:"sales" bson:"sales"`         // sales volume
		Origin      string  `json:"origin" bson:"origin"`       // place of origin
		Size        string  `json:"size" bson:"size"`           // specification
		Unit        int64   `json:"unit" bson:"unit"`           // product unit
		Weight      float64 `json:"weight" bson:"weight"`       // weight
		Packaging   string  `json:"packaging" bson:"packaging"` // packaging
		Quality     string  `json:"quality" bson:"quality"`     // shelf life
		Storage     string  `json:"storage" bson:"storage"`     // storage method*/
}
// getProductDetailCollection returns the MongoDB collection holding product
// detail documents.
func getProductDetailCollection() *mongo.Collection {
	return Client.Collection("product_detail")
}
// GetProductDetailByID looks up the product detail document for the given
// company and product IDs, returning the driver's error (e.g.
// mongo.ErrNoDocuments when nothing matches).
// Fix: parameter names use Go's MixedCaps convention instead of underscores;
// named results with a naked return are replaced by explicit returns.
func GetProductDetailByID(comID, productID int64) (*ProductDetail, error) {
	filter := bson.M{
		"com_id":     comID,
		"product_id": productID,
	}
	var detail *ProductDetail
	if err := getProductDetailCollection().FindOne(context.TODO(), filter).Decode(&detail); err != nil {
		return nil, err
	}
	return detail, nil
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package wmp
import (
"context"
"regexp"
"strings"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/apps"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/event"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/testing"
)
// init registers the OverviewMode test with the tast framework, with an ash
// variant (chromeLoggedIn fixture) and a lacros variant.
func init() {
	testing.AddTest(&testing.Test{
		Func:         OverviewMode,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Checks that overview mode works correctly",
		Contacts: []string{
			"sammiequon@chromium.org",
			"chromeos-wmp@google.com",
			"chromeos-sw-engprod@google.com",
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		// Val carries the browser type so the test body can tell which
		// windows belong to the browser under test.
		Params: []testing.Param{{
			Fixture: "chromeLoggedIn",
			Val:     browser.TypeAsh,
		}, {
			Name:              "lacros",
			Fixture:           "lacros",
			ExtraSoftwareDeps: []string{"lacros"},
			Val:               browser.TypeLacros,
		}},
	})
}
// OverviewMode opens a browser window (maximized) and a Files window
// (normal), enters overview mode while verifying that only the visible
// (browser) window animates, then closes the browser from overview and
// checks that exactly the Files window remains.
func OverviewMode(ctx context.Context, s *testing.State) {
	// Reserve five seconds for various cleanup.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()

	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to create Test API connection: ", err)
	}

	cleanup, err := ash.EnsureTabletModeEnabled(ctx, tconn, false)
	if err != nil {
		s.Fatal("Failed to ensure clamshell mode: ", err)
	}
	defer cleanup(cleanupCtx)
	defer faillog.DumpUITreeOnError(cleanupCtx, s.OutDir(), s.HasError, tconn)

	// Ensure there is no window open before test starts.
	if err := ash.CloseAllWindows(ctx, tconn); err != nil {
		s.Fatal("Failed to ensure no window is open: ", err)
	}

	ac := uiauto.New(tconn)
	bt := s.Param().(browser.Type)
	browserApp, err := apps.PrimaryBrowser(ctx, tconn)
	if err != nil {
		s.Fatal("Could not find browser app info: ", err)
	}
	for _, app := range []apps.App{apps.FilesSWA, browserApp} {
		if err := apps.Launch(ctx, tconn, app.ID); err != nil {
			s.Fatalf("Failed to launch %s: %s", app.Name, err)
		}
		if err := ash.WaitForApp(ctx, tconn, app.ID, time.Minute); err != nil {
			s.Fatalf("%s did not appear in shelf after launch: %s", app.Name, err)
		}
		if _, err := ash.WaitForAppWindow(ctx, tconn, app.ID); err != nil {
			s.Fatalf("%s did not become visible: %s", app.Name, err)
		}
	}

	// Set Chrome window's state to maximized and Files window's state to normal.
	if err := ash.ForEachWindow(ctx, tconn, func(w *ash.Window) error {
		if ash.BrowserTypeMatch(bt)(w) {
			return ash.SetWindowStateAndWait(ctx, tconn, w.ID, ash.WindowStateMaximized)
		}
		if strings.Contains(w.Title, "Files") {
			return ash.SetWindowStateAndWait(ctx, tconn, w.ID, ash.WindowStateNormal)
		}
		return nil
	}); err != nil {
		s.Fatal("Failed to set window states: ", err)
	}

	// Overview only animates the user visible windows for performance reasons.
	// Here the Chrome window is maximized and completely occludes the Files window,
	// so the expectation is that only the Chrome window animates.
	//
	// Fix: animationError was previously written by this goroutine and read
	// by the main goroutine without any synchronization — a data race, and
	// the read could happen before polling finished. A done channel now
	// orders the write before the read.
	var animationError error
	animationDone := make(chan struct{})
	go func() {
		defer close(animationDone)
		testing.Poll(ctx, func(ctx context.Context) error {
			ws, err := ash.GetAllWindows(ctx, tconn)
			if err != nil {
				animationError = errors.Wrap(err, "failed to get the window list")
				return testing.PollBreak(animationError)
			}
			for _, window := range ws {
				if ash.BrowserTypeMatch(bt)(window) && !window.IsAnimating {
					animationError = errors.New("chrome window is not animating")
					return animationError
				}
				if strings.Contains(window.Title, "Files") && window.IsAnimating {
					animationError = errors.New("files window is animating")
					return animationError
				}
			}
			animationError = nil
			return nil
		}, &testing.PollOptions{Timeout: time.Second, Interval: 50 * time.Millisecond})
	}()
	if err := ash.SetOverviewModeAndWait(ctx, tconn, true); err != nil {
		s.Fatal("Failed to enter into the overview mode: ", err)
	}
	defer ash.SetOverviewModeAndWait(cleanupCtx, tconn, false)
	// Wait for the polling goroutine to finish before reading animationError.
	<-animationDone
	if animationError != nil {
		s.Fatal("Maximized and(or) normal windows didn't open in the overview as expected: ", animationError)
	}

	// Clicking the close button in overview should close the window.
	chromeOverviewItemView := nodewith.NameRegex(regexp.MustCompile(".*New Tab")).ClassName("OverviewItemView")
	closeChromeButton := nodewith.ClassName("CloseButton").Ancestor(chromeOverviewItemView)
	if err := ac.LeftClick(closeChromeButton)(ctx); err != nil {
		s.Fatal("Failed to close chrome window: ", err)
	}
	if err := ac.WithInterval(2*time.Second).WaitUntilNoEvent(nodewith.Root(), event.LocationChanged)(ctx); err != nil {
		s.Fatal("Failed to wait for location-change events to be completed: ", err)
	}

	ws, err := ash.GetAllWindows(ctx, tconn)
	if err != nil {
		s.Fatal("Failed to get the window list: ", err)
	}
	if len(ws) != 1 {
		s.Fatalf("Expected 1 window, got %v window(s)", len(ws))
	}
	if ash.BrowserTypeMatch(bt)(ws[0]) {
		s.Fatal("Chrome window still exists after closing it in overview")
	}
}
|
package nsqd
import (
"bytes"
"container/heap"
"errors"
"math"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nsqio/go-diskqueue"
"github.com/nsqio/nsq/internal/lg"
"github.com/nsqio/nsq/internal/pqueue"
"github.com/nsqio/nsq/internal/quantile"
)
// Consumer is the interface a subscribed client must implement so the
// channel can drive its lifecycle: pause/unpause delivery, forced close on
// channel shutdown, timeout accounting, stats reporting, and state reset.
type Consumer interface {
	UnPause()
	Pause()
	Close() error
	TimedOutMessage()
	Stats() ClientStats
	Empty()
}
// Channel represents the concrete type for a NSQ channel (and also
// implements the Queue interface)
//
// There can be multiple channels per topic, each with there own unique set
// of subscribers (clients).
//
// Channels maintain all client and message metadata, orchestrating in-flight
// messages, timeouts, requeuing, etc.
type Channel struct {
	// 64bit atomic vars need to be first for proper alignment on 32bit platforms
	requeueCount uint64 // number of messages that have been requeued
	messageCount uint64 // total number of messages received
	timeoutCount uint64 // number of in-flight messages that timed out (see processInFlightQueue)

	sync.RWMutex // guards

	topicName string   // name of the topic this channel belongs to
	name      string   // channel name
	ctx       *context // handle back to the owning nsqd instance

	backend BackendQueue // disk-backed queue used for persistence/overflow

	// In-memory message channel. The associated topic pushes messages into
	// it, and each subscribed client runs a goroutine consuming from it.
	memoryMsgChan chan *Message
	exitFlag      int32 // set once the channel is closing/deleting (same role as topic.exitFlag)
	exitMutex     sync.RWMutex

	// state tracking
	clients        map[int64]Consumer // consumers (clients) currently subscribed to this channel
	paused         int32              // while set, subscribed clients are not pushed messages
	ephemeral      bool               // whether this channel is temporary ("#ephemeral" suffix)
	deleteCallback func(*Channel)     // deletion callback (same role as topic.deleteCallback)
	deleter        sync.Once

	// Stats tracking
	e2eProcessingLatencyStream *quantile.Quantile

	// TODO: these can be DRYd up

	// Deferred-delivery messages: bodies live in deferredMessages/deferredPQ
	// and are scanned by the background queueScanLoop goroutine, which
	// re-publishes expired entries via c.put(msg).
	deferredMessages map[MessageID]*pqueue.Item
	deferredPQ       pqueue.PriorityQueue // priority queue over the deferred messages
	deferredMutex    sync.Mutex           // guards deferredMessages

	// In-flight messages are tracked until the client FINs them; if the
	// timeout elapses first the message is redelivered (so clients must
	// de-duplicate).
	inFlightMessages map[MessageID]*Message
	inFlightPQ       inFlightPqueue // priority queue over the in-flight messages
	inFlightMutex    sync.Mutex     // guards inFlightMessages
}
// NewChannel creates a new instance of the Channel type and returns a pointer
func NewChannel(topicName string, channelName string, ctx *context,
	deleteCallback func(*Channel)) *Channel {

	// 1. Initialize the basic channel fields.
	c := &Channel{
		topicName:      topicName,
		name:           channelName,
		memoryMsgChan:  make(chan *Message, ctx.nsqd.getOpts().MemQueueSize),
		clients:        make(map[int64]Consumer),
		deleteCallback: deleteCallback,
		ctx:            ctx,
	}
	// Only track end-to-end processing latency when percentiles are configured.
	if len(ctx.nsqd.getOpts().E2EProcessingLatencyPercentiles) > 0 {
		c.e2eProcessingLatencyStream = quantile.New(
			ctx.nsqd.getOpts().E2EProcessingLatencyWindowTime,
			ctx.nsqd.getOpts().E2EProcessingLatencyPercentiles,
		)
	}
	// 2. Initialize the two in-memory priority queues (in-flight + deferred).
	c.initPQ()

	// 3. As with topics, ephemeral channels are not backed by a real
	// BackendQueue; they get a dummy one instead.
	if strings.HasSuffix(channelName, "#ephemeral") {
		c.ephemeral = true
		c.backend = newDummyBackendQueue()
	} else {
		dqLogf := func(level diskqueue.LogLevel, f string, args ...interface{}) {
			opts := ctx.nsqd.getOpts()
			lg.Logf(opts.Logger, opts.LogLevel, lg.LogLevel(level), f, args...)
		}
		// backend names, for uniqueness, automatically include the topic...
		// 4. Create the disk-backed queue via go-diskqueue, with parameters
		// mirroring those used for a topic's backend queue.
		backendName := getBackendName(topicName, channelName)
		c.backend = diskqueue.New(
			backendName,
			ctx.nsqd.getOpts().DataPath,
			ctx.nsqd.getOpts().MaxBytesPerFile,
			int32(minValidMsgLength),
			int32(ctx.nsqd.getOpts().MaxMsgSize)+minValidMsgLength,
			ctx.nsqd.getOpts().SyncEvery,
			ctx.nsqd.getOpts().SyncTimeout,
			dqLogf,
		)
	}

	// 5. Notify lookupd so it can register this channel.
	c.ctx.nsqd.Notify(c)

	return c
}
// initPQ (re)initializes the in-flight queue and the deferred queue.
func (c *Channel) initPQ() {
	// Default queue size is MemQueueSize/10, with a minimum of 1.
	pqSize := int(math.Max(1, float64(c.ctx.nsqd.getOpts().MemQueueSize)/10))

	c.inFlightMutex.Lock()
	c.inFlightMessages = make(map[MessageID]*Message)
	c.inFlightPQ = newInFlightPqueue(pqSize)
	c.inFlightMutex.Unlock()

	c.deferredMutex.Lock()
	c.deferredMessages = make(map[MessageID]*pqueue.Item)
	c.deferredPQ = pqueue.New(pqSize)
	c.deferredMutex.Unlock()
}
// Exiting returns a boolean indicating if this channel is closed/exiting.
func (c *Channel) Exiting() bool {
	flag := atomic.LoadInt32(&c.exitFlag)
	return flag == 1
}
// Delete empties the channel and closes
// (removes all messages, deletes the backend, and notifies lookupd).
func (c *Channel) Delete() error {
	return c.exit(true)
}
// Close cleanly closes the Channel
// (flushes all pending messages to disk, then closes the backend).
func (c *Channel) Close() error {
	return c.exit(false)
}
// exit is the shared shutdown path for Delete (deleted=true) and
// Close (deleted=false).
func (c *Channel) exit(deleted bool) error {
	c.exitMutex.Lock()
	defer c.exitMutex.Unlock()

	// 1. Atomically set exitFlag; if it was already set, we are done.
	if !atomic.CompareAndSwapInt32(&c.exitFlag, 0, 1) {
		return errors.New("exiting")
	}

	// 2. When deleting, notify nsqlookupd that this channel is going away.
	if deleted {
		c.ctx.nsqd.logf(LOG_INFO, "CHANNEL(%s): deleting", c.name)

		c.ctx.nsqd.Notify(c)
	} else {
		c.ctx.nsqd.logf(LOG_INFO, "CHANNEL(%s): closing", c.name)
	}

	// this forceably closes client connections
	c.RLock()
	// 3. Force-close every client subscribed to this channel.
	for _, client := range c.clients {
		client.Close()
	}
	c.RUnlock()

	// 4. When deleting, drop the in-memory and persisted messages...
	if deleted {
		// empty the queue (deletes the backend files, too)
		c.Empty()
		// 5. ...and delete the backend storage itself.
		return c.backend.Delete()
	}

	// write anything leftover to disk
	// 6. Persist the memory channel plus both priority queues to disk.
	c.flush()
	// 7. Close the backend queue.
	return c.backend.Close()
}
// Empty discards every message held by the channel: it resets both in-memory
// priority queues, tells each subscribed client to clear its per-channel
// state, drains the in-memory channel, and finally empties the backend.
func (c *Channel) Empty() error {
	c.Lock()
	defer c.Unlock()

	// Re-create the in-flight and deferred structures, discarding contents.
	c.initPQ()
	// Reset per-client bookkeeping (e.g. in-flight counts / ready state).
	for _, client := range c.clients {
		client.Empty()
	}

	// Drain whatever is buffered in memoryMsgChan, then clear the backend.
	for {
		select {
		case <-c.memoryMsgChan:
		default:
			return c.backend.Empty()
		}
	}
}
// flush persists all the messages in internal memory buffers to the backend
// it does not drain inflight/deferred because it is only called in Close()
// Three collections are written out: memoryMsgChan, inFlightMessages and
// deferredMessages.
func (c *Channel) flush() error {
	var msgBuf bytes.Buffer

	if len(c.memoryMsgChan) > 0 || len(c.inFlightMessages) > 0 || len(c.deferredMessages) > 0 {
		c.ctx.nsqd.logf(LOG_INFO, "CHANNEL(%s): flushing %d memory %d in-flight %d deferred messages to backend",
			c.name, len(c.memoryMsgChan), len(c.inFlightMessages), len(c.deferredMessages))
	}

	// 1. Drain messages still buffered in the in-memory channel to disk.
	for {
		select {
		case msg := <-c.memoryMsgChan:
			err := writeMessageToBackend(&msgBuf, msg, c.backend)
			if err != nil {
				c.ctx.nsqd.logf(LOG_ERROR, "failed to write message to backend - %s", err)
			}
		default:
			goto finish
		}
	}

	// 2. Persist the messages that were still in flight.
finish:
	c.inFlightMutex.Lock()
	for _, msg := range c.inFlightMessages {
		err := writeMessageToBackend(&msgBuf, msg, c.backend)
		if err != nil {
			c.ctx.nsqd.logf(LOG_ERROR, "failed to write message to backend - %s", err)
		}
	}
	c.inFlightMutex.Unlock()

	// 3. Persist the deferred messages as well.
	c.deferredMutex.Lock()
	for _, item := range c.deferredMessages {
		msg := item.Value.(*Message)
		err := writeMessageToBackend(&msgBuf, msg, c.backend)
		if err != nil {
			c.ctx.nsqd.logf(LOG_ERROR, "failed to write message to backend - %s", err)
		}
	}
	c.deferredMutex.Unlock()

	return nil
}
// Depth returns the number of messages queued in memory plus on the backend.
func (c *Channel) Depth() int64 {
	memDepth := int64(len(c.memoryMsgChan))
	return memDepth + c.backend.Depth()
}
// Pause stops message delivery to this channel's subscribed clients.
func (c *Channel) Pause() error {
	return c.doPause(true)
}
// UnPause resumes message delivery to this channel's subscribed clients.
func (c *Channel) UnPause() error {
	return c.doPause(false)
}
// doPause flips the paused flag and propagates the new state to every
// subscribed client.
func (c *Channel) doPause(pause bool) error {
	var flag int32
	if pause {
		flag = 1
	}
	atomic.StoreInt32(&c.paused, flag)

	c.RLock()
	for _, client := range c.clients {
		if pause {
			client.Pause()
		} else {
			client.UnPause()
		}
	}
	c.RUnlock()
	return nil
}
// IsPaused reports whether delivery on this channel is currently paused.
func (c *Channel) IsPaused() bool {
	paused := atomic.LoadInt32(&c.paused)
	return paused == 1
}
// PutMessage writes a Message to the queue.
//
// It is invoked from topic.messagePump: when the topic receives a message
// from a producer it is handed to each of its channels, landing either in
// the in-memory channel or the disk backend (via put). The received-message
// counter is bumped on success.
func (c *Channel) PutMessage(m *Message) error {
	c.RLock()
	defer c.RUnlock()
	if c.Exiting() {
		return errors.New("exiting")
	}
	if err := c.put(m); err != nil {
		return err
	}
	atomic.AddUint64(&c.messageCount, 1)
	return nil
}
// put pushes m into the in-memory channel when there is room, otherwise it
// spills the message to the disk-backed queue (same strategy as topic.put).
func (c *Channel) put(m *Message) error {
	select {
	case c.memoryMsgChan <- m:
	default:
		// memoryMsgChan is full: serialize through a pooled buffer to disk.
		b := bufferPoolGet()
		err := writeMessageToBackend(b, m, c.backend)
		bufferPoolPut(b)
		c.ctx.nsqd.SetHealth(err)
		if err != nil {
			c.ctx.nsqd.logf(LOG_ERROR, "CHANNEL(%s): failed to write message to backend - %s",
				c.name, err)
			return err
		}
	}
	return nil
}
// PutMessageDeferred counts the message and schedules it for delivery after
// the given delay via the deferred queue.
func (c *Channel) PutMessageDeferred(msg *Message, timeout time.Duration) {
	atomic.AddUint64(&c.messageCount, 1)
	c.StartDeferredTimeout(msg, timeout)
}
// TouchMessage resets the timeout for an in-flight message.
// It pops the message from the in-flight dictionary and priority queue,
// extends its deadline (capped at MaxMsgTimeout past the original delivery
// time), and re-inserts it into both structures.
func (c *Channel) TouchMessage(clientID int64, id MessageID, clientMsgTimeout time.Duration) error {
	msg, err := c.popInFlightMessage(clientID, id)
	if err != nil {
		return err
	}
	c.removeFromInFlightPQ(msg)

	newTimeout := time.Now().Add(clientMsgTimeout)
	if newTimeout.Sub(msg.deliveryTS) >=
		c.ctx.nsqd.getOpts().MaxMsgTimeout {
		// we would have gone over, set to the max
		newTimeout = msg.deliveryTS.Add(c.ctx.nsqd.getOpts().MaxMsgTimeout)
	}

	msg.pri = newTimeout.UnixNano()
	err = c.pushInFlightMessage(msg)
	if err != nil {
		return err
	}
	c.addToInFlightPQ(msg)
	return nil
}
// FinishMessage successfully discards an in-flight message: the consumer has
// FINed it, so it is removed from both the in-flight dictionary and the
// in-flight priority queue. The end-to-end latency sample is recorded when
// latency tracking is enabled.
func (c *Channel) FinishMessage(clientID int64, id MessageID) error {
	msg, err := c.popInFlightMessage(clientID, id)
	if err != nil {
		return err
	}
	c.removeFromInFlightPQ(msg)
	if stream := c.e2eProcessingLatencyStream; stream != nil {
		stream.Insert(msg.Timestamp)
	}
	return nil
}
// RequeueMessage requeues a message based on `time.Duration`, ie:
//
// `timeoutMs` == 0 - requeue a message immediately
// `timeoutMs` > 0 - asynchronously wait for the specified timeout
//     and requeue a message (aka "deferred requeue")
//
func (c *Channel) RequeueMessage(clientID int64, id MessageID, timeout time.Duration) error {
	// 1. Remove the message from the in-flight dictionary...
	msg, err := c.popInFlightMessage(clientID, id)
	if err != nil {
		return err
	}
	// 2. ...and from the in-flight priority queue, then bump requeueCount.
	c.removeFromInFlightPQ(msg)
	atomic.AddUint64(&c.requeueCount, 1)

	// 3. timeout == 0: requeue right away via put (memoryMsgChan or backend),
	// guarding against a concurrent shutdown.
	if timeout == 0 {
		c.exitMutex.RLock()
		if c.Exiting() {
			c.exitMutex.RUnlock()
			return errors.New("exiting")
		}
		err := c.put(msg)
		c.exitMutex.RUnlock()
		return err
	}

	// deferred requeue
	// Otherwise schedule it as a deferred message with the requested delay.
	return c.StartDeferredTimeout(msg, timeout)
}
// AddClient adds a client to the Channel's client list. Clients subscribe by
// sending a SUB command over TCP. Re-adding an existing clientID is a no-op;
// exceeding MaxChannelConsumers (when non-zero) is rejected.
func (c *Channel) AddClient(clientID int64, client Consumer) error {
	c.Lock()
	defer c.Unlock()

	if _, found := c.clients[clientID]; found {
		return nil
	}

	// Enforce the configured consumer limit (0 means unlimited).
	if limit := c.ctx.nsqd.getOpts().MaxChannelConsumers; limit != 0 && len(c.clients) >= limit {
		return errors.New("E_TOO_MANY_CHANNEL_CONSUMERS")
	}

	c.clients[clientID] = client
	return nil
}
// RemoveClient removes a client from the Channel's client list.
// When the last client of an ephemeral channel is removed, the channel
// schedules its own deletion via deleteCallback (ephemeral topics behave the
// same way). deleter (sync.Once) guarantees the callback runs at most once.
func (c *Channel) RemoveClient(clientID int64) {
	c.Lock()
	defer c.Unlock()

	if _, ok := c.clients[clientID]; !ok {
		return
	}
	delete(c.clients, clientID)

	// Idiom fix: compare the bool directly instead of `== true`.
	if len(c.clients) == 0 && c.ephemeral {
		go c.deleter.Do(func() { c.deleteCallback(c) })
	}
}
// StartInFlightTimeout stamps msg with its delivery metadata and inserts it
// into the in-flight structures, where queueScanWorker will watch for a
// timeout. msg.pri is the absolute deadline (UnixNano) after which the
// message is considered timed out.
func (c *Channel) StartInFlightTimeout(msg *Message, clientID int64, timeout time.Duration) error {
	now := time.Now()
	msg.clientID = clientID
	msg.deliveryTS = now
	msg.pri = now.Add(timeout).UnixNano()

	// Register in the in-flight dictionary first, then the priority queue.
	if err := c.pushInFlightMessage(msg); err != nil {
		return err
	}
	c.addToInFlightPQ(msg)
	return nil
}
// StartDeferredTimeout places msg into the deferred structures, where
// queueScanWorker will release it once the delay has elapsed. The item's
// Priority is the absolute release time (UnixNano).
func (c *Channel) StartDeferredTimeout(msg *Message, timeout time.Duration) error {
	absTs := time.Now().Add(timeout).UnixNano()
	item := &pqueue.Item{Value: msg, Priority: absTs}

	// Register in the deferred dictionary first, then the priority queue.
	if err := c.pushDeferredMessage(item); err != nil {
		return err
	}
	c.addToDeferredPQ(item)
	return nil
}
// pushInFlightMessage atomically adds a message to the in-flight dictionary,
// keyed by message ID. Duplicate IDs are rejected.
func (c *Channel) pushInFlightMessage(msg *Message) error {
	c.inFlightMutex.Lock()
	defer c.inFlightMutex.Unlock()

	if _, dup := c.inFlightMessages[msg.ID]; dup {
		return errors.New("ID already in flight")
	}
	c.inFlightMessages[msg.ID] = msg
	return nil
}
// popInFlightMessage atomically removes a message from the in-flight
// dictionary, verifying that it is owned by the requesting client.
func (c *Channel) popInFlightMessage(clientID int64, id MessageID) (*Message, error) {
	c.inFlightMutex.Lock()
	defer c.inFlightMutex.Unlock()

	msg, found := c.inFlightMessages[id]
	if !found {
		return nil, errors.New("ID not in flight")
	}
	if msg.clientID != clientID {
		return nil, errors.New("client does not own message")
	}
	delete(c.inFlightMessages, id)
	return msg, nil
}
// addToInFlightPQ inserts msg into the in-flight priority queue.
func (c *Channel) addToInFlightPQ(msg *Message) {
	c.inFlightMutex.Lock()
	defer c.inFlightMutex.Unlock()
	c.inFlightPQ.Push(msg)
}
// removeFromInFlightPQ removes msg from the in-flight priority queue, unless
// it has already been popped (signalled by index == -1).
func (c *Channel) removeFromInFlightPQ(msg *Message) {
	c.inFlightMutex.Lock()
	defer c.inFlightMutex.Unlock()

	if msg.index == -1 {
		// this item has already been popped off the pqueue
		return
	}
	c.inFlightPQ.Remove(msg.index)
}
// pushDeferredMessage adds item to the deferred dictionary keyed by its
// message ID, rejecting duplicates.
func (c *Channel) pushDeferredMessage(item *pqueue.Item) error {
	c.deferredMutex.Lock()
	defer c.deferredMutex.Unlock()

	// TODO: these map lookups are costly
	id := item.Value.(*Message).ID
	if _, dup := c.deferredMessages[id]; dup {
		return errors.New("ID already deferred")
	}
	c.deferredMessages[id] = item
	return nil
}
// popDeferredMessage removes and returns the item with the given message ID
// from the deferred dictionary.
func (c *Channel) popDeferredMessage(id MessageID) (*pqueue.Item, error) {
	c.deferredMutex.Lock()
	defer c.deferredMutex.Unlock()

	// TODO: these map lookups are costly
	item, found := c.deferredMessages[id]
	if !found {
		return nil, errors.New("ID not deferred")
	}
	delete(c.deferredMessages, id)
	return item, nil
}
// addToDeferredPQ inserts item into the deferred priority queue.
func (c *Channel) addToDeferredPQ(item *pqueue.Item) {
	c.deferredMutex.Lock()
	defer c.deferredMutex.Unlock()
	heap.Push(&c.deferredPQ, item)
}
// processDeferredQueue pops every deferred message whose release time has
// arrived (item.Priority <= t) and re-publishes it via put. It returns true
// ("dirty") when at least one message was released. Called periodically by
// queueScanWorker.
func (c *Channel) processDeferredQueue(t int64) bool {
	// Hold the exit lock so the channel cannot shut down mid-scan.
	c.exitMutex.RLock()
	defer c.exitMutex.RUnlock()

	if c.Exiting() {
		return false
	}

	dirty := false
	for {
		// Peek at the earliest item; nil means the head's deferral deadline
		// (Priority, an absolute timestamp) has not elapsed yet, so there is
		// nothing more to release this pass.
		c.deferredMutex.Lock()
		item, _ := c.deferredPQ.PeekAndShift(t)
		c.deferredMutex.Unlock()
		if item == nil {
			return dirty
		}
		dirty = true

		// The delay is over: drop it from the deferred dictionary and feed
		// it back into the normal delivery path (memoryMsgChan or backend).
		msg := item.Value.(*Message)
		if _, err := c.popDeferredMessage(msg.ID); err != nil {
			return dirty
		}
		c.put(msg)
	}
}
// processInFlightQueue pops every in-flight message whose delivery deadline
// has passed (msg.pri <= t), counts it as timed out, notifies the owning
// client, and re-publishes the message for redelivery. It returns true
// ("dirty") when at least one message timed out. Called periodically by
// queueScanWorker.
func (c *Channel) processInFlightQueue(t int64) bool {
	// Hold the exit lock so the channel cannot shut down mid-scan.
	c.exitMutex.RLock()
	defer c.exitMutex.RUnlock()

	if c.Exiting() {
		return false
	}

	dirty := false
	for {
		// A nil head means no in-flight message has exceeded its deadline;
		// the queue is clean for this pass.
		c.inFlightMutex.Lock()
		msg, _ := c.inFlightPQ.PeekAndShift(t)
		c.inFlightMutex.Unlock()
		if msg == nil {
			return dirty
		}
		dirty = true

		// The client missed the deadline: remove the message from the
		// in-flight dictionary and account for the timeout.
		if _, err := c.popInFlightMessage(msg.clientID, msg.ID); err != nil {
			return dirty
		}
		atomic.AddUint64(&c.timeoutCount, 1)

		// Let the owning client adjust its own bookkeeping.
		c.RLock()
		client, ok := c.clients[msg.clientID]
		c.RUnlock()
		if ok {
			client.TimedOutMessage()
		}

		// Re-publish the message; its attributes are refreshed on the next
		// delivery, possibly to a different subscriber of this channel.
		c.put(msg)
	}
}
|
// Copyright 2020 Liquidata, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package async
import "sync"
// WaitGroup functions similarly to sync.WaitGroup that ships with Go, with the key difference being that this one
// allows calls to Add with a positive delta to occur while another thread is waiting, while the sync version
// may panic. The tradeoff is a performance reduction since we now lock on all modifications to the counter.
//
// The zero value is ready to use; internal state is created lazily by init().
type WaitGroup struct {
	// counter is the number of outstanding operations. NOTE: despite an
	// earlier comment claiming negative counters were allowed, Add panics
	// when the counter would go below zero (matching sync.WaitGroup).
	counter int64
	// initOnce guards the one-time creation of syncCond (see init).
	initOnce sync.Once
	// syncCond's lock guards counter; Broadcast wakes waiters when the
	// counter reaches zero.
	syncCond *sync.Cond
}
// Add adds delta, which may be negative, to the WaitGroup counter. If the counter becomes zero, all goroutines blocked
// on Wait are released. If the counter goes negative, Add panics.
func (wg *WaitGroup) Add(delta int64) {
	wg.init()
	wg.syncCond.L.Lock()
	defer wg.syncCond.L.Unlock()

	wg.counter += delta
	switch {
	case wg.counter < 0:
		panic("negative WaitGroup counter")
	case wg.counter == 0:
		// Counter hit zero: release everyone blocked in Wait.
		wg.syncCond.Broadcast()
	}
}
// Done decrements the WaitGroup counter by one.
func (wg *WaitGroup) Done() {
	wg.Add(-1)
}
// Wait blocks until the WaitGroup counter is less than or equal to zero.
// The loop re-checks the counter after each wakeup, as required by the
// sync.Cond contract (Broadcast may race with concurrent Adds).
func (wg *WaitGroup) Wait() {
	wg.init()
	wg.syncCond.L.Lock()
	defer wg.syncCond.L.Unlock()
	for wg.counter > 0 {
		wg.syncCond.Wait()
	}
}
// sync.WaitGroup allows the user to use the zero value of a wait group with &sync.WaitGroup{}. Since this is supposed
// to be a drop-in replacement, the user would expect to call &async.WaitGroup{}. Since we need some setup, we make use
// of sync.Once to run that setup the first time the wait group is used.
func (wg *WaitGroup) init() {
	wg.initOnce.Do(func() {
		// The Cond's mutex doubles as the guard for counter.
		wg.syncCond = sync.NewCond(&sync.Mutex{})
	})
}
|
package gokafka
import "testing"
// TestGetmetadata checks GetMetaData against the test broker, then probes a
// non-broker port where failure is only logged, not fatal.
// NOTE(review): requires a reachable Kafka broker at kafka.test:9092.
func TestGetmetadata(t *testing.T) {
	_, err := GetMetaData("kafka.test:9092", "test", 0, "gokafka")
	if err != nil {
		t.Error("could not get metadata of topic(test) from server(kafka.test)")
		t.Error(err)
	}
	// Port 80 has no broker; an error here is expected and merely logged.
	_, err = GetMetaData("kafka.test:80", "test", 0, "gokafka")
	if err != nil {
		t.Log("could not get metadata of topic(test) from 80 port")
	}
}
// TestGetoffset checks GetOffset against the test broker, then probes a
// non-broker port where failure is only logged, not fatal.
// NOTE(review): requires a reachable Kafka broker at kafka.test:9092.
func TestGetoffset(t *testing.T) {
	_, err := GetOffset("kafka.test:9092", "test", 0, 0, "gokafka", -1, 1)
	if err != nil {
		// BUG FIX: this message previously said "metadata" (copy-paste from
		// TestGetmetadata) even though GetOffset is under test.
		t.Error("could not get offset of topic(test) from server(kafka.test)")
		t.Error(err)
	}
	// BUG FIX: the bad-port probe previously called GetMetaData, so GetOffset's
	// error path was never exercised by this test.
	_, err = GetOffset("kafka.test:80", "test", 0, 0, "gokafka", -1, 1)
	if err != nil {
		t.Log("could not get offset of topic(test) from 80 port")
	}
}
|
package server
// NewHerokuServer creates a new Heroku Server from the given configuration,
// wiring up its TCP and WEB sub-servers before returning it.
func NewHerokuServer(params *Config) *Server {
	s := &Server{
		Host:        params.Host,
		Port:        params.Port,
		Single:      params.Single,
		Stats:       Stats{},
		pool:        make(ProxyPackMap),
		spaceSignal: make(SpaceSignal),
	}
	s.tcp = NewHerokuTCPServer(s)
	s.web = NewHerokuWEBServer(s)
	// s.web = NewWEBServer(s)
	return s
}
|
package main
import "fmt"
// main prints the season name for a sample month.
func main() {
	m := 6
	fmt.Printf("this month is %s", Season(m))
}
// Season maps a month number to its season name ("Spring", "Summer",
// "Fall" or "Winter"). Any value outside 1-12 yields the empty string.
func Season(m int) string {
	switch {
	case m >= 3 && m <= 5:
		return "Spring"
	case m >= 6 && m <= 8:
		return "Summer"
	case m >= 9 && m <= 11:
		return "Fall"
	case m == 12 || m == 1 || m == 2:
		return "Winter"
	}
	return ""
}
|
package main
import "fmt"
// sortColors sorts a slice containing only the values 0, 1 and 2 in place
// using Dijkstra's Dutch national flag partitioning: one pass maintaining
// three regions — 0s before `left`, unexamined values between `current` and
// `right`, and 2s after `right`.
func sortColors(nums []int) {
	left := 0              // nums[:left] are all 0
	right := len(nums) - 1 // nums[right+1:] are all 2
	current := 0
	// BUG FIX: the loop must run while current <= right (not <), and after
	// swapping a 2 toward the back `current` must NOT advance, because the
	// value swapped in from the right is still unexamined. The previous
	// version got both wrong (e.g. [2,0,1] came out as [1,0,2]).
	for current <= right {
		switch nums[current] {
		case 0:
			nums[current], nums[left] = nums[left], nums[current]
			left++
			current++
		case 2:
			nums[current], nums[right] = nums[right], nums[current]
			right--
		default: // 1: already in the middle region
			current++
		}
	}
	// fmt.Println(nums)
}
// main runs sortColors on a sample input.
func main() {
	nums := []int{2, 0, 2, 1, 1, 0}
	sortColors(nums)
}
|
package utils
import (
"fmt"
"holdempoker/models"
"strconv"
"strings"
"github.com/bradfitz/slice"
)
// PokerHandUtil evaluates poker hand rankings.
type PokerHandUtil struct {
	// hands holds one check function per hand type, indexed by the
	// models.HandType* constants; populated by SetHands.
	hands []interface{}
}
// CheckHands evaluates cards against every registered checker from the
// strongest hand type down to the weakest, returning the first (i.e. the
// highest-ranking) match, or a HandTypeNone result when nothing matches.
func (p *PokerHandUtil) CheckHands(cards []int) models.HandResult {
	var result models.HandResult
	var funcRef interface{}
	p.SetHands()
	// Walk from the highest-ranked checker downwards; stop at first match.
	for i := len(p.hands) - 1; i >= 0; i-- {
		funcRef = p.hands[i]
		result = funcRef.(func([]int) models.HandResult)(cards)
		fmt.Println(result.HandType) // NOTE(review): debug print left in production path — consider removing
		if result.HandType != models.HandTypeNone {
			break
		}
	}
	return result
}
// SetHands registers one check function per hand type, indexed by the
// models.HandType* constant so CheckHands can walk them strongest-first.
func (p *PokerHandUtil) SetHands() {
	p.hands = make([]interface{}, 10, 20)
	p.hands[models.HandTypeRoyalStraightFlush] = p.CheckRoyalStraightFlush
	p.hands[models.HandTypeStraightFlush] = p.CheckStraightFlush
	p.hands[models.HandTypePoker] = p.CheckPoker
	p.hands[models.HandTypeFullHouse] = p.CheckFullHouse
	p.hands[models.HandTypeFlush] = p.CheckFlush
	p.hands[models.HandTypeStrait] = p.CheckStraight
	p.hands[models.HandTypeTriple] = p.CheckTriple
	p.hands[models.HandTypeTwoPair] = p.CheckTwoPairs
	p.hands[models.HandTypeOnePair] = p.CheckOnePair
	p.hands[models.HandTypeTitle] = p.CheckTitle
}
// CheckRoyalStraightFlush detects a royal straight flush: one of the four
// fixed five-card combinations below (one per suit of the 0-51 card ids,
// 13 per suit). On a match, CardType records the suit as i+1.
func (p *PokerHandUtil) CheckRoyalStraightFlush(cards []int) models.HandResult {
	result := models.NewHandResult()
	handsCount := 4
	cardsCount := 5
	// One row per suit: the suit's first card plus its top four cards.
	hands := [4][5]int{
		{0, 9, 10, 11, 12},
		{13, 22, 23, 24, 25},
		{26, 35, 36, 37, 38},
		{39, 48, 49, 50, 51}}
	matchCount := 0
	compare1 := -1
	compare2 := -1
	for i := 0; i < handsCount; i++ {
		matchCount = 0
		result.InitializeMadeCard()
		for k := 0; k < cardsCount; k++ {
			for j := 0; j < len(cards); j++ {
				compare1 = hands[i][k]
				compare2 = cards[j]
				if compare2 == -1 { // -1 marks an empty card slot
					continue
				}
				if compare1 == compare2 {
					matchCount++
					result.AddMadeCard(compare1)
				}
			}
		}
		// All five cards of this suit's royal combination were found.
		if matchCount == cardsCount {
			result.HandType = models.HandTypeRoyalStraightFlush
			result.CardType = i + 1
			break
		}
		matchCount = 0
	}
	return result
}
// CheckStraightFlush detects a straight flush by testing the cards against
// every consecutive five-card run within a suit (nine runs per suit, four
// suits = 36 combinations). On a match, CardType records the suit and
// Hands[0] the rank of the run's top card.
func (p *PokerHandUtil) CheckStraightFlush(cards []int) models.HandResult {
	result := models.NewHandResult()
	handsCount := 36
	cardsCount := 5
	// All 36 in-suit consecutive runs over card ids 0-51 (13 per suit).
	hands := [36][5]int{
		{0, 1, 2, 3, 4},
		{1, 2, 3, 4, 5},
		{2, 3, 4, 5, 6},
		{3, 4, 5, 6, 7},
		{4, 5, 6, 7, 8},
		{5, 6, 7, 8, 9},
		{6, 7, 8, 9, 10},
		{7, 8, 9, 10, 11},
		{8, 9, 10, 11, 12},
		{13, 14, 15, 16, 17},
		{14, 15, 16, 17, 18},
		{15, 16, 17, 18, 19},
		{16, 17, 18, 19, 20},
		{17, 18, 19, 20, 21},
		{18, 19, 20, 21, 22},
		{19, 20, 21, 22, 23},
		{20, 21, 22, 23, 24},
		{21, 22, 23, 24, 25},
		{26, 27, 28, 29, 30},
		{27, 28, 29, 30, 31},
		{28, 29, 30, 31, 32},
		{29, 30, 31, 32, 33},
		{30, 31, 32, 33, 34},
		{31, 32, 33, 34, 35},
		{32, 33, 34, 35, 36},
		{33, 34, 35, 36, 37},
		{34, 35, 36, 37, 38},
		{39, 40, 41, 42, 43},
		{40, 41, 42, 43, 44},
		{41, 42, 43, 44, 45},
		{42, 43, 44, 45, 46},
		{43, 44, 45, 46, 47},
		{44, 45, 46, 47, 48},
		{45, 46, 47, 48, 49},
		{46, 47, 48, 49, 50},
		{47, 48, 49, 50, 51}}
	matchCount := 0
	compare1 := -1
	compare2 := -1
	for i := 0; i < handsCount; i++ {
		matchCount = 0
		result.InitializeMadeCard()
		for k := 0; k < cardsCount; k++ {
			for j := 0; j < len(cards); j++ {
				compare1 = hands[i][k]
				compare2 = cards[j]
				if compare2 == -1 { // -1 marks an empty card slot
					continue
				}
				if compare1 == compare2 {
					matchCount++
					result.AddMadeCard(compare1)
				}
			}
		}
		// All five cards of this run were found.
		if matchCount == cardsCount {
			result.HandType = models.HandTypeStraightFlush
			// i/9 picks the suit (1-4); i%9 is the run's offset within it.
			result.CardType = ((i-(i%9))/9 + 1)
			result.Hands[0] = 4 + (i % 9)
			break
		}
		matchCount = 0
	}
	return result
}
// CheckPoker detects four of a kind. On a match, HandType is set and the
// quad's rank is stored in Hands[0], with an ace (rank 0) remapped to 13,
// the highest rank.
func (p *PokerHandUtil) CheckPoker(cards []int) models.HandResult {
	result := models.NewHandResult()
	matchCount := 0
	matchIndex := 0
	i := 0
	for i = 0; i < len(cards); i++ {
		matchCount = 0
		for j := 0; j < len(cards); j++ {
			if cards[j] == -1 { // -1 marks an empty card slot
				continue
			}
			// Same rank (card id modulo 13).
			if cards[i]%13 == cards[j]%13 {
				if matchCount == 3 {
					matchIndex = cards[i] % 13
					result.AddMadeCard(cards[i])
				}
				matchCount++
			}
		}
		if matchCount == 4 {
			result.HandType = models.HandTypePoker
			// BUG FIX: the ace remap and the Hands[0] assignment were
			// previously in mutually exclusive if/else branches, so a quad
			// of aces never recorded its rank. Remap first, then always
			// record (mirroring CheckTriple's handling).
			if matchIndex == 0 {
				matchIndex = 13
			}
			result.Hands[0] = matchIndex
		}
	}
	return result
}
// CheckFullHouse detects a full house (a triple plus a pair of a different
// rank). On a match, Hands[0] holds the triple's rank and Hands[1] the best
// pair's rank (ace remapped to 13).
func (p *PokerHandUtil) CheckFullHouse(cards []int) models.HandResult {
	result := models.NewHandResult()
	i := 0
	j := 0
	hasTriple := false
	matchCount := 0
	matchIndex1 := 0
	//matchIndex2 := 0
	isFullHouse := false
	// Pass 1: look for any three-of-a-kind.
	for i = 0; i < len(cards); i++ {
		matchCount = 0
		for j = 0; j < len(cards); j++ {
			if cards[j] == -1 { // -1 marks an empty card slot
				continue
			}
			if cards[i]%13 == cards[j]%13 {
				if matchCount == 2 {
					matchIndex1 = cards[i] % 13
					result.AddMadeCard(cards[i])
				}
				matchCount++
			}
		}
		if matchCount >= 3 {
			hasTriple = true
		}
	}
	// Pass 2: given a triple, look for a pair of a different rank.
	if hasTriple == true {
		for i = 0; i < len(cards); i++ {
			matchCount = 0
			for j = 0; j < len(cards); j++ {
				if cards[j] == -1 {
					continue
				}
				if cards[i]%13 == cards[j]%13 && cards[i]%13 != matchIndex1 {
					if matchCount == 1 {
						//matchIndex2 = cards[i] % 13
						result.AddMadeCard(cards[i])
					}
					matchCount++
				}
			}
			if matchCount >= 2 {
				isFullHouse = true
				result.HandType = models.HandTypeFullHouse
			}
		}
	}
	// Pass 3: determine the final ranks for the triple (hand1) and the best
	// pair (hand2), with aces remapped to 13.
	if isFullHouse == true {
		hand1 := -100
		hand2 := -100
		for i = 0; i < len(cards); i++ {
			matchCount = 0
			matchIndex1 = -1
			for j = 0; j < len(cards); j++ {
				if cards[j] == -1 {
					continue
				}
				if cards[i]%13 == cards[j]%13 {
					if matchCount == 1 {
						matchIndex1 = cards[i] % 13
						if matchIndex1 == 0 {
							matchIndex1 = 13
						}
					}
					matchCount++
				}
				if matchIndex1 == -1 {
					continue
				}
				if matchCount == 2 {
					// A pair: keep the highest pair rank seen so far.
					if matchIndex1 > hand2 {
						hand2 = matchIndex1
					}
				} else if matchCount == 3 {
					// A triple: record its rank; otherwise treat as pair rank.
					if matchIndex1 > hand1 {
						hand1 = matchIndex1
					} else {
						if matchIndex1 > hand2 {
							hand2 = matchIndex1
						}
					}
				}
			}
		}
		result.Hands[0] = hand1
		result.Hands[1] = hand2
	}
	return result
}
// CheckFlush detects a flush (five or more cards of one suit). On a match,
// CardType records the suit and Hands holds the flush's card ranks sorted
// in descending order (ace remapped to 13).
func (p *PokerHandUtil) CheckFlush(cards []int) models.HandResult {
	result := models.NewHandResult()
	i := 0
	j := 0
	matchCount := 0
	count := 0
	matchType := models.CardTypeNone
	for i = 0; i < len(cards); i++ {
		matchCount = 0
		for j = 0; j < len(cards); j++ {
			if cards[j] == -1 { // -1 marks an empty card slot
				continue
			}
			if GetCardType(cards[i]) == GetCardType(cards[j]) {
				if matchCount == 4 {
					matchType = GetCardType(cards[i])
					result.AddMadeCard(cards[i])
					result.AddMadeCard(cards[j])
				}
				matchCount++
			}
		}
		if matchCount >= 5 {
			result.HandType = models.HandTypeFlush
			result.CardType = matchType
			// Collect the ranks of every card in the flush suit.
			count = 0
			for j = 0; j < len(cards); j++ {
				if GetCardType(cards[j]) == matchType {
					result.Hands[count] = cards[j] % 13
					if result.Hands[count] == 0 {
						result.Hands[count] = 13 // ace ranks highest
					}
					count++
				}
			}
			// Sort ascending...
			slice.Sort(result.Hands[:], func(i, j int) bool {
				return result.Hands[i] < result.Hands[j]
			})
			// ...then reverse so Hands ends up in descending order.
			for i := len(result.Hands)/2 - 1; i >= 0; i-- {
				opp := len(result.Hands) - 1 - i
				result.Hands[i], result.Hands[opp] = result.Hands[opp], result.Hands[i]
			}
		}
	}
	return result
}
// CheckStraight detects a straight by matching the cards' rank string (built
// by GetCardStr) against the ten possible consecutive rank sequences.
// Hands[0] stores the straight's top rank. The final row ("9,10,11,12") is
// the ace-high straight and additionally requires a rank-0 card (the ace).
func (p *PokerHandUtil) CheckStraight(cards []int) models.HandResult {
	result := models.NewHandResult()
	hands := [10]string{
		"0,1,2,3,4",
		"1,2,3,4,5",
		"2,3,4,5,6",
		"3,4,5,6,7",
		"4,5,6,7,8",
		"5,6,7,8,9",
		"6,7,8,9,10",
		"7,8,9,10,11",
		"8,9,10,11,12",
		"9,10,11,12"}
	i, j, k := 0, 0, 0
	// Reduce every card id to its rank (0-12).
	compareCards := make([]int, len(cards), len(cards))
	for i = 0; i < len(cards); i++ {
		compareCards[i] = cards[i] % 13
	}
	cardStr := GetCardStr(compareCards)
	for i = 0; i < len(hands); i++ {
		result.InitializeMadeCard()
		if i == 9 {
			// Ace-high straight: the four top ranks plus an ace (rank 0).
			if strings.Index(cardStr, hands[i]) >= 0 && GetIntArrayIndexOf(compareCards, 0) >= 0 {
				result.HandType = models.HandTypeStrait
				result.Hands[0] = 13
				selectedHands := strings.Split(hands[i], ",")
				for j = 0; j < len(compareCards); j++ {
					for k = 0; k < len(selectedHands); k++ {
						parseInt, _ := strconv.Atoi(selectedHands[k])
						if compareCards[j] == parseInt {
							result.AddMadeCard(cards[j])
							break
						}
					}
					// Include the ace itself in the made cards.
					if cards[j]%13 == 0 {
						result.AddMadeCard(cards[j])
					}
				}
				break
			}
		} else {
			if strings.Index(cardStr, hands[i]) >= 0 {
				result.HandType = models.HandTypeStrait
				// Top rank of the matched sequence.
				result.Hands[0] = 13 - (9 - i)
				selectedHands := strings.Split(hands[i], ",")
				for j = 0; j < len(compareCards); j++ {
					for k = 0; k < len(selectedHands); k++ {
						parseInt, _ := strconv.Atoi(selectedHands[k])
						if compareCards[j] == parseInt {
							result.AddMadeCard(cards[j])
							break
						}
					}
				}
				break
			}
		}
	}
	return result
}
// CheckTriple detects three of a kind. On a match, Hands[0] holds the
// triple's rank (ace remapped to 13) and Kicks collects the remaining card
// ranks sorted ascending.
func (p *PokerHandUtil) CheckTriple(cards []int) models.HandResult {
	result := models.NewHandResult()
	i := 0
	j := 0
	matchCount := 0
	matchIndex := -1
	kickCount := 0
	for i = 0; i < len(cards); i++ {
		matchCount = 0
		if cards[i] == -1 { // -1 marks an empty card slot
			continue
		}
		for j = 0; j < len(cards); j++ {
			// Same rank (card id modulo 13).
			if cards[i]%13 == cards[j]%13 {
				if matchCount == 2 {
					matchIndex = cards[i] % 13
					if matchIndex == 0 {
						matchIndex = 13 // ace ranks highest
					}
					result.Hands[0] = matchIndex
					result.AddMadeCard(cards[i])
					result.AddMadeCard(cards[j])
				}
				matchCount++
			}
		}
		if matchCount == 3 {
			result.HandType = models.HandTypeTriple
		}
	}
	var card int
	for i = 0; i < len(cards); i++ {
		card = cards[i] % 13
		if card == 0 {
			card = 13
		}
		// BUG FIX: kickers must exclude the triple's rank (matchIndex); the
		// old code compared against the loop counter matchCount, so the
		// wrong cards were excluded from (or included in) the kickers.
		if card != matchIndex {
			result.Kicks[kickCount] = card
			kickCount++
		}
	}
	// Sort the kickers ascending.
	slice.Sort(result.Kicks[:], func(i, j int) bool {
		return result.Kicks[i] < result.Kicks[j]
	})
	return result
}
// CheckTwoPairs (투페어) checks whether cards contain two distinct pairs.
// cards holds 0-51 deck indexes (-1 marks an empty slot); rank = index % 13
// with ace (0) promoted to 13. On a hit, HandType is set to HandTypeTwoPair,
// Hands holds the two pair ranks sorted descending, and Kicks the rest.
func (p *PokerHandUtil) CheckTwoPairs(cards []int) models.HandResult {
	result := models.NewHandResult()
	i := 0
	j := 0
	k := 0
	matchCount := 0
	matchIndex := -1
	endCount := 0 // number of distinct pairs recorded so far
	exist := false
	kickCount := 0
	for i = 0; i < len(cards); i++ {
		matchCount = 0
		if cards[i] == -1 {
			continue
		}
		for j = 0; j < len(cards); j++ {
			if cards[i]%13 == cards[j]%13 {
				if matchCount == 1 {
					// Second card of this rank found: normalize the rank
					// (ace 0 -> 13) and record it unless already recorded.
					matchIndex = cards[i] % 13
					if matchIndex == 0 {
						matchIndex = 13
					}
					exist = false
					for k = 0; k < endCount; k++ {
						if result.Hands[k] == matchIndex {
							exist = true
						}
					}
					if exist == false {
						result.Hands[endCount] = matchIndex
						matchCount++
						result.AddMadeCard(cards[i])
						result.AddMadeCard(cards[j])
					}
				} else {
					matchCount++
				}
			}
		}
		// matchCount reaches 2 only when a brand-new pair was stored above.
		if matchCount == 2 {
			matchCount = 0
			endCount++
		}
	}
	if endCount >= 2 {
		result.HandType = models.HandTypeTwoPair
		// Sort the pair ranks ascending (정렬) ...
		slice.Sort(result.Hands[:], func(i, j int) bool {
			return result.Hands[i] < result.Hands[j]
		})
		// ... then reverse so the highest pair comes first.
		for i := len(result.Hands)/2 - 1; i >= 0; i-- {
			opp := len(result.Hands) - 1 - i
			result.Hands[i], result.Hands[opp] = result.Hands[opp], result.Hands[i]
		}
	}
	var card int
	// Everything not belonging to the two pair ranks becomes a kicker.
	for i = 0; i < len(cards); i++ {
		card = cards[i] % 13
		if card == 0 {
			card = 13
		}
		if card != result.Hands[0] && card != result.Hands[1] {
			result.Kicks[kickCount] = card
			kickCount++
		}
	}
	return result
}
// CheckOnePair (원페어) checks whether cards contain a pair.
// cards holds 0-51 deck indexes (-1 marks an empty slot); rank = index % 13
// with ace (0) promoted to 13. On a hit, HandType is HandTypeOnePair,
// Hands[0] holds the pair rank and Kicks the remaining ranks, sorted.
func (p *PokerHandUtil) CheckOnePair(cards []int) models.HandResult {
	result := models.NewHandResult()
	i := 0
	j := 0
	k := 0
	matchCount := 0
	kickCount := 0
	for i = 0; i < len(cards); i++ {
		matchCount = 0
		if cards[i] == -1 {
			continue
		}
		for j = 0; j < len(cards); j++ {
			if cards[i]%13 == cards[j]%13 {
				if matchCount == 1 {
					// Second card of this rank: store the normalized rank.
					result.Hands[0] = cards[i] % 13
					if result.Hands[0] == 0 {
						result.Hands[0] = 13
					}
					// Walk the cards in sorted order to locate the pair and
					// derive the suit (CardType) of its highest-ordered card.
					// NOTE(review): AddMadeCard receives cards[i]/cards[j],
					// not the card found at order k — presumably intended;
					// verify against callers.
					for k = 0; k < len(cards); k++ {
						index := GetCardOrder(cards, k)
						temp := index % 13
						if temp == 0 {
							temp = 13
						}
						if temp == result.Hands[0] {
							result.AddMadeCard(cards[i])
							result.AddMadeCard(cards[j])
							result.CardType = GetCardType(index)
							break
						}
					}
				}
				matchCount++
			}
		}
		if matchCount >= 2 {
			result.HandType = models.HandTypeOnePair
		}
	}
	var card int
	// Ranks not equal to the pair rank become kickers.
	for i = 0; i < len(cards); i++ {
		card = cards[i] % 13
		if card == 0 {
			card = 13
		}
		if card != result.Hands[0] {
			result.Kicks[kickCount] = card
			kickCount++
		}
	}
	// Sort kickers ascending (정렬).
	slice.Sort(result.Kicks[:], func(i, j int) bool {
		return result.Kicks[i] < result.Kicks[j]
	})
	return result
}
// CheckTitle (타이틀) builds the fallback "high card" result: every card is
// part of the hand. Hands[i] receives the rank of the i-th card in sorted
// order (ace 0 promoted to 13) and all cards are recorded as made cards.
func (p *PokerHandUtil) CheckTitle(cards []int) models.HandResult {
	result := models.NewHandResult()
	result.HandType = models.HandTypeTitle
	for idx, card := range cards {
		rank := GetCardOrder(cards, idx) % 13
		if rank == 0 {
			rank = 13
		}
		result.Hands[idx] = rank
		result.AddMadeCard(card)
	}
	return result
}
// GetCardType maps a 0-51 card index to its suit constant:
// 0-12 spade, 13-25 diamond, 26-38 heart, 39-51 clover.
// Any index outside 0-51 yields CardTypeNone.
func GetCardType(cardIndex int) int {
	switch {
	case cardIndex >= 0 && cardIndex <= 12:
		return models.CardTypeSpade // 스페이드
	case cardIndex >= 13 && cardIndex <= 25:
		return models.CardTypeDiamond // 다이아몬드
	case cardIndex >= 26 && cardIndex <= 38:
		return models.CardTypeHeart // 하트
	case cardIndex >= 39 && cardIndex <= 51:
		return models.CardTypeClover // 클로버
	default:
		return models.CardTypeNone
	}
}
// GetCardStr renders the card indexes as a sorted, comma-separated string,
// skipping empty slots (-1). The input slice is not modified.
func GetCardStr(cards []int) string {
	sorted := make([]int, len(cards))
	copy(sorted, cards)
	// Sort ascending (정렬).
	slice.Sort(sorted, func(i, j int) bool {
		return sorted[i] < sorted[j]
	})
	str := ""
	first := true
	for _, c := range sorted {
		if c == -1 {
			continue
		}
		if !first {
			str += ","
		}
		str += strconv.Itoa(c)
		first = false
	}
	return str
}
// GetCardOrder returns the orderNo-th card (after sorting by rank, then by
// suit precedence) re-encoded as rank + 13*(suit-1).
// Each card is first mapped to rank*4 - (suit-1) so that sorting groups by
// rank with suit as tie-breaker; the encoding is then inverted.
func GetCardOrder(cards []int, orderNo int) int {
	order := make([]int, len(cards))
	for i, c := range cards {
		rank := c % 13
		if rank == 0 {
			rank = 13 // ace ranks highest
		}
		order[i] = rank*4 - (GetCardType(c) - 1)
	}
	// Sort ascending (정렬).
	slice.Sort(order, func(i, j int) bool {
		return order[i] < order[j]
	})
	ret := order[orderNo]
	// ret % 4 in 0..3 maps one-to-one onto suit numbers 1..4.
	cardType := ret%4 + 1
	return (ret+cardType-1)/4 + 13*(cardType-1)
}
|
package output
import (
"fmt"
"strings"
"github.com/mandelsoft/cmdint/pkg/cmdint"
"github.com/afritzler/garden-examiner/cmd/gex/const"
"github.com/afritzler/garden-examiner/cmd/gex/context"
"github.com/afritzler/garden-examiner/cmd/gex/util"
. "github.com/afritzler/garden-examiner/pkg/data"
)
// TableProcessingOutput renders processed elements as a sortable table.
// It embeds ElementOutput for element collection and keeps the column
// header plus the command options used to read the sort specification.
type TableProcessingOutput struct {
	ElementOutput
	header []string        // column titles; a leading '-' is stripped for matching
	opts   *cmdint.Options // command-line options (read for --sort)
}
// Compile-time check that TableProcessingOutput implements Output.
var _ Output = &TableProcessingOutput{}
// NewProcessingTableOutput creates a TableProcessingOutput for the given
// options, processing chain and column header.
func NewProcessingTableOutput(opts *cmdint.Options, chain ProcessChain, header ...string) *TableProcessingOutput {
	out := &TableProcessingOutput{}
	return out.new(opts, chain, header)
}
// new initializes the receiver with the header, the element-processing
// chain and the options, and returns it for chaining.
func (this *TableProcessingOutput) new(opts *cmdint.Options, chain ProcessChain, header []string) *TableProcessingOutput {
	this.header = header
	this.ElementOutput.new(chain)
	this.opts = opts
	return this
}
// Out formats the collected elements as a table. When the --sort option is
// set, rows are sorted by each requested column in turn (column names are
// matched case-insensitively against the header, ignoring a leading '-').
// Returns an error if a requested sort field matches no column.
func (this *TableProcessingOutput) Out(*context.Context) error {
	lines := [][]string{this.header}
	sort := this.opts.GetArrayOptionValue(constants.O_SORT)
	slice := Slice(this.Elems)
	if sort != nil {
		// Build a lowercase column-name -> index lookup from the header.
		cols := make([]string, len(this.header))
		idxs := map[string]int{}
		for i, n := range this.header {
			cols[i] = strings.ToLower(n)
			if strings.HasPrefix(cols[i], "-") {
				cols[i] = cols[i][1:]
			}
			idxs[cols[i]] = i
		}
		// Apply each sort key in order; SelectBest allows abbreviations.
		for _, k := range sort {
			key, _ := cmdint.SelectBest(strings.ToLower(k), cols...)
			if key == "" {
				return fmt.Errorf("unknown field '%s'", k)
			}
			slice.Sort(compare_column(idxs[key]))
		}
	}
	util.FormatTable("", append(lines, StringArraySlice(slice)...))
	return nil
}
// compare_column returns a CompareFunction ordering string rows by column c.
// Rows too short to contain column c are ordered by their length instead.
func compare_column(c int) CompareFunction {
	return func(a interface{}, b interface{}) int {
		rowA, rowB := a.([]string), b.([]string)
		if c < len(rowA) && c < len(rowB) {
			return strings.Compare(rowA[c], rowB[c])
		}
		return len(rowA) - len(rowB)
	}
}
|
package main
// main is intentionally empty; this file only hosts selectSort.
func main() {
}
// selectSort sorts array in place in ascending order using selection sort:
// O(n²) comparisons, at most n-1 swaps, O(1) extra space.
// (Also reformatted to be gofmt-clean: the original had `i:=0` and `i{`.)
func selectSort(array []int) {
	// Empty and single-element slices are already sorted.
	if len(array) < 2 {
		return
	}
	for i := 0; i < len(array); i++ {
		// Find the index of the smallest element in array[i:].
		min := i
		for j := i + 1; j < len(array); j++ {
			if array[min] > array[j] {
				min = j
			}
		}
		// Swap only when a strictly smaller element was found.
		if min != i {
			array[min], array[i] = array[i], array[min]
		}
	}
}
|
package requests
import (
"fmt"
"net/url"
"strings"
"github.com/atomicjolt/canvasapi"
)
// AddToolToRceFavorites Add the specified editor_button external tool to a preferred location in the RCE
// for courses in the given account and its subaccounts (if the subaccounts
// haven't set their own RCE Favorites). Cannot set more than 2 RCE Favorites.
// https://canvas.instructure.com/doc/api/external_tools.html
//
// Path Parameters:
// # Path.AccountID (Required) ID
// # Path.ID (Required) ID
//
// The struct only carries path parameters; the request has no query string,
// form body or JSON payload (see GetQuery/GetBody/GetJSON below).
type AddToolToRceFavorites struct {
	Path struct {
		AccountID string `json:"account_id" url:"account_id,omitempty"` // (Required)
		ID        string `json:"id" url:"id,omitempty"`                 // (Required)
	} `json:"path"`
}
// GetMethod returns the HTTP method for this request (always POST).
func (t *AddToolToRceFavorites) GetMethod() string {
	return "POST"
}
// GetURLPath builds the request path by substituting the account and tool
// IDs into the endpoint template.
func (t *AddToolToRceFavorites) GetURLPath() string {
	replacer := strings.NewReplacer(
		"{account_id}", fmt.Sprintf("%v", t.Path.AccountID),
		"{id}", fmt.Sprintf("%v", t.Path.ID),
	)
	return replacer.Replace("accounts/{account_id}/external_tools/rce_favorites/{id}")
}
// GetQuery returns the query string; this request has none.
func (t *AddToolToRceFavorites) GetQuery() (string, error) {
	return "", nil
}
// GetBody returns the form body; this request has none.
func (t *AddToolToRceFavorites) GetBody() (url.Values, error) {
	return nil, nil
}
// GetJSON returns the JSON payload; this request has none.
func (t *AddToolToRceFavorites) GetJSON() ([]byte, error) {
	return nil, nil
}
// HasErrors validates the required path parameters and returns a single
// error listing every missing one, or nil when the request is valid.
func (t *AddToolToRceFavorites) HasErrors() error {
	errs := []string{}
	if t.Path.AccountID == "" {
		errs = append(errs, "'Path.AccountID' is required")
	}
	if t.Path.ID == "" {
		errs = append(errs, "'Path.ID' is required")
	}
	if len(errs) > 0 {
		// BUG FIX: the joined message was passed as the format string to
		// fmt.Errorf, so any '%' in it would be misinterpreted (and go vet
		// flags the non-constant format string). Use an explicit verb.
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do sends the request through the given Canvas client, discarding the
// response body and returning only the transport error, if any.
func (t *AddToolToRceFavorites) Do(c *canvasapi.Canvas) error {
	_, err := c.SendRequest(t)
	return err
}
|
package lang
import (
"fmt"
)
// number is an integer expression value backed by an int64.
type number struct {
	value int64
}
// MakeNumber wraps v in a number expression.
// NOTE(review): exported constructor returning the unexported *number —
// callers outside the package can only use it through the Expr interface.
func MakeNumber(v int64) *number {
	return &number{v}
}
// String renders the number as its decimal representation.
func (n *number) String() string {
	return fmt.Sprint(n.value)
}
// Equal reports whether o is also a *number with the same value.
// Any other expression type compares unequal.
func (n *number) Equal(o Expr) bool {
	if other, ok := o.(*number); ok {
		return n.value == other.value
	}
	return false
}
// Plus returns a new number holding n + o.
func (n *number) Plus(o *number) *number {
	return MakeNumber(n.value + o.value)
}
// Minus returns a new number holding n - o.
func (n *number) Minus(o *number) *number {
	return MakeNumber(n.value - o.value)
}
// Mult returns a new number holding n * o.
func (n *number) Mult(o *number) *number {
	return MakeNumber(n.value * o.value)
}
// Divide returns a new number holding n / o (truncated integer division).
// Panics with a runtime divide-by-zero error when o is zero.
func (n *number) Divide(o *number) *number {
	return MakeNumber(n.value / o.value)
}
// Modulo returns a new number holding n % o. Panics when o is zero.
func (n *number) Modulo(o *number) *number {
	return MakeNumber(n.value % o.value)
}
|
package elevengo
import (
"net"
"net/http"
"net/http/cookiejar"
)
// Client is an 11.cn (elevengo) API client. It owns the cookie jar and the
// underlying HTTP client, plus cached session state.
type Client struct {
	jar http.CookieJar // cookie storage shared with hc
	hc  *http.Client   // configured HTTP transport
	ua  string         // User-Agent sent with requests
	// info and offline are populated after login; nil until then.
	info    *_UserInfo
	offline *_OfflineToken
}
// New creates a Client configured by opts. A nil opts selects the defaults
// from NewOptions(). The client gets its own transport (with the dial/idle
// timeouts and connection limits from opts) and an in-memory cookie jar.
func New(opts *Options) *Client {
	if opts == nil {
		opts = NewOptions()
	}
	// core component
	d := &net.Dialer{
		Timeout: opts.DialTimeout,
	}
	tp := &http.Transport{
		Proxy:               http.ProxyFromEnvironment,
		DialContext:         d.DialContext,
		IdleConnTimeout:     opts.IdleTimeout,
		MaxIdleConnsPerHost: opts.MaxIdleConnsPreHost,
		MaxIdleConns:        opts.MaxIdleConns,
	}
	// cookiejar.New with nil options never returns an error, so it is safe
	// to discard here.
	jar, _ := cookiejar.New(nil)
	hc := &http.Client{
		Transport: tp,
		Jar:       jar,
	}
	// assemble the client; info/offline stay nil until authentication.
	return &Client{
		jar: jar,
		hc:  hc,
		ua:  opts.UserAgent,
	}
}
// Default returns a Client built with the default options.
func Default() *Client {
	return New(nil)
}
|
package aliyun
// IAliYunClient abstracts an Aliyun API client.
type IAliYunClient interface {
	// GetResponse performs the request for path with the given client info
	// and business payload, returning the raw response bytes.
	// (Fixed parameter-name typo "clinetInfo" -> "clientInfo"; interface
	// parameter names are documentation only, so no caller is affected.)
	GetResponse(path string, clientInfo interface{}, bizData interface{}) []byte
	// GetHeaderMap builds the request headers for path from the client info
	// and business payload.
	GetHeaderMap(path string, clientInfo interface{}, bizData interface{}) map[string]string
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"strings"
)
// guessNumber replays a binary-search guessing game transcript.
// q's first field is the upper bound; the middle fields are "Lower"/other
// hints narrowing the range (the final field is ignored). The return value
// is the next guess after applying every hint.
func guessNumber(q string) int {
	fields := strings.Fields(q)
	var carry, lo, hi int
	fmt.Sscan(fields[0], &hi)
	hints := fields[1 : len(fields)-1]
	for _, hint := range hints {
		mid := (lo+hi)/2 + carry
		switch hint {
		case "Lower":
			hi = mid - 1
		default:
			lo = mid + 1
		}
		carry = (lo + hi) % 2
	}
	return (lo+hi)/2 + carry
}
// main reads the file named by the first CLI argument and prints the
// guessNumber result for each line.
func main() {
	data, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer data.Close()
	scanner := bufio.NewScanner(data)
	// NOTE(review): scanner.Err() is not checked after the loop, so a read
	// error ends the loop silently.
	for scanner.Scan() {
		fmt.Println(guessNumber(scanner.Text()))
	}
}
|
package loader
// This file parses a fragment of C with libclang and stores the result for AST
// modification. It does not touch the AST itself.
import (
"errors"
"go/ast"
"go/token"
"strconv"
"strings"
"unsafe"
)
/*
#include <clang-c/Index.h> // if this fails, install libclang-7-dev
#include <stdlib.h>
int tinygo_clang_visitor(CXCursor c, CXCursor parent, CXClientData client_data);
*/
import "C"
// globalFileInfo carries the current fileInfo to the exported cgo callback
// (tinygo_clang_visitor), which cannot receive a Go closure directly.
var globalFileInfo *fileInfo
// parseFragment parses a C fragment with libclang using the given compiler
// flags and records the declarations it finds into info (via the visitor).
// Returns an error when libclang reports any diagnostic for the fragment;
// panics if the translation unit cannot be created at all.
func (info *fileInfo) parseFragment(fragment string, cflags []string) error {
	index := C.clang_createIndex(0, 1)
	defer C.clang_disposeIndex(index)
	filenameC := C.CString("cgo-fake.c")
	defer C.free(unsafe.Pointer(filenameC))
	fragmentC := C.CString(fragment)
	defer C.free(unsafe.Pointer(fragmentC))
	// The fragment is supplied as an in-memory "unsaved file" instead of
	// writing it to disk.
	unsavedFile := C.struct_CXUnsavedFile{
		Filename: filenameC,
		Length:   C.ulong(len(fragment)),
		Contents: fragmentC,
	}
	// convert Go slice of strings to C array of strings.
	cmdargsC := C.malloc(C.size_t(len(cflags)) * C.size_t(unsafe.Sizeof(uintptr(0))))
	defer C.free(cmdargsC)
	cmdargs := (*[1 << 16]*C.char)(cmdargsC)
	for i, cflag := range cflags {
		s := C.CString(cflag)
		cmdargs[i] = s
		// All defers run at function return, which is fine: the strings
		// must stay alive until parsing below completes.
		defer C.free(unsafe.Pointer(s))
	}
	var unit C.CXTranslationUnit
	errCode := C.clang_parseTranslationUnit2(
		index,
		filenameC,
		(**C.char)(cmdargsC), C.int(len(cflags)), // command line args
		&unsavedFile, 1, // unsaved files
		C.CXTranslationUnit_None,
		&unit)
	if errCode != 0 {
		panic("loader: failed to parse source with libclang")
	}
	defer C.clang_disposeTranslationUnit(unit)
	if C.clang_getNumDiagnostics(unit) != 0 {
		return errors.New("cgo: libclang cannot parse fragment")
	}
	if globalFileInfo != nil {
		// There is a race condition here but that doesn't really matter as it
		// is a sanity check anyway.
		panic("libclang.go cannot be used concurrently yet")
	}
	globalFileInfo = info
	defer func() {
		globalFileInfo = nil
	}()
	// Walk the translation unit; the visitor fills info.functions,
	// info.typedefs and info.globals.
	cursor := C.clang_getTranslationUnitCursor(unit)
	C.clang_visitChildren(cursor, (*[0]byte)(unsafe.Pointer(C.tinygo_clang_visitor)), C.CXClientData(uintptr(0)))
	return nil
}
//export tinygo_clang_visitor
// tinygo_clang_visitor is the libclang AST callback. It records function,
// typedef and global-variable declarations from the parsed C fragment into
// globalFileInfo. Always returns CXChildVisit_Continue (no recursion into
// children).
func tinygo_clang_visitor(c, parent C.CXCursor, client_data C.CXClientData) C.int {
	info := globalFileInfo
	kind := C.clang_getCursorKind(c)
	switch kind {
	case C.CXCursor_FunctionDecl:
		name := getString(C.clang_getCursorSpelling(c))
		cursorType := C.clang_getCursorType(c)
		// Variadic C functions cannot be represented; skip them.
		if C.clang_isFunctionTypeVariadic(cursorType) != 0 {
			return C.CXChildVisit_Continue // not supported
		}
		numArgs := int(C.clang_Cursor_getNumArguments(c))
		fn := &functionInfo{}
		info.functions[name] = fn
		for i := 0; i < numArgs; i++ {
			arg := C.clang_Cursor_getArgument(c, C.uint(i))
			argName := getString(C.clang_getCursorSpelling(arg))
			argType := C.clang_getArgType(cursorType, C.uint(i))
			// Unnamed parameters get a synthetic positional name.
			if argName == "" {
				argName = "$" + strconv.Itoa(i)
			}
			fn.args = append(fn.args, paramInfo{
				name:     argName,
				typeExpr: info.makeASTType(argType),
			})
		}
		resultType := C.clang_getCursorResultType(c)
		if resultType.kind != C.CXType_Void {
			fn.results = &ast.FieldList{
				List: []*ast.Field{
					&ast.Field{
						Type: info.makeASTType(resultType),
					},
				},
			}
		}
	case C.CXCursor_TypedefDecl:
		typedefType := C.clang_getCursorType(c)
		name := getString(C.clang_getTypedefName(typedefType))
		underlyingType := C.clang_getTypedefDeclUnderlyingType(c)
		expr := info.makeASTType(underlyingType)
		// Internal _Cgo_ typedefs for integer C types are resolved to the
		// fixed-width Go type matching their size on this target.
		if strings.HasPrefix(name, "_Cgo_") {
			expr := expr.(*ast.Ident)
			typeSize := C.clang_Type_getSizeOf(underlyingType)
			switch expr.Name {
			// TODO: plain char (may be signed or unsigned)
			case "C.schar", "C.short", "C.int", "C.long", "C.longlong":
				switch typeSize {
				case 1:
					expr.Name = "int8"
				case 2:
					expr.Name = "int16"
				case 4:
					expr.Name = "int32"
				case 8:
					expr.Name = "int64"
				}
			case "C.uchar", "C.ushort", "C.uint", "C.ulong", "C.ulonglong":
				switch typeSize {
				case 1:
					expr.Name = "uint8"
				case 2:
					expr.Name = "uint16"
				case 4:
					expr.Name = "uint32"
				case 8:
					expr.Name = "uint64"
				}
			}
		}
		info.typedefs[name] = &typedefInfo{
			typeExpr: expr,
		}
	case C.CXCursor_VarDecl:
		name := getString(C.clang_getCursorSpelling(c))
		cursorType := C.clang_getCursorType(c)
		info.globals[name] = &globalInfo{
			typeExpr: info.makeASTType(cursorType),
		}
	}
	return C.CXChildVisit_Continue
}
// getString converts a libclang CXString to a Go string and releases the
// underlying libclang storage.
func getString(clangString C.CXString) (s string) {
	rawString := C.clang_getCString(clangString)
	s = C.GoString(rawString)
	C.clang_disposeString(clangString)
	return
}
// makeASTType return the ast.Expr for the given libclang type. In other words,
// it converts a libclang type to a type in the Go AST.
// Integer C types map to "C.<name>" identifiers (resolved later), floats and
// complex types map by size, pointers and function prototypes get dedicated
// AST nodes, and anything unrecognized falls back to "C.<spelling>".
func (info *fileInfo) makeASTType(typ C.CXType) ast.Expr {
	var typeName string
	switch typ.kind {
	case C.CXType_SChar:
		typeName = "C.schar"
	case C.CXType_UChar:
		typeName = "C.uchar"
	case C.CXType_Short:
		typeName = "C.short"
	case C.CXType_UShort:
		typeName = "C.ushort"
	case C.CXType_Int:
		typeName = "C.int"
	case C.CXType_UInt:
		typeName = "C.uint"
	case C.CXType_Long:
		typeName = "C.long"
	case C.CXType_ULong:
		typeName = "C.ulong"
	case C.CXType_LongLong:
		typeName = "C.longlong"
	case C.CXType_ULongLong:
		typeName = "C.ulonglong"
	case C.CXType_Bool:
		typeName = "bool"
	case C.CXType_Float, C.CXType_Double, C.CXType_LongDouble:
		switch C.clang_Type_getSizeOf(typ) {
		case 4:
			typeName = "float32"
		case 8:
			typeName = "float64"
		default:
			// Don't do anything, rely on the fallback code to show a somewhat
			// sensible error message like "undeclared name: C.long double".
		}
	case C.CXType_Complex:
		switch C.clang_Type_getSizeOf(typ) {
		case 8:
			typeName = "complex64"
		case 16:
			typeName = "complex128"
		}
	case C.CXType_Pointer:
		// Recurse on the pointee and wrap it in a Go pointer type.
		return &ast.StarExpr{
			Star: info.importCPos,
			X:    info.makeASTType(C.clang_getPointeeType(typ)),
		}
	case C.CXType_FunctionProto:
		// Be compatible with gc, which uses the *[0]byte type for function
		// pointer types.
		// Return type [0]byte because this is a function type, not a pointer to
		// this function type.
		return &ast.ArrayType{
			Lbrack: info.importCPos,
			Len: &ast.BasicLit{
				ValuePos: info.importCPos,
				Kind:     token.INT,
				Value:    "0",
			},
			Elt: &ast.Ident{
				NamePos: info.importCPos,
				Name:    "byte",
			},
		}
	}
	if typeName == "" {
		// Fallback, probably incorrect but at least the error points to an odd
		// type name.
		typeName = "C." + getString(C.clang_getTypeSpelling(typ))
	}
	return &ast.Ident{
		NamePos: info.importCPos,
		Name:    typeName,
	}
}
|
package transeq
import (
"bytes"
"context"
"fmt"
"io"
)
const (
	// mb is one mebibyte (2^20 bytes).
	mb = 1 << (10 * 2)
	// size of the buffer for writing to file
	maxBufferSize = 1 * mb
	// suffixes to add to sequence id for each frame
	suffixes = "123456"
	// max line size for the output file
	maxLineSize = 60
	// specific codons
	stop    = '*'
	unknown = 'X'
)
// writer accumulates translated amino-acid output for one or more reading
// frames before it is flushed to the destination.
type writer struct {
	codes            [arrayCodeSize]byte // codon-index -> amino-acid lookup table
	buf              []byte              // pending output bytes
	currentLineLen   int                 // length of the line currently being built
	startPos         [3]int              // frame start offsets within the sequence
	frameIndex       int                 // index into suffixes / framesToGenerate
	framesToGenerate [6]int              // non-zero entries select which frames to emit
	reverse          bool                // also translate the reverse complement
	alternative      bool                // use the alternative frame convention (not Staden)
	trim             bool                // strip trailing 'X'/'*' runs from each frame
	// if in trim mode, nb of bytes to trim (nb of successive 'X', '*' and '\n'
	// from right end of the sequence)
	toTrim int
}
// newWriter builds a writer with a pre-allocated output buffer and the
// default frame offsets 0,1,2.
func newWriter(codes [arrayCodeSize]byte, framesToGenerate [6]int, reverse, alternative, trim bool) *writer {
	return &writer{
		codes:            codes,
		buf:              make([]byte, 0, maxBufferSize),
		startPos:         [3]int{0, 1, 2},
		framesToGenerate: framesToGenerate,
		reverse:          reverse,
		alternative:      alternative,
		trim:             trim,
	}
}
// reset prepares the writer for the next sequence: frame counter back to 0
// and, when reverse Staden frames may have shifted them (see translate),
// the frame offsets back to 0,1,2.
func (w *writer) reset() {
	w.frameIndex = 0
	if w.reverse && !w.alternative {
		w.startPos[0], w.startPos[1], w.startPos[2] = 0, 1, 2
	}
}
// translate emits the selected forward frames of sequence and, when reverse
// mode is on, the three reverse-complement frames as well.
func (w *writer) translate(sequence encodedSequence) {
	w.reset()
	w.translate3Frames(sequence)
	if w.reverse {
		if !w.alternative {
			// Staden convention: Frame -1 is the reverse-complement of the sequence
			// having the same codon phase as frame 1. Frame -2 is the same phase as
			// frame 2. Frame -3 is the same phase as frame 3
			//
			// use the matrix to keep track of the forward frame as it depends on the
			// length of the sequence
			switch sequence.nuclSeqSize() % 3 {
			case 0:
				w.startPos[0], w.startPos[1], w.startPos[2] = 0, 2, 1
			case 1:
				w.startPos[0], w.startPos[1], w.startPos[2] = 1, 0, 2
			case 2:
				w.startPos[0], w.startPos[1], w.startPos[2] = 2, 1, 0
			}
		}
		// Reverse-complement in place, then translate the three frames again.
		sequence.reverseComplement()
		w.translate3Frames(sequence)
	}
}
// translate3Frames translates sequence in the three frames given by
// w.startPos, skipping frames not selected in framesToGenerate. Each frame
// gets its own header line; incomplete trailing codons are handled below.
func (w *writer) translate3Frames(sequence encodedSequence) {
	for _, startPos := range w.startPos {
		if w.framesToGenerate[w.frameIndex] == 0 {
			w.frameIndex++
			continue
		}
		w.writeHeader(sequence.header())
		// read the sequence 3 letters at a time, starting at a specific position
		// corresponding to the frame
		for pos := sequence.headerSize() + startPos; pos < len(sequence)-2; pos += 3 {
			index := uint32(sequence[pos]) | uint32(sequence[pos+1])<<8 | uint32(sequence[pos+2])<<16
			w.writeAA(w.codes[index])
		}
		switch (sequence.nuclSeqSize() - startPos) % 3 {
		case 2:
			// the last codon is only 2 nucleotide long, try to guess
			// the corresponding AA
			index := uint32(sequence[len(sequence)-2]) | uint32(sequence[len(sequence)-1])<<8
			w.writeAA(w.codes[index])
		case 1:
			// the last codon is only 1 nucleotide long, no way to guess
			// the corresponding AA
			w.writeAA(unknown)
		}
		w.trimAndReturn()
		w.frameIndex++
	}
}
// writeHeader appends the frame's header line to the buffer. The frame
// suffix is inserted right after the sequence ID, before any comment:
// >sequenceID_<frame> comment
func (w *writer) writeHeader(seqHeader []byte) {
	spaceAt := bytes.IndexByte(seqHeader, ' ')
	if spaceAt == -1 {
		// No comment: suffix goes at the very end.
		w.buf = append(w.buf, seqHeader...)
		w.buf = append(w.buf, '_', suffixes[w.frameIndex])
	} else {
		// Splice the suffix between the ID and the comment.
		w.buf = append(w.buf, seqHeader[:spaceAt]...)
		w.buf = append(w.buf, '_', suffixes[w.frameIndex])
		w.buf = append(w.buf, seqHeader[spaceAt:]...)
	}
	w.newLine()
}
// writeAA appends one amino-acid byte, wrapping lines at maxLineSize.
// In trim mode it tracks the length of the trailing stop/unknown run so
// trimAndReturn can strip it later.
func (w *writer) writeAA(aa byte) {
	if w.currentLineLen == maxLineSize {
		w.newLine()
	}
	w.buf = append(w.buf, aa)
	w.currentLineLen++
	if w.trim {
		if aa == stop || aa == unknown {
			w.toTrim++
		} else {
			w.toTrim = 0
		}
	}
}
// newLine appends a line break and resets the line length counter. The
// newline itself counts toward toTrim so trailing runs spanning a wrap are
// removed whole.
func (w *writer) newLine() {
	w.buf = append(w.buf, '\n')
	w.currentLineLen = 0
	if w.trim {
		w.toTrim++
	}
}
// trimAndReturn finishes the current frame: strips the tracked trailing
// stop/unknown bytes (trim mode) and terminates the last line if needed.
func (w *writer) trimAndReturn() {
	if w.toTrim > 0 {
		w.buf = w.buf[:len(w.buf)-w.toTrim]
		w.currentLineLen -= w.toTrim
	}
	if w.currentLineLen != 0 {
		w.newLine()
	}
	w.toTrim = 0
}
// flush writes the buffered output to out and empties the buffer (keeping
// its capacity). On write failure it reports the error on errs and cancels
// the context; the non-blocking send means only the first error is kept.
func (w *writer) flush(out io.Writer, cancel context.CancelFunc, errs chan error) {
	_, err := out.Write(w.buf)
	if err != nil {
		select {
		case errs <- fmt.Errorf("fail to write to output file: %v", err):
			cancel()
		default:
		}
	}
	w.buf = w.buf[:0]
}
|
// Collection data structure for database
package db
import (
"encoding/json"
"fmt"
"github.com/gophergala/echodb/dbcore"
"github.com/gophergala/echodb/dbwebsocket"
"math/rand"
"os"
"path"
"strconv"
)
const (
	// INDEX_FILE is the filename prefix of per-partition index files.
	INDEX_FILE = "_idx"
)
// Collection is a named set of documents stored across the database's
// hash partitions (one per numParts).
type Collection struct {
	db    *Database           // owning database (paths, partition count, lock)
	name  string              // collection/directory name
	parts []*dbcore.Partition // one partition per db.numParts
}
// OpenCollection opens (creating if necessary) the named collection in db.
func OpenCollection(db *Database, name string) (*Collection, error) {
	collection := &Collection{db: db, name: name}
	return collection, collection.bootstrap()
}
// bootstrap creates the collection directory and opens every data/index
// partition pair, failing on the first error.
func (col *Collection) bootstrap() error {
	if err := os.MkdirAll(path.Join(col.db.path, col.name), 0700); err != nil {
		return err
	}
	col.parts = make([]*dbcore.Partition, col.db.numParts)
	for i := 0; i < col.db.numParts; i++ {
		var err error
		if col.parts[i], err = dbcore.OpenPartition(
			path.Join(col.db.path, col.name, col.name+"."+strconv.Itoa(i)),
			path.Join(col.db.path, col.name, INDEX_FILE+"."+strconv.Itoa(i))); err != nil {
			return err
		}
	}
	return nil
}
// close closes every partition under its own lock. Always returns nil;
// partition Close errors are not surfaced.
func (col *Collection) close() error {
	for i := 0; i < col.db.numParts; i++ {
		col.parts[i].Lock.Lock()
		col.parts[i].Close()
		col.parts[i].Lock.Unlock()
	}
	return nil
}
// Count returns the approximate number of documents in the collection
// (sum of each partition's ApproxDocCount).
func (col *Collection) Count() int {
	col.db.access.RLock()
	defer col.db.access.RUnlock()
	count := 0
	for _, part := range col.parts {
		part.Lock.RLock()
		count += part.ApproxDocCount()
		part.Lock.RUnlock()
	}
	return count
}
// Insert a document into the collection.
// A random int ID is generated and the document is stored in the partition
// id % numParts. On success the input map gains an "_id" key and a "create"
// event is emitted over websockets.
func (col *Collection) Insert(doc map[string]interface{}) (id int, err error) {
	docJS, err := json.Marshal(doc)
	if err != nil {
		return
	}
	id = rand.Int()
	partNum := id % col.db.numParts
	col.db.access.RLock()
	part := col.parts[partNum]
	// Put document data into collection
	part.Lock.Lock()
	if _, err = part.Insert(id, []byte(docJS)); err != nil {
		part.Lock.Unlock()
		col.db.access.RUnlock()
		return
	}
	// If another thread is updating the document in the meanwhile, let it take over index maintenance
	if err = part.LockUpdate(id); err != nil {
		part.Lock.Unlock()
		col.db.access.RUnlock()
		// Deliberately returns nil error: the insert itself succeeded.
		return id, nil
	}
	part.UnlockUpdate(id)
	part.Lock.Unlock()
	col.db.access.RUnlock()
	doc["_id"] = id
	emitDoc(col.name, "create", doc)
	return
}
// Retrieve a document by ID.
// Reads the raw bytes from the owning partition and unmarshals them into a
// fresh map; returns the partition's error when the ID is unknown.
func (col *Collection) FindById(id int) (doc map[string]interface{}, err error) {
	col.db.access.RLock()
	defer col.db.access.RUnlock()
	part := col.parts[id%col.db.numParts]
	part.Lock.RLock()
	docB, err := part.Read(id)
	part.Lock.RUnlock()
	if err != nil {
		return
	}
	err = json.Unmarshal(docB, &doc)
	return
}
// Cursor to all records in collection
// Returns a buffered (and already closed) channel holding every document,
// each with its "_id" injected.
// NOTE(review): pages are iterated as part.All(j, partDiv) for j in
// [0, partDiv) — whether this covers every document depends on Partition.All
// semantics; unmarshal errors are silently ignored.
func (col *Collection) All() chan map[string]interface{} {
	count := col.Count()
	c := make(chan map[string]interface{}, count)
	if count == 0 {
		close(c)
		return c
	}
	partDiv := count / col.db.numParts
	for i := 0; i < col.db.numParts; i++ {
		part := col.parts[i]
		for j := 0; j < partDiv; j++ {
			for d := range part.All(j, partDiv) {
				doc := make(map[string]interface{})
				json.Unmarshal(d.Data, &doc)
				doc["_id"] = d.Id
				c <- doc
			}
		}
	}
	close(c)
	return c
}
// Update a document
// Replaces the stored bytes for id with the JSON encoding of doc, under the
// partition's update lock. On success the input map gains "_id" and an
// "update" event is emitted over websockets.
func (col *Collection) Update(id int, doc map[string]interface{}) error {
	if doc == nil {
		return fmt.Errorf("Updating %d: input doc may not be nil", id)
	}
	docJS, err := json.Marshal(doc)
	if err != nil {
		return err
	}
	col.db.access.RLock()
	part := col.parts[id%col.db.numParts]
	part.Lock.Lock()
	// Place lock, read back original document and update
	if err := part.LockUpdate(id); err != nil {
		part.Lock.Unlock()
		col.db.access.RUnlock()
		return err
	}
	originalB, err := part.Read(id)
	if err != nil {
		part.UnlockUpdate(id)
		part.Lock.Unlock()
		col.db.access.RUnlock()
		return err
	}
	// The original is only decoded for (future) index maintenance; a decode
	// failure is logged and otherwise ignored.
	var original map[string]interface{}
	if err = json.Unmarshal(originalB, &original); err != nil {
		fmt.Printf("Will not attempt to unindex document %d during update\n", id)
	}
	if err = part.Update(id, []byte(docJS)); err != nil {
		part.UnlockUpdate(id)
		part.Lock.Unlock()
		col.db.access.RUnlock()
		return err
	}
	part.UnlockUpdate(id)
	part.Lock.Unlock()
	col.db.access.RUnlock()
	doc["_id"] = id
	emitDoc(col.name, "update", doc)
	return nil
}
// Delete a document
// Removes the document with the given id from its partition under the
// update lock and emits a "delete" event carrying only the id.
func (col *Collection) Delete(id int) error {
	col.db.access.RLock()
	part := col.parts[id%col.db.numParts]
	part.Lock.Lock()
	// Place lock, read back original document and delete document
	if err := part.LockUpdate(id); err != nil {
		part.Lock.Unlock()
		col.db.access.RUnlock()
		return err
	}
	originalB, err := part.Read(id)
	if err != nil {
		part.UnlockUpdate(id)
		part.Lock.Unlock()
		col.db.access.RUnlock()
		return err
	}
	// Decoded only for (future) index maintenance; failure is non-fatal.
	var original map[string]interface{}
	if err = json.Unmarshal(originalB, &original); err != nil {
		fmt.Printf("Will not attempt to unindex document %d during delete\n", id)
	}
	if err = part.Delete(id); err != nil {
		part.UnlockUpdate(id)
		part.Lock.Unlock()
		col.db.access.RUnlock()
		return err
	}
	part.UnlockUpdate(id)
	part.Lock.Unlock()
	col.db.access.RUnlock()
	emitDoc(col.name, "delete", map[string]interface{}{"_id": id})
	return nil
}
// emitDoc broadcasts a change event for the named collection over
// websockets, wrapping the document with its action. Marshal failures are
// silently dropped (best effort notification).
func emitDoc(name, action string, doc map[string]interface{}) {
	emit := map[string]interface{}{"__action": action, "__doc": doc}
	emitDocJS, err := json.Marshal(emit)
	if err != nil {
		return
	}
	dbwebsocket.Emit(name, emitDocJS)
}
|
package runtime_test
import (
"testing"
"github.com/bmizerany/assert"
"github.com/gonitor/gonitor/service/runtime"
)
// TestServiceGetGoOS verifies that ServiceGetGoOS returns a non-empty
// platform string.
func TestServiceGetGoOS(test *testing.T) {
	result := runtime.ServiceGetGoOS()
	assert.Equal(test, len(result) > 0, true)
}
|
package wallet
import (
"../block"
"../transaction"
)
// Wallet tracks an address's balance and history.
type Wallet struct {
	Address   string                    // owning address
	Amount    uint64                    // current balance
	Timestamp uint32                    // timestamp of the last applied tx/block
	Height    uint32                    // height of the last applied block
	TxList    []transaction.Transaction // transactions touching this wallet
	Fee       []block.Block             // blocks for which this wallet collected fees
}
// Wallets is the global in-memory address -> wallet map.
// NOTE(review): unsynchronized; not safe for concurrent use.
var Wallets = make(map[string]Wallet)
// TransferMoney applies every transaction in bl to the wallet map.
// A transaction whose sender is the all-zero address is the coinbase and
// names the miner; every other transaction is withdrawn from its sender and
// its fee accumulated. All transactions credit their receiver, and the
// accumulated fees are paid to the miner once, after the loop.
func TransferMoney(bl block.Block) {
	const coinbaseSender = "0000000000000000000000000000000000000000000000000000000000000000"
	var miner string
	var fee uint64
	for _, tx := range bl.TxList {
		if tx.Sender == coinbaseSender {
			miner = tx.Receiver
		} else {
			Withdraw(bl, tx)
			// BUG FIX: fees were never accumulated — fee stayed 0 forever.
			fee += tx.Fee
		}
		Refill(bl, tx)
	}
	// BUG FIX: the original called MinerFee inside the loop, paying 0 on
	// every transaction and panicking ("Miner not found") whenever the
	// coinbase transaction was not first in TxList. Pay once, at the end,
	// and only when a coinbase transaction identified the miner.
	if miner != "" {
		MinerFee(bl, miner, fee)
	}
}
// MinerFee credits fee to the miner's wallet, stamps it with the block's
// timestamp/height and records the block in the wallet's Fee history.
// Panics when the miner has no wallet yet.
func MinerFee(bl block.Block, miner string, fee uint64) {
	if w, ok := Wallets[miner]; ok {
		w.Amount += fee
		w.Timestamp = bl.Timestamp
		w.Height = bl.Height
		w.Fee = append(w.Fee, bl)
		Wallets[w.Address] = w
	} else {
		panic("Miner not found")
	}
}
// Refill credits tx.Amount to the receiver's wallet, creating the wallet
// on first sight of the address.
func Refill(bl block.Block, tx transaction.Transaction) {
	if w, ok := Wallets[tx.Receiver]; ok {
		w.Amount += tx.Amount
		w.Timestamp = tx.Timestamp
		w.Height = bl.Height
		w.TxList = append(w.TxList, tx)
		Wallets[w.Address] = w
	} else {
		// First transaction for this address: create a fresh wallet.
		txList := []transaction.Transaction{tx}
		blList := []block.Block{}
		w = Wallet{
			Address:   tx.Receiver,
			Amount:    tx.Amount,
			Timestamp: tx.Timestamp,
			Height:    bl.Height,
			TxList:    txList,
			Fee:       blList,
		}
		Wallets[w.Address] = w
	}
}
// Withdraw debits tx.Amount from the sender's wallet. Panics when the
// sender is unknown or the balance cannot cover amount + fee.
// NOTE(review): the balance check includes tx.Fee but only tx.Amount is
// actually subtracted — the fee is never deducted from the sender. Confirm
// whether that is intended before relying on balances.
func Withdraw(bl block.Block, tx transaction.Transaction) {
	if w, ok := Wallets[tx.Sender]; ok {
		if tx.Amount+tx.Fee > w.Amount {
			panic("Not enough money")
		}
		w.Amount -= tx.Amount
		w.Timestamp = tx.Timestamp
		w.Height = bl.Height
		w.TxList = append(w.TxList, tx)
		Wallets[w.Address] = w
	} else {
		panic("Sender wallet not found!")
	}
}
|
package communications
import (
"context"
"encoding/json"
"github.com/niolabs/gonio-framework"
"github.com/pubkeeper/go-client"
)
// PublisherBlock forwards incoming signal groups to a pubkeeper topic.
// It embeds the nio consumer machinery and the pubkeeper connection.
type PublisherBlock struct {
	nio.Consumer
	client.Connection
	config PublisherBlockConfig // parsed block configuration
}
// PublisherBlockConfig is the block's JSON configuration: the common block
// atoms plus the pubkeeper topic to publish on.
type PublisherBlockConfig struct {
	nio.BlockConfigAtom
	Topic string `json:"topic"`
}
// Configure initializes the consumer side and decodes the raw JSON
// configuration into block.config.
func (block *PublisherBlock) Configure(config nio.RawBlockConfig) error {
	block.Consumer.Configure()
	if err := json.Unmarshal(config, &block.config); err != nil {
		return err
	}
	return nil
}
// Start registers a brewer for the configured topic and pumps incoming
// signal groups to it as JSON until ctx is cancelled. The brewer is
// unregistered on return.
func (block *PublisherBlock) Start(ctx context.Context) {
	b := block.Connection.RegisterBrewer(block.config.Topic)
	defer block.Connection.UnregisterBrewer(b)
	for {
		select {
		case signals := <-block.ChIn:
			// Marshal error deliberately ignored: signal groups are
			// expected to always be JSON-encodable.
			bytes, _ := json.Marshal(signals)
			b.Send <- bytes
		case <-ctx.Done():
			return
		}
	}
}
// Enqueue hands a signal group to the consumer queue (weight 1).
func (block *PublisherBlock) Enqueue(terminal nio.Terminal, signals nio.SignalGroup) error {
	return block.Consumer.Enqueue(terminal, signals, 1)
}
// EachOutput is a no-op: a publisher block has no output terminals.
func (block *PublisherBlock) EachOutput(func(nio.Terminal, <-chan nio.SignalGroup)) {}
// publisherDefinition describes the Publisher block type for the nio
// framework UI/runtime: one default input terminal, no outputs, and the
// standard property set (type/timeout/version/topic/id/name/log_level).
var publisherDefinition = nio.BlockTypeDefinition{
	Version: "1.1.0",
	BlockAttributes: nio.BlockAttributes{
		Outputs: []nio.TerminalDefinition{},
		Inputs: []nio.TerminalDefinition{
			{
				Label:   "default",
				Type:    "input",
				Visible: true,
				Order:   0,
				ID:      "__default_terminal_value",
				Default: true,
			},
		},
	},
	Namespace: "blocks.communication.publisher.Publisher",
	Properties: map[nio.Property]nio.PropertyDefinition{
		"type": {
			"order":      nil,
			"advanced":   false,
			"visible":    false,
			"title":      "Type",
			"type":       "StringType",
			"readonly":   true,
			"allow_none": false,
			"default":    nil,
		},
		"timeout": {
			"order":    nil,
			"type":     "TimeDeltaType",
			"advanced": true,
			"visible":  true,
			"default": map[string]float64{
				"seconds": 2,
			},
			"allow_none": false,
			"title":      "Connect Timeout",
		},
		"version": {
			"order":      nil,
			"type":       "StringType",
			"advanced":   true,
			"visible":    true,
			"default":    "1.1.0",
			"allow_none": false,
			"title":      "Version",
		},
		// topic is the only property specific to this block.
		"topic": {
			"order":      nil,
			"type":       "StringType",
			"advanced":   false,
			"visible":    true,
			"default":    nil,
			"allow_none": false,
			"title":      "Topic",
		},
		"id": {
			"order":      nil,
			"type":       "StringType",
			"advanced":   false,
			"visible":    false,
			"default":    nil,
			"allow_none": false,
			"title":      "Id",
		},
		"name": {
			"order":      nil,
			"type":       "StringType",
			"advanced":   false,
			"visible":    false,
			"default":    nil,
			"allow_none": true,
			"title":      "Name",
		},
		"log_level": {
			"order": nil,
			"options": map[string]int{
				"WARNING":  30,
				"NOTSET":   0,
				"ERROR":    40,
				"INFO":     20,
				"DEBUG":    10,
				"CRITICAL": 50,
			},
			"advanced":   true,
			"visible":    true,
			"title":      "Log Level",
			"type":       "SelectType",
			"enum":       "LogLevel",
			"allow_none": false,
			"default":    "NOTSET",
		},
	},
	Commands: map[nio.Command]nio.CommandDefinition{},
	Name:     "Publisher",
}
// NewPublisher returns the registry entry for the Publisher block type,
// binding each created block to the given pubkeeper connection.
func NewPublisher(connection client.Connection) nio.BlockTypeEntry {
	return nio.BlockTypeEntry{
		Create: func() nio.Block {
			return &PublisherBlock{
				Connection: connection,
			}
		},
		Definition: publisherDefinition,
	}
}
|
package main
import (
"github.com/gin-gonic/gin"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/sqlite"
"qor-admin-3/admin"
)
// main wires an in-memory SQLite database into a QOR admin instance and
// serves it with gin on 127.0.0.1:8080.
func main() {
	// Set up the database.
	// BUG FIX: the gorm.Open error was discarded with `_`, so a failed
	// open would surface later as a confusing nil-DB crash.
	DB, err := gorm.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	r := gin.New()
	a := admin.New(DB, "", "secret")
	a.Bind(r)
	// BUG FIX: r.Run's error (e.g. port already in use) was ignored.
	if err := r.Run("127.0.0.1:8080"); err != nil {
		panic(err)
	}
}
|
package recaptcha
import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"strings"
	"time"

	"github.com/sirupsen/logrus"
)
// init raises the package logger to debug verbosity.
// NOTE(review): this mutates the global logrus level for the whole program.
func init() {
	logrus.SetLevel(logrus.DebugLevel)
}
const (
	// recaptcha API to ensure the token is valid
	reCAPTCHALink = "https://www.google.com/recaptcha/api/siteverify"
	// DefaultTreshold Default minimum score when using V3 api.
	// (Name keeps its historical "Treshold" spelling: it is exported and
	// renaming would break callers.)
	DefaultTreshold = 0.5
)
var (
	// logger tags every log line from this package.
	logger = logrus.WithFields(logrus.Fields{"package": "ReCAPTCHA"})
)
// reCAPTCHAResponse mirrors the JSON body returned by the siteverify API.
// Pointer fields are only present in v3 (score/action) or Android responses.
type reCAPTCHAResponse struct {
	Success        bool      `json:"success"`                    // whether this request was a valid reCAPTCHA token for your site
	ChallengeTS    time.Time `json:"challenge_ts"`               // timestamp of the challenge load (ISO format yyyy-MM-dd'T'HH:mm:ssZZ)
	Score          *float64  `json:"score,omitempty"`            // the score for this request (0.0 - 1.0)
	Action         *string   `json:"action,omitempty"`           // the action name for this request (important to verify)
	Hostname       string    `json:"hostname,omitempty"`         // the hostname of the site where the reCAPTCHA was solved
	ApkPackageName *string   `json:"apk_package_name,omitempty"` // the package name of the app where the reCAPTCHA was solved
	ErrorCodes     []string  `json:"error-codes,omitempty"`      // optional
}
// VerifyOption verification options expected for the challenge.
// Zero-valued fields are simply not checked.
type VerifyOption struct {
	Threshold      float64 // ignored in v2 recaptcha
	Action         string  // ignored in v2 recaptcha
	Hostname       string  // expected hostname the challenge was solved on
	ApkPackageName string  // expected Android package name
	ResponseTime   float64 // maximum accepted challenge age
	RemoteIP       string  // client IP forwarded to the verify endpoint
}
// ReCAPTCHA is the recaptcha holder struct; it makes adding mocking code simpler.
type ReCAPTCHA struct {
	Secret string // site secret from the reCAPTCHA admin console
}
// New new ReCAPTCHA instance
// Using environment variables
// export ReCAPTCHA_SECRET="reCaptcha Secret Key"
func New() (*ReCAPTCHA, error) {
	return NewWithSecert(os.Getenv("ReCAPTCHA_SECRET"))
}
// NewWithSecert new ReCAPTCHA instance
// get your secret from https://www.google.com/recaptcha/admin
// (Exported name keeps its historical "Secert" spelling for compatibility.)
func NewWithSecert(secret string) (*ReCAPTCHA, error) {
	if len(secret) == 0 {
		return nil, fmt.Errorf("recaptcha secret cannot be blank")
	}
	return &ReCAPTCHA{
		Secret: secret,
	}, nil
}
// Verify returns a nil error if the client solved the challenge correctly.
// It builds a ReCAPTCHA from the environment on every call.
// NOTE(review): the (error, *response) result order is reversed from Go
// convention but is part of the public API, so it is kept.
func Verify(token string) (error, *reCAPTCHAResponse) {
	captcha, err := New()
	if err != nil {
		return err, nil
	}
	return captcha.VerifyWithOptions(token, VerifyOption{})
}
// Verify returns a nil error if the client solved the challenge correctly.
// It is shorthand for VerifyWithOptions with no expectations set.
func (r *ReCAPTCHA) Verify(token string) (error, *reCAPTCHAResponse) {
	return r.VerifyWithOptions(token, VerifyOption{})
}
// VerifyWithOptions returns a nil error if the client solved the challenge
// correctly and all set options match the response.
// `Threshold` and `Action` are ignored when using the V2 version.
// The raw response is returned alongside any confirm error so callers can
// inspect why verification failed.
func (r *ReCAPTCHA) VerifyWithOptions(token string, options VerifyOption) (error, *reCAPTCHAResponse) {
	res, err := r.fetch(token, options.RemoteIP)
	if err != nil {
		logger.Error("confirm:", err)
		return err, nil
	}
	return r.confirm(res, options), &res
}
// fetch posts the token (and optional remote IP) to the siteverify endpoint
// and decodes the JSON reply into res.
//
// The form body is built with url.Values; the previous code abused an empty
// http.Request plus ParseForm purely to reach a url.Values, which is both
// indirect and discards ParseForm's error.
func (r *ReCAPTCHA) fetch(token, remoteip string) (res reCAPTCHAResponse, err error) {
	form := url.Values{}
	form.Add("secret", r.Secret)
	form.Add("response", token)
	if len(remoteip) > 0 {
		form.Add("remoteip", remoteip)
	}
	body := strings.NewReader(form.Encode())
	logger.Info("fetch:", body)
	resp, err := http.Post(reCAPTCHALink, "application/x-www-form-urlencoded", body)
	if err != nil {
		logger.Error("fetch: ", err)
		return
	}
	defer resp.Body.Close()
	err = unmarshal(resp.Body, &res)
	// debug info
	r.showDebug(res)
	return
}
// confirm checks the decoded siteverify response against the caller's
// options. Checks run in order: remote error codes, success flag, hostname,
// response time, APK package name, then the V3-only score/action checks.
// A nil return means the challenge was solved and all set options matched.
func (r *ReCAPTCHA) confirm(res reCAPTCHAResponse, options VerifyOption) (err error) {
	if res.ErrorCodes != nil {
		err = fmt.Errorf("remote error codes: %v", res.ErrorCodes)
		return
	}
	if !res.Success {
		err = fmt.Errorf("invalid challenge solution")
		return
	}
	// the hostname of the site where the reCAPTCHA was solved
	if len(options.Hostname) > 0 && options.Hostname != res.Hostname {
		err = fmt.Errorf("invalid response hostname '%s', while expecting '%s'", res.Hostname, options.Hostname)
		return
	}
	// reject solutions that took longer than the allowed window
	if options.ResponseTime != 0 {
		duration := time.Since(res.ChallengeTS)
		if options.ResponseTime < duration.Seconds() {
			err = fmt.Errorf("time spent in resolving challenge '%fs', while expecting maximum '%fs'", duration.Seconds(), options.ResponseTime)
			return
		}
	}
	// the package name of the app where the reCAPTCHA was solved
	if res.ApkPackageName != nil && len(options.ApkPackageName) > 0 && options.ApkPackageName != *res.ApkPackageName {
		err = fmt.Errorf("invalid response ApkPackageName '%s', while expecting '%s'", *res.ApkPackageName, options.ApkPackageName)
		return
	}
	// V3 api
	err = r.confirmV3(res, options)
	return
}
// confirmV3 applies the V3-only checks (action name, minimum score).
// A response with neither Score nor Action is treated as V2 and passes.
func (r *ReCAPTCHA) confirmV3(res reCAPTCHAResponse, options VerifyOption) (err error) {
	// ignored in v2 recaptcha
	if res.Score == nil && res.Action == nil {
		return
	}
	// the action name for this request
	if res.Action != nil && len(options.Action) > 0 && options.Action != *res.Action {
		err = fmt.Errorf("invalid response action '%s', while expecting '%s'", *res.Action, options.Action)
		return
	}
	// the score for this request (0.0 - 1.0)
	if res.Score == nil {
		return
	}
	// fall back to the package default when the caller did not set one
	threshold := DefaultTreshold
	if options.Threshold != 0 {
		threshold = options.Threshold
	}
	// note: a score exactly equal to the threshold is rejected
	if threshold >= *res.Score {
		err = fmt.Errorf("received score '%f', while expecting minimum '%f'", *res.Score, threshold)
		return
	}
	return
}
// showDebug dumps the decoded response at debug level; pointer fields are
// only printed when present so nil V2 responses do not panic.
func (r *ReCAPTCHA) showDebug(res reCAPTCHAResponse) {
	logger.Debug("res.Success:", res.Success)
	logger.Debug("res.ChallengeTS:", res.ChallengeTS)
	logger.Debug("res.Hostname:", res.Hostname)
	logger.Debug("res.ErrorCodes:", res.ErrorCodes)
	if res.Score != nil {
		logger.Debug("res.Score:", *res.Score)
	}
	if res.Action != nil {
		logger.Debug("res.Action:", *res.Action)
	}
	if res.ApkPackageName != nil {
		logger.Debug("res.ApkPackageName:", *res.ApkPackageName)
	}
}
// unmarshal reads the whole body, strips a UTF-8 BOM if present, and decodes
// the JSON into v. Read and decode errors are logged and returned.
//
// The previous implementation decoded the payload twice: once into a throwaway
// interface{} purely for a debug log, silently discarding that error before
// overwriting it with the second decode. The raw bytes are now logged instead.
func unmarshal(body io.Reader, v interface{}) error {
	bodyBytes, err := ioutil.ReadAll(body)
	if err != nil {
		logger.Errorf("ioutil.ReadAll: %s", err)
		return err
	}
	// Some servers prepend a UTF-8 byte-order mark, which encoding/json rejects.
	bodyBytes = bytes.TrimPrefix(bodyBytes, []byte("\xef\xbb\xbf"))
	logger.Debugf("body: %s", bodyBytes)
	if err := json.Unmarshal(bodyBytes, v); err != nil {
		logger.Errorf("unmarshal: %s", err)
		return err
	}
	return nil
}
|
package odoo
import (
"fmt"
)
// AccountTaxReport represents account.tax.report model.
//
// Fix: the tag option was previously misspelled "omptempty", which the xmlrpc
// marshaller silently ignores — nil fields were always serialized. With
// "omitempty" they are omitted as intended.
type AccountTaxReport struct {
	LastUpdate  *Time      `xmlrpc:"__last_update,omitempty"`
	CompanyId   *Many2One  `xmlrpc:"company_id,omitempty"`
	CreateDate  *Time      `xmlrpc:"create_date,omitempty"`
	CreateUid   *Many2One  `xmlrpc:"create_uid,omitempty"`
	DateFrom    *Time      `xmlrpc:"date_from,omitempty"`
	DateTo      *Time      `xmlrpc:"date_to,omitempty"`
	DisplayName *String    `xmlrpc:"display_name,omitempty"`
	Id          *Int       `xmlrpc:"id,omitempty"`
	JournalIds  *Relation  `xmlrpc:"journal_ids,omitempty"`
	TargetMove  *Selection `xmlrpc:"target_move,omitempty"`
	WriteDate   *Time      `xmlrpc:"write_date,omitempty"`
	WriteUid    *Many2One  `xmlrpc:"write_uid,omitempty"`
}
// AccountTaxReports represents array of account.tax.report model.
type AccountTaxReports []AccountTaxReport
// AccountTaxReportModel is the odoo model name.
const AccountTaxReportModel = "account.tax.report"
// Many2One convert AccountTaxReport to *Many2One.
// NOTE(review): assumes atr.Id is non-nil; a record fetched without the id
// field would panic here — confirm against how records are loaded.
func (atr *AccountTaxReport) Many2One() *Many2One {
	return NewMany2One(atr.Id.Get(), "")
}
// CreateAccountTaxReport creates a new account.tax.report model and returns its id.
// When the server returns no ids, -1 is returned with a nil error.
func (c *Client) CreateAccountTaxReport(atr *AccountTaxReport) (int64, error) {
	ids, err := c.CreateAccountTaxReports([]*AccountTaxReport{atr})
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		return -1, nil
	default:
		return ids[0], nil
	}
}
// CreateAccountTaxReports creates new account.tax.report models and returns their ids.
func (c *Client) CreateAccountTaxReports(atrs []*AccountTaxReport) ([]int64, error) {
	// Repack into []interface{} as required by the generic Create call.
	var vv []interface{}
	for _, v := range atrs {
		vv = append(vv, v)
	}
	return c.Create(AccountTaxReportModel, vv)
}
// UpdateAccountTaxReport updates an existing account.tax.report record.
// NOTE(review): assumes atr.Id is non-nil — a record without an id panics here.
func (c *Client) UpdateAccountTaxReport(atr *AccountTaxReport) error {
	return c.UpdateAccountTaxReports([]int64{atr.Id.Get()}, atr)
}
// UpdateAccountTaxReports updates existing account.tax.report records.
// All records (represented by ids) will be updated by atr values.
func (c *Client) UpdateAccountTaxReports(ids []int64, atr *AccountTaxReport) error {
	return c.Update(AccountTaxReportModel, ids, atr)
}
// DeleteAccountTaxReport deletes an existing account.tax.report record.
func (c *Client) DeleteAccountTaxReport(id int64) error {
	return c.DeleteAccountTaxReports([]int64{id})
}
// DeleteAccountTaxReports deletes existing account.tax.report records.
func (c *Client) DeleteAccountTaxReports(ids []int64) error {
	return c.Delete(AccountTaxReportModel, ids)
}
// GetAccountTaxReport gets account.tax.report existing record.
// It returns an error when the id does not exist.
func (c *Client) GetAccountTaxReport(id int64) (*AccountTaxReport, error) {
	atrs, err := c.GetAccountTaxReports([]int64{id})
	if err != nil {
		return nil, err
	}
	if atrs == nil || len(*atrs) == 0 {
		return nil, fmt.Errorf("id %v of account.tax.report not found", id)
	}
	return &((*atrs)[0]), nil
}
// GetAccountTaxReports gets account.tax.report existing records.
// Missing ids are simply absent from the result; no error is raised for them.
func (c *Client) GetAccountTaxReports(ids []int64) (*AccountTaxReports, error) {
	atrs := &AccountTaxReports{}
	if err := c.Read(AccountTaxReportModel, ids, nil, atrs); err != nil {
		return nil, err
	}
	return atrs, nil
}
// FindAccountTaxReport finds account.tax.report record by querying it with criteria.
// Only the first match is returned; an error is raised when nothing matches.
func (c *Client) FindAccountTaxReport(criteria *Criteria) (*AccountTaxReport, error) {
	result := &AccountTaxReports{}
	err := c.SearchRead(AccountTaxReportModel, criteria, NewOptions().Limit(1), result)
	if err != nil {
		return nil, err
	}
	if result == nil || len(*result) == 0 {
		return nil, fmt.Errorf("account.tax.report was not found with criteria %v", criteria)
	}
	return &((*result)[0]), nil
}
// FindAccountTaxReports finds account.tax.report records by querying it
// and filtering it with criteria and options.
func (c *Client) FindAccountTaxReports(criteria *Criteria, options *Options) (*AccountTaxReports, error) {
	atrs := &AccountTaxReports{}
	if err := c.SearchRead(AccountTaxReportModel, criteria, options, atrs); err != nil {
		return nil, err
	}
	return atrs, nil
}
// FindAccountTaxReportIds finds records ids by querying it
// and filtering it with criteria and options.
// An empty (non-nil) slice is returned alongside any error.
func (c *Client) FindAccountTaxReportIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(AccountTaxReportModel, criteria, options)
	if err != nil {
		return []int64{}, err
	}
	return ids, nil
}
// FindAccountTaxReportId finds record id by querying it with criteria.
// Only the first matching id is returned; no match is an error.
func (c *Client) FindAccountTaxReportId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(AccountTaxReportModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		return -1, fmt.Errorf("account.tax.report was not found with criteria %v and options %v", criteria, options)
	}
	return ids[0], nil
}
|
package response
import (
pb "github.com/LILILIhuahuahua/ustc_tencent_game/api/proto"
"github.com/LILILIhuahuahua/ustc_tencent_game/framework"
"github.com/LILILIhuahuahua/ustc_tencent_game/framework/event"
"github.com/LILILIhuahuahua/ustc_tencent_game/tools"
"github.com/golang/protobuf/proto"
)
// HeroQuitResponse is the server's reply to a hero-quit request, carrying
// whether the quit succeeded. It embeds BaseEvent for the message code.
type HeroQuitResponse struct {
	framework.BaseEvent
	QuitResult bool // whether the quit was accepted
}
// NewHeroQuitResponse builds a response with the given quit result.
// NOTE(review): unlike FromMessage/CopyFromMessage, this does not set the
// message code on the embedded BaseEvent — confirm callers do not rely on it.
func NewHeroQuitResponse(rlt bool) *HeroQuitResponse {
	return &HeroQuitResponse{
		QuitResult: rlt,
	}
}
// FromMessage populates this event in place from a *pb.HeroQuitResponse.
// Panics if obj is not that type (unchecked type assertion).
func (e *HeroQuitResponse) FromMessage(obj interface{}) {
	pbMsg := obj.(*pb.HeroQuitResponse)
	e.SetCode(int32(pb.GAME_MSG_CODE_HERO_QUIT_RESPONSE))
	e.QuitResult = pbMsg.GetQuitResult()
}
// CopyFromMessage builds a fresh event from the HeroQuitResponse embedded in
// a *pb.Response envelope. Panics if obj is not *pb.Response.
func (e *HeroQuitResponse) CopyFromMessage(obj interface{}) event.Event {
	pbMsg := obj.(*pb.Response).HeroQuitResponse
	resp := &HeroQuitResponse{
		QuitResult: pbMsg.GetQuitResult(),
	}
	resp.SetCode(int32(pb.GAME_MSG_CODE_HERO_QUIT_RESPONSE))
	return resp
}
// ToMessage converts this event back into its protobuf form.
func (e *HeroQuitResponse) ToMessage() interface{} {
	return &pb.HeroQuitResponse{
		QuitResult: e.QuitResult,
	}
}
// ToGMessageBytes wraps the response in a pb.GMessage envelope (with the
// given sequence id and current send time) and returns the serialized bytes.
func (e *HeroQuitResponse) ToGMessageBytes(seqId int32) []byte {
	resp := &pb.Response{
		HeroQuitResponse: e.ToMessage().(*pb.HeroQuitResponse),
	}
	msg := pb.GMessage{
		MsgType: pb.MSG_TYPE_RESPONSE,
		MsgCode: pb.GAME_MSG_CODE_HERO_QUIT_RESPONSE,
		Response: resp,
		SeqId: seqId,
		SendTime: tools.TIME_UTIL.NowMillis(),
	}
	// NOTE(review): the Marshal error is discarded; a failed marshal silently
	// returns empty bytes — confirm callers tolerate that.
	out, _ := proto.Marshal(&msg)
	return out
}
|
package gntagger_test
import (
"io/ioutil"
"os"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
const (
	// Test fixture paths used throughout the suite.
	pathLong = "./testdata/seashells_book.txt"
	pathShort = "./testdata/short.txt"
	pathNamesAnnot = "./testdata/names_annot.json"
)
var (
	// Fixture contents, loaded once in BeforeSuite.
	dataLong []byte
	dataShort []byte
	dataNamesAnnot []byte
)
// TestGntagger is the go-test entry point that hands control to Ginkgo.
func TestGntagger(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Gntagger Suite")
}
// Load all test fixtures into memory once before any spec runs.
var _ = BeforeSuite(func() {
	var err error
	dataLong, err = ioutil.ReadFile(pathLong)
	Expect(err).ToNot(HaveOccurred())
	dataShort, err = ioutil.ReadFile(pathShort)
	Expect(err).ToNot(HaveOccurred())
	dataNamesAnnot, err = ioutil.ReadFile(pathNamesAnnot)
	Expect(err).ToNot(HaveOccurred())
})
// Remove the "<fixture>_gntagger" output directories the specs create.
var _ = AfterSuite(func() {
	dir := pathLong + "_gntagger"
	err := os.RemoveAll(dir)
	Expect(err).ToNot(HaveOccurred())
	dir = pathShort + "_gntagger"
	err = os.RemoveAll(dir)
	Expect(err).ToNot(HaveOccurred())
})
|
package main
import (
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"time"
)
// DogeTestNet3GenesisHash is the genesis block hash for the Dogecoin test
// network. NOTE(review): currently all-zero — the real hash (see the
// commented byte sequences below) still needs to be filled in.
var DogeTestNet3GenesisHash = chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy.
})
//0xe9, 0x55, 0x05, 0x37, 0x0d, 0x4c, 0x3f, 0x46, 0x65, 0xbd, 0x98, 0x15, 0x4f, 0x94, 0x07, 0xd4, 0x45, 0xd4, 0x82, 0x62, 0x9d, 0xaa, 0x06, 0x36, 0xb6, 0x04, 0x73, 0x64, 0x62, 0x87, 0xa0, 0xbb,
// reversed byte order: bb 0a 78 26 46 37 40 6b 63 60 aa d9 26 28 4d 54 4d 70 49 f4 51 89 db 56 64 f3 c4 d0 73 50 55 9e
// DogeTestNetParams describes the Dogecoin test network.
// NOTE(review): most values below (port, BIP heights, subsidy interval,
// retarget timespans, HD key magics) are Bitcoin testnet3 defaults, and
// GenesisBlock/PowLimit are still nil (see TODOs) — confirm each against the
// Dogecoin chain parameters before use.
var DogeTestNetParams = chaincfg.Params{
	Name: "testnet3",
	Net: wire.TestNet3,
	DefaultPort: "18333",
	DNSSeeds: []chaincfg.DNSSeed{
	//{"testnet-seed.bitcoin.jonasschnelli.ch", true},
	//{"testnet-seed.bitcoin.schildbach.de", false},
	//{"seed.tbtc.petertodd.org", true},
	//{"testnet-seed.bluematt.me", false},
	},
	// Chain parameters
	GenesisBlock: nil, // TODO: fix dis
	GenesisHash: &DogeTestNet3GenesisHash,
	PowLimit: nil, // TODO: fix dis
	PowLimitBits: 0x1d00ffff,
	BIP0034Height: 21111, // 0000000023b3a96d3484e5abb3755c413e7d41500f8e2a5c3f0dd01299cd8ef8
	BIP0065Height: 581885, // 00000000007f6655f22f98e72ed80d8b06dc761d5da09df0fa1dc4be4f861eb6
	BIP0066Height: 330776, // 000000002104c8c45e99a8853285a3b592602a3ccde2b832481da85e9e4ba182
	CoinbaseMaturity: 100,
	SubsidyReductionInterval: 210000,
	TargetTimespan: time.Hour * 24 * 14, // 14 days
	TargetTimePerBlock: time.Minute * 10, // 10 minutes
	RetargetAdjustmentFactor: 4, // 25% less, 400% more
	ReduceMinDifficulty: true,
	MinDiffReductionTime: time.Minute * 20, // TargetTimePerBlock * 2
	GenerateSupported: false,
	// Checkpoints ordered from oldest to newest.
	Checkpoints: []chaincfg.Checkpoint{
	//{546, newHashFromStr("000000002a936ca763904c3c35fce2f3556c559c0214345d31b1bcebf76acb70")},
	//{100000, newHashFromStr("00000000009e2958c15ff9290d571bf9459e93b19765c6801ddeccadbb160a1e")},
	//{200000, newHashFromStr("0000000000287bffd321963ef05feab753ebe274e1d78b2fd4e2bfe9ad3aa6f2")},
	//{300001, newHashFromStr("0000000000004829474748f3d1bc8fcf893c88be255e6d7f571c548aff57abf4")},
	//{400002, newHashFromStr("0000000005e2c73b8ecb82ae2dbc2e8274614ebad7172b53528aba7501f5a089")},
	//{500011, newHashFromStr("00000000000929f63977fbac92ff570a9bd9e7715401ee96f2848f7b07750b02")},
	//{600002, newHashFromStr("000000000001f471389afd6ee94dcace5ccc44adc18e8bff402443f034b07240")},
	//{700000, newHashFromStr("000000000000406178b12a4dea3b27e13b3c4fe4510994fd667d7c1e6a3f4dc1")},
	//{800010, newHashFromStr("000000000017ed35296433190b6829db01e657d80631d43f5983fa403bfdb4c1")},
	//{900000, newHashFromStr("0000000000356f8d8924556e765b7a94aaebc6b5c8685dcfa2b1ee8b41acd89b")},
	//{1000007, newHashFromStr("00000000001ccb893d8a1f25b70ad173ce955e5f50124261bbbc50379a612ddf")},
	//{1100007, newHashFromStr("00000000000abc7b2cd18768ab3dee20857326a818d1946ed6796f42d66dd1e8")},
	//{1200007, newHashFromStr("00000000000004f2dc41845771909db57e04191714ed8c963f7e56713a7b6cea")},
	//{1300007, newHashFromStr("0000000072eab69d54df75107c052b26b0395b44f77578184293bf1bb1dbd9fa")},
	},
	// Consensus rule change deployments.
	//
	// The miner confirmation window is defined as:
	// target proof of work timespan / target proof of work spacing
	RuleChangeActivationThreshold: 1512, // 75% of MinerConfirmationWindow
	MinerConfirmationWindow: 2016,
	Deployments: [chaincfg.DefinedDeployments]chaincfg.ConsensusDeployment{
	chaincfg.DeploymentTestDummy: {
	BitNumber: 28,
	StartTime: 1199145601, // January 1, 2008 UTC
	ExpireTime: 1230767999, // December 31, 2008 UTC
	},
	chaincfg.DeploymentCSV: {
	BitNumber: 0,
	StartTime: 1456790400, // March 1st, 2016
	ExpireTime: 1493596800, // May 1st, 2017
	},
	chaincfg.DeploymentSegwit: {
	BitNumber: 1,
	StartTime: 1462060800, // May 1, 2016 UTC
	ExpireTime: 1493596800, // May 1, 2017 UTC.
	},
	},
	// Mempool parameters
	RelayNonStdTxs: true,
	// Human-readable part for Bech32 encoded segwit addresses, as defined in
	// BIP 173.
	Bech32HRPSegwit: "tb", // always tb for test net
	// Address encoding magics
	PubKeyHashAddrID: 0x71, // starts with m or n
	ScriptHashAddrID: 0xc4, // starts with 2
	WitnessPubKeyHashAddrID: 0x03, // starts with QW
	WitnessScriptHashAddrID: 0x28, // starts with T7n
	PrivateKeyID: 0xf1, // starts with 9 (uncompressed) or c (compressed)
	// BIP32 hierarchical deterministic extended key magics
	HDPrivateKeyID: [4]byte{0x04, 0x35, 0x83, 0x94}, // starts with tprv
	HDPublicKeyID: [4]byte{0x04, 0x35, 0x87, 0xcf}, // starts with tpub
	// BIP44 coin type used in the hierarchical deterministic path for
	// address generation.
	HDCoinType: 1,
}
|
package main
/**
* @website http://albulescu.ro
* @author Cosmin Albulescu <cosmin@albulescu.ro>
*/
import (
"bytes"
"fmt"
"io"
"log"
"net/http"
"os"
"path"
"strconv"
"time"
)
// ProgressOutput is notified whenever download progress changes.
type ProgressOutput interface {
	// UpdateProgress receives the current progress as a percentage (0-100).
	UpdateProgress(progress float64)
}
// DownloaderWithProgress downloads files over HTTP with an OAuth bearer
// token, reporting progress to the configured ProgressOutput.
type DownloaderWithProgress struct {
	accessToken string // OAuth token sent as "Authorization: OAuth <token>"
	progressOutput ProgressOutput // sink for progress updates
}
// ConsoleProgressOuptut prints progress percentages to stdout.
// (The misspelled "Ouptut" name is exported and therefore kept.)
type ConsoleProgressOuptut struct{}

// UpdateProgress prints the progress as e.g. "42%" on its own line.
// A single Printf replaces the previous Printf+Println pair so the
// percentage and its "%" sign cannot interleave with concurrent writers.
func (o *ConsoleProgressOuptut) UpdateProgress(progress float64) {
	fmt.Printf("%.0f%%\n", progress)
}
// printDownloadPercent polls the size of the partially-downloaded file at
// path once per second and reports it as a percentage of total until a
// value is received on done.
func (d *DownloaderWithProgress) printDownloadPercent(done chan int64,
	path string,
	total int64) {
	var stop bool = false
	for {
		select {
		case <-done:
			stop = true
		default:
			// os.Stat replaces the previous os.Open, which was never closed
			// and leaked one file descriptor per second for the whole download.
			fi, err := os.Stat(path)
			if err != nil {
				log.Fatal(err)
			}
			size := fi.Size()
			if size == 0 {
				size = 1 // avoid reporting NaN/0 before the first byte lands
			}
			var percent float64 = float64(size) / float64(total) * 100
			d.progressOutput.UpdateProgress(percent)
		}
		if stop {
			break
		}
		time.Sleep(time.Second)
	}
}
// DownloadFile downloads url into dest/<base(name)>, printing progress via
// printDownloadPercent, and returns the local file path. Errors are fatal
// (panic), matching the original behavior.
func (d *DownloaderWithProgress) DownloadFile(
	url string, name string, dest string) string {
	file := path.Base(name)
	// Fixed: the previous format string had a single %s but two arguments,
	// which go vet flags and which printed a stray %!(EXTRA ...) suffix.
	log.Printf("Downloading file %s from %s", file, url)
	var path bytes.Buffer // note: shadows the "path" package for the rest of the function
	path.WriteString(dest)
	path.WriteString("/")
	path.WriteString(file)
	start := time.Now()
	out, err := os.Create(path.String())
	if err != nil {
		fmt.Println(path.String())
		panic(err)
	}
	defer out.Close()
	client := &http.Client{CheckRedirect: d.redirectPolicyFunc}
	size := d.calculateFileSize(client, url)
	done := make(chan int64)
	go d.printDownloadPercent(done, path.String(), int64(size))
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		// Previously unchecked: a malformed URL would nil-panic on Header.Add.
		panic(err)
	}
	req.Header.Add("Authorization", "OAuth "+d.accessToken)
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	n, err := io.Copy(out, resp.Body)
	if err != nil {
		panic(err)
	}
	done <- n
	elapsed := time.Since(start)
	log.Printf("Download completed in %s", elapsed)
	return path.String()
}
// calculateFileSize issues a HEAD request for url and returns the
// Content-Length. It now actually uses the supplied client (whose redirect
// policy re-adds the OAuth header), which the original accepted as a
// parameter but silently ignored in favor of http.Head. The Authorization
// header is set on the initial request too, mirroring DownloadFile.
func (d *DownloaderWithProgress) calculateFileSize(client *http.Client, url string) int {
	req, err := http.NewRequest("HEAD", url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Add("Authorization", "OAuth "+d.accessToken)
	headResp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer headResp.Body.Close()
	size, err := strconv.Atoi(headResp.Header.Get("Content-Length"))
	if err != nil {
		panic(err)
	}
	return size
}
// redirectPolicyFunc re-adds the OAuth Authorization header on every
// redirect, because net/http strips it when following redirects.
func (d *DownloaderWithProgress) redirectPolicyFunc(r *http.Request,
	via []*http.Request) error {
	r.Header.Add("Authorization", "OAuth "+d.accessToken)
	return nil
}
|
package main
import (
"bufio"
"errors"
"flag"
"io"
"os"
"strconv"
"strings"
)
// configuration holds the command-line options for the S3 batch-delete tool.
type configuration struct {
	MFA string // MFA string
	Region string // AWS region name to connect to
	Bucket string // S3 bucket to delete files from (required)
	BatchSize int // number of objects per delete batch (1..1000)
	RateLimit int // maximum objects deleted per second (>0)
	CFactor int // time window for calculating concurrency, in milliseconds
	CMax int // maximum number of concurrent requests
	Quiet bool // quiet mode
	Debug bool // debug mode
	SkipFile string // path to a file listing batch numbers to skip
}
// Load registers all flags on the default FlagSet, parses os.Args and then
// validates the result. It must be called at most once per process (a second
// call would re-register flags and panic).
func (conf *configuration) Load() error {
	flag.StringVar(&conf.MFA, "mfa", "", "MFA string")
	flag.StringVar(&conf.Region, "region", "", "AWS region name to connect to")
	flag.StringVar(&conf.Bucket, "bucket", "", "S3 bucket to delete files from")
	flag.IntVar(&conf.BatchSize, "batchsize", 870, "Number of objects per batch")
	flag.IntVar(&conf.RateLimit, "ratelimit", 3480, "Maximum number of objects to delete per second")
	flag.IntVar(&conf.CFactor, "cfactor", 3000, "Time window for calculating concurrency, in milliseconds")
	flag.IntVar(&conf.CMax, "cmax", 16, "Maximum number of concurrent requests")
	flag.BoolVar(&conf.Quiet, "quiet", false, "Quiet mode")
	flag.BoolVar(&conf.Debug, "debug", false, "Debug mode")
	flag.StringVar(&conf.SkipFile, "skip", "", "Skip file, containing batch numbers to skip")
	flag.Parse()
	return conf.Validate()
}
// Validate checks the required/ranged options and returns the first
// violation found: Bucket must be set, BatchSize within S3's 1..1000 batch
// limit, and RateLimit positive.
func (conf configuration) Validate() error {
	switch {
	case conf.Bucket == "":
		return errors.New("Bucket is required")
	case conf.BatchSize < 1 || conf.BatchSize > 1000:
		return errors.New("BatchSize must be between 1 and 1000")
	case conf.RateLimit < 1:
		return errors.New("RateLimit must be greater than 0")
	}
	return nil
}
// loadConfig builds a configuration from command-line flags; the returned
// struct is non-nil even when validation fails.
func loadConfig() (*configuration, error) {
	c := &configuration{}
	return c, c.Load()
}
// loadSkipFile reads a file of one non-negative integer per line and returns
// them as a set. The partial set read so far is returned even on error;
// a Close failure takes precedence over a scan/parse error in the result.
func loadSkipFile(filename string) (map[int]bool, error) {
	ret := make(map[int]bool)
	f, err := os.Open(filename)
	if err != nil {
		return ret, err
	}
	err = scanInts(f, func(val int) error {
		if val < 0 {
			return errors.New("Skip file cannot contain negative numbers")
		}
		ret[val] = true
		return nil
	})
	if closeErr := f.Close(); closeErr != nil {
		return ret, closeErr
	}
	return ret, err
}
func scanInts(r io.Reader, f func(int) error) error {
s := bufio.NewScanner(r)
for {
if ok := s.Scan(); !ok {
if err := s.Err(); err != nil {
return err
}
break
}
val, err := strconv.Atoi(strings.TrimSpace(s.Text()))
if err != nil {
return err
}
err = f(val)
if err != nil {
return err
}
}
return nil
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"time"
jwt "github.com/dgrijalva/jwt-go"
"github.com/dgrijalva/jwt-go/request"
)
// CheckAuth returns a middleware that validates the request's JWT using the
// RSA public key in ./public.rsa.pub. Valid tokens get a 202 and fall
// through to the wrapped handler; anything else gets a 401.
func CheckAuth() Middleware {
	return func(f http.HandlerFunc) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			token, err := request.ParseFromRequestWithClaims(r, request.OAuth2Extractor, &Claims{}, func(token *jwt.Token) (interface{}, error) {
				publicbyte, err := ioutil.ReadFile("./public.rsa.pub")
				if err != nil {
					fmt.Println("Error al leer archivo public")
					// Propagate instead of continuing with nil key bytes.
					return nil, err
				}
				publickey, err := jwt.ParseRSAPublicKeyFromPEM(publicbyte)
				if err != nil {
					fmt.Println("Error al convertir public key")
					return nil, err
				}
				// Do not print the key itself: the original dumped it to
				// stdout on every request.
				return publickey, nil
			})
			// Previously the parse error was ignored and token could be nil
			// here, making token.Valid a nil-pointer panic on bad requests.
			if err != nil || token == nil || !token.Valid {
				w.WriteHeader(http.StatusUnauthorized)
				fmt.Fprintln(w, "no aceptado")
				return
			}
			w.WriteHeader(http.StatusAccepted)
			f(w, r)
		}
	}
}
// Logging returns a middleware that logs the request path and total
// handling time once the wrapped handler returns (including on panic,
// thanks to the deferred call).
func Logging() Middleware {
	return func(f http.HandlerFunc) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			defer func(start time.Time) {
				log.Println(r.URL.Path, time.Since(start))
			}(time.Now())
			f(w, r)
		}
	}
}
|
package main
import (
//"fmt"
"strconv"
"testing"
"net/http"
"github.com/ant0ine/go-json-rest/rest/test"
"github.com/bcolucci/moocapic-rating/rating"
)
// handler is the HTTP handler under test, rebuilt by Setup for each test.
var handler http.Handler
// Setup re-creates the API with the dev configuration and wipes its database
// so every test starts from a clean slate.
// NOTE(review): `api` is a package-level variable declared elsewhere in this
// package.
func Setup() {
	conf := rating.DevConf()
	api = rating.NewApi(conf)
	handler = api.MakeHandler()
	api.Database.DropDatabase()
}
// AddAuth stamps the request with a current timestamp and the matching
// derived API key, the headers the auth middleware verifies.
func AddAuth(r *http.Request) {
	ts := rating.CurrentTimeStr()
	key := string(api.PKeyMiddleware.BuildKey(api.Conf.ApiKey, ts))
	r.Header.Set("ts", ts)
	r.Header.Set("key", key)
}
// CreateRating builds the fixture rating used by the save/get tests.
func CreateRating() *rating.Rating {
	return &rating.Rating{
		Tenant: "MSPI",
		Category: "Products",
		ItemId: "someProductId",
		Rating: 3,
		RatingOn: 5}
}
// TestNoAuth verifies a request without auth headers is rejected.
// NOTE(review): the API reports auth failures as 500, not 401 — confirm
// that is the intended contract.
func TestNoAuth(t *testing.T) {
	Setup()
	defer api.Session.Close()
	r := test.RunRequest(t, handler, test.MakeSimpleRequest("GET", api.Conf.Host.Addr, nil))
	r.CodeIs(500)
	r.ContentTypeIsJson()
}
// TestInvalidAuth verifies that tampering with the ts header after signing
// (key no longer matches the timestamp) is rejected.
func TestInvalidAuth(t *testing.T) {
	Setup()
	defer api.Session.Close()
	req := test.MakeSimpleRequest("GET", api.Conf.Host.Addr, nil)
	AddAuth(req)
	req.Header.Set("ts", strconv.FormatInt(rating.CurrentTime() + 1, 10))
	r := test.RunRequest(t, handler, req)
	r.CodeIs(500)
	r.ContentTypeIsJson()
}
// TestGetAllEmpty verifies an authenticated GET on a fresh database returns
// an empty JSON array.
func TestGetAllEmpty(t *testing.T) {
	Setup()
	defer api.Session.Close()
	req := test.MakeSimpleRequest("GET", api.Conf.Host.Addr, nil)
	AddAuth(req)
	r := test.RunRequest(t, handler, req)
	r.CodeIs(200)
	r.ContentTypeIsJson()
	r.BodyIs("[]")
}
// TestSave verifies an authenticated POST of a rating succeeds.
func TestSave(t *testing.T) {
	Setup()
	defer api.Session.Close()
	rating := CreateRating()
	req := test.MakeSimpleRequest("POST", api.Conf.Host.Addr, rating)
	AddAuth(req)
	r := test.RunRequest(t, handler, req)
	r.CodeIs(200)
	r.ContentTypeIsJson()
}
// TestGetAll saves one rating and verifies it can be listed back.
// The body content is not yet asserted (see TODO below).
func TestGetAll(t *testing.T) {
	Setup()
	defer api.Session.Close()
	// save one
	rating := CreateRating()
	req := test.MakeSimpleRequest("POST", api.Conf.Host.Addr, rating)
	AddAuth(req)
	r := test.RunRequest(t, handler, req)
	r.CodeIs(200)
	// get all
	req = test.MakeSimpleRequest("GET", api.Conf.Host.Addr, nil)
	AddAuth(req)
	r = test.RunRequest(t, handler, req)
	r.CodeIs(200)
	r.ContentTypeIsJson()
	//TODO compare returned Rating
	//ratings := []Rating{}
	//r.DecodeJsonPayload(ratings)
	//fmt.Println(ratings)
}
package unifi
import (
"bytes"
"encoding/json"
"net/http"
)
// SiteRougeAccessPoint defines a rogue/neighboring access point entry as
// reported by the controller. (The exported "Rouge" spelling is a historical
// misspelling of "rogue", kept to avoid breaking callers.)
type SiteRougeAccessPoint struct {
	ID string `json:"_id"`
	Age int `json:"age"`
	AccessPointMAC string `json:"ap_mac"`
	Band string `json:"band"`
	BSSID string `json:"bssid"`
	BW int `json:"bw"`
	CenterFrequency int `json:"center_freq"`
	Channel int `json:"channel"`
	ESSID string `json:"essid"`
	Frequency int `json:"freq"`
	IsAdHoc bool `json:"is_adhoc"`
	IsRogue bool `json:"is_rogue"`
	IsUbnt bool `json:"is_ubnt"`
	LastSeen interface{} `json:"last_seen"`
	Noise int `json:"noise"`
	OUI string `json:"oui"`
	Radio string `json:"radio"`
	RadioName string `json:"radio_name"`
	ReportTime int64 `json:"report_time"`
	RSSI int `json:"rssi"`
	RSSIAge int `json:"rssi_age"`
	Security string `json:"security"`
	Signal int `json:"signal"`
	SiteID string `json:"site_id"`
}
// SiteRougeAccessPointResponse contains the controller's rogue access point
// response envelope.
type SiteRougeAccessPointResponse struct {
	Meta CommonMeta `json:"meta"`
	Data []SiteRougeAccessPoint `json:"data"`
}
// SiteRougeAccessPoints will list rogue/neighboring access points.
// site - site to query
// seenWithinHours - search within the last defined hours; negative values
// fall back to 24. NOTE(review): a value of 0 is passed through unchanged —
// confirm whether 0 should also default to 24.
func (c *Client) SiteRougeAccessPoints(site string, seenWithinHours int) (*SiteRougeAccessPointResponse, error) {
	if seenWithinHours < 0 {
		seenWithinHours = 24
	}
	payload := map[string]interface{}{
		"within": seenWithinHours,
	}
	// Marshal of a map[string]interface{} with an int value cannot fail,
	// so the error is safely ignored here.
	data, _ := json.Marshal(payload)
	var resp SiteRougeAccessPointResponse
	// NOTE(review): this sends a JSON body on a GET request, which the UniFi
	// controller accepts for stat endpoints — confirm before changing.
	err := c.doSiteRequest(http.MethodGet, site, "stat/rogueap", bytes.NewReader(data), &resp)
	return &resp, err
}
// SiteRougeKnownAccessPoints will list known rogue access points.
// site - site to query
func (c *Client) SiteRougeKnownAccessPoints(site string) (*SiteRougeAccessPointResponse, error) {
	var resp SiteRougeAccessPointResponse
	// Fixed endpoint spelling: the controller REST resource is
	// "rest/rogueknown"; the previous "rest/rougeknown" would 404.
	err := c.doSiteRequest(http.MethodGet, site, "rest/rogueknown", nil, &resp)
	return &resp, err
}
|
// Copyright 2020 Comcast Cable Communications Management, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plugin
import (
"context"
"fmt"
"github.com/xmidt-org/ears/pkg/event"
"github.com/xmidt-org/ears/pkg/tenant"
"sync"
pkgmanager "github.com/xmidt-org/ears/pkg/plugin/manager"
pkgreceiver "github.com/xmidt-org/ears/pkg/receiver"
)
// Compile-time check that *receiver satisfies pkgreceiver.Receiver.
var _ pkgreceiver.Receiver = (*receiver)(nil)
// receiver wraps a concrete pkgreceiver.Receiver with registration state
// managed by the owning manager. The embedded mutex guards next and active.
type receiver struct {
	sync.Mutex
	id string
	name string
	plugin string
	hash string
	tid tenant.Id
	next pkgreceiver.NextFn // downstream handler; read by Trigger under the mutex
	manager *manager // owning manager; nil once unregistered
	active bool // true while registered with the manager
	receiver pkgreceiver.Receiver // the wrapped concrete receiver
	done chan struct{}
}
// Config delegates to the wrapped receiver's configuration.
func (r *receiver) Config() interface{} {
	return r.receiver.Config()
}
// Name returns the wrapper's own name (not the wrapped receiver's).
func (r *receiver) Name() string {
	return r.name
}
// Plugin returns the plugin name this receiver was created from.
func (r *receiver) Plugin() string {
	return r.plugin
}
// Tenant returns the tenant this receiver belongs to.
func (r *receiver) Tenant() tenant.Id {
	return r.tid
}
// The metric accessors below all delegate to the wrapped receiver.
func (r *receiver) EventSuccessCount() int {
	return r.receiver.EventSuccessCount()
}
func (r *receiver) EventSuccessVelocity() int {
	return r.receiver.EventSuccessVelocity()
}
func (r *receiver) EventErrorCount() int {
	return r.receiver.EventErrorCount()
}
func (r *receiver) EventErrorVelocity() int {
	return r.receiver.EventErrorVelocity()
}
func (r *receiver) EventTs() int64 {
	return r.receiver.EventTs()
}
// LogSuccess delegates success accounting to the wrapped receiver.
func (r *receiver) LogSuccess() {
	r.receiver.LogSuccess()
}
// Trigger forwards the event to the registered next function, if any.
// The handler is copied out under the mutex so the (possibly blocking)
// call itself happens outside the lock.
func (r *receiver) Trigger(e event.Event) {
	//r.receiver.Trigger(e)
	r.Lock()
	next := r.next
	r.Unlock()
	if next != nil {
		next(e)
	}
}
// Receive registers next as the downstream handler and blocks, delegating
// event delivery to the shared manager. It fails if the receiver is not
// currently registered or next is nil.
func (r *receiver) Receive(next pkgreceiver.NextFn) error {
	if r == nil {
		return &pkgmanager.NilPluginError{}
	}
	if next == nil {
		return &pkgreceiver.InvalidConfigError{
			Err: fmt.Errorf("next cannot be nil"),
		}
	}
	r.Lock()
	if !r.active {
		r.Unlock()
		return &NotRegisteredError{}
	}
	// Assign under the mutex: Trigger reads r.next while holding the same
	// lock, so the previous unguarded write here was a data race.
	r.next = next
	r.Unlock()
	// Block
	return r.manager.receive(r, next)
}
// StopReceiving asks the manager to stop event delivery for this receiver.
// It fails with NotRegisteredError when the receiver is not active.
func (r *receiver) StopReceiving(ctx context.Context) error {
	if r == nil {
		return &pkgmanager.NilPluginError{}
	}
	r.Lock()
	active := r.active
	r.Unlock()
	if !active {
		return &NotRegisteredError{}
	}
	return r.manager.stopReceiving(ctx, r)
}
// Unregister removes this receiver from its manager. It fails with
// NotRegisteredError when there is no manager or the receiver is inactive.
func (r *receiver) Unregister(ctx context.Context) error {
	if r == nil {
		return &pkgmanager.NilPluginError{}
	}
	r.Lock()
	registered := r.manager != nil && r.active
	r.Unlock()
	if !registered {
		return &NotRegisteredError{}
	}
	return r.manager.UnregisterReceiver(ctx, r)
}
|
package query
import (
"github.com/keptn-contrib/dynatrace-service/internal/sli/unit"
"testing"
)
// TestScaleData checks unit.ScaleData's conversions: explicit MicroSecond
// and Byte units, plus the metric-id fallback for response time.
func TestScaleData(t *testing.T) {
	// 1,000,000 µs -> 1,000 ms
	if unit.ScaleData("", "MicroSecond", 1000000.0) != 1000.0 {
		t.Errorf("ScaleData incorrectly scales MicroSecond")
	}
	// 1024 bytes -> 1 KiB
	if unit.ScaleData("", "Byte", 1024.0) != 1.0 {
		t.Errorf("ScaleData incorrectly scales Bytes")
	}
	// no unit given: scaling inferred from the metric id
	if unit.ScaleData("builtin:service.response.time", "", 1000000.0) != 1000.0 {
		t.Errorf("ScaleData incorrectly scales builtin:service.response.time")
	}
}
|
package testdefinition
import (
"fmt"
"path"
argov1 "github.com/argoproj/argo/v2/pkg/apis/workflow/v1alpha1"
apiv1 "k8s.io/api/core/v1"
tmv1beta1 "github.com/gardener/test-infra/pkg/apis/testmachinery/v1beta1"
"github.com/gardener/test-infra/pkg/testmachinery"
"github.com/gardener/test-infra/pkg/testmachinery/config"
)
// TestDefinition represents a TestDefinition which was fetched from locations.
type TestDefinition struct {
	Info *tmv1beta1.TestDefinition // parsed definition metadata
	Location Location // where the definition was fetched from
	FileName string // file the definition was read from
	Template *argov1.Template // generated argo workflow template
	Volumes []apiv1.Volume // volumes required by the template
	inputArtifacts ArtifactSet
	outputArtifacts ArtifactSet
	config config.Set
}
// Location is an interface for different testDefLocation types like git or local.
type Location interface {
	// SetTestDefs adds Testdefinitions to the map.
	SetTestDefs(map[string]*TestDefinition) error
	// Type returns the tmv1beta1.LocationType type.
	Type() tmv1beta1.LocationType
	// Name returns the unique name of the location.
	Name() string
	// GetLocation returns the original TestLocation object
	GetLocation() *tmv1beta1.TestLocation
	// GitInfo returns the current git information
	GitInfo() GitInfo
}
// GitInfo describes additional information about the used sources.
type GitInfo struct {
	SHA string // commit the sources were fetched at
	Ref string // branch/tag reference, if any
}
// GetStdInputArtifacts returns the default input artifacts of test definitions.
// The artifacts include kubeconfigs and shared folder inputs; both are
// optional so steps without them still run.
func GetStdInputArtifacts() []argov1.Artifact {
	return []argov1.Artifact{
		{
			Name: testmachinery.ArtifactKubeconfigs,
			Path: testmachinery.TM_KUBECONFIG_PATH,
			Optional: true,
		},
		{
			Name: testmachinery.ArtifactSharedFolder,
			Path: testmachinery.TM_SHARED_PATH,
			Optional: true,
		},
	}
}
// GetUntrustedInputArtifacts returns the untrusted input artifacts of test
// definitions. The artifacts only include minimal configuration: just the
// shoot kubeconfig.
func GetUntrustedInputArtifacts() []argov1.Artifact {
	return []argov1.Artifact{
		{
			Name: testmachinery.ArtifactUntrustedKubeconfigs,
			Path: path.Join(testmachinery.TM_KUBECONFIG_PATH, tmv1beta1.ShootKubeconfigName),
			Optional: true,
		},
	}
}
// GetStdOutputArtifacts returns the default output artifacts of a step:
// kubeconfigs, untrusted kubeconfigs, and the shared folder. When global is
// set, each artifact keeps its base name as GlobalName and gets a
// "-global" suffixed Name.
func GetStdOutputArtifacts(global bool) []argov1.Artifact {
	artifacts := []argov1.Artifact{
		{
			Name: testmachinery.ArtifactKubeconfigs,
			Path: testmachinery.TM_KUBECONFIG_PATH,
			Optional: true,
		},
		{
			Name: testmachinery.ArtifactUntrustedKubeconfigs,
			Path: path.Join(testmachinery.TM_KUBECONFIG_PATH, tmv1beta1.ShootKubeconfigName),
			Optional: true,
		},
		{
			Name: testmachinery.ArtifactSharedFolder,
			Path: testmachinery.TM_SHARED_PATH,
			Optional: true,
		},
	}
	if global {
		for i := range artifacts {
			artifacts[i].GlobalName = artifacts[i].Name
			artifacts[i].Name = fmt.Sprintf("%s-global", artifacts[i].Name)
		}
	}
	return artifacts
}
|
package courses
import (
"bufio"
"fmt"
"io"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
)
// Course - main struct for course info (№, name, url).
// CourseNum is the ordinal position in the course listing, CourseName is the
// display name (also used as the output file name), and CourseURL is the
// direct video URL that Download fetches.
type Course struct {
	CourseNum int
	CourseName string
	CourseURL string
}
func CreateFolderForCourses() {
os.Mkdir("./lessons", 0777)
}
// Download - download video from given course. The video is streamed over
// HTTP into "./lessons/<CourseName>.mp4" while a background goroutine prints
// a rough progress indicator. All errors are logged; the method returns
// nothing either way (unchanged signature).
func (course *Course) Download() {
	// Create the destination file, ex "./lessons/1) Learning Terminal.mp4".
	path := "./lessons/" + course.CourseName + ".mp4"
	out, err := os.Create(path)
	if err != nil {
		// Bug fix: the original logged and then continued with a nil
		// *os.File, which would panic at io.Copy below.
		log.Println("Cannot create file", err)
		return
	}
	defer out.Close()
	// Ask the server for the size up front so the progress goroutine can
	// turn bytes-on-disk into a percentage.
	size := 0
	headResp, err := http.Head(course.CourseURL)
	if err != nil {
		// Bug fix: the original ignored this error and deferred a Close on
		// a nil response body.
		log.Println("Cannot get file size", err)
	} else {
		defer headResp.Body.Close()
		size, _ = strconv.Atoi(headResp.Header.Get("Content-length"))
		fmt.Println("Size of", course.CourseName, "is", float64(size)/1000000, "mb")
	}
	fmt.Println("Download", course.CourseName)
	start := time.Now()
	done := make(chan int64)
	go PrintDownloadPercent(done, path, int64(size))
	// Get the data.
	resp, err := http.Get(course.CourseURL)
	if err != nil {
		log.Println("Cannot get file", err)
		// Bug fix: the original returned without signaling done, leaking
		// the progress goroutine (it would loop forever).
		done <- 0
		return
	}
	defer resp.Body.Close()
	// Write the body to the file.
	n, err := io.Copy(out, resp.Body)
	if err != nil {
		log.Println("Cannot copy body of a response to a file", err)
	}
	done <- n // stops PrintDownloadPercent
	log.Printf("Download completed in %s", time.Since(start))
}
// PrintDownloadPercent samples the size of the file at path once per second
// and prints the downloaded percentage (relative to total) followed by "=>",
// until a value is received on done.
func PrintDownloadPercent(done chan int64, path string, total int64) {
	stop := false
	for {
		select {
		case <-done:
			stop = true
		default:
			printDownloadProgress(path, total)
		}
		if stop {
			break
		}
		time.Sleep(time.Second)
	}
}

// printDownloadProgress prints one progress sample for the file at path.
// Errors are logged and the sample skipped; the original used log.Fatal here,
// which killed the whole process over a transient open/stat failure.
func printDownloadProgress(path string, total int64) {
	file, err := os.Open(path)
	if err != nil {
		log.Println("[ERROR] opening file for progress", err)
		return
	}
	// Bug fix: the original never closed the file, leaking one file handle
	// per second for the lifetime of the download.
	defer file.Close()
	fi, err := file.Stat()
	if err != nil {
		log.Println("[ERROR] reading file size", err)
		return
	}
	size := fi.Size()
	if size == 0 {
		size = 1 // avoid printing NaN before any data has arrived
	}
	percent := float64(size) / float64(total) * 100
	fmt.Printf("%.0f", percent)
	fmt.Print("=>")
}
// AskForDownload asks the user which of the given courses to download and
// returns the chosen course numbers. Answering yes selects every course;
// otherwise the user is prompted for explicit numbers or dash ranges joined
// by '+'.
func AskForDownload(courses []Course) []int {
	answer := getInput("Download all " + strconv.Itoa(len(courses)) + " courses (Y/N)")
	if checkAnswer(answer) {
		var selected []int
		for _, course := range courses {
			selected = append(selected, course.CourseNum)
		}
		fmt.Println("all courses gonna be downloaded")
		return selected
	}
	rawInput := getInput("What courses u wannna to download", "ex. --> 2 + 4-15 + 17-21")
	positions := getCoursesPosition(parseInputForNumbers(rawInput))
	fmt.Println("Courses with th-is positions gonna be downloaded: ", positions)
	return positions
}
// getCoursesPosition turns the parsed argument strings into course positions:
// a plain number contributes itself, a dash range contributes every number it
// covers.
func getCoursesPosition(args []string) []int {
	positions := make([]int, 0)
	for _, arg := range args {
		if !strings.Contains(arg, "-") {
			num, _ := strconv.Atoi(arg)
			positions = append(positions, num)
			continue
		}
		positions = append(positions, parseRange(arg)...)
	}
	return positions
}
// parseRange expands a dash-separated range such as "4-15" into the full
// slice of integers it covers (inclusive on both ends). A reversed range
// yields an empty slice; malformed input is logged and yields an empty slice.
func parseRange(argRange string) []int {
	ns := make([]int, 0)
	// Split on the first dash only; the original hand-rolled character
	// scanner produced surprising results for inputs with extra dashes.
	parts := strings.SplitN(argRange, "-", 2)
	if len(parts) != 2 {
		log.Println("[ERROR] Conversion", "no '-' found in "+argRange)
		return ns
	}
	start, startErr := strconv.Atoi(parts[0])
	end, endErr := strconv.Atoi(parts[1])
	// Bug fix: the original assigned both Atoi errors to the same variable,
	// so a bad start bound was silently overwritten by a nil second error
	// and the loop then ran on a zero value.
	if startErr != nil || endErr != nil {
		log.Println("[ERROR] Conversion", startErr, endErr)
		return ns
	}
	for i := start; i <= end; i++ {
		ns = append(ns, i)
	}
	return ns
}
// removeSpaceFromArgs strips leading and trailing spaces from every argument
// and returns the cleaned copies.
func removeSpaceFromArgs(args []string) []string {
	trimmed := make([]string, 0, len(args))
	for _, raw := range args {
		trimmed = append(trimmed, strings.Trim(raw, " "))
	}
	return trimmed
}
// parseInputForNumbers - get array of arguments (single numbers or dash
// ranges). '+' is the delimiter, so "2 + 4-15" yields ["2", "4-15"]; each
// piece is trimmed of surrounding spaces and a trailing newline is dropped.
func parseInputForNumbers(input string) []string {
	// Bug fix: the original rune-by-rune scanner dropped the final character
	// unless the input ended in a newline (it appended the accumulator on
	// the last index without consuming the last rune), and it compared rune
	// byte-offsets against the byte length, which breaks on multi-byte
	// input. Splitting on the delimiter avoids both problems.
	cleaned := strings.TrimRight(input, "\n")
	if cleaned == "" {
		return []string{}
	}
	parts := strings.Split(cleaned, "+")
	args := make([]string, 0, len(parts))
	for _, part := range parts {
		args = append(args, strings.Trim(part, " "))
	}
	return args
}
// getInput prints each prompt string on its own line, then reads and returns
// one line from stdin (including the trailing newline). Read errors are
// logged and whatever was read so far is returned.
func getInput(prompt ...string) string {
	for _, message := range prompt {
		fmt.Println(message)
	}
	reader := bufio.NewReader(os.Stdin)
	line, err := reader.ReadString('\n')
	if err != nil {
		log.Println("[ERROR] scanning", err)
	}
	return line
}
// checkAnswer reports whether the user's answer is affirmative: "y" or "yes",
// case-insensitive, trailing newline ignored.
func checkAnswer(a string) bool {
	normalized := trimAndLow(a)
	return normalized == "y" || normalized == "yes"
}
// trimAndLow normalizes a raw answer: strips any trailing newlines and
// lower-cases the remainder.
func trimAndLow(a string) string {
	withoutNewline := strings.TrimRight(a, "\n")
	return strings.ToLower(withoutNewline)
}
|
package db
import (
"database/sql"
_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
"github.com/ocoscope/face/utils"
)
// SQL statements for the users table. All placeholders are MySQL-style '?'
// parameters bound by the wrapper functions below.
const (
	CREATE_USER = `
	INSERT INTO users
	(email, password, first_name, last_name, patronymic_name, number, position, photo, face, access_token, role_id)
	VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
	`
	UPDATE_ACCESS_TOKEN = "UPDATE users SET access_token = ? WHERE id = ?"
	USER_FACE_TOKEN_UPDATE = "UPDATE users SET access_token = ?, face = ? WHERE id = ?"
	USER_PASSWORD_UPDATE = "UPDATE users SET access_token = ?, password = ? WHERE id = ?"
	USER_DEPARTMENT_UPDATE = "UPDATE users SET department_id = ? WHERE id = ?"
	UPDATE_USER = `
	UPDATE users SET
	first_name = ?, last_name = ?, patronymic_name = ?,
	number = ?, position = ? WHERE id = ?
	`
	UPDATE_USER_ROLE = "UPDATE users SET role_id = ? WHERE id = ?"
	UPDATE_USER_FACE = "UPDATE users SET face = ? WHERE id = ?"
	UPDATE_USER_PHOTO = "UPDATE users SET photo = ? WHERE id = ?"
	CHECK_AUTH_USER = "SELECT id FROM users WHERE id = ? AND access_token = ?"
	SEARCH_BY_PHOTO = "SELECT id FROM users WHERE face = ?"
	GET_USER_DATA_BY_EMAIL = "SELECT id, password FROM users WHERE email = ?"
	GET_USER_DATA_BY_USER_ID = "SELECT id, password FROM users WHERE id = ?"
	GET_USER_BY_EMAIL = "SELECT id, access_token FROM users WHERE email = ?"
	GET_USER_NAMES = "SELECT id, first_name, last_name, patronymic_name FROM users WHERE id = ?"
	GET_USER = `
	SELECT email, first_name, last_name, patronymic_name, number, position, photo
	FROM users WHERE id = ?
	`
	GET_USERS = `
	SELECT
	id, email, first_name, last_name, patronymic_name, number, position, face, role_id
	FROM users
	`
	// GET_USERS_NAMES only returns users that already have a face id set.
	GET_USERS_NAMES = `
	SELECT
	id, first_name, last_name, patronymic_name
	FROM users
	WHERE CHAR_LENGTH(face) > 0
	`
	SELECT_USERS_IDS = "SELECT id FROM users WHERE CHAR_LENGTH(photo) > 0"
	GET_USER_ROLE_ID = "SELECT role_id FROM users WHERE id = ?"
	// NOTE(review): GET_USER_DATA looks up company_id by photo, unlike its
	// name suggests — confirm it is still used and correct.
	GET_USER_DATA = "SELECT company_id FROM users WHERE photo = ?"
	GET_USER_FACE = "SELECT face FROM users WHERE id = ?"
	GET_USER_EMAIL = "SELECT email FROM users WHERE id = ?"
	GET_ACCESS_TOKEN = "SELECT access_token FROM users WHERE id = ?"
	DELETE_USER = "DELETE FROM users WHERE id = ?"
)
// TUser carries every column needed by CREATE_USER to insert a new user row.
type TUser struct {
	RoleID int64
	Email, Password, FirstName, LastName, PatronymicName, Number, Position, Photo, Face, AccessToken string
}

// TGetUser is the scan target for GET_USER (single-user profile fields).
type TGetUser struct {
	Email string `db:"email"`
	FirstName string `db:"first_name"`
	LastName string `db:"last_name"`
	PatronymicName string `db:"patronymic_name"`
	Position string `db:"position"`
	Photo string `db:"photo"`
	Number string `db:"number"`
}

// TSelectUsers is the scan target for GET_USERS (user-listing fields).
type TSelectUsers struct {
	UserID int64 `db:"id"`
	Email string `db:"email"`
	FirstName string `db:"first_name"`
	LastName string `db:"last_name"`
	PatronymicName string `db:"patronymic_name"`
	Position string `db:"position"`
	Face string `db:"face"`
	Number string `db:"number"`
	RoleID int64 `db:"role_id"`
}

// TGetUserNames is the scan target for the name-only queries
// (GET_USER_NAMES / GET_USERS_NAMES).
type TGetUserNames struct {
	UserID int64 `db:"id"`
	FirstName string `db:"first_name"`
	LastName string `db:"last_name"`
	PatronymicName string `db:"patronymic_name"`
}

// TUpdateUser carries the editable profile fields bound by UPDATE_USER.
type TUpdateUser struct {
	UserID int64
	Email, FirstName, LastName, PatronymicName, Position, Number string
}

// TUserData is the scan target for the id+password queries.
type TUserData struct {
	UserID int64 `db:"id"`
	Password string `db:"password"`
}

// TGetUserData duplicates TUserData field-for-field.
// NOTE(review): it is not referenced by any function in this file — confirm
// it is used elsewhere before removing.
type TGetUserData struct {
	UserID int64 `db:"id"`
	Password string `db:"password"`
}

// TUserID is a single-column scan target for queries returning a user id.
type TUserID struct {
	UserID int64 `db:"id"`
}

// TUserFace is a single-column scan target for GET_USER_FACE.
type TUserFace struct {
	Face string `db:"face"`
}

// TUserPhoto is a single-column scan target for the photo column.
type TUserPhoto struct {
	Photo string `db:"photo"`
}

// TUserEmail is a single-column scan target for GET_USER_EMAIL.
type TUserEmail struct {
	Email string `db:"email"`
}

// TUserRoleID is a single-column scan target for GET_USER_ROLE_ID.
type TUserRoleID struct {
	RoleID int64 `db:"role_id"`
}

// TGetUserByEmail is the scan target for GET_USER_BY_EMAIL.
type TGetUserByEmail struct {
	UserID int64 `db:"id"`
	AccessToken string `db:"access_token"`
}

// TInvitedUserUpdate carries the parameters of USER_FACE_TOKEN_UPDATE.
type TInvitedUserUpdate struct {
	UserID int64
	AccessToken, Face string
}

// TUserPasswordUpdate carries the parameters of USER_PASSWORD_UPDATE.
type TUserPasswordUpdate struct {
	UserID int64
	AccessToken, Password string
}

// TUserDataCheck holds an email/password pair.
// NOTE(review): not referenced by any function in this file.
type TUserDataCheck struct {
	Email, Password string
}

// TUserTokensCheck holds a user's tokens.
// NOTE(review): RefreshToken is not used by any query in this file — confirm
// it is consumed elsewhere.
type TUserTokensCheck struct {
	UserID int64
	AccessToken, RefreshToken string
}
// CreateUser inserts a new row into the users table and returns the
// auto-generated id of the created user.
func CreateUser(database *sqlx.DB, user TUser) (int64, error) {
	result, err := database.Exec(
		CREATE_USER,
		user.Email, user.Password, user.FirstName, user.LastName,
		user.PatronymicName, user.Number, user.Position, user.Photo,
		user.Face, user.AccessToken, user.RoleID,
	)
	if err != nil {
		return 0, err
	}
	return result.LastInsertId()
}
// GetUser loads the profile fields of a single user by id.
func GetUser(database *sqlx.DB, UserID int64) (TGetUser, error) {
	user := TGetUser{}
	err := database.Get(&user, GET_USER, UserID)
	return user, err
}

// SelectUsers loads the listing fields of every user.
func SelectUsers(database *sqlx.DB) ([]TSelectUsers, error) {
	users := make([]TSelectUsers, 0)
	err := database.Select(&users, GET_USERS)
	return users, err
}

// GetUsersNames loads the name fields of every user with a non-empty face
// column (see GET_USERS_NAMES).
func GetUsersNames(database *sqlx.DB) ([]TGetUserNames, error) {
	names := make([]TGetUserNames, 0)
	err := database.Select(&names, GET_USERS_NAMES)
	return names, err
}

// GetUserNames loads the name fields of a single user by id.
func GetUserNames(database *sqlx.DB, userID int64) (TGetUserNames, error) {
	names := TGetUserNames{}
	err := database.Get(&names, GET_USER_NAMES, userID)
	return names, err
}
// UpdateUser rewrites the editable profile fields of the user identified by
// user.UserID.
func UpdateUser(database *sqlx.DB, user TUpdateUser) (sql.Result, error) {
	return database.Exec(
		UPDATE_USER,
		user.FirstName, user.LastName, user.PatronymicName,
		user.Number, user.Position, user.UserID,
	)
}

// GetPasswordByEmail loads a user's id and stored password by email.
func GetPasswordByEmail(database *sqlx.DB, email string) (TUserData, error) {
	data := TUserData{}
	err := database.Get(&data, GET_USER_DATA_BY_EMAIL, email)
	return data, err
}

// GetPasswordByUserID loads a user's stored password by id.
// NOTE(review): this takes uint while every sibling takes int64 — worth
// unifying, but changing it now would break existing callers.
func GetPasswordByUserID(database *sqlx.DB, userID uint) (string, error) {
	data := TUserData{}
	err := database.Get(&data, GET_USER_DATA_BY_USER_ID, userID)
	return data.Password, err
}
// UpdateAccessToken stores accessToken for the user with the given id.
func UpdateAccessToken(conn *sqlx.DB, ID int64, accessToken string) error {
	_, err := conn.Exec(UPDATE_ACCESS_TOKEN, accessToken, ID)
	return err
}

// UpdateAccessTokenById generates a fresh access token for the user, stores
// it, and returns the new token.
func UpdateAccessTokenById(conn *sqlx.DB, userID int64) (string, error) {
	accessToken, err := utils.AccessTokenGenerate(utils.IntToStr(userID))
	// Bug fix: the original discarded this error and would persist whatever
	// (possibly empty) token came back from a failed generation.
	if err != nil {
		return "", err
	}
	return accessToken, UpdateAccessToken(conn, userID, accessToken)
}

// UpdateUserRole assigns roleID to the user with the given id.
func UpdateUserRole(conn *sqlx.DB, roleID int64, userID int64) error {
	_, err := conn.Exec(UPDATE_USER_ROLE, roleID, userID)
	return err
}

// CheckUserAccessToken returns nil when a row matches userID and token, and
// a no-rows error from Get otherwise.
func CheckUserAccessToken(database *sqlx.DB, userID int64, token string) error {
	var user TUserID
	err := database.Get(&user, CHECK_AUTH_USER, userID, token)
	return err
}
// DeleteUser removes the user row with the given id.
func DeleteUser(database *sqlx.DB, id int64) error {
	if _, err := database.Exec(DELETE_USER, id); err != nil {
		return err
	}
	return nil
}

// GetUserRoleID loads the role id of the user with the given id.
func GetUserRoleID(database *sqlx.DB, userID int64) (int64, error) {
	role := TUserRoleID{}
	err := database.Get(&role, GET_USER_ROLE_ID, userID)
	return role.RoleID, err
}

// UserDepartmentUpdate assigns a department to a user.
// NOTE(review): unlike every sibling, this opens (and closes) its own
// connection via Connect() instead of accepting *sqlx.DB — consider
// unifying, though that would change the signature for callers.
func UserDepartmentUpdate(departmentID int64, userID int64) error {
	db, err := Connect()
	if err != nil {
		return err
	}
	defer db.Close()
	_, err = db.Exec(USER_DEPARTMENT_UPDATE, departmentID, userID)
	return err
}
// UserFaceTokenUpdate stores a new access token and face id for an invited
// user in a single statement.
func UserFaceTokenUpdate(database *sqlx.DB, user TInvitedUserUpdate) error {
	_, err := database.Exec(USER_FACE_TOKEN_UPDATE, user.AccessToken, user.Face, user.UserID)
	return err
}

// UserFaceUpdate stores a new face id for the user.
func UserFaceUpdate(database *sqlx.DB, newFaceID string, userID int64) error {
	if _, err := database.Exec(UPDATE_USER_FACE, newFaceID, userID); err != nil {
		return err
	}
	return nil
}

// UserPasswordUpdate stores a new access token and password for the user.
func UserPasswordUpdate(database *sqlx.DB, user TUserPasswordUpdate) error {
	_, err := database.Exec(USER_PASSWORD_UPDATE, user.AccessToken, user.Password, user.UserID)
	return err
}

// UserPhotoUpdate stores a new photo for the user.
func UserPhotoUpdate(database *sqlx.DB, userID int64, photo string) error {
	if _, err := database.Exec(UPDATE_USER_PHOTO, photo, userID); err != nil {
		return err
	}
	return nil
}

// SearchByPhoto returns the id of the user whose face column equals faceID.
func SearchByPhoto(database *sqlx.DB, faceID string) (int64, error) {
	match := TUserID{}
	err := database.Get(&match, SEARCH_BY_PHOTO, faceID)
	return match.UserID, err
}
// GetUserFace loads the stored face id of a user.
func GetUserFace(database *sqlx.DB, userID int64) (string, error) {
	row := TUserFace{}
	err := database.Get(&row, GET_USER_FACE, userID)
	return row.Face, err
}

// GetUserEmail loads the email address of a user.
func GetUserEmail(database *sqlx.DB, userID int64) (string, error) {
	row := TUserEmail{}
	err := database.Get(&row, GET_USER_EMAIL, userID)
	return row.Email, err
}

// GetUserByEmail loads a user's id and access token by email.
func GetUserByEmail(database *sqlx.DB, email string) (TGetUserByEmail, error) {
	row := TGetUserByEmail{}
	err := database.Get(&row, GET_USER_BY_EMAIL, email)
	return row, err
}

// GetAccessToken loads the stored access token of a user.
func GetAccessToken(database *sqlx.DB, userID int64) (string, error) {
	var row struct {
		AccessToken string `db:"access_token"`
	}
	err := database.Get(&row, GET_ACCESS_TOKEN, userID)
	return row.AccessToken, err
}

// SelectUsersIDs loads the ids of every user with a non-empty photo column.
func SelectUsersIDs(database *sqlx.DB) ([]TUserID, error) {
	ids := make([]TUserID, 0)
	err := database.Select(&ids, SELECT_USERS_IDS)
	return ids, err
}
|
package main
import (
"fmt"
"github.com/gin-gonic/gin"
"github.com/smartwalle/alipay/v3"
"github.com/smartwalle/xid"
"log"
"net/http"
)
// aliClient is the process-wide alipay client, initialized once in main.
var aliClient *alipay.Client

const (
	// kAppId is the sandbox application id used by this demo.
	kAppId = "2016073100129537"
	// kPrivateKey is the application's RSA private key.
	// NOTE(review): hard-coding a private key in source is only acceptable
	// for a sandbox demo; real credentials must come from configuration or
	// a secret store.
	kPrivateKey = "MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC4UOTKtDstRrjyNPvek9eGqv1RYDmHtLw7olq4lBtqF/D+BfrPd04rMYoivqO5r+v1DQNUGs2yC48eQ4eWb0TSjl/kUy2jHzDcUGiZrGhgxw3e/TJ9w7ix6TrixVg5n542oC1zWstOl75gkiL7EkDfEKo/fZUbLt8aHgBW87NYE19obfAOxGn+YsH5Hpkl3GGby37Hq/mcX4tcWDYN55JPaAcSyjyeyl/uMA19StmQEuU992jqqYWd5Y3z58DotiD4dVAtjB5VogmCkLUcalvUB3N/z3y67GQT+gtOi42VfxnW/JoaXKurHK9Ukmw1GPu97iZAF3Sy19+mOdeR8I+RAgMBAAECggEAAXlEGwrN3lLOb8FUsjbkZkM/u0LVsuwTBTcLGqa0gWinmKBbnQULLvU6cYSssnNho5fzCt0b/+xvvII1t1I0bqqMwbqYhtFdBqXt8CycuQleZwYHPVIvS9zdh6qkRfGsxisJsf5r2bkE4KjKds9yjVYIxnEunAUH66GJxygzquSZQPxgYB3ASRkTzRRowe3ROqaGLF2ejEcvcASKAiIEaQ6Az0sMDtIUAcqFcN1mp9TQ+UyOgNsTw8qWHKv6z6XwjSfYcVfNhR5bcsf+3zr8CKK7cD6f6cjtIrrDSeDdbjdqXTMEYEOV4qs2PH6mNJwx2V1mAZKNZ3bIRGSiKme+sQKBgQD5UDqLBySXtqsCpMC31SvuHGOlkwEQAKslytJvE1kerh4VV9DCIyRsV9v57GZSc8HGo/AQq7dJAa3X9mumZ4vL8RIrtMkuU+scwT+AzPx8jeibNKLq1GI4GaPmdHJaJsBGtA2VUphWCw1HvHegJGgPUoymfpOc8iKwMdQZO3ZHxwKBgQC9QmWJ6zoBvkIOZdoNHpOMrHkIphzJagC/2dbdH6x58fEX787Nmd8yL3mU189EENmFJAdw9d5kPzi8Nxa84oIsbsQSTaYuF0VN6Kw+dZstoJ3U4pf5ReKjWiRNk6waeg6pzEYwH7mDbLcGBw+0+gW08KJLfsyl6aejJ75i9cnd5wKBgHa7UZYiabfi46BXq/wghlJYrNAOqWPgnaFa7Uq+0SN+Uo9hieba6565XOayQaykujUKn+qgjKI1LYB7N5tBFt+iSEAOUf1BM+g21DJX7Sq4Pn2j3K6vRLNo6ph2/nqWl91UJF/nvOrFSqbOR745eGFLs/Yas9v7qK92m4cEvXjDAoGAG2cOrp55YqE6jT0gCkBAGuEqER+EEYGgpCaVXqTkYy+tucqGBezejTSkhPGOWAucgxOJZEilL5ybyVyslSKyuF49U20cv5Ws+i/TKKP8mOmlkJpSaMw+mWpG0VitVZQQpXMnQnaFdMr74QqKsqh0xRMGXKn6VZd0J0Js5YUy+kcCgYEA5dxAzYHV2yH2/b/Uau99VUtp+xE3BssiEO5CiVLEvzLLQlqcqtFO3ptTMqfTeqe98iiYcr5EMSqLHte1qQeGziyKzPvMHkjrTPMDccVur0L2fcE+WBy2fNdBDzQQ5k3ra27/i5NcRzNAhoBXBNVRwnrLXpaUEjy7ERCoR6y1XkE="
	// kServerPort / kServerDomain describe where this demo server listens
	// and the base URL alipay redirects back to.
	kServerPort = "9989"
	kServerDomain = "http://127.0.0.1" + ":" + kServerPort
)
// main initializes the alipay client with the sandbox credentials and
// certificates, then serves the pay/callback/notify endpoints with gin.
func main() {
	var err error
	if aliClient, err = alipay.New(kAppId, kPrivateKey, false); err != nil {
		log.Println("初始化支付宝失败", err)
		return
	}
	// 使用支付宝证书: load the app certificate, the alipay root certificate
	// and the alipay public-key certificate.
	if err = aliClient.LoadAppPublicCertFromFile("appCertPublicKey_2016073100129537.crt"); err != nil {
		log.Println("加载证书发生错误", err)
		return
	}
	if err = aliClient.LoadAliPayRootCertFromFile("alipayRootCert.crt"); err != nil {
		log.Println("加载证书发生错误", err)
		return
	}
	if err = aliClient.LoadAliPayPublicCertFromFile("alipayCertPublicKey_RSA2.crt"); err != nil {
		log.Println("加载证书发生错误", err)
		return
	}
	var s = gin.Default()
	s.GET("/alipay", pay)
	s.GET("/callback", callback)
	s.POST("/notify", notify)
	// Bug fix: the original ignored the error from Run, silently hiding
	// failures such as the port already being in use.
	if err = s.Run(":" + kServerPort); err != nil {
		log.Println("启动服务失败", err)
	}
}
// pay builds a desktop-website trade request with a fresh trade number and
// redirects the browser to the alipay payment page.
func pay(c *gin.Context) {
	var tradeNo = fmt.Sprintf("%d", xid.Next())
	var p = alipay.TradePagePay{}
	p.NotifyURL = kServerDomain + "/notify"
	p.ReturnURL = kServerDomain + "/callback"
	p.Subject = "支付测试:" + tradeNo
	p.OutTradeNo = tradeNo
	p.TotalAmount = "10.00"
	p.ProductCode = "FAST_INSTANT_TRADE_PAY"
	url, err := aliClient.TradePagePay(p)
	// Bug fix: the original discarded this error and dereferenced url,
	// which panics when TradePagePay fails.
	if err != nil {
		log.Println("创建支付请求发生错误", err)
		c.String(http.StatusInternalServerError, "创建支付请求发生错误")
		return
	}
	c.Redirect(http.StatusTemporaryRedirect, url.String())
}
// callback handles the synchronous browser redirect from alipay after
// payment and confirms the order state with a TradeQuery.
func callback(c *gin.Context) {
	// Bug fix: the original ignored the ParseForm error.
	if err := c.Request.ParseForm(); err != nil {
		c.String(http.StatusBadRequest, "解析请求参数发生错误: %s", err.Error())
		return
	}
	// Signature verification is intentionally left disabled in this demo.
	//ok, err := aliClient.VerifySign(c.Request.Form)
	//if err != nil {
	//	log.Println("回调验证签名发生错误", err)
	//	return
	//}
	//
	//if ok == false {
	//	log.Println("回调验证签名未通过")
	//	return
	//}
	var outTradeNo = c.Request.Form.Get("out_trade_no")
	var p = alipay.TradeQuery{}
	p.OutTradeNo = outTradeNo
	rsp, err := aliClient.TradeQuery(p)
	if err != nil {
		c.String(http.StatusBadRequest, "验证订单 %s 信息发生错误: %s", outTradeNo, err.Error())
		return
	}
	if !rsp.IsSuccess() {
		c.String(http.StatusBadRequest, "验证订单 %s 信息发生错误: %s-%s", outTradeNo, rsp.Content.Msg, rsp.Content.SubMsg)
		return
	}
	c.String(http.StatusOK, "订单 %s 支付成功", outTradeNo)
}
// notify handles alipay's asynchronous server-to-server notification:
// verifies the signature, re-queries the order, and acknowledges with the
// literal "success" so alipay stops retrying the notification.
func notify(c *gin.Context) {
	// Bug fix: the original ignored the ParseForm error.
	if err := c.Request.ParseForm(); err != nil {
		log.Println("解析异步通知参数发生错误", err)
		return
	}
	ok, err := aliClient.VerifySign(c.Request.Form)
	if err != nil {
		log.Println("异步通知验证签名发生错误", err)
		return
	}
	if !ok {
		log.Println("异步通知验证签名未通过")
		return
	}
	log.Println("异步通知验证签名通过")
	var outTradeNo = c.Request.Form.Get("out_trade_no")
	var p = alipay.TradeQuery{}
	p.OutTradeNo = outTradeNo
	rsp, err := aliClient.TradeQuery(p)
	if err != nil {
		log.Printf("异步通知验证订单 %s 信息发生错误: %s \n", outTradeNo, err.Error())
		return
	}
	if !rsp.IsSuccess() {
		log.Printf("异步通知验证订单 %s 信息发生错误: %s-%s \n", outTradeNo, rsp.Content.Msg, rsp.Content.SubMsg)
		return
	}
	log.Printf("订单 %s 支付成功 \n", outTradeNo)
	// Per alipay's async-notify protocol the endpoint must respond with the
	// body "success", otherwise alipay keeps re-sending the notification.
	// The original never wrote a response body.
	c.String(http.StatusOK, "success")
}
|
package collections
import "time"
// KeyObj pairs a key with the last time it was touched, for LRU-style
// bookkeeping.
type KeyObj struct {
	Key string
	lru time.Time // last-access timestamp; mutated via UpdateTime
}
// NewKeyObj builds a KeyObj for key, stamped with the current time.
func NewKeyObj(key string) *KeyObj {
	obj := KeyObj{Key: key, lru: time.Now()}
	return &obj
}
// IdleTime reports how long it has been since the key was last touched.
func (k *KeyObj) IdleTime() time.Duration {
	// time.Since(t) is the idiomatic shorthand for time.Now().Sub(t).
	return time.Since(k.lru)
}
// UpdateTime refreshes the last-access timestamp to the current time.
func (k *KeyObj) UpdateTime() {
	k.lru = time.Now()
}
// LruTime returns the raw last-access timestamp.
func (k *KeyObj) LruTime() time.Time {
	return k.lru
}
|
package main
// trailingZeroes returns the number of trailing zeros in n! (Leetcode 172,
// easy). Every trailing zero comes from a factor 10 = 2*5, and factors of 5
// are the scarce ones, so count multiples of 5, 25, 125, ... by repeatedly
// dividing by 5. Non-positive n yields 0.
func trailingZeroes(n int) int {
	zeros := 0
	for quotient := n / 5; quotient > 0; quotient /= 5 {
		zeros += quotient
	}
	return zeros
}
|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"github.com/uber/kraken/lib/dockerregistry"
"github.com/uber/kraken/lib/store"
"github.com/uber/kraken/lib/upstream"
"github.com/uber/kraken/metrics"
"github.com/uber/kraken/nginx"
"github.com/uber/kraken/proxy/registryoverride"
"github.com/uber/kraken/utils/httputil"
"go.uber.org/zap"
)
// Config defines proxy configuration.
// Each field maps to a top-level key of the proxy's YAML configuration file
// via the yaml struct tags.
type Config struct {
	CAStore store.CAStoreConfig `yaml:"castore"`
	Registry dockerregistry.Config `yaml:"registry"`
	BuildIndex upstream.ActiveConfig `yaml:"build_index"`
	Origin upstream.ActiveConfig `yaml:"origin"`
	ZapLogging zap.Config `yaml:"zap"`
	Metrics metrics.Config `yaml:"metrics"`
	RegistryOverride registryoverride.Config `yaml:"registryoverride"`
	Nginx nginx.Config `yaml:"nginx"`
	TLS httputil.TLSConfig `yaml:"tls"`
}
|
package wire
// RepoAccessQuery carries a user name and repository path for an access
// check.
type RepoAccessQuery struct {
	User string
	Path string
}

// RepoAccessInfo is the answer to a RepoAccessQuery for Path.
// NOTE(review): Push presumably indicates push (write) permission — confirm
// against the access checker that consumes this type.
type RepoAccessInfo struct {
	Path string
	Push bool
}

// CreateRepo carries the parameters for creating a new repository.
type CreateRepo struct {
	Name string
	Description string
	Public bool
}

// Repo is used to export basic information about a repository.
// Public states whether a repository is publicly available.
// Shared states whether a repository is shared with a collaborator.
type Repo struct {
	Name string
	Owner string
	Description string
	Head string
	Public bool
	Shared bool
}

// Branch pairs a branch name with the commit it points at.
type Branch struct {
	Name string
	Commit string
}

// GitHook describes a git hook invocation: the hook name, its arguments,
// the repository it fired in, and the ref update lines it received.
type GitHook struct {
	Name string `json:"name"`
	HookArgs []string `json:"hookargs,omitempty"`
	RepoPath string `json:"repopath"`
	RefLines []RefLine `json:"ref_lines,omitempty"`
}

// RefLine is one ref update reported to a git hook: the old and new object
// ids and the ref name being updated.
type RefLine struct {
	OldRef string `json:"oldref"`
	NewRef string `json:"newref"`
	RefName string `json:"refname"`
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.