text stringlengths 11 4.05M |
|---|
package main
import (
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"time"
mqtt "github.com/eclipse/paho.mqtt.golang"
"github.com/jakebailey/irc"
flags "github.com/jessevdk/go-flags"
"github.com/joho/godotenv"
yaml "gopkg.in/yaml.v2"
)
// twitchIRC is the TLS endpoint for Twitch chat.
const twitchIRC = "irc.chat.twitch.tv:6697"

// Validation errors returned by Connection.validate.
var (
	errEmptyNick       = errors.New("empty nick")
	errEmptyPass       = errors.New("empty pass")
	errNonOauthPass    = errors.New("pass did not start with oauth")
	errBadTopics       = errors.New("pub and sub topics are the same or empty")
	errBadQOS          = errors.New("invalid QOS")
	errChannelsNoTopic = errors.New("channels provided without publish topic")
	errEmptyChannel    = errors.New("empty channel name")
)

// args holds the command line / environment configuration parsed by go-flags.
var args = struct {
	MQTTBroker string `long:"mqtt-broker" env:"MQTT_BROKER" required:"true"`
	ConfigPath string `long:"config" env:"CONFIG"`
	Debug      bool   `long:"debug" env:"DEBUG" description:"enables debug logging"`
}{
	// Default config location, overridable via --config or CONFIG.
	ConfigPath: "config.yaml",
}
// Config is the top-level YAML configuration: one entry per IRC/MQTT bridge.
type Config struct {
	Connections []*Connection
}
// main wires everything together: it loads .env and flags, parses the YAML
// config, validates every connection, connects to the MQTT broker and runs
// one bridge goroutine per connection until SIGINT.
func main() {
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	// A missing .env file is fine; any other load error is fatal.
	if err := godotenv.Load(); err != nil {
		if !os.IsNotExist(err) {
			log.Fatal(err)
		}
	}
	if _, err := flags.Parse(&args); err != nil {
		os.Exit(1)
	}
	b, err := ioutil.ReadFile(args.ConfigPath)
	if err != nil {
		log.Fatal(err)
	}
	var config Config
	if err := yaml.Unmarshal(b, &config); err != nil {
		log.Fatal(err)
	}
	// Report every invalid connection before exiting, not just the first.
	exit := false
	for i, c := range config.Connections {
		if err := c.validate(); err != nil {
			log.Println(i, err)
			exit = true
		}
	}
	if exit {
		os.Exit(1)
	}
	cOpts := mqtt.NewClientOptions()
	// NOTE(review): math/rand is never seeded here, so on older Go versions
	// the random digit is deterministic; the nanosecond timestamp is what
	// actually makes the client ID unique — confirm this is intended.
	cOpts.SetClientID(fmt.Sprintf("%d%d", time.Now().UnixNano(), rand.Intn(10)))
	cOpts.SetCleanSession(false)
	cOpts.AddBroker(args.MQTTBroker)
	client := mqtt.NewClient(cOpts)
	if t := client.Connect(); t.Wait() && t.Error() != nil {
		log.Fatal(t.Error())
	}
	defer client.Disconnect(0)
	// stop is closed on SIGINT to tell every bridge goroutine to QUIT.
	stop := make(chan struct{})
	wg := &sync.WaitGroup{}
	wg.Add(len(config.Connections))
	for _, c := range config.Connections {
		go c.run(wg, stop, client)
	}
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	<-c
	close(stop)
	wg.Wait()
}
// Connection describes one Twitch IRC login bridged to a pair of MQTT topics.
type Connection struct {
	Nick string // Twitch username
	Pass string // OAuth token, must start with "oauth:"
	// Publish describes where received IRC messages are forwarded to.
	Publish struct {
		Topic    string   // MQTT topic to publish IRC messages on
		QOS      byte     // MQTT QOS (0-2)
		Channels []string // IRC channels to join on connect
	}
	// Subscribe describes which MQTT topic feeds outgoing IRC PRIVMSGs.
	Subscribe struct {
		Topic string // MQTT topic to receive messages to send
		QOS   byte   // MQTT QOS (0-2)
	}
}
// validate reports the first configuration problem found on the
// connection, or nil when the connection is usable.
func (c *Connection) validate() error {
	switch {
	case c.Nick == "":
		return errEmptyNick
	case c.Pass == "":
		return errEmptyPass
	case !strings.HasPrefix(c.Pass, "oauth:"):
		return errNonOauthPass
	case c.Publish.Topic == c.Subscribe.Topic:
		// Also rejects both topics being empty, since "" == "".
		return errBadTopics
	case len(c.Publish.Channels) > 0 && c.Publish.Topic == "":
		return errChannelsNoTopic
	case c.Publish.QOS > 2, c.Subscribe.QOS > 2:
		return errBadQOS
	}
	for _, name := range c.Publish.Channels {
		if name == "" {
			return errEmptyChannel
		}
	}
	return nil
}
// run bridges one Twitch IRC connection to MQTT: it joins the configured
// channels, forwards messages from the subscribe topic to IRC as PRIVMSGs,
// publishes incoming IRC messages to the publish topic, answers PINGs, and
// restarts the whole process on a server-requested RECONNECT. It returns
// when the IRC connection hits EOF (e.g. after the stop-triggered QUIT).
func (c *Connection) run(wg *sync.WaitGroup, stop <-chan struct{}, client mqtt.Client) {
	defer wg.Done()
	// mu serialises writes to conn between this goroutine, the stop
	// goroutine and the MQTT subscription callback.
	var mu sync.Mutex
	conn, err := createIRCConn(c.Nick, c.Pass)
	if err != nil {
		log.Println(err)
		return
	}
	defer conn.Close()
	if err := join(conn, c.Publish.Channels...); err != nil {
		log.Fatal(err)
	}
	// On shutdown, send QUIT so the read loop below sees EOF and exits.
	go func() {
		<-stop
		mu.Lock()
		defer mu.Unlock()
		if err := quit(conn); err != nil {
			log.Fatal(err)
		}
	}()
	if c.Subscribe.Topic != "" {
		log.Printf("subscribing to %s at QOS %d", c.Subscribe.Topic, c.Subscribe.QOS)
		if t := client.Subscribe(c.Subscribe.Topic, c.Subscribe.QOS, func(c mqtt.Client, mq mqtt.Message) {
			// Expected payload: {"channel": "...", "message": "..."}.
			var msg struct {
				Channel string
				Message string
			}
			if err := json.Unmarshal(mq.Payload(), &msg); err != nil {
				log.Println(err)
				return
			}
			if msg.Channel == "" {
				log.Println("empty channel")
				return
			}
			// Normalise the channel name to the IRC "#name" form.
			if msg.Channel[0] != '#' {
				msg.Channel = "#" + msg.Channel
			}
			if msg.Message == "" {
				log.Println("empty message")
				return
			}
			m := &irc.Message{
				Command:  "PRIVMSG",
				Params:   []string{msg.Channel},
				Trailing: msg.Message,
			}
			if args.Debug {
				log.Println("<", m.String())
			}
			mu.Lock()
			defer mu.Unlock()
			if err := conn.Encode(m); err != nil {
				log.Println(err)
			}
		}); t.Wait() && t.Error() != nil {
			log.Fatal(t.Error())
		}
	}
	if c.Publish.Topic != "" {
		log.Printf("publishing to %s at QOS %d", c.Publish.Topic, c.Publish.QOS)
	}
	// Main read loop: decode IRC messages until the connection closes.
	for {
		var m irc.Message
		if err := conn.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			log.Fatal(err)
		}
		// In debug mode log everything; otherwise only unexpected commands.
		if args.Debug {
			log.Println(">", m.Raw)
		} else {
			switch m.Command {
			case "PRIVMSG", "NOTICE", "USERNOTICE", "PING", "CLEARCHAT", "HOSTTARGET":
				// Do nothing.
			default:
				log.Println(">", m.Raw)
			}
		}
		// Answer keepalive PINGs; PONG reuses the PING's params.
		// NOTE(review): this Encode is not guarded by mu like the other
		// writers — confirm concurrent writes cannot interleave here.
		if m.Command == "PING" {
			m.Command = "PONG"
			if err := conn.Encode(&m); err != nil {
				log.Println(err)
			}
			continue
		}
		if c.Publish.Topic != "" {
			b, err := json.Marshal(m)
			if err != nil {
				log.Println(err)
				continue
			}
			t := client.Publish(c.Publish.Topic, c.Publish.QOS, false, b)
			// NOTE(review): t.Error() is read without t.Wait(), so publish
			// failures may be silently missed — confirm this is intended
			// (fire-and-forget).
			if err := t.Error(); err != nil {
				log.Println(err)
			}
		}
		if m.Command == "RECONNECT" {
			log.Println("server sent RECONNECT, restarting process")
			time.Sleep(time.Second)
			restartProcess()
		}
	}
}
// createIRCConn dials Twitch IRC over TLS, authenticates with the given
// nick/pass and requests the Twitch tags and commands capabilities.
// On any error after the dial, the underlying TLS connection is closed
// before returning (the original leaked it on these paths).
func createIRCConn(nick, pass string) (irc.Conn, error) {
	tconn, err := tls.Dial("tcp", twitchIRC, nil)
	if err != nil {
		return nil, err
	}
	conn := irc.NewBaseConn(tconn)
	if err := login(conn, nick, pass); err != nil {
		tconn.Close()
		return nil, err
	}
	if err := capReq(conn,
		"twitch.tv/tags",
		"twitch.tv/commands",
	); err != nil {
		tconn.Close()
		return nil, err
	}
	return conn, nil
}
// login authenticates the IRC connection by sending PASS followed by NICK,
// as Twitch requires that ordering.
func login(conn irc.Encoder, nick, pass string) error {
	if err := conn.Encode(&irc.Message{Command: "PASS", Params: []string{pass}}); err != nil {
		return err
	}
	return conn.Encode(&irc.Message{Command: "NICK", Params: []string{nick}})
}
// capReq requests the given IRC capabilities in a single CAP REQ message.
// It is a no-op when no capabilities are provided.
func capReq(conn irc.Encoder, caps ...string) error {
	if len(caps) == 0 {
		return nil
	}
	req := irc.Message{
		Command:  "CAP",
		Params:   []string{"REQ"},
		Trailing: strings.Join(caps, " "),
	}
	return conn.Encode(&req)
}
// join sends a single JOIN for all the given channels, normalising each
// name to the "#name" form. It is a no-op when no channels are given.
//
// Unlike the original, the caller's slice is not mutated (variadic slices
// alias the caller's backing array), and an empty channel name no longer
// panics on the s[0] index — validate rejects empties upstream, but this
// function no longer depends on that.
func join(conn irc.Encoder, channels ...string) error {
	if len(channels) == 0 {
		return nil
	}
	normalized := make([]string, len(channels))
	for i, s := range channels {
		if s != "" && s[0] != '#' {
			s = "#" + s
		}
		normalized[i] = s
	}
	return conn.Encode(&irc.Message{
		Command: "JOIN",
		Params:  []string{strings.Join(normalized, ",")},
	})
}
// quit sends an IRC QUIT, prompting the server to close the connection.
func quit(conn irc.Encoder) error {
	msg := irc.Message{Command: "QUIT"}
	return conn.Encode(&msg)
}
// Snapshot of the process's argv/env taken at startup so the process can
// re-exec itself with identical arguments on RECONNECT.
var (
	argv0 = os.Args[0]
	argv  = os.Args
	envv  = os.Environ()
)

// restartProcess replaces the current process image with a fresh copy of
// itself via exec(2). It never returns on success; on failure it exits
// via log.Fatal. Unix-only (uses syscall.Exec).
func restartProcess() {
	log.Fatal(syscall.Exec(argv0, argv, envv))
}
|
package day3
import (
ds "aoc/datastructures"
"aoc/util"
)
// rucksack wraps one puzzle input line (Advent of Code 2022 day 3); the
// line's two halves are its compartments.
type rucksack struct {
	original string
}

// comp1 returns the first compartment (first half of the line).
func (r *rucksack) comp1() string {
	mid := len(r.original) / 2
	return r.original[:mid]
}

// comp2 returns the second compartment (second half of the line).
// For odd lengths comp2 gets the extra character.
func (r *rucksack) comp2() string {
	mid := len(r.original) / 2
	return r.original[mid:]
}

// newRucksack builds a rucksack from one input line.
func newRucksack(input string) rucksack {
	return rucksack{input}
}

// FindDupe returns the item (rune) present in both compartments.
// If several duplicates existed, an arbitrary one would be returned
// (map iteration order); puzzle inputs guarantee exactly one.
func (r *rucksack) FindDupe() rune {
	a := ds.NewSet(util.ConvertToRunes(r.comp1()))
	b := ds.NewSet(util.ConvertToRunes(r.comp2()))
	dupes := a.Intersection(b)
	var dupe rune
	for key := range dupes {
		dupe = key
	}
	return dupe
}
// key lists items in priority order: position+1 is the item's priority
// (a-z = 1-26, A-Z = 27-52).
var key = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

// Part1 sums, over all rucksacks, the priority of the item duplicated
// between the rucksack's two compartments.
func Part1(input []string) int {
	total := 0
	for _, line := range input {
		sack := newRucksack(line)
		wanted := sack.FindDupe()
		for idx, item := range key {
			if item == wanted {
				total += idx + 1
				break
			}
		}
	}
	return total
}
// Part2 processes rucksacks in groups of three and sums the priority of
// each group's common item (the badge). Input length is assumed to be a
// multiple of three, as the puzzle guarantees.
func Part2(input []string) int {
	total := 0
	for i := 0; i < len(input); i += 3 {
		common := ds.NewSet(util.ConvertToRunes(input[i])).
			Intersection(ds.NewSet(util.ConvertToRunes(input[i+1]))).
			Intersection(ds.NewSet(util.ConvertToRunes(input[i+2])))
		var badge rune
		for r := range common {
			badge = r
		}
		for idx, item := range key {
			if item == badge {
				total += idx + 1
				break
			}
		}
	}
	return total
}
|
package database
import (
"FPproject/Backend/log"
"FPproject/Backend/models"
"time"
"github.com/google/uuid"
)
// InsertFood inserts a new food row with a generated UUID and returns the
// new id. Created/updated timestamps are both set to now.
//
// Bug fix: the VALUES list had only nine placeholders for ten columns
// (and ten bound arguments), so every insert failed with a column-count
// mismatch; a tenth '?' has been added.
func (d *Database) InsertFood(f models.Food) (string, error) {
	id := uuid.New().String()
	res, err := d.db.Exec("INSERT INTO food(id, merchant_id, name, price, status, description, imglink, calories, created, updated) VALUES(?,?,?,?,?,?,?,?,?,?)",
		id, f.MerchantID, f.Name, f.Price, f.Status, f.Description, f.ImgLink, f.Calories, time.Now(), time.Now())
	if err != nil {
		log.Warning.Println(err)
		return "", err
	}
	affected, err := res.RowsAffected()
	if err != nil {
		log.Warning.Println(err)
		return "", err
	} else if affected < 1 {
		log.Warning.Println(ErrNoRowsAffected)
		return "", ErrNoRowsAffected
	}
	return id, nil
}
// DelFood deletes the food row with the given id and echoes the id back.
// ErrNoRowsAffected is returned when no row matched.
func (d *Database) DelFood(id string) (string, error) {
	result, err := d.db.Exec("DELETE FROM food WHERE id=?", id)
	if err != nil {
		log.Warning.Println(err)
		return "", err
	}
	n, err := result.RowsAffected()
	if err != nil {
		log.Warning.Println(err)
		return "", err
	}
	if n < 1 {
		log.Warning.Println(ErrNoRowsAffected)
		return "", ErrNoRowsAffected
	}
	return id, nil
}
// UpdateFood rewrites the mutable columns of the food row identified by
// f.ID (merchant_id and created are intentionally left untouched) and
// returns the id. ErrNoRowsAffected is returned when no row matched.
func (d *Database) UpdateFood(f models.Food) (string, error) {
	result, err := d.db.Exec("UPDATE food SET name=?, price=?, status=?, description=?, imglink=?, calories=?,updated=? WHERE id=?",
		f.Name, f.Price, f.Status, f.Description, f.ImgLink, f.Calories, time.Now(), f.ID)
	if err != nil {
		log.Warning.Println(err)
		return "", err
	}
	n, err := result.RowsAffected()
	if err != nil {
		log.Warning.Println(err)
		return "", err
	}
	if n < 1 {
		log.Warning.Println(ErrNoRowsAffected)
		return "", ErrNoRowsAffected
	}
	return f.ID, nil
}
// GetFood loads a single food row by id.
// NOTE(review): SELECT * relies on the table's column order matching the
// Scan destination order exactly — a schema change (added/reordered
// column) silently breaks this; consider listing columns explicitly.
func (d *Database) GetFood(id string) (models.Food, error) {
	var f models.Food
	err := d.db.QueryRow("SELECT * FROM food WHERE id=?", id).Scan(&f.ID,
		&f.MerchantID, &f.Name, &f.Price, &f.Status, &f.Description, &f.ImgLink, &f.Calories, &f.Created, &f.Updated)
	if err != nil {
		log.Warning.Println(err)
		return f, err
	}
	return f, nil
}
// GetFoodByMerchant returns all food rows belonging to the given merchant.
//
// Fixes over the original: the rows cursor is now always closed (it was
// leaked on every path) and rows.Err() is checked after iteration so
// errors that abort the cursor mid-stream are no longer silently dropped.
func (d *Database) GetFoodByMerchant(id string) ([]models.Food, error) {
	r, err := d.db.Query("SELECT * FROM food WHERE merchant_id=?", id)
	if err != nil {
		log.Warning.Println(err)
		return nil, err
	}
	defer r.Close()
	var foods []models.Food
	for r.Next() {
		var food models.Food
		if err := r.Scan(&food.ID, &food.MerchantID, &food.Name, &food.Price, &food.Status, &food.Description, &food.ImgLink, &food.Calories, &food.Created, &food.Updated); err != nil {
			log.Warning.Println(err)
			return nil, err
		}
		foods = append(foods, food)
	}
	if err := r.Err(); err != nil {
		log.Warning.Println(err)
		return nil, err
	}
	return foods, nil
}
|
package problem0215
import "testing"
// TestFindKthLargest checks the classic example: the 4th largest element
// of [3,2,3,1,2,4,5,5,6] is 4. The original test only t.Log'ed the result
// and could never fail; it now asserts the expected value.
func TestFindKthLargest(t *testing.T) {
	got := findKthLargest([]int{3, 2, 3, 1, 2, 4, 5, 5, 6}, 4)
	if got != 4 {
		t.Errorf("findKthLargest(...) = %d, want 4", got)
	}
}
|
package main
import (
"flag"
"fmt"
"os"
"os/signal"
"syscall"
"testing"
log "github.com/Sirupsen/logrus"
"github.com/tzmartin/namedpiper"
)
// upload writes a small object to a GCS bucket with resumable uploads
// enabled.
// NOTE(review): `client` and `ctx` are not defined anywhere in this file,
// and `err` is declared but never checked — this snippet does not compile
// as-is; confirm the missing setup (storage client, context, error check)
// lives elsewhere or needs to be added.
func upload(t *testing.T) {
	/* this is a comment style*/
	w := client.Bucket("sai-corp-dev-session-ingest").Object("obj").NewWriter(ctx)
	w.Resumable = true
	_, err := w.Write([]byte("hello world"))
	t.Log("I can print a special message")
}
package main
import (
"database/sql"
"log"
"fmt"
"github.com/yydzero/mnt/util/reflect"
_ "github.com/lib/pq"
"flag"
"sync"
)
// port is the first PostgreSQL port to probe; count is how many
// consecutive ports to test.
var port int
var count int

// main launches `count` concurrent workers, each exercising a PostgreSQL
// server listening on a consecutive port starting at `port`.
func main() {
	log.SetFlags(log.Ltime | log.Lshortfile)
	flag.IntVar(&port, "p", 5432, "Default port to connect")
	// Fix: the -c usage string was copy-pasted from -p ("Default port to
	// connect"); it now describes what the flag actually controls.
	flag.IntVar(&count, "c", 10, "Number of consecutive ports/servers to test")
	flag.Parse()
	var wg sync.WaitGroup
	for i := 0; i < count; i++ {
		wg.Add(1)
		go testOneServer(&wg, port+i)
	}
	wg.Wait()
}
// testOneServer opens a connection pool to the PostgreSQL server on the
// given localhost port and runs the simple-protocol query test against it.
//
// Fix: the *sql.DB was never closed, leaking the pool for each worker;
// it is now closed when the worker finishes. Note sql.Open does not dial —
// the first query does.
func testOneServer(wg *sync.WaitGroup, port int) {
	defer wg.Done()
	// Now use lib/pq to send some info.
	url := fmt.Sprintf("user=%s dbname=test host=localhost port=%d sslmode=disable", reflect.GetCurrentUsername(), port)
	db, err := sql.Open("postgres", url)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	testSimpleQuery(db)
	//testExtendedQuery(db)
}
// testSimpleQuery runs a fixed SELECT via the simple query protocol and
// drains the result set, failing the process on any error.
func testSimpleQuery(db *sql.DB) {
	// Simple Query
	// NOTE(review): age is declared here but immediately overwritten by
	// every Scan below and never read — it appears to be left over from
	// the parameterised variant (testExtendedQuery); confirm.
	age := 27
	rows, err := db.Query("SELECT name, age, description FROM users WHERE age>20")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	// Iterate Query Result.
	for rows.Next() {
		var name string
		var desc string
		if err := rows.Scan(&name, &age, &desc); err != nil {
			log.Fatal(err)
		}
		//fmt.Printf("%s is %d, %q\n", name, age, desc)
	}
	// Surface any error that terminated iteration early.
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
// testExtendedQuery runs the same SELECT via the extended (parameterised)
// protocol, binding the age threshold as $1, and prints each row.
func testExtendedQuery(db *sql.DB) {
	age := 20
	rows, err := db.Query("SELECT name, age, description FROM users WHERE age > $1", age)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	// Iterate Query Result.
	for rows.Next() {
		var (
			name string
			desc string
		)
		if scanErr := rows.Scan(&name, &age, &desc); scanErr != nil {
			log.Fatal(scanErr)
		}
		fmt.Printf("%s is %d, %q\n", name, age, desc)
	}
	if iterErr := rows.Err(); iterErr != nil {
		log.Fatal(iterErr)
	}
}
|
/*
Package api provides tools to handle API requests. The primary goal is to
support HTTP API endpoints.
*/
package api
|
// Copyright (c) 2015 RightScale, Inc. - see LICENSE
package main
// Omega: Alt+937
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"regexp"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/ghttp"
"github.com/rightscale/rsc/recording"
)
// Iterate through all recorded test cases and play them back
// Iterate through all recorded test cases and play them back
var _ = Describe("Recorded request", func() {
	// Open the recording file
	f, err := os.Open("recorder/recording.json")
	if err != nil {
		fmt.Fprintf(os.Stdout, "Cannot open recording: %s\n", err.Error())
		os.Exit(1)
	}
	decoder := json.NewDecoder(f)
	// Iterate through test cases
	for {
		var testCase recording.Recording
		err := decoder.Decode(&testCase)
		if err == io.EOF {
			break
		} else if err != nil {
			// NOTE(review): a decode error is logged but the loop still
			// registers an It() with a partially-populated testCase —
			// confirm whether this should `break` instead.
			fmt.Fprintf(os.Stderr, "JSON decode: %s\n", err.Error())
		}
		// Perform the test by running main() with the command line args set.
		// NOTE(review): the It closure captures the loop variable testCase;
		// because recording.Recording is a value re-declared each iteration
		// this is safe here, but verify if the declaration is ever hoisted.
		It(strings.Join(testCase.CmdArgs, " "), func() {
			//server := httptest.NewTLSServer(http.HandlerFunc(handler))
			server := ghttp.NewServer()
			defer server.Close()
			// construct list of verifiers: split the recorded URI into
			// path (url[1]) and query string (url[2]).
			url := regexp.MustCompile(`https?://[^/]+(/[^?]+)\??(.*)`).
				FindStringSubmatch(testCase.RR.URI)
			//fmt.Fprintf(os.Stderr, "URL: %#v\n", url)
			handlers := []http.HandlerFunc{
				ghttp.VerifyRequest(testCase.RR.Verb, url[1], url[2]),
			}
			if len(testCase.RR.ReqBody) > 0 {
				handlers = append(handlers,
					ghttp.VerifyJSON(testCase.RR.ReqBody))
			}
			// Verify every recorded request header is reproduced.
			for k := range testCase.RR.ReqHeader {
				handlers = append(handlers,
					ghttp.VerifyHeaderKV(k, testCase.RR.ReqHeader.Get(k)))
			}
			respHeader := make(http.Header)
			for k, v := range testCase.RR.RespHeader {
				respHeader[k] = v
			}
			handlers = append(handlers,
				ghttp.RespondWith(testCase.RR.Status, testCase.RR.RespBody,
					respHeader))
			server.AppendHandlers(ghttp.CombineHandlers(handlers...))
			// Point the CLI at the fake server and replay the recorded args.
			os.Args = append([]string{
				"rsc", "--noAuth", "--dump", "debug",
				"--host", strings.TrimPrefix(server.URL(), "http://")},
				testCase.CmdArgs...)
			//fmt.Fprintf(os.Stderr, "testing \"%s\"\n", strings.Join(os.Args, `" "`))
			// capture stdout and intercept calls to osExit
			stdoutBuf := bytes.Buffer{}
			SetOutput(&stdoutBuf)
			exitCode := 99
			osExit = func(code int) { exitCode = code }
			stderrBuf := bytes.Buffer{}
			SetErrorOutput(&stderrBuf)
			main()
			// Verify that stdout and the exit code are correct
			//fmt.Fprintf(os.Stderr, "Exit %d %d\n", exitCode, testCase.ExitCode)
			//fmt.Fprintf(os.Stderr, "stdout got <<%q>>\n expected <<%q>>\n",
			//	stdoutBuf.String(), testCase.Stdout)
			//fmt.Fprintf(os.Stderr, "stderr got <<%q>>\n", stderrBuf.String())
			Ω(exitCode).Should(Equal(testCase.ExitCode), "Exit code doesn't match")
			Ω(stdoutBuf.String()).Should(Equal(testCase.Stdout), "Stdout doesn't match")
		})
	}
})
|
package main
import (
"fmt"
"log"
"os"
"github.com/Azure/azure-sdk-for-go/services/eventgrid/mgmt/2018-01-01/eventgrid"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
)
// Azure connection defaults and credentials. Note that getEnvVarOrExit is
// evaluated at package init, so a missing variable terminates the process
// before main runs.
var (
	defaultLocation                = "westeurope"
	defaultActiveDirectoryEndpoint = azure.PublicCloud.ActiveDirectoryEndpoint
	defaultResourceManagerEndpoint = azure.PublicCloud.ResourceManagerEndpoint
	subscriptionID                 = getEnvVarOrExit("AZ_SUBSCRIPTION_ID")
	tenantID                       = getEnvVarOrExit("AZ_TENANT_ID")
	clientID                       = getEnvVarOrExit("AZ_CLIENT_ID")
	clientSecret                   = getEnvVarOrExit("AZ_CLIENT_SECRET")
)
// getEventGridClient builds an EventGrid subscriptions client authorised
// via a service-principal OAuth token (client-credentials flow) for the
// configured subscription. On error the zero-value client is returned
// alongside the wrapped error.
func getEventGridClient() (eventgrid.EventSubscriptionsClient, error) {
	var subscriptionsClient eventgrid.EventSubscriptionsClient
	oAuthConfig, err := adal.NewOAuthConfig(defaultActiveDirectoryEndpoint, tenantID)
	if err != nil {
		return subscriptionsClient, fmt.Errorf("cannot get oauth config: %v", err)
	}
	token, err := adal.NewServicePrincipalToken(*oAuthConfig, clientID, clientSecret, defaultResourceManagerEndpoint)
	if err != nil {
		return subscriptionsClient, fmt.Errorf("cannot get service principal token: %v", err)
	}
	subscriptionsClient = eventgrid.NewEventSubscriptionsClient(subscriptionID)
	subscriptionsClient.Authorizer = autorest.NewBearerAuthorizer(token)
	return subscriptionsClient, nil
}
// getEnvVarOrExit returns the value of the named environment variable,
// terminating the process via log.Fatalf when it is unset or empty.
func getEnvVarOrExit(varName string) string {
	v := os.Getenv(varName)
	if v == "" {
		log.Fatalf("missing environment variable %s\n", varName)
	}
	return v
}
|
package main
// shipWithinDays returns the minimum ship capacity required to ship all
// packages, in the given order, within `days` days (LeetCode 1011). It
// binary-searches the smallest capacity for which a greedy packing fits.
func shipWithinDays(weights []int, days int) int {
	// feasible reports whether a ship of the given capacity can carry
	// everything, in order, within the allowed number of days.
	feasible := func(capacity int) bool {
		load, used := 0, 1
		for _, w := range weights {
			load += w
			if load > capacity {
				// Start a new day carrying just this package.
				load = w
				used++
				if used > days {
					return false
				}
			}
		}
		return used <= days
	}
	// The answer lies between the heaviest single package (lower bound:
	// anything less cannot carry it) and the total weight (upper bound:
	// everything ships in one day).
	lo, hi := 0, 0
	for _, w := range weights {
		if w > lo {
			lo = w
		}
		hi += w
	}
	// Standard lower-bound binary search for the first feasible capacity.
	for lo < hi {
		mid := (lo + hi) >> 1
		if feasible(mid) {
			hi = mid
		} else {
			lo = mid + 1
		}
	}
	return lo
}
|
// laser framework types
package types
//laser config struct
// LaserConfig is the top-level configuration for the laser framework.
type LaserConfig struct {
	Connection ConnectionInfo
}

// ConnectionInfo holds the database connection parameters.
type ConnectionInfo struct {
	Server   string // database host (and optionally port)
	Database string // database name
	User     string // login user
	Password string // login password
}
|
package main
import (
"context"
"database/sql"
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
_ "github.com/go-sql-driver/mysql"
"gopkg.in/yaml.v3"
"git.scc.kit.edu/sdm/lsdf-checksum/scaleadpt"
)
// determineFSSubpath resolves rootDir relative to the mount root of the
// named (Spectrum Scale) filesystem. It returns (subpath, mountRoot, err).
// NOTE(review): if rootDir lies outside mountRoot, filepath.Rel returns a
// path containing ".." components rather than an error — confirm callers
// tolerate that.
func determineFSSubpath(filesystemName, rootDir string) (string, string, error) {
	fs := scaleadpt.OpenFileSystem(filesystemName)
	mountRoot, err := fs.GetMountRoot()
	if err != nil {
		return "", "", fmt.Errorf("determineFSSubpath: %w", err)
	}
	subpath, err := filepath.Rel(mountRoot, rootDir)
	if err != nil {
		return "", "", fmt.Errorf("determineFSSubpath: %w", err)
	}
	return subpath, mountRoot, nil
}
// lookupBin searches the PATH-style list extraPath for a regular file
// named name; when none is found it falls back to exec.LookPath, whose
// result (or error) is returned unchanged.
func lookupBin(name string, extraPath string) (string, error) {
	for _, dir := range filepath.SplitList(extraPath) {
		candidate := filepath.Join(dir, name)
		info, err := os.Stat(candidate)
		switch {
		case os.IsNotExist(err):
			continue
		case err != nil:
			return "", fmt.Errorf("lookupBin: stat on %s: %w", candidate, err)
		case info.Mode().IsRegular():
			return candidate, nil
		}
	}
	return exec.LookPath(name)
}
// configSupplements carries values derived from the user Config at startup:
// resolved helper binary paths, the filesystem mount root, the assembled
// checksum run configuration and (when clearing is requested) database
// connection details.
type configSupplements struct {
	ChecksumBin    string // resolved lsdf-checksum-master binary
	ChtreeBin      string // resolved chtree binary
	GentreeBin     string // resolved gentree binary (empty when gentree is skipped)
	FilesystemRoot string // mount root of the target filesystem
	ChecksumConfig checksumConfig
	DataSourceName string // DSN for clearDatabase (only set with ClearDatabase)
	Driver         string // SQL driver name for clearDatabase
}
// prepareConfigSupplements derives all runtime values from config: it
// resolves the helper binaries on BinPath, computes the filesystem subpath
// of RootDir, assembles the checksum run config, and — when the database
// is to be cleared — extracts driver/DSN from the checksum base config.
func prepareConfigSupplements(config *Config) (*configSupplements, error) {
	var cs configSupplements
	var err error
	// gentree is only needed when the initial tree generation runs.
	if !config.Gentree.Skip {
		cs.GentreeBin, err = lookupBin("gentree", config.BinPath)
		if err != nil {
			return nil, fmt.Errorf("prepareConfigSupplements: %w", err)
		}
	}
	cs.ChtreeBin, err = lookupBin("chtree", config.BinPath)
	if err != nil {
		return nil, fmt.Errorf("prepareConfigSupplements: %w", err)
	}
	cs.ChecksumBin, err = lookupBin("lsdf-checksum-master", config.BinPath)
	if err != nil {
		return nil, fmt.Errorf("prepareConfigSupplements: %w", err)
	}
	subpath, filesystemRoot, err := determineFSSubpath(config.FilesystemName, config.RootDir)
	if err != nil {
		return nil, fmt.Errorf("prepareConfigSupplements: %w", err)
	}
	cs.FilesystemRoot = filesystemRoot
	cs.ChecksumConfig = checksumConfig{
		BaseConfigPath:    config.Checksum.BaseConfig,
		FileSystemName:    config.FilesystemName,
		FileSystemSubpath: subpath,
		CoverDir:          config.Checksum.CoverDir,
	}
	if config.ClearDatabase {
		// Read db.driver / db.datasourcename out of the checksum base
		// config; the driver defaults to "mysql".
		m, err := loadConfigFileIntoMap(config.Checksum.BaseConfig)
		if err != nil {
			return nil, fmt.Errorf("prepareConfigSupplements: %w", err)
		}
		cs.Driver = m.Get("db.driver").Str("mysql")
		cs.DataSourceName = m.Get("db.datasourcename").Str()
		if cs.DataSourceName == "" {
			return nil, fmt.Errorf("prepareConfigSupplements: no db.datasourcename available in checksum config")
		}
	}
	return &cs, nil
}
// clearDatabase drops every table in the database selected by the DSN.
// It temporarily disables foreign key checks so tables can be dropped in
// arbitrary order.
// NOTE(review): on MySQL, DDL statements like DROP TABLE cause an implicit
// commit, so the surrounding transaction (and its deferred Rollback) does
// not actually make this atomic — confirm that partial drops on failure
// are acceptable.
func clearDatabase(driver, dataSourceName string) error {
	db, err := sql.Open(driver, dataSourceName)
	if err != nil {
		return fmt.Errorf("clearDatabase: open database: %w", err)
	}
	defer db.Close()
	ctx := context.Background()
	tx, err := db.BeginTx(ctx, &sql.TxOptions{})
	if err != nil {
		return fmt.Errorf("clearDatabase: begin txn: %w", err)
	}
	defer tx.Rollback()
	// Determine the schema named in the DSN; fail if none was selected.
	var databaseName sql.NullString
	row := tx.QueryRowContext(ctx, "SELECT DATABASE();")
	err = row.Scan(&databaseName)
	if err != nil {
		return fmt.Errorf("clearDatabase: retrieve: %w", err)
	}
	if !databaseName.Valid {
		return fmt.Errorf("clearDatabase: No database used in dataSourceName")
	}
	// Collect all table names of the schema from information_schema.
	var tableNames []string
	rows, err := tx.QueryContext(ctx, "SELECT TABLE_NAME FROM information_schema.tables WHERE TABLE_SCHEMA=?;", databaseName.String)
	if err != nil {
		return fmt.Errorf("clearDatabase: query table names: %w", err)
	}
	for rows.Next() {
		var tableName string
		err = rows.Scan(&tableName)
		if err != nil {
			break
		}
		tableNames = append(tableNames, tableName)
	}
	if err != nil {
		_ = rows.Close()
		return fmt.Errorf("clearDatabase: iterate over rows: %w", err)
	}
	err = rows.Close()
	if err != nil {
		return fmt.Errorf("clearDatabase: close rows iterator: %w", err)
	}
	_, err = tx.ExecContext(ctx, "SET FOREIGN_KEY_CHECKS = 0;")
	if err != nil {
		return fmt.Errorf("clearDatabase: disable foreign key checks: %w", err)
	}
	// Table names cannot be bound as placeholders, hence the string
	// concatenation; the names come from information_schema, not user input.
	for _, name := range tableNames {
		log.Println("dropping table", name)
		_, err = tx.ExecContext(ctx, "DROP TABLE IF EXISTS "+name+";")
		if err != nil {
			return fmt.Errorf("clearDatabase: drop table %s: %w", name, err)
		}
	}
	_, err = tx.ExecContext(ctx, "SET FOREIGN_KEY_CHECKS = 1;")
	if err != nil {
		return fmt.Errorf("clearDatabase: enable foreign key checks: %w", err)
	}
	err = tx.Commit()
	if err != nil {
		return fmt.Errorf("clearDatabase: commit txn: %w", err)
	}
	err = db.Close()
	if err != nil {
		return fmt.Errorf("clearDatabase: close database: %w", err)
	}
	return nil
}
// PerformTest is the top-level driver: it prepares derived configuration,
// optionally clears the database, runs the initialisation phase (gentree
// plus an initial full checksum run) and then executes each configured
// step in order, aborting on the first error.
func PerformTest(config *Config) error {
	cs, err := prepareConfigSupplements(config)
	if err != nil {
		// Fix: the original message said "gentree returned error" here,
		// copy-pasted from elsewhere; this failure comes from preparing
		// the config supplements.
		log.Printf("preparing config supplements returned error, aborting: %v", err)
		return err
	}
	if config.ClearDatabase {
		log.Println("Starting clearing of database")
		err = clearDatabase(cs.Driver, cs.DataSourceName)
		log.Println("Finished clearing of database")
		if err != nil {
			log.Printf("clearing of database returned error, aborting: %v", err)
			return err
		}
	}
	log.Println("Starting initialisation (gentree, initial checksum run)")
	err = performTestInitialisation(config, cs)
	log.Println("Finished initialisation")
	if err != nil {
		log.Printf("Error during initialisation: %v", err)
		return err
	}
	for i, step := range config.Steps {
		stepName := fmt.Sprintf("#%d", i+1)
		log.Printf("Starting step %s", stepName)
		err = performTestStep(stepName, config, step, cs)
		log.Printf("Finished step %s", stepName)
		if err != nil {
			log.Printf("Error during step %s, aborting: %v", stepName, err)
			return err
		}
	}
	return nil
}
// assertWarningsMatchChanges verifies that the set of checksum warnings is
// exactly the set of corruption-only changes. Change paths are rebased from
// warningsRootDir onto an absolute filesystem path before comparison.
// Duplicates on either side are errors.
func assertWarningsMatchChanges(warningsRootDir string, warnings []checksumWarning, changes []changedFile) error {
	// Build the expected set: corruption-only changes, keyed by their
	// path relative to warningsRootDir, rooted at "/".
	expected := make(map[string]struct{})
	for _, change := range changes {
		if !change.CorruptedOnly {
			continue
		}
		rel, err := filepath.Rel(warningsRootDir, change.Path)
		if err != nil {
			return fmt.Errorf("assertWarningsMatchChanges: relative path from changes: %w", err)
		}
		key := filepath.Join("/", rel)
		if _, dup := expected[key]; dup {
			return fmt.Errorf("assertWarningsMatchChanges: Duplicate path in changes: %s", key)
		}
		expected[key] = struct{}{}
	}
	// Build the observed set from the warnings, rejecting duplicates.
	observed := make(map[string]struct{})
	for _, warning := range warnings {
		if _, dup := observed[warning.Path]; dup {
			return fmt.Errorf("assertWarningsMatchChanges: Duplicate path in warnings: %s", warning.Path)
		}
		observed[warning.Path] = struct{}{}
	}
	// Every warning must correspond to a change...
	for path := range observed {
		if _, ok := expected[path]; !ok {
			return fmt.Errorf("assertWarningsMatchChanges: warning not in changes: %s", path)
		}
		delete(expected, path)
	}
	// ...and no change may be left without a warning.
	for path := range expected {
		return fmt.Errorf("assertWarningsMatchChanges: change not in warnings: %s", path)
	}
	return nil
}
// performTestInitialisation runs the initial phase: gentree (unless
// skipped) followed by a full checksum run, and asserts that the fresh
// run produced no warnings.
func performTestInitialisation(config *Config, cs *configSupplements) error {
	var err error
	if !config.Gentree.Skip {
		log.Println("Starting initial gentree")
		err = runGentree(cs.GentreeBin, gentreeConfig{
			BaseConfigPath: config.Gentree.BaseConfig,
			RootDir:        config.RootDir,
		})
		log.Println("Finished initial gentree")
		if err != nil {
			log.Printf("gentree returned error, aborting: %v", err)
			return err
		}
	} else {
		log.Println("Skipping initial gentree as per configuration")
	}
	log.Println("Starting initial checksum full run")
	var logPath string
	if config.Checksum.LogDir != "" {
		logPath = filepath.Join(config.Checksum.LogDir, "checksum.log.initial.full.log")
	}
	var raceLogPath string
	if config.Checksum.RaceLogDir != "" {
		raceLogPath = filepath.Join(config.Checksum.RaceLogDir, "checksum.race_log.initial.full")
	}
	err = runChecksumRun(cs.ChecksumBin, checksumRunConfig{
		ChecksumConfig: cs.ChecksumConfig,
		RunMode:        CRSM_FULL,
		LogPath:        logPath,
		RaceLogPath:    raceLogPath,
	})
	log.Println("Finished initial checksum full run")
	if err != nil {
		log.Printf("Checksum full run returned error, aborting: %v", err)
		return err
	}
	// TODO: Check that run was created and finished
	warnings, err := runChecksumWarnings(cs.ChecksumBin, checksumWarningsConfig{
		ChecksumConfig: cs.ChecksumConfig,
		OnlyLastRun:    true,
	})
	if err != nil {
		log.Printf("checksum warnings returned error, aborting: %v", err)
		return err
	}
	if len(warnings) > 0 {
		// Fix: this is the initial *full* run; the message previously
		// claimed "incremental" (copy-paste error).
		return fmt.Errorf("checksum full run returned warnings")
	}
	return nil
}
// corruptedOnlyCount returns how many of the changes are corruption-only.
// It does not deduplicate paths; duplicates are not expected to occur.
func corruptedOnlyCount(changes []changedFile) int {
	total := 0
	for _, change := range changes {
		if change.CorruptedOnly {
			total++
		}
	}
	return total
}
// performTestStep executes one configured step: mutate/corrupt the tree
// with chtree, optionally run an incremental checksum run (which must
// produce no warnings, since incremental runs skip unmodified corrupted
// files), then run a full checksum run and assert that its warnings match
// exactly the corruption-only changes chtree reported.
func performTestStep(name string, config *Config, step ConfigStep, cs *configSupplements) error {
	var err error
	log.Println("Starting chtree")
	changes, err := runChtree(cs.ChtreeBin, buildChtreeConfig(config.RootDir, step.ChangeLikelihood, step.CorruptLikelihood))
	log.Println("Finished chtree")
	if err != nil {
		log.Printf("chtree returned error, aborting: %v", err)
		return err
	}
	if step.CorruptLikelihood > 0 && corruptedOnlyCount(changes) == 0 {
		log.Printf("WARNING: chtree did not produce any corruptions although corrupt likelihood (is %f) > 0", step.CorruptLikelihood)
	}
	if step.PerformIncrementalRun {
		log.Println("Starting checksum incremental run")
		var logPath string
		if config.Checksum.LogDir != "" {
			logPath = filepath.Join(config.Checksum.LogDir, "checksum.log.step"+name+".incremental.log")
		}
		var raceLogPath string
		if config.Checksum.RaceLogDir != "" {
			raceLogPath = filepath.Join(config.Checksum.RaceLogDir, "checksum.race_log.step"+name+".incremental")
		}
		err = runChecksumRun(cs.ChecksumBin, checksumRunConfig{
			ChecksumConfig: cs.ChecksumConfig,
			RunMode:        CRSM_INCREMENTAL,
			LogPath:        logPath,
			RaceLogPath:    raceLogPath,
		})
		log.Println("Finished checksum incremental run")
		if err != nil {
			log.Printf("Checksum incremental run returned error, aborting: %v", err)
			return err
		}
		// TODO: Check that run was created and finished
		warnings, err := runChecksumWarnings(cs.ChecksumBin, checksumWarningsConfig{
			ChecksumConfig: cs.ChecksumConfig,
			OnlyLastRun:    true,
		})
		if err != nil {
			log.Printf("checksum warnings returned error, aborting: %v", err)
			return err
		}
		if len(warnings) > 0 {
			return fmt.Errorf("checksum incremental run returned warnings")
		}
	}
	log.Println("Starting checksum full run")
	var logPath string
	// Fix: the full run's log and race-log file names previously said
	// ".incremental" (copied from the block above), so a step with both
	// runs enabled overwrote the incremental run's logs. They are now
	// named ".full".
	if config.Checksum.LogDir != "" {
		logPath = filepath.Join(config.Checksum.LogDir, "checksum.log.step"+name+".full.log")
	}
	var raceLogPath string
	if config.Checksum.RaceLogDir != "" {
		raceLogPath = filepath.Join(config.Checksum.RaceLogDir, "checksum.race_log.step"+name+".full")
	}
	err = runChecksumRun(cs.ChecksumBin, checksumRunConfig{
		ChecksumConfig: cs.ChecksumConfig,
		RunMode:        CRSM_FULL,
		LogPath:        logPath,
		RaceLogPath:    raceLogPath,
	})
	log.Println("Finished checksum full run")
	if err != nil {
		log.Printf("Checksum full run returned error, aborting: %v", err)
		return err
	}
	// TODO: Check that run was created and finished
	warnings, err := runChecksumWarnings(cs.ChecksumBin, checksumWarningsConfig{
		ChecksumConfig: cs.ChecksumConfig,
		OnlyLastRun:    true,
	})
	if err != nil {
		log.Printf("checksum warnings returned error, aborting: %v", err)
		return err
	}
	err = assertWarningsMatchChanges(cs.FilesystemRoot, warnings, changes)
	if err != nil {
		log.Printf("Mismatch in checksum warnings: %v", err)
		return err
	}
	return nil
}
// Config is the YAML test-driver configuration.
type Config struct {
	FilesystemName string         `yaml:"filesystem_name"` // Spectrum Scale filesystem to test on
	RootDir        string         `yaml:"root_dir"`        // directory tree under test
	BinPath        string         `yaml:"bin_path"`        // extra PATH-style list searched for helper binaries
	ClearDatabase  bool           `yaml:"clear_database"`  // drop all checksum DB tables before starting
	Gentree        ConfigGentree  `yaml:"gentree"`
	Checksum       ConfigChecksum `yaml:"checksumming"`
	Steps          []ConfigStep   `yaml:"steps"`
}

// ConfigGentree configures the initial tree generation.
type ConfigGentree struct {
	BaseConfig string `yaml:"base_config"`    // path to the gentree base config file
	Skip       bool   `yaml:"skip,omitempty"` // skip generation (tree already exists)
}

// ConfigChecksum configures the checksum runs.
type ConfigChecksum struct {
	BaseConfig string `yaml:"base_config"`  // path to the checksum base config file
	LogDir     string `yaml:"log_dir"`      // where run logs go ("" disables)
	RaceLogDir string `yaml:"race_log_dir"` // where race logs go ("" disables)
	CoverDir   string `yaml:"cover_dir"`    // coverage output directory
}

// ConfigStep describes one mutate-and-verify cycle.
type ConfigStep struct {
	ChangeLikelihood      float64 `yaml:"change_likelihood"`                 // per-file probability of modification
	CorruptLikelihood     float64 `yaml:"corrupt_likelihood"`                // per-file probability of silent corruption
	PerformIncrementalRun bool    `yaml:"perform_incremental_run,omitempty"` // also do an incremental run before the full one
}
// readConfig loads and decodes the YAML config file at path.
//
// Fix: the original panicked on the open error but returned the decode
// error; both failure modes now consistently return the error (main
// panics on it anyway, so observable behaviour for the caller is kept).
func readConfig(path string) (*Config, error) {
	configFile, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer configFile.Close()
	config := &Config{}
	dec := yaml.NewDecoder(configFile)
	err = dec.Decode(config)
	if err != nil {
		return nil, err
	}
	return config, nil
}
// main expects exactly one argument, the YAML config path; it loads the
// config and runs the full test, panicking on any failure.
func main() {
	if len(os.Args) != 2 {
		fmt.Println("Usage:", os.Args[0], "<config.yaml>")
		os.Exit(1)
	}
	cfg, err := readConfig(os.Args[1])
	if err != nil {
		panic(err)
	}
	if err := PerformTest(cfg); err != nil {
		panic(err)
	}
}
|
package main
import (
"encoding/xml"
"fmt"
"io/ioutil"
"log"
"strings"
spew "github.com/davecgh/go-spew/spew"
)
// RssFeed is the root <rss> element of the feed.
type RssFeed struct {
	XMLName xml.Name   `xml:"rss"`
	Channel RssChannel `xml:"channel"`
}

// RssChannel is the <channel> element with its items and metadata.
type RssChannel struct {
	XMLName       xml.Name         `xml:"channel"`
	Language      string           `xml:"language"`
	Items         []RssChannelItem `xml:"item"`
	LastBuildDate string           `xml:"lastBuildDate"`
}

// RssChannelItem is one <item> entry of the channel.
type RssChannelItem struct {
	Title       string `xml:"title"`
	Description string `xml:"description"`
	PubDate     string `xml:"pubDate"`
}
// parseDescription rewrites markup inside an item description and prints
// the result to stdout.
// NOTE(review): strings.NewReplacer matches old strings in argument order
// at each position, so with the pairs as written the later "<br>" pattern
// can never match once a shorter pattern starting with the same prefix is
// listed first — and several of these literals look like HTML entities
// ("&lt;"/"&gt;") mangled by an earlier encoding pass. Verify the intended
// pattern set against the original source.
func parseDescription(body string) {
	r := strings.NewReplacer(
		"<", "<",
		">", ">",
		"<br>", "<br />")
	fmt.Printf("%v", r.Replace(body))
}
// main reads menu.xml, pre-processes it so it parses as well-formed XML,
// unmarshals it into RssFeed, dumps the structure and prints the first
// item's description.
func main() {
	file, err := ioutil.ReadFile("menu.xml")
	if err != nil {
		log.Fatalf("WHAT? %s\n", err)
	}
	v := RssFeed{}
	// NOTE(review): as with parseDescription, these replacement literals
	// appear entity-mangled ("&amp;" etc.); confirm against the original.
	r := strings.NewReplacer(
		"&", "&",
		"<br>", "<br />")
	finalXml := []byte(r.Replace(string(file)))
	if err := xml.Unmarshal(finalXml, &v); err != nil {
		fmt.Printf("what? %+v\n", err)
	}
	spew.Dump(v)
	// Assumes the feed contains at least one item; panics otherwise.
	parseDescription(v.Channel.Items[0].Description)
}
|
package main
import (
"fmt"
csv_conv "github.com/garupanojisan/csv-conv"
"log"
"os"
)
// main demonstrates the csv-conv converter on example.csv: it renames two
// columns and then appends an empty new column, printing each result.
func main() {
	f, err := os.Open("./example.csv")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	conv, err := csv_conv.NewConverter(f)
	if err != nil {
		log.Fatal(err)
	}
	// change column names
	changed, err := conv.ChangeColumnName(map[string]string{
		"a": "A",
		"あ": "ア",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(changed)
	// append a new column (variable renamed from `append`, which
	// shadowed the builtin of the same name)
	appended, err := conv.AppendColumn("new", "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(appended)
}
|
// Exercise 10_distributed guides you through using the replay in a distributed system where
// creating runs, workflow consumers and activity consumers and event consumers are being
// processed/called from different processes. Each process directly communicates with the
// replay client (in this case the replay DBClient) treating it almost like a common event bus.
//
// To participate, each process just needs access to the typedreplay generated API which defines the namespace
// names and types as well as access to the same replay backend.
//
// The workflow is the same "hello world" example as 00/01. The four processes are:
// - r: Creates new workflow runs.
// - a: Registers and processes the activity function.
// - w: Registers and processes the workflow function.
// - c: Consumes and logs events.
//
// To start a process, provide it's name as argument to this main program.
package main
import (
"context"
"flag"
"fmt"
"time"
"github.com/corverroos/replay/typedreplay"
"github.com/luno/fate"
"github.com/luno/jettison/errors"
"github.com/luno/jettison/j"
"github.com/luno/jettison/log"
tut "github.com/corverroos/replaytutorial"
)
// Increase showme to 1 to show next hidden solution.
//go:generate go run ../lib/showme 0
// Step 0: main functions always just call tut.Main(Main).
// tut.Main prepares the shared tutorial state and then invokes Main below.
func main() {
	tut.Main(Main)
}
// Step 1: Replay always requires protobufs, so generate your types.
//go:generate protoc --go_out=plugins=grpc:. ./pb.proto
// Step 2: typedreplay requires a locally defined Backends dependency wrapper type.
// Backends embeds the tutorial state; it is the dependency argument passed to
// activity functions such as Print.
type Backends struct {
	tut.State
}
// Step 3: Define activity and typedreplay namespace

// Print prints the provided message.
// It is the activity function: it logs msg.Value and returns an empty
// result. The Backends and fate.Fate parameters are unused here but are
// required by the activity signature.
func Print(ctx context.Context, b Backends, f fate.Fate, msg *String) (*Empty, error) {
	log.Info(ctx, msg.Value)
	return new(Empty), nil
}
// The typedreplay namespace declaration. The typedreplay generator (see the
// go:generate directive below) reads this value to produce the typed API;
// it is not used at runtime, hence the blank identifier.
var _ = typedreplay.Namespace{
	Name: "10_distributed",
	Workflows: []typedreplay.Workflow{
		{
			Name:        "hello",
			Description: "Hello workflow just prints 'Hello {name}'",
			Input:       new(String),
		},
	},
	Activities: []typedreplay.Activity{
		{
			Name:        "print",
			Description: "Prints the provided message",
			Func:        Print,
		},
	},
	// ExposeRegisterFuncs so we get access to individual Register functions instead of single `startReplayLoops`.
	// Needed here because each distributed process registers only its own part.
	ExposeRegisterFuncs: true,
}
// Step 4: Generate the typedreplay API and define the workflow function.
//go:generate typedreplay

// Hello is the workflow function: it sleeps two seconds via the flow's
// Sleep, then calls the print activity with "Hello {name}".
func Hello(flow helloFlow, name *String) {
	flow.Sleep(time.Second * 2)
	msg := fmt.Sprintf("Hello %s", name.Value)
	flow.Print(&String{Value: msg})
}
// ttl defines how long a process should live before exiting.
// It is shared by all four process kinds; see the select in Main.
var ttl = flag.Duration("ttl", time.Minute, "TTL of the process")
// Step 5: Define your Main function which is equivalent to a main function, just with some prepared state.
// Main dispatches on the single process-name argument (r/a/w/c), performs
// that process's setup, then blocks until the TTL elapses or the context is
// cancelled.
func Main(ctx context.Context, s tut.State) error {
	if len(flag.Args()) != 1 {
		return errors.New("please provide the process name as single argument: go run thispkg a/r/w/c")
	}
	proc := flag.Arg(0)
	switch proc {
	case "r":
		// TODO(you): The r process should just create a new workflow run and then exit.
		//showme:hidden r
	case "a":
		// TODO(you): The a process should register the Print activity and then block below until TTL is reached.
		//showme:hidden a
	case "w":
		// TODO(you): The w process should register the Hello workflow and then block below until TTL is reached.
		//showme:hidden w
	case "c":
		// TODO(you): The c process should consume and log RunCreated and RunCompleted events.
		//showme:hidden c
	}
	log.Info(ctx, "process started", j.MKV{"proc": proc, "ttl": *ttl})
	// Keep the process alive so registered consumers/loops can do work.
	select {
	case <-time.After(*ttl):
		log.Info(ctx, "process ttl reached")
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
// Step 6: Execute the programs and confirm the expected output below.
//go:generate go run github.com/corverroos/replaytutorial/10_distributed -server_loops=true c
//go:generate go run github.com/corverroos/replaytutorial/10_distributed -server_loops=false w
//go:generate go run github.com/corverroos/replaytutorial/10_distributed -server_loops=false a
// Execute this multiple times to create multiple runs.
//go:generate go run github.com/corverroos/replaytutorial/10_distributed -server_loops=false r
// Expected output of "c":
// I 17:41:46.005 10_distributed/replay.go:138: process started[proc=c,ttl=1m0s]
// I 17:41:58.583 10_distributed/replay.go:121: run created[consumer=10_distributed,run=09f9b171-b47d-4cce-9c82-da1a2b493802]
// I 17:42:01.073 10_distributed/replay.go:125: run completed[consumer=10_distributed,run=09f9b171-b47d-4cce-9c82-da1a2b493802]
// I 17:42:07.227 10_distributed/replay.go:121: run created[consumer=10_distributed,run=97506cfc-951f-48c8-afc4-18976f451e03]
// I 17:42:10.109 10_distributed/replay.go:125: run completed[consumer=10_distributed,run=97506cfc-951f-48c8-afc4-18976f451e03]
// Expected output of "a":
// I 17:41:55.236 10_distributed/replay.go:138: process started[proc=a,ttl=1m0s]
// I 17:42:01.069 10_distributed/replay.go:56: Hello world[consumer=replay_activity/10_distributed/print,replay_run=09f9b171-b47d-4cce-9c82-da1a2b493802]
// I 17:42:10.102 10_distributed/replay.go:56: Hello world[consumer=replay_activity/10_distributed/print,replay_run=97506cfc-951f-48c8-afc4-18976f451e03]
// Experiments:
// - What happens if either w or a is not running?
// - Why should only one process run the server loops?
|
package handlers
import (
. "github.com/paulbellamy/mango"
"github.com/sunfmin/mangotemplate"
"html/template"
)
// provider supplies layout data to mangotemplate.MakeLayout; see
// LayoutAndRenderer.
type provider struct {
}
// Header is the (currently empty) view model returned by provider.LayoutData.
type Header struct {
}
// LayoutData implements the mangotemplate layout-data hook: every request
// gets a fresh, empty Header. The request env is ignored.
func (p *provider) LayoutData(env Env) interface{} {
	return &Header{}
}
// LayoutAndRenderer parses every HTML template under templates/*/*.html and
// returns two middlewares: a layout middleware wrapping the "main" layout
// with data from provider, and a renderer middleware over the same template
// set. It panics if the templates fail to parse.
func LayoutAndRenderer() (l Middleware, r Middleware) {
	parsed, err := template.ParseGlob("templates/*/*.html")
	if err != nil {
		panic(err)
	}
	l = mangotemplate.MakeLayout(parsed, "main", &provider{})
	r = mangotemplate.MakeRenderer(parsed)
	return l, r
}
|
package p10
// numPairsDivisibleBy60 returns the number of index pairs (i, j) with i < j
// whose song durations sum to a multiple of 60.
//
// It runs in O(n) time with O(1) extra space (the remainder map holds at
// most 60 keys). Unlike the previous version, it does not mutate the
// caller's slice, and the r == 0 case falls out of the (60-r)%60 complement
// instead of a special branch.
func numPairsDivisibleBy60(time []int) int {
	if len(time) == 0 {
		return 0
	}
	seen := make(map[int]int, 60)
	pairs := 0
	for _, t := range time {
		r := t % 60
		// A missing key reads as 0, so no ok-check is needed.
		pairs += seen[(60-r)%60]
		seen[r]++
	}
	return pairs
}
|
package main
import "fmt"
// main runs the date-difference tool: it shows the calculator banner, reads
// two dates from the user, computes their difference, and prints the result.
// NOTE(review): the error from calculateDifference is discarded — confirm
// invalid input is surfaced through display(message) instead.
func main() {
	DateCalculator()
	firstDate, secondDate := inputDates()
	message, _ := calculateDifference(firstDate, secondDate)
	fmt.Println(display(message))
}
|
package usecase
import (
"fmt"
"net/http"
"regexp"
"strconv"
"strings"
"time"
entity "silverfish/silverfish/entity"
"github.com/PuerkitoBio/goquery"
"github.com/go-rod/rod"
"github.com/sirupsen/logrus"
)
// FetcherHappymh export
//
// FetcherHappymh crawls comics from the happymh site. It embeds the generic
// Fetcher for shared behavior (FetchDoc, GenerateID, the dns field, ...).
type FetcherHappymh struct {
	Fetcher
}
// NewFetcherHappymh export
//
// NewFetcherHappymh builds a fetcher bound to the given site DNS. The false
// flag is forwarded to Fetcher.NewFetcher — presumably a rendering/session
// toggle; confirm against the Fetcher implementation.
func NewFetcherHappymh(dns string) *FetcherHappymh {
	fh := new(FetcherHappymh)
	fh.NewFetcher(false, &dns)
	return fh
}
// GetChapterURL export
//
// GetChapterURL joins the comic's DNS and the chapter path into an absolute
// https URL and returns a pointer to it.
func (fh *FetcherHappymh) GetChapterURL(comic *entity.Comic, chapterURL string) *string {
	full := fmt.Sprintf("https://%s%s", comic.DNS, chapterURL)
	return &full
}
// CrawlComic export
//
// CrawlComic fetches the comic page at url, extracts its info and chapter
// list, and assembles them into a new entity.Comic. An empty chapter list is
// logged but not treated as an error.
func (fh *FetcherHappymh) CrawlComic(url *string) (*entity.Comic, error) {
	doc, docErr := fh.FetchDoc(url)
	if docErr != nil {
		return nil, docErr
	}
	id := fh.GenerateID(url)
	info, infoErr := fh.FetchComicInfo(id, doc, nil)
	if infoErr != nil {
		// %w keeps the message identical to %s + Error() while letting
		// callers unwrap the cause with errors.Is/As.
		return nil, fmt.Errorf("Something wrong while fetching info: %w", infoErr)
	}
	chapters := fh.FetchChapterInfo(doc, nil, info.Title, *url)
	if len(chapters) == 0 {
		logrus.Print("Chapters is empty. Strange...")
	}
	comic := &entity.Comic{
		DNS:      *fh.dns,
		URL:      *url,
		Chapters: chapters,
	}
	comic.SetComicInfo(info)
	return comic, nil
}
// FetchComicInfo export
//
// FetchComicInfo scrapes title, author, description, and cover image URL
// from the comic page. All four must be present, otherwise an error listing
// the found values is returned. comicID is stored as-is; the cookie
// parameter is unused here.
func (fh *FetcherHappymh) FetchComicInfo(comicID *string, doc *goquery.Document, cookie []*http.Cookie) (*entity.ComicInfo, error) {
	title := doc.Find("h2.mg-title").Text()
	author := doc.Find("p.mg-sub-title:nth-of-type(2) > a").Text()
	description := doc.Find("div.manga-introduction > mip-showmore").Text()
	coverURL, ok := doc.Find("div.mg-cover > mip-img").Attr("src")
	if title == "" || author == "" || description == "" || !ok {
		return nil, fmt.Errorf("Something missing, title: %s, author: %s, description: %s, coverURL: %s", title, author, description, coverURL)
	}
	return &entity.ComicInfo{
		IsEnable:      true,
		ComicID:       *comicID,
		Title:         title,
		Author:        author,
		Description:   description,
		CoverURL:      coverURL,
		LastCrawlTime: time.Now(),
	}, nil
}
// FetchChapterInfo export
//
// FetchChapterInfo extracts the chapter list embedded as JSON inside the
// page's third <mip-data> element. Chapters appear newest-first in the JSON
// and are returned oldest-first. The cookie and title parameters are unused
// here but kept for interface compatibility.
func (fh *FetcherHappymh) FetchChapterInfo(doc *goquery.Document, cookie []*http.Cookie, title, url string) []entity.ComicChapter {
	chapters := []entity.ComicChapter{}
	textData := doc.Find("mip-data:nth-of-type(3)").Text()

	// The patterns are constant, so MustCompile cannot panic at runtime;
	// previously the Compile errors were silently discarded.
	listExp := regexp.MustCompile(`"chapterList":\[.*?\],`)
	titleExp := regexp.MustCompile(`\"chapterName\":\".*?\"`)
	idExp := regexp.MustCompile(`\"id\":\".*?\"`)

	data := listExp.FindString(textData)
	if len(data) < 18 {
		// No (or empty) chapter list: the old code would panic slicing
		// data[16:len(data)-2] here.
		return chapters
	}
	// Strip the `"chapterList":[` prefix and trailing `],`, and repair
	// double-escaped unicode sequences before splitting the objects.
	arrayDataStr, _ := strconv.Unquote(strings.Replace(strconv.Quote(data[16:len(data)-2]), `\\u`, `\u`, -1))
	chapterDatas := strings.Split(arrayDataStr, "},{")

	// Iterate backwards and append, which yields the same reversed order as
	// the old O(n²) prepend-per-element loop.
	for i := len(chapterDatas) - 1; i >= 0; i-- {
		rawTitle := titleExp.FindString(chapterDatas[i])
		rawID := idExp.FindString(chapterDatas[i])
		// len(`"chapterName":"`) == 15, len(`"id":"`) == 6.
		if len(rawTitle) <= 15 || len(rawID) <= 6 {
			continue // malformed entry; skip instead of panicking on slice bounds
		}
		chTitle := rawTitle[15:]
		chID := rawID[6:]
		chapters = append(chapters, entity.ComicChapter{
			Title:    chTitle[:len(chTitle)-1],
			URL:      fmt.Sprintf("%s/%s", url, chID[:len(chID)-1]),
			ImageURL: []string{},
		})
	}
	return chapters
}
// UpdateComicInfo export
//
// UpdateComicInfo re-crawls the comic's page, refreshing its info, chapter
// list, and last-crawl timestamp in place. It returns the same comic pointer.
func (fh *FetcherHappymh) UpdateComicInfo(comic *entity.Comic) (*entity.Comic, error) {
	doc, docErr := fh.FetchDoc(&comic.URL)
	if docErr != nil {
		return nil, docErr
	}
	info, infoErr := fh.FetchComicInfo(&comic.ComicID, doc, nil)
	if infoErr != nil {
		// %w preserves the message while making the cause unwrappable.
		return nil, fmt.Errorf("Something wrong while fetching info: %w", infoErr)
	}
	chapters := fh.FetchChapterInfo(doc, nil, comic.Title, comic.URL)
	if len(chapters) == 0 {
		logrus.Print("Chapters is empty. Strange...")
	}
	comic.LastCrawlTime = time.Now()
	comic.SetComicInfo(info)
	comic.Chapters = chapters
	return comic, nil
}
// FetchComicChapter export
//
// FetchComicChapter opens the chapter page in a headless browser (rod) and
// collects the image URLs once the reader container element appears; the
// URLs are decoded in-page by the site's own ReadJs helper.
// NOTE(review): browser.MustClose is deferred before MustConnect runs — if
// connecting panics, the deferred close fires on an unconnected browser;
// confirm rod tolerates this. Any Must* failure panics rather than
// returning via the (always nil) error.
func (fh *FetcherHappymh) FetchComicChapter(comic *entity.Comic, index int) ([]string, error) {
	comicURLs := []string{}
	browser := rod.New()
	defer browser.MustClose()
	page := browser.MustConnect().MustPage(comic.Chapters[index].URL)
	page.Race().Element("div#iframeContainer_0").MustHandle(func(e *rod.Element) {
		imgJSONs := page.MustEval("JSON.parse(ReadJs.dct('c1zbnttrabim', ss))").Arr()
		for i := 0; i < len(imgJSONs); i++ {
			comicURLs = append(comicURLs, imgJSONs[i].Get("url").String())
		}
	}).MustDo()
	return comicURLs, nil
}
|
package main
import (
"fmt"
"log"
"os/exec"
"strings"
"time"
"github.com/shanghuiyang/rpi-devices/iot"
)
const (
cpuInterval = 5 * time.Minute
)
// main wires a OneNet IoT cloud client into a cpuMonitor and starts the
// never-returning monitoring loop.
func main() {
	onenetCfg := &iot.OneNetConfig{
		Token: iot.OneNetToken,
		API:   iot.OneNetAPI,
	}
	cloud := iot.NewCloud(onenetCfg)
	if cloud == nil {
		// Log prefix fixed: it was missing the opening bracket of the
		// "[cpumonitor]" tag used everywhere else.
		log.Printf("[cpumonitor]failed to new OneNet iot cloud")
		return
	}
	monitor := &cpuMonitor{
		cloud: cloud,
	}
	monitor.start()
}
// cpuMonitor periodically samples the CPU idle percentage and pushes it to
// an IoT cloud backend.
type cpuMonitor struct {
	cloud iot.Cloud
}
// start runs the monitoring loop forever: every cpuInterval it samples the
// idle percentage and pushes it to the cloud. On a sampling error it backs
// off 30 seconds and retries. It never returns.
func (c *cpuMonitor) start() {
	log.Printf("[cpumonitor]cpu monitor start working")
	for {
		f, err := c.idle()
		if err != nil {
			log.Printf("[cpumonitor]failed to get cpu idle, error: %v", err)
			time.Sleep(30 * time.Second)
			continue
		}
		v := &iot.Value{
			Device: "cpu",
			Value:  f,
		}
		// Push asynchronously so a slow round-trip does not delay the
		// sampling cadence. NOTE(review): any push error is lost.
		go c.cloud.Push(v)
		time.Sleep(cpuInterval)
	}
}
// idle returns the idle CPU percentage by running `top` in batch mode and
// parsing the "%Cpu(s): ... NN.N id, ..." line. The last of the samples is
// used, e.g.:
// $ top -n 2 -d 1
// ---------------------------------------------------------------------------------
// top - 20:04:01 up 9 min, 2 users, load average: 0.22, 0.22, 0.18
// Tasks: 72 total, 1 running, 71 sleeping, 0 stopped, 0 zombie
// %Cpu(s): 2.0 us, 2.0 sy, 0.0 ni, 96.0 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
// MiB Mem : 432.7 total, 330.8 free, 34.7 used, 67.2 buff/cache
// MiB Swap: 100.0 total, 100.0 free, 0.0 used. 347.1 avail Mem
// ---------------------------------------------------------------------------------
// It returns 0 and an error if `top` fails or no idle value can be parsed
// (the old code inconsistently returned -1 on the first failure).
func (c *cpuMonitor) idle() (float32, error) {
	cmd := exec.Command("top", "-b", "-n", "3", "-d", "3")
	out, err := cmd.CombinedOutput()
	if err != nil {
		return 0, err
	}
	return parseIdle(string(out))
}

// parseIdle extracts the idle percentage from `top` output. The last "Cpu"
// line seen wins, so later samples override the warm-up one.
func parseIdle(output string) (float32, error) {
	var cpuline string
	for _, line := range strings.Split(output, "\n") {
		if strings.Contains(line, "Cpu") {
			cpuline = line
		}
	}
	// strings.Fields tolerates runs of spaces, which the previous
	// strings.Split(cpuline, " ") did not.
	var cpu string
	items := strings.Fields(cpuline)
	for i, item := range items {
		if item == "id," && i > 0 {
			cpu = items[i-1]
		}
	}
	var v float32
	if n, err := fmt.Sscanf(cpu, "%f", &v); n != 1 || err != nil {
		return 0, fmt.Errorf("failed to parse idle value from %q", cpu)
	}
	return v, nil
}
|
package bitmap
import "testing"
// TestBitMap smoke-tests set/get around bit 1023.
// NOTE(review): 2 << 32 bits is a very large allocation — consider a much
// smaller bitmap. Nothing is asserted (results are only logged); turn these
// into real assertions once get's return type is confirmed.
func TestBitMap(t *testing.T) {
	bitmap := NewBitMap(2 << 32)
	bitmap.set(1023)
	// 111024 was never set; 1023 was.
	t.Log(bitmap.get(111024))
	t.Log(bitmap.get(1023))
}
|
// Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
// Package coreutil provides functions to describe the interface of the core
// contract in a compact way.
package coreutil
import (
"fmt"
"github.com/iotaledger/wasp/packages/coretypes"
"github.com/iotaledger/wasp/packages/hashing"
"github.com/iotaledger/wasp/packages/kv/dict"
)
// ContractInterface represents smart contract interface
type ContractInterface struct {
	Name        string
	hname       coretypes.Hname // lazily computed cache of Hn(Name); see Hname()
	Description string
	ProgramHash hashing.HashValue
	Functions   map[coretypes.Hname]ContractFunctionInterface
}
// ContractFunctionInterface represents entry point interface. Exactly one of
// Handler (full entry point) or ViewHandler (view entry point) is set; this
// invariant is enforced by Funcs.
type ContractFunctionInterface struct {
	Name        string
	Handler     Handler
	ViewHandler ViewHandler
}
// Funcs declares init entry point and a list of full and view entry points.
// It panics on a duplicate function name or when an entry does not have
// exactly one of Handler/ViewHandler set, since both indicate a programming
// error in the contract definition.
func Funcs(init Handler, fns []ContractFunctionInterface) map[coretypes.Hname]ContractFunctionInterface {
	ret := map[coretypes.Hname]ContractFunctionInterface{
		coretypes.EntryPointInit: Func("init", init),
	}
	for _, f := range fns {
		hname := f.Hname()
		if _, ok := ret[hname]; ok {
			panic(fmt.Sprintf("Duplicate function: %s", f.Name))
		}
		// Exactly one of the two handler kinds must be present.
		handlers := 0
		if f.Handler != nil {
			handlers++
		}
		if f.ViewHandler != nil {
			handlers++
		}
		if handlers != 1 {
			panic("Exactly one of Handler, ViewHandler must be set")
		}
		ret[hname] = f
	}
	return ret
}
// Func declares a full entry point: its name and its handler.
func Func(name string, handler Handler) ContractFunctionInterface {
	fn := ContractFunctionInterface{Name: name}
	fn.Handler = handler
	return fn
}
// ViewFunc declares a view entry point: its name and its handler.
// (The comment previously said "Func", which is the full-entry-point helper.)
func ViewFunc(name string, handler ViewHandler) ContractFunctionInterface {
	return ContractFunctionInterface{
		Name:        name,
		ViewHandler: handler,
	}
}
// Handler is the signature of a full entry point.
type Handler func(ctx coretypes.Sandbox) (dict.Dict, error)

// ViewHandler is the signature of a view entry point.
type ViewHandler func(ctx coretypes.SandboxView) (dict.Dict, error)
// WithFunctions populates i.Functions from the init handler plus the given
// entry points; see Funcs for the validation rules (panics on bad input).
func (i *ContractInterface) WithFunctions(init Handler, funcs []ContractFunctionInterface) {
	i.Functions = Funcs(init, funcs)
}
// GetFunction looks up an entry point by name. Note it returns a pointer to
// a local copy of the map entry, so mutating it does not affect i.Functions.
func (i *ContractInterface) GetFunction(name string) (*ContractFunctionInterface, bool) {
	f, ok := i.Functions[coretypes.Hn(name)]
	return &f, ok
}
// GetEntryPoint looks up an entry point by hname. On a miss it returns a
// pointer to a zero-value entry together with false.
func (i *ContractInterface) GetEntryPoint(code coretypes.Hname) (coretypes.EntryPoint, bool) {
	f, ok := i.Functions[code]
	return &f, ok
}
// GetDescription returns the contract's description string.
func (i *ContractInterface) GetDescription() string {
	return i.Description
}
// Hname caches the value
// The hash of Name is computed on first use and memoized in i.hname.
// NOTE(review): the lazy write is unsynchronized, so concurrent first calls
// would race — confirm callers initialize from a single goroutine.
func (i *ContractInterface) Hname() coretypes.Hname {
	if i.hname == 0 {
		i.hname = coretypes.Hn(i.Name)
	}
	return i.hname
}
// ContractID builds the contract ID from the chain ID and this contract's
// hname.
func (i *ContractInterface) ContractID(chainID coretypes.ChainID) coretypes.ContractID {
	return coretypes.NewContractID(chainID, i.Hname())
}
// Hname returns the hashed name of the entry point (recomputed on every
// call, unlike ContractInterface.Hname).
func (f *ContractFunctionInterface) Hname() coretypes.Hname {
	return coretypes.Hn(f.Name)
}
// Call invokes the full (non-view) handler. Calling it on a view entry
// point yields ErrWrongTypeEntryPoint. Handler errors are logged at debug
// level and passed through to the caller.
func (f *ContractFunctionInterface) Call(ctx coretypes.Sandbox) (dict.Dict, error) {
	if f.IsView() {
		return nil, coretypes.ErrWrongTypeEntryPoint
	}
	result, callErr := f.Handler(ctx)
	if callErr != nil {
		ctx.Log().Debugf("error occurred: '%v'", callErr)
	}
	return result, callErr
}
// CallView invokes the view handler. Calling it on a full entry point
// yields ErrWrongTypeEntryPoint. Handler errors are logged at debug level
// and passed through to the caller.
func (f *ContractFunctionInterface) CallView(ctx coretypes.SandboxView) (dict.Dict, error) {
	if !f.IsView() {
		return nil, coretypes.ErrWrongTypeEntryPoint
	}
	result, callErr := f.ViewHandler(ctx)
	if callErr != nil {
		ctx.Log().Debugf("error occurred: '%v'", callErr)
	}
	return result, callErr
}
// IsView reports whether this entry point is a view one (ViewHandler set).
func (f *ContractFunctionInterface) IsView() bool {
	return f.ViewHandler != nil
}
|
package ipfs
import (
"context"
"fmt"
"time"
"github.com/ipfs/go-ipfs/core"
"github.com/ipfs/go-ipfs/core/coreapi"
iface "github.com/ipfs/interface-go-ipfs-core"
"github.com/ipfs/interface-go-ipfs-core/options"
nsopts "github.com/ipfs/interface-go-ipfs-core/options/namesys"
path "github.com/ipfs/interface-go-ipfs-core/path"
peer "github.com/libp2p/go-libp2p-core/peer"
record "github.com/libp2p/go-libp2p-record"
)
// PublishIPNS publishes a content id to ipns
// key selects which IPNS key to publish under; empty selects the node's own
// "self" key. The operation is bounded by timeout via a context derived from
// the node's context.
func PublishIPNS(node *core.IpfsNode, id string, key string, timeout time.Duration) (iface.IpnsEntry, error) {
	api, err := coreapi.NewCoreAPI(node)
	if err != nil {
		return nil, err
	}
	if key == "" {
		key = "self" // default value in ipns module
	}
	opts := []options.NamePublishOption{
		options.Name.Key(key),
	}
	ctx, cancel := context.WithTimeout(node.Context(), timeout)
	defer cancel()
	return api.Name().Publish(ctx, path.New(id), opts...)
}
// ResolveIPNS resolves an ipns path to an ipfs path
// Resolution recurses at most one level, consults up to 4 DHT records, and
// is bounded by timeout both as a DHT option and as a context deadline.
func ResolveIPNS(node *core.IpfsNode, name peer.ID, timeout time.Duration) (path.Path, error) {
	api, err := coreapi.NewCoreAPI(node)
	if err != nil {
		return nil, err
	}
	key := fmt.Sprintf("/ipns/%s", name.Pretty())
	opts := []options.NameResolveOption{
		options.Name.ResolveOption(nsopts.Depth(1)),
		options.Name.ResolveOption(nsopts.DhtRecordCount(4)),
		options.Name.ResolveOption(nsopts.DhtTimeout(timeout)),
	}
	ctx, cancel := context.WithTimeout(node.Context(), timeout)
	defer cancel()
	return api.Name().Resolve(ctx, key, opts...)
}
// IpnsSubs shows current name subscriptions
// It requires the IPNS pubsub router to be enabled and returns each
// subscription as an /ipns/<base58-peer-id> path, skipping non-ipns keys
// and keys whose peer ID fails to parse.
func IpnsSubs(node *core.IpfsNode) ([]string, error) {
	if node.PSRouter == nil {
		return nil, fmt.Errorf("IPNS pubsub subsystem is not enabled")
	}
	var paths []string
	for _, key := range node.PSRouter.GetSubscriptions() {
		ns, k, err := record.SplitKey(key)
		if err != nil || ns != "ipns" {
			// not necessarily an error.
			continue
		}
		pid, err := peer.IDFromString(k)
		if err != nil {
			// log is presumably a package-level logger defined elsewhere
			// in this package.
			log.Errorf("ipns key not a valid peer ID: %s", err)
			continue
		}
		paths = append(paths, "/ipns/"+peer.IDB58Encode(pid))
	}
	return paths, nil
}
|
/*
Package dev ...
L298N is a motor driver
which can be used to control the direction and speed of DC motors.
Spec:
_________________________________________
| |
| |
OUT1 -| L298N |- OUT3
OUT2 -| |- OUT4
| |
|_________________________________________|
| | | | | | | | |
12v GND 5V EN1 IN1 IN2 IN3 IN4 EN2
Pins:
- OUT1: dc motor A+
- OUT2: dc motor A-
- OUT3: dc motor B+
- OUT4: dc motor B-
- IN1: input 1 for motor A
- IN2: input 2 for motor A
- IN3: input 3 for motor B
- IN4: input 1 for motor B
- EN1: enable pin for motor A
- EN2: enable pin for motor B
*/
package dev
import (
"github.com/stianeikeland/go-rpio"
)
// L298N ...
// in1/in2 steer motor A, in3/in4 steer motor B; ena/enb are the PWM enable
// (speed) pins for motors A and B respectively.
type L298N struct {
	in1 rpio.Pin
	in2 rpio.Pin
	in3 rpio.Pin
	in4 rpio.Pin
	ena rpio.Pin
	enb rpio.Pin
}
// NewL298N ...
// NewL298N wires the six GPIO pins: the four direction inputs become plain
// outputs driven low (motors stopped), the two enables become PWM outputs
// at 50*100 = 5000 Hz, and the initial speed is set to 30 (percent).
// NOTE(review): presumably rpio.Open must have been called first — confirm
// against go-rpio's requirements.
func NewL298N(in1, in2, in3, in4, ena, enb uint8) *L298N {
	l := &L298N{
		in1: rpio.Pin(in1),
		in2: rpio.Pin(in2),
		in3: rpio.Pin(in3),
		in4: rpio.Pin(in4),
		ena: rpio.Pin(ena),
		enb: rpio.Pin(enb),
	}
	l.in1.Output()
	l.in2.Output()
	l.in3.Output()
	l.in4.Output()
	l.in1.Low()
	l.in2.Low()
	l.in3.Low()
	l.in4.Low()
	l.ena.Pwm()
	l.enb.Pwm()
	l.ena.Freq(50 * 100)
	l.enb.Freq(50 * 100)
	l.Speed(30)
	return l
}
// Forward ...
// Forward drives both motors forward: IN1/IN3 high, IN2/IN4 low.
func (l *L298N) Forward() {
	l.in1.High()
	l.in2.Low()
	l.in3.High()
	l.in4.Low()
}
// Backward ...
// Backward drives both motors in reverse: IN1/IN3 low, IN2/IN4 high.
func (l *L298N) Backward() {
	l.in1.Low()
	l.in2.High()
	l.in3.Low()
	l.in4.High()
}
// Left ...
// Left turns by running motor A in reverse and motor B forward.
func (l *L298N) Left() {
	l.in1.Low()
	l.in2.High()
	l.in3.High()
	l.in4.Low()
}
// Right ...
// Right turns by running motor A forward and motor B in reverse.
func (l *L298N) Right() {
	l.in1.High()
	l.in2.Low()
	l.in3.Low()
	l.in4.High()
}
// Stop ...
// Stop halts both motors by driving all four direction inputs low.
func (l *L298N) Stop() {
	l.in1.Low()
	l.in2.Low()
	l.in3.Low()
	l.in4.Low()
}
// Speed ...
// Speed sets both motors' PWM duty cycle to s out of 100. Values above 100
// are now clamped; previously they were passed straight to DutyCycle, where
// a duty longer than the cycle is invalid.
func (l *L298N) Speed(s uint32) {
	if s > 100 {
		s = 100
	}
	// Zero the duty before applying the new value. NOTE(review): the
	// zeroing was in the original code — presumably it forces a clean
	// transition; confirm against go-rpio's PWM behavior.
	l.ena.DutyCycle(0, 100)
	l.enb.DutyCycle(0, 100)
	l.ena.DutyCycle(s, 100)
	l.enb.DutyCycle(s, 100)
}
|
package hpke
import (
"context"
)
// stubFetcher is a KeyFetcher that always returns the same fixed key.
type stubFetcher struct {
	key *PublicKey
}
// FetchPublicKey returns the stub's fixed key; the context is ignored and
// the error is always nil.
func (f stubFetcher) FetchPublicKey(_ context.Context) (*PublicKey, error) {
	return f.key, nil
}
// NewStubKeyFetcher returns a new KeyFetcher which returns a fixed key.
func NewStubKeyFetcher(key *PublicKey) KeyFetcher {
	return stubFetcher{key: key}
}
|
/*
Copyright 2015 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubeclient
import (
"crypto/tls"
"crypto/x509"
"github.com/crunchydata/crunchy-postgresql-manager-openshift/logit"
"github.com/crunchydata/crunchy-postgresql-manager-openshift/template"
"io/ioutil"
"net/http"
)
// MyPod mirrors the subset of the kube pod JSON this package cares about:
// just the pod's current status string.
type MyPod struct {
	CurrentState struct {
		Status string
	}
}
// getHttpClient builds an HTTPS client that authenticates with the OpenShift
// admin client certificate/key and trusts the cluster CA. All key material
// is read from fixed paths under /var/cpm/keys/openshift.
func getHttpClient() (*http.Client, error) {
	var caFile = "/var/cpm/keys/openshift/ca.crt"
	var certFile = "/var/cpm/keys/openshift/admin.crt"
	var keyFile = "/var/cpm/keys/openshift/admin.key"

	// Load client cert
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		logit.Error.Println(err.Error())
		return nil, err
	}

	// Load CA cert
	caCert, err := ioutil.ReadFile(caFile)
	if err != nil {
		logit.Error.Println(err.Error())
		return nil, err
	}
	caCertPool := x509.NewCertPool()
	caCertPool.AppendCertsFromPEM(caCert)

	// Setup HTTPS client
	tlsConfig := &tls.Config{
		Certificates: []tls.Certificate{cert},
		RootCAs:      caCertPool,
	}
	// Deprecated since Go 1.14 (certificate selection is automatic); kept
	// for compatibility with older toolchains.
	tlsConfig.BuildNameToCertificate()
	transport := &http.Transport{TLSClientConfig: tlsConfig}
	return &http.Client{Transport: transport}, nil
}
// DeleteService deletes a kube service
// kubeURL - the URL to kube
// ID - the ID of the service we want to delete
// it returns an error if there was a problem
//
// NOTE(review): the actual HTTP DELETE is entirely commented out, so this
// currently only logs the intent and returns nil.
func DeleteService(kubeURL string, ID string) error {
	logit.Info.Println("deleting service " + ID)
	/**
	client, err := getHttpClient()
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	*/
	/**
	// DELETE service
	var url = kubeURL + "/api/v1/services/" + ID
	logit.Info.Println("url is " + url)
	request, err2 := http.NewRequest("DELETE", url, nil)
	if err2 != nil {
		logit.Error.Println(err2.Error())
		return err2
	}
	resp, err := client.Do(request)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	defer resp.Body.Close()
	// Dump response
	data, err3 := ioutil.ReadAll(resp.Body)
	if err3 != nil {
		logit.Error.Println(err3.Error())
		return err3
	}
	logit.Info.Println(string(data))
	*/
	return nil
}
// DeletePod deletes a kube pod that should already exist
// kubeURL - the URL to kube
// ID - the ID of the Pod we want to delete
// it returns an error if there was a problem
//
// NOTE(review): the actual HTTP DELETE is entirely commented out, so this
// currently only logs the intent and returns nil.
func DeletePod(kubeURL string, ID string) error {
	logit.Info.Println("deleting pod " + ID)
	/**
	client, err4 := getHttpClient()
	if err4 != nil {
		logit.Error.Println(err4.Error())
		return err4
	}
	*/
	// DELETE pod
	/**
	var url = kubeURL + "/api/v1/pods/" + ID
	logit.Info.Println("url is " + url)
	request, err2 := http.NewRequest("DELETE", url, nil)
	if err2 != nil {
		logit.Error.Println(err2.Error())
		return err2
	}
	resp, err := client.Do(request)
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	defer resp.Body.Close()
	// Dump response
	data, err2 := ioutil.ReadAll(resp.Body)
	if err2 != nil {
		logit.Error.Println(err2.Error())
		return err2
	}
	logit.Info.Println(string(data))
	*/
	return nil
}
// CreatePod creates a new pod and service using passed in values
// kubeURL - the URL to the kube
// podInfo - the params used to configure the pod
// return an error if anything goes wrong
//
// NOTE(review): only the pod definition is rendered and logged; the actual
// POST to kube is commented out, so this always returns nil on success of
// the template step.
func CreatePod(kubeURL string, podInfo template.KubePodParams) error {
	//client, err := getHttpClient()
	logit.Info.Println("creating pod " + podInfo.ID)
	//use a pod template to build the pod definition
	data, err := template.KubeNodePod(podInfo)
	if err != nil {
		logit.Error.Println("CreatePod:" + err.Error())
		return err
	}
	logit.Info.Println(string(data[:]))
	//var bodyType = "application/json"
	//var url = kubeURL + "/api/v1/pods"
	//logit.Info.Println("url is " + url)
	// POST POD
	/*
		resp, err := client.Post(url, bodyType, bytes.NewReader(data))
		if err != nil {
			logit.Error.Println(err.Error())
			return err
		}
		defer resp.Body.Close()
		// Dump response
		data, err = ioutil.ReadAll(resp.Body)
		if err != nil {
			logit.Error.Println(err.Error())
			return err
		}
		logit.Info.Println(string(data))
	*/
	return nil
}
// GetPods gets all the pods
// kubeURL - the URL to the kube
// podInfo - the params used to configure the pod
// return an error if there was a problem
//
// NOTE(review): the HTTP GET remains commented out; only the pod template
// is rendered and logged.
func GetPods(kubeURL string, podInfo template.KubePodParams) error {
	logit.Info.Println("creating pod " + podInfo.ID)
	/**
	client, err := getHttpClient()
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	*/
	//use a pod template to build the pod definition
	data, err := template.KubeNodePod(podInfo)
	if err != nil {
		logit.Error.Println("CreatePod:" + err.Error())
		return err
	}
	logit.Info.Println(string(data[:]))
	// Do GET something
	/**
	resp, err2 := client.Get(kubeURL + "/api/v1/pods")
	if err2 != nil {
		logit.Error.Println(err2.Error())
		return err2
	}
	defer resp.Body.Close()
	// Dump response
	data, err3 := ioutil.ReadAll(resp.Body)
	if err3 != nil {
		logit.Error.Println(err3.Error())
		return err3
	}
	logit.Info.Println(string(data))
	*/
	return nil
}

// GetPod gets information about a single pod from kube
// kubeURL - the URL to the kube
// podName - the pod name
// return an error if anything goes wrong
//
// NOTE(review): the block comment opened in GetPods' GET section was never
// closed, so this entire function was silently swallowed by that comment;
// it has been restored. It also ended with a bare `return nil`, which does
// not compile for the (MyPod, error) signature — it now returns the zero
// MyPod.
func GetPod(kubeURL string, podName string) (MyPod, error) {
	var podInfo MyPod
	logit.Info.Println("getting pod info " + podName)
	/**
	client, err := getHttpClient()
	if err != nil {
		logit.Error.Println(err.Error())
		return podInfo, err
	}
	// Do GET something
	resp, err2 := client.Get(kubeURL + "/api/v1/pods/" + podName)
	if err2 != nil {
		logit.Error.Println(err2.Error())
		return podInfo, err2
	}
	defer resp.Body.Close()
	// Dump response
	data, err3 := ioutil.ReadAll(resp.Body)
	if err3 != nil {
		logit.Error.Println(err3.Error())
		return podInfo, err3
	}
	logit.Info.Println(string(data))
	err2 = json.Unmarshal(data, &podInfo)
	if err2 != nil {
		logit.Error.Println("error in unmarshalling pod " + err2.Error())
		return podInfo, err2
	}
	*/
	return podInfo, nil
}
// CreateService creates a service
// kubeURL - the URL to the kube
// podName - the pod name
// return an error if anything goes wrong
//
// NOTE(review): only the service definition is rendered and logged; the
// actual POST is commented out (it references an undefined serviceurl and
// bodyType), so this always returns nil on template success.
func CreateService(kubeURL string, serviceInfo template.KubeServiceParams) error {
	var s1data []byte
	var err error
	logit.Info.Println("create service called")
	/**
	client, err := getHttpClient()
	if err != nil {
		logit.Error.Println(err.Error())
		return err
	}
	*/
	s1data, err = template.KubeNodeService(serviceInfo)
	if err != nil {
		logit.Error.Println("CreateService:" + err.Error())
		return err
	}
	logit.Info.Println("create service request...")
	logit.Info.Println(string(s1data[:]))
	/**
	// POST admin SERVICE at port 13000
	resp1, err1 := client.Post(serviceurl, bodyType, bytes.NewReader(s1data))
	if err1 != nil {
		logit.Error.Println(err1.Error())
		return err1
	}
	defer resp1.Body.Close()
	// Dump response
	data, err4 := ioutil.ReadAll(resp1.Body)
	if err4 != nil {
		logit.Error.Println(err4.Error())
		return err4
	}
	logit.Info.Println("create service response..." + string(data))
	*/
	return nil
}
|
package main
import (
"fmt"
"os"
"os/signal"
"bazil.org/fuse"
"bazil.org/fuse/fs"
"main/passFS"
)
// main mounts the pass filesystem: the first argument is the source
// directory, the second the mount point.
func main() {
	// Guard against missing arguments; indexing os.Args directly would
	// panic with an unhelpful message.
	if len(os.Args) < 3 {
		fmt.Println("usage:", os.Args[0], "<sourceDir> <mountDir>")
		os.Exit(1)
	}
	sourceDir := os.Args[1]
	mountDir := os.Args[2]
	err := mount(sourceDir, mountDir)
	if err != nil {
		fmt.Println(err)
	}
}
// mount mounts the pass filesystem at mountDir backed by sourceDir and
// serves requests until the filesystem is unmounted. It also starts a
// goroutine that unmounts on SIGINT, which makes fs.Serve return.
func mount(sourceDir, mountDir string) error {
	c, err := fuse.Mount(mountDir)
	if err != nil {
		return err
	}
	defer c.Close()
	go unmount(mountDir)
	p := passFS.New(sourceDir)
	err = fs.Serve(c, p)
	if err != nil {
		return err
	}
	// Serve has returned; surface any error recorded during the mount
	// handshake (c.Ready is closed once the mount completes).
	<-c.Ready
	if err := c.MountError; err != nil {
		return err
	}
	return nil
}
// unmount blocks waiting for SIGINT and calls fuse.Unmount on each one,
// allowing fs.Serve in mount to return. Unmount errors are ignored here.
func unmount(mountDir string) {
	cc := make(chan os.Signal, 1)
	signal.Notify(cc, os.Interrupt)
	for range cc {
		fuse.Unmount(mountDir)
	}
}
|
// Copyright 2020 The Operator-SDK Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector
import (
rbacv1 "k8s.io/api/rbac/v1"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// TODO(estroz): there's a significant amount of code duplication here, a byproduct of Go's type system.
// However at least a few bits can be refactored so each method is smaller.
const (
	// This service account exists in every namespace as the default.
	defaultServiceAccountName = "default"
	// serviceAccountKind is the RBAC subject kind compared against when
	// matching bindings to deployment service accounts.
	serviceAccountKind = "ServiceAccount"
)
// SplitCSVPermissionsObjects splits roles that should be written to a CSV as permissions (in)
// from roles and role bindings that should be written directly to the bundle (out).
// A role may appear in both in and out (case 3a below). Unbound roles go to
// out only.
func (c *Manifests) SplitCSVPermissionsObjects() (in, out []controllerutil.Object) { //nolint:dupl
	// Index roles and role bindings by name for the passes below.
	roleMap := make(map[string]*rbacv1.Role)
	for i := range c.Roles {
		roleMap[c.Roles[i].GetName()] = &c.Roles[i]
	}
	roleBindingMap := make(map[string]*rbacv1.RoleBinding)
	for i := range c.RoleBindings {
		roleBindingMap[c.RoleBindings[i].GetName()] = &c.RoleBindings[i]
	}
	// Check for unbound roles.
	for roleName, role := range roleMap {
		hasRef := false
		for _, roleBinding := range roleBindingMap {
			roleRef := roleBinding.RoleRef
			if roleRef.Kind == "Role" && (roleRef.APIGroup == "" || roleRef.APIGroup == rbacv1.SchemeGroupVersion.Group) {
				if roleRef.Name == roleName {
					hasRef = true
					break
				}
			}
		}
		if !hasRef {
			out = append(out, role)
			delete(roleMap, roleName)
		}
	}
	// If a role is bound and:
	// 1. the binding only has one subject and it is a service account that maps to a deployment service account,
	// add the role to in.
	// 2. the binding only has one subject and it does not map to a deployment service account or is not a service account,
	// add both role and binding to out.
	// 3. the binding has more than one subject and:
	// a. one of those subjects is a deployment's service account, add both role and binding to out and role to in.
	// b. none of those subjects is a service account or maps to a deployment's service account, add both role and binding to out.
	deploymentSANames := make(map[string]struct{})
	for _, dep := range c.Deployments {
		saName := dep.Spec.Template.Spec.ServiceAccountName
		if saName == "" {
			saName = defaultServiceAccountName
		}
		deploymentSANames[saName] = struct{}{}
	}
	// Collect names first, then materialize from the maps, so each object
	// is appended at most once per destination.
	inRoleNames := make(map[string]struct{})
	outRoleNames := make(map[string]struct{})
	outRoleBindingNames := make(map[string]struct{})
	for _, binding := range c.RoleBindings {
		roleRef := binding.RoleRef
		if roleRef.Kind == "Role" && (roleRef.APIGroup == "" || roleRef.APIGroup == rbacv1.SchemeGroupVersion.Group) {
			numSubjects := len(binding.Subjects)
			if numSubjects == 1 {
				// cases (1) and (2).
				if _, hasSA := deploymentSANames[binding.Subjects[0].Name]; hasSA && binding.Subjects[0].Kind == serviceAccountKind {
					inRoleNames[roleRef.Name] = struct{}{}
				} else {
					outRoleNames[roleRef.Name] = struct{}{}
					outRoleBindingNames[binding.GetName()] = struct{}{}
				}
			} else {
				// case (3).
				for _, subject := range binding.Subjects {
					if _, hasSA := deploymentSANames[subject.Name]; hasSA && subject.Kind == serviceAccountKind {
						// case (3a).
						inRoleNames[roleRef.Name] = struct{}{}
					}
				}
				// case (3b): multi-subject bindings always go to the bundle.
				outRoleNames[roleRef.Name] = struct{}{}
				outRoleBindingNames[binding.GetName()] = struct{}{}
			}
		}
	}
	for roleName := range inRoleNames {
		if role, hasRoleName := roleMap[roleName]; hasRoleName {
			in = append(in, role)
		}
	}
	for roleName := range outRoleNames {
		if role, hasRoleName := roleMap[roleName]; hasRoleName {
			out = append(out, role)
		}
	}
	for roleBindingName := range outRoleBindingNames {
		if roleBinding, hasRoleBindingName := roleBindingMap[roleBindingName]; hasRoleBindingName {
			out = append(out, roleBinding)
		}
	}
	return in, out
}
// SplitCSVClusterPermissionsObjects splits cluster roles that should be written to a CSV as clusterPermissions (in)
// from cluster roles and cluster role bindings that should be written directly to the bundle (out).
func (c *Manifests) SplitCSVClusterPermissionsObjects() (in, out []controllerutil.Object) { //nolint:dupl
	// Index cluster roles and cluster role bindings by name for O(1) lookups below.
	roleMap := make(map[string]*rbacv1.ClusterRole)
	for i := range c.ClusterRoles {
		roleMap[c.ClusterRoles[i].GetName()] = &c.ClusterRoles[i]
	}
	roleBindingMap := make(map[string]*rbacv1.ClusterRoleBinding)
	for i := range c.ClusterRoleBindings {
		roleBindingMap[c.ClusterRoleBindings[i].GetName()] = &c.ClusterRoleBindings[i]
	}
	// Check for unbound roles: a cluster role referenced by no binding goes
	// straight to out and is removed from further consideration.
	// (Deleting from a map while ranging over it is well-defined in Go.)
	for roleName, role := range roleMap {
		hasRef := false
		for _, roleBinding := range roleBindingMap {
			roleRef := roleBinding.RoleRef
			// Only RoleRefs of kind ClusterRole in the (core or rbac) group count.
			if roleRef.Kind == "ClusterRole" && (roleRef.APIGroup == "" || roleRef.APIGroup == rbacv1.SchemeGroupVersion.Group) {
				if roleRef.Name == roleName {
					hasRef = true
					break
				}
			}
		}
		if !hasRef {
			out = append(out, role)
			delete(roleMap, roleName)
		}
	}
	// If a role is bound and:
	// 1. the binding only has one subject and it is a service account that maps to a deployment service account,
	// add the role to in.
	// 2. the binding only has one subject and it does not map to a deployment service account or is not a service account,
	// add both role and binding to out.
	// 3. the binding has more than one subject and:
	// a. one of those subjects is a deployment's service account, add both role and binding to out and role to in.
	// b. none of those subjects is a service account or maps to a deployment's service account, add both role and binding to out.
	//
	// Collect the service account name used by each deployment (falling back
	// to the cluster default when the pod spec leaves it empty).
	deploymentSANames := make(map[string]struct{})
	for _, dep := range c.Deployments {
		saName := dep.Spec.Template.Spec.ServiceAccountName
		if saName == "" {
			saName = defaultServiceAccountName
		}
		deploymentSANames[saName] = struct{}{}
	}
	// Name sets are used so a role referenced by several bindings is emitted once.
	inRoleNames := make(map[string]struct{})
	outRoleNames := make(map[string]struct{})
	outRoleBindingNames := make(map[string]struct{})
	for _, binding := range c.ClusterRoleBindings {
		roleRef := binding.RoleRef
		if roleRef.Kind == "ClusterRole" && (roleRef.APIGroup == "" || roleRef.APIGroup == rbacv1.SchemeGroupVersion.Group) {
			numSubjects := len(binding.Subjects)
			if numSubjects == 1 {
				// cases (1) and (2).
				if _, hasSA := deploymentSANames[binding.Subjects[0].Name]; hasSA && binding.Subjects[0].Kind == serviceAccountKind {
					inRoleNames[roleRef.Name] = struct{}{}
				} else {
					outRoleNames[roleRef.Name] = struct{}{}
					outRoleBindingNames[binding.GetName()] = struct{}{}
				}
			} else {
				// case (3).
				for _, subject := range binding.Subjects {
					if _, hasSA := deploymentSANames[subject.Name]; hasSA && subject.Kind == serviceAccountKind {
						// case (3a).
						inRoleNames[roleRef.Name] = struct{}{}
					}
				}
				// case (3b). Note: for case (3a) the role/binding are ALSO added
				// to out here, per the comment above.
				outRoleNames[roleRef.Name] = struct{}{}
				outRoleBindingNames[binding.GetName()] = struct{}{}
			}
		}
	}
	// Resolve collected names back to objects; names not present in the maps
	// (e.g. bindings to roles defined outside these manifests) are skipped.
	for roleName := range inRoleNames {
		if role, hasRoleName := roleMap[roleName]; hasRoleName {
			in = append(in, role)
		}
	}
	for roleName := range outRoleNames {
		if role, hasRoleName := roleMap[roleName]; hasRoleName {
			out = append(out, role)
		}
	}
	for roleBindingName := range outRoleBindingNames {
		if roleBinding, hasRoleBindingName := roleBindingMap[roleBindingName]; hasRoleBindingName {
			out = append(out, roleBinding)
		}
	}
	return in, out
}
|
// Copyright 2019 The OpenSDS Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nimble
const (
	// DriverName identifies the HPE Nimble storage driver.
	DriverName = "hpe_nimble"
)

// Array-facing constants. Meanings are inferred from the names —
// NOTE(review): confirm against the Nimble API documentation.
const (
	ThickLuntype         = 0   // thick-provisioned LUN type code
	ThinLuntype          = 1   // thin-provisioned LUN type code
	MaxNameLength        = 31  // maximum accepted name length
	MaxDescriptionLength = 170 // maximum accepted description length
	PortNumPerContr      = 2   // ports per controller
	PwdExpired           = 3   // password-expired status code
	PwdReset             = 4   // password-reset status code
)

// Error Code strings returned by the Nimble array's REST API.
const (
	ErrorUnauthorizedToServer = "SM_http_unauthorized"
	ErrorSmVolSizeDecreased   = "SM_vol_size_decreased"
	ErrorSmHttpConflict       = "SM_http_conflict"
)
|
package main
import (
"log"
"github.com/langzhenjun/xiuhu/utils"
)
// NOTE(review): the commented-out lines below embed an Ethereum keystore
// (encrypted private key) directly in source. Even commented out and
// encrypted, committed key material should be removed from the repository.
// const adminAddress = "0xf4cf445afe8945f76dea4cbcb80e82d18a4940ed"
// const adminKey = `{"address":"f4cf445afe8945f76dea4cbcb80e82d18a4940ed","crypto":{"cipher":"aes-128-ctr","ciphertext":"5ea7cc5184f29e108b9d8c59656479f5dd9430e372930e43795d6786d01d8b00","cipherparams":{"iv":"dce98618e1481ac4b55ce7f84efca685"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"cd892a78881b14d05ba90e266716c9b4784aa865a8fd895b709ffeab45183b8e"},"mac":"df384ac124e77d0f2b6aca4e775a641c82530485962a21a817e74e051aea8d9e"},"id":"63caf746-be4e-400d-84f9-ac275992a04e","version":3}`

// main currently only loads the JSON config and logs the main account's
// keystore JSON; the Ethereum interaction below is kept as disabled
// reference code.
func main() {
	configs := utils.LoadConfigs("./configs.json")
	log.Printf("%v", configs.MainAccount.KeyJSON)
	// Create a connection to the local node
	// client, err := ethclient.Dial(".geth/geth.ipc")
	// if err != nil {
	// log.Fatalf("Failed to connect to the Ethereum client: %v\r\n", err)
	// }
	// //
	// auth, err := bind.NewTransactor(strings.NewReader(adminKey), "")
	// if err != nil {
	// log.Fatalf("Failed to create authorized transactor: %v", err)
	// }
	// ERC20, err := examples.NewTokenERC20(common.HexToAddress("0x4b933105503fbb8d806c5bc64645a3a14979aa7f"), client)
	// // Deploy the contract
	// // contractAddress, _, tokenDeployed, err := contracts.DeployTokenERC20(auth, client, big.NewInt(100), "TEST", "TEST")
	// if err != nil {
	// log.Fatalf("Failed to deploy contract: %v", err)
	// }
	// log.Printf("Deployed contract: 0x%x\n", contractAddress)
	// ctx := context.Background()
	// transferChan := make(chan *examples.TokenERC20Transfer)
	// froms := []common.Address{common.HexToAddress("0xf4cf445afe8945f76dea4cbcb80e82d18a4940ed")}
	// sub, err := ERC20.WatchTransfer(&bind.WatchOpts{Context: ctx}, transferChan, froms, nil)
	// if err != nil {
	// log.Fatalf("Failed to watch transfer: %v\r\n", err)
	// }
	// defer sub.Unsubscribe()
	// tx, err := ERC20.Transfer(auth, common.HexToAddress("0xdc4bb8b33c0aa1eb028c73b27a988c4e0b56140a"), big.NewInt(123456789))
	// if err != nil {
	// log.Fatalf("Failed to request token transfer: %v", err)
	// }
	// log.Printf("Transfer pending: 0x%x\n", tx.Hash())
	// _, err = bind.WaitMined(ctx, client, tx)
	// if err != nil {
	// log.Fatalf("tx mining error:%v\n", err)
	// }
	// valFrom, _ := ERC20.BalanceOf(nil, common.HexToAddress("0xf4cf445afe8945f76dea4cbcb80e82d18a4940ed"))
	// valTo, _ := ERC20.BalanceOf(nil, common.HexToAddress("0xdc4bb8b33c0aa1eb028c73b27a988c4e0b56140a"))
	// fmt.Printf("after transfere:%d, %d\n", valFrom, valTo)
	// name, err := ERC20.Name(&bind.CallOpts{Pending: true})
	// if err != nil {
	// log.Fatalf("Failed to retrieve pending name: %v", err)
	// }
	// fmt.Println("Pending name:", name)
	// for tran := range transferChan {
	// log.Printf("%v, %v, %v\r\n", tran.From.Hex(), tran.To.Hex(), tran.Value)
	// }
}
|
package bootiso
import (
"context"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"runtime/debug"
"strings"
"github.com/u-root/u-root/pkg/boot"
"github.com/u-root/u-root/pkg/boot/grub"
"github.com/u-root/u-root/pkg/boot/kexec"
"github.com/u-root/u-root/pkg/boot/syslinux"
"github.com/u-root/u-root/pkg/boot/util"
"github.com/u-root/u-root/pkg/mount"
"github.com/u-root/u-root/pkg/mount/block"
"github.com/u-root/u-root/pkg/mount/loop"
"github.com/u-root/u-root/pkg/uio"
"golang.org/x/sys/unix"
)
// Config describes one bootable entry supplied by the caller (used by
// LoadCustomConfigs) instead of being parsed from an on-ISO config file.
type Config struct {
	Label      string // human-readable name of the boot entry
	KernelPath string // kernel image path relative to the ISO root
	InitrdPath string // initrd image path relative to the ISO root
	Cmdline    string // kernel command line for this entry
}
// ParseConfigFromISO mounts the iso file, attempts to parse the config file,
// and returns a list of bootable boot.OSImage objects representing the parsed configs.
func ParseConfigFromISO(isoPath string, configType string) ([]boot.OSImage, error) {
	mountDir, err := ioutil.TempDir("", "mnt-")
	if err != nil {
		return nil, fmt.Errorf("Error creating mount dir: %v", err)
	}
	defer os.RemoveAll(mountDir)

	// Attach the ISO to a loop device and mount it read-only.
	dev, err := loop.New(isoPath, "iso9660", "")
	if err != nil {
		return nil, fmt.Errorf("Error creating loop device: %v", err)
	}
	mountPoint, err := dev.Mount(mountDir, unix.MS_RDONLY|unix.MS_NOATIME)
	if err != nil {
		return nil, fmt.Errorf("Error mounting loop device: %v", err)
	}
	defer mountPoint.Unmount(0)

	parsed, err := parseConfigFile(mountDir, configType)
	if err != nil {
		return nil, fmt.Errorf("Error parsing config: %v", err)
	}
	return parsed, nil
}
// LoadCustomConfigs is an alternative to ParseConfigFromISO that allows us
// to define the boot parameters ourselves (in a list of Config objects)
// instead of parsing them from a config file.
//
// The kernel and initrd of each entry are copied out of the ISO into temp
// files that stay open (they are read later, when the image is booted);
// the ISO mount itself is torn down before returning.
func LoadCustomConfigs(isoPath string, configs []Config) ([]boot.OSImage, error) {
	tmpDir, err := ioutil.TempDir("", "mnt-")
	if err != nil {
		return nil, fmt.Errorf("Error on ioutil.TempDir; in %s, and got %v", debug.Stack(), err)
	}

	loopdev, err := loop.New(isoPath, "iso9660", "")
	if err != nil {
		// Fix: don't leak the temp mount dir when attaching the loop device fails
		// (the cleanup closure below is only registered after a successful mount).
		os.RemoveAll(tmpDir)
		return nil, fmt.Errorf("Error on loop.New; in %s, and got %v", debug.Stack(), err)
	}

	mp, err := loopdev.Mount(tmpDir, unix.MS_RDONLY|unix.MS_NOATIME)
	if err != nil {
		// Fix: same leak on mount failure.
		os.RemoveAll(tmpDir)
		return nil, fmt.Errorf("Error on loopdev.Mount; in %s, and got %v", debug.Stack(), err)
	}

	var images []boot.OSImage
	var files []*os.File
	// copied caches the temp copy of each ISO path so shared kernels/initrds
	// are only copied once.
	copied := make(map[string]*os.File)

	defer func() {
		for _, f := range files {
			if err = f.Close(); err != nil {
				log.Print(err)
			}
		}
		if err = mp.Unmount(unix.MNT_FORCE); err != nil {
			log.Fatal(err)
		}
		// Use Remove rather than RemoveAll to avoid
		// removal if the directory is not empty
		if err = os.Remove(tmpDir); err != nil {
			log.Fatal(err)
		}
	}()

	for _, c := range configs {
		var tmpKernel, tmpInitrd *os.File

		// Copy kernel to temp if we haven't already
		if _, ok := copied[c.KernelPath]; !ok {
			kernel, err := os.Open(path.Join(tmpDir, c.KernelPath))
			if err != nil {
				return nil, fmt.Errorf("Error on os.Open; in %s, and got %v", debug.Stack(), err)
			}
			files = append(files, kernel)

			// Temp files are not added to the files list
			// since they need to stay open for later reading
			tmpKernel, err = ioutil.TempFile("", "kernel-")
			if err != nil {
				return nil, fmt.Errorf("Error on ioutil.TempFile; in %s, and got %v", debug.Stack(), err)
			}
			if _, err = io.Copy(tmpKernel, kernel); err != nil {
				return nil, fmt.Errorf("Error on io.Copy; in %s, and got %v", debug.Stack(), err)
			}
			// Rewind so the consumer reads from the beginning.
			if _, err = tmpKernel.Seek(0, 0); err != nil {
				return nil, fmt.Errorf("Error on tmpKernel.Seek; in %s, and got %v", debug.Stack(), err)
			}
			copied[c.KernelPath] = tmpKernel
		} else {
			tmpKernel = copied[c.KernelPath]
		}

		// Copy initrd to temp if we haven't already
		if _, ok := copied[c.InitrdPath]; !ok {
			initrd, err := os.Open(path.Join(tmpDir, c.InitrdPath))
			if err != nil {
				return nil, fmt.Errorf("Error on os.Open; in %s, and got %v", debug.Stack(), err)
			}
			files = append(files, initrd)

			tmpInitrd, err = ioutil.TempFile("", "initrd-")
			if err != nil {
				return nil, fmt.Errorf("Error on ioutil.TempFile; in %s, and got %v", debug.Stack(), err)
			}
			if _, err = io.Copy(tmpInitrd, initrd); err != nil {
				return nil, fmt.Errorf("Error on io.Copy; in %s, and got %v", debug.Stack(), err)
			}
			if _, err = tmpInitrd.Seek(0, 0); err != nil {
				return nil, fmt.Errorf("Error on tmpInitrd.Seek; in %s, and got %v", debug.Stack(), err)
			}
			copied[c.InitrdPath] = tmpInitrd
		} else {
			tmpInitrd = copied[c.InitrdPath]
		}

		images = append(images, &boot.LinuxImage{
			Name:    c.Label,
			Kernel:  tmpKernel,
			Initrd:  tmpInitrd,
			Cmdline: c.Cmdline,
		})
	}
	return images, nil
}
// BootFromPmem copies the ISO to pmem0 and boots
// given the syslinux configuration with the provided label
func BootFromPmem(isoPath string, configLabel string, configType string) error {
	// Stream the ISO onto the persistent-memory block device.
	pmem, err := os.OpenFile("/dev/pmem0", os.O_APPEND|os.O_WRONLY, 0600)
	if err != nil {
		return fmt.Errorf("Error opening persistent memory device: %v", err)
	}
	iso, err := os.Open(isoPath)
	if err != nil {
		return fmt.Errorf("Error opening ISO: %v", err)
	}
	defer iso.Close()
	if _, err := io.Copy(pmem, iso); err != nil {
		return fmt.Errorf("Error copying from ISO to pmem: %v", err)
	}
	// Close explicitly (not deferred) so the data is flushed before mounting.
	if err = pmem.Close(); err != nil {
		return fmt.Errorf("Error closing persistent memory device: %v", err)
	}
	// Mount the copy read-only and parse its bootloader config.
	tmp, err := ioutil.TempDir("", "mnt")
	if err != nil {
		return fmt.Errorf("Error creating temp directory: %v", err)
	}
	defer os.RemoveAll(tmp)
	// The mount is intentionally not unmounted: on success the machine
	// kexecs into the new kernel and never returns from this function.
	if _, err := mount.Mount("/dev/pmem0", tmp, "iso9660", "", unix.MS_RDONLY|unix.MS_NOATIME); err != nil {
		return fmt.Errorf("Error mounting pmem0 to temp directory: %v", err)
	}
	configOpts, err := parseConfigFile(tmp, configType)
	if err != nil {
		return fmt.Errorf("Error retrieving syslinux config options: %v", err)
	}
	osImage := findConfigOptionByLabel(configOpts, configLabel)
	if osImage == nil {
		return fmt.Errorf("Config option with the requested label does not exist")
	}
	// Need to convert from boot.OSImage to boot.LinuxImage to edit the Cmdline
	linuxImage, ok := osImage.(*boot.LinuxImage)
	if !ok {
		return fmt.Errorf("Error converting from boot.OSImage to boot.LinuxImage")
	}
	// Prepend the running kernel's command line to the entry's own.
	localCmd, err := ioutil.ReadFile("/proc/cmdline")
	if err != nil {
		return fmt.Errorf("Error accessing /proc/cmdline")
	}
	cmdline := strings.TrimSuffix(string(localCmd), "\n") + " " + linuxImage.Cmdline
	linuxImage.Cmdline = cmdline
	// Load the new kernel and jump into it.
	if err := linuxImage.Load(true); err != nil {
		return err
	}
	if err := kexec.Reboot(); err != nil {
		return err
	}
	return nil
}
// next two functions hoisted from u-root kexec. We will remove
// them when the u-root kexec becomes capable of using the 32-bit
// entry point. 32-bit entry is essential to working on chromebooks.
func copyToFile(r io.Reader) (*os.File, error) {
f, err := ioutil.TempFile("", "webboot")
if err != nil {
return nil, fmt.Errorf("Error on ioutil.TempFile; in %s, and got %v", debug.Stack(), err)
}
defer f.Close()
if _, err := io.Copy(f, r); err != nil {
return nil, fmt.Errorf("Error on io.Copy; in %s, and got %v", debug.Stack(), err)
}
if err := f.Sync(); err != nil {
return nil, fmt.Errorf("Error on f.Sync; in %s, and got %v", debug.Stack(), err)
}
readOnlyF, err := os.Open(f.Name())
if err != nil {
return nil, fmt.Errorf("Error on os.Open; in %s, and got %v", debug.Stack(), err)
}
return readOnlyF, nil
}
// cmdKexecLoad stages li's kernel/initrd for booting via the classic
// /sbin/kexec command (kexec -l), which supports the 32-bit entry point.
func cmdKexecLoad(li *boot.LinuxImage, verbose bool) error {
	if li.Kernel == nil {
		return errors.New("LinuxImage.Kernel must be non-nil")
	}
	kernel, initrd := uio.Reader(util.TryGzipFilter(li.Kernel)), uio.Reader(li.Initrd)
	if verbose {
		// In verbose mode, print a dot every 5MiB. It is not pretty,
		// but it at least proves the files are still downloading.
		progress := func(r io.Reader, dot string) io.Reader {
			return &uio.ProgressReadCloser{
				RC:       ioutil.NopCloser(r),
				Symbol:   dot,
				Interval: 5 * 1024 * 1024,
				W:        os.Stdout,
			}
		}
		kernel = progress(kernel, "K")
		initrd = progress(initrd, "I")
	}
	// It seems inefficient to always copy, in particular when the reader
	// is an os.File, but that's not sufficient: os.File could be a socket,
	// a pipe or some other strange thing. Also kexec_file_load will fail
	// (similar to execve) if anything has the file opened for writing.
	// That's unfortunately something we can't guarantee here - unless we
	// make a copy of the file and dump it somewhere.
	k, err := copyToFile(kernel)
	if err != nil {
		return err
	}
	defer k.Close()
	// -l stages the kernel; --entry-32bit selects the 32-bit entry point.
	kargs := []string{"-d", "-l", "--entry-32bit", "--command-line=" + li.Cmdline}
	var i *os.File
	if li.Initrd != nil {
		i, err = copyToFile(initrd)
		if err != nil {
			return err
		}
		defer i.Close()
		kargs = append(kargs, "--initrd="+i.Name())
	}
	log.Printf("Kernel: %s", k.Name())
	kargs = append(kargs, k.Name())
	if i != nil {
		log.Printf("Initrd: %s", i.Name())
	}
	log.Printf("Command line: %s", li.Cmdline)
	log.Printf("Kexec args: %q", kargs)
	out, err := exec.Command("/sbin/kexec", kargs...).CombinedOutput()
	if err != nil {
		err = fmt.Errorf("Load failed; output %q, err %v", out, err)
	}
	return err
}
// cmdKexecReboot jumps into the previously staged kernel by running the
// classic kexec command with -e. The verbose flag is currently unused.
func cmdKexecReboot(verbose bool) error {
	out, err := exec.Command("/sbin/kexec", "-d", "-e").CombinedOutput()
	if err != nil {
		return fmt.Errorf("Exec failed; output %q, err %v", out, err)
	}
	return nil
}
// BootCachedISO boots an already-parsed OS image, appending kernelParams
// to its kernel command line. When the classic /sbin/kexec binary exists
// it is preferred (it can use the 32-bit entry point, needed on
// chromebooks); otherwise the in-process kexec implementation is used.
func BootCachedISO(osImage boot.OSImage, kernelParams string) error {
	// Need to convert from boot.OSImage to boot.LinuxImage to edit the Cmdline
	linuxImage, ok := osImage.(*boot.LinuxImage)
	if !ok {
		return fmt.Errorf("Error converting from boot.OSImage to boot.LinuxImage")
	}
	linuxImage.Cmdline = linuxImage.Cmdline + " " + kernelParams
	// We prefer to use the kexec command for now, if possible, as it can
	// use the 32-bit entry point.
	// Fix: this previously checked `err != nil`, i.e. it ran the kexec
	// command exactly when /sbin/kexec did NOT exist (and always failed),
	// while silently skipping it when it was available.
	if _, err := os.Stat("/sbin/kexec"); err == nil {
		if err := cmdKexecLoad(linuxImage, true); err != nil {
			return err
		}
		if err := cmdKexecReboot(true); err != nil {
			return err
		}
	}
	// Fallback: in-process load + reboot (not reached if the command-line
	// kexec above successfully rebooted the machine).
	if err := linuxImage.Load(true); err != nil {
		return err
	}
	if err := kexec.Reboot(); err != nil {
		return err
	}
	return nil
}
// VerifyChecksum takes a path to the ISO and its checksum
// and compares the calculated checksum on the ISO against the checksum.
// It returns true if the checksum was correct, false if the checksum
// was incorrect, the calculated checksum, and an error.
func VerifyChecksum(isoPath, checksum, checksumType string) (bool, string, error) {
iso, err := os.Open(isoPath)
if err != nil {
return false, "", err
}
defer iso.Close()
var hash hash.Hash
switch checksumType {
case "md5":
hash = md5.New()
case "sha1":
hash = sha1.New()
case "sha256":
hash = sha256.New()
default:
return false, "", fmt.Errorf("Unknown checksum type.")
}
if _, err := io.Copy(hash, iso); err != nil {
return false, "", err
}
calcChecksum := hex.EncodeToString(hash.Sum(nil))
return calcChecksum == checksum, calcChecksum, nil
}
// findConfigOptionByLabel returns the first parsed image whose label
// equals configLabel, or nil when no option matches.
func findConfigOptionByLabel(configOptions []boot.OSImage, configLabel string) boot.OSImage {
	for i := range configOptions {
		if configOptions[i].Label() == configLabel {
			return configOptions[i]
		}
	}
	return nil
}
// parseConfigFile parses bootloader entries found under mountDir.
// configType may force "syslinux" or "grub"; any other value tries
// syslinux first and falls back to grub.
func parseConfigFile(mountDir string, configType string) ([]boot.OSImage, error) {
	devs, err := block.GetBlockDevices()
	if err != nil {
		return nil, fmt.Errorf("Error on block.GetBlockDevices; in %s, and got %v", debug.Stack(), err)
	}
	pool := &mount.Pool{}

	switch configType {
	case "syslinux":
		return syslinux.ParseLocalConfig(context.Background(), mountDir)
	case "grub":
		return grub.ParseLocalConfig(context.Background(), mountDir, devs, pool)
	}

	// If no config type was specified, try both grub and syslinux
	if opts, err := syslinux.ParseLocalConfig(context.Background(), mountDir); err == nil && len(opts) != 0 {
		return opts, err
	}
	return grub.ParseLocalConfig(context.Background(), mountDir, devs, pool)
}
|
package main
import (
"fmt"
)
// Pi is a package-level constant. (Note: contrary to the original comment
// here, Go constants may also be declared inside functions; what they
// cannot do is be declared with := or change at runtime.)
const Pi = 3.14
// main demonstrates Go's type inference and explicit numeric conversions.
// Output is identical to the original version.
func main() {
	// Type inference: the literal determines each variable's type.
	i := 42           // int
	f := 3.142        // float64
	g := 0.867 + 0.5i // complex128

	// Explicit type conversions.
	a := 42
	b := float64(a)
	c := uint(b)

	fmt.Println("Value of Pi =", Pi)

	// Report each variable's dynamic type in the same order as before.
	vals := []struct {
		name  string
		value interface{}
	}{
		{"i", i}, {"f", f}, {"g", g},
		{"a", a}, {"b", b}, {"c", c},
	}
	for _, v := range vals {
		fmt.Printf("%s is of type %T\n", v.name, v.value)
	}
}
|
package main
import (
"fmt"
"github.com/garyburd/redigo/redis"
"log"
"math/rand"
"strconv"
"time"
)
/*
Scenario: leaderboard application — "take top N" queries.
Uses: Redis Sorted Set.
When the top N items are weighted by some criterion (here: ordered by
number of thumb-ups), a sorted set is the right structure: use the value
to sort by as the sorted set's score and the actual payload as the member;
then a single ZADD per item is all that is needed.
Key point:
1. How to design the score value.
*/
// pool is the process-wide Redis connection pool, created in init.
var pool *redis.Pool

// init builds the connection pool before main runs.
func init() {
	// Redis server address and the maximum number of idle pooled connections.
	redisAddress := "192.168.3.158:6379"
	poolSize := 20
	pool = &redis.Pool{
		MaxIdle:     poolSize,
		IdleTimeout: time.Minute,
		// Dial is called by the pool whenever it needs a fresh connection.
		Dial: func() (conn redis.Conn, err error) {
			conn, err = redis.Dial("tcp", redisAddress)
			if err != nil {
				return nil, err
			}
			return conn, nil
		},
	}
}
// GetRedisConn fetches a connection from the pool; the caller must Close it.
func GetRedisConn() redis.Conn {
	return pool.Get()
}
// main starts a background writer that keeps inserting scored comments,
// then shows user_1's top comments immediately and again after 5 seconds.
func main() {
	go CreateComments()
	ShowComments("user_1")
	time.Sleep(5 * time.Second)
	ShowComments("user_1")
}
// CreateComments endlessly generates comments for random users and stores
// them in a per-account sorted set, using the thumb-up count as the score.
func CreateComments() {
	conn := GetRedisConn()
	defer conn.Close()
	userNum := 10
	// Write comment data.
	for {
		userId := rand.Intn(userNum)
		account := "user_" + strconv.Itoa(userId)
		// Thumb-up count — used as the sorted-set score below.
		thumbUpNum := rand.Intn(1000)
		comment := fmt.Sprintf("comment at %s, thumb-up num:%d",
			time.Now().Format("2006-01-02 15:04:05"), thumbUpNum)
		//fmt.Printf("%s has %s\n", account, comment)
		_, err := conn.Do("ZADD", account, thumbUpNum, comment)
		if err != nil {
			log.Fatal(err)
		}
		time.Sleep(200 * time.Millisecond)
	}
}
// ShowComments prints the five highest-scored (most thumbed-up) comments
// for the given account.
func ShowComments(account string) {
	conn := GetRedisConn()
	defer conn.Close()
	// ZREVRANGE: order by score descending and take the top 5 records.
	comments, err := redis.Values(conn.Do("ZREVRANGE", account, 0, 4))
	if err != nil {
		log.Fatal(err)
	}
	// Fix: the printed header contained the typo "lastest".
	fmt.Printf("\n%s latest comments:\n", account)
	for _, comment := range comments {
		fmt.Printf("\t%s\n", comment)
	}
}
|
package testproxy
import (
"crypto/tls"
"fmt"
"io"
"net"
)
// T is a simple TCP (optionally TLS) proxy for tests: it forwards bytes
// between FromAddr and ToAddr and tracks accepted client connections.
//
// NOTE(review): conns, lastID and closed are mutated from the accept
// goroutine and read from Close without any synchronization — this type
// does not appear goroutine-safe; confirm it is only used where that
// race is acceptable (test code).
type T struct {
	FromAddr  string            // address to listen on
	ToAddr    string            // destination address to forward to
	Ln        net.Listener      // active listener, set by Run
	lastID    uint              // last connection ID handed out
	conns     map[uint]net.Conn // accepted client connections by ID
	closed    bool              // set by Close to stop the accept loop
	TLSConfig *tls.Config       // if non-nil, listen with TLS
}
// GetNextID hands out the next connection identifier, wrapping back to 1
// after the maximum uint value has been reached.
func (p *T) GetNextID() uint {
	const maxUint = ^uint(0)
	if p.lastID >= maxUint {
		p.lastID = 0
	}
	p.lastID++
	return p.lastID
}
// Run resets the proxy state, starts listening on FromAddr (with TLS when
// TLSConfig is set) and accepts connections in a background goroutine
// until Close is called. It returns immediately after the listener is up.
func (p *T) Run() error {
	var (
		ln  net.Listener
		err error
	)
	p.closed = false
	p.lastID = 0
	p.conns = map[uint]net.Conn{}
	if p.TLSConfig != nil {
		fmt.Printf("Test Kafka Proxy listening with TLS at %s\n", p.FromAddr)
		ln, err = tls.Listen("tcp", p.FromAddr, p.TLSConfig)
	} else {
		ln, err = net.Listen("tcp", p.FromAddr)
	}
	if err != nil {
		return err
	}
	p.Ln = ln
	go func() {
		for {
			// Close sets closed and also closes Ln, so the loop ends either
			// here or via the Accept error below.
			if p.closed {
				break
			}
			conn, err := ln.Accept()
			if err != nil {
				fmt.Printf("Could not accept connection: %v\n", err)
				return
			}
			p.conns[p.GetNextID()] = conn
			go p.handleConnection(conn)
		}
	}()
	return nil
}
// handleConnection dials ToAddr and shuttles bytes in both directions
// until either side errors or reaches EOF. The client connection c is
// registered in p.conns and closed via p.Close, not here.
func (p *T) handleConnection(c net.Conn) {
	fmt.Printf("Connection FROM: %s; Connecting TO: %s\n", c.RemoteAddr(), p.ToAddr)
	dstConn, err := net.Dial("tcp", p.ToAddr)
	if err != nil {
		fmt.Printf("Error connecting to destination: %v\n", err)
		return
	}
	// client -> destination.
	go func() {
		for {
			data := make([]byte, 1024)
			n, err := c.Read(data)
			if n > 0 {
				_, err = dstConn.Write(data[:n])
				if err != nil {
					fmt.Printf("Error writing to dst connection: %v\n", err)
					return
				}
			}
			if err != nil {
				if err != io.EOF {
					fmt.Printf("Error reading from client connection: %v\n", err)
				}
				_ = dstConn.Close()
				break
			}
		}
	}()
	// destination -> client.
	for {
		data := make([]byte, 1024)
		n, err := dstConn.Read(data)
		if n > 0 {
			_, err = c.Write(data[:n])
			if err != nil {
				// Fix: this write goes to the CLIENT connection; the message
				// previously said "dst connection".
				fmt.Printf("Error writing to client connection: %v\n", err)
				return
			}
		}
		if err != nil {
			// Fix: treat EOF as a normal shutdown, consistent with the
			// client->destination goroutine above.
			if err != io.EOF {
				fmt.Printf("Error reading from dst connection: %v\n", err)
			}
			_ = dstConn.Close()
			break
		}
	}
	_ = dstConn.Close()
}
// Close stops the accept loop, closes the listener and every tracked
// client connection. Intended to be called once after Run.
func (p *T) Close() {
	p.closed = true
	_ = p.Ln.Close()
	for _, c := range p.conns {
		_ = c.Close()
	}
}
|
package entity
import (
"time"
)
// UmsMember is the xorm model for the ums_member table (mall member
// accounts). The comment('…') fragments inside the tags are Chinese
// column comments stored in the database schema; English translations
// are given as line comments below. The tags themselves are left intact.
type UmsMember struct {
	Id                    int64     `json:"id" xorm:"pk autoincr BIGINT(20) 'id'"`                                                          // primary key
	MemberLevelId         int64     `json:"member_level_id" xorm:"default NULL BIGINT(20) 'member_level_id'"`                               // member level reference
	Username              string    `json:"username" xorm:"default 'NULL' comment('用户名') VARCHAR(64) 'username'"`                           // user name
	Password              string    `json:"password" xorm:"default 'NULL' comment('密码') VARCHAR(64) 'password'"`                            // password
	Nickname              string    `json:"nickname" xorm:"default 'NULL' comment('昵称') VARCHAR(64) 'nickname'"`                            // nickname
	Phone                 string    `json:"phone" xorm:"default 'NULL' comment('手机号码') VARCHAR(64) 'phone'"`                                // phone number
	Status                int       `json:"status" xorm:"default NULL comment('帐号启用状态:0->禁用;1->启用') INT(1) 'status'"`                       // account status: 0 disabled, 1 enabled
	CreateTime            time.Time `json:"create_time" xorm:"default 'NULL' comment('注册时间') DATETIME 'create_time'"`                       // registration time
	Icon                  string    `json:"icon" xorm:"default 'NULL' comment('头像') VARCHAR(500) 'icon'"`                                   // avatar
	Gender                int       `json:"gender" xorm:"default NULL comment('性别:0->未知;1->男;2->女') INT(1) 'gender'"`                       // gender: 0 unknown, 1 male, 2 female
	Birthday              time.Time `json:"birthday" xorm:"default 'NULL' comment('生日') DATE 'birthday'"`                                   // birthday
	City                  string    `json:"city" xorm:"default 'NULL' comment('所做城市') VARCHAR(64) 'city'"`                                  // city of residence
	Job                   string    `json:"job" xorm:"default 'NULL' comment('职业') VARCHAR(100) 'job'"`                                     // occupation
	PersonalizedSignature string    `json:"personalized_signature" xorm:"default 'NULL' comment('个性签名') VARCHAR(200) 'personalized_signature'"` // personal signature
	SourceType            int       `json:"source_type" xorm:"default NULL comment('用户来源') INT(1) 'source_type'"`                           // user source
	Integration           int       `json:"integration" xorm:"default NULL comment('积分') INT(11) 'integration'"`                            // points
	Growth                int       `json:"growth" xorm:"default NULL comment('成长值') INT(11) 'growth'"`                                     // growth value
	LuckeyCount           int       `json:"luckey_count" xorm:"default NULL comment('剩余抽奖次数') INT(11) 'luckey_count'"`                      // remaining lottery draws
	HistoryIntegration    int       `json:"history_integration" xorm:"default NULL comment('历史积分数量') INT(11) 'history_integration'"`        // historical points total
}
|
package core
import (
"context"
konsen "github.com/lizhaoliu/konsen/v2/proto_gen"
)
// RaftService defines methods exposed by a Raft service. Each method is a
// client-side RPC: it forwards the request to the remote server and
// returns that server's response.
type RaftService interface {
	// AppendEntries sends an AppendEntries request to the remote server.
	AppendEntries(ctx context.Context, in *konsen.AppendEntriesReq) (*konsen.AppendEntriesResp, error)
	// RequestVote sends a RequestVote request to the remote server.
	RequestVote(ctx context.Context, in *konsen.RequestVoteReq) (*konsen.RequestVoteResp, error)
	// AppendData sends an AppendData request to the remote server.
	AppendData(ctx context.Context, in *konsen.AppendDataReq) (*konsen.AppendDataResp, error)
}
|
package geebolt
import (
"fmt"
"reflect"
"unsafe"
)
// On-page sizes are derived from the in-memory struct layouts so the
// pointer arithmetic in the accessors below stays in sync with them.
const pageHeaderSize = unsafe.Sizeof(page{})
const branchPageElementSize = unsafe.Sizeof(branchPageElement{})
const leafPageElementSize = unsafe.Sizeof(leafPageElement{})

// maxKeysPerPage bounds the number of keys stored on a single page.
const maxKeysPerPage = 1024

// Page type discriminators stored in page.flags. Note these are plain
// enum values (0..3), not bit flags; typ() compares with ==.
const (
	branchPageFlag uint16 = iota
	leafPageFlag
	metaPageFlag
	freelistPageFlag
)

// page is the header at the start of every page; element data follows it
// directly in memory (see dataPtr and the *PageElement accessors).
type page struct {
	id       uint64 // page id
	flags    uint16 // one of the *PageFlag values above
	count    uint16 // number of elements on this page
	overflow uint32 // NOTE(review): presumably the count of extra contiguous overflow pages, per the boltdb layout — confirm
}

// leafPageElement describes one entry on a leaf page.
type leafPageElement struct {
	pos   uint32 // NOTE(review): presumably the key's offset relative to this element — confirm
	ksize uint32 // key size in bytes
	vsize uint32 // value size in bytes
}

// branchPageElement describes one entry on a branch page.
type branchPageElement struct {
	pos   uint32 // NOTE(review): presumably the key's offset relative to this element — confirm
	ksize uint32 // key size in bytes
	pgid  uint64 // child page id
}
// typ returns a human-readable name for the page's type flag, or
// "unknown<xx>" for an unrecognized flags value.
func (p *page) typ() string {
	names := map[uint16]string{
		branchPageFlag:   "branch",
		leafPageFlag:     "leaf",
		metaPageFlag:     "meta",
		freelistPageFlag: "freelist",
	}
	if name, ok := names[p.flags]; ok {
		return name
	}
	return fmt.Sprintf("unknown<%02x>", p.flags)
}
// meta reinterprets the bytes immediately after the page header as a
// *meta record. Presumably only meaningful when p.flags == metaPageFlag.
func (p *page) meta() *meta {
	return (*meta)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + pageHeaderSize))
}
// dataPtr returns an unsafe pointer to a slice header describing the
// page's element area: p.count entries starting right after the header.
// Callers immediately dereference it into a typed slice (see
// leafPageElements / branchPageElements).
//
// NOTE(review): hand-constructing a reflect.SliceHeader from a local
// composite literal is the classic boltdb trick but is flagged by go vet's
// unsafeptr rules — confirm it is valid on the targeted Go versions.
func (p *page) dataPtr() unsafe.Pointer {
	return unsafe.Pointer(&reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(p)) + pageHeaderSize,
		Len:  int(p.count),
		Cap:  int(p.count),
	})
}
// leafPageElement returns a pointer to the index-th leaf element header,
// computed by offsetting past the page header. No bounds checking.
func (p *page) leafPageElement(index uint16) *leafPageElement {
	off := pageHeaderSize + uintptr(index)*leafPageElementSize
	return (*leafPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + off))
}
// leafPageElements views the page's data area as a slice of p.count leaf
// element headers; nil for an empty page.
func (p *page) leafPageElements() []leafPageElement {
	if p.count == 0 {
		return nil
	}
	return *(*[]leafPageElement)(p.dataPtr())
}
// branchPageElement returns a pointer to the index-th branch element
// header, computed by offsetting past the page header. No bounds checking.
func (p *page) branchPageElement(index uint16) *branchPageElement {
	off := pageHeaderSize + uintptr(index)*branchPageElementSize
	return (*branchPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + off))
}
// branchPageElements views the page's data area as a slice of p.count
// branch element headers; nil for an empty page.
func (p *page) branchPageElements() []branchPageElement {
	if p.count == 0 {
		return nil
	}
	return *(*[]branchPageElement)(p.dataPtr())
}
|
// Copyright (C) 2019 Cisco Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package watchdog
import (
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"gopkg.in/tomb.v2"
)
// WatchDog logs progress while waiting on a channel and kills the owning
// tomb when the wait lasts too long (see Wait for the timings).
type WatchDog struct {
	log *logrus.Entry // destination for periodic wait messages
	t   *tomb.Tomb    // lifecycle handle; killed on timeout
}
// NewWatchDog builds a WatchDog that reports to log and is tied to t.
func NewWatchDog(log *logrus.Entry, t *tomb.Tomb) *WatchDog {
	wd := &WatchDog{}
	wd.log = log
	wd.t = t
	return wd
}
// Wait blocks until a value arrives on myChan and returns it, or returns
// nil if the tomb starts dying first. While waiting it logs msg every 5
// seconds (Info for the first ~30s, Warn afterwards) and, after 30 ticks
// (~150s), kills the tomb with a timeout error — which in turn makes the
// Dying case fire and end the wait.
func (wd *WatchDog) Wait(myChan chan interface{}, msg string) interface{} {
	ticker := time.NewTicker(time.Second * 5)
	nbTicks := 0
	defer ticker.Stop()
	for {
		select {
		case value := <-myChan:
			return value
		case <-wd.t.Dying():
			return nil
		case <-ticker.C:
			nbTicks++
			if nbTicks >= 30 {
				// ~150s elapsed: abort the whole tomb.
				wd.t.Kill(errors.Errorf("Timeout waiting for config from felix"))
			} else if nbTicks >= 6 { // Start warning after 6 ticks, i.e. 30sec
				wd.log.Warn(msg)
			} else {
				wd.log.Info(msg)
			}
		}
	}
}
|
package rawrecording
import (
"compress/gzip"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"sort"
)
// Reader iterates over the frames of a raw recording: a sequence of
// gzipped FramePacks followed by a gzipped Meta block and a trailing
// 8-byte little-endian size of that meta block.
type Reader struct {
	currentPack     *FramePack // pack the cursor currently points into
	currentFrameNdx int        // index in currentPack
	currentPackNdx  int        // index of currentPack; -1 before the first read
	packs           *io.SectionReader // region of the source holding all packs
	indexTimeOffsets []float64 // time offset of each pack (from Meta.PackIndex)
	indexFileOffsets []int64   // byte offset of each pack, plus one trailing end offset
	Meta            Meta // parsed recording metadata
}
// NewReaderFromSectionReader parses the meta trailer of a raw recording
// from source and returns a Reader positioned before the first frame.
// Layout (see Reader): packs | gzipped meta | 8-byte little-endian meta size.
func NewReaderFromSectionReader(source *io.SectionReader) (*Reader, error) {
	var err error
	sourceSize := source.Size()
	if sourceSize < 8 {
		return nil, fmt.Errorf("file too small (size: %d), couldn't find meta data", sourceSize)
	}
	// Fix: the Seek error was silently ignored.
	if _, err = source.Seek(sourceSize-8, 0); err != nil {
		return nil, fmt.Errorf("couldn't seek to meta data size: %s", err)
	}
	var metaSize int64
	if err := binary.Read(source, binary.LittleEndian, &metaSize); err != nil {
		return nil, fmt.Errorf("couldn't read meta data size: %s", err)
	}
	if metaSize < 0 {
		return nil, fmt.Errorf("meta data size negative: %d", metaSize)
	}
	if sourceSize-8 < metaSize {
		return nil, fmt.Errorf("file too small (size: %d), can't contain meta data of size %d", sourceSize, metaSize)
	}
	var metaReader *gzip.Reader
	if metaReader, err = gzip.NewReader(io.NewSectionReader(source, sourceSize-8-metaSize, metaSize)); err != nil {
		return nil, fmt.Errorf("couldn't read meta data: %s", err)
	}
	// Fix: the gzip reader was never closed.
	defer metaReader.Close()
	var metaBytes []byte
	if metaBytes, err = ioutil.ReadAll(metaReader); err != nil {
		return nil, fmt.Errorf("couldn't read meta data: %s", err)
	}
	// -1 cursors mean "nothing loaded yet"; ReadFrame lazily loads pack 0.
	result := &Reader{
		currentFrameNdx: -1,
		currentPackNdx:  -1,
	}
	if err = (&result.Meta).Unmarshal(metaBytes); err != nil {
		return nil, fmt.Errorf("couldn't parse meta data: %s", err)
	}
	// Build the time/byte offset indexes; file offsets get one extra
	// trailing entry marking the end of the last pack.
	result.indexTimeOffsets = make([]float64, len(result.Meta.PackIndex.Entries))
	result.indexFileOffsets = make([]int64, len(result.Meta.PackIndex.Entries)+1)
	fileOffset := int64(0)
	for i, entry := range result.Meta.PackIndex.Entries {
		result.indexTimeOffsets[i] = entry.Offset
		result.indexFileOffsets[i] = fileOffset
		fileOffset += int64(entry.PackSize)
	}
	result.indexFileOffsets[len(result.Meta.PackIndex.Entries)] = fileOffset
	result.packs = io.NewSectionReader(source, 0, sourceSize-8-metaSize)
	return result, nil
}
// readPack loads and unpacks the frame pack at packNdx, resetting the
// frame cursor to its first frame; it is a no-op when that pack is
// already the current one.
func (reader *Reader) readPack(packNdx int) error {
	if reader.currentPackNdx == packNdx {
		// already have pack
		return nil
	}
	start := reader.indexFileOffsets[packNdx]
	length := reader.indexFileOffsets[packNdx+1] - start
	packReader, err := gzip.NewReader(io.NewSectionReader(reader.packs, start, length))
	if err != nil {
		return fmt.Errorf("couldn't read pack at index %d: %s", packNdx, err)
	}
	packBytes, err := ioutil.ReadAll(packReader)
	if err != nil {
		return fmt.Errorf("couldn't unzip pack at index %d: %s", packNdx, err)
	}
	var framePack FramePack
	if err := (&framePack).Unmarshal(packBytes); err != nil {
		return fmt.Errorf("couldn't parse frame pack at index %d: %s", packNdx, err)
	}
	if len(framePack.Frames) == 0 {
		return fmt.Errorf("frame pack at index %d is empty", packNdx)
	}
	reader.currentPackNdx = packNdx
	reader.currentPack = &framePack
	reader.currentFrameNdx = 0
	return nil
}
// Seek positions the reader so that the next ReadFrame returns the frame
// whose pack covers the given time offset. A recording with no packs is
// a silent no-op.
func (reader *Reader) Seek(offset float64) error {
	offsets := reader.indexTimeOffsets
	if len(offsets) == 0 {
		return nil
	}
	// search the lowest index so that the first offset in the next pack is
	// larger than the one we seek
	packNdx := sort.Search(len(offsets)-1, func(i int) bool {
		return offsets[i+1] > offset
	})
	if err := reader.readPack(packNdx); err != nil {
		return err
	}
	// Same search again, now among the frames inside the selected pack.
	frames := reader.currentPack.Frames
	reader.currentFrameNdx = sort.Search(len(frames)-1, func(i int) bool {
		return frames[i+1].Offset > offset
	})
	return nil
}
// ReadFrame returns the next frame and advances the cursor.
// returns nil as Frame without error if there are no more frames
func (reader *Reader) ReadFrame() (*Frame, error) {
	// Lazily load the first pack on the first call.
	if -1 == reader.currentPackNdx {
		if err := reader.readPack(0); err != nil {
			return nil, err
		}
	}
	// Current pack exhausted: move to the next one (readPack resets the
	// frame cursor) or signal end-of-stream with a nil frame.
	if reader.currentFrameNdx >= len(reader.currentPack.Frames) {
		if reader.currentPackNdx+1 >= len(reader.indexTimeOffsets) {
			return nil, nil
		}
		if err := reader.readPack(reader.currentPackNdx + 1); err != nil {
			return nil, err
		}
	}
	result := &reader.currentPack.Frames[reader.currentFrameNdx]
	reader.currentFrameNdx++
	return result, nil
}
|
package main
import "testing"
// TestSubjectAndMessage exercises subjectAndMessage's splitting of a
// commit-style string: per these cases, the first line becomes the
// subject only when followed by a blank line; otherwise the subject is
// empty and the whole input is the message.
func TestSubjectAndMessage(t *testing.T) {
	testCases := []struct {
		name                     string // subtest name
		s                        string // input string
		wantSubject, wantMessage string // expected split
	}{
		{
			"oneline",
			"just one line",
			"", "just one line",
		},
		{
			"twoline",
			"line1\nline2",
			"", "line1\nline2",
		},
		{
			"one-and-one",
			"subject\n\nmessage",
			"subject", "message",
		},
		{
			"one-and-two",
			"subject\n\nmessage1\nmessage2",
			"subject", "message1\nmessage2",
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			gotSubject, gotMessage := subjectAndMessage(tc.s)
			if gotSubject != tc.wantSubject {
				t.Errorf("Subject mismatch\nGot: %q; Want: %q", gotSubject, tc.wantSubject)
			}
			if gotMessage != tc.wantMessage {
				t.Errorf("Message mismatch\nGot: %q; Want: %q", gotMessage, tc.wantMessage)
			}
		})
	}
}
|
package doublylinkedlist
import (
"fmt"
)
// Node is a single element of a doubly linked list: an arbitrary value plus
// links to the neighbouring nodes on either side.
type Node struct {
	value interface{}
	prev  *Node
	next  *Node
}

// NewNode builds a node carrying value, wired to the given prev and next nodes.
func NewNode(value interface{}, prev *Node, next *Node) *Node {
	n := &Node{}
	n.value = value
	n.prev = prev
	n.next = next
	return n
}

// Value reports the value stored in the node.
func (n *Node) Value() interface{} {
	return n.value
}

// SetValue replaces the value stored in the node.
func (n *Node) SetValue(value interface{}) {
	n.value = value
}

// String implements fmt.Stringer by rendering the stored value in its
// default format.
func (n *Node) String() string {
	return fmt.Sprintf("%v", n.value)
}
|
package strings
import (
"strings"
"unicode"
)
// 1.1
// allCharsUnique reports whether no rune occurs more than once in str.
func allCharsUnique(str string) bool {
	seen := map[rune]bool{}
	for _, r := range str {
		if seen[r] {
			return false
		}
		seen[r] = true
	}
	return true
}
// 1.1.b
// allCharsUniqueNoStructures reports whether every byte of str is distinct,
// comparing all pairs instead of using any auxiliary data structure.
func allCharsUniqueNoStructures(str string) bool {
	for i := 1; i < len(str); i++ {
		for j := 0; j < i; j++ {
			if str[j] == str[i] {
				return false
			}
		}
	}
	return true
}
// 1.2
// reverseCString reverses a C-style string whose final rune is the
// terminating null character; the terminator is kept in place.
func reverseCString(str string) string {
	chars := []rune(str)
	// Start j at len-2 so the trailing null rune never moves.
	for i, j := 0, len(chars)-2; i < j; {
		chars[i], chars[j] = chars[j], chars[i]
		i++
		j--
	}
	return string(chars)
}
// 1.3
// removeDuplicateChars drops repeated runes from str, keeping the first
// occurrence of each and preserving order, working in place on the rune
// slice derived from the input.
func removeDuplicateChars(str string) string {
	chars := []rune(str)
	for i := 0; i < len(chars); i++ {
		j := i + 1
		for j < len(chars) {
			if chars[j] == chars[i] {
				// Shift the tail left over the duplicate and shrink by one.
				copy(chars[j:], chars[j+1:])
				chars = chars[:len(chars)-1]
			} else {
				j++
			}
		}
	}
	return string(chars)
}
// 1.4
// areAnagrams reports whether str1 and str2 contain exactly the same runes
// with the same multiplicities.
func areAnagrams(str1 string, str2 string) bool {
	if len(str1) != len(str2) {
		return false
	}
	counts := make(map[rune]int)
	for _, r := range str1 {
		counts[r]++
	}
	// Equal byte lengths plus no rune over-consumed implies a perfect match.
	for _, r := range str2 {
		counts[r]--
		if counts[r] < 0 {
			return false
		}
	}
	return true
}
// 1.4.b
// areAnagramsOptimized reports whether str1 and str2 are anagrams using one
// counting map and an early exit as soon as str2 uses a rune more often than
// str1 does.
//
// Bug fix: the original checked `runeCount[char] == 0` immediately AFTER the
// increment, so numUniqueChars always stayed 0 and the function returned
// false for every genuine anagram; a rune is new when its count has just
// become 1. The final `i == len(str2)-1` byte-index comparison also
// mis-handled multi-byte (non-ASCII) input, so the completeness check is now
// performed once after the loop.
func areAnagramsOptimized(str1 string, str2 string) bool {
	if len(str1) != len(str2) {
		return false
	}
	runeCount := make(map[rune]int)
	var numUniqueChars int
	var numCompleteChecks int
	for _, char := range str1 {
		runeCount[char]++
		if runeCount[char] == 1 { // first occurrence of this rune
			numUniqueChars++
		}
	}
	for _, char := range str2 {
		if runeCount[char] == 0 {
			// str2 uses char more often than str1 does (or str1 lacks it).
			return false
		}
		runeCount[char]--
		if runeCount[char] == 0 {
			numCompleteChecks++
		}
	}
	// Anagrams iff every distinct rune of str1 was fully consumed by str2.
	return numCompleteChecks == numUniqueChars
}
// 1.5
// encodeSpaces returns str with every Unicode whitespace rune replaced by
// the three literal characters "%20".
func encodeSpaces(str string) string {
	var out []rune
	for _, r := range str {
		if !unicode.IsSpace(r) {
			out = append(out, r)
			continue
		}
		out = append(out, '%', '2', '0')
	}
	return string(out)
}
// 1.5.b encodeSpacesInPlace replaces each whitespace rune of str with "%20"
// by splicing into the working rune slice (Go strings are immutable, so the
// []rune copy stands in for a mutable char buffer).
func encodeSpacesInPlace(str string) string {
	chars := []rune(str)
	encoded := []rune("%20")
	for i := 0; i < len(chars); i++ {
		if !unicode.IsSpace(chars[i]) {
			continue
		}
		// Swap the single whitespace rune at i for the three runes of "%20".
		chars = append(chars[:i], append(encoded, chars[i+1:]...)...)
	}
	return string(chars)
}
// rotateSquareMatrix returns a new NxN matrix that is the input rotated 90
// degrees clockwise; the input matrix itself is not modified.
func rotateSquareMatrix(matrix [][]int) [][]int {
	n := len(matrix)
	rotated := make([][]int, n)
	for row := range matrix {
		rotated[row] = make([]int, n)
		for col := range matrix[row] {
			// The element landing at (row, col) comes from (n-1-col, row).
			rotated[row][col] = matrix[n-1-col][row]
		}
	}
	return rotated
}
// rotateSquareMatrixInPlace rotates the NxN matrix 90 degrees clockwise in
// place, working inwards one concentric ring ("layer") at a time, and
// returns the same matrix for convenience.
//
// Improvement: an NxN matrix only has n/2 layers; the original iterated
// layer up to n, and for every layer >= n/2 the inner loop condition
// (first < last) was already false, so half the outer iterations were dead
// work. The four-way cyclic swap itself is unchanged.
func rotateSquareMatrixInPlace(matrix [][]int) [][]int {
	n := len(matrix)
	for layer := 0; layer < n/2; layer++ {
		first := layer
		last := n - 1 - layer
		for i := first; i < last; i++ {
			offset := i - first
			top := matrix[first][i]                                // save top
			matrix[first][i] = matrix[last-offset][first]          // left -> top
			matrix[last-offset][first] = matrix[last][last-offset] // bottom -> left
			matrix[last][last-offset] = matrix[i][last]            // right -> bottom
			matrix[i][last] = top                                  // top -> right
		}
	}
	return matrix
}
// isRotatation reports whether str2 is a rotation of str1 (e.g. "waterbottle"
// is a rotation of "erbottlewat") using a single substring test: every
// rotation of str1 appears inside str1 concatenated with itself.
func isRotatation(str1 string, str2 string) bool {
	if len(str1) != len(str2) {
		return false
	}
	return strings.Contains(str1+str1, str2)
}
|
package item
import "github.com/gofiber/fiber/v2"
// IHandler exposes the HTTP handlers of the item resource: creating an item
// and listing the items belonging to a given order.
type IHandler interface {
	// Create handles the create-item endpoint.
	Create(c *fiber.Ctx) error
	// FindAllByOrderID handles the list-items-by-order endpoint.
	FindAllByOrderID(c *fiber.Ctx) error
}
// iHandler is the default IHandler implementation; business logic is meant
// to be delegated to the injected IService.
type iHandler struct {
	service IService
}

// NewIHandler wires an IService into a ready-to-use IHandler.
func NewIHandler(s IService) IHandler {
	return &iHandler{s}
}
// Create handles the create-item endpoint.
// NOTE(review): currently a stub — it only echoes the string "Create" as
// JSON and never touches h.service.
func (h *iHandler) Create(c *fiber.Ctx) error {
	return c.JSON("Create")
}
// FindAllByOrderID handles the list-items-by-order endpoint.
// NOTE(review): currently a stub — it only echoes the string
// "FindAllByOrderID" as JSON and never touches h.service.
func (h *iHandler) FindAllByOrderID(c *fiber.Ctx) error {
	return c.JSON("FindAllByOrderID")
}
|
package main
import "strconv"
// isPalindrome reports whether the decimal representation of x reads the
// same forwards and backwards. Negative numbers are never palindromes
// (the minus sign has no mirrored counterpart).
func isPalindrome(x int) bool {
	if x < 0 {
		return false
	}
	digits := strconv.Itoa(x)
	for lo, hi := 0, len(digits)-1; lo < hi; lo, hi = lo+1, hi-1 {
		if digits[lo] != digits[hi] {
			return false
		}
	}
	return true
}
|
package amqp
import (
"context"
"github.com/Azure/go-amqp"
)
// Session is an interface for the subset of go-amqp *Session functions that we
// actually use, adapted slightly to also interact with our own custom Sender
// interface. Using these interfaces in our messaging abstraction, instead of
// using the go-amqp types directly, allows for the possibility of utilizing
// mock implementations for testing purposes. Adding only the subset of
// functions that we actually use limits the effort involved in creating such
// mocks.
type Session interface {
	// NewSender opens a new sender link on the session.
	NewSender(opts ...amqp.LinkOption) (Sender, error)
	// NewReceiver opens a new receiver link on the session.
	NewReceiver(opts ...amqp.LinkOption) (Receiver, error)
	// Close gracefully closes the session; ctx bounds how long the close
	// may take.
	Close(ctx context.Context) error
}
// session adapts a concrete *amqp.Session to the Session interface by
// delegating every call to the wrapped go-amqp session.
type session struct {
	session *amqp.Session
}

// NewSender opens a new sender link on the underlying go-amqp session.
func (s *session) NewSender(opts ...amqp.LinkOption) (Sender, error) {
	return s.session.NewSender(opts...)
}

// NewReceiver opens a new receiver link on the underlying go-amqp session.
func (s *session) NewReceiver(opts ...amqp.LinkOption) (Receiver, error) {
	return s.session.NewReceiver(opts...)
}

// Close gracefully closes the underlying go-amqp session.
func (s *session) Close(ctx context.Context) error {
	return s.session.Close(ctx)
}
|
package main
import (
"fmt"
"github.com/joho/godotenv"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
"github.com/stretchr/gomniauth"
"github.com/stretchr/gomniauth/providers/google"
"github.com/stretchr/objx"
"net/http"
"os"
)
// main configures Google OAuth via gomniauth from environment variables,
// then serves HTTP on :8080 with Echo (logging, panic recovery and CORS
// middleware enabled).
func main(){
	// .env is optional; on error fall back to the real process environment.
	_ = godotenv.Load()
	gomniauth.SetSecurityKey(os.Getenv("SECURITY_KEY"))
	gomniauth.WithProviders(
		google.New(
			os.Getenv("CLIENT_ID"),
			os.Getenv("SECRET_VALUE"),
			os.Getenv("REDIRECT_URL"),
		),
	)
	// Echo instance
	e := echo.New()
	// Middleware
	e.Use(middleware.Logger())
	e.Use(middleware.Recover())
	e.Use(middleware.CORS())
	// OAuth endpoints: login redirects to Google; callback completes the flow.
	e.GET("/auth/callback", callback)
	e.GET("/v1/auth/login", login)
	// Routes
	e.GET("/", hello)
	e.GET("/clear", clear)
	// Start server (blocks; Fatal logs and exits on listen error)
	e.Logger.Fatal(e.Start(":8080"))
}
// hello is a trivial liveness endpoint returning a plain-text greeting.
func hello(c echo.Context) error {
	return c.String(http.StatusOK, "Hello, World!")
}
// login looks up the configured Google provider and replies with the URL the
// client should visit to begin the OAuth flow.
//
// Fix: the original panicked on provider/URL errors. An Echo handler should
// report failures through its error return value; the Recover middleware
// would only turn the panic into an opaque 500 anyway, losing the cause.
func login(c echo.Context) error{
	provider, err := gomniauth.Provider("google")
	if err != nil{
		return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
	}
	loginURL, err := provider.GetBeginAuthURL(nil, nil)
	if err != nil{
		return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
	}
	return c.JSON(200, loginURL)
}
// callback completes the Google OAuth flow: it exchanges the query-string
// credentials for a user object and acknowledges with a JSON body.
//
// Fix: failures are now surfaced as HTTP errors instead of panics.
// NOTE(review): objx.MustFromURLQuery still panics on a malformed query
// string; the Recover middleware catches that case — consider the non-Must
// variant if a clean 400 is desired there too.
func callback(c echo.Context) error{
	provider, err := gomniauth.Provider("google")
	if err != nil{
		return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
	}
	cred, err := provider.CompleteAuth(objx.MustFromURLQuery(c.QueryString()))
	if err != nil{
		return echo.NewHTTPError(http.StatusBadRequest, err.Error())
	}
	user, err := provider.GetUser(cred)
	if err != nil{
		return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
	}
	fmt.Println(user)
	return c.JSON(200, "YES!!!")
}
// clear is a stub endpoint; it only acknowledges with a fixed JSON string.
func clear(c echo.Context) error{
	return c.JSON(http.StatusOK, "clear!!!!!")
}
// SPDX-License-Identifier: Apache-2.0
// Copyright © 2020 Intel Corporation
package af
import (
"context"
"net/http"
)
// deletePfdAppTransaction issues the northbound API call that removes the
// application identified by appID from the PFD transaction pfdID, returning
// the upstream HTTP response (possibly non-nil even on error) and any error.
//
// Simplification: the original branched on err only to return the exact same
// (resp, err) pair on both paths; the call result is now returned directly.
func deletePfdAppTransaction(cliCtx context.Context, afCtx *Context,
	pfdID string, appID string) (*http.Response, error) {
	cliCfg := NewConfiguration(afCtx)
	cli := NewClient(cliCfg)
	return cli.PfdManagementAppDeleteAPI.PfdAppTransactionDelete(cliCtx,
		afCtx.cfg.AfID, pfdID, appID)
}
// DeletePfdAppTransaction is the HTTP handler for deleting one application
// from a PFD management transaction. It retrieves the AF context from the
// request context, forwards the delete to the northbound API and mirrors the
// upstream status code back to the caller.
//
// Fix: the original used a bare type assertion
// r.Context().Value(...).(*Context), which panics when the key is absent
// (a nil interface can never be type-asserted), making the following nil
// check unreachable in that case. The comma-ok form turns the missing
// context into the intended 500 response.
func DeletePfdAppTransaction(w http.ResponseWriter, r *http.Request) {
	var (
		err      error
		resp     *http.Response
		pfdTrans string
		appID    string
	)
	afCtx, ok := r.Context().Value(keyType("af-ctx")).(*Context)
	if !ok || afCtx == nil {
		errRspHeader(&w, "APP DELETE", "af-ctx retrieved from request is nil",
			http.StatusInternalServerError)
		return
	}
	cliCtx, cancel := context.WithCancel(context.Background())
	defer cancel()
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	pfdTrans = getPfdTransIDFromURL(r)
	appID = getPfdAppIDFromURL(r)
	resp, err = deletePfdAppTransaction(cliCtx, afCtx, pfdTrans, appID)
	if err != nil {
		// Mirror the upstream status when a response exists; otherwise 500.
		if resp != nil {
			errRspHeader(&w, "APP DELETE", err.Error(), resp.StatusCode)
		} else {
			errRspHeader(&w, "APP DELETE", err.Error(),
				http.StatusInternalServerError)
		}
		return
	}
	w.WriteHeader(resp.StatusCode)
}
|
package main
// main demonstrates Go's directional channel types: a send-only view
// (chan<- string) can only be written to, and a receive-only view
// (<-chan int) can only be read from — the commented lines below do not
// compile.
//
// Fix: the original created the receive-only channel directly with
// make(<-chan int, 3); nothing can ever send on such a channel, so the
// subsequent receive blocked forever and the program died with
// "all goroutines are asleep - deadlock!". The directional views are now
// derived from ordinary bidirectional channels that can actually be fed.
func main() {
	strCh := make(chan string, 3)
	intCh := make(chan int, 3)
	var stringChan chan<- string = strCh // send-only view
	var intChan <-chan int = intCh       // receive-only view
	stringChan <- "hello world"
	intCh <- 1
	<-intChan
	// <- stringChan // compile error: receive from send-only channel
	// intChan <- 2  // compile error: send to receive-only channel
}
package discovery
import (
"reflect"
"testing"
querypb "github.com/youtube/vitess/go/vt/proto/query"
"github.com/youtube/vitess/go/vt/topo"
)
// TestFilterByReplicationLag drives FilterByReplicationLag through its edge
// cases: empty input, a non-serving tablet, uniformly low and uniformly high
// lags, a clear outlier, repeated filtering, and the keep-at-least-two
// safeguard. The expected outcome of each scenario is described by the
// inline comment that precedes it.
func TestFilterByReplicationLag(t *testing.T) {
	// 0 tablet
	got := FilterByReplicationLag([]*TabletStats{})
	if len(got) != 0 {
		t.Errorf("FilterByReplicationLag([]) = %+v, want []", got)
	}
	// 1 serving tablet (the non-serving ts2 must be dropped)
	ts1 := &TabletStats{
		Tablet:  topo.NewTablet(1, "cell", "host1"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{},
	}
	ts2 := &TabletStats{
		Tablet:  topo.NewTablet(2, "cell", "host2"),
		Serving: false,
		Stats:   &querypb.RealtimeStats{},
	}
	got = FilterByReplicationLag([]*TabletStats{ts1, ts2})
	if len(got) != 1 {
		t.Errorf("len(FilterByReplicationLag([{Tablet: {Uid: 1}, Serving: true}, {Tablet: {Uid: 2}, Serving: false}])) = %v, want 1", len(got))
	}
	if len(got) > 0 && !reflect.DeepEqual(got[0], ts1) {
		t.Errorf("FilterByReplicationLag([{Tablet: {Uid: 1}, Serving: true}, {Tablet: {Uid: 2}, Serving: false}]) = %+v, want %+v", got[0], ts1)
	}
	// lags of (1s, 1s, 1s, 30s)
	ts1 = &TabletStats{
		Tablet:  topo.NewTablet(1, "cell", "host1"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 1},
	}
	ts2 = &TabletStats{
		Tablet:  topo.NewTablet(2, "cell", "host2"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 1},
	}
	ts3 := &TabletStats{
		Tablet:  topo.NewTablet(3, "cell", "host3"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 1},
	}
	ts4 := &TabletStats{
		Tablet:  topo.NewTablet(4, "cell", "host4"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 30},
	}
	got = FilterByReplicationLag([]*TabletStats{ts1, ts2, ts3, ts4})
	if len(got) != 4 || !reflect.DeepEqual(got[0], ts1) || !reflect.DeepEqual(got[1], ts2) || !reflect.DeepEqual(got[2], ts3) || !reflect.DeepEqual(got[3], ts4) {
		t.Errorf("FilterByReplicationLag([1s, 1s, 1s, 30s]) = %+v, want all", got)
	}
	// lags of (5s, 10s, 15s, 120s)
	ts1 = &TabletStats{
		Tablet:  topo.NewTablet(1, "cell", "host1"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 5},
	}
	ts2 = &TabletStats{
		Tablet:  topo.NewTablet(2, "cell", "host2"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 10},
	}
	ts3 = &TabletStats{
		Tablet:  topo.NewTablet(3, "cell", "host3"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 15},
	}
	ts4 = &TabletStats{
		Tablet:  topo.NewTablet(4, "cell", "host4"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 120},
	}
	got = FilterByReplicationLag([]*TabletStats{ts1, ts2, ts3, ts4})
	if len(got) != 3 || !reflect.DeepEqual(got[0], ts1) || !reflect.DeepEqual(got[1], ts2) || !reflect.DeepEqual(got[2], ts3) {
		t.Errorf("FilterByReplicationLag([5s, 10s, 15s, 120s]) = %+v, want [5s, 10s, 15s]", got)
	}
	// lags of (30m, 35m, 40m, 45m)
	ts1 = &TabletStats{
		Tablet:  topo.NewTablet(1, "cell", "host1"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 30 * 60},
	}
	ts2 = &TabletStats{
		Tablet:  topo.NewTablet(2, "cell", "host2"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 35 * 60},
	}
	ts3 = &TabletStats{
		Tablet:  topo.NewTablet(3, "cell", "host3"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 40 * 60},
	}
	ts4 = &TabletStats{
		Tablet:  topo.NewTablet(4, "cell", "host4"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 45 * 60},
	}
	got = FilterByReplicationLag([]*TabletStats{ts1, ts2, ts3, ts4})
	if len(got) != 4 || !reflect.DeepEqual(got[0], ts1) || !reflect.DeepEqual(got[1], ts2) || !reflect.DeepEqual(got[2], ts3) || !reflect.DeepEqual(got[3], ts4) {
		t.Errorf("FilterByReplicationLag([30m, 35m, 40m, 45m]) = %+v, want all", got)
	}
	// lags of (1s, 1s, 1m, 40m, 40m) - not run filter the second time as first run removed two items.
	ts1 = &TabletStats{
		Tablet:  topo.NewTablet(1, "cell", "host1"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 1},
	}
	ts2 = &TabletStats{
		Tablet:  topo.NewTablet(2, "cell", "host2"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 1},
	}
	ts3 = &TabletStats{
		Tablet:  topo.NewTablet(3, "cell", "host3"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 1 * 60},
	}
	ts4 = &TabletStats{
		Tablet:  topo.NewTablet(4, "cell", "host4"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 40 * 60},
	}
	ts5 := &TabletStats{
		Tablet:  topo.NewTablet(5, "cell", "host5"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 40 * 60},
	}
	got = FilterByReplicationLag([]*TabletStats{ts1, ts2, ts3, ts4, ts5})
	if len(got) != 3 || !reflect.DeepEqual(got[0], ts1) || !reflect.DeepEqual(got[1], ts2) || !reflect.DeepEqual(got[2], ts3) {
		t.Errorf("FilterByReplicationLag([1s, 1s, 1m, 40m, 40m]) = %+v, want [1s, 1s, 1m]", got)
	}
	// lags of (1s, 1s, 10m, 40m) - run filter twice to remove two items
	ts1 = &TabletStats{
		Tablet:  topo.NewTablet(1, "cell", "host1"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 1},
	}
	ts2 = &TabletStats{
		Tablet:  topo.NewTablet(2, "cell", "host2"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 1},
	}
	ts3 = &TabletStats{
		Tablet:  topo.NewTablet(3, "cell", "host3"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 10 * 60},
	}
	ts4 = &TabletStats{
		Tablet:  topo.NewTablet(4, "cell", "host4"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 40 * 60},
	}
	got = FilterByReplicationLag([]*TabletStats{ts1, ts2, ts3, ts4})
	if len(got) != 2 || !reflect.DeepEqual(got[0], ts1) || !reflect.DeepEqual(got[1], ts2) {
		t.Errorf("FilterByReplicationLag([1s, 1s, 10m, 40m]) = %+v, want [1s, 1s]", got)
	}
	// lags of (1m, 100m) - return at least 2 items to avoid overloading if the 2nd one is not delayed too much
	ts1 = &TabletStats{
		Tablet:  topo.NewTablet(1, "cell", "host1"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 1 * 60},
	}
	ts2 = &TabletStats{
		Tablet:  topo.NewTablet(2, "cell", "host2"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 100 * 60},
	}
	got = FilterByReplicationLag([]*TabletStats{ts1, ts2})
	if len(got) != 2 || !reflect.DeepEqual(got[0], ts1) || !reflect.DeepEqual(got[1], ts2) {
		t.Errorf("FilterByReplicationLag([1m, 100m]) = %+v, want all", got)
	}
	// lags of (1m, 3h) - return 1 if the 2nd one is delayed too much
	ts1 = &TabletStats{
		Tablet:  topo.NewTablet(1, "cell", "host1"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 1 * 60},
	}
	ts2 = &TabletStats{
		Tablet:  topo.NewTablet(2, "cell", "host2"),
		Serving: true,
		Stats:   &querypb.RealtimeStats{SecondsBehindMaster: 3 * 60 * 60},
	}
	got = FilterByReplicationLag([]*TabletStats{ts1, ts2})
	if len(got) != 1 || !reflect.DeepEqual(got[0], ts1) {
		t.Errorf("FilterByReplicationLag([1m, 3h]) = %+v, want [1m]", got)
	}
}
|
package main
import (
"house365.com/studyGo/06day/mylogger"
"time"
)
/* Requirements:
 * - support writing logs to different destinations
 * - log levels:
 *   1. debug
 *   2. info
 *   3. warning
 *   4. error
 *   5. fatal
 * - logging must be switchable on/off
 * - each entry must carry time, line number, file name, level and message
 * - log files must be rotated (split) by size
 */
// main exercises the mylogger file logger: it emits one message per level
// every second, forever, against a small (10 KiB) log file so the size-based
// rotation path is hit quickly.
func main() {
	//log :=mylogger.NewLog("debug")
	// File-backed logger: level "debug", directory "./", file "zhoulin.log",
	// rotate once the file exceeds 10*1024 bytes.
	log := mylogger.NewFileLogger("debug", "./", "zhoulin.log", 10*1024)
	for {
		id := 10010
		name := "理想"
		log.Debug("这是一条Debug日志,id:%d,name:%s", id, name)
		log.Info("这是一条Info日志")
		log.Warning("这是一条Warning日志")
		log.Error("这是一条Error日志")
		// NOTE(review): if mylogger's Fatal exits the process like the
		// standard log.Fatal does, this loop only ever runs once — confirm
		// mylogger's semantics.
		log.Fatal("这是一条Fatal日志")
		time.Sleep(time.Second)
	}
}
|
package main
import (
"regexp"
"strconv"
)
// MetaData holds the bibliographic information parsed out of a book filename.
type MetaData struct {
	Title   string
	Author  string
	PubYear int
}

// Filename patterns recognised by GetMetaData, tried most- to least-specific.
// Compiled once at package scope so each GetMetaData call avoids recompiling
// the same patterns, and so a bad pattern fails loudly at program start via
// MustCompile instead of being silently skipped at call time.
var (
	reAuthorTitleYear = regexp.MustCompile(`(.+) - (.+) \((\d{4})\)`)
	reAuthorTitle     = regexp.MustCompile(`(.+) - (.+)`)
	reTitleYear       = regexp.MustCompile(`(.+) \((\d{4})\)`)
)

// GetMetaData extracts author, title and publication year from filename.
// Supported shapes: "Author - Title (2006)", "Author - Title" and
// "Title (2006)"; anything else yields a MetaData whose Title is the whole
// filename and whose other fields are zero.
func GetMetaData(filename string) MetaData {
	metaData := MetaData{}
	if m := reAuthorTitleYear.FindStringSubmatch(filename); len(m) > 0 {
		metaData.Author = m[1]
		metaData.Title = m[2]
		metaData.PubYear, _ = strconv.Atoi(m[3]) // \d{4} guarantees Atoi succeeds
		return metaData
	}
	if m := reAuthorTitle.FindStringSubmatch(filename); len(m) > 0 {
		metaData.Author = m[1]
		metaData.Title = m[2]
		return metaData
	}
	if m := reTitleYear.FindStringSubmatch(filename); len(m) > 0 {
		metaData.Title = m[1]
		metaData.PubYear, _ = strconv.Atoi(m[2])
		return metaData
	}
	metaData.Title = filename
	return metaData
}
|
package cmd
import (
"context"
"encoding/hex"
"fmt"
"os"
"regexp"
"sort"
"strings"
"sync"
"golang.org/x/sync/errgroup"
humanize "github.com/dustin/go-humanize"
"github.com/grrtrr/clcv2"
"github.com/olekukonko/tablewriter"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// Flags
// showFlags collects the command-line options of the `ls` (Show) command.
var showFlags struct {
	GroupDetails bool // Whether to print group details instead of showing the contained servers
	GroupTree    bool // Whether to display groups in tree format
	GroupID      bool // Whether to display the group (hex) UUID at the right hand side
	IP           bool // Whether to just display server IPs (implies GroupTree and GroupDetails)
}
// init registers the ls command's flags and attaches the command to Root.
func init() {
	Show.Flags().BoolVar(&showFlags.GroupDetails, "group", false, "Print group details rather than the contained servers")
	Show.Flags().BoolVar(&showFlags.GroupTree, "tree", false, "Display nested group structure in tree format")
	Show.Flags().BoolVar(&showFlags.GroupID, "id", true, "Print the UUID of the group as well")
	Show.Flags().BoolVar(&showFlags.IP, "ip", false, "Print IP addresses of servers as well")
	Root.AddCommand(Show)
}
// Show implements `ls`: it lists servers and/or hardware groups, optionally
// as a nested tree (--tree), with group details (--group) or with per-server
// IP/power/snapshot annotations (--ip).
var Show = &cobra.Command{
	Use:     "ls [group|server [group|server]...]",
	Aliases: []string{"dir", "show", "list"},
	Short:   "Show server(s)/groups(s)",
	Long:    "Display detailed server/group information. Group information requires -l to be set.",
	RunE: func(cmd *cobra.Command, args []string) error {
		var nodeCallback func(context.Context, *clcv2.GroupInfo) error
		var servers, groups []string
		var root *clcv2.Group
		var err error
		// Showing IP information implies printing the nested group structure
		if showFlags.IP {
			showFlags.GroupTree = true
			showFlags.GroupDetails = true
			nodeCallback = queryServerState
		}
		switch l := len(args); l {
		case 1: // Allow user to specify data center name as sole argument
			// Data centre names look like two letters followed by a digit.
			if regexp.MustCompile(`^[[:alpha:]]{2}\d$`).MatchString(args[0]) {
				conf.Location = args[0]
				args = append(args[:0], "")
				showFlags.GroupTree = true
			}
		case 0: // The default behaviour is to list all the servers/groups in the default data centre.
			args = append(args, "")
			showFlags.GroupTree = true
		}
		if showFlags.GroupDetails || showFlags.GroupTree {
			// Partition the arguments into server names and group IDs.
			for _, name := range args {
				isServer, where, err := groupOrServer(name)
				if err != nil {
					fmt.Fprintf(os.Stderr, "ERROR: %s\n", err)
				} else if isServer {
					servers = append(servers, where)
				} else if conf.Location == "" && showFlags.GroupTree {
					// Printing group trees: requires to resolve the root first.
					return errors.Errorf("Location argument (-l) is required in order to traverse nested groups.")
				} else {
					groups = append(groups, where)
				}
			}
		} else if servers, err = extractServerNames(args); err != nil { // just show a list of servers
			fmt.Fprintf(os.Stderr, "Failed to extract server names: %s\n", err)
			return nil
		}
		// Aggregate displaying of servers
		if l := len(servers); l == 1 {
			showServerByName(client, servers[0])
		} else if l > 1 {
			showServers(client, servers)
		}
		for _, uuid := range groups {
			// Resolve the data centre's root group lazily, at most once.
			if (uuid == "" || showFlags.GroupTree) && root == nil {
				root, err = client.GetGroups(conf.Location)
				if err != nil {
					return errors.Errorf("Failed to look up groups at %s: %s", conf.Location, err)
				}
			}
			if showFlags.GroupTree {
				start := root
				if uuid != "" {
					start = clcv2.FindGroupNode(root, func(g *clcv2.Group) bool { return g.Id == uuid })
					if start == nil {
						return errors.Errorf("Failed to look up group %q in %s - is the location correct?", uuid, conf.Location)
					}
				}
				tree, err := clcv2.WalkGroupHierarchy(context.TODO(), start, nodeCallback)
				if err != nil {
					return errors.Errorf("failed to process %s group hierarchy: %s", conf.Location, err)
				}
				printGroupStructure(tree, "")
			} else if uuid == "" {
				showGroup(client, root)
			} else if rootNode, err := client.GetGroup(uuid); err != nil {
				fmt.Fprintf(os.Stderr, "Failed to query HW group %q: %s\n", uuid, err)
			} else {
				showGroup(client, rootNode)
			}
		}
		return nil
	},
}
// showGroup displays details of Hardware Group folder @root: identity,
// change history, contained servers and immediate sub-groups.
// NOTE(review): the @client parameter is not referenced in this body; it is
// kept for signature symmetry with showServer.
func showGroup(client *clcv2.CLIClient, root *clcv2.Group) {
	fmt.Printf("Group %q in %s:\n", root.Name, root.LocationId)
	fmt.Printf("ID: %s\n", root.Id)
	fmt.Printf("Description: %s\n", root.Description)
	fmt.Printf("Type: %s\n", root.Type)
	fmt.Printf("Status: %s\n", root.Status)
	if len(root.CustomFields) > 0 {
		fmt.Println("Custom fields:", root.CustomFields)
	}
	// ChangeInfo
	createdStr := humanize.Time(root.ChangeInfo.CreatedDate)
	/* The CreatedBy field can be an email address, or an API Key (hex string) */
	if _, err := hex.DecodeString(root.ChangeInfo.CreatedBy); err == nil {
		createdStr += " via API Key"
	} else {
		createdStr += " by " + root.ChangeInfo.CreatedBy
	}
	fmt.Printf("Created: %s\n", createdStr)
	modifiedStr := humanize.Time(root.ChangeInfo.ModifiedDate)
	/* The ModifiedBy field can be an email address, or an API Key (hex string) */
	if _, err := hex.DecodeString(root.ChangeInfo.ModifiedBy); err == nil {
		modifiedStr += " via API Key"
	} else {
		modifiedStr += " by " + root.ChangeInfo.ModifiedBy
	}
	fmt.Printf("Modified: %s\n", modifiedStr)
	// Servers: listed via the "server" links attached to the group.
	fmt.Printf("#Servers: %d\n", root.Serverscount)
	if root.Serverscount > 0 {
		var servers []string
		if sl := clcv2.ExtractLinks(root.Links, "server"); len(sl) > 0 {
			for _, s := range sl {
				servers = append(servers, s.Id)
			}
			fmt.Printf("Servers: %s\n", strings.Join(servers, ", "))
		}
	}
	// Sub-groups: rendered as a table, one row per child group.
	if len(root.Groups) > 0 {
		fmt.Printf("\nGroups of %s:\n", root.Name)
		table := tablewriter.NewWriter(os.Stdout)
		table.SetAutoFormatHeaders(false)
		table.SetAlignment(tablewriter.ALIGN_LEFT)
		table.SetAutoWrapText(true)
		table.SetHeader([]string{"Name", "UUID", "Description", "#Servers", "Type"})
		for _, g := range root.Groups {
			table.Append([]string{g.Name, g.Id, g.Description, fmt.Sprint(g.Serverscount), g.Type})
		}
		table.Render()
	} else {
		fmt.Printf("Sub-groups: none\n")
	}
}
// printGroupStructure pretty-prints the nested group hierarchy rooted at g,
// indenting one level per recursion step. Non-"default" group types (e.g.
// archive folders) are bracketed to stand out, and with --id the group UUID
// is appended as a right-hand column.
func printGroupStructure(g *clcv2.GroupInfo, indent string) {
	name := g.Name
	if g.Type != "default" { // 'Archive' or similar: make it stand out
		name = "[" + g.Name + "]"
	}
	groupLine := fmt.Sprintf("%s%s/", indent, name)
	if showFlags.GroupID {
		fmt.Printf("%-70s %s\n", groupLine, g.ID)
	} else {
		fmt.Printf("%s\n", groupLine)
	}
	for _, s := range g.Servers {
		fmt.Printf("%s%s\n", indent+"    ", s)
	}
	for _, child := range g.Groups {
		printGroupStructure(child, indent+"    ")
	}
}
// queryServerState processes a single clcv2.GroupInfo node in isolation,
// replacing each bare server ID in node.Servers with an annotated line
// (ID, IPs, "*" when powered on, "~" when it has snapshots). The per-server
// lookups run concurrently via an errgroup.
// NOTE: requires 'client' variable to be in enclosing scope
func queryServerState(ctx context.Context, node *clcv2.GroupInfo) error {
	var serverEntries = make(chan string)
	var g, gctx = errgroup.WithContext(ctx)
	for _, id := range node.Servers {
		id := id // capture loop variable for the goroutine below
		g.Go(func() error {
			srv, err := client.GetServer(id)
			if err != nil {
				return errors.Errorf("failed to get %q server information: %s", id, err)
			}
			infoLine := fmt.Sprintf("%-16s", strings.Join(srv.IPs(), ", "))
			if srv.Details.PowerState == "started" { // add an asterisk to indicate it's on
				infoLine += "*"
			}
			if len(srv.Details.Snapshots) > 0 { // add a tilde to indicate it has a snapshot
				infoLine += "~"
			}
			// Bail out if a sibling goroutine already failed (gctx cancelled).
			select {
			case serverEntries <- fmt.Sprintf("%-50s %s", id, infoLine):
			case <-gctx.Done():
				return gctx.Err()
			}
			return nil
		})
	}
	// Close the channel once all workers are done so the range below ends;
	// the second g.Wait() at the bottom returns the first worker error.
	go func() {
		g.Wait()
		close(serverEntries)
	}()
	node.Servers = node.Servers[:0]
	for srv := range serverEntries {
		node.Servers = append(node.Servers, srv)
	}
	return g.Wait()
}
// showServerByName looks up @servname via @client and, on success, prints
// the server's details; lookup failures are reported on stderr.
// @client: authenticated CLCv2 Client
// @servname: server name
func showServerByName(client *clcv2.CLIClient, servname string) {
	server, err := client.GetServer(servname)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to list details of server %q: %s\n", servname, err)
		return
	}
	showServer(client, server)
}
// showServer prints the full details of a single server: a summary table
// (whose columns depend on whether the server has reached 'active' state),
// followed by disk, partition and snapshot tables, and — for active servers
// — the login credentials.
func showServer(client *clcv2.CLIClient, server clcv2.Server) {
	grp, err := client.GetGroup(server.GroupId)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to resolve group UUID: %s\n", err)
		return
	}
	/* First public, then private */
	IPs := []string{}
	for _, ip := range server.Details.IpAddresses {
		if ip.Public != "" {
			IPs = append(IPs, ip.Public)
		}
	}
	for _, ip := range server.Details.IpAddresses {
		if ip.Internal != "" {
			IPs = append(IPs, ip.Internal)
		}
	}
	table := tablewriter.NewWriter(os.Stdout)
	table.SetAutoFormatHeaders(false)
	table.SetAlignment(tablewriter.ALIGN_LEFT)
	table.SetAutoWrapText(true)
	// CPU, Memory, IP and Power status are not filled in until the server reaches 'active' state.
	if server.Status == "active" {
		table.SetHeader([]string{
			"Name", "Group", "Description", "OS",
			"CPU", "Mem", "IP", "Power",
			"Last Change",
		})
	} else {
		table.SetHeader([]string{
			"Name", "Group", "Description", "OS",
			"Status",
			"Owner", "Last Change",
		})
	}
	modifiedStr := humanize.Time(server.ChangeInfo.ModifiedDate)
	/* The ModifiedBy field can be an email address, or an API Key (hex string) */
	if _, err := hex.DecodeString(server.ChangeInfo.ModifiedBy); err == nil {
		modifiedStr += " via API Key"
	} else if len(server.ChangeInfo.ModifiedBy) > 6 {
		// Truncate long modifier names to keep the column narrow.
		modifiedStr += " by " + server.ChangeInfo.ModifiedBy[:6]
	} else {
		modifiedStr += " by " + server.ChangeInfo.ModifiedBy
	}
	if server.Status == "active" {
		table.Append([]string{
			server.Name, grp.Name, server.Description, server.OsType,
			fmt.Sprint(server.Details.Cpu), fmt.Sprintf("%d G", server.Details.MemoryMb/1024), strings.Join(IPs, " "), server.Details.PowerState,
			modifiedStr,
		})
	} else {
		table.Append([]string{
			server.Name, grp.Name, server.Description, server.OsType,
			server.Status,
			server.ChangeInfo.CreatedBy, modifiedStr,
		})
	}
	table.Render()
	// Disks
	if len(server.Details.Disks) > 0 {
		fmt.Printf("\nDisks of %s (total storage: %d GB)\n", server.Name, server.Details.StorageGb)
		table = tablewriter.NewWriter(os.Stdout)
		table.SetAutoFormatHeaders(false)
		table.SetAlignment(tablewriter.ALIGN_RIGHT)
		table.SetAutoWrapText(true)
		table.SetHeader([]string{"Disk ID", "Size/GB", "Paths"})
		for _, d := range server.Details.Disks {
			table.Append([]string{string(d.Id), fmt.Sprint(d.SizeGB), strings.Join(d.PartitionPaths, ", ")})
		}
		table.Render()
	}
	// Partitions
	if len(server.Details.Partitions) > 0 {
		fmt.Printf("\nPartitions of %s:\n", server.Name)
		table = tablewriter.NewWriter(os.Stdout)
		table.SetAutoFormatHeaders(false)
		table.SetAlignment(tablewriter.ALIGN_RIGHT)
		table.SetAutoWrapText(true)
		table.SetHeader([]string{"Partition Path", "Partition Size/GB"})
		for _, p := range server.Details.Partitions {
			table.Append([]string{p.Path, fmt.Sprintf("%.1f", p.SizeGB)})
		}
		table.Render()
	}
	// Snapshots
	if len(server.Details.Snapshots) > 0 {
		fmt.Println()
		table = tablewriter.NewWriter(os.Stdout)
		table.SetAutoFormatHeaders(false)
		table.SetAlignment(tablewriter.ALIGN_CENTER)
		table.SetAutoWrapText(true)
		table.SetHeader([]string{fmt.Sprintf("Snapshots of %s", server.Name)})
		for _, s := range server.Details.Snapshots {
			table.Append([]string{s.Name})
		}
		table.Render()
	}
	// NOTE(review): this prints the server password in clear text on stdout.
	if server.Status == "active" {
		if creds, err := client.GetServerCredentials(server.Name); err != nil {
			die("unable to list %s credentials: %s", server.Name, err)
		} else {
			fmt.Printf("\nCredentials of %s: %s / %s\n", server.Name, creds.Username, creds.Password)
		}
	}
}
// showServers fetches multiple servers (and their groups) concurrently and
// prints one condensed table row per server, sorted by last-modified date.
// Individual lookup failures are logged to stderr and the server skipped.
// @client: authenticated CLCv2 Client
// @servnames: server names
func showServers(client *clcv2.CLIClient, servnames []string) {
	type asyncServerResult struct {
		server clcv2.Server
		group  clcv2.Group
	}
	var (
		wg      sync.WaitGroup
		resChan = make(chan asyncServerResult)
		results []asyncServerResult
	)
	for _, servname := range servnames {
		servname := servname // capture loop variable for the goroutine
		wg.Add(1)
		go func() {
			defer wg.Done()
			server, err := client.GetServer(servname)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to list details of server %q: %s\n", servname, err)
				return
			}
			grp, err := client.GetGroup(server.GroupId)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to resolve %s group UUID: %s\n", servname, err)
				return
			}
			resChan <- asyncServerResult{
				server: server,
				group:  *grp,
			}
		}()
	}
	// Waiter needs to run in the background, to close generator
	go func() {
		wg.Wait()
		close(resChan)
	}()
	for res := range resChan {
		results = append(results, res)
	}
	if len(results) > 0 {
		var table = tablewriter.NewWriter(os.Stdout)
		// Sort in ascending order of last-modified date.
		sort.Slice(results, func(i, j int) bool {
			return results[i].server.ChangeInfo.ModifiedDate.Before(results[j].server.ChangeInfo.ModifiedDate)
		})
		table.SetAutoFormatHeaders(false)
		table.SetAlignment(tablewriter.ALIGN_LEFT)
		table.SetAutoWrapText(true)
		table.SetHeader([]string{
			"Name", "Group", "Description", "OS",
			"IP", "CPU", "Mem", "Storage",
			"Status", "Last Change",
		})
		for _, res := range results {
			IPs := []string{}
			for _, ip := range res.server.Details.IpAddresses {
				if ip.Public != "" {
					IPs = append(IPs, ip.Public)
				}
				if ip.Internal != "" {
					IPs = append(IPs, ip.Internal)
				}
			}
			// Maintenance mode and non-active provisioning states override
			// the plain power state in the Status column.
			status := res.server.Details.PowerState
			if res.server.Details.InMaintenanceMode {
				status = "MAINTENANCE"
			} else if res.server.Status != "active" {
				status = res.server.Status
			}
			desc := res.server.Description
			if res.server.IsTemplate {
				desc = "TPL: " + desc
			}
			modifiedStr := humanize.Time(res.server.ChangeInfo.ModifiedDate)
			// The ModifiedBy field can be an email address, or an API Key (hex string)
			if _, err := hex.DecodeString(res.server.ChangeInfo.ModifiedBy); err == nil {
				modifiedStr += " via API Key"
			} else {
				modifiedStr += " by " + truncate(res.server.ChangeInfo.ModifiedBy, 6)
			}
			// Append a tilde (~) to indicate it has snapshots
			serverName := res.server.Name
			if len(res.server.Details.Snapshots) > 0 {
				serverName += " ~"
			}
			table.Append([]string{
				serverName, res.group.Name, truncate(desc, 30), truncate(res.server.OsType, 15),
				strings.Join(IPs, " "),
				fmt.Sprint(res.server.Details.Cpu), fmt.Sprintf("%d G", res.server.Details.MemoryMb/1024),
				fmt.Sprintf("%d G", res.server.Details.StorageGb),
				status, modifiedStr,
			})
		}
		table.Render()
	}
}
|
package connrt
import (
"errors"
"github.com/golang/mock/gomock"
"github.com/gookit/event"
"github.com/kbence/conndetect/internal/connlib"
"github.com/kbence/conndetect/internal/connlib_mock"
"github.com/kbence/conndetect/internal/ext_mock"
. "gopkg.in/check.v1"
)
// Register the suite with gocheck so `go test` runs its Test* methods.
var _ = Suite(&ConnectionReaderTestSuite{})

// ConnectionReaderTestSuite groups the ConnectionReader unit tests.
type ConnectionReaderTestSuite struct{}
// TestNewConnectionReaderReturnsError verifies that the constructor
// propagates the error from the initial connection-table read.
func (s *ConnectionReaderTestSuite) TestNewConnectionReaderReturnsError(c *C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()
	eventManagerMock := ext_mock.NewMockManagerFace(ctrl)
	connSrcMock := connlib_mock.NewMockConnectionSource(ctrl)
	expectedError := errors.New("some error")
	// The source fails on the first read, so construction must fail too.
	connSrcMock.
		EXPECT().
		ReadEstablishedTCPConnections("/path/to/tcp").
		Return(nil, expectedError)
	_, err := NewConnectionReader(eventManagerMock, "/path/to/tcp", connSrcMock)
	c.Check(err, Equals, expectedError)
}
// TestConnectionReader verifies that a connection appearing between two
// reads is fired as a "newConnection" event on a tick.
func (s *ConnectionReaderTestSuite) TestConnectionReader(c *C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()
	eventManagerMock := ext_mock.NewMockManagerFace(ctrl)
	connSrcMock := connlib_mock.NewMockConnectionSource(ctrl)
	connections := connlib.ConnectionList{
		connlib.Connection{
			Local:  connlib.Endpoint{IP: connlib.IPv4Address{1, 2, 3, 4}, Port: 45678},
			Remote: connlib.Endpoint{IP: connlib.IPv4Address{5, 6, 7, 8}, Port: 443},
		},
	}
	expectedConnection := connlib.DirectionalConnection{
		Source:      connections[0].Local,
		Destination: connections[0].Remote,
	}
	// First read (presumably consumed by the constructor) sees no
	// connections; the second (triggered by Handle) sees one new one.
	// TODO confirm the constructor performs the first read.
	connSrcMock.
		EXPECT().
		ReadEstablishedTCPConnections("/path/to/tcp").
		Return(&connlib.CategorizedConnections{}, nil)
	connSrcMock.
		EXPECT().
		ReadEstablishedTCPConnections("/path/to/tcp").
		Return(&connlib.CategorizedConnections{Established: connections}, nil)
	eventManagerMock.EXPECT().On("tick", gomock.Any())
	eventManagerMock.EXPECT().Fire("newConnection", event.M{"connection": expectedConnection})
	reader, _ := NewConnectionReader(eventManagerMock, "/path/to/tcp", connSrcMock)
	reader.connectionSource = connSrcMock
	err := reader.Handle(event.NewBasic(eventTick, event.M{}))
	c.Check(err, IsNil)
}
|
package main
import (
inet "cm_liveme_im/libs/net"
"cm_liveme_im/libs/proto"
"net"
"net/rpc"
pb "github.com/golang/protobuf/proto"
log "github.com/thinkboy/log4go"
)
// InitRPC registers the RPC service and starts one listener goroutine per
// configured RPC address. It returns an error when registration fails or
// when an address cannot be parsed.
func InitRPC(auther Auther) (err error) {
	var (
		network, addr string
		c             = &RPC{auther: auther}
	)
	// rpc.Register can fail (e.g. duplicate registration, no suitable
	// methods); the old code silently discarded that error.
	if err = rpc.Register(c); err != nil {
		log.Error("rpc.Register() error(%v)", err)
		return
	}
	for i := 0; i < len(Conf.RPCAddrs); i++ {
		log.Info("start listen rpc addr: \"%s\"", Conf.RPCAddrs[i])
		if network, addr, err = inet.ParseNetwork(Conf.RPCAddrs[i]); err != nil {
			log.Error("inet.ParseNetwork() error(%v)", err)
			return
		}
		// Each listener serves for the lifetime of the process.
		go rpcListen(network, addr)
	}
	return
}
// rpcListen binds addr on the given network and serves RPC connections
// until the process exits; a failed bind is fatal (panic).
func rpcListen(network, addr string) {
	listener, err := net.Listen(network, addr)
	if err != nil {
		log.Error("net.Listen(\"%s\", \"%s\") error(%v)", network, addr, err)
		panic(err)
	}
	// if process exit, then close the rpc bind
	defer func() {
		log.Info("rpc addr: \"%s\" close", addr)
		if cerr := listener.Close(); cerr != nil {
			log.Error("listener.Close() error(%v)", cerr)
		}
	}()
	rpc.Accept(listener)
}
// RPC exposes the router's RPC methods; auther validates uid/token pairs
// during Connect.
type RPC struct {
	auther Auther
}

// Ping is a no-op health check used to verify the RPC link is alive.
func (r *RPC) Ping(arg *proto.NoArg, reply *proto.NoReply) error {
	return nil
}
// Connect auth and registe login
// Connect authenticates the user and registers its login with the router.
// NOTE(review): a failed auth (ok == false) currently returns a nil error
// with reply.Succ left false — confirm callers rely on that.
func (r *RPC) Connect(arg *proto.AuthInfoOp, reply *proto.OpReply) (err error) {
	if arg == nil {
		err = ErrConnectArgs
		log.Error("Connect() error(%v)", err)
		return
	}
	ok, err := r.auther.Auth(arg.Uid, arg.Token)
	if err != nil {
		log.Error("auth error %v", err)
		return
	}
	if !ok {
		log.Warn("auth failed")
		return
	}
	// Fixed log typo: "redister" -> "register".
	if reply.Succ, err = Rsv.AddUserRouter(arg.Uid, arg.ServerId); err == nil {
		log.Info("register user ok[uid:%s serverid:%d]", arg.Uid, arg.ServerId)
	} else {
		log.Error("register user err[uid:%s serverid:%d] error:%v", arg.Uid, arg.ServerId, err)
	}
	return
}
// Disconnect notice router offline
// Disconnect notifies the router that a user went offline so its routing
// entry can be removed.
func (r *RPC) Disconnect(arg *proto.DisConnOp, reply *proto.OpReply) (err error) {
	if arg == nil {
		err = ErrDisconnectArgs
		log.Error("Disconnect() error(%v)", err)
		return
	}
	reply.Succ, err = Rsv.DelUserRouter(arg.Uid, arg.ServerId)
	if err != nil {
		log.Error("del user err[uid:%s serverid:%d] error:%v", arg.Uid, arg.ServerId, err)
		return
	}
	log.Info("del user ok[uid:%s serverid:%d]", arg.Uid, arg.ServerId)
	return
}
// OPRoom applies a room enter/leave notification to the router state.
// NOTE(review): reuses ErrDisconnectArgs for its nil-arg error, and returns
// a nil error (reply.Succ == false) when Type/Uid/RoomId are nil — confirm
// this is intended.
func (r *RPC) OPRoom(arg *proto.RoomDoorBellOp, reply *proto.OpReply) (err error) {
	if arg == nil {
		err = ErrDisconnectArgs
		log.Error("OPRoom error(%v)", err)
		return
	}
	var act string
	if arg.Type == nil {
		log.Error("arg.Type == nil ")
		return
	}
	if arg.Uid == nil || arg.RoomId == nil {
		log.Error("arg.Uid == nil | arg.RoomId == nil")
		return
	}
	// Any Type other than ENTER/LEAVE falls through with act == "" and
	// reply.Succ == false.
	if *arg.Type == proto.RoomEvent_ENTER {
		reply.Succ, err = Rsv.EnterRoom(*arg.Uid, arg.ServerId, *arg.RoomId)
		act = "enter"
	} else if *arg.Type == proto.RoomEvent_LEAVE {
		reply.Succ, err = Rsv.ExitRoom(*arg.Uid, arg.ServerId, *arg.RoomId)
		act = "exit"
	}
	// NOTE(review): the failure branch drops err from the log message.
	if reply.Succ {
		log.Info("uid %s from server %d %s room %s", *arg.Uid, arg.ServerId, act, *arg.RoomId)
	} else {
		log.Error("uid %s from server %d %s room %s", *arg.Uid, arg.ServerId, act, *arg.RoomId)
	}
	return
}
// SendMsg dispatches a message to either a room or a single user depending
// on its route type.
// NOTE(review): an unknown route type leaves err nil, so reply.Succ is set
// to true — confirm whether that should be an error instead.
func (r *RPC) SendMsg(arg *proto.MsgOp, reply *proto.OpReply) (err error) {
	if arg == nil {
		err = ErrDisconnectArgs
		log.Error("SendRoomMsg arg is nil error(%v)", err)
		return
	}
	log.Info("sendmsg to ...")
	if arg.RouteType == proto.MsgRouteType_ROOM {
		log.Info("room")
		err = r.SendRMsg(arg.From, arg.To, arg.UnMarshalledMsg)
	} else if arg.RouteType == proto.MsgRouteType_PERSONAL {
		log.Info("personal")
		err = r.SendPMsg(arg.From, arg.To, arg.UnMarshalledMsg)
	} else {
		// Fixed log typo: "nerther" -> "neither".
		log.Error("err type for msg (neither room nor personal)")
	}
	if err != nil {
		// Include the actual error; the old message carried no detail.
		log.Error("rpc sendmsg error(%v)", err)
	} else {
		reply.Succ = true
		log.Info("rpc send msg ok")
	}
	return
}
// SendRMsg broadcasts a room message via Kafka and, when the Rong bridge is
// enabled, forwards the decoded payload to the Rong service as well.
func (r *RPC) SendRMsg(fromid, roomid string, msg []byte) (err error) {
	// send msg to kafka
	// sureflag: when the msg chan is full, wait (true) or discard (false).
	sureflag := false
	if err = broadcastRoomKafka(roomid, msg, sureflag); err != nil {
		// Fixed format verbs: %t for the bool (was %d), %v for the error (was %s).
		log.Error("broadcastRoomKafka(\"%s\",\"%s\",\"%t\") error(%v)", roomid, string(msg), sureflag, err)
		return
	}
	if Conf.RongSwitch {
		pbmsg := &proto.Msg{}
		if err = pb.Unmarshal(msg, pbmsg); err != nil {
			log.Warn("Failed to use protobuf to unmarshall a Msg message from client")
			return
		}
		keys := []string{roomid}
		// Distinct name so the raw []byte parameter msg is not shadowed.
		rongMsg := &RongMsg{Uid: fromid, Objname: RONG_MSG_ZAN, Rooms: keys, Content: string(pbmsg.GetMsg())}
		Rongserv.PutMsg(rongMsg)
	}
	return
}
// SendPMsg routes a personal message: it resolves the recipient's comet
// server and pushes the payload to Kafka keyed by the recipient uid.
func (r *RPC) SendPMsg(fromuid, touid string, msg []byte) (err error) {
	serverId, err := Rsv.FindRouter(touid)
	if err != nil {
		log.Error("FindRouter %s error(%v)", touid, err)
		return
	}
	// send msg to kafka
	keys := []string{touid}
	if err = mpushKafka(serverId, keys, msg); err != nil {
		log.Error("to %s msg:%s mpushtokafka error %v", touid, string(msg), err)
	}
	return
}
|
package handler
// Message routing modes (gofmt-normalized; values unchanged).
const (
	group  = 1 // addressed to a group
	single = 2 // addressed to a single recipient
)
package db
import (
"database/sql"
"sync"
"github.com/textileio/go-textile/pb"
"github.com/textileio/go-textile/repo"
)
// ThreadPeerDB persists pb.ThreadPeer rows (peer id, thread id, welcomed
// flag) behind the shared *sql.DB and mutex supplied by modelStore.
type ThreadPeerDB struct {
	modelStore
}

// NewThreadPeerStore wires a ThreadPeerDB to the given handle and lock.
func NewThreadPeerStore(db *sql.DB, lock *sync.Mutex) repo.ThreadPeerStore {
	return &ThreadPeerDB{modelStore{db, lock}}
}

// Add inserts a peer with welcomed=false. The transaction is rolled back on
// every failure path (the old code leaked it when Prepare failed).
func (c *ThreadPeerDB) Add(peer *pb.ThreadPeer) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	tx, err := c.db.Begin()
	if err != nil {
		return err
	}
	stm := `insert into thread_peers(id, threadId, welcomed) values(?,?,?)`
	stmt, err := tx.Prepare(stm)
	if err != nil {
		log.Errorf("error in tx prepare: %s", err)
		_ = tx.Rollback()
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec(
		peer.Id,
		peer.Thread,
		false,
	)
	if err != nil {
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}

// List returns every thread peer.
func (c *ThreadPeerDB) List() []pb.ThreadPeer {
	c.lock.Lock()
	defer c.lock.Unlock()
	return c.handleQuery("select * from thread_peers;")
}

// ListById returns all rows for one peer id. Parameterized to close the
// SQL-injection hole of the old string-concatenated query.
func (c *ThreadPeerDB) ListById(id string) []pb.ThreadPeer {
	c.lock.Lock()
	defer c.lock.Unlock()
	return c.handleQuery("select * from thread_peers where id=?;", id)
}

// ListByThread returns all peers of one thread (parameterized, see ListById).
func (c *ThreadPeerDB) ListByThread(threadId string) []pb.ThreadPeer {
	c.lock.Lock()
	defer c.lock.Unlock()
	return c.handleQuery("select * from thread_peers where threadId=?;", threadId)
}

// ListUnwelcomedByThread returns the not-yet-welcomed peers of one thread
// (parameterized, see ListById).
func (c *ThreadPeerDB) ListUnwelcomedByThread(threadId string) []pb.ThreadPeer {
	c.lock.Lock()
	defer c.lock.Unlock()
	return c.handleQuery("select * from thread_peers where threadId=? and welcomed=0;", threadId)
}

// WelcomeByThread marks every peer of the thread as welcomed.
func (c *ThreadPeerDB) WelcomeByThread(threadId string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	_, err := c.db.Exec("update thread_peers set welcomed=1 where threadId=?", threadId)
	return err
}

// Count returns the number of rows; with distinct=true, the number of
// distinct peer ids.
func (c *ThreadPeerDB) Count(distinct bool) int {
	c.lock.Lock()
	defer c.lock.Unlock()
	var stm string
	if distinct {
		stm = "select Count(distinct id) from thread_peers;"
	} else {
		stm = "select Count(*) from thread_peers;"
	}
	row := c.db.QueryRow(stm)
	var count int
	// Scan error deliberately ignored: count stays 0 on failure.
	_ = row.Scan(&count)
	return count
}

// Delete removes one (peer, thread) membership row.
func (c *ThreadPeerDB) Delete(id string, threadId string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	_, err := c.db.Exec("delete from thread_peers where id=? and threadId=?", id, threadId)
	return err
}

// DeleteById removes a peer from every thread.
func (c *ThreadPeerDB) DeleteById(id string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	_, err := c.db.Exec("delete from thread_peers where id=?", id)
	return err
}

// DeleteByThread removes every peer of a thread.
func (c *ThreadPeerDB) DeleteByThread(threadId string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	_, err := c.db.Exec("delete from thread_peers where threadId=?", threadId)
	return err
}

// handleQuery runs stm with the given bind args and scans the rows into
// ThreadPeer values. Returns nil on query error. Now closes rows and checks
// rows.Err(), both missing before.
func (c *ThreadPeerDB) handleQuery(stm string, args ...interface{}) []pb.ThreadPeer {
	var list []pb.ThreadPeer
	rows, err := c.db.Query(stm, args...)
	if err != nil {
		log.Errorf("error in db query: %s", err)
		return nil
	}
	defer rows.Close()
	for rows.Next() {
		var id, threadId string
		var welcomedInt int
		if err := rows.Scan(&id, &threadId, &welcomedInt); err != nil {
			log.Errorf("error in db scan: %s", err)
			continue
		}
		list = append(list, pb.ThreadPeer{
			Id:       id,
			Thread:   threadId,
			Welcomed: welcomedInt == 1,
		})
	}
	if err := rows.Err(); err != nil {
		log.Errorf("error in db rows: %s", err)
	}
	return list
}
|
package config
// DatabaseConfig holds database connection settings; field names map 1:1
// onto the JSON configuration file.
type DatabaseConfig struct {
	User string `json:"user"`
	Password string `json:"password"`
	Host string `json:"host"`
	Port string `json:"port"`
	DbName string `json:"db_name"`
	Charset string `json:"charset"`
}

// C is the package-wide config instance; presumably populated by a loader
// elsewhere — starts out zero-valued. TODO confirm who fills it.
var C = &DatabaseConfig{}
package main
import "fmt"
// Builder abstracts the construction steps of a building.
type Builder interface {
	BuildFoundation() string
	BuildLevels() string
}

// FlatBuilder produces apartment blocks: small foundation, many levels.
type FlatBuilder struct{}

// HouseBuilder produces family houses: large foundation, one level.
type HouseBuilder struct{}

// BuildFoundation describes the flat's foundation.
func (builder FlatBuilder) BuildFoundation() string { return "small" }

// BuildLevels describes the flat's level count.
func (builder FlatBuilder) BuildLevels() string { return "many" }

// BuildFoundation describes the house's foundation.
func (builder HouseBuilder) BuildFoundation() string { return "large" }

// BuildLevels describes the house's level count.
func (builder HouseBuilder) BuildLevels() string { return "one" }

// EngineerDirector drives any Builder through the construction sequence.
type EngineerDirector struct{}

// ConstructBuilding runs the build steps in order and describes the result.
func (engineer EngineerDirector) ConstructBuilding(builder Builder) string {
	return builder.BuildFoundation() + " foundation and " + builder.BuildLevels() + " levels"
}
// main demonstrates the builder pattern with both concrete builders.
func main() {
	director := EngineerDirector{}
	fmt.Println(director.ConstructBuilding(FlatBuilder{}))  // small foundation and many levels
	fmt.Println(director.ConstructBuilding(HouseBuilder{})) // large foundation and one levels
}
|
// Copyright (c) 2019 bketelsen
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
package lxd
import "time"
// ConnectionCreated is the event emitted when a client connection is made.
type ConnectionCreated struct {
	conn *Client
	StartTime time.Time
}

// Name identifies the event by the connected client's URL.
func (e ConnectionCreated) Name() string {
	return e.conn.URL
}

// Created reports when the connection was established.
func (e ConnectionCreated) Created() time.Time {
	return e.StartTime
}

// NewConnectionCreated wraps conn in an event stamped with the current time.
func NewConnectionCreated(conn *Client) *ConnectionCreated {
	e := &ConnectionCreated{
		conn: conn,
		StartTime: time.Now(),
	}
	return e
}
// State is a lifecycle phase reported in lxd events.
type State string

// Lifecycle states used by the event types below.
const (
	Creating     State = "creating"
	Created      State = "created"
	Starting     State = "starting"
	Started      State = "started"
	Stopped      State = "stopped"
	Stopping     State = "stopping"
	Completed    State = "completed"
	Removing     State = "removing"
	Removed      State = "removed"
	Provisioning State = "provisioning"
	Provisioned  State = "provisioned"
)

// ContainerState is an event describing a container entering a state.
type ContainerState struct {
	ContainerState State
	ContainerName  string
	StartTime      time.Time
}

// Name renders the event as "<container>\t<state>".
func (e ContainerState) Name() string {
	label := e.ContainerName + "\t"
	return label + string(e.ContainerState)
}

// Created reports when the event was emitted.
func (e ContainerState) Created() time.Time {
	return e.StartTime
}

// NewContainerState builds a ContainerState stamped with the current time.
func NewContainerState(name string, state State) *ContainerState {
	return &ContainerState{
		ContainerState: state,
		ContainerName:  name,
		StartTime:      time.Now(),
	}
}
// ExecState is an event describing a command execution phase in a container.
type ExecState struct {
	CommandState  State
	Command       string
	ContainerName string
	StartTime     time.Time
}

// Name renders the event as "<container>\t<state>\t<command>".
func (e ExecState) Name() string {
	prefix := e.ContainerName + "\t" + string(e.CommandState)
	return prefix + "\t" + e.Command
}

// Created reports when the event was emitted.
func (e ExecState) Created() time.Time {
	return e.StartTime
}

// NewExecState builds an ExecState stamped with the current time.
func NewExecState(name string, command string, state State) *ExecState {
	return &ExecState{
		CommandState:  state,
		Command:       command,
		ContainerName: name,
		StartTime:     time.Now(),
	}
}
// CopyState is an event describing a file-copy phase for a container.
type CopyState struct {
	OperationState State
	File           string
	ContainerName  string
	StartTime      time.Time
}

// Name renders the event as "<container>\t<state>\t<file>".
func (e CopyState) Name() string {
	prefix := e.ContainerName + "\t" + string(e.OperationState)
	return prefix + "\t" + e.File
}

// Created reports when the event was emitted.
func (e CopyState) Created() time.Time {
	return e.StartTime
}

// NewCopyState builds a CopyState stamped with the current time.
func NewCopyState(name string, file string, state State) *CopyState {
	return &CopyState{
		OperationState: state,
		File:           file,
		ContainerName:  name,
		StartTime:      time.Now(),
	}
}
|
package main
import (
_ "ml"
"ml/console"
. "fmt"
"goqml"
// _ "./resource"
)
// run loads a QML demo file into a fresh engine, shows its window and
// blocks until the window closes.
func run() error {
	engine := qml.NewEngine()
	// NOTE(review): absolute Windows path to a Qt 5.5 example — parameterize
	// or embed the resource before shipping.
	component, err := engine.LoadFile(`D:\Dev\Library\Qt\Examples\Qt-5.5\quick\demos\stocqt\stocqt.qml`)
	if err != nil {
		return err
	}
	window := component.CreateWindow(nil)
	window.Show()
	window.Wait()
	return nil
}
// main runs the QML app on the qml runtime's main thread; on failure it
// prints the error and waits for a key press (Printf comes from the
// dot-imported fmt package).
func main() {
	if err := qml.Run(run); err != nil {
		Printf("error: %v\n", err)
		console.Pause("done")
	}
}
|
package other
import "fmt"
/**
@desc
KMP 字符串匹配算法
*/
/**
计算字符串 s 对应的前缀表
计算规则:
例: ababc
分解: a -> 前后无一致的为 0
a b -> 前后无一致的为 0
a b a -> 第一位和最后一位一致为 1
a b a b -> 前两位和最后两位一致为 2
a b a b c -> 前后无一致的为 0
答案:[0, 0, 1, 2, 0] 为了方便进行 KMP 计算,将数组往右移一位,最左位赋值为 -1 即变成 [-1, 0, 0, 1, 2]
提示:可以使用动态规划
*/
// PrefixTable computes the KMP failure table for s, already shifted one slot
// to the right with -1 in front (see the comment above): for "ababc" the
// prefix function is [0 0 1 2 0] and the result is [-1 0 0 1 2].
// The previous version used a single-step fallback (compare with s[count],
// else with s[0]), which is wrong for patterns such as "aabaaa", and it
// panicked on an empty string via make([]int, -1). This is the standard
// iterative prefix-function recurrence.
func PrefixTable(s string) []int {
	n := len(s)
	if n == 0 {
		return []int{}
	}
	// pi[i] = length of the longest proper prefix of s[:i+1] that is also
	// a suffix of it.
	pi := make([]int, n)
	for i := 1; i < n; i++ {
		k := pi[i-1]
		// Fall back through shorter borders until one extends (or none do).
		for k > 0 && s[i] != s[k] {
			k = pi[k-1]
		}
		if s[i] == s[k] {
			k++
		}
		pi[i] = k
	}
	// Shift right by one, dropping the last entry and inserting the -1
	// sentinel — the same transformation MovePrefixTable performs.
	shifted := make([]int, n)
	shifted[0] = -1
	copy(shifted[1:], pi[:n-1])
	return shifted
}
/*
MovePrefixTable shifts the table one slot to the right in place (dropping
the last value) and writes the -1 sentinel into slot 0, e.g.
[0, 0, 1, 2, 0] becomes [-1, 0, 0, 1, 2]. The input slice is mutated and
returned.
*/
func MovePrefixTable(prefixTable []int) []int {
	// copy has memmove semantics, so the overlapping right-shift is safe.
	copy(prefixTable[1:], prefixTable[:len(prefixTable)-1])
	prefixTable[0] = -1
	return prefixTable
}
/*
KMPSearch returns the start index of every occurrence of pattern in text,
scanning with the shifted prefix table produced by PrefixTable.
*/
func KMPSearch(text string, pattern string) []int {
	tp := 0 // cursor into text
	pp := 0 // cursor into pattern
	prefixTable := PrefixTable(pattern)
	// Collected match start positions.
	var idxArr []int
	// Continue while the remaining text can still contain the rest of the pattern.
	for len(text)-tp-(len(pattern)-pp) >= 0 {
		// NOTE(review): debug output in a library routine; kept because fmt
		// would otherwise be unused in this file — consider removing both.
		fmt.Printf("TP: %d, PP: %d, len(idxArr): %d\n", tp, pp, len(idxArr))
		// On a match advance both cursors together.
		if text[tp] == pattern[pp] {
			tp++
			pp++
		} else {
			// On a mismatch keep tp and fall pp back via the prefix table.
			pp = prefixTable[pp]
		}
		// pp == -1 means no border matched: step the text cursor forward and
		// restart the pattern from its beginning.
		if pp == -1 {
			tp++
			pp = 0
		} else if pp == len(pattern) {
			// Whole pattern matched: record the start index of the match...
			idxArr = append(idxArr, tp-pp)
			// ...and resume one position after that start (finds overlaps).
			tp = tp - pp + 1
			pp = 0
		}
	}
	return idxArr
}
// GetNext computes the KMP "next" (failure) table for s with next[0] = -1;
// next[j] is the position the pattern cursor falls back to after a mismatch
// at j. Fixes two defects in the old version: it wrote next[j+1] after j was
// already incremented (an off-by-one producing wrong fallbacks, e.g.
// [-1 0 0 0 1] instead of [-1 0 0 1 2] for "abab"), and its loop bound
// j < len(s)-1 left the final entry unset.
func GetNext(s string) []int {
	next := make([]int, len(s)+1)
	next[0] = -1
	k := -1 // index of the border character being extended (-1: none)
	j := 0
	for j < len(s) {
		// k == -1: no border to extend; otherwise extend when s[j] == s[k].
		if k == -1 || s[j] == s[k] {
			j++
			k++
			next[j] = k
		} else {
			// Mismatch: fall back to the next shorter border of s[:k].
			k = next[k]
		}
	}
	return next
}
// Kmp returns the index of the first occurrence of p in s using the
// supplied next table (as produced by GetNext), or -1 when p is absent.
func Kmp(s, p string, next []int) int {
	si, pj := 0, 0
	for si < len(s) && pj < len(p) {
		if pj == -1 || s[si] == p[pj] {
			si++
			pj++
			continue
		}
		// Mismatch: fall the pattern cursor back via the next table.
		pj = next[pj]
	}
	if pj == len(p) {
		return si - pj
	}
	return -1
}
|
package solcast
import (
solcast "github.com/Siliconrob/solcast-go/solcast"
datatypes "github.com/Siliconrob/solcast-go/solcast/types"
"github.com/jimlawless/whereami"
"github.com/stretchr/testify/assert"
"log"
"testing"
"math"
)
// Shared fixtures: a lat/lng test point and a 1000-capacity system there.
var radiationLocation = datatypes.LatLng{Longitude: -97, Latitude: 32}
var powerLocation = datatypes.PowerLatLng{Capacity: 1000, LatLng: radiationLocation}

// Expected record counts at the API's default 30-minute period.
const forecastCount = 7 * 24 * (60/30) // 7 days * 24 hours * default period of 30 minutes
const actualsCount = 6.583 * 24 * (60/30) // 6.583 * 24 hours * default period of 30 minutes
// NOTE(review): these tests call the live Solcast API over the network, so
// they are integration tests and will fail offline or when quota-limited.

// TestRadiationForecast checks the radiation forecast returns exactly the
// expected number of half-hourly records.
func TestRadiationForecast(t *testing.T) {
	result := solcast.RadiationForecast(radiationLocation)
	log.Printf("%v", whereami.WhereAmI())
	recordCount := len(result.Forecasts)
	assert.Equal(t, recordCount, forecastCount, "Radiation forecast count should be %v", forecastCount)
}

// TestRadiationEstimatedActuals checks at least the expected number of
// estimated-actual radiation records are returned.
func TestRadiationEstimatedActuals(t *testing.T) {
	result := solcast.RadiationEstimatedActuals(radiationLocation)
	log.Printf("%v", whereami.WhereAmI())
	recordCount := len(result.EstimatedActuals)
	refCount := int(math.Ceil(actualsCount))
	assert.True(t, recordCount >= refCount, " Radiation estimated actuals count should be %v", actualsCount)
}

// TestPowerForecast checks the power forecast returns exactly the expected
// number of half-hourly records.
func TestPowerForecast(t *testing.T) {
	result := solcast.PowerForecast(powerLocation)
	log.Printf("%v", whereami.WhereAmI())
	recordCount := len(result.Forecasts)
	assert.Equal(t, recordCount, forecastCount, " Power forecast count should be %v", forecastCount)
}

// TestPowerEstimatedActuals checks at least the expected number of
// estimated-actual power records are returned.
func TestPowerEstimatedActuals(t *testing.T) {
	result := solcast.PowerEstimatedActuals(powerLocation)
	log.Printf("%v", whereami.WhereAmI())
	recordCount := len(result.EstimatedActuals)
	refCount := int(math.Ceil(actualsCount))
	assert.True(t, recordCount >= refCount, " Power estimated actuals count should be %v", actualsCount)
}
|
package eventsourcing
import (
"fmt"
"log"
"reflect"
"strings"
"github.com/alexandervantrijffel/gonats/eventsourcing/contracts"
proto "github.com/golang/protobuf/proto"
)
// AggregateCommon is the minimal contract for event-sourced aggregates:
// identity plus the ability to apply a state-changing event.
type AggregateCommon interface {
	ID() string
	HandleStateChange(interface{})
}

// AggregateCommonImpl supplies the shared ID plumbing for aggregates.
type AggregateCommonImpl struct {
	IdImpl string
	Repository Repository
}

// ID returns the aggregate's identifier.
func (a *AggregateCommonImpl) ID() string {
	return a.IdImpl
}
// serialize wraps a proto event in an EventEnvelope carrying its type name,
// so Deserialize can later reconstruct the concrete type.
// Returns an error (instead of panicking, as the old unchecked type
// assertion did) when event does not implement proto.Message.
func serialize(event interface{}) (*eventsourcingcontracts.EventEnvelope, error) {
	msg, ok := event.(proto.Message)
	if !ok {
		return nil, fmt.Errorf("event of type %T does not implement proto.Message", event)
	}
	eventBytes, err := proto.Marshal(msg)
	if err != nil {
		return nil, err
	}
	return &eventsourcingcontracts.EventEnvelope{
		TypeName:    getTypeName(event),
		MessageData: eventBytes,
	}, nil
}
// getTypeName reports the dynamic type of obj with any pointer stars
// stripped, e.g. *pkg.Event -> "pkg.Event".
func getTypeName(obj interface{}) string {
	name := fmt.Sprintf("%T", obj)
	return strings.Replace(name, "*", "", -1)
}
// Deserialize decodes an EventEnvelope and reconstructs the concrete proto
// event it wraps via the proto type registry.
// All decode steps are now checked: the old code ignored both Unmarshal
// errors and a nil MessageType lookup (which panicked on t.Elem()).
func Deserialize(eventEnvelopeBytes []byte) (eventEnvelope *eventsourcingcontracts.EventEnvelope, event interface{}, err error) {
	envelope := &eventsourcingcontracts.EventEnvelope{}
	if err = proto.Unmarshal(eventEnvelopeBytes, envelope); err != nil {
		return
	}
	eventEnvelope = envelope
	t := proto.MessageType(eventEnvelope.TypeName)
	if t == nil {
		err = fmt.Errorf("no proto message registered for EventEnvelope.TypeName %s", eventEnvelope.TypeName)
		return
	}
	e := reflect.New(t.Elem())
	e2, isProtoMessage := e.Interface().(proto.Message)
	if !isProtoMessage {
		err = fmt.Errorf(
			"Event with EventEnvelope.TypeName %s and type %T can not be casted to type proto.Message",
			eventEnvelope.TypeName, e)
		return
	}
	if err = proto.Unmarshal(eventEnvelope.MessageData, e2); err != nil {
		return
	}
	event = e2
	return
}
// GetStreamName derives the event-stream name "<Type>.<id>" from the
// aggregate's type (package prefix dropped) and its ID.
func GetStreamName(aggregate AggregateCommon) string {
	parts := strings.Split(getTypeName(aggregate), ".")
	shortName := parts[len(parts)-1]
	return shortName + "." + aggregate.ID()
}
// checkErr logs desc together with err when err is non-nil and reports
// whether an error occurred.
func checkErr(err error, desc string) bool {
	if err == nil {
		return false
	}
	log.Println(desc+" FAILED, reason ", err)
	return true
}
|
package blocks
import (
"bytes"
"crypto/sha256"
"cryptom/internal"
"fmt"
"math"
"math/big"
)
// Proof-of-work difficulty parameters.
const (
	targetBits = 16 // arbitrary number, 24 will work for staging or prod (bigger is more difficult)
	maxBits = 256 // width of the SHA-256 hash in bits
	maxNonce = math.MaxInt64 // upper bound for the nonce search
)

// ProofOfWork pairs a block with the numeric target its hash must be below.
type ProofOfWork struct {
	Block *Block
	target *big.Int
}
// NewPow builds a ProofOfWork whose target is 1 << (maxBits - targetBits);
// any hash interpreting as an integer below it is a valid proof.
func NewPow(block *Block) *ProofOfWork {
	target := new(big.Int).Lsh(big.NewInt(1), uint(maxBits-targetBits))
	return &ProofOfWork{block, target}
}
// prepareData builds the byte string hashed for the given nonce: previous
// hash, raw data, transaction hash, timestamp, difficulty and nonce,
// concatenated with no separator.
func (pow *ProofOfWork) prepareData(nonce int) []byte {
	return bytes.Join(
		[][]byte{
			pow.Block.PrevBlockHash,
			pow.Block.Data,
			pow.Block.HashTransactions(),
			internal.IntToHex(pow.Block.Timestamp),
			internal.IntToHex(int64(targetBits)),
			internal.IntToHex(int64(nonce)),
		},
		[]byte{},
	)
}
// Run mines the block: it increments the nonce until the SHA-256 of
// prepareData(nonce) falls below the target (or maxNonce is exhausted),
// returning the winning nonce and hash. Progress is printed to stdout.
func (pow *ProofOfWork) Run() (int, []byte) {
	var hashInt big.Int
	var hash [32]byte
	var nonce int
	fmt.Printf("Mining the block containing \"%s\"\n", pow.Block.Data)
	for nonce < maxNonce {
		data := pow.prepareData(nonce)
		hash = sha256.Sum256(data)
		// \r overwrites the previous candidate hash on the same line.
		fmt.Printf("\r%x", hash)
		hashInt.SetBytes(hash[:])
		// Cmp == -1 means hash < target, i.e. a valid proof.
		if hashInt.Cmp(pow.target) == -1 {
			break
		} else {
			nonce++
		}
	}
	fmt.Print("\n\n")
	return nonce, hash[:]
}
// Validate recomputes the hash for the block's stored nonce and checks that
// it is below the proof-of-work target.
func (pow *ProofOfWork) Validate() bool {
	hash := sha256.Sum256(pow.prepareData(pow.Block.Nonce))
	var hashInt big.Int
	hashInt.SetBytes(hash[:])
	return hashInt.Cmp(pow.target) < 0
}
|
package conversion
import (
"fmt"
"testing"
)
// TestIdUtils round-trips a 16-bit hex color through RGBA and back.
// NOTE(review): this test only prints and asserts nothing, so it passes
// even if the conversion is wrong — add assertions on the round-tripped
// value (e.g. compare b against the original "#FF708AF0").
func TestIdUtils(t *testing.T) {
	c := Color16BitToRGBA("#FF708AF0")
	fmt.Println(c.R, c.G, c.B, c.A)
	b := RGBAToColor16Bit(c)
	fmt.Println(b)
}
|
package types
// Pagination carries the navigation references of a paginated list.
// The string format (cursor vs. URL) is decided by whoever fills these
// fields — TODO confirm against the handlers that build responses.
type Pagination struct {
	Next string `json:"next"`
	Previous string `json:"previous"`
}

// PaginatedResponse is the standard envelope for paginated list responses:
// the page's payload plus its navigation references.
type PaginatedResponse struct {
	Data interface{} `json:"data"`
	Pagination Pagination `json:"pagination"`
}
|
package main
import "fmt"
// generateParenthesis returns every well-formed string of n pairs of
// parentheses, e.g. n=2 -> ["(())", "()()"].
func generateParenthesis(n int) []string {
	res := []string{}
	genParenthesis("", n, n, &res)
	return res
}

// genParenthesis grows item one bracket at a time; left and right count the
// '(' and ')' characters still available. Trying '(' first whenever any
// remain, and ')' only while more ')' than '(' are left, produces exactly
// the valid sequences, in deterministic order.
func genParenthesis(item string, left int, right int, res *[]string) {
	if left == 0 && right == 0 {
		// Both budgets spent: item is a complete valid sequence.
		*res = append(*res, item)
		return
	}
	if left > 0 {
		genParenthesis(item+"(", left-1, right, res)
	}
	if left < right {
		genParenthesis(item+")", left, right-1, res)
	}
}
// main prints the valid parenthesis combinations for n = 2.
func main() {
	combos := generateParenthesis(2)
	fmt.Println(combos)
}
|
/*
Copyright 2020 Humio https://humio.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helpers
import (
"context"
"fmt"
"github.com/google/martian/log"
humioapi "github.com/humio/cli/api"
humiov1alpha1 "github.com/humio/humio-operator/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
"net/url"
"strings"
"github.com/humio/humio-operator/pkg/kubernetes"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// ClusterInterface abstracts a managed or external Humio cluster so callers
// can resolve its URL and API client configuration uniformly.
type ClusterInterface interface {
	Url(context.Context, client.Client) (*url.URL, error)
	Name() string
	Config() *humioapi.Config
	constructHumioConfig(context.Context, client.Client, bool) (*humioapi.Config, error)
}

// Cluster references exactly one of a managed (operator-owned) or external
// Humio cluster within a namespace; humioConfig is built once in NewCluster.
type Cluster struct {
	managedClusterName string
	externalClusterName string
	namespace string
	certManagerEnabled bool
	withAPIToken bool
	humioConfig *humioapi.Config
}
// NewCluster validates the cluster reference (exactly one of managed or
// external name, non-empty namespace), builds the Humio API config eagerly
// and returns the ready-to-use cluster handle.
func NewCluster(ctx context.Context, k8sClient client.Client, managedClusterName, externalClusterName, namespace string, certManagerEnabled bool, withAPIToken bool) (ClusterInterface, error) {
	// Return error immediately if we do not have exactly one of the cluster names configured
	if managedClusterName != "" && externalClusterName != "" {
		return nil, fmt.Errorf("cannot have both ManagedClusterName and ExternalClusterName set at the same time")
	}
	if managedClusterName == "" && externalClusterName == "" {
		return nil, fmt.Errorf("must have one of ManagedClusterName and ExternalClusterName set")
	}
	if namespace == "" {
		return nil, fmt.Errorf("must have non-empty namespace set")
	}
	cluster := Cluster{
		externalClusterName: externalClusterName,
		managedClusterName: managedClusterName,
		namespace: namespace,
		certManagerEnabled: certManagerEnabled,
		withAPIToken: withAPIToken,
	}
	// Build the API config up front so later Config() calls cannot fail.
	humioConfig, err := cluster.constructHumioConfig(ctx, k8sClient, withAPIToken)
	if err != nil {
		return nil, err
	}
	cluster.humioConfig = humioConfig
	return cluster, nil
}
// Url resolves the base URL of the cluster: for a managed cluster it is
// derived from the cluster name, namespace and TLS settings; for an
// external cluster it comes from the HumioExternalCluster spec.
func (c Cluster) Url(ctx context.Context, k8sClient client.Client) (*url.URL, error) {
	if c.managedClusterName != "" {
		// Lookup ManagedHumioCluster resource to figure out if we expect to use TLS or not
		var humioManagedCluster humiov1alpha1.HumioCluster
		err := k8sClient.Get(ctx, types.NamespacedName{
			Namespace: c.namespace,
			Name: c.managedClusterName,
		}, &humioManagedCluster)
		if err != nil {
			return nil, err
		}
		protocol := "https"
		if !c.certManagerEnabled {
			log.Infof("not using cert-manager, falling back to http")
			protocol = "http"
		}
		if !TLSEnabled(&humioManagedCluster) {
			log.Infof("humio managed cluster configured as insecure, using http")
			protocol = "http"
		}
		// Parse error deliberately discarded: the URL is built from a fixed
		// format string over validated parts and cannot fail to parse.
		baseURL, _ := url.Parse(fmt.Sprintf("%s://%s.%s:%d/", protocol, c.managedClusterName, c.namespace, 8080))
		return baseURL, nil
	}
	// Fetch the HumioExternalCluster instance
	var humioExternalCluster humiov1alpha1.HumioExternalCluster
	err := k8sClient.Get(ctx, types.NamespacedName{
		Namespace: c.namespace,
		Name: c.externalClusterName,
	}, &humioExternalCluster)
	if err != nil {
		return nil, err
	}
	baseURL, err := url.Parse(humioExternalCluster.Spec.Url)
	if err != nil {
		return nil, err
	}
	return baseURL, nil
}
// Name returns the name of the Humio cluster (managed takes precedence).
func (c Cluster) Name() string {
	if c.managedClusterName == "" {
		return c.externalClusterName
	}
	return c.managedClusterName
}

// Config returns the API configuration built when the Cluster was created.
func (c Cluster) Config() *humioapi.Config {
	return c.humioConfig
}
// constructHumioConfig returns a config to use with Humio API client with the necessary CA and API token.
// Managed clusters derive everything from operator-owned resources; external
// clusters read URL, token secret and optional CA secret from their spec.
func (c Cluster) constructHumioConfig(ctx context.Context, k8sClient client.Client, withAPIToken bool) (*humioapi.Config, error) {
	if c.managedClusterName != "" {
		// Lookup ManagedHumioCluster resource to figure out if we expect to use TLS or not
		var humioManagedCluster humiov1alpha1.HumioCluster
		err := k8sClient.Get(ctx, types.NamespacedName{
			Namespace: c.namespace,
			Name: c.managedClusterName,
		}, &humioManagedCluster)
		if err != nil {
			return nil, err
		}
		// Get the URL we want to use
		clusterURL, err := c.Url(ctx, k8sClient)
		if err != nil {
			return nil, err
		}
		config := &humioapi.Config{
			Address: clusterURL,
		}
		var apiToken corev1.Secret
		if withAPIToken {
			// Get API token from the operator-managed service-token secret.
			err = k8sClient.Get(ctx, types.NamespacedName{
				Namespace: c.namespace,
				Name: fmt.Sprintf("%s-%s", c.managedClusterName, kubernetes.ServiceTokenSecretNameSuffix),
			}, &apiToken)
			if err != nil {
				return nil, fmt.Errorf("unable to get secret containing api token: %w", err)
			}
			config.Token = string(apiToken.Data["token"])
		}
		// If we do not use TLS, return a client without CA certificate
		if !c.certManagerEnabled || !TLSEnabled(&humioManagedCluster) {
			config.Insecure = true
			return config, nil
		}
		// Look up the CA certificate stored in the cluster CA bundle
		// (a secret with the same name as the cluster).
		var caCertificate corev1.Secret
		err = k8sClient.Get(ctx, types.NamespacedName{
			Namespace: c.namespace,
			Name: c.managedClusterName,
		}, &caCertificate)
		if err != nil {
			return nil, fmt.Errorf("unable to get CA certificate: %w", err)
		}
		config.CACertificatePEM = string(caCertificate.Data["ca.crt"])
		return config, nil
	}
	// Fetch the HumioExternalCluster instance
	var humioExternalCluster humiov1alpha1.HumioExternalCluster
	err := k8sClient.Get(ctx, types.NamespacedName{
		Namespace: c.namespace,
		Name: c.externalClusterName,
	}, &humioExternalCluster)
	if err != nil {
		return nil, err
	}
	// Validate the external spec before touching any secrets.
	if humioExternalCluster.Spec.Url == "" {
		return nil, fmt.Errorf("no url specified")
	}
	if humioExternalCluster.Spec.APITokenSecretName == "" {
		return nil, fmt.Errorf("no api token secret name specified")
	}
	if strings.HasPrefix(humioExternalCluster.Spec.Url, "http://") && !humioExternalCluster.Spec.Insecure {
		return nil, fmt.Errorf("not possible to run secure cluster with plain http")
	}
	// Get API token
	var apiToken corev1.Secret
	err = k8sClient.Get(ctx, types.NamespacedName{
		Namespace: c.namespace,
		Name: humioExternalCluster.Spec.APITokenSecretName,
	}, &apiToken)
	if err != nil {
		return nil, fmt.Errorf("unable to get secret containing api token: %w", err)
	}
	clusterURL, err := url.Parse(humioExternalCluster.Spec.Url)
	if err != nil {
		return nil, err
	}
	// If we do not use TLS, return a config without CA certificate
	if humioExternalCluster.Spec.Insecure {
		return &humioapi.Config{
			Address: clusterURL,
			Token: string(apiToken.Data["token"]),
			Insecure: humioExternalCluster.Spec.Insecure,
		}, nil
	}
	// If CA secret is specified, return a configuration which loads the CA
	if humioExternalCluster.Spec.CASecretName != "" {
		var caCertificate corev1.Secret
		err = k8sClient.Get(ctx, types.NamespacedName{
			Namespace: c.namespace,
			Name: humioExternalCluster.Spec.CASecretName,
		}, &caCertificate)
		if err != nil {
			return nil, fmt.Errorf("unable to get CA certificate: %w", err)
		}
		return &humioapi.Config{
			Address: clusterURL,
			Token: string(apiToken.Data["token"]),
			CACertificatePEM: string(caCertificate.Data["ca.crt"]),
			Insecure: humioExternalCluster.Spec.Insecure,
		}, nil
	}
	// Secure, but no CA secret named: rely on the system trust store.
	return &humioapi.Config{
		Address: clusterURL,
		Token: string(apiToken.Data["token"]),
		Insecure: humioExternalCluster.Spec.Insecure,
	}, nil
}
|
package command
import (
"flag"
)
// ServiceArgs holds the command-line options of the service.
type ServiceArgs struct {
	ConfigFile string // value of the -c flag
}

// ParseArgs reads the -c flag from the process arguments.
// NOTE(review): registers on the global flag set and calls flag.Parse, so it
// must run at most once per process and before any other flag.Parse call.
func ParseArgs() ServiceArgs {
	serviceArgs := ServiceArgs{}
	flag.StringVar(&serviceArgs.ConfigFile, "c", "", "Path to service config file.")
	flag.Parse()
	return serviceArgs
}
|
package main
import (
"log"
"time"
"github.com/Shopify/sarama"
)
// localKafka is the broker list for local testing.
// NOTE(review): 9093 is commonly a TLS listener; plaintext Kafka defaults
// to 9092 — confirm the intended port.
var localKafka = []string{"127.0.0.1:9093"}

// main consumes "my-topic" for about ten seconds, then exits.
func main() {
	consume()
}
// consume subscribes to every partition of "my-topic" from the newest
// offset, logs each received message for ~10 seconds, then returns.
func consume() {
	consumer, err := sarama.NewConsumer(localKafka, nil)
	if err != nil {
		// Include the cause; the old message logged only "error".
		log.Fatalf("error creating consumer: %v\n", err)
	}
	partitionList, err := consumer.Partitions("my-topic")
	if err != nil {
		log.Fatalf("err %v", err)
	}
	// Iterate the partition IDs themselves; the old code ranged over the
	// slice indexes, which only worked while IDs happened to equal indexes.
	for _, p := range partitionList {
		pc, err := consumer.ConsumePartition("my-topic", p, sarama.OffsetNewest)
		if err != nil {
			log.Fatalf("error %v", err)
		}
		// Deferred in a loop: all closes run when consume returns, which is
		// acceptable for this short-lived demo.
		defer pc.AsyncClose()
		// Pass pc explicitly; the old closure declared an unnamed parameter
		// and silently captured the loop variable instead.
		go func(pc sarama.PartitionConsumer) {
			for msg := range pc.Messages() {
				log.Default().Printf("received %v \n", msg.Value)
			}
		}(pc)
	}
	time.Sleep(10 * time.Second)
}
|
package day3
import (
"aoc-2020/internal/utils"
"fmt"
"strings"
)
// Solution solves Advent of Code 2020 day 3: part 1 counts trees on the
// (3,1) slope; part 2 multiplies the counts of five fixed slopes.
func Solution() {
	lines := utils.ReadInput("internal/day3/input3")
	// travelMap[y][x] is one map cell; "#" marks a tree.
	var travelMap [][]string
	for _, line := range lines {
		travelMap = append(travelMap, strings.Split(line, ""))
	}
	fmt.Println(":: Part1 ::")
	fmt.Printf("The sled will encounter %d trees\n", numTreesFound(travelMap, 3, 1))
	fmt.Println("\n::Part 2 ::")
	// Each pair is {xSlope, ySlope}.
	slopes := [][]int{ {1, 1}, {3, 1}, {5, 1}, {7, 1}, {1, 2} }
	multiplied := 1
	for _, slope := range slopes {
		multiplied *= numTreesFound(travelMap, slope[0], slope[1])
	}
	fmt.Printf("Result: %d\n", multiplied)
}
// numTreesFound counts trees ("#") hit while descending travelMap from the
// top-left cell, moving xSlope right and ySlope down per step; columns wrap
// around via modulo. The starting cell (0,0) is included in the check.
func numTreesFound(travelMap [][]string, xSlope int, ySlope int) int {
	trees := 0
	for x, y := 0, 0; y < len(travelMap); x, y = x+xSlope, y+ySlope {
		row := travelMap[y]
		if row[x%len(row)] == "#" {
			trees++
		}
	}
	return trees
}
|
package db
import (
"time"
"github.com/VolticFroogo/Animal-Pictures/helpers"
"github.com/VolticFroogo/Animal-Pictures/models"
)
// StoreRefreshToken generates a random JTI, stores it with the user's UUID
// and expiry, and returns the populated JTI (including its row ID).
func StoreRefreshToken(uuid string) (jti models.JTI, err error) {
	// No duplication check needed as JTIs don't have to be completely unique.
	jti.JTI, err = helpers.GenerateRandomString(32)
	if err != nil {
		return
	}
	jti.Expiry = time.Now().Add(models.RefreshTokenValidTime).Unix()
	_, err = db.Exec("INSERT INTO jti (jti, useruuid, expiry) VALUES (?, ?, ?)", jti.JTI, uuid, jti.Expiry)
	if err != nil {
		return
	}
	// QueryRow replaces the old Query + unchecked rows.Next(): Scan now
	// reports sql.ErrNoRows instead of failing on an exhausted row set.
	err = db.QueryRow("SELECT id FROM jti WHERE jti=? AND useruuid=? AND expiry=?", jti.JTI, uuid, jti.Expiry).Scan(&jti.ID)
	return
}
// GetJTI takes a JTI string and returns the populated JTI struct.
// Uses QueryRow instead of the old Query + unchecked rows.Next(), so a
// missing row surfaces as sql.ErrNoRows rather than a scan failure.
func GetJTI(jti string) (jtiStruct models.JTI, err error) {
	jtiStruct.JTI = jti
	err = db.QueryRow("SELECT id, useruuid, expiry FROM jti WHERE jti=?", jti).Scan(&jtiStruct.ID, &jtiStruct.UserUUID, &jtiStruct.Expiry)
	return
}
// CheckJTI reports whether a JTI is still valid; an expired token is
// deleted from the database as a side effect.
func CheckJTI(jti models.JTI) (valid bool, err error) {
	if time.Now().Unix() < jti.Expiry {
		return true, nil
	}
	// Expired: clean up the row before reporting invalid.
	if _, err = db.Exec("DELETE FROM jti WHERE id=?", jti.ID); err != nil {
		return false, err
	}
	return false, nil
}
// DeleteJTI removes the row whose jti column matches the given token.
func DeleteJTI(jti string) error {
	_, err := db.Exec("DELETE FROM jti WHERE jti=?", jti)
	return err
}
// DeAuthUser removes every JTI belonging to the user, deauthorising all of
// that user's refresh tokens at once.
func DeAuthUser(uuid string) error {
	_, err := db.Exec("DELETE FROM jti WHERE useruuid=?", uuid)
	return err
}
|
/*
* Copyright (c) 2019 SUSE LLC.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package cluster
const (
	// kubeadmInitConf is the kubeadm config rendered for the first
	// control-plane node: an InitConfiguration plus a ClusterConfiguration.
	// Template fields: CloudProvider, ControlPlaneHost,
	// ControlPlaneHostAndPort, ClusterName, ImageRepository,
	// CoreDNSImageTag, EtcdImageTag, KubernetesVersion.
	kubeadmInitConf = `apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
bootstrapTokens: []
localAPIEndpoint:
  advertiseAddress: ""
{{- if eq .CloudProvider "aws" }}
nodeRegistration:
  kubeletExtraArgs:
    cloud-provider: "aws"
{{- end }}
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
apiServer:
  certSANs:
  - {{.ControlPlaneHost}}
  extraArgs:
    oidc-issuer-url: https://{{.ControlPlaneHost}}:32000
    oidc-client-id: oidc
    oidc-ca-file: /etc/kubernetes/pki/ca.crt
    oidc-username-claim: email
    oidc-groups-claim: groups
{{- if eq .CloudProvider "aws" }}
    cloud-provider: "aws"
controllerManager:
  extraArgs:
    cloud-provider: "aws"
    allocate-node-cidrs: "false"
{{- end }}
clusterName: {{.ClusterName}}
controlPlaneEndpoint: {{.ControlPlaneHostAndPort}}
dns:
  imageRepository: {{.ImageRepository}}
  imageTag: {{.CoreDNSImageTag}}
  type: CoreDNS
etcd:
  local:
    imageRepository: {{.ImageRepository}}
    imageTag: {{.EtcdImageTag}}
imageRepository: {{.ImageRepository}}
kubernetesVersion: {{.KubernetesVersion}}
networking:
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
useHyperKubeImage: true
`
	// criDockerDefaultsConf is the sysconfig snippet setting extra CLI
	// switches for the crio daemon. Template fields: PauseImage,
	// StrictCapDefaults.
	criDockerDefaultsConf = `## Path : System/Management
## Description : Extra cli switches for crio daemon
## Type : string
## Default : ""
## ServiceRestart : crio
#
CRIO_OPTIONS=--pause-image={{.PauseImage}}{{if not .StrictCapDefaults}} --default-capabilities="CHOWN,DAC_OVERRIDE,FSETID,FOWNER,NET_RAW,SETGID,SETUID,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,MKNOD,AUDIT_WRITE,SETFCAP"{{end}}
`
	// masterConfTemplate is the kubeadm JoinConfiguration used when joining
	// an additional control-plane node. Template fields:
	// ControlPlaneHostAndPort, CloudProvider.
	masterConfTemplate = `apiVersion: kubeadm.k8s.io/v1beta1
kind: JoinConfiguration
discovery:
  bootstrapToken:
    apiServerEndpoint: {{.ControlPlaneHostAndPort}}
    unsafeSkipCAVerification: true
{{- if eq .CloudProvider "aws" }}
nodeRegistration:
  kubeletExtraArgs:
    cloud-provider: "aws"
{{- end }}
controlPlane:
  localAPIEndpoint:
    advertiseAddress: ""
`
	// workerConfTemplate is the kubeadm JoinConfiguration used when joining
	// a worker node. Template fields: ControlPlaneHostAndPort, CloudProvider.
	workerConfTemplate = `apiVersion: kubeadm.k8s.io/v1beta1
kind: JoinConfiguration
discovery:
  bootstrapToken:
    apiServerEndpoint: {{.ControlPlaneHostAndPort}}
    unsafeSkipCAVerification: true
{{- if eq .CloudProvider "aws" }}
nodeRegistration:
  kubeletExtraArgs:
    cloud-provider: "aws"
{{- end }}
`
)
|
package smarttv
import (
"errors"
"github.com/Jeffail/gabs"
"os"
"strconv"
)
// TODO: Split Sequence Builder and Sequence into two types
// SequenceBuilder builds the ordered chain of TVCommands for one connector
// request.
type SequenceBuilder struct {
	connector ConnectorDTO               // request this sequence is built for
	sequence  map[string]*gabs.Container // raw JSON steps keyed by stringified index ("0", "1", ...)
	commands  []TVCommand                // built commands; head of the chain first
}
// SequenceBuilderInterface describes the builder operations.
// NOTE(review): these methods take no parameters and return nothing, so
// *SequenceBuilder (whose Init/Build/GetCommands have parameters and
// results) does not satisfy this interface as written — confirm whether it
// is used anywhere and reconcile the signatures.
type SequenceBuilderInterface interface {
	Init()
	Build()
	GetRoot()
	GetCommands()
}
// GetCommands returns the commands built by Build, head of the chain first.
// Fix: gofmt-clean formatting (`R * SequenceBuilder` / `([] TVCommand)`) and
// receiver renamed to B for consistency with the type's other methods.
func (B *SequenceBuilder) GetCommands() []TVCommand { return B.commands }
// Init loads the command sequence for the connector's device type and
// command name from the JSON file named by COMMAND_SEQUENCE_PATH.
// Returns an error when that lookup path does not resolve to a map.
func (B *SequenceBuilder) Init(C ConnectorDTO) error {
	// NOTE(review): parseJson's result is used unconditionally; presumably a
	// missing/unreadable file yields a container whose ChildrenMap fails and
	// is caught below — confirm parseJson's failure behavior.
	seq, err := parseJson(os.Getenv("COMMAND_SEQUENCE_PATH")).S(C.Device.Type, C.CommandName).ChildrenMap()
	if err != nil {
		return errors.New("Failed to find command " + C.CommandName + " for " + C.Device.Type + " TV")
	}
	B.connector = C
	B.sequence = seq
	return nil
}
// Build constructs the TVCommand chain from the loaded sequence, walking the
// JSON steps from the last index down to 0 and prepending each command, so
// B.commands ends up in step order with each command's Next pointing at its
// successor.
func (B *SequenceBuilder) Build() error {
	var TVCommandFactory TVCommandFactory
	// Keys of B.sequence are stringified indices "0".."len-1".
	for i := len(B.sequence) - 1; i >= 0; i-- {
		tvCommand, err := TVCommandFactory.Create(B.connector, B.sequence[strconv.Itoa(i)].Data().(string))
		if err != nil {
			return err
		}
		if i < len(B.sequence)-1 {
			// NOTE(review): this takes the address of the current head before
			// the prepend below reallocates B.commands, so Next points into the
			// previous backing array (a stale copy of the element). The chain
			// still traverses because copies share the same field values, but
			// this is fragile — confirm it is intended.
			tvCommand.Next = &B.commands[0]
		}
		B.commands = append([]TVCommand{tvCommand}, B.commands...)
	}
	return nil
}
|
package converter
import (
"github.com/geoirb/rss-aggregator/pkg/models"
)
// Converter turns model objects into tabular [][]string form for output.
type Converter struct {
}
// News converts each news item into a [title, pubDate] row.
func (c *Converter) News(src []models.News) (dst [][]string) {
	dst = make([][]string, 0, len(src))
	for _, item := range src {
		row := []string{item.Title, item.PubDate}
		dst = append(dst, row)
	}
	return
}
// NewConverter returns a ready-to-use Converter.
func NewConverter() *Converter {
	return new(Converter)
}
|
package main
import (
"context"
"fmt"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"io"
"time"
"github.com/labstack/gommon/log"
"github.com/wexel-nath/grpc-go-course/greet/greetpb"
"google.golang.org/grpc"
)
// main connects to the local greet server and runs the deadline demo twice:
// once with a deadline the server should meet, once with one it should miss.
// The other RPC demos are kept commented out for manual experimentation.
func main() {
	fmt.Println("Hello I'm a client") // fix: typo "I'am" in the greeting
	cc, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("failed to connect: %v", err)
	}
	defer cc.Close()
	c := greetpb.NewGreetServiceClient(cc)
	//doUnary(c)
	//doServerStreaming(c)
	//doClientStreaming(c)
	//doBiDiStreaming(c)
	doUnaryWithDeadline(c, 5*time.Second)
	doUnaryWithDeadline(c, 1*time.Second)
}
// doUnary issues a single Greet request and prints the result.
func doUnary(c greetpb.GreetServiceClient) {
	fmt.Println("Starting to do a Unary RPC...")
	request := &greetpb.GreetRequest{
		Greeting: &greetpb.Greeting{
			FirstName: "Nathan",
			LastName:  "Welch",
		},
	}
	response, err := c.Greet(context.Background(), request)
	if err != nil {
		log.Fatalf("error while calling Greet with RPC: %v", err)
	}
	fmt.Printf("Response from Greet: %v\n", response.GetResult())
}
// doServerStreaming calls GreetManyTimes and prints every streamed response
// until the server closes the stream.
func doServerStreaming(c greetpb.GreetServiceClient) {
	fmt.Println("Starting to do a Server Streaming RPC...")
	request := &greetpb.GreetManyTimesRequest{
		Greeting: &greetpb.Greeting{
			FirstName: "Nathan",
			LastName:  "Welch",
		},
	}
	stream, err := c.GreetManyTimes(context.Background(), request)
	if err != nil {
		log.Fatalf("error while calling GreetManyTimes with RPC: %v", err)
	}
	for {
		response, recvErr := stream.Recv()
		if recvErr == io.EOF {
			// Server finished streaming.
			break
		}
		if recvErr != nil {
			log.Fatalf("error while reading stream: %v", recvErr)
		}
		fmt.Printf("Response from GreetManyTimes: %v\n", response.GetResult())
	}
}
// doClientStreaming streams several greetings to the server, one per second,
// then prints the single aggregated response.
func doClientStreaming(c greetpb.GreetServiceClient) {
	fmt.Println("Starting to do a Client Streaming RPC...")
	stream, err := c.LongGreet(context.Background())
	if err != nil {
		log.Fatalf("error while calling LongGreet: %v", err)
	}
	names := []string{"Nathan", "Tristan", "Rav", "Alex", "Callum"}
	for _, name := range names {
		greeting := &greetpb.Greeting{FirstName: name}
		fmt.Println("Sending a greeting to", name)
		if err = stream.Send(&greetpb.LongGreetRequest{Greeting: greeting}); err != nil {
			log.Fatalf("error while sending to LongGreet: %v", err)
		}
		time.Sleep(time.Second)
	}
	resp, err := stream.CloseAndRecv()
	if err != nil {
		log.Fatalf("error while receiving LongGreet: %v", err)
	}
	fmt.Printf("Response from LongGreet: %v\n", resp.GetResult())
}
// doBiDiStreaming opens a bidirectional GreetEveryone stream, sending one
// greeting per second on a goroutine while the main goroutine prints the
// server's responses until it closes its side.
// Fixes: error messages said "LongGreet" instead of "GreetEveryone"; the
// sender goroutine wrote to the outer err variable (racing with the receive
// loop); CloseSend's error was silently dropped.
func doBiDiStreaming(c greetpb.GreetServiceClient) {
	fmt.Println("Starting to do a BiDi Streaming RPC...")
	stream, err := c.GreetEveryone(context.Background())
	if err != nil {
		log.Fatalf("error while calling GreetEveryone: %v", err)
	}
	greetings := []*greetpb.Greeting{
		{FirstName: "Nathan"},
		{FirstName: "Tristan"},
		{FirstName: "Rav"},
		{FirstName: "Alex"},
		{FirstName: "Callum"},
	}
	go func() {
		for _, greeting := range greetings {
			time.Sleep(time.Second)
			fmt.Println("Sending a greeting to", greeting.FirstName)
			// Goroutine-local error variable: do not share err with the
			// receive loop below.
			if sendErr := stream.Send(&greetpb.GreetEveryoneRequest{Greeting: greeting}); sendErr != nil {
				log.Fatalf("error while sending to GreetEveryone: %v", sendErr)
			}
		}
		if closeErr := stream.CloseSend(); closeErr != nil {
			log.Fatalf("error while closing send direction: %v", closeErr)
		}
	}()
	for {
		resp, recvErr := stream.Recv()
		if recvErr == io.EOF {
			break
		}
		if recvErr != nil {
			log.Fatalf("error while receiving GreetEveryone: %v", recvErr)
		}
		fmt.Printf("Response from GreetEveryone: %v\n", resp.GetResult())
	}
}
// doUnaryWithDeadline calls GreetWithDeadline with the given timeout,
// distinguishing a DeadlineExceeded status from other failures.
func doUnaryWithDeadline(c greetpb.GreetServiceClient, timeout time.Duration) {
	fmt.Println("Starting to do a Unary GreetWithDeadline RPC...")
	req := &greetpb.GreetRequest{
		Greeting: &greetpb.Greeting{
			FirstName: "Nathan",
			LastName:  "Welch",
		},
	}
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	resp, err := c.GreetWithDeadline(ctx, req)
	if err == nil {
		fmt.Printf("Response from Greet: %v\n", resp.GetResult())
		return
	}
	s, ok := status.FromError(err)
	if !ok {
		log.Fatalf("error while calling Greet with RPC: %v", err)
	}
	switch s.Code() {
	case codes.DeadlineExceeded:
		fmt.Println("Timeout was hit! Deadline exceeded")
	default:
		fmt.Println("Unexpected error status error:", s.Message())
	}
}
|
package main
// usage is the help text printed by the steg CLI.
// NOTE(review): the "show" section documents an -image flag, but the example
// below uses -input — confirm which flag name the show command actually
// registers and reconcile the two.
const usage = `Usage: steg <command> [<args>]
  -help
    Print this help message.
Commands:
  hide:
    -f value
      Path to file to hide (can specify flag multiple times.)
    -input string
      Path to file to hide files in.
    -output string
      Output path to new file, which contains hidden file(s).
  show:
    -image string
      Path to file which contains hidden files.
    -outputdir string
      Path to directory to save files.
Examples:
  $ steg hide -input test.jpeg -f path/to/a.txt -f path/to/b.txt -output hidden.jpeg
  $ steg show -input hidden.jpeg -outputdir path/to/dir
`
|
package observer
// Publisher is the subject side of the observer pattern.
type Publisher interface {
	// Attach registers an observer.
	Attach(observer Observer)
	// Unpin removes a previously attached observer.
	Unpin(observer Observer)
	// Notify informs attached observers of the publisher's current state.
	Notify()
	// Show displays the publisher's state (implementation-defined output).
	Show()
}
package pc
import (
"errors"
"reflect"
"strconv"
"strings"
"unicode"
)
// State is the parser state threaded through every Parser: the value
// produced so far, the unconsumed input, and any error.
type State struct {
	Value   interface{} // result of the last successful parser
	Remains string      // unconsumed input
	Err     error       // non-nil when parsing failed
}
// Parser consumes input from a State and returns the resulting State.
// FIXME: input is state???
type Parser func(state State) State
var (
	// ErrNoMatch reports that no alternative matched the input.
	ErrNoMatch = errors.New("no match")
	// ErrUnexpectedEnd reports that the input ended mid-parse.
	ErrUnexpectedEnd = errors.New("unexpected end of line")
)
// Fail returns a parser that always fails with the given error, leaving the
// value and remaining input untouched.
func Fail(err error) Parser {
	return func(state State) State {
		state.Err = err
		return state
	}
}
// Run applies the parser to the given input string.
func (parser Parser) Run(str string) State {
	return parser(State{Remains: str})
}
// Map transforms a successful parse result by calling fn on it via
// reflection. fn must be a one-argument function whose parameter type
// matches the current Value; errors pass through unchanged.
func (parser Parser) Map(fn interface{}) Parser {
	return func(s State) State {
		state := parser(s)
		if state.Err != nil {
			return state
		}
		// Reflection call: panics if fn's signature does not accept Value.
		result := reflect.ValueOf(fn).Call([]reflect.Value{reflect.ValueOf(state.Value)})
		state.Value = result[0].Interface()
		return state
	}
}
// OneOf tries each parser in order and returns the first success; if none
// match, it fails with ErrNoMatch.
func OneOf(parsers ...Parser) Parser {
	return func(s State) State {
		for _, p := range parsers {
			if next := p(s); next.Err == nil {
				return next
			}
		}
		return Fail(ErrNoMatch)(s)
	}
}
// Satisfy consumes a single rune for which fn reports true. It fails with
// ErrUnexpectedEnd on empty input and ErrNoMatch when fn rejects the rune.
func Satisfy(fn func(rune) bool) Parser {
	return func(s State) State {
		if s.Remains == "" {
			return State{Err: ErrUnexpectedEnd}
		}
		runes := []rune(s.Remains)
		first := runes[0]
		if !fn(first) {
			return State{Remains: s.Remains, Err: ErrNoMatch}
		}
		return State{Value: first, Remains: string(runes[1:])}
	}
}
// Char matches any single rune contained in expect.
func Char(expect string) Parser {
	return Satisfy(func(r rune) bool { return strings.ContainsRune(expect, r) })
}
// NotChar matches any single rune NOT contained in notExpect.
func NotChar(notExpect string) Parser {
	return Satisfy(func(r rune) bool { return !strings.ContainsRune(notExpect, r) })
}
// Str matches the exact literal expect at the head of the input.
func Str(expect string) Parser {
	return func(s State) State {
		if !strings.HasPrefix(s.Remains, expect) {
			return State{Remains: s.Remains, Err: ErrNoMatch}
		}
		return State{Value: expect, Remains: s.Remains[len(expect):]}
	}
}
// Integer matches one or more digits and yields their int value.
func Integer() Parser {
	toInt := func(digits []rune) int {
		// Atoi error deliberately ignored: the input is digits-only
		// (an extremely long run could still overflow, as before).
		n, _ := strconv.Atoi(string(digits))
		return n
	}
	return Many1(Digit(), []rune{}).Map(toInt)
}
// Digit matches a single decimal digit rune.
// Fixes: the original indexed s.Remains unconditionally and panicked on
// empty input (now ErrUnexpectedEnd, consistent with Satisfy), and sliced
// the string by one byte after decoding a rune, which corrupted the
// remainder for multi-byte digits (now sliced by rune).
func Digit() Parser {
	return func(s State) State {
		if len(s.Remains) == 0 {
			return State{Err: ErrUnexpectedEnd}
		}
		runes := []rune(s.Remains)
		if unicode.Is(unicode.Digit, runes[0]) {
			return State{Value: runes[0], Remains: string(runes[1:])}
		}
		return State{Err: errors.New("not digit")}
	}
}
// Many1 applies parser one or more times, collecting the values into a
// slice whose element type is taken from kind (e.g. []rune{}).
// Fails with ErrNoMatch when parser does not match even once.
func Many1(parser Parser, kind interface{}) Parser {
	return func(s State) (state State) {
		state = State{Remains: s.Remains}
		var (
			values []interface{}
			count  int
		)
		for len(s.Remains) > 0 {
			nextState := parser(s)
			if nextState.Err != nil {
				break
			}
			// Advance the cursor past what the sub-parser consumed.
			s.Remains = nextState.Remains
			values = append(values, nextState.Value)
			count++
		}
		if count == 0 {
			return State{Remains: state.Remains, Err: ErrNoMatch}
		}
		state.Remains = s.Remains
		if values != nil {
			// interfacesToSlice converts []interface{} into kind's slice type.
			state.Value = interfacesToSlice(values, kind)
		}
		return state
	}
}
// Many applies parser zero or more times, collecting the values into a
// slice whose element type is taken from kind. Never fails: zero matches
// yield a successful state with a nil-derived value.
func Many(parser Parser, kind interface{}) Parser {
	return func(s State) (state State) {
		state = State{Remains: s.Remains}
		var values []interface{}
		for len(s.Remains) > 0 {
			nextState := parser(s)
			if nextState.Err != nil {
				break
			}
			// Advance the cursor past what the sub-parser consumed.
			s.Remains = nextState.Remains
			values = append(values, nextState.Value)
		}
		state.Remains = s.Remains
		state.Value = interfacesToSlice(values, kind)
		return state
	}
}
// Pure lifts a plain value into a parser that always succeeds with it,
// consuming no input.
// FIXME: rename to Success
func Pure(value interface{}) Parser {
	return func(s State) State {
		s.Value = value
		return s
	}
}
// Between runs left, then parser, then right, keeping only parser's value.
func Between(left Parser, parser Parser, right Parser) Parser {
	return left.DiscardLeft(parser).DiscardRight(right)
}
// Between is the method form of Between: left, then the receiver, then
// right, keeping the receiver's value.
func (parser Parser) Between(left Parser, right Parser) Parser {
	return Between(left, parser, right)
}
// FIXME: use lift?
// DiscardLeft sequences the receiver and then right, keeping right's value.
func (parser Parser) DiscardLeft(right Parser) Parser {
	return func(s State) State {
		first := parser(s)
		if first.Err != nil {
			return first
		}
		return right(first)
	}
}
// FIXME: use lift?
// DiscardRight sequences the receiver and then right, keeping the
// receiver's value but right's remaining input.
func (parser Parser) DiscardRight(right Parser) Parser {
	return func(s State) State {
		first := parser(s)
		if first.Err != nil {
			return first
		}
		second := right(first)
		second.Value = first.Value
		return second
	}
}
// runesToString converts a []rune parse result into a string.
func runesToString(value interface{}) interface{} {
	runes := value.([]rune)
	return string(runes)
}
// NotQuotedWord matches a run of one or more characters that are neither
// spaces nor quotes and yields it as a string.
func NotQuotedWord() Parser {
	word := Many1(NotChar(` "'`), []rune{})
	return word.Map(runesToString)
}
|
/*
 * @Author      : joker
 * @Time        : 2021/6/30 9:18 AM
 * @File        : jz_11_number-of-1-bits_test.go
 * @Description : Test for the "number of 1 bits in binary" offer problem.
 * @Attention   :
 */
package offer
import (
"fmt"
"testing"
)
// TestNumberOf1 exercises NumberOf1 with -1 (all bits set).
// NOTE(review): this only prints the result — it asserts nothing and can
// never fail; consider comparing against the expected bit count.
func TestNumberOf1(t *testing.T) {
	fmt.Println(NumberOf1(-1))
}
|
package util
import (
"fmt"
"math/rand"
"net/http"
"reflect"
"regexp"
"strings"
"grm-service/common"
"github.com/emicklei/go-restful"
"github.com/pborman/uuid"
errors "grm-service/errors"
log "grm-service/log"
)
// nullRet is the placeholder entity written when a handler has no payload.
// NOTE(review): the ret field is unexported, so encoding/json ignores it
// (and its tag); nullRet always serializes as {} — confirm that is the
// intent, or export the field.
type nullRet struct {
	ret string `json:"ret,omitempty"`
}
// isNil reports whether the response value should be treated as empty:
// a nil interface or a nil pointer.
func isNil(i interface{}) bool {
	if i == nil {
		return true
	}
	vi := reflect.ValueOf(i)
	if vi.Kind() == reflect.Ptr {
		return vi.IsNil()
	}
	log.Warn("Response type should be a pointer")
	// NOTE(review): IsValid() is true for any non-pointer value, so every
	// non-pointer response is reported as "nil" and callers write the empty
	// object instead of the value — confirm this is intentional.
	return vi.IsValid()
}
// http response
// ResWriteError logs err and writes it as a JSON error body. The HTTP
// status line stays 200; the wrapped error carries code 500.
func ResWriteError(res *restful.Response, err error) error {
	log.Error(err)
	res.Header().Add("Content-Type", "application/json")
	wrapped := errors.New(err.Error(), http.StatusInternalServerError)
	return res.WriteError(http.StatusOK, wrapped)
}
// ResWriteHeaderEntity writes value as JSON with an explicit 200 status,
// substituting an empty object when value is nil.
func ResWriteHeaderEntity(res *restful.Response, value interface{}) error {
	res.Header().Add("Content-Type", "application/json")
	if !isNil(value) {
		return res.WriteHeaderAndEntity(http.StatusOK, value)
	}
	return res.WriteEntity(nullRet{})
}
// ResWriteEntity writes value as JSON, substituting an empty object when
// value is nil.
func ResWriteEntity(res *restful.Response, value interface{}) error {
	res.Header().Add("Content-Type", "application/json")
	if !isNil(value) {
		return res.WriteEntity(value)
	}
	return res.WriteEntity(nullRet{})
}
// ParserPageArgs extracts paging query parameters (limit/offset/order/sort)
// from the request.
func ParserPageArgs(req *restful.Request) *common.PageFilter {
	return &common.PageFilter{
		Limit:  req.QueryParameter("limit"),
		Offset: req.QueryParameter("offset"),
		Order:  req.QueryParameter("order"),
		Sort:   req.QueryParameter("sort"),
	}
}
// pagingIdent matches safe SQL identifiers for order-by columns.
var pagingIdent = regexp.MustCompile(`^[A-Za-z_][A-Za-z0-9_]*$`)

// pagingNum matches non-negative integers for limit/offset values.
var pagingNum = regexp.MustCompile(`^[0-9]+$`)

// PageFilterSql appends ORDER BY / LIMIT / OFFSET clauses built from the
// page filter to sql.
// Security fix: these values come straight from query parameters (see
// ParserPageArgs) and were interpolated into the SQL string unchecked,
// which is a SQL injection vector. ORDER BY columns cannot be bound as
// placeholders, so they are validated instead: sort must be a plain
// identifier, order must be asc/desc (or empty), and limit/offset must be
// numeric. Invalid parts are dropped rather than interpolated.
func PageFilterSql(sql, keyCol string, page *common.PageFilter) string {
	if len(sql) == 0 || page == nil {
		return sql
	}
	if len(page.Sort) > 0 && len(page.Order) == 0 {
		page.Order = "desc"
	}
	order := strings.ToLower(page.Order)
	if order != "" && order != "asc" && order != "desc" {
		order = "" // untrusted direction dropped (defaults to the DB's asc)
	}
	sort := page.Sort
	if sort != "" && !pagingIdent.MatchString(sort) {
		sort = "" // untrusted column name dropped
	}
	if sort != "" && sort != keyCol {
		sql = fmt.Sprintf(`%s order by %s %s`, sql, sort, order)
		if len(keyCol) > 0 {
			// Secondary sort on the key column for a stable order.
			sql = fmt.Sprintf(`%s,%s %s`, sql, keyCol, order)
		}
	} else if len(keyCol) > 0 {
		sql = fmt.Sprintf(`%s order by %s %s`, sql, keyCol, order)
	}
	if pagingNum.MatchString(page.Offset) && pagingNum.MatchString(page.Limit) {
		sql = fmt.Sprintf(`%s limit %s offset %s`, sql, page.Limit, page.Offset)
	}
	return sql
}
// NewUUID returns a freshly generated UUID with the dashes stripped.
func NewUUID() string {
	id := uuid.NewUUID().String()
	return strings.ReplaceAll(id, "-", "")
}
// GetRandomStr returns a uniformly random element of list, or the empty
// string when list is empty.
// Fix: the original panicked on an empty slice via rand.Intn(0).
func GetRandomStr(list []string) string {
	if len(list) == 0 {
		return ""
	}
	return list[rand.Intn(len(list))]
}
// GetRandomItem returns a uniformly random element of list, or nil when
// list is empty.
// Fix: the original panicked on an empty slice via rand.Intn(0).
func GetRandomItem(list []interface{}) interface{} {
	if len(list) == 0 {
		return nil
	}
	return list[rand.Intn(len(list))]
}
// re matches strings consisting solely of ASCII digits (including "").
var re = regexp.MustCompile("^[0-9]*$")

// IsNum reports whether num, ignoring surrounding whitespace, is a
// non-empty run of ASCII digits.
// Bug fix: the original trimmed the literal " " instead of num
// (strings.TrimSpace(" ")), so num was always replaced by the empty string
// and IsNum returned false for every input.
func IsNum(num string) bool {
	num = strings.TrimSpace(num)
	if num == "" {
		return false
	}
	return re.MatchString(num)
}
|
package policy
import (
"strings"
"github.com/rightscale/rsc/cmd"
"github.com/rightscale/rsc/rsapi"
)
// API is an API 1.0 client — a vanilla RightScale API client wrapping
// rsapi.API.
type API struct {
	*rsapi.API
}
// New returns an API 1.0 client.
// It makes a test request to API 1.0 and returns an error if authentication
// fails. host may be blank, in which case the client attempts to resolve it
// using auth.
func New(host string, auth rsapi.Authenticator) *API {
	return fromAPI(rsapi.New(host, auth))
}
// FromCommandLine builds a client from the command line.
func FromCommandLine(cmdLine *cmd.CommandLine) (*API, error) {
	raw, err := rsapi.FromCommandLine(cmdLine)
	// NOTE(review): cmdLine.Host is rewritten before the error check, so it
	// is mutated even when construction failed, and fromAPI below rewrites
	// the client's host again — confirm both side effects are intended.
	cmdLine.Host = HostFromLogin(cmdLine.Host)
	if err != nil {
		return nil, err
	}
	return fromAPI(raw), nil
}
// fromAPI wraps a generic rsapi client into an API 1.0 client, fixing up
// the file encoding, host, metadata, and version header.
func fromAPI(api *rsapi.API) *API {
	wrapped := &API{api}
	wrapped.FileEncoding = rsapi.FileEncodingJSON
	wrapped.Host = HostFromLogin(wrapped.Host)
	wrapped.Metadata = GenMetadata
	wrapped.VersionHeader = "Api-Version"
	return wrapped
}
// HostFromLogin derives the policy ("governance") endpoint host from a
// login endpoint host, e.g. "us-3.rightscale.com" => "governance-3.rightscale.com"
// and "cm.rightscale.local" => "governance.rightscale.local".
// Hosts whose prefix cannot be mapped are returned unchanged.
// The inference isn't great, but it beats entering the host by hand.
func HostFromLogin(host string) string {
	parts := strings.Split(host, ".")
	prefix := strings.Split(parts[0], "-")
	switch {
	case len(prefix) == 1 && prefix[0] == "cm":
		// micromoo-style host, e.g. "cm.rightscale.local"
		prefix[0] = "governance"
	case len(prefix) < 2:
		// don't know how to compute the policy host; use the cm host
		return host
	default:
		prefix[len(prefix)-2] = "governance"
	}
	joined := append([]string{strings.Join(prefix, "-")}, parts[1:]...)
	return strings.Join(joined, ".")
}
|
package main
import (
"fmt"
"os"
"sync"
"github.com/qiniu/api.v7/auth/qbox"
"github.com/qiniu/api.v7/storage"
"github.com/qiniu/x/rpc.v7"
)
var (
	// AK is the Qiniu access key, populated from Q_AK in init.
	AK string
	// SK is the Qiniu secret key, populated from Q_SK in init.
	SK string
)
// init reads the Qiniu access/secret keys from the environment.
func init() {
	AK = os.Getenv("Q_AK")
	SK = os.Getenv("Q_SK")
}
// main moves every object in <src_bucket> to <dest_bucket>, batching the
// move operations 1000 at a time.
// Fix: the original printed the missing-credentials message but then fell
// through and ran with empty credentials; it now returns.
func main() {
	if AK == "" || SK == "" {
		fmt.Println("Please set Q_AK, Q_SK environment variable")
		return
	}
	if len(os.Args) < 3 {
		fmt.Printf("Usage: %s <src_bucket> <dest_bucket>\n", os.Args[0])
		return
	}
	srcBkt, dstBkt := os.Args[1], os.Args[2]
	mac := qbox.NewMac(AK, SK)
	manager := storage.NewBucketManager(mac, nil)
	fileCh, err := manager.ListBucket(srcBkt, "", "", "")
	if err != nil {
		fmt.Println(err)
		return
	}
	moveOps := make([]string, 0, 1000)
	opCh := make(chan string)
	exitCh := make(chan struct{})
	wg := sync.WaitGroup{}
	wg.Add(1)
	// Collector goroutine: accumulates ops and dispatches them in batches
	// of 1000; the final partial batch is flushed on exit.
	go func(ch <-chan string, exitCh chan struct{}) {
		for {
			select {
			case moveOp := <-ch:
				if moveOps == nil {
					moveOps = make([]string, 0, 1000)
				}
				moveOps = append(moveOps, moveOp)
				if len(moveOps) == 1000 {
					// NOTE(review): full batches are not tracked by wg, so
					// main may exit before they finish — confirm acceptable.
					go batchMove(manager, moveOps)
					moveOps = nil
				}
			case <-exitCh:
				// Flush the final partial batch synchronously.
				batchMove(manager, moveOps)
				wg.Done()
				return
			}
		}
	}(opCh, exitCh)
	for item := range fileCh {
		moveOp := storage.URIMove(srcBkt, item.Item.Key, dstBkt, item.Item.Key, true)
		opCh <- moveOp
	}
	exitCh <- struct{}{}
	wg.Wait()
}
func batchMove(manager *storage.BucketManager, ops []string) {
ret, err := manager.Batch(ops)
if err != nil {
if _, ok := err.(*rpc.ErrorInfo); ok {
for _, r := range ret {
if r.Code != 200 {
fmt.Println(r.Data.Error)
}
}
} else {
fmt.Printf("batch error: %v\n", err)
}
}
} |
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grumpy
import (
"testing"
)
// TestXxx is an empty placeholder test.
// NOTE(review): it asserts nothing — consider removing or filling in.
func TestXxx(t *testing.T) {
}
// TestNewCodeKeywordsCheck verifies that NewCode logs a fatal message when
// a non-keyword (nil-default) param follows a keyword (defaulted) param.
func TestNewCodeKeywordsCheck(t *testing.T) {
	// Capture logFatal instead of letting it terminate the test binary.
	savedLogFatal := logFatal
	defer func() { logFatal = savedLogFatal }()
	var logged string
	logFatal = func(msg string) {
		logged = msg
	}
	NewCode("foo", "foo.py", []Param{{"bar", None}, {"baz", nil}}, 0, nil)
	want := "foo() non-keyword arg baz after keyword arg"
	if logged != want {
		t.Errorf("NewCode logged %q, want %q", logged, want)
	}
}
// TestNewCode drives Code.Eval through a table of positional/keyword
// argument combinations, covering defaults, *args (CodeFlagVarArg),
// **kwargs (CodeFlagKWArg), and the arity/duplicate-keyword error paths.
func TestNewCode(t *testing.T) {
	// testFunc invokes the Code object given as args[0] with the rest of the
	// arguments, so each case below exercises the argument-binding logic.
	testFunc := newBuiltinFunction("TestNewCode", func(f *Frame, args Args, kwargs KWArgs) (*Object, *BaseException) {
		if raised := checkFunctionVarArgs(f, "TestNewCode", args, CodeType); raised != nil {
			return nil, raised
		}
		return toCodeUnsafe(args[0]).Eval(f, nil, args[1:], kwargs)
	})
	// fn echoes its bound arguments back as a tuple.
	fn := func(f *Frame, args []*Object) (*Object, *BaseException) {
		return NewTuple(Args(args).makeCopy()...).ToObject(), nil
	}
	// nilFn returns a nil result, which Eval should surface as None.
	nilFn := func(*Frame, []*Object) (*Object, *BaseException) {
		return nil, nil
	}
	cases := []invokeTestCase{
		invokeTestCase{args: wrapArgs(NewCode("f1", "foo.py", nil, 0, fn)), want: NewTuple().ToObject()},
		invokeTestCase{args: wrapArgs(NewCode("f2", "foo.py", []Param{{"a", nil}}, 0, fn), 123), want: newTestTuple(123).ToObject()},
		invokeTestCase{args: wrapArgs(NewCode("f2", "foo.py", []Param{{"a", nil}}, 0, fn)), kwargs: wrapKWArgs("a", "apple"), want: newTestTuple("apple").ToObject()},
		invokeTestCase{args: wrapArgs(NewCode("f2", "foo.py", []Param{{"a", nil}}, 0, fn)), kwargs: wrapKWArgs("b", "bear"), wantExc: mustCreateException(TypeErrorType, "f2() got an unexpected keyword argument 'b'")},
		invokeTestCase{args: wrapArgs(NewCode("f2", "foo.py", []Param{{"a", nil}}, 0, fn)), wantExc: mustCreateException(TypeErrorType, "f2() takes at least 1 arguments (0 given)")},
		invokeTestCase{args: wrapArgs(NewCode("f2", "foo.py", []Param{{"a", nil}}, 0, fn), 1, 2, 3), wantExc: mustCreateException(TypeErrorType, "f2() takes 1 arguments (3 given)")},
		invokeTestCase{args: wrapArgs(NewCode("f3", "foo.py", []Param{{"a", nil}, {"b", nil}}, 0, fn), 1, 2), want: newTestTuple(1, 2).ToObject()},
		invokeTestCase{args: wrapArgs(NewCode("f3", "foo.py", []Param{{"a", nil}, {"b", nil}}, 0, fn), 1), kwargs: wrapKWArgs("b", "bear"), want: newTestTuple(1, "bear").ToObject()},
		invokeTestCase{args: wrapArgs(NewCode("f3", "foo.py", []Param{{"a", nil}, {"b", nil}}, 0, fn)), kwargs: wrapKWArgs("b", "bear", "a", "apple"), want: newTestTuple("apple", "bear").ToObject()},
		invokeTestCase{args: wrapArgs(NewCode("f3", "foo.py", []Param{{"a", nil}, {"b", nil}}, 0, fn), 1), kwargs: wrapKWArgs("a", "alpha"), wantExc: mustCreateException(TypeErrorType, "f3() got multiple values for keyword argument 'a'")},
		invokeTestCase{args: wrapArgs(NewCode("f4", "foo.py", []Param{{"a", nil}, {"b", None}}, 0, fn), 123), want: newTestTuple(123, None).ToObject()},
		invokeTestCase{args: wrapArgs(NewCode("f4", "foo.py", []Param{{"a", nil}, {"b", None}}, 0, fn), 123, "bar"), want: newTestTuple(123, "bar").ToObject()},
		invokeTestCase{args: wrapArgs(NewCode("f4", "foo.py", []Param{{"a", nil}, {"b", None}}, 0, fn)), kwargs: wrapKWArgs("a", 123, "b", "bar"), want: newTestTuple(123, "bar").ToObject()},
		invokeTestCase{args: wrapArgs(NewCode("f5", "foo.py", []Param{{"a", nil}}, CodeFlagVarArg, fn), 1), want: newTestTuple(1, NewTuple()).ToObject()},
		invokeTestCase{args: wrapArgs(NewCode("f5", "foo.py", []Param{{"a", nil}}, CodeFlagVarArg, fn), 1, 2, 3), want: newTestTuple(1, newTestTuple(2, 3)).ToObject()},
		invokeTestCase{args: wrapArgs(NewCode("f6", "foo.py", []Param{{"a", nil}}, CodeFlagKWArg, fn), "bar"), want: newTestTuple("bar", NewDict()).ToObject()},
		invokeTestCase{args: wrapArgs(NewCode("f6", "foo.py", []Param{{"a", nil}}, CodeFlagKWArg, fn)), kwargs: wrapKWArgs("a", "apple", "b", "bear"), want: newTestTuple("apple", newTestDict("b", "bear")).ToObject()},
		invokeTestCase{args: wrapArgs(NewCode("f6", "foo.py", []Param{{"a", nil}}, CodeFlagKWArg, fn), "bar"), kwargs: wrapKWArgs("b", "baz", "c", "qux"), want: newTestTuple("bar", newTestDict("b", "baz", "c", "qux")).ToObject()},
		invokeTestCase{args: wrapArgs(NewCode("f7", "foo.py", nil, 0, nilFn)), want: None},
	}
	for _, cas := range cases {
		if err := runInvokeTestCase(testFunc.ToObject(), &cas); err != "" {
			t.Error(err)
		}
	}
}
// TestCodeEvalRestoreExc checks that an exception set before a nested
// Code.Eval is visible inside the nested frame, and that it is restored in
// the caller's frame after the nested code cleared it.
// Fix: the second failure message said `want <nil>` although the assertion
// wants e.
func TestCodeEvalRestoreExc(t *testing.T) {
	e := mustCreateException(RuntimeErrorType, "uh oh")
	ranC1, ranC2 := false, false
	globals := NewDict()
	c1 := NewCode("<c1>", "foo.py", nil, 0, func(f *Frame, _ []*Object) (*Object, *BaseException) {
		if got, _ := f.ExcInfo(); got != e {
			t.Errorf("ExcInfo() = %v, want %v", got, e)
		}
		f.RestoreExc(nil, nil)
		ranC1 = true
		return None, nil
	})
	c2 := NewCode("<c2>", "foo.py", nil, 0, func(f *Frame, _ []*Object) (*Object, *BaseException) {
		f.RestoreExc(e, newTraceback(f, nil))
		c1.Eval(f, globals, nil, nil)
		// The exception was cleared by c1 but when returning to c2, it
		// should have been restored.
		if got, _ := f.ExcInfo(); got != e {
			t.Errorf("ExcInfo() = %v, want %v", got, e)
		}
		f.RestoreExc(nil, nil)
		ranC2 = true
		return None, nil
	})
	c2.Eval(NewRootFrame(), globals, nil, nil)
	if !ranC1 {
		t.Error("c1 did not run")
	}
	if !ranC2 {
		t.Error("c2 did not run")
	}
}
|
/*
Copyright 2021. The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package query
import (
"bufio"
"bytes"
"encoding/base64"
"fmt"
"io"
"time"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
apimachinerytypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
monitorContext "github.com/kubevela/pkg/monitor/context"
pkgmulticluster "github.com/kubevela/pkg/multicluster"
wfContext "github.com/kubevela/workflow/pkg/context"
"github.com/kubevela/workflow/pkg/cue/model/value"
"github.com/kubevela/workflow/pkg/types"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/multicluster"
querytypes "github.com/oam-dev/kubevela/pkg/velaql/providers/query/types"
)
const (
	// ProviderName is provider name for install.
	ProviderName = "query"
	// HelmReleaseKind is the kind of HelmRelease
	HelmReleaseKind = "HelmRelease"
	// annoAmbassadorServiceName is the annotation carrying the Ambassador service name.
	annoAmbassadorServiceName = "ambassador.service/name"
	// annoAmbassadorServiceNamespace is the annotation carrying the Ambassador service namespace.
	annoAmbassadorServiceNamespace = "ambassador.service/namespace"
)
// provider implements the velaql "query" provider, backed by a controller
// client and a rest config for raw clientset access.
type provider struct {
	cli client.Client
	cfg *rest.Config
}
// Resource refers to an object together with its cluster information.
type Resource struct {
	Cluster   string                     `json:"cluster"`
	Component string                     `json:"component"`
	Revision  string                     `json:"revision"`
	Object    *unstructured.Unstructured `json:"object"`
}
// Option is the query option.
type Option struct {
	Name      string       `json:"name"`      // application name
	Namespace string       `json:"namespace"` // application namespace
	Filter    FilterOption `json:"filter,omitempty"`
	// WithStatus means query the object from the cluster and get the latest status
	// This field only suitable for ListResourcesInApp
	WithStatus bool `json:"withStatus,omitempty"`
	// WithTree means recursively query the resource tree.
	WithTree bool `json:"withTree,omitempty"`
}
// FilterOption filters resources created by a component.
type FilterOption struct {
	Cluster          string   `json:"cluster,omitempty"`          // restrict to one cluster
	ClusterNamespace string   `json:"clusterNamespace,omitempty"` // restrict to one namespace in that cluster
	Components       []string `json:"components,omitempty"`       // restrict to these component names
	APIVersion       string   `json:"apiVersion,omitempty"`       // restrict to this resource apiVersion
	Kind             string   `json:"kind,omitempty"`             // restrict to this resource kind
	QueryNewest      bool     `json:"queryNewest,omitempty"`
}
// ListResourcesInApp lists CRs created by Application, this provider queries the object data.
func (h *provider) ListResourcesInApp(ctx monitorContext.Context, wfCtx wfContext.Context, v *value.Value, act types.Action) error {
	val, err := v.LookupValue("app")
	if err != nil {
		return err
	}
	opt := Option{}
	// NOTE(review): the sibling providers report unmarshal failures via
	// v.FillObject(err.Error(), "err") but this one returns err directly —
	// confirm which error-reporting convention is intended here.
	if err = val.UnmarshalTo(&opt); err != nil {
		return err
	}
	collector := NewAppCollector(h.cli, opt)
	appResList, err := collector.CollectResourceFromApp(ctx)
	if err != nil {
		return v.FillObject(err.Error(), "err")
	}
	// Normalize nil to an empty list so the caller always sees a list value.
	if appResList == nil {
		appResList = make([]Resource, 0)
	}
	return fillQueryResult(v, appResList, "list")
}
// ListAppliedResources list applied resource from tracker, this provider only queries the metadata.
func (h *provider) ListAppliedResources(ctx monitorContext.Context, wfCtx wfContext.Context, v *value.Value, act types.Action) error {
	val, err := v.LookupValue("app")
	if err != nil {
		return err
	}
	opt := Option{}
	if err = val.UnmarshalTo(&opt); err != nil {
		return v.FillObject(err.Error(), "err")
	}
	app := new(v1beta1.Application)
	key := client.ObjectKey{Name: opt.Name, Namespace: opt.Namespace}
	if err = h.cli.Get(ctx, key, app); err != nil {
		return v.FillObject(err.Error(), "err")
	}
	collector := NewAppCollector(h.cli, opt)
	resources, err := collector.ListApplicationResources(ctx, app)
	if err != nil {
		return v.FillObject(err.Error(), "err")
	}
	// Normalize nil to an empty list so the caller always sees a list value.
	if resources == nil {
		resources = []*querytypes.AppliedResource{}
	}
	return fillQueryResult(v, resources, "list")
}
// CollectResources lists the application's applied resources and flattens
// them into ResourceItems matching the filter's kind/apiVersion: resources
// with a resource tree are expanded recursively, while leaf resources are
// fetched live from the cluster.
func (h *provider) CollectResources(ctx monitorContext.Context, wfCtx wfContext.Context, v *value.Value, act types.Action) error {
	val, err := v.LookupValue("app")
	if err != nil {
		return err
	}
	opt := Option{}
	if err = val.UnmarshalTo(&opt); err != nil {
		return v.FillObject(err.Error(), "err")
	}
	collector := NewAppCollector(h.cli, opt)
	app := new(v1beta1.Application)
	appKey := client.ObjectKey{Name: opt.Name, Namespace: opt.Namespace}
	if err = h.cli.Get(ctx, appKey, app); err != nil {
		return v.FillObject(err.Error(), "err")
	}
	appResList, err := collector.ListApplicationResources(ctx, app)
	if err != nil {
		return v.FillObject(err.Error(), "err")
	}
	var resources = make([]querytypes.ResourceItem, 0)
	for _, res := range appResList {
		if res.ResourceTree != nil {
			// Expand the whole resource tree, filtered by kind/apiVersion.
			resources = append(resources, buildResourceArray(*res, res.ResourceTree, res.ResourceTree, opt.Filter.Kind, opt.Filter.APIVersion)...)
		} else if res.Kind == opt.Filter.Kind && res.APIVersion == opt.Filter.APIVersion {
			// Leaf resource: fetch the live object from the cluster.
			var object unstructured.Unstructured
			object.SetAPIVersion(opt.Filter.APIVersion)
			object.SetKind(opt.Filter.Kind)
			if err := h.cli.Get(ctx, apimachinerytypes.NamespacedName{Namespace: res.Namespace, Name: res.Name}, &object); err == nil {
				resources = append(resources, buildResourceItem(*res, querytypes.Workload{
					APIVersion: app.APIVersion,
					Kind:       app.Kind,
					Name:       app.Name,
					Namespace:  app.Namespace,
				}, object))
			} else {
				// Fetch failures are logged but do not abort the collection.
				klog.Errorf("failed to get the service:%s", err.Error())
			}
		}
	}
	return fillQueryResult(v, resources, "list")
}
// SearchEvents lists the Kubernetes events attached to the given object in
// the given cluster and fills them into the query result.
func (h *provider) SearchEvents(ctx monitorContext.Context, wfCtx wfContext.Context, v *value.Value, act types.Action) error {
	val, err := v.LookupValue("value")
	if err != nil {
		return err
	}
	cluster, err := v.GetString("cluster")
	if err != nil {
		return err
	}
	obj := new(unstructured.Unstructured)
	if err = val.UnmarshalTo(obj); err != nil {
		return err
	}
	var events corev1.EventList
	opts := []client.ListOption{
		client.InNamespace(obj.GetNamespace()),
		client.MatchingFieldsSelector{Selector: getEventFieldSelector(obj)},
	}
	clusterCtx := multicluster.ContextWithClusterName(ctx, cluster)
	if err = h.cli.List(clusterCtx, &events, opts...); err != nil {
		return v.FillObject(err.Error(), "err")
	}
	return fillQueryResult(v, events.Items, "list")
}
// CollectLogsInPod collects the container logs of the pod identified by the
// "cluster", "namespace" and "pod" fields of v, using the corev1.PodLogOptions
// carried in the "options" field.
//
// Results are filled back into v under "outputs": the logs (base64-encoded,
// because CUE strings cannot carry arbitrary bytes) plus the time range they
// cover. Soft failures (pod lookup, log streaming, read errors) are
// accumulated and reported through "outputs.err" instead of a returned error,
// so partial results are still delivered.
func (h *provider) CollectLogsInPod(ctx monitorContext.Context, wfCtx wfContext.Context, v *value.Value, act types.Action) error {
	cluster, err := v.GetString("cluster")
	if err != nil {
		return errors.Wrapf(err, "invalid cluster")
	}
	namespace, err := v.GetString("namespace")
	if err != nil {
		return errors.Wrapf(err, "invalid namespace")
	}
	pod, err := v.GetString("pod")
	if err != nil {
		return errors.Wrapf(err, "invalid pod name")
	}
	val, err := v.LookupValue("options")
	if err != nil {
		return errors.Wrapf(err, "invalid log options")
	}
	opts := &corev1.PodLogOptions{}
	if err = val.UnmarshalTo(opts); err != nil {
		return errors.Wrapf(err, "invalid log options content")
	}
	cliCtx := multicluster.ContextWithClusterName(ctx, cluster)
	// Wrap the transport so requests are routed through the multi-cluster gateway.
	h.cfg.Wrap(pkgmulticluster.NewTransportWrapper())
	clientSet, err := kubernetes.NewForConfig(h.cfg)
	if err != nil {
		return errors.Wrapf(err, "failed to create kubernetes client")
	}
	// Failures below are accumulated in errMsg and surfaced via outputs.err.
	var defaultOutputs = make(map[string]interface{})
	var errMsg string
	podInst, err := clientSet.CoreV1().Pods(namespace).Get(cliCtx, pod, v1.GetOptions{})
	if err != nil {
		errMsg += fmt.Sprintf("failed to get pod: %s; ", err.Error())
	}
	req := clientSet.CoreV1().Pods(namespace).GetLogs(pod, opts)
	readCloser, err := req.Stream(cliCtx)
	if err != nil {
		errMsg += fmt.Sprintf("failed to get stream logs %s; ", err.Error())
	}
	if readCloser != nil && podInst != nil {
		r := bufio.NewReader(readCloser)
		buffer := bytes.NewBuffer(nil)
		var readErr error
		defer func() {
			_ = readCloser.Close()
		}()
		// Drain the stream line by line; EOF ends the loop normally, any other
		// read error is reported through errMsg after the loop.
		for {
			s, err := r.ReadString('\n')
			buffer.WriteString(s)
			if err != nil {
				if !errors.Is(err, io.EOF) {
					readErr = err
				}
				break
			}
		}
		toDate := v1.Now()
		var fromDate v1.Time
		// nolint
		if opts.SinceTime != nil {
			fromDate = *opts.SinceTime
		} else if opts.SinceSeconds != nil {
			fromDate = v1.NewTime(toDate.Add(time.Duration(-(*opts.SinceSeconds) * int64(time.Second))))
		} else {
			// No explicit range requested: logs start at pod creation.
			fromDate = podInst.CreationTimestamp
		}
		// the cue string can not support the special characters
		logs := base64.StdEncoding.EncodeToString(buffer.Bytes())
		defaultOutputs = map[string]interface{}{
			"logs": logs,
			"info": map[string]interface{}{
				"fromDate": fromDate,
				"toDate":   toDate,
			},
		}
		if readErr != nil {
			errMsg += readErr.Error()
		}
	}
	if errMsg != "" {
		// Use Warning, not Warningf: errMsg is not a format string and may
		// contain '%' verbs from wrapped error text (go vet printf check).
		klog.Warning(errMsg)
		defaultOutputs["err"] = errMsg
	}
	return v.FillObject(defaultOutputs, "outputs")
}
// Install register handlers to provider discover.
func Install(p types.Providers, cli client.Client, cfg *rest.Config) {
	prd := &provider{cli: cli, cfg: cfg}
	// Handler names are the identifiers CUE steps use to invoke each query.
	handlers := map[string]types.Handler{
		"listResourcesInApp":      prd.ListResourcesInApp,
		"listAppliedResources":    prd.ListAppliedResources,
		"collectResources":        prd.CollectResources,
		"searchEvents":            prd.SearchEvents,
		"collectLogsInPod":        prd.CollectLogsInPod,
		"collectServiceEndpoints": prd.CollectServiceEndpoints,
	}
	p.Register(ProviderName, handlers)
}
|
package contracts
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"encoding/asn1"
"encoding/hex"
"fmt"
"math/big"
"reflect"
"testing"
"github.com/SIGBlockchain/project_aurum/internal/hashing"
"github.com/SIGBlockchain/project_aurum/internal/publickey"
)
// TestNew exercises contract construction for a minting contract (nil
// sender), a normal contract, and an invalid version-0 contract.
func TestNew(t *testing.T) {
	senderPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	encodedPublicKey, _ := publickey.Encode(&senderPrivateKey.PublicKey)
	recipientPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	encodedRecipientKey, _ := publickey.Encode(&recipientPrivateKey.PublicKey)
	type args struct {
		version       uint16
		sender        *ecdsa.PrivateKey
		recipient     []byte
		value         uint64
		newStateNonce uint64
	}
	cases := []struct {
		name    string
		args    args
		want    *Contract
		wantErr bool
	}{
		{
			name: "Unsigned Minting contract",
			args: args{
				version:       1,
				sender:        nil,
				recipient:     hashing.New(encodedPublicKey),
				value:         1000000000,
				newStateNonce: 1,
			},
			want: &Contract{
				Version:         1,
				SenderPubKey:    nil,
				SigLen:          0,
				Signature:       nil,
				RecipPubKeyHash: hashing.New(encodedPublicKey),
				Value:           1000000000,
				StateNonce:      1,
			},
			wantErr: false,
		},
		{
			name: "Unsigned Normal contract",
			args: args{
				version:       1,
				sender:        senderPrivateKey,
				recipient:     hashing.New(encodedRecipientKey),
				value:         1000000000,
				newStateNonce: 1,
			},
			want: &Contract{
				Version:         1,
				SenderPubKey:    &senderPrivateKey.PublicKey,
				SigLen:          0,
				Signature:       nil,
				RecipPubKeyHash: hashing.New(encodedRecipientKey),
				Value:           1000000000,
				StateNonce:      1,
			},
			wantErr: false,
		},
		{
			// Version 0 is rejected: no contract, error returned.
			name: "Version 0 contract",
			args: args{
				version:       0,
				sender:        senderPrivateKey,
				recipient:     hashing.New(encodedPublicKey),
				value:         1000000000,
				newStateNonce: 1,
			},
			want:    nil,
			wantErr: true,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := New(tc.args.version, tc.args.sender, tc.args.recipient, tc.args.value, tc.args.newStateNonce)
			if (err != nil) != tc.wantErr {
				t.Errorf("New() error = %v, wantErr %v", err, tc.wantErr)
			}
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("New() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestContract_Serialize checks the wire layout produced by Serialize for
// minting (nil sender), unsigned, and signed contracts.
//
// Expected layout: bytes [2:180) encoded sender public key (zeroed for the
// minting contract), byte 180 the signature length, then (for signed
// contracts) the signature itself, followed by the 32-byte recipient public
// key hash.
func TestContract_Serialize(t *testing.T) {
	senderPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	recipientPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	encodedPublicKey, _ := publickey.Encode(&senderPrivateKey.PublicKey)
	nullSenderContract, _ := New(1, nil, hashing.New(encodedPublicKey), 1000, 0)
	encodedRecipientKey, _ := publickey.Encode(&recipientPrivateKey.PublicKey)
	unsignedContract, _ := New(1, senderPrivateKey, hashing.New(encodedRecipientKey), 1000, 0)
	signedContract, _ := New(1, senderPrivateKey, hashing.New(encodedRecipientKey), 1000, 0)
	signedContract.Sign(senderPrivateKey)
	tests := []struct {
		name string
		c    *Contract
	}{
		{
			name: "Minting contract",
			c:    nullSenderContract,
		},
		{
			name: "Unsigned contract",
			c:    unsignedContract,
		},
		{
			name: "Signed contract",
			c:    signedContract,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, _ := tt.c.Serialize()
			sigLen := got[180]
			testSendPublicKey := tt.c.SenderPubKey
			testEncodeSenderPubKey, _ := publickey.Encode(testSendPublicKey)
			switch tt.name {
			case "Minting contract":
				if !bytes.Equal(got[2:180], make([]byte, 178)) {
					t.Errorf("Non null sender public key for minting contract")
				}
				if sigLen != 0 {
					t.Errorf("Non-zero signature length in minting contract: %v", sigLen)
				}
				if !bytes.Equal(got[181:213], tt.c.RecipPubKeyHash) {
					t.Errorf("Invalid recipient public key hash in minting contract")
				}
			case "Unsigned contract":
				if sigLen != 0 {
					t.Errorf("Non-zero signature length in unsigned contract: %v", sigLen)
				}
				if !bytes.Equal(got[2:180], testEncodeSenderPubKey) {
					t.Errorf("Invalid encoded public key for unsigned contract")
				}
				if !bytes.Equal(got[181:213], tt.c.RecipPubKeyHash) {
					t.Errorf("Invalid recipient public key hash in unsigned contract")
				}
			// BUG FIX: this case previously read "Signed Contract" (capital C),
			// which never matched the test name "Signed contract", so these
			// assertions were silently skipped for the signed case.
			case "Signed contract":
				if sigLen == 0 {
					t.Errorf("Zero length signature in signed contract: %v", sigLen)
				}
				if !bytes.Equal(got[2:180], testEncodeSenderPubKey) {
					t.Errorf("Invalid encoded public key for signed contract")
				}
				if !bytes.Equal(got[(181+int(sigLen)):(181+int(sigLen)+32)], tt.c.RecipPubKeyHash) {
					t.Errorf("Invalid recipient public key hash in signed contract")
				}
			}
		})
	}
}
// TestContract_Deserialize round-trips minting, unsigned, and signed
// contracts through Serialize/Deserialize and verifies every field survives.
func TestContract_Deserialize(t *testing.T) {
	senderPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	recipientPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	encodedPublicKey, _ := publickey.Encode(&senderPrivateKey.PublicKey)
	nullSenderContract, _ := New(1, nil, hashing.New(encodedPublicKey), 1000, 1)
	nullSenderContractSerialized, _ := nullSenderContract.Serialize()
	encodedRecipientKey, _ := publickey.Encode(&recipientPrivateKey.PublicKey)
	unsignedContract, _ := New(1, senderPrivateKey, hashing.New(encodedRecipientKey), 1000, 1)
	unsignedContractSerialized, _ := unsignedContract.Serialize()
	signedContract, _ := New(1, senderPrivateKey, hashing.New(encodedRecipientKey), 1000, 1)
	signedContract.Sign(senderPrivateKey)
	signedContractSerialized, _ := signedContract.Serialize()
	type args struct {
		b []byte
	}
	tests := []struct {
		name string
		c    *Contract
		args args
	}{
		{
			name: "Minting contract",
			c:    &Contract{},
			args: args{
				nullSenderContractSerialized,
			},
		},
		{
			name: "Unsigned contract",
			c:    &Contract{},
			args: args{
				unsignedContractSerialized,
			},
		},
		{
			name: "Signed contract",
			c:    &Contract{},
			args: args{
				signedContractSerialized,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.c.Deserialize(tt.args.b)
			// Go switch cases do not fall through, so no break statements are
			// needed (the originals were redundant, staticcheck S1023).
			switch tt.name {
			case "Minting contract":
				if tt.c.Version != nullSenderContract.Version {
					t.Errorf("Invalid field on nullSender contract: version")
				}
				if tt.c.SigLen != nullSenderContract.SigLen {
					t.Errorf("Invalid field on nullSender contract: signature length")
				}
				if tt.c.Value != nullSenderContract.Value {
					t.Errorf("Invalid field on nullSender contract: value")
				}
				if tt.c.Signature != nil {
					t.Errorf("Invalid field on nullSender contract: signature")
				}
				if tt.c.SenderPubKey != nil {
					t.Errorf("Invalid field on nullSender contract: sender public key")
				}
				if tt.c.StateNonce != nullSenderContract.StateNonce {
					// Pass the format string straight to t.Errorf instead of
					// pre-formatting via fmt.Sprintf (go vet printf check).
					t.Errorf("Invalid field on nullSender contract: state nonce. Want: %d, got %d", nullSenderContract.StateNonce, tt.c.StateNonce)
				}
			case "Unsigned contract":
				if tt.c.Version != unsignedContract.Version {
					t.Errorf("Invalid field on unsigned contract: version")
				}
				if tt.c.SigLen != unsignedContract.SigLen {
					t.Errorf("Invalid field on unsigned contract: signature length")
				}
				if tt.c.Value != unsignedContract.Value {
					t.Errorf("Invalid field on unsigned contract: value")
				}
				if tt.c.Signature != nil {
					t.Errorf("Invalid field on unsigned contract: signature")
				}
				if !reflect.DeepEqual(tt.c.SenderPubKey, &senderPrivateKey.PublicKey) {
					t.Errorf("Invalid field on unsigned contract: sender public key")
				}
				if tt.c.StateNonce != unsignedContract.StateNonce {
					t.Errorf("Invalid field on unsigned contract: state nonce")
				}
			case "Signed contract":
				if tt.c.Version != signedContract.Version {
					t.Errorf("Invalid field on signed contract: version")
				}
				if tt.c.SigLen != signedContract.SigLen {
					t.Errorf("Invalid field on signed contract: signature length")
				}
				if tt.c.Value != signedContract.Value {
					t.Errorf("Invalid field on signed contract: value")
				}
				if !bytes.Equal(tt.c.Signature, signedContract.Signature) {
					t.Errorf("Invalid field on signed contract: signature")
				}
				if !reflect.DeepEqual(tt.c.SenderPubKey, &senderPrivateKey.PublicKey) {
					t.Errorf("Invalid field on signed contract: sender public key")
				}
				if tt.c.StateNonce != signedContract.StateNonce {
					t.Errorf("Invalid field on signed contract: state nonce")
				}
			}
		})
	}
}
// TestContract_Sign verifies that Sign produces an ASN.1-encoded ECDSA
// signature over the hash of the contract's pre-signature serialization, and
// that the signature verifies only under the signer's public key.
func TestContract_Sign(t *testing.T) {
	senderPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	encodedPublicKey, _ := publickey.Encode(&senderPrivateKey.PublicKey)
	testContract, _ := New(1, senderPrivateKey, hashing.New(encodedPublicKey), 1000, 0)
	type args struct {
		sender ecdsa.PrivateKey
	}
	cases := []struct {
		name string
		c    *Contract
		args args
	}{
		{
			c: testContract,
			args: args{
				sender: *senderPrivateKey,
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Capture the hash of the still-unsigned contract before signing;
			// Sign is expected to sign exactly this digest.
			unsigned := testContract
			serialized, _ := unsigned.Serialize()
			digest := hashing.New(serialized)
			tc.c.Sign(&tc.args.sender)
			var sig struct {
				R, S *big.Int
			}
			if _, err := asn1.Unmarshal(tc.c.Signature, &sig); err != nil {
				t.Errorf("Failed to unmarshall signature")
			}
			if !ecdsa.Verify(tc.c.SenderPubKey, digest, sig.R, sig.S) {
				t.Errorf("Failed to verify valid signature")
			}
			// A signature must not verify under an unrelated key.
			maliciousPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
			if ecdsa.Verify(&maliciousPrivateKey.PublicKey, digest, sig.R, sig.S) {
				t.Errorf("Failed to reject invalid signature")
			}
		})
	}
}
// TestEquals builds one base contract plus seven variants, each differing in
// exactly one field, and checks Equals accepts the identical pair and
// rejects every single-field difference.
func TestEquals(t *testing.T) {
	senderPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	encodedPublicKey, _ := publickey.Encode(&senderPrivateKey.PublicKey)
	base := Contract{
		Version:         1,
		SenderPubKey:    &senderPrivateKey.PublicKey,
		SigLen:          0,
		Signature:       nil,
		RecipPubKeyHash: hashing.New(encodedPublicKey),
		Value:           1000000000,
		StateNonce:      1,
	}
	// Start every variant as a copy of base, then perturb one field each.
	variants := make([]Contract, 7)
	for i := range variants {
		variants[i] = base
	}
	variants[0].Version = 9001
	anotherSenderPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	variants[1].SenderPubKey = &anotherSenderPrivateKey.PublicKey
	variants[2].SigLen = 9
	variants[3].Signature = make([]byte, 100)
	encodedAnotherSenderPublicKey, _ := publickey.Encode(&anotherSenderPrivateKey.PublicKey)
	variants[4].RecipPubKeyHash = hashing.New(encodedAnotherSenderPublicKey)
	variants[5].Value = 9002
	variants[6].StateNonce = 9
	cases := []struct {
		name string
		c1   Contract
		c2   Contract
		want bool
	}{
		{
			name: "equal contracts",
			c1:   base,
			c2:   base,
			want: true,
		},
		{
			name: "different contract version",
			c1:   base,
			c2:   variants[0],
			want: false,
		},
		{
			name: "different contract SenderPubKey",
			c1:   base,
			c2:   variants[1],
			want: false,
		},
		{
			name: "different contract signature lengths",
			c1:   base,
			c2:   variants[2],
			want: false,
		},
		{
			name: "different contract signatures",
			c1:   base,
			c2:   variants[3],
			want: false,
		},
		{
			name: "different contract RecipPubKeyHash",
			c1:   base,
			c2:   variants[4],
			want: false,
		},
		{
			name: "different contract Values",
			c1:   base,
			c2:   variants[5],
			want: false,
		},
		{
			name: "different contract StateNonce",
			c1:   base,
			c2:   variants[6],
			want: false,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if result := tc.c1.Equals(tc.c2); result != tc.want {
				t.Errorf("Error: Equals() returned %v for %s\n Wanted: %v", result, tc.name, tc.want)
			}
		})
	}
}
// TestContractToString checks that ToString renders every contract field in
// the expected "Field: value" line format, hex-encoding the byte fields.
func TestContractToString(t *testing.T) {
	senderPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	encodedSenderPublicKey, _ := publickey.Encode(&senderPrivateKey.PublicKey)
	testContract := Contract{
		Version:         1,
		SenderPubKey:    &senderPrivateKey.PublicKey,
		SigLen:          0,
		Signature:       nil,
		RecipPubKeyHash: hashing.New(encodedSenderPublicKey),
		Value:           1000000000,
		StateNonce:      1,
	}
	encodedContractSenderKey, _ := publickey.Encode(testContract.SenderPubKey)
	// Build the expected rendering independently from the same fields.
	want := fmt.Sprintf("Version: %v\nSenderPubKey: %v\nSigLen: %v\nSignature: %v\nRecipPubKeyHash: %v\nValue: %v\nStateNonce: %v\n",
		testContract.Version,
		hex.EncodeToString(encodedContractSenderKey),
		testContract.SigLen,
		hex.EncodeToString(testContract.Signature),
		hex.EncodeToString(testContract.RecipPubKeyHash),
		testContract.Value,
		testContract.StateNonce)
	if got := testContract.ToString(); got != want {
		t.Error("Contract String is not equal to test String")
	}
}
// TestMarshal converts a Contract to its JSON representation and checks each
// field survives the conversion (byte fields become hex strings); a
// zero-value contract is expected to fail.
func TestMarshal(t *testing.T) {
	senderPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	encodedSenderPublicKey, _ := publickey.Encode(&senderPrivateKey.PublicKey)
	testContract := Contract{
		Version:         1,
		SenderPubKey:    &senderPrivateKey.PublicKey,
		SigLen:          0,
		Signature:       nil,
		RecipPubKeyHash: hashing.New(encodedSenderPublicKey),
		Value:           1000000000,
		StateNonce:      1,
	}
	var nilContract Contract
	cases := []struct {
		name    string
		c       Contract
		wantErr bool
	}{
		{
			"contract",
			testContract,
			false,
		},
		{
			"nil contract",
			nilContract,
			true,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := tc.c.Marshal()
			if (err != nil) != tc.wantErr {
				t.Errorf("Error: Marshal() returned %v for errors. Wanted: %v", err, tc.wantErr)
			}
			if tc.name == "contract" {
				if got.Version != tc.c.Version {
					t.Errorf("Error: Version does not match. Wanted: %v, Got: %v", tc.c.Version, got.Version)
				}
				encodedSender, _ := hex.DecodeString(got.SenderPublicKey)
				if sender, _ := publickey.Encode(tc.c.SenderPubKey); !bytes.Equal(encodedSender, sender) {
					t.Errorf("Error: Sender pubkey does not match. Wanted: %v, Got: %v", tc.c.SenderPubKey, sender)
				}
				if got.SignatureLength != tc.c.SigLen {
					t.Errorf("Error: Signature length does not match. Wanted: %v, Got: %v", tc.c.SigLen, got.SignatureLength)
				}
				if signature, _ := hex.DecodeString(got.Signature); !bytes.Equal(signature, tc.c.Signature) {
					t.Errorf("Error: Signature does not match. Wanted: %v, Got: %v", tc.c.Signature, signature)
				}
				if recip, _ := hex.DecodeString(got.RecipientWalletAddress); !bytes.Equal(recip, tc.c.RecipPubKeyHash) {
					t.Errorf("Error: Recip pubkey hash does not match. Wanted: %v, Got: %v", tc.c.RecipPubKeyHash, recip)
				}
				if got.Value != tc.c.Value {
					t.Errorf("Error: Value does not match. Wanted: %v, Got: %v", tc.c.Value, got.Value)
				}
				if got.StateNonce != tc.c.StateNonce {
					t.Errorf("Error: State nonce does not match. Wanted: %v, Got: %v", tc.c.StateNonce, got.StateNonce)
				}
			}
		})
	}
}
// TestUnmarshal round-trips a Contract through Marshal/Unmarshal and checks
// the result equals the original; a zero-value JSONContract must fail.
func TestUnmarshal(t *testing.T) {
	senderPrivateKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	encodedSenderPublicKey, _ := publickey.Encode(&senderPrivateKey.PublicKey)
	testContract := Contract{
		Version:         1,
		SenderPubKey:    &senderPrivateKey.PublicKey,
		SigLen:          0,
		Signature:       nil,
		RecipPubKeyHash: hashing.New(encodedSenderPublicKey),
		Value:           1000000000,
		StateNonce:      1,
	}
	marshalledContract, _ := testContract.Marshal()
	var nilContract JSONContract
	cases := []struct {
		name    string
		mc      JSONContract
		wantErr bool
	}{
		{
			"mContract",
			marshalledContract,
			false,
		},
		{
			"nil contract",
			nilContract,
			true,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := tc.mc.Unmarshal()
			if (err != nil) != tc.wantErr {
				t.Errorf("Error: Unmarshal() returned %v for errors. Wanted: %v", err, tc.wantErr)
			}
			if tc.name == "mContract" && !got.Equals(testContract) {
				t.Errorf("Error: result contract does not equal to test contract")
			}
		})
	}
}
|
package jobconfig
import (
"testing"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/sets"
prowconfig "k8s.io/test-infra/prow/config"
)
// TestMergeConfigs verifies mergeConfigs merges the part config into dest:
// empty inputs stay empty, a one-sided input is copied through, and
// two-sided input concatenates the job lists per repository.
func TestMergeConfigs(t *testing.T) {
	cases := []struct {
		name     string
		dest     *prowconfig.JobConfig
		part     *prowconfig.JobConfig
		expected *prowconfig.JobConfig
	}{
		{
			name:     "empty dest and empty part leads to empty result",
			dest:     &prowconfig.JobConfig{},
			part:     &prowconfig.JobConfig{},
			expected: &prowconfig.JobConfig{},
		},
		{
			name: "empty dest leads to copy of part",
			dest: &prowconfig.JobConfig{},
			part: &prowconfig.JobConfig{
				Presubmits:  map[string][]prowconfig.Presubmit{"super/duper": {{JobBase: prowconfig.JobBase{Name: "test"}}}},
				Postsubmits: map[string][]prowconfig.Postsubmit{"super/duper": {{JobBase: prowconfig.JobBase{Name: "post-test"}}}},
			},
			expected: &prowconfig.JobConfig{
				Presubmits:  map[string][]prowconfig.Presubmit{"super/duper": {{JobBase: prowconfig.JobBase{Name: "test"}}}},
				Postsubmits: map[string][]prowconfig.Postsubmit{"super/duper": {{JobBase: prowconfig.JobBase{Name: "post-test"}}}},
			},
		},
		{
			name: "empty part leads to dest",
			dest: &prowconfig.JobConfig{
				Presubmits:  map[string][]prowconfig.Presubmit{"super/duper": {{JobBase: prowconfig.JobBase{Name: "test"}}}},
				Postsubmits: map[string][]prowconfig.Postsubmit{"super/duper": {{JobBase: prowconfig.JobBase{Name: "post-test"}}}},
			},
			part: &prowconfig.JobConfig{},
			expected: &prowconfig.JobConfig{
				Presubmits:  map[string][]prowconfig.Presubmit{"super/duper": {{JobBase: prowconfig.JobBase{Name: "test"}}}},
				Postsubmits: map[string][]prowconfig.Postsubmit{"super/duper": {{JobBase: prowconfig.JobBase{Name: "post-test"}}}},
			},
		},
		{
			name: "data in both leads to merge",
			dest: &prowconfig.JobConfig{
				Presubmits:  map[string][]prowconfig.Presubmit{"super/duper": {{JobBase: prowconfig.JobBase{Name: "test"}}}},
				Postsubmits: map[string][]prowconfig.Postsubmit{"super/duper": {{JobBase: prowconfig.JobBase{Name: "post-test"}}}},
			},
			part: &prowconfig.JobConfig{
				Presubmits:  map[string][]prowconfig.Presubmit{"super/duper": {{JobBase: prowconfig.JobBase{Name: "test-2"}}}},
				Postsubmits: map[string][]prowconfig.Postsubmit{"super/duper": {{JobBase: prowconfig.JobBase{Name: "post-test-2"}}}},
			},
			expected: &prowconfig.JobConfig{
				Presubmits:  map[string][]prowconfig.Presubmit{"super/duper": {{JobBase: prowconfig.JobBase{Name: "test"}}, {JobBase: prowconfig.JobBase{Name: "test-2"}}}},
				Postsubmits: map[string][]prowconfig.Postsubmit{"super/duper": {{JobBase: prowconfig.JobBase{Name: "post-test"}}, {JobBase: prowconfig.JobBase{Name: "post-test-2"}}}},
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// mergeConfigs mutates dest in place, so compare dest afterwards.
			mergeConfigs(tc.dest, tc.part)
			if actual, expected := tc.dest, tc.expected; !equality.Semantic.DeepEqual(actual, expected) {
				t.Errorf("%s: wanted to get %v, got %v", tc.name, expected, actual)
			}
		})
	}
}
// TestMergeJobConfig verifies mergeJobConfig merges source jobs into the
// destination config in place: new jobs are appended, same-named jobs are
// replaced by the source version, and destination jobs listed in allJobs but
// absent from the source are pruned (e.g. "other-job" in the last case).
func TestMergeJobConfig(t *testing.T) {
	tests := []struct {
		allJobs                       sets.String
		destination, source, expected *prowconfig.JobConfig
	}{
		// Empty destination: source presubmits are copied over unchanged.
		{
			allJobs:     sets.String{},
			destination: &prowconfig.JobConfig{},
			source: &prowconfig.JobConfig{
				Presubmits: map[string][]prowconfig.Presubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "source-job"}, Context: "ci/prow/source"},
				}},
			},
			expected: &prowconfig.JobConfig{
				Presubmits: map[string][]prowconfig.Presubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "source-job"}, Context: "ci/prow/source"},
				}},
			},
		}, { // Distinct presubmit names: source job is appended alongside the existing one.
			allJobs: sets.String{},
			destination: &prowconfig.JobConfig{
				Presubmits: map[string][]prowconfig.Presubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "another-job"}, Context: "ci/prow/another"},
				}},
			},
			source: &prowconfig.JobConfig{
				Presubmits: map[string][]prowconfig.Presubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "source-job"}, Context: "ci/prow/source"},
				}},
			},
			expected: &prowconfig.JobConfig{
				Presubmits: map[string][]prowconfig.Presubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "source-job"}, Context: "ci/prow/source"},
					{JobBase: prowconfig.JobBase{Name: "another-job"}, Context: "ci/prow/another"},
				}},
			},
		}, { // Same presubmit name: source version (different Context) wins.
			allJobs: sets.String{},
			destination: &prowconfig.JobConfig{
				Presubmits: map[string][]prowconfig.Presubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "same-job"}, Context: "ci/prow/same"},
				}},
			},
			source: &prowconfig.JobConfig{
				Presubmits: map[string][]prowconfig.Presubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "same-job"}, Context: "ci/prow/different"},
				}},
			},
			expected: &prowconfig.JobConfig{
				Presubmits: map[string][]prowconfig.Presubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "same-job"}, Context: "ci/prow/different"},
				}},
			},
		}, { // Empty destination: source postsubmits are copied over unchanged.
			allJobs:     sets.String{},
			destination: &prowconfig.JobConfig{},
			source: &prowconfig.JobConfig{
				Postsubmits: map[string][]prowconfig.Postsubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "source-job", Agent: "ci/prow/source"}},
				}},
			},
			expected: &prowconfig.JobConfig{
				Postsubmits: map[string][]prowconfig.Postsubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "source-job", Agent: "ci/prow/source"}},
				}},
			},
		}, { // Distinct postsubmit names: source job is appended alongside the existing one.
			allJobs: sets.String{},
			destination: &prowconfig.JobConfig{
				Postsubmits: map[string][]prowconfig.Postsubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "another-job", Agent: "ci/prow/another"}},
				}},
			},
			source: &prowconfig.JobConfig{
				Postsubmits: map[string][]prowconfig.Postsubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "source-job", Agent: "ci/prow/source"}},
				}},
			},
			expected: &prowconfig.JobConfig{
				Postsubmits: map[string][]prowconfig.Postsubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "source-job", Agent: "ci/prow/source"}},
					{JobBase: prowconfig.JobBase{Name: "another-job", Agent: "ci/prow/another"}},
				}},
			},
		}, { // Same postsubmit name: source version (different Agent) wins.
			allJobs: sets.String{},
			destination: &prowconfig.JobConfig{
				Postsubmits: map[string][]prowconfig.Postsubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "same-job", Agent: "ci/prow/same"}},
				}},
			},
			source: &prowconfig.JobConfig{
				Postsubmits: map[string][]prowconfig.Postsubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "same-job", Agent: "ci/prow/different"}},
				}},
			},
			expected: &prowconfig.JobConfig{
				Postsubmits: map[string][]prowconfig.Postsubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "same-job", Agent: "ci/prow/different"}},
				}},
			},
		}, { // Identical postsubmit in both: merge is a no-op.
			allJobs: sets.String{},
			destination: &prowconfig.JobConfig{
				Postsubmits: map[string][]prowconfig.Postsubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "same-job", Agent: "ci/prow/same"}},
				}},
			},
			source: &prowconfig.JobConfig{
				Postsubmits: map[string][]prowconfig.Postsubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "same-job", Agent: "ci/prow/same"}},
				}},
			},
			expected: &prowconfig.JobConfig{
				Postsubmits: map[string][]prowconfig.Postsubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "same-job", Agent: "ci/prow/same"}},
				}},
			},
		}, { // "other-job" is in allJobs but missing from source, so it is pruned;
			// "old-job" is not in allJobs, so it is kept.
			allJobs: sets.NewString("other-job"),
			destination: &prowconfig.JobConfig{
				Postsubmits: map[string][]prowconfig.Postsubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "same-job", Agent: "ci/prow/same"}},
					{JobBase: prowconfig.JobBase{Name: "other-job", Agent: "ci/prow/same"}},
					{JobBase: prowconfig.JobBase{Name: "old-job", Agent: "ci/prow/same"}},
				}},
			},
			source: &prowconfig.JobConfig{
				Postsubmits: map[string][]prowconfig.Postsubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "same-job", Agent: "ci/prow/same"}},
				}},
			},
			expected: &prowconfig.JobConfig{
				Postsubmits: map[string][]prowconfig.Postsubmit{"organization/repository": {
					{JobBase: prowconfig.JobBase{Name: "same-job", Agent: "ci/prow/same"}},
					{JobBase: prowconfig.JobBase{Name: "old-job", Agent: "ci/prow/same"}},
				}},
			},
		},
	}
	for _, tc := range tests {
		// mergeJobConfig mutates tc.destination in place.
		mergeJobConfig(tc.destination, tc.source, tc.allJobs)
		if !equality.Semantic.DeepEqual(tc.destination, tc.expected) {
			t.Errorf("expected merged job config diff:\n%s", diff.ObjectDiff(tc.expected, tc.destination))
		}
	}
}
// TestMergePresubmits verifies mergePresubmits combines an old and a new
// presubmit: ordinary fields (Labels, Context) take the new value, while
// "honored" fields (MaxConcurrency, AlwaysRun, RunIfChanged, SkipReport,
// Optional) keep the old value.
func TestMergePresubmits(t *testing.T) {
	var testCases = []struct {
		name     string
		old, new *prowconfig.Presubmit
		expected prowconfig.Presubmit
	}{
		{
			name: "identical old and new returns identical",
			old: &prowconfig.Presubmit{
				JobBase: prowconfig.JobBase{
					Name:           "pull-ci-super-duper",
					Agent:          "agent",
					Labels:         map[string]string{"foo": "bar"},
					MaxConcurrency: 10,
					Cluster:        "somewhere",
				},
				AlwaysRun:           true,
				Context:             "context",
				RegexpChangeMatcher: prowconfig.RegexpChangeMatcher{RunIfChanged: "foo"},
				SkipReport:          true,
				Optional:            true,
				Trigger:             "whatever",
				RerunCommand:        "something",
			},
			new: &prowconfig.Presubmit{
				JobBase: prowconfig.JobBase{
					Name:           "pull-ci-super-duper",
					Agent:          "agent",
					Labels:         map[string]string{"foo": "bar"},
					MaxConcurrency: 10,
					Cluster:        "somewhere",
				},
				AlwaysRun:           true,
				Context:             "context",
				RegexpChangeMatcher: prowconfig.RegexpChangeMatcher{RunIfChanged: "foo"},
				SkipReport:          true,
				Optional:            true,
				Trigger:             "whatever",
				RerunCommand:        "something",
			},
			expected: prowconfig.Presubmit{
				JobBase: prowconfig.JobBase{
					Name:           "pull-ci-super-duper",
					Agent:          "agent",
					Labels:         map[string]string{"foo": "bar"},
					MaxConcurrency: 10,
					Cluster:        "somewhere",
				},
				AlwaysRun:           true,
				RegexpChangeMatcher: prowconfig.RegexpChangeMatcher{RunIfChanged: "foo"},
				Context:             "context",
				SkipReport:          true,
				Optional:            true,
				Trigger:             "whatever",
				RerunCommand:        "something",
			},
		},
		{
			// Non-honored fields (Labels value, Context) differ in new and
			// are expected to take the new values in the merge result.
			name: "new can update fields in the old",
			old: &prowconfig.Presubmit{
				JobBase: prowconfig.JobBase{
					Name:           "pull-ci-super-duper",
					Agent:          "agent",
					Labels:         map[string]string{"foo": "bar"},
					MaxConcurrency: 10,
					Cluster:        "somewhere",
				},
				AlwaysRun:           true,
				RegexpChangeMatcher: prowconfig.RegexpChangeMatcher{RunIfChanged: "foo"},
				Context:             "context",
				SkipReport:          true,
				Optional:            true,
				Trigger:             "whatever",
				RerunCommand:        "something",
			},
			new: &prowconfig.Presubmit{
				JobBase: prowconfig.JobBase{
					Name:           "pull-ci-super-duper",
					Agent:          "agent",
					Labels:         map[string]string{"foo": "baz"},
					MaxConcurrency: 10,
					Cluster:        "somewhere",
				},
				AlwaysRun:           true,
				RegexpChangeMatcher: prowconfig.RegexpChangeMatcher{RunIfChanged: "foo"},
				Context:             "contaxt",
				SkipReport:          true,
				Optional:            true,
				Trigger:             "whatever",
				RerunCommand:        "something",
			},
			expected: prowconfig.Presubmit{
				JobBase: prowconfig.JobBase{
					Name:           "pull-ci-super-duper",
					Agent:          "agent",
					Labels:         map[string]string{"foo": "baz"},
					MaxConcurrency: 10,
					Cluster:        "somewhere",
				},
				AlwaysRun:           true,
				RegexpChangeMatcher: prowconfig.RegexpChangeMatcher{RunIfChanged: "foo"},
				Context:             "contaxt",
				SkipReport:          true,
				Optional:            true,
				Trigger:             "whatever",
				RerunCommand:        "something",
			},
		},
		{
			// Honored fields (MaxConcurrency, AlwaysRun, RunIfChanged,
			// SkipReport, Optional) differ in new but the merge result is
			// expected to keep the old values.
			name: "new cannot update honored fields in old",
			old: &prowconfig.Presubmit{
				JobBase: prowconfig.JobBase{
					Name:           "pull-ci-super-duper",
					Agent:          "agent",
					Labels:         map[string]string{"foo": "bar"},
					MaxConcurrency: 10,
					Cluster:        "somewhere",
				},
				AlwaysRun:           true,
				RegexpChangeMatcher: prowconfig.RegexpChangeMatcher{RunIfChanged: "foo"},
				Context:             "context",
				SkipReport:          true,
				Optional:            true,
				Trigger:             "whatever",
				RerunCommand:        "something",
			},
			new: &prowconfig.Presubmit{
				JobBase: prowconfig.JobBase{
					Name:           "pull-ci-super-duper",
					Agent:          "agent",
					Labels:         map[string]string{"foo": "bar"},
					MaxConcurrency: 10000,
					Cluster:        "somewhere",
				},
				AlwaysRun:           false,
				RegexpChangeMatcher: prowconfig.RegexpChangeMatcher{RunIfChanged: "whatever"},
				Context:             "context",
				SkipReport:          false,
				Optional:            false,
				Trigger:             "whatever",
				RerunCommand:        "something",
			},
			expected: prowconfig.Presubmit{
				JobBase: prowconfig.JobBase{
					Name:           "pull-ci-super-duper",
					Agent:          "agent",
					Labels:         map[string]string{"foo": "bar"},
					MaxConcurrency: 10,
					Cluster:        "somewhere",
				},
				AlwaysRun:           true,
				RegexpChangeMatcher: prowconfig.RegexpChangeMatcher{RunIfChanged: "foo"},
				Context:             "context",
				SkipReport:          true,
				Optional:            true,
				Trigger:             "whatever",
				RerunCommand:        "something",
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			if actual, expected := mergePresubmits(testCase.old, testCase.new), testCase.expected; !equality.Semantic.DeepEqual(actual, expected) {
				t.Errorf("%s: did not get expected merged presubmit config:\n%s", testCase.name, diff.ObjectDiff(actual, expected))
			}
		})
	}
}
// TestMergePostsubmits verifies mergePostsubmits combines an old and a new
// postsubmit: ordinary fields (Labels) take the new value, while the honored
// MaxConcurrency field keeps the old value.
func TestMergePostsubmits(t *testing.T) {
	var testCases = []struct {
		name     string
		old, new *prowconfig.Postsubmit
		expected prowconfig.Postsubmit
	}{
		{
			name: "identical old and new returns identical",
			old: &prowconfig.Postsubmit{
				JobBase: prowconfig.JobBase{
					Name:           "pull-ci-super-duper",
					Labels:         map[string]string{"foo": "bar"},
					MaxConcurrency: 10,
					Agent:          "agent",
					Cluster:        "somewhere",
				},
			},
			new: &prowconfig.Postsubmit{
				JobBase: prowconfig.JobBase{
					Name:           "pull-ci-super-duper",
					Labels:         map[string]string{"foo": "bar"},
					MaxConcurrency: 10,
					Agent:          "agent",
					Cluster:        "somewhere",
				},
			},
			expected: prowconfig.Postsubmit{
				JobBase: prowconfig.JobBase{
					Name:           "pull-ci-super-duper",
					Labels:         map[string]string{"foo": "bar"},
					MaxConcurrency: 10,
					Agent:          "agent",
					Cluster:        "somewhere",
				},
			},
		},
		{
			// The Labels value differs in new and is expected to take the
			// new value in the merge result.
			name: "new can update fields in the old",
			old: &prowconfig.Postsubmit{
				JobBase: prowconfig.JobBase{
					Name:           "pull-ci-super-duper",
					Labels:         map[string]string{"foo": "bar"},
					MaxConcurrency: 10,
					Agent:          "agent",
					Cluster:        "somewhere",
				},
			},
			new: &prowconfig.Postsubmit{
				JobBase: prowconfig.JobBase{Name: "pull-ci-super-duper",
					Labels:         map[string]string{"foo": "baz"},
					MaxConcurrency: 10,
					Agent:          "agent",
					Cluster:        "somewhere",
				},
			},
			expected: prowconfig.Postsubmit{
				JobBase: prowconfig.JobBase{Name: "pull-ci-super-duper",
					Labels:         map[string]string{"foo": "baz"},
					MaxConcurrency: 10,
					Agent:          "agent",
					Cluster:        "somewhere",
				},
			},
		},
		{
			// MaxConcurrency is honored: new's 10000 must be ignored and the
			// old value of 10 kept in the merge result.
			name: "new cannot update honored fields in old",
			old: &prowconfig.Postsubmit{
				JobBase: prowconfig.JobBase{Name: "pull-ci-super-duper",
					Labels:         map[string]string{"foo": "bar"},
					MaxConcurrency: 10,
					Agent:          "agent",
					Cluster:        "somewhere",
				},
			},
			new: &prowconfig.Postsubmit{
				JobBase: prowconfig.JobBase{Name: "pull-ci-super-duper",
					Labels:         map[string]string{"foo": "bar"},
					MaxConcurrency: 10000,
					Agent:          "agent",
					Cluster:        "somewhere",
				},
			},
			expected: prowconfig.Postsubmit{
				JobBase: prowconfig.JobBase{Name: "pull-ci-super-duper",
					Labels:         map[string]string{"foo": "bar"},
					MaxConcurrency: 10,
					Agent:          "agent",
					Cluster:        "somewhere",
				},
			},
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			if actual, expected := mergePostsubmits(testCase.old, testCase.new), testCase.expected; !equality.Semantic.DeepEqual(actual, expected) {
				t.Errorf("%s: did not get expected merged postsubmit config:\n%s", testCase.name, diff.ObjectDiff(actual, expected))
			}
		})
	}
}
|
// Copyright 2021 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
pubsublitepb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/pubsublite/pubsublite_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/pubsublite"
)
// TopicServer implements the gRPC interface for the pubsublite Topic
// resource. It is stateless; each RPC builds its own client from the
// request's service account file.
type TopicServer struct{}
// ProtoToPubsubliteTopicPartitionConfig converts a TopicPartitionConfig
// resource from its proto representation. A nil proto yields a nil struct.
func ProtoToPubsubliteTopicPartitionConfig(p *pubsublitepb.PubsubliteTopicPartitionConfig) *pubsublite.TopicPartitionConfig {
	if p == nil {
		return nil
	}
	return &pubsublite.TopicPartitionConfig{
		Count:    dcl.Int64OrNil(p.Count),
		Capacity: ProtoToPubsubliteTopicPartitionConfigCapacity(p.GetCapacity()),
	}
}
// ProtoToPubsubliteTopicPartitionConfigCapacity converts a
// TopicPartitionConfigCapacity resource from its proto representation.
// A nil proto yields a nil struct.
func ProtoToPubsubliteTopicPartitionConfigCapacity(p *pubsublitepb.PubsubliteTopicPartitionConfigCapacity) *pubsublite.TopicPartitionConfigCapacity {
	if p == nil {
		return nil
	}
	return &pubsublite.TopicPartitionConfigCapacity{
		PublishMibPerSec:   dcl.Int64OrNil(p.PublishMibPerSec),
		SubscribeMibPerSec: dcl.Int64OrNil(p.SubscribeMibPerSec),
	}
}
// ProtoToPubsubliteTopicRetentionConfig converts a TopicRetentionConfig
// resource from its proto representation. A nil proto yields a nil struct.
func ProtoToPubsubliteTopicRetentionConfig(p *pubsublitepb.PubsubliteTopicRetentionConfig) *pubsublite.TopicRetentionConfig {
	if p == nil {
		return nil
	}
	return &pubsublite.TopicRetentionConfig{
		PerPartitionBytes: dcl.Int64OrNil(p.PerPartitionBytes),
		Period:            dcl.StringOrNil(p.Period),
	}
}
// ProtoToTopic converts a Topic resource from its proto representation.
// Unlike the nested converters, the original dereferenced p unconditionally
// (p.Name), panicking on a nil proto — e.g. when a request arrives with no
// resource set. Guard against nil for consistency and safety.
func ProtoToTopic(p *pubsublitepb.PubsubliteTopic) *pubsublite.Topic {
	if p == nil {
		return nil
	}
	obj := &pubsublite.Topic{
		Name:            dcl.StringOrNil(p.Name),
		PartitionConfig: ProtoToPubsubliteTopicPartitionConfig(p.GetPartitionConfig()),
		RetentionConfig: ProtoToPubsubliteTopicRetentionConfig(p.GetRetentionConfig()),
		Project:         dcl.StringOrNil(p.Project),
		Location:        dcl.StringOrNil(p.Location),
	}
	return obj
}
// PubsubliteTopicPartitionConfigToProto converts a TopicPartitionConfig
// resource to its proto representation. A nil struct yields a nil proto.
func PubsubliteTopicPartitionConfigToProto(o *pubsublite.TopicPartitionConfig) *pubsublitepb.PubsubliteTopicPartitionConfig {
	if o == nil {
		return nil
	}
	return &pubsublitepb.PubsubliteTopicPartitionConfig{
		Count:    dcl.ValueOrEmptyInt64(o.Count),
		Capacity: PubsubliteTopicPartitionConfigCapacityToProto(o.Capacity),
	}
}
// PubsubliteTopicPartitionConfigCapacityToProto converts a
// TopicPartitionConfigCapacity resource to its proto representation.
// A nil struct yields a nil proto.
func PubsubliteTopicPartitionConfigCapacityToProto(o *pubsublite.TopicPartitionConfigCapacity) *pubsublitepb.PubsubliteTopicPartitionConfigCapacity {
	if o == nil {
		return nil
	}
	return &pubsublitepb.PubsubliteTopicPartitionConfigCapacity{
		PublishMibPerSec:   dcl.ValueOrEmptyInt64(o.PublishMibPerSec),
		SubscribeMibPerSec: dcl.ValueOrEmptyInt64(o.SubscribeMibPerSec),
	}
}
// PubsubliteTopicRetentionConfigToProto converts a TopicRetentionConfig
// resource to its proto representation. A nil struct yields a nil proto.
func PubsubliteTopicRetentionConfigToProto(o *pubsublite.TopicRetentionConfig) *pubsublitepb.PubsubliteTopicRetentionConfig {
	if o == nil {
		return nil
	}
	return &pubsublitepb.PubsubliteTopicRetentionConfig{
		PerPartitionBytes: dcl.ValueOrEmptyInt64(o.PerPartitionBytes),
		Period:            dcl.ValueOrEmptyString(o.Period),
	}
}
// TopicToProto converts a Topic resource to its proto representation.
// The original dereferenced resource unconditionally and panicked on nil;
// guard against nil for consistency with the nested converters.
func TopicToProto(resource *pubsublite.Topic) *pubsublitepb.PubsubliteTopic {
	if resource == nil {
		return nil
	}
	p := &pubsublitepb.PubsubliteTopic{
		Name:            dcl.ValueOrEmptyString(resource.Name),
		PartitionConfig: PubsubliteTopicPartitionConfigToProto(resource.PartitionConfig),
		RetentionConfig: PubsubliteTopicRetentionConfigToProto(resource.RetentionConfig),
		Project:         dcl.ValueOrEmptyString(resource.Project),
		Location:        dcl.ValueOrEmptyString(resource.Location),
	}
	return p
}
// applyTopic converts the request's resource to a DCL Topic, applies it via
// the supplied client, and converts the result back to proto form.
// (Unexported helper shared by ApplyPubsubliteTopic.)
func (s *TopicServer) applyTopic(ctx context.Context, c *pubsublite.Client, request *pubsublitepb.ApplyPubsubliteTopicRequest) (*pubsublitepb.PubsubliteTopic, error) {
	p := ProtoToTopic(request.GetResource())
	res, err := c.ApplyTopic(ctx, p)
	if err != nil {
		return nil, err
	}
	r := TopicToProto(res)
	return r, nil
}
// ApplyPubsubliteTopic handles the gRPC request by passing it to the
// underlying Topic Apply() method.
func (s *TopicServer) ApplyPubsubliteTopic(ctx context.Context, request *pubsublitepb.ApplyPubsubliteTopicRequest) (*pubsublitepb.PubsubliteTopic, error) {
	client, err := createConfigTopic(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return s.applyTopic(ctx, client, request)
}
// DeletePubsubliteTopic handles the gRPC request by passing it to the
// underlying Topic Delete() method.
func (s *TopicServer) DeletePubsubliteTopic(ctx context.Context, request *pubsublitepb.DeletePubsubliteTopicRequest) (*emptypb.Empty, error) {
	client, err := createConfigTopic(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	err = client.DeleteTopic(ctx, ProtoToTopic(request.GetResource()))
	return &emptypb.Empty{}, err
}
// ListPubsubliteTopic handles the gRPC request by passing it to the
// underlying TopicList() method and converting each result to proto form.
func (s *TopicServer) ListPubsubliteTopic(ctx context.Context, request *pubsublitepb.ListPubsubliteTopicRequest) (*pubsublitepb.ListPubsubliteTopicResponse, error) {
	cl, err := createConfigTopic(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListTopic(ctx, request.Project, request.Location)
	if err != nil {
		return nil, err
	}
	// Pre-size: exactly one proto per listed resource.
	protos := make([]*pubsublitepb.PubsubliteTopic, 0, len(resources.Items))
	for _, r := range resources.Items {
		protos = append(protos, TopicToProto(r))
	}
	return &pubsublitepb.ListPubsubliteTopicResponse{Items: protos}, nil
}
// createConfigTopic builds a pubsublite client authenticated with the given
// service account file. Parameter renamed from snake_case to Go-idiomatic
// camelCase (Go names never use underscores).
func createConfigTopic(ctx context.Context, serviceAccountFile string) (*pubsublite.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return pubsublite.NewClient(conf), nil
}
|
package fuzz
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
// Instrument builds the instrumented binary and fuzz.zip if they do not already
// exist in the fzgo cache. If instead there is a cache hit, Instrument prints to stderr
// that the cached is being used.
// cacheDir is the location for the instrumented binary, and would typically be something like:
// GOPATH/pkg/fuzz/linux_amd64/619f7d77e9cd5d7433f8/fmt.FuzzFmt
func Instrument(function Func, verbose bool) (Target, error) {
	// report wraps any error with the function being instrumented for context.
	report := func(err error) (Target, error) {
		return Target{}, fmt.Errorf("instrument %s.%s error: %v", function.PkgName, function.FuncName, err)
	}
	// check if go-fuzz and go-fuzz-build seem to be in our path
	err := checkGoFuzz()
	if err != nil {
		return report(err)
	}
	if function.FuncName == "" || function.PkgDir == "" || function.PkgPath == "" {
		return report(fmt.Errorf("unexpected fuzz function: %#v", function))
	}
	// check if we have a plain data []byte signature, vs. a rich signature
	plain, err := IsPlainSig(function.TypesFunc)
	if err != nil {
		return report(err)
	}
	var target Target
	if plain {
		// create our initial target struct using the actual func supplied by the user.
		target = Target{UserFunc: function}
	} else {
		info("detected rich signature for %v.%v", function.PkgName, function.FuncName)
		// create a wrapper function to handle the rich signature.
		// When fuzzing, we do not want to print our arguments.
		printArgs := false
		target, err = CreateRichSigWrapper(function, printArgs)
		if err != nil {
			return report(err)
		}
		// CreateRichSigWrapper was successful, which means it populated the temp dir with the wrapper func.
		// By the time we leave our current function, we are done with the temp dir
		// that CreateRichSigWrapper created, so delete via a defer.
		// (We can't delete it immediately because we haven't yet run go-fuzz-build on it).
		defer os.RemoveAll(target.wrapperTempDir)
	}
	// Determine where our cacheDir is.
	// This includes calculating a hash covering the package, its dependencies, and some other items.
	cacheDir, err := target.cacheDir(verbose)
	if err != nil {
		return report(fmt.Errorf("getting cache dir failed: %v", err))
	}
	// set up our cache directory if needed
	err = os.MkdirAll(cacheDir, os.ModePerm)
	if err != nil {
		return report(fmt.Errorf("creating cache dir failed: %v", err))
	}
	// check if our instrumented zip already exists in our cache (in which case we trust it).
	finalZipPath, err := target.zipPath(verbose)
	if err != nil {
		return report(fmt.Errorf("zip path failed: %v", err))
	}
	if _, err = os.Stat(finalZipPath); os.IsNotExist(err) {
		info("building instrumented binary for %v.%v", function.PkgName, function.FuncName)
		// Build into a ".partial" file first, then rename into place below, so a
		// crashed/interrupted build never leaves a half-written fuzz.zip in the cache.
		outFile := filepath.Join(cacheDir, "fuzz.zip.partial")
		// to support experimentation, initial args for go-fuzz-build are
		// populated by the optional FZGOFLAGSBUILD env var
		// (or an empty slice if FZGOFLAGSBUILD is not set).
		args := fzgoEnvFlags("FZGOFLAGSBUILD")
		// Target either the user's original func or the synthesized wrapper func,
		// depending on whether a rich-signature wrapper was generated above.
		if !target.hasWrapper {
			args = append(args,
				"-func="+target.UserFunc.FuncName,
				"-o="+outFile,
				// "-race", // TODO: make a flag
				buildTagsArg,
				target.UserFunc.PkgPath,
			)
		} else {
			args = append(args,
				"-func="+target.wrapperFunc.FuncName,
				"-o="+outFile,
				// "-race", // TODO: make a flag
				buildTagsArg,
				target.wrapperFunc.PkgPath,
			)
		}
		err = execCmd("go-fuzz-build", args, target.wrapperEnv, 0)
		if err != nil {
			return report(fmt.Errorf("go-fuzz-build failed with args %q: %v", args, err))
		}
		// Atomically publish the finished zip into the cache.
		err = os.Rename(outFile, finalZipPath)
		if err != nil {
			return report(err)
		}
	} else {
		info("using cached instrumented binary for %v.%v", function.PkgName, function.FuncName)
	}
	return target, nil
}
// Start begins fuzzing by invoking 'go-fuzz'.
// cacheDir contains the instrumented binary, and would typically be something like:
// GOPATH/pkg/fuzz/linux_amd64/619f7d77e9cd5d7433f8/fmt.FuzzFmt
// workDir contains the corpus, and would typically be something like:
// GOPATH/src/github.com/user/proj/testdata/fuzz/fmt.FuzzFmt
func Start(target Target, workDir string, maxDuration time.Duration, parallel int, funcTimeout time.Duration, v bool) error {
	// report wraps any error with the target being fuzzed for context.
	report := func(err error) error {
		return fmt.Errorf("start fuzzing %s error: %v", target.FuzzName(), err)
	}
	info("starting fuzzing %s", target.FuzzName())
	info("output in %s", workDir)
	// check if go-fuzz and go-fuzz-build seem to be in our path
	err := checkGoFuzz()
	if err != nil {
		return report(err)
	}
	// prepare our args
	// Fix: this validation error previously bypassed report(), unlike every
	// other error path in this function; wrap it for consistent context.
	if funcTimeout < 1*time.Second {
		return report(fmt.Errorf("minimum allowed func timeout value is 1 second"))
	}
	verboseLevel := 0
	if v {
		verboseLevel = 1
	}
	zipPath, err := target.zipPath(v)
	if err != nil {
		return report(fmt.Errorf("zip path failed: %v", err))
	}
	// to support experimentation, initial args for go-fuzz are
	// populated by the optional FZGOFLAGSFUZZ env var
	// (or an empty slice if FZGOFLAGSFUZZ is not set).
	runArgs := fzgoEnvFlags("FZGOFLAGSFUZZ")
	runArgs = append(runArgs,
		fmt.Sprintf("-bin=%s", zipPath),
		fmt.Sprintf("-workdir=%s", workDir),
		fmt.Sprintf("-procs=%d", parallel),
		fmt.Sprintf("-timeout=%d", int(funcTimeout.Seconds())), // this is not total run time
		fmt.Sprintf("-v=%d", verboseLevel),
	)
	err = execCmd("go-fuzz", runArgs, nil, maxDuration)
	if err != nil {
		return report(err)
	}
	return nil
}
// Target tracks some metadata about each fuzz target, and is responsible
// for tracking a fuzz.Func found via go/packages and making it useful
// as a fuzz target, including determining where to cache the fuzz.zip
// and what the target's fuzzName should be.
type Target struct {
	UserFunc      Func   // the user's original function
	savedCacheDir string // the cacheDir relies on a content hash, so remember the answer
	// hasWrapper is true when the user's func has a rich signature and a
	// synthesized wrapper was generated for it.
	hasWrapper  bool
	wrapperFunc Func // synthesized wrapper function, only used if user's func has rich signatures
	// wrapperEnv is the env with GOPATH set up to include the temporary
	// wrapper dir (presumably wrapperTempDir below — TODO confirm against
	// CreateRichSigWrapper).
	wrapperEnv     []string
	wrapperTempDir string
}
// FuzzName returns the '<pkg>.<OrigFuzzFunc>' string.
// For example, it might be 'fmt.FuzzFmt'. This is used
// in messages, as well it is part of the path when creating
// the corpus location under testdata.
// It always names the user's original function, even when a wrapper exists.
func (t *Target) FuzzName() string {
	return t.UserFunc.FuzzName()
}
// zipPath returns the location of the target's fuzz.zip inside its cache
// directory, computing the cache directory first.
func (t *Target) zipPath(verbose bool) (string, error) {
	dir, err := t.cacheDir(verbose)
	if err != nil {
		return "", err
	}
	return filepath.Join(dir, "fuzz.zip"), nil
}
// cacheDir returns (and memoizes in savedCacheDir) the on-disk cache
// directory for this target, keyed by a content hash.
func (t *Target) cacheDir(verbose bool) (string, error) {
	if t.savedCacheDir == "" {
		// generate a hash covering the package, its dependencies, and some items like go-fuzz-build binary and go version
		// TODO: pass verbose flag around?
		var err error
		var h string
		if !t.hasWrapper {
			// use everything directly from the original user function
			h, err = Hash(t.UserFunc.PkgPath, t.UserFunc.FuncName, t.UserFunc.PkgDir, nil, verbose)
		} else {
			// we have a wrapper function, so target that for our hash.
			h, err = Hash(t.wrapperFunc.PkgPath, t.wrapperFunc.FuncName, t.wrapperFunc.PkgDir, t.wrapperEnv, verbose)
		}
		if err != nil {
			return "", err
		}
		// the user facing location on disk is the friendly name (that is, from the original user function)
		t.savedCacheDir = CacheDir(h, t.UserFunc.PkgName, t.FuzzName())
	}
	return t.savedCacheDir, nil
}
// ExecGo invokes the go command. The intended use case is fzgo operating in
// pass-through mode, where an invocation like 'fzgo env GOPATH'
// gets passed to the 'go' tool as 'go env GOPATH'. args typically would be
// os.Args[1:]
func ExecGo(args []string, env []string) error {
	// An empty env means "inherit the current process environment".
	if len(env) == 0 {
		env = os.Environ()
	}
	if _, err := exec.LookPath("go"); err != nil {
		return fmt.Errorf("failed to find \"go\" command in path. error: %v", err)
	}
	return execCmd("go", args, env, 0)
}
// execCmd runs the named command with the given args and env, wiring the
// child's stdio to this process's stdio.
// A maxDuration of 0 means no max time is enforced.
// With a nonzero maxDuration the process is interrupted (or killed) after
// that duration, and a timeout-triggered exit is deliberately NOT reported
// as an error.
func execCmd(name string, args []string, env []string, maxDuration time.Duration) error {
	report := func(err error) error { return fmt.Errorf("exec %v error: %v", name, err) }
	cmd := exec.Command(name, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Stdin = os.Stdin
	// An empty env leaves cmd.Env nil, which means "inherit this process's env".
	if len(env) > 0 {
		cmd.Env = env
	}
	if maxDuration == 0 {
		// invoke cmd and let it run until it returns
		err := cmd.Run()
		if err != nil {
			return report(err)
		}
		return nil
	}
	// we have a maxDuration specified.
	// start and then manually kill cmd after maxDuration if it doesn't exit on its own.
	err := cmd.Start()
	if err != nil {
		return report(err)
	}
	timer := time.AfterFunc(maxDuration, func() {
		err := cmd.Process.Signal(os.Interrupt)
		if err != nil {
			// os.Interrupt expected to fail in some cases (e.g., not implemented on Windows)
			_ = cmd.Process.Kill()
		}
	})
	err = cmd.Wait()
	// timer.Stop() returning true means the kill func never ran (the process
	// exited on its own), so a non-nil err is a genuine failure. If the timer
	// fired, the error is just the interrupt/kill and is swallowed on purpose.
	if timer.Stop() && err != nil {
		// timer.Stop() returned true, which means our kill func never ran, so return this error
		return report(err)
	}
	return nil
}
// checkGoFuzz lightly validates that dvyukov/go-fuzz seems to have been properly installed.
func checkGoFuzz() error {
	required := []string{"go-fuzz", "go-fuzz-build"}
	for _, cmdName := range required {
		if _, err := exec.LookPath(cmdName); err != nil {
			return fmt.Errorf("failed to find %q command in path. please run \"go get -u github.com/dvyukov/go-fuzz/...\" and verify your path settings. error: %v",
				cmdName, err)
		}
	}
	return nil
}
// fzgoEnvFlags gets any whitespace-separated arguments
// in the named environment variable (e.g., FZGOFLAGSBUILD).
// An unset or empty variable yields an empty slice.
func fzgoEnvFlags(name string) []string {
	return strings.Fields(os.Getenv(name))
}
// info prints a formatted, "fzgo:"-prefixed message to stdout.
// Related comment from https://golang.org/cmd/go/#hdr-Test_packages
// All test output and summary lines are printed to the go command's standard output,
// even if the test printed them to its own standard error.
// (The go command's standard error is reserved for printing errors building the tests.)
func info(s string, args ...interface{}) {
	msg := fmt.Sprintf(s, args...)
	fmt.Printf("fzgo: %s\n", msg)
}
|
package accounting
import (
"log"
"tddbudget/repository"
"testing"
"time"
"bou.ke/monkey"
"github.com/stretchr/testify/suite"
)
// AccountingSuite is the test suite for budget accounting: it embeds an
// Accounting instance that is recreated before each test in SetupTest.
type AccountingSuite struct {
	suite.Suite
	*Accounting
}
// TestSuiteInit is the standard 'go test' entry point that runs the suite.
func TestSuiteInit(t *testing.T) {
	suite.Run(t, new(AccountingSuite))
}

// SetupTest gives every test a fresh Accounting so state cannot leak
// between tests.
func (at *AccountingSuite) SetupTest() {
	at.Accounting = NewAccounting()
}
// No budgets configured: the total for any period is zero.
func (at *AccountingSuite) Test_no_budget() {
	start, end := at.setStartEnd("2021-04-01", "2021-04-01")
	at.totalShouldBe(start, end, 0)
}

// The query period exactly covers the budget month, so the whole budget counts.
func (at *AccountingSuite) Test_period_inside_budget_month() {
	mock := at.mockGetBudgets(map[string]float64{"202104": 30})
	defer mock.Unpatch()
	start, end := at.setStartEnd("2021-04-01", "2021-04-30")
	at.totalShouldBe(start, end, 30)
}

// A single day inside a 30-day, 30-unit budget month contributes 1 (prorated).
func (at *AccountingSuite) Test_period_inside_month() {
	mock := at.mockGetBudgets(map[string]float64{"202104": 30})
	defer mock.Unpatch()
	start, end := at.setStartEnd("2021-04-01", "2021-04-01")
	at.totalShouldBe(start, end, 1)
}

// A period entirely before the budget month contributes nothing.
func (at *AccountingSuite) Test_period_no_overlapping_first_day() {
	mock := at.mockGetBudgets(map[string]float64{"202104": 30})
	defer mock.Unpatch()
	start, end := at.setStartEnd("2021-03-01", "2021-03-01")
	at.totalShouldBe(start, end, 0)
}

// A period entirely after the budget month contributes nothing.
func (at *AccountingSuite) Test_period_no_overlapping_last_day() {
	mock := at.mockGetBudgets(map[string]float64{"202104": 30})
	defer mock.Unpatch()
	start, end := at.setStartEnd("2021-05-01", "2021-05-01")
	at.totalShouldBe(start, end, 0)
}

// start after end is invalid and yields zero.
func (at *AccountingSuite) Test_invalid_period() {
	mock := at.mockGetBudgets(map[string]float64{"202104": 30})
	defer mock.Unpatch()
	start, end := at.setStartEnd("2021-05-30", "2021-05-01")
	at.totalShouldBe(start, end, 0)
}

// Period straddles the start of the budget month: only the overlapping
// days (Apr 1-2) count.
func (at *AccountingSuite) Test_period_ovelapping_budget_first_day() {
	mock := at.mockGetBudgets(map[string]float64{"202104": 30})
	defer mock.Unpatch()
	start, end := at.setStartEnd("2021-03-31", "2021-04-02")
	at.totalShouldBe(start, end, 2)
}

// Period straddles the end of the budget month: only Apr 28-30 count.
func (at *AccountingSuite) Test_period_ovelapping_budget_last_day() {
	mock := at.mockGetBudgets(map[string]float64{"202104": 30})
	defer mock.Unpatch()
	start, end := at.setStartEnd("2021-04-28", "2021-05-02")
	at.totalShouldBe(start, end, 3)
}

// 300 over 30 days = 10 per day; three days = 30.
func (at *AccountingSuite) Test_daily_budget_10() {
	mock := at.mockGetBudgets(map[string]float64{"202104": 300})
	defer mock.Unpatch()
	start, end := at.setStartEnd("2021-04-01", "2021-04-03")
	at.totalShouldBe(start, end, 30)
}

// Period spans two budget months: all of April (30) plus 2 of May's 31
// days (310/31 = 10 per day → 20).
func (at *AccountingSuite) Test_cross_2_month() {
	mock := at.mockGetBudgets(map[string]float64{"202104": 30, "202105": 310})
	defer mock.Unpatch()
	start, end := at.setStartEnd("2021-04-01", "2021-05-02")
	at.totalShouldBe(start, end, 30+20)
}

// Period spans three budget months: full April + full May + 2 June days
// (3000/30 = 100 per day → 200).
func (at *AccountingSuite) Test_cross_3_month() {
	mock := at.mockGetBudgets(map[string]float64{
		"202104": 30,
		"202105": 310,
		"202106": 3000,
	})
	defer mock.Unpatch()
	start, end := at.setStartEnd("2021-04-01", "2021-06-02")
	at.totalShouldBe(start, end, 30+310+200)
}
// totalShouldBe asserts that GetTotal over [start, end] equals expected.
func (at *AccountingSuite) totalShouldBe(start, end time.Time, expected float64) {
	at.Equal(expected, at.Accounting.GetTotal(start, end))
}
// mockGetBudgets monkey-patches repository.GetBudgets to return mockData.
// Callers must Unpatch the returned guard (typically via defer).
func (at *AccountingSuite) mockGetBudgets(mockData map[string]float64) *monkey.PatchGuard {
	return monkey.Patch(repository.GetBudgets, func() map[string]float64 {
		return mockData
	})
}
// setStartEnd parses two "2006-01-02" date strings into start/end times.
// On a parse failure it logs and returns zero time values (tests relying on
// this will then exercise the zero-value path rather than failing fast).
func (at *AccountingSuite) setStartEnd(startStr, endStr string) (
	start, end time.Time,
) {
	var err error
	start, err = time.Parse("2006-01-02", startStr)
	if err != nil {
		// Log message (Chinese): "failed to initialize start time"
		log.Println("[ 設定初始化開始時間失敗 ] GOT: ", startStr)
		return
	}
	end, err = time.Parse("2006-01-02", endStr)
	if err != nil {
		// Log message (Chinese): "failed to initialize end time"
		log.Println("[ 設定初始化結束時間失敗 ] GOT: ", endStr)
		return
	}
	return
}
|
package ability
import (
"context"
"time"
"github.com/milobella/oratio/internal/config"
"github.com/milobella/oratio/internal/model"
"github.com/sirupsen/logrus"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// DAO abstracts persistence of abilities so callers are decoupled from the
// MongoDB implementation below.
type DAO interface {
	// CreateOrUpdate upserts the ability (matched by name) and returns the
	// previously stored document, if any.
	CreateOrUpdate(ability *model.Ability) (*model.Ability, error)
	// GetAll returns every stored ability.
	GetAll() ([]*model.Ability, error)
	// GetByIntent returns the abilities whose "intents" list contains intent.
	GetByIntent(intent string) ([]*model.Ability, error)
}
// mongoDAO is the MongoDB-backed implementation of DAO.
type mongoDAO struct {
	client     *mongo.Client // connected Mongo client
	url        string        // connection URL, kept for log context only
	database   string        // database name
	collection string        // collection name
	timeout    time.Duration // per-operation context timeout
}
// NewMongoDAO connects to MongoDB using conf and returns a DAO backed by it.
// On connection error it now returns a nil DAO (the original returned a
// half-initialized DAO alongside the error, inviting misuse by callers that
// ignore err).
func NewMongoDAO(conf config.Database, timeout time.Duration) (DAO, error) {
	client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(conf.MongoUrl))
	if err != nil {
		return nil, err
	}
	return &mongoDAO{
		client:     client,
		url:        conf.MongoUrl,
		database:   conf.MongoDatabase,
		collection: conf.MongoCollection,
		timeout:    timeout,
	}, nil
}
// CreateOrUpdate upserts the ability (matched by name) and returns the
// document that was replaced.
// Fixes two bugs in the original:
//   - Decode was called on a nil *model.Ability, which always fails; decode
//     into an allocated struct instead.
//   - context.WithTimeout's cancel func was discarded, leaking the context's
//     resources; it is now deferred.
func (dao *mongoDAO) CreateOrUpdate(ability *model.Ability) (*model.Ability, error) {
	collection := dao.client.Database(dao.database).Collection(dao.collection)
	opts := options.FindOneAndReplace().SetUpsert(true)
	ctx, cancel := context.WithTimeout(context.Background(), dao.timeout)
	defer cancel()
	filter := bson.D{{"name", ability.Name}}
	result := collection.FindOneAndReplace(ctx, filter, ability, opts)
	foundAbility := &model.Ability{}
	if err := result.Decode(foundAbility); err != nil {
		return nil, err
	}
	return foundAbility, nil
}
// GetAll returns every stored ability (an empty, non-nil slice when the
// collection is empty).
// Fix: the original discarded context.WithTimeout's cancel func, leaking the
// context's resources; it is now deferred.
func (dao *mongoDAO) GetAll() ([]*model.Ability, error) {
	collection := dao.client.Database(dao.database).Collection(dao.collection)
	ctx, cancel := context.WithTimeout(context.Background(), dao.timeout)
	defer cancel()
	cursor, err := collection.Find(ctx, bson.D{})
	if err != nil {
		dao.logError(err, "Error creating the database cursor")
		return []*model.Ability{}, err
	}
	results := make([]*model.Ability, 0)
	if err = cursor.All(ctx, &results); err != nil {
		dao.logError(err, "Error getting results from the cursor")
		return []*model.Ability{}, err
	}
	return results, nil
}
// GetByIntent returns the abilities whose "intents" array contains intent.
// Fix: the original discarded context.WithTimeout's cancel func, leaking the
// context's resources; it is now deferred.
func (dao *mongoDAO) GetByIntent(intent string) ([]*model.Ability, error) {
	collection := dao.client.Database(dao.database).Collection(dao.collection)
	ctx, cancel := context.WithTimeout(context.Background(), dao.timeout)
	defer cancel()
	cursor, err := collection.Find(ctx, bson.M{"intents": intent})
	if err != nil {
		dao.logError(err, "Error creating the database cursor")
		return []*model.Ability{}, err
	}
	var results []*model.Ability
	if err = cursor.All(ctx, &results); err != nil {
		dao.logError(err, "Error getting results from the cursor")
		return []*model.Ability{}, err
	}
	return results, nil
}
// logError logs err at error level, attaching the DAO's connection
// coordinates (url, database, collection) for debugging context.
func (dao *mongoDAO) logError(err error, message string) {
	logrus.WithError(err).WithFields(logrus.Fields{
		"url":        dao.url,
		"database":   dao.database,
		"collection": dao.collection,
	}).Error(message)
}
|
package main
import (
"bufio"
"fmt"
"os"
"sort"
)
// Cart headings in clockwise order (UP=0 … LEFT=3), so turning right is
// +1 mod 4 and turning left is +3 mod 4. STRAIGHT doubles as a "next turn"
// choice at intersections.
const (
	UP       = iota
	RIGHT    = iota
	DOWN     = iota
	LEFT     = iota
	STRAIGHT = iota
)
// Cart is a mine cart on the track: its grid position, current heading,
// and which way it will turn at the next intersection.
type Cart struct {
	x, y     int // grid coordinates (y grows downward)
	dir      int // one of UP/RIGHT/DOWN/LEFT
	nextTurn int // LEFT, STRAIGHT, or RIGHT — cycles at each '+'
}
// String renders the cart as "<dir>@x,y" for debugging.
func (c Cart) String() string {
	return fmt.Sprintf("%s@%d,%d", dirName(c.dir), c.x, c.y)
}
// TurnLeft rotates the heading counter-clockwise (+3 == -1 mod 4).
func (c *Cart) TurnLeft() {
	c.dir = (c.dir + 3) % 4
}

// TurnRight rotates the heading clockwise.
func (c *Cart) TurnRight() {
	c.dir = (c.dir + 1) % 4
}
// Turn applies the intersection rule: carts cycle left, straight, right,
// left, … at successive '+' cells.
func (c *Cart) Turn() {
	switch c.nextTurn {
	case LEFT:
		c.dir = (c.dir + 3) % 4 // counter-clockwise
		c.nextTurn = STRAIGHT
	case STRAIGHT:
		// heading unchanged
		c.nextTurn = RIGHT
	case RIGHT:
		c.dir = (c.dir + 1) % 4 // clockwise
		c.nextTurn = LEFT
	}
}
// Move advances the cart one cell in its current heading.
// y grows downward, matching row order in the track.
func (c *Cart) Move() {
	switch c.dir {
	case UP:
		c.y -= 1
	case DOWN:
		c.y += 1
	case LEFT:
		c.x -= 1
	case RIGHT:
		c.x += 1
	}
}
// dirName maps a heading constant to its display glyph ("?" when unknown).
func dirName(dir int) string {
	glyphs := map[int]string{
		UP:    "^",
		DOWN:  "v",
		LEFT:  "<",
		RIGHT: ">",
	}
	if g, ok := glyphs[dir]; ok {
		return g
	}
	return "?"
}
// dirFromChar maps a cart glyph from the input to a heading constant,
// returning -1 for any non-cart character.
func dirFromChar(char rune) int {
	headings := map[rune]int{
		'^': UP,
		'v': DOWN,
		'<': LEFT,
		'>': RIGHT,
	}
	if d, ok := headings[char]; ok {
		return d
	}
	return -1
}
// tick advances one cart a single step: move forward, then adjust heading
// based on the track cell it lands on.
func tick(cart *Cart, track []string) {
	cart.Move()
	switch track[cart.y][cart.x] {
	case '+':
		// intersection: cycle left/straight/right
		cart.Turn()
	case '/':
		// '/' curve: up<->right and down<->left
		switch cart.dir {
		case UP, DOWN:
			cart.TurnRight()
		case LEFT, RIGHT:
			cart.TurnLeft()
		}
	case '\\':
		// '\' curve: up<->left and down<->right
		switch cart.dir {
		case UP, DOWN:
			cart.TurnLeft()
		case LEFT, RIGHT:
			cart.TurnRight()
		}
	}
	// '|' and '-' cells leave the heading unchanged.
}
// parseCarts extracts carts from the raw track, replacing each cart glyph
// with the track piece underneath it ('|' for vertical carts, '-' for
// horizontal ones). Returns the cleaned track and the carts found.
func parseCarts(track []string) ([]string, []Cart) {
	var carts []Cart
	for i, row := range track {
		for j, char := range row {
			d := dirFromChar(char)
			// note: j is a byte index, which is fine for this ASCII input
			if d == UP || d == DOWN {
				track[i] = track[i][:j] + "|" + track[i][j+1:]
			} else if d == LEFT || d == RIGHT {
				track[i] = track[i][:j] + "-" + track[i][j+1:]
			} else {
				continue
			}
			carts = append(carts, Cart{x: j, y: i, dir: d, nextTurn: LEFT})
		}
	}
	return track, carts
}
// sortCarts orders carts in-place top-to-bottom, then left-to-right
// (the movement order the puzzle requires), and returns the same slice.
func sortCarts(carts []Cart) []Cart {
	sort.Slice(carts, func(i, j int) bool {
		a, b := carts[i], carts[j]
		if a.y != b.y {
			return a.y < b.y
		}
		return a.x < b.x
	})
	return carts
}
// collision reports whether any two carts not present in ignore occupy the
// same cell. ignore is keyed by element address, so carts must point at the
// same backing slice the ignore keys were taken from.
func collision(carts *[]Cart, ignore map[*Cart]bool) bool {
	type Pos struct {
		x, y int
	}
	seen := map[Pos]bool{}
	for i := range *carts {
		c := &(*carts)[i]
		if ignore[c] {
			continue
		}
		p := Pos{x: c.x, y: c.y}
		if seen[p] {
			return true
		}
		seen[p] = true
	}
	return false
}
// part1 ticks carts in reading order until the first collision and returns
// its position as "x,y".
func part1(track []string, carts []Cart) string {
	for {
		for i := range sortCarts(carts) {
			tick(&carts[i], track)
			// check after every single cart move: collisions happen mid-tick
			if collision(&carts, nil) {
				return fmt.Sprintf("%d,%d", carts[i].x, carts[i].y)
			}
		}
	}
}
// part2 keeps ticking, removing every pair of carts that collides, until a
// single cart survives; returns its position as "x,y".
// Cleanup: the original built a `newCarts` slice (pre-sized to len-2 zero
// values, then appended to) and a `found` counter that were never read —
// dead code removed; the `removed` map already drives the filtering below.
func part2(track []string, carts []Cart) string {
	for len(carts) > 1 {
		removed := map[*Cart]bool{}
		for i := range sortCarts(carts) {
			if removed[&carts[i]] {
				continue // this cart already crashed earlier in the tick
			}
			tick(&carts[i], track)
			if collision(&carts, removed) {
				// Mark every cart at the crash site; they are filtered out
				// once the whole tick has been processed.
				x, y := carts[i].x, carts[i].y
				for j := range carts {
					if carts[j].x == x && carts[j].y == y {
						removed[&carts[j]] = true
					}
				}
			}
		}
		if len(removed) > 0 {
			remaining := make([]Cart, 0, len(carts)-len(removed))
			for i, c := range carts {
				if !removed[&carts[i]] {
					remaining = append(remaining, c)
				}
			}
			carts = remaining
		}
	}
	return fmt.Sprintf("%d,%d", carts[0].x, carts[0].y)
}
// main reads the track from input/day13.txt and prints the answers to both
// puzzle parts. The original ignored the os.Open error (a missing file would
// panic inside the scanner), never closed the file, and never checked
// scanner.Err().
func main() {
	f, err := os.Open("input/day13.txt")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	scanner := bufio.NewScanner(bufio.NewReader(f))
	var input []string
	for scanner.Scan() {
		input = append(input, scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		fmt.Println(err)
		return
	}
	track, carts := parseCarts(input)
	// part1 mutates cart state, so give it its own copy.
	p1Carts := make([]Cart, len(carts))
	copy(p1Carts, carts)
	fmt.Println(part1(track, p1Carts))
	fmt.Println(part2(track, carts))
}
|
package ui
import (
"fmt"
"html/template"
"io/ioutil"
"net/http"
"time"
"github.com/dpordomingo/learning-exercises/ant/actors"
"github.com/dpordomingo/learning-exercises/ant/generators"
"github.com/dpordomingo/learning-exercises/ant/geo"
)
// RunServer starts an HTTP server on :8888 that reports the world state.
// target and rover are accepted but not used by the registered handlers.
// Fix: ListenAndServe only returns on failure, and the original silently
// dropped that error; it now panics so startup failures are visible.
func RunServer(m *geo.Map, target *geo.Point, rover actors.Rover) {
	http.HandleFunc("/", getIntroHandler(m))
	http.HandleFunc("/favicon.ico", dummyHandler)
	http.HandleFunc("/events", getEventSourceHandler(m))
	if err := http.ListenAndServe(":8888", nil); err != nil {
		panic(err)
	}
}
// dummyHandler answers 204 No Content (used for favicon requests).
func dummyHandler(res http.ResponseWriter, req *http.Request) {
	res.WriteHeader(http.StatusNoContent)
}
// getIntroHandler returns a handler that renders the intro page template
// with the map's pixel dimensions (10px per cell).
// Fix: the original set Content-Type AFTER Execute had written the body —
// headers are ignored once the first body byte is written, so the header
// never took effect. It is now set before rendering.
func getIntroHandler(m *geo.Map) http.HandlerFunc {
	templateScope := map[string]int32{
		"Width":  10 * m.W(),
		"Height": 10 * m.H(),
	}
	return func(res http.ResponseWriter, req *http.Request) {
		introTpl, err := readTemplate("ui/html.tpl")
		if err != nil {
			http.Error(res, "Error loading template", http.StatusInternalServerError)
			return
		}
		res.Header().Set("Content-Type", "text/html;charset=utf-8")
		if err := introTpl.Execute(res, templateScope); err != nil {
			http.Error(res, "Error rendering template", http.StatusInternalServerError)
			return
		}
	}
}
// CONTENT is the Server-Sent-Events frame template: event id, client retry
// interval (ms), and an "x y" data payload, terminated by a blank line as
// the SSE protocol requires.
const CONTENT string = "" +
	"id: %d\n" +
	"retry: 500\n" +
	"data: %d %d\n\n"
// getEventSourceHandler returns an SSE handler that emits a random point
// every 500ms until the client disconnects.
// Fixes: "Conection" typos and the redundant trailing "\n" passed to
// fmt.Println (go vet flags it; Println already appends a newline).
// NOTE(review): the 500ms producer goroutine blocks forever on notifyChannel
// after the handler returns (goroutine leak per connection), and
// http.CloseNotifier is deprecated in favor of req.Context().Done() —
// both left as-is here to keep this change behavior-focused; worth a
// follow-up.
func getEventSourceHandler(m *geo.Map) http.HandlerFunc {
	return func(res http.ResponseWriter, req *http.Request) {
		fmt.Println("Connection opened")
		defer fmt.Println("Connection closed")
		header := res.Header()
		header.Set("Content-Type", "text/event-stream")
		header.Set("Cache-Control", "no-cache")
		header.Set("Connection", "keep-alive")
		flusher, ok := res.(http.Flusher)
		if !ok {
			http.Error(res, "Streaming unsupported!", http.StatusInternalServerError)
			return
		}
		closingChannel := res.(http.CloseNotifier).CloseNotify()
		notifyChannel := make(chan bool)
		// forward the close signal (true) into the shared channel
		go func() {
			notifyChannel <- <-closingChannel
		}()
		// emit a "keep going" signal (false) every 500ms
		go func() {
			for {
				notifyChannel <- false
				time.Sleep(time.Millisecond * 500)
			}
		}()
		for !<-notifyChannel {
			point := generators.GetRandomPoint(10*m.W(), 10*m.H())
			content := fmt.Sprintf(CONTENT, time.Now().UTC().UnixNano(), point.X, point.Y)
			res.Write([]byte(content))
			flusher.Flush()
			fmt.Println(content)
		}
	}
}
// readTemplate loads the named file from disk and parses it as an HTML
// template called "intro".
func readTemplate(file string) (*template.Template, error) {
	raw, err := ioutil.ReadFile(file)
	if err != nil {
		return nil, err
	}
	return template.New("intro").Parse(string(raw))
}
|
package main
import (
"encoding/json"
"io/ioutil"
"github.com/brigadecore/brigade/sdk/v3"
"github.com/pkg/errors"
)
// event is a git-initializer-specific representation of a Brigade Event.
// Only the fields this program needs are declared; everything else in the
// JSON is ignored by json.Unmarshal.
type event struct {
	Project struct {
		Secrets map[string]string `json:"secrets"`
	} `json:"project"`
	Worker struct {
		Git *sdk.GitConfig `json:"git"`
	} `json:"worker"`
}
// getEvent loads an Event from the fixed JSON file the Brigade worker mounts
// at /var/event/event.json.
// Fix: the wrap message read "unable read the event file" — corrected to
// "unable to read the event file".
func getEvent() (event, error) {
	evt := event{}
	eventPath := "/var/event/event.json"
	data, err := ioutil.ReadFile(eventPath)
	if err != nil {
		return evt, errors.Wrapf(err, "unable to read the event file %q", eventPath)
	}
	// errors.Wrap returns nil when err is nil, so the success path is clean.
	err = json.Unmarshal(data, &evt)
	return evt, errors.Wrap(err, "error unmarshaling the event")
}
|
package router
import (
"project/app/admin/apis"
"github.com/gin-gonic/gin"
)
// init registers the captcha route in the package-level list of routes that
// require no authentication (routerNoCheckRole is declared elsewhere in
// this package).
func init() {
	routerNoCheckRole = append(routerNoCheckRole, getCaptchaRouter)
}
// getCaptchaRouter wires the routes that require no authentication:
// GET /auth/code serves the captcha.
func getCaptchaRouter(v1 *gin.RouterGroup) {
	r := v1.Group("/auth")
	{
		r.GET("code", apis.Captcha)
	}
}
|
// Test that return values are processed
// Package pkg does something.
package pkg
import "errors"
// megaErr embeds error and overrides Error(); used by the fixture below to
// exercise the linter's handling of custom error types in return values.
type megaErr struct {
	error
}

func (i megaErr) Error() string {
	return "I am THE error"
}
// Fixture helpers covering the return-shapes the lint rule must recognize:
// single value, multiple values, single error, (value, error), and a custom
// error type.
func returnOne() int {
	return 0
}

func returnTwo() (int, string) {
	return 0, "something"
}

func returnErrOne() error {
	return errors.New("err")
}

func returnErrTwo() (asd int, err error) {
	return 0, errors.New("err")
}

// TODO: implement deep check for returned values that implement error
func returnErrStruct() (string, megaErr) {
	return "", megaErr{}
}
// main is a lint-rule fixture: each MATCH comment declares the diagnostic
// the linter is expected to produce for that line. Do not "fix" these calls.
func main() {
	returnOne() // MATCH /result of 'returnOne' should not be silently ignored/
	returnTwo() // MATCH /result of 'returnTwo' should not be silently ignored/
	returnErrOne() // MATCH /function 'returnErrOne' returns an error, it should not be silently ignored/
	a, _ := returnErrTwo() // MATCH /function 'returnErrTwo' returns an error, generally it should not be intentionally ignored/
	if a, _ := returnErrTwo(); a > 0 { // MATCH /function 'returnErrTwo' returns an error, generally it should not be intentionally ignored/
		doSomethingMan()
	}
	//returnErrThree() // TODO: see above
}
|
package test
import (
"github.com/anihouse/bot/app"
"github.com/sirupsen/logrus"
)
// module is the "test" bot module: a thin wrapper around an app.Module plus
// an enabled flag.
type module struct {
	app     *app.Module
	enabled bool
}
// ID returns the module's fixed identifier.
func (module) ID() string {
	return "test"
}

// IsEnabled reports whether Enable has been called more recently than Disable.
func (m module) IsEnabled() bool {
	return m.enabled
}

// LoadConfig is a no-op: this module has no configuration.
func (module) LoadConfig(path string) error {
	return nil
}

// SetLogger is a no-op: this module does no logging.
func (module) SetLogger(logger *logrus.Logger) {
}
// Init creates the underlying app module (named by this module's own ID)
// and registers the ping handler.
// Fix: the original called _module.ID() — an identifier not defined in this
// file; the receiver's own ID() is the evident intent.
func (m *module) Init(prefix string) error {
	m.app = app.NewModule(m.ID(), prefix)
	m.app.On("ping").Handle(m.onPing)
	return nil
}
// Enable turns the module on, propagating to the underlying app module.
func (m *module) Enable() {
	m.enabled = true
	m.app.Enable()
}

// Disable turns the module off, propagating to the underlying app module.
func (m *module) Disable() {
	m.enabled = false
	m.app.Disable()
}
|
package evo
import (
"sort"
)
// User-supplied hooks of the genetic algorithm:
type fitnessFunc func([]rune) float64        // scores a genome (higher is fitter)
type newGenomeFunc func() []rune             // creates a fresh random genome
type crossoverFunc func([]rune, []rune) []rune // combines two parent genomes
type mutateFunc func([]rune) []rune          // perturbs a genome
// GA holds the configuration and current population of a genetic algorithm.
// Configure it with the fluent Set* methods, then call Init.
type GA struct {
	popSize   int
	fitness   fitnessFunc
	newGenome newGenomeFunc
	crossover crossoverFunc
	mutate    mutateFunc
	// elitismRate: fraction of the population copied unchanged each
	// generation; reproductionRate: fraction produced by crossover.
	elitismRate, reproductionRate float64
	maxProcs                      int
	// pop is kept sorted by descending fitness (see sortPopulation).
	pop []Chromosome
}
// Chromosome pairs a genome with its cached fitness score.
type Chromosome struct {
	Genome  []rune
	Fitness float64
}
// NewGA returns a GA with the given population size; all hooks must be set
// via the Set* methods before calling Init.
func NewGA(popSize int) *GA {
	return &GA{
		popSize: popSize,
	}
}
// Init creates and scores the initial random population.
// Requires newGenome and fitness to have been set.
func (e *GA) Init() {
	e.newPopulation()
}
// SetNewGenome sets the genome factory; returns the GA for chaining.
func (e *GA) SetNewGenome(newGenome newGenomeFunc) *GA {
	e.newGenome = newGenome
	return e
}

// SetFitness sets the fitness function; returns the GA for chaining.
func (e *GA) SetFitness(fitness fitnessFunc) *GA {
	e.fitness = fitness
	return e
}

// SetMutate sets the mutation function; returns the GA for chaining.
func (e *GA) SetMutate(mutate mutateFunc) *GA {
	e.mutate = mutate
	return e
}

// SetElitism sets the fraction of top chromosomes copied unchanged each
// generation; returns the GA for chaining.
func (e *GA) SetElitism(elitism float64) *GA {
	e.elitismRate = elitism
	return e
}

// SetReproduction sets the crossover fraction and the crossover function;
// returns the GA for chaining.
func (e *GA) SetReproduction(reproduction float64, crossover crossoverFunc) *GA {
	e.reproductionRate = reproduction
	e.crossover = crossover
	return e
}

// SetMaxProcs sets the worker count (currently only consulted by the
// commented-out parallel path in Evolve); returns the GA for chaining.
func (e *GA) SetMaxProcs(maxProcs int) *GA {
	e.maxProcs = maxProcs
	return e
}
// newPopulation replaces the population with popSize freshly generated and
// scored chromosomes, sorted best-first.
func (e *GA) newPopulation() {
	pop := make([]Chromosome, e.popSize)
	for idx := range pop {
		genome := e.newGenome()
		pop[idx] = Chromosome{
			Genome:  genome,
			Fitness: e.fitness(genome),
		}
	}
	e.pop = pop
	e.sortPopulation()
}
// sortPopulation orders the population by descending fitness, so the fittest
// chromosome ends up at index 0.
func (e *GA) sortPopulation() {
	byFitnessDesc := func(i, j int) bool {
		return e.pop[j].Fitness < e.pop[i].Fitness
	}
	sort.Slice(e.pop, byFitnessDesc)
}
// AverageFitness returns the population's summed fitness divided by popSize.
// (With an empty, uninitialized GA this is 0/0, i.e. NaN.)
func (e GA) AverageFitness() float64 {
	var total float64
	for i := range e.pop {
		total += e.pop[i].Fitness
	}
	return total / float64(e.popSize)
}
// Best returns the fittest chromosome; the population is kept sorted
// best-first. It panics if called before Init (empty population).
func (e GA) Best() Chromosome {
	return e.pop[0]
}
// Evolve advances the population by one generation:
//
//  1. elitism: the top elitismRate fraction is copied over unchanged;
//  2. reproduction: a reproductionRate fraction is bred by crossover of two
//     parents whose indices come from boxMullerIntn (defined elsewhere in
//     this package; presumably biased toward the fitter, lower indices —
//     confirm against its implementation);
//  3. mutation: every carried-over or bred chromosome is mutated;
//  4. fill: the rest of the population is brand-new genomes;
//  5. everything is rescored and the population re-sorted best-first.
//
// NOTE(review): maxProcs is unused; reproduction runs sequentially (a
// parallel sketch that previously lived here as dead commented-out code has
// been removed).
func (e *GA) Evolve() {
	newPop, i := make([]Chromosome, e.popSize), 0

	// elitism: carry the best nElites over unchanged.
	nElites := int(e.elitismRate * float64(e.popSize))
	for j := 0; j < nElites; j++ {
		newPop[i] = e.pop[j]
		i++
	}

	// reproduction: breed nChildren via crossover. Fitness is left at its
	// zero value here; all chromosomes are scored in one pass below.
	nChildren := int(e.reproductionRate * float64(e.popSize))
	for j := 0; j < nChildren; j++ {
		m, n := boxMullerIntn(e.popSize), boxMullerIntn(e.popSize)
		a, b := e.pop[m], e.pop[n]
		newPop[i] = Chromosome{
			Genome: e.crossover(a.Genome, b.Genome),
		}
		i++
	}

	// mutation for the elite and bred genomes. Entries at and above i are
	// still zero-valued; they are filled below and need no mutation.
	//
	// BUG FIX: the original iterated `for j, c := range newPop` and assigned
	// to c.Genome — a per-iteration copy — so every mutation was silently
	// discarded. Index into the slice so the mutated genome sticks.
	// NOTE(review): matching the original loop bounds, elites are mutated
	// too; confirm that is intended, since it defeats strict elitism.
	for j := 0; j < i; j++ {
		newPop[j].Genome = e.mutate(newPop[j].Genome)
	}

	// fill up with brand-new genomes; no mutation necessary for these.
	for ; i < e.popSize; i++ {
		newPop[i] = Chromosome{
			Genome: e.newGenome(),
		}
	}

	// score the whole new generation, swap it in, and re-sort.
	for j := range newPop {
		newPop[j].Fitness = e.fitness(newPop[j].Genome)
	}
	e.pop = newPop
	e.sortPopulation()
}
|
package fakeip
import (
"net"
"testing"
)
// TestPool_Basic checks that a pool over 192.168.0.0/30 hands out the two
// usable addresses (.1 then .2) in order.
func TestPool_Basic(t *testing.T) {
	_, ipnet, _ := net.ParseCIDR("192.168.0.1/30")
	pool, _ := New(ipnet)

	first := pool.Get()
	last := pool.Get()

	if !first.Equal(net.IP{192, 168, 0, 1}) {
		t.Error("should get right first ip, instead of", first.String())
	}
	if !last.Equal(net.IP{192, 168, 0, 2}) {
		// BUG FIX: the original printed first.String() here (copy-paste),
		// hiding the value that actually failed the check.
		t.Error("should get right last ip, instead of", last.String())
	}
}
// TestPool_Cycle checks that a /30 pool wraps around once both usable
// addresses are handed out, returning the first address again.
func TestPool_Cycle(t *testing.T) {
	_, ipnet, _ := net.ParseCIDR("192.168.0.1/30")
	pool, _ := New(ipnet)

	initial := pool.Get()
	pool.Get() // consume the second (and last) usable address
	recycled := pool.Get()

	if !initial.Equal(recycled) {
		t.Error("should return same ip", initial.String())
	}
}
// TestPool_Error checks that constructing a pool from a /31 — too small to
// yield a usable address range — returns an error.
func TestPool_Error(t *testing.T) {
	_, ipnet, _ := net.ParseCIDR("192.168.0.1/31")
	if _, err := New(ipnet); err == nil {
		t.Error("should return err")
	}
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//728. Self Dividing Numbers
//A self-dividing number is a number that is divisible by every digit it contains.
//For example, 128 is a self-dividing number because 128 % 1 == 0, 128 % 2 == 0, and 128 % 8 == 0.
//Also, a self-dividing number is not allowed to contain the digit zero.
//Given a lower and upper number bound, output a list of every possible self dividing number, including the bounds if possible.
//Example 1:
//Input:
//left = 1, right = 22
//Output: [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 15, 22]
//Note:
//The boundaries of each input argument are 1 <= left <= right <= 10000.
//func selfDividingNumbers(left int, right int) []int {
//}
// Time Is Money |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.