text stringlengths 11 4.05M |
|---|
package cls
import (
"bytes"
"crypto/md5"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"github.com/pierrec/lz4"
"google.golang.org/protobuf/proto"
)
// errIncompressible reports that LZ4 could not shrink the input; callers may
// fall back to sending the data uncompressed. (The old code returned a fresh
// fmt.Errorf, which could never be matched with errors.Is.)
var errIncompressible = errors.New("incompressible data")

// lz4Compress compresses src using LZ4 block compression and returns the
// compressed bytes. It returns errIncompressible when the output would not
// be smaller than the input (n == 0 or n >= len(src)).
func lz4Compress(src []byte) ([]byte, error) {
	dst := make([]byte, len(src))
	ht := make([]int, 64<<10) // hash table required by CompressBlock
	n, err := lz4.CompressBlock(src, dst, ht)
	if err != nil {
		return nil, err
	}
	if n == 0 || n >= len(src) {
		return nil, errIncompressible
	}
	return dst[:n], nil
}
func md5Sum(p []byte) string {
h := md5.New()
h.Write(p)
return hex.EncodeToString(h.Sum(nil))
}
// Cursor is the JSON response body of the /cursor endpoint; Value holds the
// opaque cursor token used to page through logs.
type Cursor struct {
	Value string `json:"cursor"`
}
// UploadLog posts the protobuf-encoded log group list to /structuredlog.
// When compress is true the body is LZ4 block compressed; data that cannot
// be compressed falls back to being sent raw (and the compress-type header
// is omitted). A non-empty hash enables the x-cls-hashkeye header, which
// carries the MD5 of the request body. Non-200 responses are returned as
// errors whose text is the status code (kept for caller compatibility).
func (cls *ClSCleint) UploadLog(logTopicID string, logGroupList LogGroupList, hash string, compress bool) error {
	params := url.Values{"topic_id": {logTopicID}}
	data, err := proto.Marshal(&logGroupList)
	if err != nil {
		return err
	}
	headers := url.Values{"Host": {cls.Host}, "User-Agent": {"AuthSDK"}}
	body := bytes.NewBuffer(nil)
	compressed := false
	if compress {
		// Any compression failure (notably incompressible input) falls back
		// to the raw payload. The old errors.Is check compared against a
		// freshly built fmt.Errorf and could never match, so incompressible
		// data aborted the upload instead of falling back.
		if lzdata, lzerr := lz4Compress(data); lzerr == nil {
			body.Write(lzdata)
			compressed = true
		} else {
			body.Write(data)
		}
	} else {
		body.Write(data)
	}
	sig := Signature(cls.SecretId, cls.SecretKey,
		"POST", "/structuredlog", params, headers, 300)
	req, err := http.NewRequest("POST", fmt.Sprintf("http://%s/structuredlog?topic_id=%s", cls.Host, logTopicID), body)
	if err != nil {
		return err
	}
	req.Header.Add("Authorization", sig)
	req.Header.Add("Host", cls.Host)
	req.Header.Add("Content-Type", "application/x-protobuf")
	if hash != "" {
		// NOTE(review): this hashes the request body, not the hash argument —
		// looks intentional but worth confirming against the CLS API docs.
		req.Header.Add("x-cls-hashkeye", md5Sum(body.Bytes()))
	}
	if compressed {
		req.Header.Set("x-cls-compress-type", "lz4")
	}
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close() // previously leaked the connection
	if resp.StatusCode != 200 {
		return fmt.Errorf("%d", resp.StatusCode)
	}
	return nil
}
// GetLogStart asks the /cursor endpoint for the cursor matching the given
// start position in the topic and returns its value.
func (cls *ClSCleint) GetLogStart(logTopicID, start string) (cursor string, err error) {
	params := url.Values{"topic_id": {logTopicID}, "start": {start}}
	headers := url.Values{"Host": {cls.Host}, "User-Agent": {"AuthSDK"}}
	sig := Signature(cls.SecretId, cls.SecretKey,
		"GET", "/cursor", params, headers, 300)
	req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/cursor?topic_id=%s&start=%s", cls.Host, logTopicID, url.QueryEscape(start)), nil)
	if err != nil {
		return "", err
	}
	req.Header.Add("Authorization", sig)
	req.Header.Add("Host", cls.Host)
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close() // previously leaked the connection
	if resp.StatusCode != 200 {
		return "", fmt.Errorf("%d", resp.StatusCode)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// The read error used to be silently shadowed by the JSON check.
		return "", err
	}
	cursorStruct := Cursor{}
	if err := json.Unmarshal(body, &cursorStruct); err != nil {
		return "", err
	}
	return cursorStruct.Value, nil
}
// TODO: the service keeps responding with 400 — investigate the signature or
// parameter encoding. (Translated from the original Chinese note.)
//
// SearchLog issues GET /searchlog with the given query parameters and
// returns the raw response body.
func (cls *ClSCleint) SearchLog(requestDataMap map[string]string) (string, error) {
	params := url.Values{}
	for k, v := range requestDataMap {
		params.Add(k, v)
	}
	headers := url.Values{"Host": {cls.Host}, "User-Agent": {"AuthSDK"}}
	sig := Signature(cls.SecretId, cls.SecretKey,
		"GET", "/searchlog", params, headers, 300)
	// params.Encode() escapes values and is safe for an empty map; the old
	// hand-built query string panicked (urlString[1:]) when the map was empty.
	req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/searchlog?%s", cls.Host, params.Encode()), nil)
	if err != nil {
		return "", err
	}
	req.Header.Add("Authorization", sig)
	req.Header.Add("Host", cls.Host)
	req.Header.Add("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close() // previously leaked the connection
	if resp.StatusCode != 200 {
		return "", fmt.Errorf("%d", resp.StatusCode)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	// Return the body instead of printing it and discarding the result.
	return string(body), nil
}
// DowloadLog fetches up to count log entries starting at cursor from the
// /log endpoint; the body is discarded and only the status is checked.
// (The name's typo is kept: it is exported API.)
func (cls *ClSCleint) DowloadLog(logTopicID, cursor, count string) error {
	params := url.Values{"topic_id": {logTopicID}, "cursor": {cursor}, "count": {count}}
	headers := url.Values{"Host": {cls.Host}, "User-Agent": {"AuthSDK"}}
	sig := Signature(cls.SecretId, cls.SecretKey,
		"GET", "/log", params, headers, 300)
	req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/log?topic_id=%s&cursor=%s&count=%s", cls.Host, logTopicID, url.QueryEscape(cursor), count), nil)
	if err != nil {
		return err
	}
	req.Header.Add("Authorization", sig)
	req.Header.Add("Host", cls.Host)
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close() // previously leaked the connection
	if resp.StatusCode != 200 {
		return fmt.Errorf("%d", resp.StatusCode)
	}
	return nil
}
|
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package glock
import (
"bufio"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"github.com/golang/dep"
"github.com/golang/dep/gps"
"github.com/golang/dep/internal/importers/base"
"github.com/pkg/errors"
)
// glockfile is the file name glock stores its pinned dependencies in.
const glockfile = "GLOCKFILE"

// Importer imports glock configuration into the dep configuration format.
type Importer struct {
	*base.Importer
	packages []glockPackage // entries parsed from the GLOCKFILE
}
// NewImporter creates a glock importer backed by the shared base importer.
func NewImporter(logger *log.Logger, verbose bool, sm gps.SourceManager) *Importer {
	i := new(Importer)
	i.Importer = base.NewImporter(logger, verbose, sm)
	return i
}
// Name of the importer.
func (g *Importer) Name() string {
	const importerName = "glock"
	return importerName
}
// HasDepMetadata checks if a directory contains config that the importer can handle.
func (g *Importer) HasDepMetadata(dir string) bool {
	// A glock project is identified solely by the presence of its GLOCKFILE.
	_, err := os.Stat(filepath.Join(dir, glockfile))
	return err == nil
}
// Import the config found in the directory.
func (g *Importer) Import(dir string, pr gps.ProjectRoot) (*dep.Manifest, *dep.Lock, error) {
	if err := g.load(dir); err != nil {
		return nil, nil, err
	}
	manifest, lock := g.convert(pr)
	return manifest, lock, nil
}
// glockPackage is one GLOCKFILE entry: an import path pinned to a revision.
type glockPackage struct {
	importPath string
	revision   string
}
// load reads the project's GLOCKFILE and records every parsable package
// entry in g.packages. Unparsable lines are logged and skipped; scanner
// errors are logged but do not fail the load.
func (g *Importer) load(projectDir string) error {
	g.Logger.Println("Detected glock configuration files...")
	path := filepath.Join(projectDir, glockfile)
	if g.Verbose {
		g.Logger.Printf(" Loading %s", path)
	}
	f, err := os.Open(path)
	if err != nil {
		return errors.Wrapf(err, "unable to open %s", path)
	}
	defer f.Close()
	scan := bufio.NewScanner(f)
	for scan.Scan() {
		entry, perr := parseGlockLine(scan.Text())
		if perr != nil {
			g.Logger.Printf(" Warning: Skipping line. Unable to parse: %s\n", perr)
			continue
		}
		if entry == nil {
			continue // blank or "cmd" line
		}
		g.packages = append(g.packages, *entry)
	}
	if serr := scan.Err(); serr != nil {
		g.Logger.Printf(" Warning: Ignoring errors found while parsing %s: %s\n", path, serr)
	}
	return nil
}
// parseGlockLine parses a single GLOCKFILE line into a package entry.
// Blank lines and "cmd" lines yield (nil, nil); anything that is not
// exactly two whitespace-separated fields is an error.
func parseGlockLine(line string) (*glockPackage, error) {
	fields := strings.Fields(line)
	if len(fields) == 0 {
		return nil, nil // skip empty lines
	}
	if len(fields) != 2 {
		return nil, fmt.Errorf("invalid glock configuration: %s", line)
	}
	if fields[0] == "cmd" {
		return nil, nil // skip commands
	}
	return &glockPackage{importPath: fields[0], revision: fields[1]}, nil
}
// convert translates the loaded glock packages into dep's manifest and lock
// via the shared base importer. Entries with an empty import path or an
// empty revision are skipped with a warning.
func (g *Importer) convert(pr gps.ProjectRoot) (*dep.Manifest, *dep.Lock) {
	g.Logger.Println("Converting from GLOCKFILE ...")
	packages := make([]base.ImportedPackage, 0, len(g.packages))
	for _, pkg := range g.packages {
		// Validate
		if pkg.importPath == "" {
			g.Logger.Println(
				"  Warning: Skipping project. Invalid glock configuration, import path is required",
			)
			continue
		}
		if pkg.revision == "" {
			// Do not add 'empty constraints' to the manifest. Solve will add to lock if required.
			g.Logger.Printf(
				"  Warning: Skipping import with empty constraints. "+
					"The solve step will add the dependency to the lock if needed: %q\n",
				pkg.importPath,
			)
			continue
		}
		packages = append(packages, base.ImportedPackage{
			Name:     pkg.importPath,
			LockHint: pkg.revision, // pinned revision becomes a lock hint only
		})
	}
	// true: also use the imported packages as manifest constraints.
	g.ImportPackages(packages, true)
	return g.Manifest, g.Lock
}
|
package typesutil
import (
"errors"
"go/types"
"strings"
)
var ErrBadType = errors.New("bad type")
type NamedStruct struct {
Named *types.Named
Struct *types.Struct
}
// GetStruct is a helper function that returns a *NamedStruct value that represents
// the struct type of the the given *types.Var. If the struct type is unnamed then
// the Named field of the *NamedStruct value will remain uninitialized. If the var's
// type is not a struct then GetStruct will return an error.
func GetStruct(v *types.Var) (*NamedStruct, error) {
ns := new(NamedStruct)
typ := v.Type()
var ok bool
if ns.Named, ok = typ.(*types.Named); ok {
typ = ns.Named.Underlying()
}
if ns.Struct, ok = typ.(*types.Struct); !ok {
return nil, ErrBadType
}
return ns, nil
}
// GetDirectiveName returns the name of the gosql directive type of the given
// variable. If the type of the given variable is not a directive an empty
// string will be returned instead.
func GetDirectiveName(v *types.Var) string {
named, ok := v.Type().(*types.Named)
if !ok {
return ""
}
path := named.Obj().Pkg().Path()
if !strings.HasSuffix(path, "github.com/frk/gosql") {
return ""
}
st, ok := named.Underlying().(*types.Struct)
if !ok || st.NumFields() != 1 {
return ""
} else if st.Field(0).Name() != "_isdir" {
return ""
}
return named.Obj().Name()
}
|
package main
import (
"fmt"
"os"
"strconv"
"strings"
"time"
"periph.io/x/conn/v3/driver/driverreg"
"periph.io/x/conn/v3/i2c"
"periph.io/x/conn/v3/i2c/i2creg"
_ "periph.io/x/host/v3/bcm283x"
_ "periph.io/x/host/v3/rpi"
)
// temperatureBands maps temperature thresholds to fan speeds. Bands are
// evaluated in order by selectFanSpeed; the last band whose "temp" is
// exceeded wins, so entries must stay sorted by ascending temperature.
var temperatureBands = []map[string]int{
	{"temp": 50, "speed": 10},
	{"temp": 60, "speed": 50},
	{"temp": 65, "speed": 100},
}
// check panics on a non-nil error; used for unrecoverable setup failures.
func check(e error) {
	if e == nil {
		return
	}
	panic(e)
}
// initI2C initialises the periph host drivers, opens the default I2C bus and
// returns a device handle at address 26 (presumably the fan controller's
// fixed address — TODO confirm against the board documentation).
func initI2C() (*i2c.Dev, error) {
	_, err := driverreg.Init()
	if err != nil {
		return nil, err
	}
	b, err := i2creg.Open("") // "" selects the first available bus
	if err != nil {
		return nil, fmt.Errorf("Failed to initialise I2C: %w", err)
	}
	d := &i2c.Dev{Addr: 26, Bus: b}
	return d, nil
}
// getTemperature reads the SoC temperature from sysfs and returns it in
// whole degrees Celsius.
func getTemperature() (int, error) {
	raw, err := os.ReadFile("/sys/class/thermal/thermal_zone0/temp")
	if err != nil {
		return 0, err
	}
	milli, err := strconv.Atoi(strings.TrimSpace(string(raw)))
	if err != nil {
		return 0, err
	}
	// The kernel reports millidegrees.
	return milli / 1000, nil
}
// selectFanSpeed picks the fan speed for the given temperature: the speed of
// the highest band whose threshold is exceeded, or 0 below every band.
func selectFanSpeed(temperature int) int {
	speed := 0
	for _, band := range temperatureBands {
		if temperature > band["temp"] {
			speed = band["speed"]
		}
	}
	return speed
}
// setFanSpeed writes the target speed as a single byte to the fan controller
// over I2C. The byte is presumably a percentage (temperatureBands uses
// 10/50/100) — TODO confirm the controller firmware's contract.
func setFanSpeed(device *i2c.Dev, speed int) error {
	_, err := device.Write([]byte{byte(speed)})
	return err
}
// monitor samples the CPU temperature once, logs it, and drives the fan to
// the matching speed. Failures panic via check.
func monitor(device *i2c.Dev) {
	temp, err := getTemperature()
	check(err)
	fanSpeed := selectFanSpeed(temp)
	fmt.Printf("Current temperature: %d; Target fan speed: %d\n", temp, fanSpeed)
	// Check the write's own error; previously the stale error from
	// getTemperature was re-checked and I2C write failures went unnoticed.
	err = setFanSpeed(device, fanSpeed)
	check(err)
}
// main initialises the I2C fan device and re-evaluates the fan speed every
// 30 seconds until the process is killed.
func main() {
	device, err := initI2C()
	check(err)
	// Adjust the fan immediately; previously nothing happened until the
	// first tick fired, 30 seconds after startup.
	monitor(device)
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for range ticker.C {
		monitor(device)
	}
}
|
package parser
import (
"regexp"
"strconv"
"strings"
)
// memInfoPatterns extract the three requested 1-based character positions
// from the login page. Compiled once at package scope instead of on every
// call.
var memInfoPatterns = [3]*regexp.Regexp{
	regexp.MustCompile(`memInfo1\">Character (\d+)`),
	regexp.MustCompile(`memInfo2\">Character (\d+)`),
	regexp.MustCompile(`memInfo3\">Character (\d+)`),
}

// MemorableCharacters returns the three characters of memorableCharacters
// that the page in body asks for. Markers that are missing from the page or
// that reference an out-of-range position leave the corresponding entry
// empty instead of panicking (the old code indexed a nil submatch).
func MemorableCharacters(body []byte, memorableCharacters string) [3]string {
	memSplit := strings.Split(memorableCharacters, "")
	bodyStr := string(body)
	var result [3]string
	for i, re := range memInfoPatterns {
		m := re.FindStringSubmatch(bodyStr)
		if m == nil {
			continue // marker not present on the page
		}
		idx, err := strconv.Atoi(m[1])
		if err != nil || idx < 1 || idx > len(memSplit) {
			continue // malformed or out-of-range position
		}
		result[i] = memSplit[idx-1] // page positions are 1-based
	}
	return result
}
// SubmitToken extracts the numeric submitToken hidden-field value from the
// page body.
func SubmitToken(body []byte) string {
	match := regexp.MustCompile(`name="submitToken" value="(\d+)`).FindSubmatch(body)
	return string(match[1])
}
|
package metrocard
import (
// "fmt"
)
// Lifecycle returns the sequence of balances a metro card passes through,
// starting at value and decreasing by cost until the balance would go
// negative. A non-positive cost returns just the initial balance, guarding
// against the old infinite loop; a negative starting value returns an empty
// slice (the old code passed a negative length to make and panicked).
func Lifecycle(value, cost float64) []float64 {
	if cost <= 0 {
		return []float64{value}
	}
	balances := make([]float64, 0, int(value/cost)+1)
	for value >= 0 {
		balances = append(balances, value)
		value -= cost
	}
	return balances
}
|
/**
*@Author: haoxiongxiao
*@Date: 2019/3/20
*@Description: CREATE GO FILE admin
*/
package admin
import (
"bysj/services"
"bysj/web/middleware"
"github.com/kataras/iris"
"github.com/spf13/cast"
)
// AuthController handles the admin authentication endpoints.
type AuthController struct {
	Ctx     iris.Context           // per-request context injected by iris MVC
	Service *services.AuthServices // login business logic
	Common                         // embedded shared helpers (e.g. ReturnJson)
}
// NewAuthController builds a controller wired to a fresh AuthServices.
func NewAuthController() *AuthController {
	c := new(AuthController)
	c.Service = services.NewAuthServices()
	return c
}
// PostLogin handles POST /login: it decodes the JSON credentials, performs
// the admin login, and responds with a token plus the user info on success.
// Response codes: 10001 bad request body, 10002 login failure, 10000 success.
func (c *AuthController) PostLogin() {
	// Receiver renamed from the non-idiomatic `this`, and the if/else
	// ladder replaced with early returns.
	m := make(map[string]interface{})
	if err := c.Ctx.ReadJSON(&m); err != nil {
		c.ReturnJson(10001, cast.ToString(err))
		return
	}
	user, err := c.Service.AdminLogin(m)
	if err != nil {
		c.ReturnJson(10002, cast.ToString(err))
		return
	}
	token := middleware.GenrateAdminToken(&user)
	result := map[string]interface{}{
		"code":     10000,
		"message":  "success",
		"token":    token,
		"userinfo": user,
	}
	c.Ctx.JSON(result)
}
|
package main
import (
"github.com/kataras/iris/v12"
"github.com/kataras/iris/v12/mvc"
"log"
)
// newApp builds the iris application with the lottery controller mounted at
// the root route.
func newApp() *iris.Application {
	app := iris.Default()
	root := app.Party("/")
	mvc.New(root).Handle(&lotteryController{})
	return app
}
// main starts the HTTP server on :8081; a graceful-close is not treated as
// an error.
func main() {
	if err := newApp().Run(iris.Addr(":8081"), iris.WithoutServerError(iris.ErrServerClosed)); err != nil {
		log.Fatal(err)
	}
}
|
package leetcode
import "testing"
// TestUniqueOccurrences checks uniqueOccurrences against the LeetCode
// examples, including negative values.
func TestUniqueOccurrences(t *testing.T) {
	cases := []struct {
		in   []int
		want bool
	}{
		{[]int{1, 2, 2, 1, 1, 3}, true},
		{[]int{1, 2}, false},
		{[]int{-3, 0, 1, -3, 1, 1, 1, -3, 10, 0}, true},
	}
	for _, c := range cases {
		if uniqueOccurrences(c.in) != c.want {
			t.Fatal()
		}
	}
}
|
package gothreat
import (
"encoding/json"
)
// AntiVirusData is the decoded JSON body of an antivirus report.
type AntiVirusData struct {
	ResponseCode string   `json:"response_code"`
	Md5          string   `json:"md5"`
	Sha1         string   `json:"sha1"`
	Scans        []string `json:"scans"`
	Ips          []string `json:"ips"`
	Domains      []string `json:"domains"`
	References   []string `json:"references"`
	Permalink    string   `json:"permalink"`
}

// AntiVirusReportRaw fetches the raw report bytes for av through the shared
// process_report helper.
func AntiVirusReportRaw(av string) ([]byte, error) {
	return process_report("antivirus", av)
}
// AntiVirusReport fetches the antivirus report for av and decodes it into an
// AntiVirusData value.
func AntiVirusReport(av string) (AntiVirusData, error) {
	var avData AntiVirusData
	data, err := AntiVirusReportRaw(av)
	if err != nil {
		return avData, err
	}
	// Previously the decode error was silently dropped, so malformed JSON
	// yielded a zero-valued report with a nil error.
	if err := json.Unmarshal(data, &avData); err != nil {
		return avData, err
	}
	return avData, nil
}
|
package main
import "fmt"
// morseCodes maps 'a'..'z' (by index) to international Morse code.
var morseCodes = []string{".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..", ".---", "-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.", "...", "-", "..-", "...-", ".--", "-..-", "-.--", "--.."}

// uniqueMorseRepresentations returns how many distinct Morse transcriptions
// the given words produce. Fixes in this revision: the loop variable no
// longer shadows the builtin `rune`, the quadratic `morse += ...` string
// build uses strings.Builder, and the magic 97 is spelled 'a'.
// Assumes words contain only lowercase ASCII letters — TODO confirm callers.
func uniqueMorseRepresentations(words []string) int {
	seen := make(map[string]struct{})
	for _, word := range words {
		var b strings.Builder
		for _, ch := range word {
			b.WriteString(morseCodes[ch-'a'])
		}
		seen[b.String()] = struct{}{}
	}
	return len(seen)
}
// main prints the number of unique Morse transcriptions for a sample word
// list (expected: 2).
func main() {
	sample := []string{"gin", "zen", "gig", "msg"}
	fmt.Println(uniqueMorseRepresentations(sample))
}
|
package jpush
type AdminRequest struct {
AppName string `json:"app_name,string"`
AndroidPackage string `json:"android_package,string"`
GroupName string `json:"group_name,string"`
}
|
package main
import (
"bufio"
"fmt"
"github.com/tidwall/gjson"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
"time"
)
// checkAndRecord runs the given availability checker over every username,
// printing the outcome and appending available names to out.
func checkAndRecord(check func(string) bool, usernames []string, out *os.File) {
	for _, name := range usernames {
		// Throttle to avoid hammering the API.
		time.Sleep(500 * time.Millisecond)
		if check(name) {
			fmt.Println(name, "IS AVAILABLE TO REGISTER!!!")
			out.WriteString(name + "\r\n")
		} else {
			fmt.Println(name, "is not available to register...")
		}
	}
}

// main reads usernames.txt, asks which checker to run (1..3), and records
// available names in available.txt.
func main() {
	usernameList, err := readLines("usernames.txt")
	if err != nil {
		log.Fatalf("readLines: %s", err)
	}
	file, err := os.Create("available.txt")
	if err != nil {
		// Previously execution continued with a nil *os.File, which panics
		// on the first WriteString; bail out instead.
		log.Fatalf("create available.txt: %s", err)
	}
	defer file.Close()
	fmt.Println("Enter function number (1..3)")
	reader := bufio.NewReader(os.Stdin)
	char, _, err := reader.ReadRune()
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println(string(char), "has been pressed...")
	// Go switch cases do not fall through, so the old `break`s were
	// redundant; the three identical loops are folded into checkAndRecord.
	switch char {
	case '1':
		checkAndRecord(checkUsername1, usernameList, file)
	case '2':
		checkAndRecord(checkUsername2, usernameList, file)
	case '3':
		checkAndRecord(checkUsername3, usernameList, file)
	}
	fmt.Println("\nPress 'Enter' to close...")
	bufio.NewReader(os.Stdin).ReadBytes('\n')
}
// checkUsername1 reports whether username is unregistered on Mixer, using
// the channel-search endpoint. Request failures are treated as "not
// available": the old empty error branch fell through and dereferenced a
// nil response, crashing the program.
// TODO(review): username is interpolated unescaped into the query string.
func checkUsername1(username string) bool {
	resp, err := http.Get("https://mixer.com/api/v1/channels?scope=names&limit=1&q=" + username)
	if err != nil {
		fmt.Println("request failed:", err)
		return false
	}
	defer resp.Body.Close()
	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return false
	}
	registered := gjson.Get(string(bodyBytes), "0.user.username").String()
	// A case-insensitive match means the name is already taken.
	return !strings.EqualFold(username, registered)
}
// checkUsername2 is an alternate availability check that fetches the channel
// directly and compares its "token" field against the username. Request
// failures are treated as "not available" instead of dereferencing a nil
// response as before.
func checkUsername2(username string) bool {
	resp, err := http.Get("https://mixer.com/api/v1/channels/" + username)
	if err != nil {
		fmt.Println("request failed:", err)
		return false
	}
	defer resp.Body.Close()
	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return false
	}
	token := gjson.Get(string(bodyBytes), "token").String()
	// A case-insensitive match means the name is already taken.
	return !strings.EqualFold(username, token)
}
// checkUsername3 is an alternate availability check: the API answers
// "Channel not found." for unclaimed names. Request failures are treated as
// "not available" instead of dereferencing a nil response as before.
func checkUsername3(username string) bool {
	resp, err := http.Get("https://mixer.com/api/v1/channels/" + username)
	if err != nil {
		fmt.Println("request failed:", err)
		return false
	}
	defer resp.Body.Close()
	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return false
	}
	message := gjson.Get(string(bodyBytes), "message").String()
	return message == "Channel not found."
}
func readLines(path string) ([]string, error) {
// https://stackoverflow.com/a/18479916
file, err := os.Open(path)
if err != nil {
return nil, err
}
var lines []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
file.Close()
return lines, scanner.Err()
}
|
package logger
import (
"log"
"go.uber.org/zap"
)
// Logger zap logger instance, shared by the whole process. It is always
// non-nil after package init.
var Logger *zap.Logger

// init eagerly builds a production zap logger; construction failure aborts
// the process, so importers never see a nil Logger.
func init() {
	l, err := zap.NewProduction()
	if err != nil {
		log.Fatalln(err)
	}
	Logger = l
}
|
package service
import (
"context"
"sync"
raEvents "github.com/go-ocf/cloud/resource-aggregate/cqrs/events"
pbRA "github.com/go-ocf/cloud/resource-aggregate/pb"
"github.com/go-ocf/cqrs/event"
"github.com/go-ocf/cqrs/eventstore"
httpUtils "github.com/go-ocf/kit/net/http"
)
// resourceCtx is the eventstore model for a single resource: a resource
// state snapshot guarded by a mutex so event handling and cloning can run
// from different goroutines.
type resourceCtx struct {
	lock     sync.Mutex
	snapshot *raEvents.ResourceStateSnapshotTaken
}

// NewResourceCtx returns a factory that produces empty resourceCtx models
// for the eventstore; snapshots are created with a no-op callback.
func NewResourceCtx() func(context.Context) (eventstore.Model, error) {
	return func(context.Context) (eventstore.Model, error) {
		return &resourceCtx{
			snapshot: raEvents.NewResourceStateSnapshotTaken(func(string, string) error { return nil }),
		}, nil
	}
}

// cloneLocked copies the snapshot field by field into a fresh resourceCtx.
// The caller must hold m.lock.
// NOTE(review): only the fields listed here are copied; keep this in sync
// if ResourceStateSnapshotTaken gains fields — TODO confirm completeness.
func (m *resourceCtx) cloneLocked() *resourceCtx {
	ra := raEvents.NewResourceStateSnapshotTaken(func(string, string) error { return nil })
	ra.ResourceStateSnapshotTaken.LatestResourceChange = m.snapshot.LatestResourceChange
	ra.ResourceStateSnapshotTaken.EventMetadata = m.snapshot.EventMetadata
	ra.ResourceStateSnapshotTaken.Resource = m.snapshot.Resource
	ra.ResourceStateSnapshotTaken.TimeToLive = m.snapshot.TimeToLive
	ra.ResourceStateSnapshotTaken.IsPublished = m.snapshot.IsPublished
	ra.ResourceStateSnapshotTaken.Id = m.snapshot.Id
	return &resourceCtx{
		snapshot: ra,
	}
}

// Clone returns a copy of the model taken under the lock.
func (m *resourceCtx) Clone() *resourceCtx {
	m.lock.Lock()
	defer m.lock.Unlock()
	return m.cloneLocked()
}

// Handle applies the events from iter to the snapshot under the lock.
func (m *resourceCtx) Handle(ctx context.Context, iter event.Iter) error {
	m.lock.Lock()
	defer m.lock.Unlock()
	return m.snapshot.Handle(ctx, iter)
}

// SnapshotEventType identifies snapshot events by their protobuf content type.
func (m *resourceCtx) SnapshotEventType() string {
	return httpUtils.ProtobufContentType(&pbRA.ResourceStateSnapshotTaken{})
}
|
package services
import (
"os"
"sync"
"sort"
"log"
"example.com/finder/models"
)
// dirPath is the directory currently being scanned. It is package-level
// mutable state set by GetEntitiesOrderedBySizeFromPath and read by
// entityChan, so concurrent scans would race on it — TODO confirm callers
// only run one scan at a time.
var dirPath string

// GetFiles lists the immediate children of dirPath. It returns an empty
// (non-nil) slice together with any open/read error.
func GetFiles(dirPath string) ([]os.FileInfo, error) {
	// NOTE: this parameter shadows the package-level dirPath above.
	f, err := os.Open(dirPath)
	if err != nil {
		return []os.FileInfo{}, err
	}
	files, err := f.Readdir(-1) // -1: read all entries at once
	f.Close()
	if err != nil {
		return []os.FileInfo{}, err
	}
	return files, err
}
// GetEntitiesOrderedBySizeFromPath lists the entries under path, resolves
// their actual sizes with a small worker pool, and returns them sorted by
// ascending size together with the total size.
func GetEntitiesOrderedBySizeFromPath(path string) ([]models.FinderEntity, int64, error) {
	dirPath = path
	files, err := GetFiles(dirPath)
	if err != nil {
		return []models.FinderEntity{}, int64(0), err
	}
	in := entityChan(files)
	// Fan out to a fixed pool of size workers.
	c1, e1 := getSizeChan(in)
	c2, e2 := getSizeChan(in)
	c3, e3 := getSizeChan(in)
	// Drain each error channel concurrently. The previous len(eN) check
	// inspected the (unbuffered) channels before the workers had run and
	// never consumed them, so the first error blocked its worker forever
	// and deadlocked the merge loop below.
	for _, ec := range []<-chan error{e1, e2, e3} {
		go func(ec <-chan error) {
			for err := range ec {
				log.Println("calculation is not complete due to an error:", err)
			}
		}(ec)
	}
	var totalSize int64
	var entities []models.FinderEntity
	for entity := range merge(c1, c2, c3) {
		entities = append(entities, entity)
		totalSize += entity.Size
	}
	sort.Slice(entities, func(i, j int) bool {
		return entities[i].Size < entities[j].Size
	})
	return entities, totalSize, nil
}
// entityChan streams a FinderEntity for every file in files on the returned
// channel, closing it when all files are emitted. The producer runs in its
// own goroutine.
func entityChan(files []os.FileInfo) <-chan models.FinderEntity {
	out := make(chan models.FinderEntity)
	go func() {
		for _, file := range files {
			size := file.Size()
			// NOTE(review): positional composite literal — the argument order
			// must match models.FinderEntity's field declaration order.
			entity := models.FinderEntity{
				file,
				file.Name(),
				size,
				models.GetHumanReadableSize(size),
				dirPath, // package-level global set by GetEntitiesOrderedBySizeFromPath
				file.ModTime().Local().Format("Mon Jan 2 15:04:05 MST 2006"),
			}
			out <- entity
		}
		close(out)
	}()
	return out
}
// getSizeChan resolves the actual size of every entity read from inChan,
// forwarding updated entities on the first returned channel and errors on
// the second. Both channels are closed once inChan is drained.
func getSizeChan(inChan <-chan models.FinderEntity) (<-chan models.FinderEntity, <-chan error) {
	outChan := make(chan models.FinderEntity)
	// Buffered so one error never blocks the worker even if the consumer
	// reads errors late; the old unbuffered send deadlocked the pipeline
	// whenever SetActualSize failed and nobody was receiving.
	errChan := make(chan error, 1)
	go func() {
		for entity := range inChan {
			if err := entity.SetActualSize(); err != nil {
				select {
				case errChan <- err:
				default: // error channel full — drop rather than block
				}
			}
			outChan <- entity
		}
		close(outChan)
		close(errChan)
	}()
	return outChan, errChan
}
func merge(cs ...<-chan models.FinderEntity) <-chan models.FinderEntity {
var wg sync.WaitGroup
out := make(chan models.FinderEntity)
output := func(c <-chan models.FinderEntity) {
for n := range c {
out <- n
}
wg.Done()
}
wg.Add(len(cs))
for _, c := range cs {
go output(c)
}
go func() {
wg.Wait()
close(out)
}()
return out
} |
/*
* @lc app=leetcode.cn id=63 lang=golang
*
* [63] 不同路径 II
*/
// @lc code=start
package main
import "fmt"
// main runs uniquePathsWithObstacles on a sample grid with one blocked cell
// and prints the path count.
func main() {
	grid := [][]int{
		{0, 0, 1},
		{0, 0, 0},
		{0, 0, 0},
	}
	fmt.Println(uniquePathsWithObstacles(grid))
}
// uniquePathsWithObstacles counts the monotone right/down paths from the
// top-left to the bottom-right cell of the grid, where cells marked 1 are
// blocked. Classic DP: states[i][j] holds the number of paths reaching
// (i, j).
func uniquePathsWithObstacles(obstacleGrid [][]int) int {
	n, m := len(obstacleGrid), len(obstacleGrid[0])
	states := make([][]int, n)
	for i := range states {
		states[i] = make([]int, m)
	}
	// First row: reachable until the first obstacle cuts it off.
	for j := 0; j < m; j++ {
		switch {
		case obstacleGrid[0][j] == 1:
			states[0][j] = 0
		case j == 0:
			states[0][j] = 1
		default:
			states[0][j] = states[0][j-1]
		}
	}
	// First column: likewise.
	for i := 0; i < n; i++ {
		switch {
		case obstacleGrid[i][0] == 1:
			states[i][0] = 0
		case i == 0:
			states[i][0] = 1
		default:
			states[i][0] = states[i-1][0]
		}
	}
	// Interior cells: sum of the paths from above and from the left.
	for i := 1; i < n; i++ {
		for j := 1; j < m; j++ {
			if obstacleGrid[i][j] == 1 {
				states[i][j] = 0
			} else {
				states[i][j] = states[i-1][j] + states[i][j-1]
			}
		}
	}
	return states[n-1][m-1]
}
// @lc code=end
|
package com
import (
"fmt"
"gopkg.in/go-playground/validator.v9"
"log"
"testing"
)
func Test_Validation(t *testing.T) {
type Address struct {
Street string `validate:"required"`
City string `validate:"required"`
Planet string `validate:"required"`
Phone string `validate:"required"`
}
type User struct {
FirstName string `validate:"required"`
LastName string `validate:"required"`
Age uint8 `validate:"gte=0,lte=130"`
Email string `validate:"required,email"`
FavouriteColor string `validate:"hexcolor|rgb|rgba"`
Addresses []*Address `validate:"required,dive,required"` // a person can have a home and cottage...
}
validate := validator.New()
//addr := Address{
// Street: "Eavesdown Docks",
// Planet: "Persphone",
// Phone: "none",
//}
addrMap := map[string]string{
"street": "Eavesdown",
"planet": "Persphone",
"phone": "none",
}
log.Printf("%+v\n", addrMap)
err := validate.Struct(addrMap)
if err != nil {
if vErr, ok := err.(validator.ValidationErrors); ok {
for _, v := range vErr {
fmt.Printf("%+v\n",v.Field())
}
}
t.Fatalf("could not validate struct: %+v", err)
}
} |
package main
import "fmt"
// do prints a label describing i's dynamic type: "int type", "string type",
// or a %T-formatted fallback for anything else.
func do(i interface{}) {
	switch i.(type) {
	case int:
		fmt.Println("int type")
	case string:
		fmt.Println("string type")
	default:
		fmt.Printf("unknown type %T\n", i)
	}
}
func main() {
do(12)
do("Ayo")
do(4 + 5i)
do(false)
} |
package alibabacloud
import (
"sort"
survey "github.com/AlecAivazis/survey/v2"
"github.com/AlecAivazis/survey/v2/core"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// GetBaseDomain returns a base domain chosen from among the account's domains.
// It lists the account's DNS domains via the Alibaba Cloud client and lets
// the user pick one interactively.
func GetBaseDomain() (string, error) {
	client, err := NewClient(defaultRegion)
	if err != nil {
		return "", err
	}
	logrus.Debugf("listing Alibaba Cloud domains")
	resp, err := client.ListDNSDomain()
	if err != nil {
		return "", err
	}
	domains := []string{}
	for _, domain := range resp.Domains.Domain {
		domains = append(domains, domain.DomainName)
	}
	// Sorted both for stable display and because the validator below relies
	// on binary search over this slice.
	sort.Strings(domains)
	if len(domains) == 0 {
		return "", errors.New("no domain found")
	}
	var basedomain string
	if err := survey.AskOne(
		&survey.Select{
			Message: "Base Domain",
			Help:    "The base domain of the cluster. All DNS records will be sub-domains of this base and will also include the cluster name.\n\nIf you don't see you intended base-domain listed, create a new domain and rerun the installer.",
			Options: domains,
		},
		&basedomain,
		survey.WithValidator(func(ans interface{}) error {
			choice := ans.(core.OptionAnswer).Value
			// Binary search: valid only because domains is sorted above.
			i := sort.SearchStrings(domains, choice)
			if i == len(domains) || domains[i] != choice {
				return errors.Errorf("invalid base domain %q", choice)
			}
			return nil
		}),
	); err != nil {
		return "", errors.Wrap(err, "failed UserInput")
	}
	return basedomain, nil
}
|
package testutils
import (
"io/ioutil"
"os"
"github.com/mitchellh/go-homedir"
)
// NewTempDir constructs a new temporary directory
// and returns the directory name along with a cleanup function
// or any error that occurred during the process
func NewTempDir(name string) (string, func(), error) {
dir, err := ioutil.TempDir("", name)
if err != nil {
return "", nil, err
}
return dir, func() { os.RemoveAll(dir) }, nil
}
// SetupHomeDir points $HOME at newHome (or "." when empty) for a test and
// returns the effective home directory together with a reset function that
// restores the previous $HOME and re-enables homedir caching.
func SetupHomeDir(newHome string) (string, func()) {
	origHome := os.Getenv("HOME")
	if newHome == "" {
		newHome = "."
	}
	// Disable caching so the new HOME is picked up immediately.
	homedir.DisableCache = true
	_ = os.Setenv("HOME", newHome)
	reset := func() {
		homedir.DisableCache = false
		_ = os.Setenv("HOME", origHome)
	}
	return newHome, reset
}
|
package gosupervisor
// SupervisorRPC is a client handle for a supervisor RPC endpoint.
type SupervisorRPC struct {
	URL string
}

// New returns a SupervisorRPC pointing at url.
func New(url string) *SupervisorRPC {
	rpc := SupervisorRPC{URL: url}
	return &rpc
}
|
/*
This file handles configuration info for a DVID datastore and its serialization
to files as well as the keys to be used to store values in the key/value store.
*/
package datastore
import (
"encoding/json"
"fmt"
_ "log"
"github.com/janelia-flyem/dvid/dvid"
"github.com/janelia-flyem/dvid/storage"
)
const (
	// ConfigFilename is name of a file with datastore configuration data
	// just for human inspection.
	ConfigFilename = "dvid-config.json"
)

var (
	// KeyConfig is the key for a DVID configuration. It lives in the global
	// dataset/version space; Index 0x01 distinguishes it from other globals.
	KeyConfig = storage.Key{
		Dataset: storage.KeyDatasetGlobal,
		Version: storage.KeyVersionGlobal,
		Index:   []byte{0x01},
	}
	// KeyVersionDAG is the key for a Version DAG, stored alongside the
	// configuration under Index 0x02.
	KeyVersionDAG = storage.Key{
		Dataset: storage.KeyDatasetGlobal,
		Version: storage.KeyVersionGlobal,
		Index:   []byte{0x02},
	}
)

// runtimeConfig holds editable configuration data for a datastore instance.
type runtimeConfig struct {
	// Data supported. This is a map of a user-defined name like "fib_data" with
	// the supporting data type "grayscale8"
	Datasets map[DatasetString]DatasetService
	// Always incremented counter that provides local dataset ID so we can use
	// smaller # of bytes (dvid.LocalID size) instead of full identifier.
	NewLocalID dvid.LocalID
}
// Get retrieves a configuration from a KeyValueDB by loading the bytes
// stored under KeyConfig and deserializing them into config.
func (config *runtimeConfig) Get(db storage.KeyValueDB) error {
	data, err := db.Get(KeyConfig)
	if err != nil {
		return err
	}
	return config.Deserialize(dvid.Serialization(data))
}
// Put stores a configuration into a KeyValueDB under KeyConfig after
// serializing it.
func (config *runtimeConfig) Put(db storage.KeyValueDB) error {
	serialization, err := config.Serialize()
	if err != nil {
		return err
	}
	return db.Put(KeyConfig, []byte(serialization))
}
// Serialize returns a serialization of configuration with Snappy compression and
// CRC32 checksum. The inverse operation is Deserialize.
func (config *runtimeConfig) Serialize() (s dvid.Serialization, err error) {
	return dvid.Serialize(config, dvid.Snappy, dvid.CRC32)
}
// Deserialize converts a serialization to a runtime configuration and checks
// that every referenced data type was compiled into this executable.
// TODO -- Handle versions of data types.
func (config *runtimeConfig) Deserialize(s dvid.Serialization) error {
	if err := dvid.Deserialize(s, config); err != nil {
		return err
	}
	return config.VerifyCompiledTypes()
}
// VerifyCompiledTypes will return an error if any required data type in the
// datastore configuration was not compiled into the DVID executable. The
// check matches by exact URL, not by data type name.
func (config *runtimeConfig) VerifyCompiledTypes() error {
	var errMsg string
	for name, datatype := range config.Datasets {
		if _, found := CompiledTypes[datatype.DatatypeUrl()]; !found {
			errMsg += fmt.Sprintf("DVID was not compiled with support for %s, data type %s [%s]\n",
				name, datatype.DatatypeName(), datatype.DatatypeUrl())
		}
	}
	if errMsg != "" {
		// fmt.Errorf(errMsg) treated the accumulated text as a format
		// string, so a literal '%' in a dataset name would corrupt the
		// message (and trips go vet).
		return fmt.Errorf("%s", errMsg)
	}
	return nil
}
// DataChart returns a chart of data set names and their types for this
// runtime configuration, one row per data set.
func (config *runtimeConfig) DataChart() string {
	if len(config.Datasets) == 0 {
		return "  No data sets have been added to this datastore.\n  Use 'dvid dataset ...'"
	}
	var chart string
	row := func(name DatasetString, version string, url UrlString) {
		chart += fmt.Sprintf("%-15s  %-25s  %s\n", name, version, url)
	}
	row("Name", "Type Name", "Url")
	for name, dtype := range config.Datasets {
		row(name, dtype.DatatypeName()+" ("+dtype.DatatypeVersion()+")", dtype.DatatypeUrl())
	}
	return chart
}
// About returns a chart of the code versions of the compile-time DVID
// datastore, the storage backend, and the runtime data types.
func (config *runtimeConfig) About() string {
	var chart string
	add := func(name, version string) {
		chart += fmt.Sprintf("%-15s  %s\n", name, version)
	}
	add("Name", "Version")
	add("DVID datastore", Version)
	add("Storage backend", storage.Version)
	for _, dtype := range config.Datasets {
		add(dtype.DatatypeName(), dtype.DatatypeVersion())
	}
	return chart
}
// AboutJSON returns the components and versions of DVID software as a JSON
// object mapping component name to version.
func (config *runtimeConfig) AboutJSON() (string, error) {
	data := map[string]string{
		"DVID datastore":  Version,
		"Storage backend": storage.Version,
	}
	for _, datatype := range config.Datasets {
		data[datatype.DatatypeName()] = datatype.DatatypeVersion()
	}
	m, err := json.Marshal(data)
	if err != nil {
		return "", err
	}
	return string(m), nil
}
// TypeInfo contains data type information reformatted for easy consumption
// by clients (see ConfigJSON).
type TypeInfo struct {
	Name    string // data type name, e.g. "grayscale8"
	Url     string // exact data type URL used for compile-time matching
	Version string
	Help    string
}
// ConfigJSON returns configuration data in JSON format: an object with a
// "Datasets" map from data set name to TypeInfo.
func (config *runtimeConfig) ConfigJSON() (string, error) {
	datasets := make(map[DatasetString]TypeInfo, len(config.Datasets))
	for name, dtype := range config.Datasets {
		datasets[name] = TypeInfo{
			Name:    dtype.DatatypeName(),
			Url:     string(dtype.DatatypeUrl()),
			Version: dtype.DatatypeVersion(),
			Help:    dtype.Help(),
		}
	}
	payload := struct {
		Datasets map[DatasetString]TypeInfo
	}{datasets}
	m, err := json.Marshal(payload)
	if err != nil {
		return "", err
	}
	return string(m), nil
}
|
package commands
import (
"fmt"
"github.com/cuichenli/doing/model"
"github.com/spf13/cobra"
)
// showCommand is the "show" CLI subcommand: it prints every existing record
// to stdout and accepts no positional arguments.
var showCommand = &cobra.Command{
	Use: "show",
	Short: "Show all records",
	Args: cobra.NoArgs,
	RunE: func(cmd *cobra.Command, args []string) error {
		// Load the persisted records, then delegate rendering to show.
		records, err := getExistingRecords()
		if err != nil {
			return err
		}
		return show(&records)
	},
}
// show writes the display form of every record in the list to stdout,
// stopping at the first record that fails to render.
var show = func(recordList *model.RecordList) error {
	for _, rec := range *recordList.GetAllRecords() {
		line, err := rec.ToDisplayString()
		if err != nil {
			return err
		}
		fmt.Println(line)
	}
	return nil
}
|
// Copyright 2021 Clivern. All rights reserved.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
package service
import (
"fmt"
"strings"
"github.com/manifoldco/promptui"
)
// Prompt struct groups promptui-based interactive input helpers.
type Prompt struct {
}
// NotEmpty is a promptui validator that rejects input which is empty or
// consists only of whitespace.
func NotEmpty(input string) error {
	if len(strings.TrimSpace(input)) > 0 {
		return nil
	}
	return fmt.Errorf("Input must not be empty")
}
// Optional is a promptui validator that accepts any input, including empty.
func Optional(_ string) error {
	return nil
}
// IsEmpty reports whether the input is empty or consists only of whitespace.
// The boolean expression is returned directly instead of the original
// `if cond { return true } return false` (staticcheck S1008).
func IsEmpty(input string) bool {
	return strings.TrimSpace(input) == ""
}
// Input request a value from end user, showing label and validating each
// keystroke with the supplied validator.
func (p *Prompt) Input(label string, validate promptui.ValidateFunc) (string, error) {
	prompt := promptui.Prompt{
		Label: label,
		Validate: validate,
		Templates: &promptui.PromptTemplates{
			Prompt: "{{ . }} ",
			Valid: "{{ . | green }} ",
			Invalid: "{{ . | red }} ",
			Success: "{{ . | bold }} ",
		},
	}
	result, err := prompt.Run()
	if err != nil {
		return "", err
	}
	return result, nil
}
// Select request a value from a list from end user; returns the chosen item.
func (p *Prompt) Select(label string, items []string) (string, error) {
	menu := promptui.Select{
		Label: label,
		Items: items,
	}
	_, choice, err := menu.Run()
	if err != nil {
		return "", err
	}
	return choice, nil
}
|
package smg
import (
"encoding/json"
"errors"
"os"
"os/signal"
"path"
"path/filepath"
"syscall"
"github.com/sirupsen/logrus"
"github.com/geniuscirno/smg/configurator"
"github.com/geniuscirno/smg/registrator"
)
// EnvironmentType identifies the deployment environment of the application.
type EnvironmentType int
const (
	// EEnvironmentTypeInvaild marks an unrecognized environment.
	// (The misspelling is part of the exported API and kept for compatibility.)
	EEnvironmentTypeInvaild EnvironmentType = iota
	// EEnvironmentTypePublic is the production ("public") environment.
	EEnvironmentTypePublic
	// EEnvironmentTypeBeta is the "beta" environment.
	EEnvironmentTypeBeta
	// EEnvironmentTypeDev is the "dev" environment.
	EEnvironmentTypeDev
	// EEnvironmentTypeMax is a sentinel bounding the valid range.
	EEnvironmentTypeMax
)
// Server abstracts a runnable server started by Application.Run.
type Server interface {
	Serve() error
}
// applicationOptions collects the optional settings applied by
// ApplicationOption functions passed to NewApplication.
type applicationOptions struct {
	registerEndpoint *registrator.Endpoint // endpoint to announce; nil disables registration
	includeCfgPattern []string // glob patterns of config files to include
	ignoreCfgPattern []string // glob patterns of config files to ignore
	prefix string // working-directory prefix; defaults to os.Getwd()
	cfgPath string // overrides the app-name component of the config root
	nameSpace string // namespace used when registering the endpoint
}
// ApplicationOption mutates applicationOptions; pass instances to NewApplication.
type ApplicationOption func(*applicationOptions)
func WithRegistrator(nameSpace string, advertiseAddr string, meta interface{}) ApplicationOption {
return func(o *applicationOptions) {
o.registerEndpoint = ®istrator.Endpoint{
Addr: advertiseAddr,
Meta: meta,
}
o.nameSpace = nameSpace
}
}
// WithIncludeCfgFile sets the glob patterns of config files to include.
func WithIncludeCfgFile(pattern []string) ApplicationOption {
	return func(o *applicationOptions) {
		o.includeCfgPattern = pattern
	}
}
// WithIgnoreCfgFile sets the glob patterns of config files to ignore.
func WithIgnoreCfgFile(pattern []string) ApplicationOption {
	return func(o *applicationOptions) {
		o.ignoreCfgPattern = pattern
	}
}
// WithPrefix overrides the working-directory prefix.
func WithPrefix(prefix string) ApplicationOption {
	return func(o *applicationOptions) {
		o.prefix = prefix
	}
}
// WithCfgPath overrides the app-name component of the configuration root.
func WithCfgPath(cfgPath string) ApplicationOption {
	return func(o *applicationOptions) {
		o.cfgPath = cfgPath
	}
}
// Config is the shared application configuration loaded from the
// configurator path "<global root>/application/default".
type Config struct {
	RegistryUrl string `json:"registry-url"`
	ResolverUrl string `json:"resolver-url"`
	Environment string `json:"environment"` // one of "public", "beta", "dev"
	Mongo string `json:"mongo"`
	RedisDB map[string]string `json:"redis-db"`
	RedisCache map[string]string `json:"redis-cache"`
}
// Load populates the Config from raw JSON bytes.
func (c *Config) Load(b []byte) error {
	return json.Unmarshal(b, c)
}
// String renders the Config as JSON; a marshal error yields an empty string.
func (c *Config) String() string {
	b, _ := json.Marshal(c)
	return string(b)
}
// Application is the public surface of an smg application: environment
// inspection, running a Server, config loading/watching, and registry URL
// construction.
type Application interface {
	Environment() EnvironmentType
	Run(Server) error
	Load(string, configurator.Loader) error
	Watch(string) (configurator.Watcher, error)
	ApplicationUrl(string, string) string
	Cfg() *Config
}
// application is the concrete Application implementation built by NewApplication.
type application struct {
	opts applicationOptions // accumulated ApplicationOption settings
	appRegistrator *appRegistratorWarpper // non-nil only when registration is enabled
	appConfigurator *appConfiguratorWarpper // configurator access wrapper
	parsedTarget configurator.Target // parsed configurator URL
	cfg *Config // loaded shared application config
	name string
	version string
}
// NewApplication builds an application named name at version, wiring the
// configurator at url and applying the given options. It loads the shared
// application config and, when WithRegistrator was supplied, prepares the
// registrator wrapper. It returns an error if any stage fails or if the
// loaded environment is not one of public/beta/dev.
func NewApplication(name string, version string, url string, opts ...ApplicationOption) (app *application, err error) {
	app = &application{name: name, cfg: &Config{}, version: version}
	for _, opt := range opts {
		opt(&app.opts)
	}
	// Default the prefix to the current working directory.
	if app.opts.prefix == "" {
		wd, err := os.Getwd()
		if err != nil {
			return nil, err
		}
		app.opts.prefix = wd
	}
	// Default to including every config file.
	if len(app.opts.includeCfgPattern) == 0 {
		app.opts.includeCfgPattern = []string{"*.*", "*"}
	}
	parsedTarget, err := parseConfiguratorTarget(url)
	if err != nil {
		return nil, err
	}
	app.parsedTarget = parsedTarget
	app.appConfigurator, err = newAppConfiguratorWarpper(app)
	if err != nil {
		return nil, err
	}
	if err := app.loadApplicationCfg(); err != nil {
		return nil, err
	}
	if app.Environment() == EEnvironmentTypeInvaild {
		return nil, errors.New("invalid environment type")
	}
	// Registration is optional: only set up when an endpoint was configured.
	if app.opts.registerEndpoint != nil {
		app.appRegistrator, err = newAppRegistratorWarpper(app)
		if err != nil {
			return nil, err
		}
	}
	return app, nil
}
// Environment maps the configured environment string to its EnvironmentType;
// unrecognized values yield EEnvironmentTypeInvaild.
func (app *application) Environment() EnvironmentType {
	known := map[string]EnvironmentType{
		"public": EEnvironmentTypePublic,
		"beta": EEnvironmentTypeBeta,
		"dev": EEnvironmentTypeDev,
	}
	if t, ok := known[app.cfg.Environment]; ok {
		return t
	}
	return EEnvironmentTypeInvaild
}
// Run registers the application (when a registrator was configured), starts
// the server in a background goroutine, and blocks until SIGTERM or SIGINT
// is received. The registration is removed on return.
//
// SIGKILL was removed from signal.Notify: it can neither be caught nor
// ignored, so listing it is ineffective (go vet flags it). The one-case
// select was also reduced to a plain channel receive.
func (app *application) Run(server Server) error {
	if app.appRegistrator != nil {
		if err := app.appRegistrator.Register(); err != nil {
			return err
		}
		defer app.appRegistrator.Degister()
	}
	go server.Serve()
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGTERM, syscall.SIGINT)
	<-ch
	logrus.Info("signal captured, exit.")
	return nil
}
// GlobalCfgRoot returns the slash-separated root path of all configuration:
// "<configurator endpoint>/cfg".
func (app *application) GlobalCfgRoot() string {
	return filepath.ToSlash(path.Join(app.parsedTarget.Endpoint, "cfg"))
}
// CfgRoot returns this application's configuration root: the global root
// joined with either the app name (default) or the configured cfgPath
// override, plus the app version.
func (app *application) CfgRoot() string {
	component := app.name
	if app.opts.cfgPath != "" {
		component = app.opts.cfgPath
	}
	return filepath.ToSlash(path.Join(app.GlobalCfgRoot(), component, app.version))
}
// loadApplicationCfg loads the shared application config from
// "<global root>/application/default" into app.cfg.
func (app *application) loadApplicationCfg() error {
	return app.appConfigurator.configurator.Load(path.Join(app.GlobalCfgRoot(), "application", "default"), app.cfg)
}
// Load reads the named file under this application's config root into v.
func (app *application) Load(file string, v configurator.Loader) error {
	return app.appConfigurator.configurator.Load(path.Join(app.CfgRoot(), file), v)
}
// Watch starts watching the named file under this application's config root.
func (app *application) Watch(file string) (configurator.Watcher, error) {
	return app.appConfigurator.configurator.Watch(path.Join(app.CfgRoot(), file))
}
// ApplicationUrl builds the registry URL for the given namespace and name.
func (app *application) ApplicationUrl(nameSpace string, name string) string {
	return app.cfg.RegistryUrl + "/" + path.Join("registry", nameSpace, name)
}
// Cfg returns the loaded shared application configuration.
func (app *application) Cfg() *Config {
	return app.cfg
}
|
package heuristics
// tScore returns 0 when nb already occupies cell (x, y) of puzzle and 1
// otherwise (a per-tile "out of place" indicator).
func tScore(puzzle []int, nb int, x int, y int, size int) float32 {
	if puzzle[get1d(x, y, size)] == nb {
		return 0
	}
	return 1
}
// Tiles out-of place
// toop counts the non-blank tiles of grid that are not in their goal cell
// per finalState. The depth parameter is unused here; it keeps the common
// heuristic signature (see toopA).
func toop(grid []int, size int, depth int) float32 {
	var score float32
	for x := 0; x < size; x++ {
		for y := 0; y < size; y++ {
			if val := grid[get1d(x, y, size)]; val != 0 {
				score += tScore(finalState, val, x, y, size)
			}
		}
	}
	return score
}
// toopA is the A*-style variant of toop: tiles-out-of-place plus the path
// depth (g(n) + h(n)).
func toopA(grid []int, size int, depth int) float32 {
	return toop(grid, size, depth) + float32(depth)
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//110. Balanced Binary Tree
//Given a binary tree, determine if it is height-balanced.
//For this problem, a height-balanced binary tree is defined as:
//a binary tree in which the depth of the two subtrees of every node never differ by more than 1.
//Example 1:
//Given the following tree [3,9,20,null,null,15,7]:
// 3
// / \
// 9 20
// / \
// 15 7
//Return true.
//Example 2:
//Given the following tree [1,2,2,3,3,null,null,4,4]:
// 1
// / \
// 2 2
// / \
// 3 3
// / \
// 4 4
//Return false.
///**
// * Definition for a binary tree node.
// * type TreeNode struct {
// * Val int
// * Left *TreeNode
// * Right *TreeNode
// * }
// */
//func isBalanced(root *TreeNode) bool {
//}
// Time Is Money |
package httpserver
import (
"net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.uber.org/zap"
)
// options holds server configuration assembled from Option values.
type options struct {
	//TODO add storage interface
	logger *zap.Logger // destination for server logs; defaults to a no-op logger
}
// Option server options
type Option interface {
	apply(*options)
}
// loggerOption carries a logger to install via apply.
type loggerOption struct {
	Log *zap.Logger
}
var (
	// logger is the package-wide sugared logger, assigned by StartServer.
	logger *zap.SugaredLogger
	// metricReqProcessed counts handled requests for Prometheus scraping.
	metricReqProcessed = promauto.NewCounter(prometheus.CounterOpts{
		Name: "calculator_request_processed_total",
		Help: "The total number of processed requests",
	})
)
// handleHello handles the /hello URL: writes a greeting, increments the
// processed-requests counter, and logs the caller's address.
func handleHello(w http.ResponseWriter, r *http.Request) {
	_, err := w.Write([]byte("Hello"))
	metricReqProcessed.Inc()
	if err != nil {
		// Errorf is required for printf-style interpolation; the original
		// called Error, which treats "%v" as a literal message fragment
		// and appends err as a separate value.
		logger.Errorf("Error while writing response: %v", err)
	}
	logger.Infof("Request processed from %s", r.RemoteAddr)
}
// StartServer start http server on the given address after applying the
// supplied options; it blocks in ListenAndServe.
func StartServer(address string, opts ...Option) error {
	cfg := options{logger: zap.NewNop()}
	for _, opt := range opts {
		opt.apply(&cfg)
	}
	logger = cfg.logger.Sugar()
	http.HandleFunc("/hello", handleHello)
	return http.ListenAndServe(address, nil)
}
// WithLogger returns an Option that installs the given zap logger.
func WithLogger(log *zap.Logger) Option {
	return loggerOption{Log: log}
}
// apply installs the carried logger into opts.
func (l loggerOption) apply(opts *options) {
	opts.logger = l.Log
}
|
package main
//import fmt "fmt" // Package implementing formatted I/O
// main is intentionally a no-op: the greeting calls below are kept as
// commented-out examples.
func main() {
	//fmt.Printf("Hello, world; Привет, мир; или Καλημέρα κόσμε; или こんにちは 世界\n")
	// fmt.Printf("Hello, world\n");
}
|
package exchange
import (
"io/ioutil"
"testing"
"github.com/ghodss/yaml"
"github.com/test-go/testify/require"
)
// TestHandlePairSyncEvent replays the recorded pair-sync events from the
// YAML fixture through a fresh test subgraph. (Currently skipped.)
func TestHandlePairSyncEvent(t *testing.T) {
	// t.Skip (not Skipf) for a constant message; typo "ski" fixed.
	t.Skip("skip pair sync event")
	testCase := &TestCase{}
	pairSyncEventYaml, err := ioutil.ReadFile("./testdata/TestHandlePairSyncEvent.yaml")
	require.NoError(t, err)
	err = yaml.Unmarshal(pairSyncEventYaml, &testCase)
	require.NoError(t, err)
	intrinsics := NewTestIntrinsics(testCase)
	sg := NewTestSubgraph(intrinsics)
	err = sg.Init()
	require.NoError(t, err)
	for _, ev := range testCase.Events {
		// t.Error (not Errorf) avoids treating '%' in the error text as
		// format verbs (go vet: non-constant format string).
		if err := sg.HandleEvent(ev.Event); err != nil {
			t.Error(err)
		}
	}
}
// TestGetBNBPriceInUSD_BUSDOnly seeds only the BUSD-WBNB pair and expects
// the BNB price to equal that pair's token1Price. (Currently skipped.)
func TestGetBNBPriceInUSD_BUSDOnly(t *testing.T) {
	t.Skip("skip pair sync event")
	testCase := &TestCase{}
	storeYaml := []byte(`---
storeData:
  - type: pair
    entity:
      id: "0x58f876857a02d6762e0101bb5c46a8c1ed44dc16"
      name: "BUSD-WBNB"
      token1Price: "10.00"
      reserve0: "100"
`)
	err := yaml.Unmarshal(storeYaml, &testCase)
	require.NoError(t, err)
	intrinsics := NewTestIntrinsics(testCase)
	sg := NewTestSubgraph(intrinsics)
	err = sg.Init()
	require.NoError(t, err)
	res, err := sg.GetBnbPriceInUSD()
	require.NoError(t, err)
	resFloat, _ := res.Float64()
	// testify's InEpsilon signature is (t, expected, actual, epsilon);
	// the original passed (actual, expected), which skews the relative
	// tolerance computation.
	require.InEpsilon(t, 10.00, resFloat, 0.0001)
}
// TestGetBNBPriceInUSD_USDTOnly seeds only the USDT-WBNB pair and expects
// the BNB price to equal that pair's token0Price. (Currently skipped.)
func TestGetBNBPriceInUSD_USDTOnly(t *testing.T) {
	t.Skip("skip pair sync event")
	testCase := &TestCase{}
	storeYaml := []byte(`---
storeData:
  - type: pair
    entity:
      id: "0x16b9a82891338f9ba80e2d6970fdda79d1eb0dae"
      name: "USDT-WBNB"
      token0Price: "5.00"
      reserve1: "50"
`)
	err := yaml.Unmarshal(storeYaml, &testCase)
	require.NoError(t, err)
	intrinsics := NewTestIntrinsics(testCase)
	sg := NewTestSubgraph(intrinsics)
	err = sg.Init()
	require.NoError(t, err)
	res, err := sg.GetBnbPriceInUSD()
	require.NoError(t, err)
	resFloat, _ := res.Float64()
	// InEpsilon takes (t, expected, actual, epsilon); argument order fixed.
	require.InEpsilon(t, 5.00, resFloat, 0.0001)
}
// TestGetBNBPriceInUSD_BothPairsExist seeds both stable pairs and expects a
// reserve-weighted blend of the two prices. (Currently skipped.)
func TestGetBNBPriceInUSD_BothPairsExist(t *testing.T) {
	t.Skip("skip pair sync event")
	testCase := &TestCase{}
	storeYaml := []byte(`---
storeData:
  - type: pair
    entity:
      id: "0x58f876857a02d6762e0101bb5c46a8c1ed44dc16"
      name: "BUSD-WBNB"
      token1Price: "10.00"
      reserve0: "100"
  - type: pair
    entity:
      id: "0x16b9a82891338f9ba80e2d6970fdda79d1eb0dae"
      name: "USDT-WBNB"
      token0Price: "5.00"
      reserve1: "50"
`)
	err := yaml.Unmarshal(storeYaml, &testCase)
	require.NoError(t, err)
	intrinsics := NewTestIntrinsics(testCase)
	sg := NewTestSubgraph(intrinsics)
	err = sg.Init()
	require.NoError(t, err)
	res, err := sg.GetBnbPriceInUSD()
	require.NoError(t, err)
	resFloat, _ := res.Float64()
	// InEpsilon takes (t, expected, actual, epsilon); argument order fixed.
	require.InEpsilon(t, 8.333333, resFloat, 0.0001)
}
// TestGetBNBPriceInUSD_BothPairsExist_ZeroLiquidity seeds both pairs with
// zero reserves and expects a zero price. (Currently skipped.)
func TestGetBNBPriceInUSD_BothPairsExist_ZeroLiquidity(t *testing.T) {
	t.Skip("skip pair sync event")
	testCase := &TestCase{}
	storeYaml := []byte(`---
storeData:
  - type: pair
    entity:
      id: "0x58f876857a02d6762e0101bb5c46a8c1ed44dc16"
      name: "BUSD-WBNB"
      token1Price: "10.00"
      reserve0: "0"
  - type: pair
    entity:
      id: "0x16b9a82891338f9ba80e2d6970fdda79d1eb0dae"
      name: "USDT-WBNB"
      token0Price: "5.00"
      reserve1: "0"
`)
	err := yaml.Unmarshal(storeYaml, &testCase)
	require.NoError(t, err)
	intrinsics := NewTestIntrinsics(testCase)
	sg := NewTestSubgraph(intrinsics)
	err = sg.Init()
	require.NoError(t, err)
	res, err := sg.GetBnbPriceInUSD()
	require.NoError(t, err)
	resFloat, _ := res.Float64()
	require.Zero(t, resFloat)
}
|
package migration
import (
"database/sql"
"github.com/jinzhu/gorm"
"github.com/pressly/goose"
"tezos_index/puller/models"
)
// init registers this migration's up/down functions with goose.
func init() {
	goose.AddMigration(Up20200910103420, Down20200910103420)
}
// Up20200910103420 is executed when the migration is applied: it wraps the
// goose transaction in a gorm session and auto-migrates every index table.
func Up20200910103420(tx *sql.Tx) error {
	db, err := gorm.Open("mysql", tx)
	if err != nil {
		return err
	}
	return db.AutoMigrate(
		&models.Account{}, &models.Block{}, &models.Chain{}, &models.Flow{},
		&models.Contract{}, &models.Op{}, &models.Supply{}, &models.BigMapItem{},
		&models.Election{}, &models.Proposal{}, &models.Vote{}, &models.Ballot{},
		&models.Income{}, &models.Right{}, &models.Snapshot{}, &models.HarvesterStatus{}).Error
}
// Down20200910103420 is executed when the migration is rolled back: it drops
// every table created by the corresponding Up migration.
func Down20200910103420(tx *sql.Tx) error {
	db, err := gorm.Open("mysql", tx)
	if err != nil {
		return err
	}
	return db.DropTableIfExists(&models.Account{}, &models.Block{}, &models.Chain{}, &models.Flow{},
		&models.Contract{}, &models.Op{}, &models.Supply{}, &models.BigMapItem{},
		&models.Election{}, &models.Proposal{}, &models.Vote{}, &models.Ballot{},
		&models.Income{}, &models.Right{}, &models.Snapshot{}, &models.HarvesterStatus{}).Error
}
|
package charts
import "github.com/go-echarts/go-echarts/v2/opts"
// SingleSeries is the unified option set for one chart series. Which fields
// apply depends on the series Type; the groups below are labeled by the
// chart family that consumes them.
type SingleSeries struct {
	Name string `json:"name,omitempty"`
	Type string `json:"type,omitempty"`
	// Rectangular charts
	Stack string `json:"stack,omitempty"`
	XAxisIndex int `json:"xAxisIndex,omitempty"`
	YAxisIndex int `json:"yAxisIndex,omitempty"`
	// Bar
	BarGap string `json:"barGap,omitempty"`
	BarCategoryGap string `json:"barCategoryGap,omitempty"`
	ShowBackground bool `json:"showBackground,omitempty"`
	RoundCap bool `json:"roundCap,omitempty"`
	// Bar3D
	Shading string `json:"shading,omitempty"`
	// Graph
	Links interface{} `json:"links,omitempty"`
	Layout string `json:"layout,omitempty"`
	Force interface{} `json:"force,omitempty"`
	Categories interface{} `json:"categories,omitempty"`
	Roam bool `json:"roam,omitempty"`
	EdgeSymbol interface{} `json:"edgeSymbol,omitempty"`
	EdgeSymbolSize interface{} `json:"edgeSymbolSize,omitempty"`
	EdgeLabel interface{} `json:"edgeLabel,omitempty"`
	Draggable bool `json:"draggable,omitempty"`
	FocusNodeAdjacency bool `json:"focusNodeAdjacency,omitempty"`
	// KLine
	BarWidth string `json:"barWidth,omitempty"`
	BarMinWidth string `json:"barMinWidth,omitempty"`
	BarMaxWidth string `json:"barMaxWidth,omitempty"`
	// Line
	Step interface{} `json:"step,omitempty"`
	Smooth bool `json:"smooth"`
	ConnectNulls bool `json:"connectNulls"`
	ShowSymbol bool `json:"showSymbol"`
	Symbol string `json:"symbol,omitempty"`
	Color string `json:"color,omitempty"`
	// Liquid
	IsLiquidOutline bool `json:"outline,omitempty"`
	IsWaveAnimation bool `json:"waveAnimation"`
	// Map
	MapType string `json:"map,omitempty"`
	CoordSystem string `json:"coordinateSystem,omitempty"`
	// Pie
	RoseType interface{} `json:"roseType,omitempty"`
	Center interface{} `json:"center,omitempty"`
	Radius interface{} `json:"radius,omitempty"`
	// Scatter
	SymbolSize interface{} `json:"symbolSize,omitempty"`
	// Tree
	Orient string `json:"orient,omitempty"`
	ExpandAndCollapse bool `json:"expandAndCollapse,omitempty"`
	InitialTreeDepth int `json:"initialTreeDepth,omitempty"`
	Leaves interface{} `json:"leaves,omitempty"`
	Left string `json:"left,omitempty"`
	Right string `json:"right,omitempty"`
	Top string `json:"top,omitempty"`
	Bottom string `json:"bottom,omitempty"`
	// TreeMap
	LeafDepth int `json:"leafDepth,omitempty"`
	Levels interface{} `json:"levels,omitempty"`
	UpperLabel interface{} `json:"upperLabel,omitempty"`
	// WordCloud
	Shape string `json:"shape,omitempty"`
	SizeRange []float32 `json:"sizeRange,omitempty"`
	RotationRange []float32 `json:"rotationRange,omitempty"`
	// Sunburst
	NodeClick string `json:"nodeClick,omitempty"`
	Sort string `json:"sort,omitempty"`
	RenderLabelForZeroData bool `json:"renderLabelForZeroData"`
	SelectedMode bool `json:"selectedMode"`
	Animation bool `json:"animation" default:"true"`
	AnimationThreshold int `json:"animationThreshold,omitempty"`
	AnimationDuration int `json:"animationDuration,omitempty"`
	AnimationEasing string `json:"animationEasing,omitempty"`
	AnimationDelay int `json:"animationDelay,omitempty"`
	AnimationDurationUpdate int `json:"animationDurationUpdate,omitempty"`
	AnimationEasingUpdate string `json:"animationEasingUpdate,omitempty"`
	AnimationDelayUpdate int `json:"animationDelayUpdate,omitempty"`
	// series data
	Data interface{} `json:"data,omitempty"`
	DatasetIndex int `json:"datasetIndex,omitempty"`
	// series options (embedded pointers flatten into the series JSON)
	*opts.Encode `json:"encode,omitempty"`
	*opts.ItemStyle `json:"itemStyle,omitempty"`
	*opts.Label `json:"label,omitempty"`
	*opts.LabelLine `json:"labelLine,omitempty"`
	*opts.Emphasis `json:"emphasis,omitempty"`
	*opts.MarkLines `json:"markLine,omitempty"`
	*opts.MarkAreas `json:"markArea,omitempty"`
	*opts.MarkPoints `json:"markPoint,omitempty"`
	*opts.RippleEffect `json:"rippleEffect,omitempty"`
	*opts.LineStyle `json:"lineStyle,omitempty"`
	*opts.AreaStyle `json:"areaStyle,omitempty"`
	*opts.TextStyle `json:"textStyle,omitempty"`
	*opts.CircularStyle `json:"circular,omitempty"`
}
// SeriesOpts is a functional option mutating one SingleSeries.
type SeriesOpts func(s *SingleSeries)
// WithSeriesAnimation enables or disables animation for the series.
func WithSeriesAnimation(enable bool) SeriesOpts {
	return func(s *SingleSeries) {
		s.Animation = enable
	}
}
// WithLabelOpts sets the label.
func WithLabelOpts(opt opts.Label) SeriesOpts {
	return func(s *SingleSeries) {
		s.Label = &opt
	}
}
// WithEmphasisOpts sets the emphasis.
func WithEmphasisOpts(opt opts.Emphasis) SeriesOpts {
	return func(s *SingleSeries) {
		s.Emphasis = &opt
	}
}
// WithAreaStyleOpts sets the area style.
func WithAreaStyleOpts(opt opts.AreaStyle) SeriesOpts {
	return func(s *SingleSeries) {
		s.AreaStyle = &opt
	}
}
// WithItemStyleOpts sets the item style.
func WithItemStyleOpts(opt opts.ItemStyle) SeriesOpts {
	return func(s *SingleSeries) {
		s.ItemStyle = &opt
	}
}
// WithRippleEffectOpts sets the ripple effect.
func WithRippleEffectOpts(opt opts.RippleEffect) SeriesOpts {
	return func(s *SingleSeries) {
		s.RippleEffect = &opt
	}
}
// WithLineStyleOpts sets the line style.
func WithLineStyleOpts(opt opts.LineStyle) SeriesOpts {
	return func(s *SingleSeries) {
		s.LineStyle = &opt
	}
}
// WithCircularStyleOpts sets the circular layout style.
func WithCircularStyleOpts(opt opts.CircularStyle) SeriesOpts {
	return func(s *SingleSeries) {
		s.CircularStyle = &opt
	}
}
/* Chart Options */
// WithBarChartOpts sets the BarChart option.
func WithBarChartOpts(opt opts.BarChart) SeriesOpts {
	return func(s *SingleSeries) {
		s.Stack = opt.Stack
		s.BarGap = opt.BarGap
		s.BarCategoryGap = opt.BarCategoryGap
		s.XAxisIndex = opt.XAxisIndex
		s.YAxisIndex = opt.YAxisIndex
		s.ShowBackground = opt.ShowBackground
		s.RoundCap = opt.RoundCap
		s.CoordSystem = opt.CoordSystem
	}
}
// WithSunburstOpts sets the SunburstChart option.
func WithSunburstOpts(opt opts.SunburstChart) SeriesOpts {
	return func(s *SingleSeries) {
		s.NodeClick = opt.NodeClick
		s.Sort = opt.Sort
		s.RenderLabelForZeroData = opt.RenderLabelForZeroData
		s.SelectedMode = opt.SelectedMode
		s.Animation = opt.Animation
		s.AnimationThreshold = opt.AnimationThreshold
		s.AnimationDuration = opt.AnimationDuration
		s.AnimationEasing = opt.AnimationEasing
		s.AnimationDelay = opt.AnimationDelay
		s.AnimationDurationUpdate = opt.AnimationDurationUpdate
		s.AnimationEasingUpdate = opt.AnimationEasingUpdate
		s.AnimationDelayUpdate = opt.AnimationDelayUpdate
	}
}
// WithGraphChartOpts sets the GraphChart option.
func WithGraphChartOpts(opt opts.GraphChart) SeriesOpts {
	return func(s *SingleSeries) {
		s.Layout = opt.Layout
		s.Force = opt.Force
		s.Roam = opt.Roam
		s.EdgeSymbol = opt.EdgeSymbol
		s.EdgeSymbolSize = opt.EdgeSymbolSize
		s.Draggable = opt.Draggable
		s.FocusNodeAdjacency = opt.FocusNodeAdjacency
		s.Categories = opt.Categories
		s.EdgeLabel = opt.EdgeLabel
	}
}
// WithHeatMapChartOpts sets the HeatMapChart option.
func WithHeatMapChartOpts(opt opts.HeatMapChart) SeriesOpts {
	return func(s *SingleSeries) {
		s.XAxisIndex = opt.XAxisIndex
		s.YAxisIndex = opt.YAxisIndex
	}
}
// WithLineChartOpts sets the LineChart option.
func WithLineChartOpts(opt opts.LineChart) SeriesOpts {
	return func(s *SingleSeries) {
		s.YAxisIndex = opt.YAxisIndex
		s.Stack = opt.Stack
		s.Smooth = opt.Smooth
		s.ShowSymbol = opt.ShowSymbol
		s.Symbol = opt.Symbol
		s.SymbolSize = opt.SymbolSize
		s.Step = opt.Step
		s.XAxisIndex = opt.XAxisIndex
		s.YAxisIndex = opt.YAxisIndex
		s.ConnectNulls = opt.ConnectNulls
		s.Color = opt.Color
	}
}
// WithKlineChartOpts sets the KlineChart option.
func WithKlineChartOpts(opt opts.KlineChart) SeriesOpts {
	return func(s *SingleSeries) {
		s.BarWidth = opt.BarWidth
		s.BarMinWidth = opt.BarMinWidth
		s.BarMaxWidth = opt.BarMaxWidth
	}
}
// WithPieChartOpts sets the PieChart option.
func WithPieChartOpts(opt opts.PieChart) SeriesOpts {
	return func(s *SingleSeries) {
		s.RoseType = opt.RoseType
		s.Center = opt.Center
		s.Radius = opt.Radius
	}
}
// WithScatterChartOpts sets the ScatterChart option.
func WithScatterChartOpts(opt opts.ScatterChart) SeriesOpts {
	return func(s *SingleSeries) {
		s.XAxisIndex = opt.XAxisIndex
		s.YAxisIndex = opt.YAxisIndex
	}
}
// WithLiquidChartOpts sets the LiquidChart option.
func WithLiquidChartOpts(opt opts.LiquidChart) SeriesOpts {
	return func(s *SingleSeries) {
		s.Shape = opt.Shape
		s.IsLiquidOutline = opt.IsShowOutline
		s.IsWaveAnimation = opt.IsWaveAnimation
	}
}
// WithBar3DChartOpts sets the Bar3DChart option.
func WithBar3DChartOpts(opt opts.Bar3DChart) SeriesOpts {
	return func(s *SingleSeries) {
		s.Shading = opt.Shading
	}
}
// WithTreeOpts sets the TreeChart option.
func WithTreeOpts(opt opts.TreeChart) SeriesOpts {
	return func(s *SingleSeries) {
		s.Layout = opt.Layout
		s.Orient = opt.Orient
		s.ExpandAndCollapse = opt.ExpandAndCollapse
		s.InitialTreeDepth = opt.InitialTreeDepth
		s.Roam = opt.Roam
		s.Label = opt.Label
		s.Leaves = opt.Leaves
		s.Right = opt.Right
		s.Left = opt.Left
		s.Top = opt.Top
		s.Bottom = opt.Bottom
	}
}
// WithTreeMapOpts sets the TreeMapChart options.
func WithTreeMapOpts(opt opts.TreeMapChart) SeriesOpts {
	return func(s *SingleSeries) {
		s.Animation = opt.Animation
		s.LeafDepth = opt.LeafDepth
		s.Roam = opt.Roam
		s.Levels = opt.Levels
		s.UpperLabel = opt.UpperLabel
		s.Right = opt.Right
		s.Left = opt.Left
		s.Top = opt.Top
		s.Bottom = opt.Bottom
	}
}
// WithWorldCloudChartOpts sets the WordCloudChart option.
// (The function name's "WorldCloud" spelling is exported API and kept as-is.)
func WithWorldCloudChartOpts(opt opts.WordCloudChart) SeriesOpts {
	return func(s *SingleSeries) {
		s.Shape = opt.Shape
		s.SizeRange = opt.SizeRange
		s.RotationRange = opt.RotationRange
	}
}
// WithMarkLineNameTypeItemOpts sets the type of the MarkLine.
func WithMarkLineNameTypeItemOpts(opt ...opts.MarkLineNameTypeItem) SeriesOpts {
	return func(s *SingleSeries) {
		// Lazily allocate the MarkLines container on first use.
		if s.MarkLines == nil {
			s.MarkLines = &opts.MarkLines{}
		}
		for _, o := range opt {
			s.MarkLines.Data = append(s.MarkLines.Data, o)
		}
	}
}
// WithMarkLineStyleOpts sets the style of the MarkLine.
func WithMarkLineStyleOpts(opt opts.MarkLineStyle) SeriesOpts {
	return func(s *SingleSeries) {
		if s.MarkLines == nil {
			s.MarkLines = &opts.MarkLines{}
		}
		s.MarkLines.MarkLineStyle = opt
	}
}
// WithMarkLineNameCoordItemOpts sets the coordinates of the MarkLine.
func WithMarkLineNameCoordItemOpts(opt ...opts.MarkLineNameCoordItem) SeriesOpts {
	// MLNameCoord is the JSON shape ECharts expects for a coordinate pair.
	type MLNameCoord struct {
		Name string `json:"name,omitempty"`
		Coord []interface{} `json:"coord"`
	}
	return func(s *SingleSeries) {
		if s.MarkLines == nil {
			s.MarkLines = &opts.MarkLines{}
		}
		for _, o := range opt {
			// Each mark line is a two-element slice: start and end point.
			s.MarkLines.Data = append(
				s.MarkLines.Data,
				[]MLNameCoord{{Name: o.Name, Coord: o.Coordinate0}, {Coord: o.Coordinate1}},
			)
		}
	}
}
// WithMarkLineNameXAxisItemOpts sets the X axis of the MarkLine.
func WithMarkLineNameXAxisItemOpts(opt ...opts.MarkLineNameXAxisItem) SeriesOpts {
	return func(s *SingleSeries) {
		if s.MarkLines == nil {
			s.MarkLines = &opts.MarkLines{}
		}
		for _, o := range opt {
			s.MarkLines.Data = append(s.MarkLines.Data, o)
		}
	}
}
// WithMarkLineNameYAxisItemOpts sets the Y axis of the MarkLine.
func WithMarkLineNameYAxisItemOpts(opt ...opts.MarkLineNameYAxisItem) SeriesOpts {
	return func(s *SingleSeries) {
		if s.MarkLines == nil {
			s.MarkLines = &opts.MarkLines{}
		}
		for _, o := range opt {
			s.MarkLines.Data = append(s.MarkLines.Data, o)
		}
	}
}
// WithMarkAreaNameTypeItemOpts sets the type of the MarkArea.
func WithMarkAreaNameTypeItemOpts(opt ...opts.MarkAreaNameTypeItem) SeriesOpts {
	return func(s *SingleSeries) {
		// Lazily allocate the MarkAreas container on first use.
		if s.MarkAreas == nil {
			s.MarkAreas = &opts.MarkAreas{}
		}
		for _, o := range opt {
			s.MarkAreas.Data = append(s.MarkAreas.Data, o)
		}
	}
}
// WithMarkAreaStyleOpts sets the style of the MarkArea.
func WithMarkAreaStyleOpts(opt opts.MarkAreaStyle) SeriesOpts {
	return func(s *SingleSeries) {
		if s.MarkAreas == nil {
			s.MarkAreas = &opts.MarkAreas{}
		}
		s.MarkAreas.MarkAreaStyle = opt
	}
}
// WithMarkAreaNameCoordItemOpts sets the coordinates of the MarkArea.
func WithMarkAreaNameCoordItemOpts(opt ...opts.MarkAreaNameCoordItem) SeriesOpts {
	// MANameCoord is the JSON shape ECharts expects for an area corner.
	type MANameCoord struct {
		Name string `json:"name,omitempty"`
		ItemStyle *opts.ItemStyle `json:"itemStyle"`
		Coord []interface{} `json:"coord"`
	}
	return func(s *SingleSeries) {
		if s.MarkAreas == nil {
			s.MarkAreas = &opts.MarkAreas{}
		}
		for _, o := range opt {
			// Each mark area is a two-element slice: opposite corners.
			s.MarkAreas.Data = append(
				s.MarkAreas.Data,
				[]MANameCoord{
					{Name: o.Name, ItemStyle: o.ItemStyle, Coord: o.Coordinate0},
					{Coord: o.Coordinate1},
				},
			)
		}
	}
}
// WithMarkAreaNameXAxisItemOpts sets the X axis of the MarkArea.
func WithMarkAreaNameXAxisItemOpts(opt ...opts.MarkAreaNameXAxisItem) SeriesOpts {
	return func(s *SingleSeries) {
		if s.MarkAreas == nil {
			s.MarkAreas = &opts.MarkAreas{}
		}
		for _, o := range opt {
			s.MarkAreas.Data = append(s.MarkAreas.Data, o)
		}
	}
}
// WithMarkAreaNameYAxisItemOpts sets the Y axis of the MarkArea.
func WithMarkAreaNameYAxisItemOpts(opt ...opts.MarkAreaNameYAxisItem) SeriesOpts {
	return func(s *SingleSeries) {
		if s.MarkAreas == nil {
			s.MarkAreas = &opts.MarkAreas{}
		}
		for _, o := range opt {
			s.MarkAreas.Data = append(s.MarkAreas.Data, o)
		}
	}
}
// WithMarkPointNameTypeItemOpts sets the type of the MarkPoint.
func WithMarkPointNameTypeItemOpts(opt ...opts.MarkPointNameTypeItem) SeriesOpts {
	return func(s *SingleSeries) {
		// Lazily allocate the MarkPoints container on first use.
		if s.MarkPoints == nil {
			s.MarkPoints = &opts.MarkPoints{}
		}
		for _, o := range opt {
			s.MarkPoints.Data = append(s.MarkPoints.Data, o)
		}
	}
}
// WithMarkPointStyleOpts sets the style of the MarkPoint.
func WithMarkPointStyleOpts(opt opts.MarkPointStyle) SeriesOpts {
	return func(s *SingleSeries) {
		if s.MarkPoints == nil {
			s.MarkPoints = &opts.MarkPoints{}
		}
		s.MarkPoints.MarkPointStyle = opt
	}
}
// WithMarkPointNameCoordItemOpts sets the coordinates of the MarkPoint.
func WithMarkPointNameCoordItemOpts(opt ...opts.MarkPointNameCoordItem) SeriesOpts {
	return func(s *SingleSeries) {
		if s.MarkPoints == nil {
			s.MarkPoints = &opts.MarkPoints{}
		}
		for _, o := range opt {
			s.MarkPoints.Data = append(s.MarkPoints.Data, o)
		}
	}
}
// InitSeriesDefaultOpts applies default values to the series and inherits
// chart-level settings from the given BaseConfiguration.
func (s *SingleSeries) InitSeriesDefaultOpts(c BaseConfiguration) {
	opts.SetDefaultValue(s)
	// some special inherited options from BaseConfiguration
	s.Animation = c.Animation
}
// ConfigureSeriesOpts applies every given option to this series in order.
func (s *SingleSeries) ConfigureSeriesOpts(options ...SeriesOpts) {
	for _, opt := range options {
		opt(s)
	}
}
// MultiSeries represents multiple series.
type MultiSeries []SingleSeries
// SetSeriesOptions sets options for all the series.
// Previous options will be overwritten every time, hence set them on `AddSeries`
// if you want to customize each series individually
//
//	here -> ↓ <-
//
// func (c *Bar) AddSeries(name string, data []opts.BarData, options ...SeriesOpts)
func (ms *MultiSeries) SetSeriesOptions(opts ...SeriesOpts) {
	s := *ms
	for i := 0; i < len(s); i++ {
		s[i].ConfigureSeriesOpts(opts...)
	}
}
// WithEncodeOpts Set encodes for dataSets
func WithEncodeOpts(opt opts.Encode) SeriesOpts {
	return func(s *SingleSeries) {
		s.Encode = &opt
	}
}
// WithDatasetIndex sets the datasetIndex option.
func WithDatasetIndex(index int) SeriesOpts {
	return func(s *SingleSeries) {
		s.DatasetIndex = index
	}
}
|
package infrastructure
import (
"github.com/caarlos0/env"
"github.com/pkg/errors"
"regexp"
)
// Config holds the converter's environment-derived settings.
type Config struct {
	Port string `env:"CONVERTER_PORT" envDefault:":8000"` // listen address in ":<port>" form
}
// LoadConfig reads the configuration from environment variables and then
// validates it. The (possibly partial) config is returned alongside any error.
func LoadConfig() (Config, error) {
	cnf := Config{}
	if err := env.Parse(&cnf); err != nil {
		return cnf, err
	}
	return cnf, validateConfig(cnf)
}
// validateConfig checks that the configured port has exactly the form
// ":<digits>" (e.g. ":8000"). The pattern is anchored: the original
// unanchored `:\d+` matched a colon-digits sequence anywhere in the string,
// accepting invalid values such as "x:1y".
func validateConfig(cnf Config) error {
	matched, err := regexp.MatchString(`^:\d+$`, cnf.Port)
	if err != nil {
		return errors.WithStack(err)
	}
	if !matched {
		return errors.New("port is not valid")
	}
	return nil
}
|
package tsrv
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00300101 is the XML root for the ISO 20022 tsrv.003.001.01 message
// (UndertakingIssuanceNotification).
type Document00300101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:tsrv.003.001.01 Document"`
	Message *UndertakingIssuanceNotificationV01 `xml:"UdrtkgIssncNtfctn"`
}
// AddMessage allocates the message payload and returns it for population.
func (d *Document00300101) AddMessage() *UndertakingIssuanceNotificationV01 {
	d.Message = new(UndertakingIssuanceNotificationV01)
	return d.Message
}
// The UndertakingIssuanceNotification message is sent by the party that issued the undertaking to the applicant to notify it of the contents of an undertaking issued electronically or on paper. The undertaking that is notified could be a demand guarantee, standby letter of credit, counter-undertaking (counter-guarantee or counter-standby), or suretyship undertaking. In addition to containing details on the applicable rules, expiry date, the amount, required documents, and terms and conditions of the undertaking, the message may provide information from the sender such as confirmation details.
// UndertakingIssuanceNotificationV01 carries the notification details and an
// optional digital signature (see the message description above).
type UndertakingIssuanceNotificationV01 struct {
	// Details related to the notification of the issued undertaking.
	UndertakingIssuanceNotificationDetails *iso20022.UndertakingAdvice2 `xml:"UdrtkgIssncNtfctnDtls"`
	// Digital signature of the undertaking notification.
	DigitalSignature *iso20022.PartyAndSignature2 `xml:"DgtlSgntr,omitempty"`
}
// AddUndertakingIssuanceNotificationDetails allocates and returns the details element.
func (u *UndertakingIssuanceNotificationV01) AddUndertakingIssuanceNotificationDetails() *iso20022.UndertakingAdvice2 {
	u.UndertakingIssuanceNotificationDetails = new(iso20022.UndertakingAdvice2)
	return u.UndertakingIssuanceNotificationDetails
}
// AddDigitalSignature allocates and returns the optional signature element.
func (u *UndertakingIssuanceNotificationV01) AddDigitalSignature() *iso20022.PartyAndSignature2 {
	u.DigitalSignature = new(iso20022.PartyAndSignature2)
	return u.DigitalSignature
}
|
package main
import (
"bufio"
"fmt"
"io"
"log"
"net"
)
// file is an in-memory file: a name plus its raw contents.
type file struct {
	name string
	content []byte
}
// server holds the flat list of files exposed over each connection.
type server struct {
	files []file
}
// ftp is the process-wide server instance, seeded with demo files in init.
var ftp *server
func init() {
	f1 := file{"hello.txt", []byte("helllo world")} // "helllo" (sic) — demo content preserved as-is
	f2 := file{"goodbye.txt", []byte("goodbye")}
	ftp = &server{[]file{f1, f2}}
}
// ls writes a newline-terminated listing of all file names to the connection,
// returning the byte count and error from the write.
func (s *server) ls(c net.Conn) (int, error) {
	listing := ""
	for i := range s.files {
		listing += s.files[i].name + "\n"
	}
	return io.WriteString(c, listing)
}
// handle serves one client connection, reading commands line by line
// until the client disconnects, sends "close", or a write fails.
func handle(c net.Conn) {
	defer c.Close()
	in := bufio.NewScanner(c)
	for in.Scan() {
		cmd := in.Text()
		switch cmd {
		case "ls":
			if _, err := ftp.ls(c); err != nil {
				return
			}
		case "close":
			// Best-effort farewell; the connection closes either way.
			io.WriteString(c, "bye!")
			return
		default:
			if _, err := io.WriteString(c, fmt.Sprintf("%s: %s\n", cmd, "command not found")); err != nil {
				return
			}
		}
	}
}
// main listens on localhost:8000 and serves each client connection in
// its own goroutine.
func main() {
	listener, err := net.Listen("tcp", "localhost:8000")
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := listener.Accept()
		if err != nil {
			// A failed accept is logged but does not stop the server.
			log.Print(err)
			continue
		}
		go handle(conn)
	}
}
|
package simple_factory
import "fmt"
// Person is the product interface produced by the factory; every
// concrete person can Say something.
type Person interface {
	Say()
}
// Man is a concrete Person.
// NOTE(review): the embedded Person is never assigned; calling any
// promoted method other than the overriding Say would panic on a nil
// interface — confirm this embedding is intentional.
type Man struct {
	Person
}

// Say prints "man".
func (m *Man) Say() {
	fmt.Println("man")
}
// Women is a concrete Person.
// NOTE(review): same nil-embedded-interface caveat as Man.
type Women struct {
	Person
}

// Say prints "women".
func (w *Women) Say() {
	fmt.Println("women")
}
// NewPerson is a simple factory: 0 yields a *Women, 1 yields a *Man,
// and any other value yields nil.
func NewPerson(t int) Person {
	if t == 0 {
		return &Women{}
	}
	if t == 1 {
		return &Man{}
	}
	return nil
}
|
package models
import (
"fmt"
"github.com/astaxie/beego/orm"
"github.com/devplayg/ipas-mcs/objs"
log "github.com/sirupsen/logrus"
)
// GetStatsBy searches ranking statistics by organization, group, or
// equipment (tables named stats_<evt>_by_(group|equip)). Visibility
// depends on the caller: administrators see everything; regular users
// are limited to assets granted to them in mbr_asset.
// Returns the matching rows, the row count, and any query error.
func GetStatsBy(member *objs.Member, filter *objs.StatsFilter) ([]objs.Stats, int64, error) {
	var where string
	var args []interface{}
	var assetId int
	query := `
		select date, asset_id, item, count, rank
		from stats_%s_by_%s
		where date = (select value_s from sys_config where section = ? and keyword = ?) and asset_id = ? %s
		order by rank asc
		limit ?
	`
	// The date subquery pins results to the last statistics run.
	args = append(args, "stats", "last_updated")
	if filter.OrgId < 1 { // Global statistics requested
		if member.Position >= objs.Administrator { // Administrator session: allowed
			assetId = -1
		} else { // Regular-user session: apply access control
			assetId = member.MemberId * -1
		}
		args = append(args, assetId)
	} else { // Organization statistics requested
		if filter.GroupId < 0 { // Data for all groups requested
			if member.Position >= objs.Administrator { // Administrator session: allowed
				args = append(args, filter.GroupId)
			} else { // Regular-user session: apply access control
				args = append(args, filter.OrgId)
				if filter.AssetType == "group" {
					where += " and SUBSTRING_INDEX(item, '/', -1) in (select asset_id from mbr_asset where member_id = ?)"
					args = append(args, member.MemberId)
				} else if filter.AssetType == "equip" {
					where += " and item in (select equip_id from ast_ipas where org_id = ? and group_id in (select asset_id from mbr_asset where member_id = ?))"
					args = append(args, filter.OrgId, member.MemberId)
				} else {
					// Unknown asset type: return an empty result, not an error.
					return nil, 0, nil
				}
			}
		} else { // Statistics for one specific group requested
			if member.Position >= objs.Administrator { // Administrator session: allowed
				args = append(args, filter.GroupId)
			} else { // Regular-user session: apply access control
				where += " and asset_id in (select asset_id from mbr_asset where member_id = ?)"
				args = append(args, filter.GroupId, member.MemberId)
			}
		}
	}
	// where date >= ? and date <= ? and asset_id = ? %s
	query = fmt.Sprintf(query, filter.StatsType, filter.AssetType, where)
	args = append(args, filter.Top)
	var rows []objs.Stats
	o := orm.NewOrm()
	// NOTE(review): args is passed as a single slice — beego's Raw is
	// expected to flatten it into the placeholders; confirm.
	total, err := o.Raw(query, args).QueryRows(&rows)
	if err != nil {
		log.Error(err)
	}
	return rows, total, err
}
// GetStatsByAssetId searches ranking statistics keyed by asset ID.
// Administrators see everything; regular users are limited to assets
// granted to them in mbr_asset. Returns the matching rows, the row
// count, and any query error.
func GetStatsByAssetId(member *objs.Member, filter *objs.StatsFilter) ([]objs.Stats, int64, error) {
	var where string
	var args []interface{}
	var assetId int
	query := `
		select date, asset_id, item, count, rank
		from stats_%s
		where date = (select value_s from sys_config where section = ? and keyword = ?) %s
		order by rank asc
		limit ?
	`
	args = append(args, "stats", "last_updated")
	if filter.OrgId < 1 { // Global statistics requested
		where += " and asset_id = ?"
		if member.Position >= objs.Administrator { // Administrator session: allowed
			assetId = -1
		} else { // Regular-user session: apply access control
			assetId = member.MemberId * -1
		}
		args = append(args, assetId)
	} else { // Organization statistics requested
		if filter.GroupId < 0 { // All groups requested (organization selected, no specific group)
			if member.Position >= objs.Administrator { // Administrator session: allowed
				where += " and asset_id = ?"
				args = append(args, filter.OrgId)
			} else { // Regular-user session: apply access control
				// Rewrite the query to restrict to the member's groups
				// inside the requested organization.
				query = `
		select date, asset_id, item, count, rank
		from stats_%s
		where date = (select value_s from sys_config where section = ? and keyword = ?)
			and asset_id in (
				select asset_id
				from ast_asset
				where parent_id = ? and asset_id in (select asset_id from mbr_asset where member_id = ?)
			) %s
		order by count desc, item asc
		limit ?
	`
				args = append(args, filter.OrgId, member.MemberId)
			}
		} else { // Statistics for one specific group requested
			where += " and asset_id = ?"
			if member.Position >= objs.Administrator { // Administrator session: allowed
				args = append(args, filter.GroupId)
			} else {
				where += " and asset_id in (select asset_id from mbr_asset where member_id = ?)"
				args = append(args, filter.GroupId, member.MemberId)
			}
		}
	}
	query = fmt.Sprintf(query, filter.StatsType, where)
	args = append(args, filter.Top)
	var rows []objs.Stats
	o := orm.NewOrm()
	total, err := o.Raw(query, args).QueryRows(&rows)
	if err != nil {
		log.Error(err)
	}
	return rows, total, err
}
//
//func GetEquipCountByType(member *objs.Member, filter *objs.StatsFilter) ([]objs.TagCount, error) {
// var where string
// var args []interface{}
// query := `
// select equip_type, count(*) count
// from ast_ipas
// where true %s
// group by equip_type
// `
// if filter.OrgId < 1 { // 전체통계 요청 시
// if member.Position >= objs.Administrator { // 관리자 세션이면 허용
// } else { // 일반사용자 세션이면 접근제어
// where += " and group_id in (select asset_id from mbr_asset where member_id = ?)"
// args = append(args, member.MemberId)
// }
// } else { // 기관통계 요청 시
// if filter.GroupId < 0 { // 기관 전체통계 요청 시
// if member.Position >= objs.Administrator { // 관리자 세션이면 허용
// where += " and org_id = ?"
// args = append(args, filter.OrgId)
// } else { // 일반사용자 세션이면 접근제어
// where += " and org_id = ? and group_id in (select asset_id from mbr_asset where member_id = ?)"
// args = append(args, filter.OrgId, member.MemberId)
// }
// } else { // 특정 그룹통계 요청 시
// if member.Position >= objs.Administrator { // 관리자 세션이면 허용
// where += " and org_id = ? and group_id = ?"
// args = append(args, filter.OrgId, filter.GroupId)
// } else { // 일반사용자 세션이면 접근제어
// where += " and org_id = ? and group_id = ? and group_id in (select asset_id from mbr_asset where member_id = ?)"
// args = append(args, filter.OrgId, filter.GroupId, member.MemberId)
// }
// }
// }
//
// var rows []objs.TagCount
// o := orm.NewOrm()
// query = fmt.Sprintf(query, where)
// _, err := o.Raw(query, args).QueryRows(&rows)
// return rows, err
//}
// GetStatsByOrgGroup serves requests for statistics data without
// ranking. The same role-based scoping applies: administrators see
// everything; regular users are restricted to groups granted to them
// in mbr_asset. Returns the rows and any query error.
func GetStatsByOrgGroup(member *objs.Member, filter *objs.StatsFilter) ([]objs.Stats, error) {
	var where string
	var args []interface{}
	query := `
		select *
		from stats_%s
		where date = (select value_s from sys_config where section = ? and keyword = ?) %s
	`
	args = append(args, "stats", "last_updated")
	if filter.OrgId < 1 { // Global statistics requested
		if member.Position >= objs.Administrator { // Administrator session: no extra filter
		} else { // Regular-user session: apply access control
			where += " and group_id in (select asset_id from mbr_asset where member_id = ?)"
			args = append(args, member.MemberId)
		}
	} else { // Organization statistics requested
		if filter.GroupId < 0 { // Organization-wide statistics requested
			if member.Position >= objs.Administrator { // Administrator session: allowed
				where += " and org_id = ?"
				args = append(args, filter.OrgId)
			} else { // Regular-user session: apply access control
				where += " and org_id = ? and group_id in (select asset_id from mbr_asset where member_id = ?)"
				args = append(args, filter.OrgId, member.MemberId)
			}
		} else { // Statistics for one specific group requested
			if member.Position >= objs.Administrator { // Administrator session: allowed
				where += " and org_id = ? and group_id = ?"
				args = append(args, filter.OrgId, filter.GroupId)
			} else { // Regular-user session: apply access control
				where += " and org_id = ? and group_id = ? and group_id in (select asset_id from mbr_asset where member_id = ?)"
				args = append(args, filter.OrgId, filter.GroupId, member.MemberId)
			}
		}
	}
	var rows []objs.Stats
	o := orm.NewOrm()
	query = fmt.Sprintf(query, filter.StatsType, where)
	_, err := o.Raw(query, args).QueryRows(&rows)
	return rows, err
}
// GetEquipStats returns raw statistics rows for a single piece of
// equipment within the [StartDate, EndDate] window (seconds padded to
// the minute boundaries). Non-administrators are additionally limited
// to groups granted to them in mbr_asset.
// NOTE(review): the equip_id placeholder is filled from filter.EquipIp
// — confirm that field actually carries the equipment id.
func GetEquipStats(member *objs.Member, filter objs.StatsFilter) ([]objs.Stats, error) {
	var where string
	var args []interface{}
	query := `
		select *
		from stats_%s
		where date >= ? and date <= ? and org_id = ? and equip_id = ? %s
	`
	args = append(args, filter.StartDate+":00", filter.EndDate+":59", filter.OrgId, filter.EquipIp)
	if member.Position < objs.Administrator {
		where += " and group_id in (select asset_id from mbr_asset where member_id = ?)"
		args = append(args, member.MemberId)
	}
	var rows []objs.Stats
	o := orm.NewOrm()
	query = fmt.Sprintf(query, filter.StatsType, where)
	_, err := o.Raw(query, args).QueryRows(&rows)
	return rows, err
}
package validate
import (
"fmt"
"net/url"
)
// URI validates that uri parses as a request URI; it returns the parse
// error, or nil when the URI is valid.
func URI(uri string) error {
	if _, err := url.ParseRequestURI(uri); err != nil {
		return err
	}
	return nil
}
// Currency returns nil when currency appears in currencyArray, and an
// error otherwise.
func Currency(currency string, currencyArray []string) error {
	for i := range currencyArray {
		if currencyArray[i] == currency {
			return nil
		}
	}
	return fmt.Errorf("currency not supported")
}
|
package config
import (
"testing"
)
// TestRead is currently disabled: its entire body is commented out.
// TODO(review): either restore the DB-backed config read below (it
// requires a reachable MySQL instance and real credentials) or delete
// the test so it stops reading as coverage it does not provide.
func TestRead(t *testing.T) {
	//configDB := &ConfigDB{
	//	Dbhost:     "172.26.163.76",
	//	Dbport:     "3306",
	//	Dbuser:     "rookie2",
	//	Dbpassword: "12345678",
	//	Dbname:     "dbconfig",
	//	Tblname:    "params",
	//}
	////err := configDB.GetParameters(PARAMS)
	//if err != nil {
	//	fmt.Println(err)
	//}
	//fmt.Println(PARAMS)
}
|
package notifications
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/go-kit/kit/log"
"github.com/stretchr/testify/assert"
"gopkg.in/DATA-DOG/go-sqlmock.v1"
)
// TestCreateNotificationShouldReturnNoError verifies the happy path:
// the insert runs with the notification's fields and the repository
// reports no error, with all mock expectations met.
func TestCreateNotificationShouldReturnNoError(t *testing.T) {
	db, mock, err := setUpTestDatabase()
	assert.NoError(t, err)
	n := &Notification{
		Message:   "Supper message",
		CreatedAt: time.Now().Unix(),
		Checked:   false,
		For:       1,
		Status:    Info,
	}
	mock.ExpectQuery(`^insert into notifications \(message, created_at, noti_status, for_user, checked\) values\(\$1, \$2, \$3, \$4, \$5\) returning id$`).
		WithArgs(n.Message, n.CreatedAt, n.Status, n.For, n.Checked).
		WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(1))
	assert.NoError(t, NewRepository(db, log.NewNopLogger()).CreateNotification(context.Background(), n))
	assert.NoError(t, mock.ExpectationsWereMet())
}
// TestCreateNotificationShouldReturnError verifies that an invalid
// status ("random") makes CreateNotification fail before the insert
// runs, so the mock's expectation is left unmet.
func TestCreateNotificationShouldReturnError(t *testing.T) {
	db, mock, err := setUpTestDatabase()
	assert.NoError(t, err)
	n := &Notification{
		Message:   "Supper message",
		CreatedAt: time.Now().Unix(),
		Checked:   false,
		For:       1,
		Status:    "random",
	}
	mock.ExpectQuery(`^insert into notifications \(message, created_at, noti_status, for_user, checked\) values\(\$1, \$2, \$3, \$4, \$5\) returning id$`).
		WithArgs(n.Message, n.CreatedAt, n.Status, n.For, n.Checked).
		WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(1))
	assert.Error(t, NewRepository(db, log.NewNopLogger()).CreateNotification(context.Background(), n))
	assert.Error(t, mock.ExpectationsWereMet())
}
// TestFindNotificationByIdShouldReturnListOfNotifiaction verifies that
// querying the mocked user id (1) returns both unchecked rows.
func TestFindNotificationByIdShouldReturnListOfNotifiaction(t *testing.T) {
	db, mock, err := setUpTestDatabase()
	assert.NoError(t, err)
	rows := sqlmock.NewRows([]string{"id", "message", "created_at", "noti_status", "for_user", "checked"}).
		AddRow(1, "New Message", time.Now().Unix(), Info, 1, false).
		AddRow(2, "New Message 2", time.Now().Unix(), Info, 1, false)
	mock.ExpectQuery(`select \* from notifications where for_user = \$1 and checked = false`).
		WithArgs(1).
		WillReturnRows(rows)
	res, err := NewRepository(db, log.NewNopLogger()).FindNotificationsById(context.Background(), 1)
	assert.NoError(t, err)
	assert.Equal(t, 2, len(res))
	assert.NoError(t, mock.ExpectationsWereMet())
}
// TestFindNotificationByIdShouldReturnErrFromDb verifies that querying
// an id (2) other than the one the mock expects (1) surfaces an error
// and a nil result, and leaves the expectation unmet.
func TestFindNotificationByIdShouldReturnErrFromDb(t *testing.T) {
	db, mock, err := setUpTestDatabase()
	assert.NoError(t, err)
	rows := sqlmock.NewRows([]string{"id", "message", "created_at", "noti_status", "for_user", "checked"}).
		AddRow(1, "New Message", time.Now().Unix(), Info, 1, false).
		AddRow(2, "New Message 2", time.Now().Unix(), Info, 1, false)
	mock.ExpectQuery(`select \* from notifications where for_user = \$1 and checked = false`).
		WithArgs(1).
		WillReturnRows(rows)
	res, err := NewRepository(db, log.NewNopLogger()).FindNotificationsById(context.Background(), 2)
	assert.Error(t, err)
	assert.Nil(t, res)
	assert.Error(t, mock.ExpectationsWereMet())
}
// TestCheckNotificationShouldReturnNoErr verifies the happy path of
// CheckNotification: the ids are interpolated into the update statement
// and the user id is bound as the only placeholder.
func TestCheckNotificationShouldReturnNoErr(t *testing.T) {
	db, mock, err := setUpTestDatabase()
	assert.NoError(t, err)
	indexes := []int{1, 2}
	sIndexes := []string{"1", "2"}
	userId := 1
	mock.ExpectExec(fmt.Sprintf("^update notifications set checked = true where id in \\(%s\\) and for_user = \\$1$", strings.Join(sIndexes, ","))).
		WithArgs(userId).
		WillReturnResult(sqlmock.NewResult(1, 1))
	assert.NoError(t, NewRepository(db, log.NewNopLogger()).CheckNotification(context.Background(), indexes, userId))
	assert.NoError(t, mock.ExpectationsWereMet())
}
// TestCheckNotificationShouldReturnErrWhenUserNotFound verifies that
// the update errs when the mock declares no result for the exec
// (unknown user id).
func TestCheckNotificationShouldReturnErrWhenUserNotFound(t *testing.T) {
	db, mock, err := setUpTestDatabase()
	assert.NoError(t, err)
	indexes := []int{1, 2}
	sIndexes := []string{"1", "2"}
	userId := -381
	mock.ExpectExec(fmt.Sprintf("^update notifications set checked = true where id in \\(%s\\) and for_user = \\$1$", strings.Join(sIndexes, ","))).
		WithArgs(userId)
	assert.Error(t, NewRepository(db, log.NewNopLogger()).CheckNotification(context.Background(), indexes, userId))
}
// TestCheckNotificationShouldReturnErrWhenNotificationsNotFound mirrors
// the previous test for unknown notification ids: no result is declared
// for the exec, so CheckNotification must return an error.
func TestCheckNotificationShouldReturnErrWhenNotificationsNotFound(t *testing.T) {
	db, mock, err := setUpTestDatabase()
	assert.NoError(t, err)
	indexes := []int{3, 4}
	sIndexes := []string{"3", "4"}
	userId := 1
	mock.ExpectExec(fmt.Sprintf("^update notifications set checked = true where id in \\(%s\\) and for_user = \\$1$", strings.Join(sIndexes, ","))).
		WithArgs(userId)
	assert.Error(t, NewRepository(db, log.NewNopLogger()).CheckNotification(context.Background(), indexes, userId))
}
package dto
// SignupUserDto carries the JSON payload of a user signup request.
type SignupUserDto struct {
	Username     string `json:"username"`
	Password     string `json:"password"`
	Email        string `json:"email"`
	IsSubscribed bool   `json:"isSubscribed"`
}
|
package main
import (
"fmt"
"image"
"math"
"os"
)
// Rnd abstracts a source of random float64 values — presumably in
// [0, 1) like math/rand.Float64; confirm with implementations, as the
// sampling helpers below rely on that range.
type Rnd interface {
	Float64() float64
}
// randomInUnitSphere returns a uniformly distributed point inside the
// unit sphere, via rejection sampling: draw candidates from the
// [-1,1)^3 cube until one falls strictly inside the sphere.
//
// The previous implementation normalized a point drawn from [0,1)^3
// onto the sphere, which (a) produced only unit-length vectors, (b)
// covered only the positive octant, and (c) divided by zero when all
// three draws were 0 — a biased sample for diffuse scattering.
func randomInUnitSphere(rnd Rnd) Vector {
	for {
		p := Vector{
			2.0*rnd.Float64() - 1.0,
			2.0*rnd.Float64() - 1.0,
			2.0*rnd.Float64() - 1.0,
		}
		if Dot(p, p) < 1.0 {
			return p
		}
	}
}
// randomInUnitDisk returns a uniformly distributed point inside the
// unit disk in the z=0 plane; sqrt on the radius corrects for area.
func randomInUnitDisk(rnd Rnd) Vector {
	angle := rnd.Float64() * math.Pi * 2.0
	radius := math.Sqrt(rnd.Float64())
	sin, cos := math.Sincos(angle)
	return Vector{radius * cos, radius * sin, 0.0}
}
/**
 *     \   /|
 *      \v r/ | B
 *       \ /  |
 * --*-----
 *     \  |
 *      \v| B
 *       \|
 *
 * length of B = dot(v,n)
 * direction of B is n
 */
// reflect mirrors v about the surface normal n: v - 2*dot(v,n)*n.
// Assumes n is unit length — TODO confirm at call sites.
func reflect(v, n Vector) Vector {
	return v.Sub(n.Scale(2.0 * Dot(v, n)))
}
// refract applies Snell's law to v at a surface with normal n, where
// niOverNt is the ratio of refractive indices n_i/n_t. It returns
// (true, refracted) when refraction is possible and (false, nil) on
// total internal reflection (non-positive discriminant).
func refract(v, n Vector, niOverNt float64) (bool, *Vector) {
	uv := v.MakeUnitVector()
	dt := Dot(uv, n)
	// discriminant <= 0 means total internal reflection.
	discriminant := 1.0 - niOverNt*niOverNt*(1-dt*dt)
	if discriminant > 0 {
		refracted := uv.Sub(n.Scale(dt)).Scale(niOverNt).Sub(n.Scale(math.Sqrt(discriminant)))
		return true, &refracted
	}
	return false, nil
}
// schlick is Schlick's approximation of the Fresnel reflectance for a
// dielectric with refractive index refIdx at incidence cosine `cosine`.
func schlick(cosine, refIdx float64) float64 {
	r := (1.0 - refIdx) / (1.0 + refIdx)
	r *= r
	return r + (1.0-r)*math.Pow(1.0-cosine, 5)
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// LoadImage opens and decodes the named image file, exiting the process
// with status 1 when the file cannot be opened or decoded. Decodable
// formats are those registered with the image package (e.g. via a
// side-effect import of image/png).
func LoadImage(file string) image.Image {
	imgfile, err := os.Open(file)
	if err != nil {
		fmt.Println("file not found!")
		os.Exit(1)
	}
	defer imgfile.Close()
	img, _, err := image.Decode(imgfile)
	if err != nil {
		// Previously this error was silently dropped and a nil image
		// could be returned to the caller.
		fmt.Println("could not decode image:", err)
		os.Exit(1)
	}
	return img
}
|
package db
import (
"context"
"time"
"gcp-trace/ltrace"
"go.opencensus.io/trace"
)
// Query emulates some DB request. When traceable is true the work is
// wrapped in an OpenCensus span; the 50ms sleep stands in for real
// query latency.
func Query(ctx context.Context, traceable bool) {
	if traceable {
		// The derived context is deliberately discarded: nothing below
		// this point starts child spans.
		_, span := trace.StartSpan(ctx, ltrace.Prefix+"/db")
		defer span.End()
	}
	time.Sleep(50 * time.Millisecond)
}
|
package moex
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
// Moex is a telegraf service input that polls the Moscow Exchange (MOEX)
// ISS API for daily ticker history and pushes OHLC prices into the
// accumulator.
type Moex struct {
	// NOTE(review): start/limit are set by init but never read — Start
	// uses its own local paging variables instead.
	start, limit int
	ac           telegraf.Accumulator
	Log          telegraf.Logger `toml:"-"`
	Tickers      []string        `toml:"tickers"`
}
// History mirrors the "history" payload of an ISS response. Each row is
// [SECID, TRADEDATE, OPEN, HIGH, LOW, CLOSE], matching the
// history.columns list requested in the URL built by Start.
type History struct {
	Data [][]interface{} `json:"data"`
}

// HistoryResponse is the top-level ISS JSON envelope.
type HistoryResponse struct {
	History History `json:"history"`
}
// Description returns a one-line description of the plugin (empty).
func (m *Moex) Description() string {
	return ""
}

// SampleConfig returns example configuration for the plugin (empty).
func (m *Moex) SampleConfig() string {
	return ""
}
// Start launches a background goroutine that pages through MOEX ISS
// history for each configured ticker and feeds open/high/low/close
// prices into the accumulator. It returns immediately; failures are
// logged and the affected ticker is skipped.
//
// Fixes over the previous version: errors were built with fmt.Errorf
// and discarded (never logged or acted on), a nil response was
// dereferenced after a failed GET, response bodies were never closed,
// and an unchecked type assertion on row data could panic.
func (m *Moex) Start(ac telegraf.Accumulator) error {
	m.ac = ac
	go func() {
		for _, ticker := range m.Tickers {
			start := 0
			limit := 100
			for {
				// Throttle requests to avoid hammering the ISS API.
				time.Sleep(time.Second * 5)
				url := fmt.Sprintf("http://iss.moex.com/iss/history/engines/%s/markets/%s/boards/TQBR/securities/%s.json?start=%d&limit=%d&history.columns=SECID,TRADEDATE,OPEN,HIGH,LOW,CLOSE", "stock", "shares", ticker, start, limit)
				m.Log.Info(url)
				resp, err := http.Get(url)
				if err != nil {
					m.Log.Errorf("%s", err)
					break // give up on this ticker, move to the next
				}
				v, err := ioutil.ReadAll(resp.Body)
				// Close every body so the transport can reuse connections.
				resp.Body.Close()
				if err != nil {
					m.Log.Errorf("%s", err)
					break
				}
				r := HistoryResponse{}
				if err = json.Unmarshal(v, &r); err != nil {
					m.Log.Errorf("%s", err)
					break
				}
				if len(r.History.Data) == 0 {
					m.Log.Info("end of data")
					break
				}
				for _, t := range r.History.Data {
					// Guard row shape and types: a malformed row must not panic.
					if len(t) < 6 {
						continue
					}
					dateStr, ok := t[1].(string)
					if !ok {
						continue
					}
					tradedate, err := time.Parse("2006-01-02", dateStr)
					if err != nil {
						m.Log.Errorf("%s", err)
						continue
					}
					openPrice, _ := t[2].(float64)
					m.ac.AddFields(
						"price",
						map[string]interface{}{"value": openPrice},
						map[string]string{"type": "open", "ticker": ticker},
						tradedate,
					)
					highPrice, _ := t[3].(float64)
					m.ac.AddFields(
						"price",
						map[string]interface{}{"value": highPrice},
						map[string]string{"type": "high", "ticker": ticker},
						tradedate,
					)
					lowPrice, _ := t[4].(float64)
					m.ac.AddFields(
						"price",
						map[string]interface{}{"value": lowPrice},
						map[string]string{"type": "low", "ticker": ticker},
						tradedate,
					)
					closePrice, _ := t[5].(float64)
					m.ac.AddFields(
						"price",
						map[string]interface{}{"value": closePrice},
						map[string]string{"type": "close", "ticker": ticker},
						tradedate,
					)
				}
				start = start + limit
			}
		}
	}()
	return nil
}
// Stop is a no-op; the goroutine started by Start runs unmanaged.
// NOTE(review): consider a cancellation channel so Stop can actually
// terminate the polling loop.
func (m *Moex) Stop() {
}

// Gather is a no-op because metrics are pushed by the background
// goroutine through the accumulator (service-input model).
func (m *Moex) Gather(telegraf.Accumulator) error {
	return nil
}
// init registers the plugin with telegraf's input registry under the
// name "moex".
func init() {
	inputs.Add("moex", func() telegraf.Input {
		return &Moex{
			start: 0,
			limit: 100,
		}
	})
}
package hoist
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/square/p2/pkg/util/size"
. "github.com/anthonybishopric/gotcha"
)
// testInstall describes one fixture installation directory: its name,
// the modification time to stamp on it, and the payload size to write.
type testInstall struct {
	name      string
	modTime   time.Time
	byteCount size.ByteCount
}
// create materializes this install under parent/installs/<name>: a
// payload file of byteCount 'a' bytes, a "current"/"last" symlink when
// the name matches, and the requested mtime on the install directory.
func (i testInstall) create(parent string) error {
	path := filepath.Join(parent, "installs", i.name)
	err := os.MkdirAll(path, 0755)
	if err != nil {
		return err
	}
	payloadPath := filepath.Join(parent, "installs", i.name, "payload")
	file, err := os.OpenFile(payloadPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		// Previously this error was ignored and the nil file written to.
		return err
	}
	var soFar int64
	bytesToWrite := []byte{'a'}
	for size.ByteCount(soFar) < i.byteCount {
		written, err := file.Write(bytesToWrite)
		if err != nil {
			// Close before returning so the descriptor is not leaked.
			file.Close()
			return err
		}
		soFar += int64(written)
	}
	err = file.Close()
	if err != nil {
		return err
	}
	if i.name == "current" {
		err := os.Symlink(path, filepath.Join(parent, "current"))
		if err != nil {
			return err
		}
	}
	if i.name == "last" {
		err := os.Symlink(path, filepath.Join(parent, "last"))
		if err != nil {
			return err
		}
	}
	return os.Chtimes(path, i.modTime, i.modTime)
}
// launchableWithInstallations creates a temp dir populated with the
// given installs, wraps it in a Launchable, runs testFn against it,
// and removes the directory afterwards.
func launchableWithInstallations(t *testing.T, installs []testInstall, testFn func(*Launchable)) {
	parent, err := ioutil.TempDir("", "prune")
	if err != nil {
		// Check the error before registering cleanup; previously
		// RemoveAll was deferred before this check ran.
		t.Fatal(err)
	}
	defer os.RemoveAll(parent)
	for _, i := range installs {
		err := i.create(parent)
		if err != nil {
			t.Fatal(err)
		}
	}
	launchable := &Launchable{
		RootDir: parent,
	}
	testFn(launchable)
}
// assertShouldBePruned asserts that the named install directory no
// longer exists under the launchable's installs dir.
func assertShouldBePruned(t *testing.T, hl *Launchable, name string) {
	_, err := os.Stat(filepath.Join(hl.AllInstallsDir(), name))
	Assert(t).IsTrue(os.IsNotExist(err), fmt.Sprintf("Should have removed %v", name))
}
// assertShouldExist asserts that the named install still exists and is
// a directory under the launchable's installs dir.
func assertShouldExist(t *testing.T, hl *Launchable, name string) {
	info, err := os.Stat(filepath.Join(hl.AllInstallsDir(), name))
	Assert(t).IsNil(err, fmt.Sprintf("should not have erred stat'ing %v", name))
	Assert(t).IsTrue(info.IsDir(), fmt.Sprintf("Should not have removed the %v directory", name))
}
// TestPruneRemovesOldInstallations: three 10KiB installs against a
// 20KiB budget — the oldest ("first") must be pruned, the rest kept.
func TestPruneRemovesOldInstallations(t *testing.T) {
	launchableWithInstallations(t, []testInstall{
		{"first", time.Now().Add(-1000 * time.Hour), 10 * size.Kibibyte},
		{"second", time.Now().Add(-800 * time.Hour), 10 * size.Kibibyte},
		{"third", time.Now().Add(-600 * time.Hour), 10 * size.Kibibyte},
	}, func(hl *Launchable) {
		Assert(t).IsNil(hl.Prune(20*size.Kibibyte), "Should not have erred when pruning")
		assertShouldBePruned(t, hl, "first")
		assertShouldExist(t, hl, "second")
		assertShouldExist(t, hl, "third")
	})
}
// TestPruneIgnoresInstallsWhenUnderLimit: total usage (30KiB) is under
// the 2MiB budget, so nothing may be pruned.
func TestPruneIgnoresInstallsWhenUnderLimit(t *testing.T) {
	launchableWithInstallations(t, []testInstall{
		{"first", time.Now().Add(-1000 * time.Hour), 10 * size.Kibibyte},
		{"second", time.Now().Add(-800 * time.Hour), 10 * size.Kibibyte},
		{"third", time.Now().Add(-600 * time.Hour), 10 * size.Kibibyte},
	}, func(hl *Launchable) {
		Assert(t).IsNil(hl.Prune(2*size.Mebibyte), "Should not have erred when pruning")
		assertShouldExist(t, hl, "first")
		assertShouldExist(t, hl, "second")
		assertShouldExist(t, hl, "third")
	})
}
// TestPruneIgnoresCurrent: the install pointed at by the "current"
// symlink is exempt from pruning even though it is the oldest; the next
// oldest ("second") is pruned instead.
func TestPruneIgnoresCurrent(t *testing.T) {
	launchableWithInstallations(t, []testInstall{
		{"current", time.Now().Add(-1000 * time.Hour), 10 * size.Kibibyte},
		{"second", time.Now().Add(-800 * time.Hour), 10 * size.Kibibyte},
		{"third", time.Now().Add(-600 * time.Hour), 10 * size.Kibibyte},
	}, func(hl *Launchable) {
		Assert(t).IsNil(hl.Prune(20*size.Kibibyte), "Should not have erred when pruning")
		assertShouldExist(t, hl, "current")
		assertShouldBePruned(t, hl, "second")
		assertShouldExist(t, hl, "third")
	})
}
// TestPruneIgnoresCurrentAndLast: both the "current" and "last"
// symlink targets are exempt, so the only prunable install ("third")
// is removed despite being the newest.
func TestPruneIgnoresCurrentAndLast(t *testing.T) {
	launchableWithInstallations(t, []testInstall{
		{"current", time.Now().Add(-1000 * time.Hour), 10 * size.Kibibyte},
		{"last", time.Now().Add(-800 * time.Hour), 10 * size.Kibibyte},
		{"third", time.Now().Add(-600 * time.Hour), 10 * size.Kibibyte},
	}, func(hl *Launchable) {
		Assert(t).IsNil(hl.Prune(20*size.Kibibyte), "Should not have erred when pruning")
		assertShouldExist(t, hl, "current")
		assertShouldExist(t, hl, "last")
		assertShouldBePruned(t, hl, "third")
	})
}
|
package cgroup
// Cgroup defines the common interface to control cgroups
// including v1 and v2 implementations.
// TODO: implement systemd integration
type Cgroup interface {
	// AddProc adds a process into the cgroup.
	AddProc(pid int) error
	// Destroy deletes the cgroup.
	Destroy() error
	// CPUUsage reads total cpu usage of the cgroup.
	CPUUsage() (uint64, error)
	// MemoryUsage reads current total memory usage.
	MemoryUsage() (uint64, error)
	// MemoryMaxUsage reads max total memory usage. Does not exist in cgroup v2.
	MemoryMaxUsage() (uint64, error)
	// SetCPUBandwidth sets the cpu bandwidth. Times in ns.
	SetCPUBandwidth(quota, period uint64) error
	// SetCPUSet sets the available cpus to use (cpuset.cpus).
	SetCPUSet([]byte) error
	// SetMemoryLimit sets memory.limit_in_bytes.
	SetMemoryLimit(uint64) error
	// SetProcLimit sets pids.max.
	SetProcLimit(uint64) error
}
|
package goauth
import (
"bytes"
"net/http"
"net/http/httptest"
"testing"
)
// TestCheckInScopeTrue verifies that checkInScope reports membership
// when the value is present in the scope list.
func TestCheckInScopeTrue(t *testing.T) {
	scope := []string{"1", "2", "3"}
	check := "1"
	b := checkInScope(check, scope)
	if !b {
		t.Error("Test failed, expected true but got false")
	}
}
// TestCheckInScopeFalse verifies that checkInScope reports absence when
// the value is not in the scope list.
func TestCheckInScopeFalse(t *testing.T) {
	scope := []string{"1", "2", "3"}
	check := "4"
	b := checkInScope(check, scope)
	if b {
		t.Error("Test failed, expected false but got true")
	}
}
// TestCheckAuth exercises the Secure middleware: a request with no
// bearer token must be rejected with 401 and the standard access_denied
// body, while a request carrying the known token and scope must reach
// the wrapped handler and get its "approved" response.
// NOTE(review): newTestHandler presumably pre-registers this grant so
// the token resolves — confirm.
func TestCheckAuth(t *testing.T) {
	grant := Grant{AccessToken: "testtoken", Scope: []string{"testscope"}}
	handler := newTestHandler()
	// Create the handler
	middlewareHandler := handler.Secure([]string{"testscope"}, func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("approved"))
	})
	testCases([]testCase{
		// Should throw an error due to no bearer token being passed on the request
		{
			"GET",
			"",
			nil,
			middlewareHandler,
			func(r *http.Request) {
			},
			func(r *httptest.ResponseRecorder) {
				if r.Code != 401 {
					t.Errorf("Test failed, status %v", r.Code)
				}
				expected := []byte(`{"code":"access_denied","description":"The resource owner or authorization server denied the request."}` + "\n")
				if !bytes.Equal(r.Body.Bytes(), expected) {
					t.Errorf("Test failed, expected %s but got %s", expected, r.Body.Bytes())
				}
			},
		},
		// Should approve the request and call the underlying handler
		{
			"GET",
			"",
			nil,
			middlewareHandler,
			func(r *http.Request) {
				r.Header.Set("Authorization", "Bearer "+grant.AccessToken.RawString())
			},
			func(r *httptest.ResponseRecorder) {
				if r.Code != 200 {
					t.Errorf("Test failed, status %v", r.Code)
				}
				expected := []byte(`approved`)
				if !bytes.Equal(r.Body.Bytes(), expected) {
					t.Errorf("Test failed, expected %s but got %s", expected, r.Body.Bytes())
				}
			},
		},
	})
}
|
package storage_test
import (
"fmt"
"testing"
"github.com/lorthos/gosu/storage"
"strings"
)
//https://groups.google.com/forum/#!topic/google-appengine-go/Kh1eLUROq90
//https://blog.golang.org/gobs-of-data
// assertEqual fails the test when a != b. When message is empty a
// default "a != b" message is generated. Both values are printed before
// failing to aid debugging.
func assertEqual(t *testing.T, a interface{}, b interface{}, message string) {
	if a == b {
		return
	}
	if len(message) == 0 {
		message = fmt.Sprintf("%v != %v", a, b)
	}
	// Use %v instead of %b: %b is only meaningful for numeric types and
	// rendered other values as noise like "%!b(string=...)".
	fmt.Println("value 1: " + fmt.Sprintf("%v", a))
	fmt.Println("value 2: " + fmt.Sprintf("%v", b))
	t.Fatal(message)
}
// TestRoundTrip checks that a string survives Serialize → DeSerialize
// unchanged (strings.Compare returns 0 for equal strings).
func TestRoundTrip(t *testing.T) {
	to_store := "value1"
	var roundTripped string
	storage.DeSerialize(storage.Serialize(to_store), &roundTripped)
	assertEqual(t, 0, strings.Compare(roundTripped, to_store), "roundtrip should work")
}
|
package main
import (
"context"
"fmt"
"os/exec"
"time"
)
// result holds the outcome of running a command.
type result struct {
	output []byte // combined stdout+stderr of the child process
	err    error  // error from running the command, if any
}
// main runs a shell command in a goroutine under a cancellable context,
// optionally cancels it early, then waits for and prints the result.
func main() {
	// Run a cmd in a goroutine ("sleep 3;echo helloworld;"); the
	// original intent was to kill it partway through via cancelFunc.
	var (
		ctx        context.Context
		cancelFunc context.CancelFunc
		resultChan chan *result
	)
	resultChan = make(chan *result, 1000)
	// Build the cmd bound to a cancellable context.
	ctx, cancelFunc = context.WithCancel(context.TODO())
	go func() {
		cmd := exec.CommandContext(ctx, "/bin/sh", "-c", "sleep 3;echo helloworld;")
		// Run the command and capture the child's combined output (pipe).
		output, err := cmd.CombinedOutput()
		if err != nil {
			// NOTE(review): on error nothing is sent to resultChan, so
			// the receive below would block forever — confirm intended.
			fmt.Println("ERROR=", err)
			return
		}
		// Normal completion: print the child's output.
		fmt.Println("子协程 output:", string(output))
		resultChan <- &result{
			err:    err,
			output: output,
		}
	}()
	// Continue in the main goroutine.
	time.Sleep(1 * time.Second)
	// Simulate cancelling the task (currently disabled).
	if false {
		cancelFunc()
	}
	// Wait for the child goroutine and print the task's result.
	fmt.Println("等待中...")
	res := <-resultChan
	fmt.Println("res:", string(res.output))
}
|
// Copyright 2020 The Reed Developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
package vm
import (
"bytes"
"github.com/reed/crypto"
"github.com/reed/errors"
"github.com/reed/vm/vmcommon"
)
var (
	// vmErr is the sentinel wrapped by all script execution failures.
	vmErr = errors.New("virtualMachine run error")
)

// signFunc verifies a signature sig against a public key pk.
type signFunc func(pk []byte, sig []byte) bool

// VM is a minimal stack machine that executes a concatenated
// scriptSig+scriptPK byte script.
type VM struct {
	script []byte
	stack  [][]byte
	signTx signFunc
}
// NewVirtualMachine builds a VM whose script is the signature script
// followed immediately by the pubkey script.
func NewVirtualMachine(scriptSig []byte, scriptPK []byte, signTx signFunc) *VM {
	combined := make([]byte, 0, len(scriptSig)+len(scriptPK))
	combined = append(combined, scriptSig...)
	combined = append(combined, scriptPK...)
	return &VM{
		script: combined,
		signTx: signTx,
	}
}
// Run interprets the script against a byte-slice stack. Supported
// opcodes: OpPushData64/OpPushData32 (push the following 64/32 bytes),
// OpDup (duplicate top), OpHash256 (hash top via crypto.Sha256),
// OpEqualVerify (pop two, fail unless equal), OpCheckSig (pop pubkey
// and signature, verify via signTx). Returns nil when the script runs
// to completion without a failed verification.
//
// Fix: after `pointer++` consumed the opcode byte, skipping a push
// payload advanced by size-1, leaving the pointer on the payload's
// last byte, which was then misread as the next opcode.
//
// NOTE(review): there are still no bounds checks — a truncated push
// slices past the end of the script, and OpDup/OpHash256/etc. on an
// empty stack panic; confirm scripts are validated upstream.
func (v *VM) Run() error {
	scriptLen := len(v.script)
	// push appends data to the top of the stack.
	push := func(data []byte) {
		v.stack = append(v.stack, data)
	}
	// pop removes and returns the top of the stack.
	pop := func() []byte {
		top := v.stack[len(v.stack)-1]
		v.stack = v.stack[:len(v.stack)-1]
		return top
	}
	pointer := 0
	for {
		if pointer >= scriptLen {
			break
		}
		op := v.script[pointer : pointer+1]
		pointer++
		switch {
		case bytes.Equal(op, []byte{byte(vmcommon.OpPushData64)}):
			push(v.script[pointer : pointer+64])
			pointer += 64
		case bytes.Equal(op, []byte{byte(vmcommon.OpPushData32)}):
			push(v.script[pointer : pointer+32])
			pointer += 32
		case bytes.Equal(op, []byte{byte(vmcommon.OpDup)}):
			d := v.stack[len(v.stack)-1]
			v.stack = append(v.stack, d)
		case bytes.Equal(op, []byte{byte(vmcommon.OpHash256)}):
			push(crypto.Sha256(pop()))
		case bytes.Equal(op, []byte{byte(vmcommon.OpEqualVerify)}):
			a := pop()
			b := pop()
			if !bytes.Equal(a, b) {
				return errors.Wrap(vmErr, "OP_EQUAL_VERIFY failed")
			}
		case bytes.Equal(op, []byte{byte(vmcommon.OpCheckSig)}):
			if ok := v.signTx(pop(), pop()); !ok {
				return errors.Wrap(vmErr, "OP_CHECK_SIG signature failed")
			}
		}
	}
	return nil
}
|
package pokemon
import (
"fmt"
"net/http"
"github.com/skos-ninja/truelayer-tech/svc/pokemon/app"
"github.com/skos-ninja/truelayer-tech/svc/pokemon/rpc"
"github.com/gin-gonic/gin"
"github.com/spf13/cobra"
)
// CMD is the cobra command that starts the pokemon HTTP service.
var CMD = &cobra.Command{
	Use:  "pokemon",
	RunE: runE,
}
// runE wires up the gin router (health check plus the pokemon RPC
// endpoint) and blocks serving HTTP on the port taken from the
// command's --port flag.
func runE(cmd *cobra.Command, args []string) error {
	// We use https://github.com/gin-gonic/gin here as this is
	// the router I have typically used and the overhead compared
	// to using net/http directly is negligible compared to the cost
	// of development of the features gin provides.
	//
	// The default gin engine includes request logging out of the box
	// and also includes a standard recovery handler for panics.
	//
	// In previous implementations of gin by me it has been abstracted
	// away to provide standard functionality across all microservices.
	// This would include standard auth handling, standardised error responses,
	// error reporting to places like sentry.io and metric tracking using prometheus.
	//
	// Due to the nature of this service being just an example I have not included
	// this functionality as it would increase the time of development here whilst
	// also not being required by the tech test.
	r := gin.Default()
	r.GET("/health", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"status": "OK"})
	})
	// Note: the local variable shadows the imported app package from
	// here on; 128 is the app's cache/capacity argument — confirm.
	app, err := app.New(128)
	if err != nil {
		return err
	}
	rpc := rpc.New(app)
	r.GET("/pokemon/:id", rpc.GetPokemon)
	// As this is designed to be run inside a container we should only
	// allow binding to 0.0.0.0 due to how networking is done within docker.
	//
	// A good article explaining this can be found here:
	// https://pythonspeed.com/articles/docker-connection-refused/
	port, err := cmd.Flags().GetInt("port")
	if err != nil {
		return err
	}
	return r.Run(fmt.Sprintf("0.0.0.0:%v", port))
}
|
package contracts
import (
"context"
"math/big"
"time"
"github.com/smartcontractkit/integrations-framework/client"
"github.com/smartcontractkit/integrations-framework/config"
"github.com/smartcontractkit/integrations-framework/tools"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
"github.com/rs/zerolog/log"
)
var _ = Describe("Chainlink Node", func() {
var conf *config.Config
BeforeEach(func() {
var err error
conf, err = config.NewWithPath(config.LocalConfig, "../config")
Expect(err).ShouldNot(HaveOccurred())
})
DescribeTable("deploy and use basic functionality", func(
initFunc client.BlockchainNetworkInit,
ocrOptions OffchainOptions,
) {
// Setup
networkConfig, err := initFunc(conf)
Expect(err).ShouldNot(HaveOccurred())
blockchainClient, err := client.NewBlockchainClient(networkConfig)
Expect(err).ShouldNot(HaveOccurred())
contractDeployer, err := NewContractDeployer(blockchainClient)
Expect(err).ShouldNot(HaveOccurred())
wallets, err := networkConfig.Wallets()
Expect(err).ShouldNot(HaveOccurred())
_, err = contractDeployer.DeployLinkTokenContract(wallets.Default())
Expect(err).ShouldNot(HaveOccurred())
// Connect to running chainlink nodes
chainlinkNodes, err := client.ConnectToTemplateNodes()
Expect(err).ShouldNot(HaveOccurred())
Expect(len(chainlinkNodes)).To(Equal(5))
// Fund each chainlink node
for _, node := range chainlinkNodes {
nodeEthKeys, err := node.ReadETHKeys()
Expect(err).ShouldNot(HaveOccurred())
Expect(len(nodeEthKeys.Data)).Should(BeNumerically(">=", 1))
primaryEthKey := nodeEthKeys.Data[0]
err = blockchainClient.Fund(
wallets.Default(),
primaryEthKey.Attributes.Address,
big.NewInt(2000000000000000000), big.NewInt(2000000000000000000),
)
Expect(err).ShouldNot(HaveOccurred())
}
// Deploy and config OCR contract
ocrInstance, err := contractDeployer.DeployOffChainAggregator(wallets.Default(), ocrOptions)
Expect(err).ShouldNot(HaveOccurred())
err = ocrInstance.SetConfig(wallets.Default(), chainlinkNodes, DefaultOffChainAggregatorConfig())
Expect(err).ShouldNot(HaveOccurred())
err = ocrInstance.Fund(wallets.Default(), big.NewInt(2000000000000000), big.NewInt(2000000000000000))
Expect(err).ShouldNot(HaveOccurred())
// Create external adapter, returns 5 every time
go tools.NewExternalAdapter("6644")
// Initialize bootstrap node
bootstrapNode := chainlinkNodes[0]
bootstrapP2PIds, err := bootstrapNode.ReadP2PKeys()
Expect(err).ShouldNot(HaveOccurred())
bootstrapP2PId := bootstrapP2PIds.Data[0].Attributes.PeerID
bootstrapSpec := &client.OCRBootstrapJobSpec{
ContractAddress: ocrInstance.Address(),
P2PPeerID: bootstrapP2PId,
IsBootstrapPeer: true,
}
_, err = bootstrapNode.CreateJob(bootstrapSpec)
Expect(err).ShouldNot(HaveOccurred())
// Send OCR job to other nodes
for index := 1; index < len(chainlinkNodes); index++ {
nodeP2PIds, err := chainlinkNodes[index].ReadP2PKeys()
Expect(err).ShouldNot(HaveOccurred())
nodeP2PId := nodeP2PIds.Data[0].Attributes.PeerID
nodeTransmitterAddresses, err := chainlinkNodes[index].ReadETHKeys()
Expect(err).ShouldNot(HaveOccurred())
nodeTransmitterAddress := nodeTransmitterAddresses.Data[0].Attributes.Address
nodeOCRKeys, err := chainlinkNodes[index].ReadOCRKeys()
Expect(err).ShouldNot(HaveOccurred())
nodeOCRKeyId := nodeOCRKeys.Data[0].ID
observationSource := `fetch [type=http method=POST url="http://host.docker.internal:6644/five" requestData="{}"];
parse [type=jsonparse path="data,result"];
fetch -> parse;`
ocrSpec := &client.OCRTaskJobSpec{
ContractAddress: ocrInstance.Address(),
P2PPeerID: nodeP2PId,
P2PBootstrapPeers: []string{bootstrapP2PId},
KeyBundleID: nodeOCRKeyId,
TransmitterAddress: nodeTransmitterAddress,
ObservationSource: observationSource,
}
_, err = chainlinkNodes[index].CreateJob(ocrSpec)
Expect(err).ShouldNot(HaveOccurred())
}
// Request a new round from the OCR
err = ocrInstance.RequestNewRound(wallets.Default())
Expect(err).ShouldNot(HaveOccurred())
// Wait for a round
for i := 0; i < 60; i++ {
round, err := ocrInstance.GetLatestRound(context.Background())
Expect(err).ShouldNot(HaveOccurred())
log.Info().
Str("Contract Address", ocrInstance.Address()).
Str("Answer", round.Answer.String()).
Str("Round ID", round.RoundId.String()).
Str("Answered in Round", round.AnsweredInRound.String()).
Str("Started At", round.StartedAt.String()).
Str("Updated At", round.UpdatedAt.String()).
Msg("Latest Round Data")
if round.RoundId.Cmp(big.NewInt(0)) > 0 {
break // Break when OCR round processes
}
time.Sleep(time.Second)
}
// Check answer is as expected
answer, err := ocrInstance.GetLatestAnswer(context.Background())
log.Info().Str("Answer", answer.String()).Msg("Final Answer")
Expect(err).ShouldNot(HaveOccurred())
Expect(answer.Int64()).Should(Equal(int64(5)))
},
Entry("on Ethereum Hardhat", client.NewHardhatNetwork, DefaultOffChainAggregatorOptions()),
)
})
// Contracts covers deployment of and basic interaction with the core
// contracts (Storage, FluxAggregator, OffChain Aggregator) against a
// simulated blockchain network.
var _ = Describe("Contracts", func() {
    var conf *config.Config
    // Load the local test configuration before every table entry.
    BeforeEach(func() {
        var err error
        conf, err = config.NewWithPath(config.LocalConfig, "../config")
        Expect(err).ShouldNot(HaveOccurred())
    })
    DescribeTable("deploy and interact with the storage contract", func(
        initFunc client.BlockchainNetworkInit,
        value *big.Int,
    ) {
        // Setup Network
        networkConfig, err := initFunc(conf)
        Expect(err).ShouldNot(HaveOccurred())
        // NOTE: this shadows the imported "client" package for the rest of
        // the closure body.
        client, err := client.NewBlockchainClient(networkConfig)
        Expect(err).ShouldNot(HaveOccurred())
        wallets, err := networkConfig.Wallets()
        Expect(err).ShouldNot(HaveOccurred())
        contractDeployer, err := NewContractDeployer(client)
        Expect(err).ShouldNot(HaveOccurred())
        storeInstance, err := contractDeployer.DeployStorageContract(wallets.Default())
        Expect(err).ShouldNot(HaveOccurred())
        // Interact with contract: write the table's value, then read it back.
        err = storeInstance.Set(value)
        Expect(err).ShouldNot(HaveOccurred())
        val, err := storeInstance.Get(context.Background())
        Expect(err).ShouldNot(HaveOccurred())
        Expect(val).To(Equal(value))
    },
        Entry("on Ethereum Hardhat", client.NewHardhatNetwork, big.NewInt(5)),
    )
    DescribeTable("deploy and interact with the FluxAggregator contract", func(
        initFunc client.BlockchainNetworkInit,
        fluxOptions FluxAggregatorOptions,
    ) {
        // Setup network and client
        networkConfig, err := initFunc(conf)
        Expect(err).ShouldNot(HaveOccurred())
        client, err := client.NewBlockchainClient(networkConfig)
        Expect(err).ShouldNot(HaveOccurred())
        wallets, err := networkConfig.Wallets()
        Expect(err).ShouldNot(HaveOccurred())
        contractDeployer, err := NewContractDeployer(client)
        Expect(err).ShouldNot(HaveOccurred())
        // Deploy LINK contract and sanity-check it by token name.
        linkInstance, err := contractDeployer.DeployLinkTokenContract(wallets.Default())
        Expect(err).ShouldNot(HaveOccurred())
        name, err := linkInstance.Name(context.Background())
        Expect(err).ShouldNot(HaveOccurred())
        Expect(name).To(Equal("ChainLink Token"))
        // Deploy FluxMonitor contract and fund it (LINK only, zero native token).
        fluxInstance, err := contractDeployer.DeployFluxAggregatorContract(wallets.Default(), fluxOptions)
        Expect(err).ShouldNot(HaveOccurred())
        err = fluxInstance.Fund(wallets.Default(), big.NewInt(0), big.NewInt(50000000000))
        Expect(err).ShouldNot(HaveOccurred())
        // Interact with contract: the configured description must round-trip.
        desc, err := fluxInstance.Description(context.Background())
        Expect(err).ShouldNot(HaveOccurred())
        Expect(desc).To(Equal(fluxOptions.Description))
    },
        Entry("on Ethereum Hardhat", client.NewHardhatNetwork, DefaultFluxAggregatorOptions()),
    )
    DescribeTable("deploy and interact with the OffChain Aggregator contract", func(
        initFunc client.BlockchainNetworkInit,
        ocrOptions OffchainOptions,
    ) {
        // Setup network and client
        networkConfig, err := initFunc(conf)
        Expect(err).ShouldNot(HaveOccurred())
        // NOTE(review): the other tables use NewBlockchainClient here —
        // confirm NewEthereumClient is intentional.
        client, err := client.NewEthereumClient(networkConfig)
        Expect(err).ShouldNot(HaveOccurred())
        wallets, err := networkConfig.Wallets()
        Expect(err).ShouldNot(HaveOccurred())
        contractDeployer, err := NewContractDeployer(client)
        Expect(err).ShouldNot(HaveOccurred())
        // Deploy LINK contract and sanity-check it by token name.
        linkInstance, err := contractDeployer.DeployLinkTokenContract(wallets.Default())
        Expect(err).ShouldNot(HaveOccurred())
        name, err := linkInstance.Name(context.Background())
        Expect(err).ShouldNot(HaveOccurred())
        Expect(name).To(Equal("ChainLink Token"))
        // Deploy Offchain contract
        offChainInstance, err := contractDeployer.DeployOffChainAggregator(wallets.Default(), ocrOptions)
        Expect(err).ShouldNot(HaveOccurred())
        // NOTE(review): a nil native-token amount is passed here while the
        // flux table passes big.NewInt(0) — confirm Fund accepts nil.
        err = offChainInstance.Fund(wallets.Default(), nil, big.NewInt(50000000000))
        Expect(err).ShouldNot(HaveOccurred())
    },
        Entry("on Ethereum Hardhat", client.NewHardhatNetwork, DefaultOffChainAggregatorOptions()),
    )
})
|
package etcdummy
import (
"log"
"net"
"sort"
"sync"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc/mvccpb"
)
var (
    // ErrNotImplemented is returned by RPCs this dummy server does not support.
    ErrNotImplemented = status.Errorf(codes.Unimplemented, "not implemented")
    // ErrInvalidLease is returned when a request references an unknown lease ID.
    ErrInvalidLease = status.Errorf(codes.NotFound, "invalid lease")
)
type Lease struct {
ID int64
GrantedTTL int64
Expires time.Time
}
func (l Lease) TTL() int64 {
ttl := int64(time.Until(l.Expires) / time.Second)
if ttl < 0 {
return 0
}
return ttl
}
// Watch records one active watcher: the key range it observes, the event
// filters requested at creation, and the gRPC stream events are delivered on.
type Watch struct {
    ID         int64
    Start, End string
    Filters    []etcdserverpb.WatchCreateRequest_FilterType
    stream     etcdserverpb.Watch_WatchServer
}

// Match reports whether key falls inside this watch's range. An empty End
// means the watch targets exactly the single key Start; an End of "\x00"
// makes the range open-ended; and a Start of "\x00" combined with an open
// end matches every key.
func (w Watch) Match(key string) bool {
    if w.End == "" {
        return key == w.Start
    }
    openEnd := w.End == "\x00"
    if w.Start == "\x00" && openEnd {
        return true
    }
    return key >= w.Start && (key < w.End || openEnd)
}
// Server is an in-memory, partially functional stand-in for an etcd server.
// It implements the KV, Lease, and Watch gRPC services. The embedded Mutex
// guards all other fields; methods suffixed "Locked" expect the caller to
// already hold it.
type Server struct {
    sync.Mutex
    grpcServer *grpc.Server // created in ListenAndServe
    addr       net.Addr     // bound listen address; nil until the listener is up
    KV         map[string]mvccpb.KeyValue // current key-value state
    Revision   int64                      // store revision, bumped on each mutation
    Leases     map[int64]Lease // active leases by ID
    LastLease  int64           // last auto-assigned lease ID
    Watches    map[int64]Watch // active watches by ID
    LastWatch  int64           // last assigned watch ID
}
// New constructs an empty Server with all internal maps initialized and
// ready for use; call ListenAndServe to start serving.
func New() *Server {
    srv := new(Server)
    srv.KV = make(map[string]mvccpb.KeyValue)
    srv.Leases = make(map[int64]Lease)
    srv.Watches = make(map[int64]Watch)
    return srv
}
// Addr blocks until the server has bound its listener, then returns the
// bound network address. It polls under the lock every 10ms.
func (s *Server) Addr() net.Addr {
    for {
        s.Lock()
        bound := s.addr
        s.Unlock()
        if bound == nil {
            time.Sleep(10 * time.Millisecond)
            continue
        }
        return bound
    }
}
// Close gracefully stops the gRPC server, if one was ever started.
// It always returns nil.
func (s *Server) Close() error {
    if srv := s.grpcServer; srv != nil {
        srv.GracefulStop()
    }
    return nil
}
// ListenAndServe binds to the given TCP address, registers the Lease, KV,
// and Watch services, and serves until the server is stopped. It returns
// any listen or serve error.
func (s *Server) ListenAndServe(bind string) error {
    s.Lock()
    s.grpcServer = grpc.NewServer()
    s.Unlock()

    etcdserverpb.RegisterLeaseServer(s.grpcServer, s)
    etcdserverpb.RegisterKVServer(s.grpcServer, s)
    etcdserverpb.RegisterWatchServer(s.grpcServer, s)

    lis, err := net.Listen("tcp", bind)
    if err != nil {
        // FIX: this path previously called s.Unlock() without holding the
        // mutex, which panics with "sync: unlock of unlocked mutex".
        return err
    }
    defer lis.Close()

    // Publish the bound address so Addr() can stop polling.
    s.Lock()
    s.addr = lis.Addr()
    s.Unlock()

    return s.grpcServer.Serve(lis)
}
// IterateKeysLocked calls f for every key in [start, end), stopping at the
// first error f returns. An empty end targets exactly the key start; an end
// of "\x00" makes the range open-ended, and start "\x00" with an open end
// visits every key. The caller must hold s.Mutex. Iteration order follows
// Go's randomized map order.
func (s *Server) IterateKeysLocked(start, end string, f func(k string, kv mvccpb.KeyValue) error) error {
    if end == "" {
        // Single-key lookup.
        if kv, ok := s.KV[start]; ok {
            return f(start, kv)
        }
        return nil
    }
    openEnd := end == "\x00"
    all := start == "\x00" && openEnd
    for k, v := range s.KV {
        if !all && !(k >= start && (k < end || openEnd)) {
            continue
        }
        if err := f(k, v); err != nil {
            return err
        }
    }
    return nil
}
// Range gets the keys in the range from the key-value store, returning them
// sorted by key along with their count. Expired leases are reaped first.
func (s *Server) Range(ctx context.Context, req *etcdserverpb.RangeRequest) (*etcdserverpb.RangeResponse, error) {
    s.Lock()
    defer s.Unlock()

    if err := s.checkExpiredLeasesLocked(); err != nil {
        return nil, err
    }

    var kvs []*mvccpb.KeyValue
    collect := func(_ string, kv mvccpb.KeyValue) error {
        // kv is a per-call copy, so taking its address is safe.
        kvs = append(kvs, &kv)
        return nil
    }
    if err := s.IterateKeysLocked(string(req.Key), string(req.RangeEnd), collect); err != nil {
        return nil, err
    }

    SortKVs(kvs)
    return &etcdserverpb.RangeResponse{
        Kvs:   kvs,
        Count: int64(len(kvs)),
    }, nil
}
// Put puts the given key into the key-value store.
// A put request increments the revision of the key-value store
// and generates one event in the event history.
func (s *Server) Put(ctx context.Context, req *etcdserverpb.PutRequest) (*etcdserverpb.PutResponse, error) {
    s.Lock()
    defer s.Unlock()
    // Reap lapsed leases first so their keys are gone before this write.
    if err := s.checkExpiredLeasesLocked(); err != nil {
        return nil, err
    }
    s.Revision++
    key := string(req.Key)
    // Start from the previous value (zero value if absent) so that the
    // Ignore{Value,Lease} flags carry existing fields forward.
    prevKV, ok := s.KV[key]
    kv := prevKV
    kv.Key = req.Key
    if !req.IgnoreValue {
        kv.Value = req.Value
    }
    if !req.IgnoreLease {
        kv.Lease = req.Lease
    }
    kv.ModRevision = s.Revision
    if kv.CreateRevision == 0 {
        // First write of this key: record its creation revision.
        kv.CreateRevision = s.Revision
    }
    s.KV[key] = kv
    // Notify every watch whose range covers this key.
    // NOTE(review): watch Filters are not consulted here, and PrevKv is sent
    // even when the key did not previously exist — confirm both are intended.
    for _, w := range s.Watches {
        if w.Match(key) {
            if err := w.stream.Send(&etcdserverpb.WatchResponse{
                WatchId: w.ID,
                Events: []*mvccpb.Event{
                    {
                        Type: mvccpb.PUT,
                        Kv: &kv,
                        PrevKv: &prevKV,
                    },
                },
            }); err != nil {
                return nil, err
            }
        }
    }
    resp := &etcdserverpb.PutResponse{}
    if ok {
        // Only report a previous KV when the key existed before this put.
        resp.PrevKv = &prevKV
    }
    return resp, nil
}
// DeleteRange deletes the given range from the key-value store.
// A delete request increments the revision of the key-value store
// and generates a delete event in the event history for every deleted key.
func (s *Server) DeleteRange(ctx context.Context, req *etcdserverpb.DeleteRangeRequest) (*etcdserverpb.DeleteRangeResponse, error) {
    s.Lock()
    defer s.Unlock()
    if err := s.checkExpiredLeasesLocked(); err != nil {
        return nil, err
    }
    // One revision bump for the whole delete, however many keys it removes.
    s.Revision++
    var prevKvs []*mvccpb.KeyValue
    start := string(req.Key)
    end := string(req.RangeEnd)
    if err := s.IterateKeysLocked(start, end, func(k string, kv mvccpb.KeyValue) error {
        // kv is a per-call copy, so taking its address here is safe.
        prevKvs = append(prevKvs, &kv)
        // Notify watches covering this key before removing it.
        for _, w := range s.Watches {
            if w.Match(k) {
                if err := w.stream.Send(&etcdserverpb.WatchResponse{
                    WatchId: w.ID,
                    Events: []*mvccpb.Event{
                        {
                            Type: mvccpb.DELETE,
                            PrevKv: &kv,
                        },
                    },
                }); err != nil {
                    return err
                }
            }
        }
        // Deleting while ranging over s.KV is well-defined in Go.
        delete(s.KV, k)
        return nil
    }); err != nil {
        return nil, err
    }
    SortKVs(prevKvs)
    return &etcdserverpb.DeleteRangeResponse{
        PrevKvs: prevKvs,
        Deleted: int64(len(prevKvs)),
    }, nil
}
// Txn processes multiple requests in a single transaction.
// A txn request increments the revision of the key-value store
// and generates events with the same revision for every completed request.
// It is not allowed to modify the same key several times within one txn.
//
// Transactions are not supported by this dummy server.
func (s *Server) Txn(ctx context.Context, req *etcdserverpb.TxnRequest) (*etcdserverpb.TxnResponse, error) {
    return nil, ErrNotImplemented
}
// Compact compacts the event history in the etcd key-value store. The key-value
// store should be periodically compacted or the event history will continue to grow
// indefinitely.
//
// This dummy server keeps no event history, so compaction is a no-op. It now
// returns an empty (non-nil) response rather than a nil message so the gRPC
// layer always has a concrete value to marshal.
func (s *Server) Compact(ctx context.Context, req *etcdserverpb.CompactionRequest) (*etcdserverpb.CompactionResponse, error) {
    return &etcdserverpb.CompactionResponse{}, nil
}
// checkExpiredLeases revokes any lapsed leases, taking the server lock for
// the duration of the sweep.
func (s *Server) checkExpiredLeases() error {
    s.Lock()
    err := s.checkExpiredLeasesLocked()
    s.Unlock()
    return err
}
// checkExpiredLeasesLocked revokes every lease whose TTL has reached zero
// and bumps the revision once if anything was revoked. The caller must hold
// s.Mutex.
func (s *Server) checkExpiredLeasesLocked() error {
    expired := 0
    for _, lease := range s.Leases {
        if lease.TTL() != 0 {
            continue
        }
        if err := s.leaseRevokeLocked(lease); err != nil {
            return err
        }
        expired++
    }
    if expired > 0 {
        s.Revision++
    }
    return nil
}
// LeaseGrant registers a lease that expires unless refreshed by keep-alives
// within its TTL; keys attached to an expired lease are deleted and produce
// delete events. A client-chosen ID of 0 asks the server to allocate the
// next available ID.
func (s *Server) LeaseGrant(ctx context.Context, req *etcdserverpb.LeaseGrantRequest) (*etcdserverpb.LeaseGrantResponse, error) {
    s.Lock()
    defer s.Unlock()

    leaseID := req.ID
    if leaseID == 0 {
        // Auto-allocate the next lease ID.
        s.LastLease++
        leaseID = s.LastLease
    }

    granted := Lease{
        ID:         leaseID,
        GrantedTTL: req.TTL,
        Expires:    time.Now().Add(time.Duration(req.TTL) * time.Second),
    }
    s.Leases[leaseID] = granted

    return &etcdserverpb.LeaseGrantResponse{
        ID:  granted.ID,
        TTL: granted.TTL(),
    }, nil
}
// leaseRevokeLocked deletes every key attached to lease l, emitting a DELETE
// event to each watch whose range covers the key, then forgets the lease
// itself. It returns the first watch-stream send error, leaving the sweep
// partially applied in that case. The caller must hold s.Mutex.
func (s *Server) leaseRevokeLocked(l Lease) error {
    for k, kv := range s.KV {
        if kv.Lease == l.ID {
            for _, w := range s.Watches {
                if w.Match(k) {
                    if err := w.stream.Send(&etcdserverpb.WatchResponse{
                        WatchId: w.ID,
                        Events: []*mvccpb.Event{
                            {
                                Type: mvccpb.DELETE,
                                PrevKv: &kv,
                            },
                        },
                    }); err != nil {
                        return err
                    }
                }
            }
            // Deleting the current entry while ranging is well-defined in Go.
            delete(s.KV, k)
        }
    }
    delete(s.Leases, l.ID)
    return nil
}
// LeaseRevoke revokes a lease. All keys attached to the lease are deleted
// and watchers of those keys are notified. Unknown lease IDs yield
// ErrInvalidLease.
func (s *Server) LeaseRevoke(ctx context.Context, req *etcdserverpb.LeaseRevokeRequest) (*etcdserverpb.LeaseRevokeResponse, error) {
    s.Lock()
    defer s.Unlock()
    l, ok := s.Leases[req.ID]
    if !ok {
        return nil, ErrInvalidLease
    }
    s.Revision++
    // FIX: propagate revocation failures (e.g. a watcher stream send error)
    // instead of silently discarding the returned error as before.
    if err := s.leaseRevokeLocked(l); err != nil {
        return nil, err
    }
    return &etcdserverpb.LeaseRevokeResponse{}, nil
}
// LeaseKeepAlive streams keep-alive requests from the client, refreshing the
// matching lease and answering each request with the lease's remaining TTL.
//
// Fixes over the previous implementation:
//   - the stream was processed in a goroutine while the handler returned
//     immediately; returning from a streaming handler ends the RPC, so the
//     stream was torn down before keep-alives could be served. The loop now
//     runs synchronously until the client closes the stream.
//   - s.Leases was read and written without holding the lock, racing with
//     the other handlers; accesses are now guarded by s.Mutex.
func (s *Server) LeaseKeepAlive(stream etcdserverpb.Lease_LeaseKeepAliveServer) error {
    for {
        req, err := stream.Recv()
        if err != nil {
            // io.EOF (client done) and transport errors both end the stream.
            log.Println(err)
            return nil
        }

        s.Lock()
        l, ok := s.Leases[req.ID]
        if !ok {
            s.Unlock()
            log.Println(ErrInvalidLease)
            continue
        }
        // Refresh the lease to its originally granted TTL.
        l.Expires = time.Now().Add(time.Duration(l.GrantedTTL) * time.Second)
        s.Leases[req.ID] = l
        ttl := l.TTL()
        s.Unlock()

        if err := stream.Send(&etcdserverpb.LeaseKeepAliveResponse{
            ID:  req.ID,
            TTL: ttl,
        }); err != nil {
            log.Println(err)
            return nil
        }
    }
}
// LeaseTimeToLive reports the remaining and granted TTL of a lease along
// with every key currently attached to it. Unknown lease IDs yield
// ErrInvalidLease.
func (s *Server) LeaseTimeToLive(ctx context.Context, req *etcdserverpb.LeaseTimeToLiveRequest) (*etcdserverpb.LeaseTimeToLiveResponse, error) {
    s.Lock()
    defer s.Unlock()

    lease, ok := s.Leases[req.ID]
    if !ok {
        return nil, ErrInvalidLease
    }

    // Collect the keys attached to this lease.
    attached := [][]byte{}
    for key, kv := range s.KV {
        if kv.Lease != req.ID {
            continue
        }
        attached = append(attached, []byte(key))
    }

    return &etcdserverpb.LeaseTimeToLiveResponse{
        ID:         lease.ID,
        TTL:        lease.TTL(),
        GrantedTTL: lease.GrantedTTL,
        Keys:       attached,
    }, nil
}
// LeaseLeases lists the IDs of all currently known leases.
func (s *Server) LeaseLeases(ctx context.Context, req *etcdserverpb.LeaseLeasesRequest) (*etcdserverpb.LeaseLeasesResponse, error) {
    s.Lock()
    defer s.Unlock()

    var statuses []*etcdserverpb.LeaseStatus
    for _, lease := range s.Leases {
        statuses = append(statuses, &etcdserverpb.LeaseStatus{ID: lease.ID})
    }
    return &etcdserverpb.LeaseLeasesResponse{Leases: statuses}, nil
}
// Watch serves the bidirectional watch stream. Create requests register a
// watcher over a key range; cancel requests remove it and acknowledge the
// cancellation. Events themselves are pushed by Put, DeleteRange, and lease
// revocation through the stream stored on each Watch entry.
//
// Fixes over the previous implementation:
//   - the created Watch never recorded its stream, so every later event
//     delivery (w.stream.Send in Put/DeleteRange/leaseRevokeLocked)
//     dereferenced a nil stream;
//   - the handler spawned a goroutine and returned nil immediately, which
//     ends the RPC and tears down the stream — the loop now runs
//     synchronously until the client closes it;
//   - the cancel acknowledgement's Send error was silently ignored.
func (s *Server) Watch(stream etcdserverpb.Watch_WatchServer) error {
    for {
        req, err := stream.Recv()
        if err != nil {
            // io.EOF (client done) and transport errors both end the stream.
            log.Println(err)
            return nil
        }
        if create := req.GetCreateRequest(); create != nil {
            s.Lock()
            s.LastWatch++
            s.Watches[s.LastWatch] = Watch{
                ID:      s.LastWatch,
                Start:   string(create.Key),
                End:     string(create.RangeEnd),
                Filters: create.Filters,
                stream:  stream,
            }
            s.Unlock()
        }
        if cancel := req.GetCancelRequest(); cancel != nil {
            s.Lock()
            delete(s.Watches, cancel.WatchId)
            s.Unlock()
            if err := stream.Send(&etcdserverpb.WatchResponse{
                WatchId:      cancel.WatchId,
                Canceled:     true,
                CancelReason: "client canceled",
            }); err != nil {
                return err
            }
        }
    }
}
// SortKVs orders kvs in place by ascending key bytes.
func SortKVs(kvs []*mvccpb.KeyValue) {
    byKey := func(i, j int) bool {
        return string(kvs[i].Key) < string(kvs[j].Key)
    }
    sort.Slice(kvs, byKey)
}
|
package e2e
import (
"context"
"fmt"
"github.com/blang/semver/v4"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/operator-framework/operator-lifecycle-manager/test/e2e/dsl"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
"github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil"
"github.com/operator-framework/operator-lifecycle-manager/test/e2e/ctx"
)
var _ = Describe("Garbage collection for dependent resources", func() {
var (
kubeClient operatorclient.ClientInterface
operatorClient versioned.Interface
ns corev1.Namespace
)
BeforeEach(func() {
kubeClient = ctx.Ctx().KubeClient()
operatorClient = ctx.Ctx().OperatorClient()
namespaceName := genName("gc-e2e-")
ns = SetupGeneratedTestNamespace(namespaceName, namespaceName)
})
AfterEach(func() {
TeardownNamespace(ns.GetName())
})
Context("Given a ClusterRole owned by a CustomResourceDefinition", func() {
var (
crd *apiextensionsv1.CustomResourceDefinition
cr *rbacv1.ClusterRole
)
BeforeEach(func() {
group := fmt.Sprintf("%s.com", rand.String(16))
crd = &apiextensionsv1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("plural.%s", group),
},
Spec: apiextensionsv1.CustomResourceDefinitionSpec{
Group: group,
Scope: apiextensionsv1.ClusterScoped,
Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
{
Name: "v1",
Served: true,
Storage: true,
Schema: &apiextensionsv1.CustomResourceValidation{
OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{Type: "object"},
},
},
},
Names: apiextensionsv1.CustomResourceDefinitionNames{
Plural: "plural",
Singular: "singular",
Kind: "Kind",
ListKind: "KindList",
},
},
}
// Create a CustomResourceDefinition
var err error
Eventually(func() error {
crd, err = kubeClient.ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Create(context.Background(), crd, metav1.CreateOptions{})
return err
}).Should(Succeed())
cr = &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "clusterrole-",
OwnerReferences: []metav1.OwnerReference{ownerutil.NonBlockingOwner(crd)},
},
}
// Create a ClusterRole for the crd
Eventually(func() error {
cr, err = kubeClient.CreateClusterRole(cr)
return err
}).Should(Succeed())
})
AfterEach(func() {
// Clean up cluster role
IgnoreError(kubeClient.DeleteClusterRole(cr.GetName(), &metav1.DeleteOptions{}))
// Clean up CRD
IgnoreError(kubeClient.ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), crd.GetName(), metav1.DeleteOptions{}))
})
When("CustomResourceDefinition is deleted", func() {
BeforeEach(func() {
// Delete CRD
Eventually(func() bool {
err := kubeClient.ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), crd.GetName(), metav1.DeleteOptions{})
return apierrors.IsNotFound(err)
}).Should(BeTrue())
})
It("should delete the associated ClusterRole", func() {
Eventually(func() bool {
_, err := kubeClient.GetClusterRole(cr.GetName())
return apierrors.IsNotFound(err)
}).Should(BeTrue(), "get cluster role should eventually return \"not found\"")
})
})
})
Context("Given a ClusterRole owned by a APIService", func() {
var (
apiService *apiregistrationv1.APIService
cr *rbacv1.ClusterRole
)
BeforeEach(func() {
group := rand.String(16)
apiService = &apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("v1.%s", group),
},
Spec: apiregistrationv1.APIServiceSpec{
Group: group,
Version: "v1",
GroupPriorityMinimum: 1,
VersionPriority: 1,
},
}
// Create an API Service
var err error
Eventually(func() error {
apiService, err = kubeClient.CreateAPIService(apiService)
return err
}).Should(Succeed())
cr = &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "clusterrole-",
OwnerReferences: []metav1.OwnerReference{ownerutil.NonBlockingOwner(apiService)},
},
}
Eventually(func() error {
// Create a ClusterRole
cr, err = kubeClient.CreateClusterRole(cr)
return err
}).Should(Succeed())
})
AfterEach(func() {
IgnoreError(kubeClient.DeleteClusterRole(cr.GetName(), &metav1.DeleteOptions{}))
IgnoreError(kubeClient.DeleteAPIService(apiService.GetName(), &metav1.DeleteOptions{}))
})
When("APIService is deleted", func() {
BeforeEach(func() {
// Delete API service
Eventually(func() bool {
err := kubeClient.DeleteAPIService(apiService.GetName(), &metav1.DeleteOptions{})
return apierrors.IsNotFound(err)
}).Should(BeTrue())
})
It("should delete the associated ClusterRole", func() {
Eventually(func() bool {
_, err := kubeClient.GetClusterRole(cr.GetName())
return apierrors.IsNotFound(err)
}).Should(BeTrue(), "get cluster role should eventually return \"not found\"")
})
})
})
// TestOwnerReferenceGCBehavior runs a simple check on OwnerReference behavior to ensure
// a resource with multiple OwnerReferences will not be garbage collected when one of its
// owners has been deleted.
// Test Case:
// CSV-A CSV-B CSV-B
// \ / --Delete CSV-A--> |
// ConfigMap ConfigMap
Context("Given a dependent resource associated with multiple owners", func() {
var (
ownerA v1alpha1.ClusterServiceVersion
ownerB v1alpha1.ClusterServiceVersion
fetchedA *v1alpha1.ClusterServiceVersion
fetchedB *v1alpha1.ClusterServiceVersion
dependent *corev1.ConfigMap
propagation metav1.DeletionPropagation
options metav1.DeleteOptions
)
BeforeEach(func() {
ownerA = newCSV("ownera", ns.GetName(), "", semver.MustParse("0.0.0"), nil, nil, nil)
ownerB = newCSV("ownerb", ns.GetName(), "", semver.MustParse("0.0.0"), nil, nil, nil)
// create all owners
var err error
Eventually(func() error {
fetchedA, err = operatorClient.OperatorsV1alpha1().ClusterServiceVersions(ns.GetName()).Create(context.Background(), &ownerA, metav1.CreateOptions{})
return err
}).Should(Succeed())
Eventually(func() error {
fetchedB, err = operatorClient.OperatorsV1alpha1().ClusterServiceVersions(ns.GetName()).Create(context.Background(), &ownerB, metav1.CreateOptions{})
return err
}).Should(Succeed())
dependent = &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "dependent",
},
Data: map[string]string{},
}
// add owners
ownerutil.AddOwner(dependent, fetchedA, true, false)
ownerutil.AddOwner(dependent, fetchedB, true, false)
// create ConfigMap dependent
Eventually(func() error {
_, err = kubeClient.KubernetesInterface().CoreV1().ConfigMaps(ns.GetName()).Create(context.Background(), dependent, metav1.CreateOptions{})
return err
}).Should(Succeed(), "dependent could not be created")
propagation = metav1.DeletePropagationForeground
options = metav1.DeleteOptions{PropagationPolicy: &propagation}
})
When("removing one of the owner using 'Foreground' deletion policy", func() {
BeforeEach(func() {
// delete ownerA in the foreground (to ensure any "blocking" dependents are deleted before ownerA)
Eventually(func() bool {
err := operatorClient.OperatorsV1alpha1().ClusterServiceVersions(ns.GetName()).Delete(context.Background(), fetchedA.GetName(), options)
return apierrors.IsNotFound(err)
}).Should(BeTrue())
// wait for deletion of ownerA
Eventually(func() bool {
_, err := operatorClient.OperatorsV1alpha1().ClusterServiceVersions(ns.GetName()).Get(context.Background(), ownerA.GetName(), metav1.GetOptions{})
return apierrors.IsNotFound(err)
}).Should(BeTrue())
})
It("should not have deleted the dependent since ownerB CSV is still present", func() {
Eventually(func() error {
_, err := kubeClient.KubernetesInterface().CoreV1().ConfigMaps(ns.GetName()).Get(context.Background(), dependent.GetName(), metav1.GetOptions{})
return err
}).Should(Succeed(), "dependent deleted after one of the owner was deleted")
ctx.Ctx().Logf("dependent still exists after one owner was deleted")
})
})
When("removing both the owners using 'Foreground' deletion policy", func() {
BeforeEach(func() {
// delete ownerA in the foreground (to ensure any "blocking" dependents are deleted before ownerA)
Eventually(func() bool {
err := operatorClient.OperatorsV1alpha1().ClusterServiceVersions(ns.GetName()).Delete(context.Background(), fetchedA.GetName(), options)
return apierrors.IsNotFound(err)
}).Should(BeTrue())
// wait for deletion of ownerA
Eventually(func() bool {
_, err := operatorClient.OperatorsV1alpha1().ClusterServiceVersions(ns.GetName()).Get(context.Background(), ownerA.GetName(), metav1.GetOptions{})
return apierrors.IsNotFound(err)
}).Should(BeTrue())
// delete ownerB in the foreground (to ensure any "blocking" dependents are deleted before ownerB)
Eventually(func() bool {
err := operatorClient.OperatorsV1alpha1().ClusterServiceVersions(ns.GetName()).Delete(context.Background(), fetchedB.GetName(), options)
return apierrors.IsNotFound(err)
}).Should(BeTrue())
// wait for deletion of ownerB
Eventually(func() bool {
_, err := operatorClient.OperatorsV1alpha1().ClusterServiceVersions(ns.GetName()).Get(context.Background(), ownerB.GetName(), metav1.GetOptions{})
return apierrors.IsNotFound(err)
}).Should(BeTrue())
})
It("should have deleted the dependent since both the owners were deleted", func() {
Eventually(func() bool {
_, err := kubeClient.KubernetesInterface().CoreV1().ConfigMaps(ns.GetName()).Get(context.Background(), dependent.GetName(), metav1.GetOptions{})
return apierrors.IsNotFound(err)
}).Should(BeTrue(), "expected dependency configmap would be properly garabage collected")
ctx.Ctx().Logf("dependent successfully garbage collected after both owners were deleted")
})
})
})
When("a bundle with configmap and secret objects is installed", func() {
const (
packageName = "busybox"
channelName = "alpha"
subName = "test-subscription"
secretName = "mysecret"
configmapName = "special-config"
)
BeforeEach(func() {
const (
sourceName = "test-catalog"
imageName = "quay.io/olmtest/single-bundle-index:objects"
)
var installPlanRef string
// create catalog source
source := &v1alpha1.CatalogSource{
TypeMeta: metav1.TypeMeta{
Kind: v1alpha1.CatalogSourceKind,
APIVersion: v1alpha1.CatalogSourceCRDAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: sourceName,
Namespace: ns.GetName(),
Labels: map[string]string{"olm.catalogSource": sourceName},
},
Spec: v1alpha1.CatalogSourceSpec{
SourceType: v1alpha1.SourceTypeGrpc,
Image: imageName,
GrpcPodConfig: &v1alpha1.GrpcPodConfig{
SecurityContextConfig: v1alpha1.Restricted,
},
},
}
Eventually(func() error {
cs, err := operatorClient.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Create(context.Background(), source, metav1.CreateOptions{})
if err != nil {
return err
}
source = cs.DeepCopy()
return nil
}).Should(Succeed(), "could not create catalog source")
// Wait for the CatalogSource to be ready
_, err := fetchCatalogSourceOnStatus(operatorClient, source.GetName(), source.GetNamespace(), catalogSourceRegistryPodSynced)
Expect(err).ToNot(HaveOccurred(), "catalog source did not become ready")
// Create a Subscription for package
_ = createSubscriptionForCatalog(operatorClient, source.GetNamespace(), subName, source.GetName(), packageName, channelName, "", v1alpha1.ApprovalAutomatic)
// Wait for the Subscription to succeed
sub, err := fetchSubscription(operatorClient, ns.GetName(), subName, subscriptionStateAtLatestChecker)
Expect(err).ToNot(HaveOccurred(), "could not get subscription at latest status")
installPlanRef = sub.Status.InstallPlanRef.Name
// Wait for the installplan to complete (5 minute timeout)
_, err = fetchInstallPlan(GinkgoT(), operatorClient, installPlanRef, ns.GetName(), buildInstallPlanPhaseCheckFunc(v1alpha1.InstallPlanPhaseComplete))
Expect(err).ToNot(HaveOccurred(), "could not get installplan at complete phase")
ctx.Ctx().Logf("install plan %s completed", installPlanRef)
// confirm extra bundle objects (secret and configmap) are installed
Eventually(func() error {
_, err := kubeClient.GetSecret(ns.GetName(), secretName)
return err
}).Should(Succeed(), "expected no error getting secret object associated with CSV")
Eventually(func() error {
_, err := kubeClient.GetConfigMap(ns.GetName(), configmapName)
return err
}).Should(Succeed(), "expected no error getting configmap object associated with CSV")
})
When("the CSV is deleted", func() {
const csvName = "busybox.v2.0.0"
BeforeEach(func() {
// Delete subscription first
Eventually(func() bool {
err := operatorClient.OperatorsV1alpha1().Subscriptions(ns.GetName()).Delete(context.Background(), subName, metav1.DeleteOptions{})
return apierrors.IsNotFound(err)
}).Should(BeTrue())
// wait for deletion
Eventually(func() bool {
_, err := operatorClient.OperatorsV1alpha1().Subscriptions(ns.GetName()).Get(context.Background(), subName, metav1.GetOptions{})
return apierrors.IsNotFound(err)
}).Should(BeTrue())
// Delete CSV
Eventually(func() bool {
err := operatorClient.OperatorsV1alpha1().ClusterServiceVersions(ns.GetName()).Delete(context.Background(), csvName, metav1.DeleteOptions{})
return apierrors.IsNotFound(err)
}).Should(BeTrue())
// wait for deletion
Eventually(func() bool {
_, err := operatorClient.OperatorsV1alpha1().ClusterServiceVersions(ns.GetName()).Get(context.Background(), csvName, metav1.GetOptions{})
return apierrors.IsNotFound(err)
}).Should(BeTrue())
})
It("OLM should delete the associated configmap and secret", func() {
// confirm extra bundle objects (secret and configmap) are no longer installed on the cluster
Eventually(func() bool {
_, err := kubeClient.GetSecret(ns.GetName(), secretName)
return apierrors.IsNotFound(err)
}).Should(BeTrue())
Eventually(func() bool {
_, err := kubeClient.GetConfigMap(ns.GetName(), configmapName)
return apierrors.IsNotFound(err)
}).Should(BeTrue())
ctx.Ctx().Logf("dependent successfully garbage collected after csv owner was deleted")
})
})
})
// Installs a bundle whose CSV owns a plain ConfigMap ("special-config") and
// verifies that upgrading to a later CSV that ships a ConfigMap with the SAME
// name updates the object's data in place.
When("a bundle with a configmap is installed", func() {
	const (
		subName       = "test-subscription"
		configmapName = "special-config"
	)
	BeforeEach(func() {
		const (
			packageName = "busybox"
			channelName = "alpha"
			sourceName  = "test-catalog"
			// index image whose beta channel carries the same-named configmap upgrade
			imageName = "quay.io/olmtest/single-bundle-index:objects-upgrade-samename"
		)
		var installPlanRef string
		// create catalog source
		source := &v1alpha1.CatalogSource{
			TypeMeta: metav1.TypeMeta{
				Kind:       v1alpha1.CatalogSourceKind,
				APIVersion: v1alpha1.CatalogSourceCRDAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:      sourceName,
				Namespace: ns.GetName(),
				Labels:    map[string]string{"olm.catalogSource": sourceName},
			},
			Spec: v1alpha1.CatalogSourceSpec{
				SourceType: v1alpha1.SourceTypeGrpc,
				Image:      imageName,
				GrpcPodConfig: &v1alpha1.GrpcPodConfig{
					SecurityContextConfig: v1alpha1.Restricted,
				},
			},
		}
		var err error
		Eventually(func() error {
			source, err = operatorClient.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Create(context.Background(), source, metav1.CreateOptions{})
			return err
		}).Should(Succeed(), "could not create catalog source")
		// Wait for the CatalogSource to be ready
		_, err = fetchCatalogSourceOnStatus(operatorClient, source.GetName(), source.GetNamespace(), catalogSourceRegistryPodSynced)
		Expect(err).ToNot(HaveOccurred(), "catalog source did not become ready")
		// Create a Subscription for package
		_ = createSubscriptionForCatalog(operatorClient, source.GetNamespace(), subName, source.GetName(), packageName, channelName, "", v1alpha1.ApprovalAutomatic)
		// Wait for the Subscription to succeed
		sub, err := fetchSubscription(operatorClient, ns.GetName(), subName, subscriptionStateAtLatestChecker)
		Expect(err).ToNot(HaveOccurred(), "could not get subscription at latest status")
		installPlanRef = sub.Status.InstallPlanRef.Name
		// Wait for the installplan to complete (5 minute timeout)
		_, err = fetchInstallPlan(GinkgoT(), operatorClient, installPlanRef, ns.GetName(), buildInstallPlanPhaseCheckFunc(v1alpha1.InstallPlanPhaseComplete))
		Expect(err).ToNot(HaveOccurred(), "could not get installplan at complete phase")
		// The configmap shipped with the bundle must exist before the upgrade specs run.
		Eventually(func() error {
			_, err := kubeClient.GetConfigMap(ns.GetName(), configmapName)
			return err
		}).Should(Succeed(), "expected no error getting configmap object associated with CSV")
	})
	When("the subscription is updated to a later CSV with a configmap with the same name but new data", func() {
		const (
			upgradeChannelName = "beta"
			newCSVname         = "busybox.v3.0.0"
		)
		var installPlanRef string
		BeforeEach(func() {
			Eventually(func() error {
				// update subscription first
				sub, err := operatorClient.OperatorsV1alpha1().Subscriptions(ns.GetName()).Get(context.Background(), subName, metav1.GetOptions{})
				if err != nil {
					return fmt.Errorf("could not get subscription")
				}
				// update channel on sub
				sub.Spec.Channel = upgradeChannelName
				_, err = operatorClient.OperatorsV1alpha1().Subscriptions(ns.GetName()).Update(context.Background(), sub, metav1.UpdateOptions{})
				return err
			}).Should(Succeed(), "could not update subscription")
			// Wait for the Subscription to succeed
			sub, err := fetchSubscription(operatorClient, ns.GetName(), subName, subscriptionStateAtLatestChecker)
			Expect(err).ToNot(HaveOccurred(), "could not get subscription at latest status")
			installPlanRef = sub.Status.InstallPlanRef.Name
			// Wait for the installplan to complete (5 minute timeout)
			_, err = fetchInstallPlan(GinkgoT(), operatorClient, installPlanRef, ns.GetName(), buildInstallPlanPhaseCheckFunc(v1alpha1.InstallPlanPhaseComplete))
			Expect(err).ToNot(HaveOccurred(), "could not get installplan at complete phase")
			// Ensure the new csv is installed
			Eventually(func() error {
				_, err := operatorClient.OperatorsV1alpha1().ClusterServiceVersions(ns.GetName()).Get(context.Background(), newCSVname, metav1.GetOptions{})
				return err
			}).Should(BeNil())
		})
		It("OLM should have upgraded associated configmap in place", func() {
			Eventually(func() (string, error) {
				cfg, err := kubeClient.GetConfigMap(ns.GetName(), configmapName)
				if err != nil {
					return "", err
				}
				// check data in configmap to ensure it is the new data (configmap was updated in the newer bundle)
				// new value in the configmap is "updated-very-much"
				return cfg.Data["special.how"], nil
			}).Should(Equal("updated-very-much"))
			ctx.Ctx().Logf("dependent successfully updated after csv owner was updated")
		})
	})
})
// Installs a bundle whose CSV owns ConfigMap "special-config" and verifies
// that upgrading to a later CSV whose ConfigMap has a DIFFERENT name removes
// the old object and creates the new one.
When("a bundle with a new configmap is installed", func() {
	const (
		subName       = "test-subscription"
		configmapName = "special-config"
	)
	BeforeEach(func() {
		const (
			packageName = "busybox"
			channelName = "alpha"
			sourceName  = "test-catalog"
			// index image whose beta channel carries a differently-named configmap
			imageName = "quay.io/olmtest/single-bundle-index:objects-upgrade-diffname"
		)
		var installPlanRef string
		// create catalog source
		source := &v1alpha1.CatalogSource{
			TypeMeta: metav1.TypeMeta{
				Kind:       v1alpha1.CatalogSourceKind,
				APIVersion: v1alpha1.CatalogSourceCRDAPIVersion,
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:      sourceName,
				Namespace: ns.GetName(),
				Labels:    map[string]string{"olm.catalogSource": sourceName},
			},
			Spec: v1alpha1.CatalogSourceSpec{
				SourceType: v1alpha1.SourceTypeGrpc,
				Image:      imageName,
				GrpcPodConfig: &v1alpha1.GrpcPodConfig{
					SecurityContextConfig: v1alpha1.Restricted,
				},
			},
		}
		var err error
		Eventually(func() error {
			source, err = operatorClient.OperatorsV1alpha1().CatalogSources(source.GetNamespace()).Create(context.Background(), source, metav1.CreateOptions{})
			return err
		}).Should(Succeed())
		// Wait for the CatalogSource to be ready
		_, err = fetchCatalogSourceOnStatus(operatorClient, source.GetName(), source.GetNamespace(), catalogSourceRegistryPodSynced)
		Expect(err).ToNot(HaveOccurred(), "catalog source did not become ready")
		// Create a Subscription for package
		_ = createSubscriptionForCatalog(operatorClient, source.GetNamespace(), subName, source.GetName(), packageName, channelName, "", v1alpha1.ApprovalAutomatic)
		// Wait for the Subscription to succeed
		sub, err := fetchSubscription(operatorClient, ns.GetName(), subName, subscriptionStateAtLatestChecker)
		Expect(err).ToNot(HaveOccurred(), "could not get subscription at latest status")
		installPlanRef = sub.Status.InstallPlanRef.Name
		// Wait for the installplan to complete (5 minute timeout)
		_, err = fetchInstallPlan(GinkgoT(), operatorClient, installPlanRef, ns.GetName(), buildInstallPlanPhaseCheckFunc(v1alpha1.InstallPlanPhaseComplete))
		Expect(err).ToNot(HaveOccurred(), "could not get installplan at complete phase")
		// The original configmap must exist before the upgrade specs run.
		Eventually(func() error {
			_, err := kubeClient.GetConfigMap(ns.GetName(), configmapName)
			return err
		}).Should(Succeed(), "expected no error getting configmap object associated with CSV")
	})
	When("the subscription is updated to a later CSV with a configmap with a new name", func() {
		const (
			upgradeChannelName    = "beta"
			upgradedConfigMapName = "not-special-config"
			newCSVname            = "busybox.v3.0.0"
		)
		var installPlanRef string
		BeforeEach(func() {
			Eventually(func() error {
				// update subscription first
				sub, err := operatorClient.OperatorsV1alpha1().Subscriptions(ns.GetName()).Get(context.Background(), subName, metav1.GetOptions{})
				if err != nil {
					return fmt.Errorf("could not get subscription")
				}
				// update channel on sub
				sub.Spec.Channel = upgradeChannelName
				_, err = operatorClient.OperatorsV1alpha1().Subscriptions(ns.GetName()).Update(context.Background(), sub, metav1.UpdateOptions{})
				return err
			}).Should(Succeed(), "could not update subscription")
			// Wait for the Subscription to succeed
			sub, err := fetchSubscription(operatorClient, ns.GetName(), subName, subscriptionStateAtLatestChecker)
			Expect(err).ToNot(HaveOccurred(), "could not get subscription at latest status")
			installPlanRef = sub.Status.InstallPlanRef.Name
			// Wait for the installplan to complete (5 minute timeout)
			_, err = fetchInstallPlan(GinkgoT(), operatorClient, installPlanRef, ns.GetName(), buildInstallPlanPhaseCheckFunc(v1alpha1.InstallPlanPhaseComplete))
			Expect(err).ToNot(HaveOccurred(), "could not get installplan at complete phase")
			// Ensure the new csv is installed
			Eventually(func() error {
				_, err := operatorClient.OperatorsV1alpha1().ClusterServiceVersions(ns.GetName()).Get(context.Background(), newCSVname, metav1.GetOptions{})
				return err
			}).Should(BeNil())
		})
		// flake issue: https://github.com/operator-framework/operator-lifecycle-manager/issues/2626
		It("[FLAKE] should have removed the old configmap and put the new configmap in place", func() {
			// Old configmap garbage-collected...
			Eventually(func() bool {
				_, err := kubeClient.GetConfigMap(ns.GetName(), configmapName)
				return apierrors.IsNotFound(err)
			}).Should(BeTrue())
			// ...and the renamed one created by the upgraded CSV.
			Eventually(func() error {
				_, err := kubeClient.GetConfigMap(ns.GetName(), upgradedConfigMapName)
				return err
			}).Should(BeNil())
			ctx.Ctx().Logf("dependent successfully updated after csv owner was updated")
		})
	})
})
})
|
package issue_test
import (
"context"
"testing"
"go.mongodb.org/mongo-driver/bson/primitive"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"go.mongodb.org/mongo-driver/bson"
"williamfeng323/mooncake-duty/src/domains/issue"
"williamfeng323/mooncake-duty/src/domains/project"
repoimpl "williamfeng323/mooncake-duty/src/infrastructure/db/repo_impl"
)
// TestIssueService is the Go test entry point: it wires Gomega assertion
// failures into Ginkgo and runs the "Issue Service" suite defined below.
func TestIssueService(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Issue Service Suite")
}
// Integration specs for the issue domain service. They talk to a real
// database through the repo implementations, so each spec creates its
// fixtures in BeforeEach and removes them again afterwards.
var _ = Describe("Issue Service", func() {
	prj := project.NewProject("issueTestProject", "test project for testing issue")
	BeforeEach(func() {
		// Persist the fixture project; specs below reference prj.ID.
		prj.Create()
	})
	AfterEach(func() {
		// Remove the fixture project so runs stay independent.
		repoimpl.GetProjectRepo().DeleteOne(context.Background(), bson.M{"_id": prj.ID})
	})
	Describe("#CreateNewIssue", func() {
		It("Should create new issue in DB when project ID is correct", func() {
			// NOTE(review): leftover debug print — consider removing.
			print(prj.ID.Hex())
			i, e := issue.GetIssueService().CreateNewIssue(prj.ID, "testService")
			Expect(e).To(BeNil())
			Expect(i.ProjectID).To(Equal(prj.ID))
			Expect(i.IssueKey).To(Equal("testService"))
			// Inline cleanup: this issue is local to the spec.
			repoimpl.GetIssueRepo().DeleteOne(context.Background(), bson.M{"_id": i.ID})
		})
		It("should return project not found error when project id is invalid", func() {
			i, e := issue.GetIssueService().CreateNewIssue(primitive.NewObjectID(), "testService")
			Expect(e).ToNot(BeNil())
			Expect(e).To(MatchError(project.NotFoundError{}))
			Expect(i).To(BeNil())
		})
	})
	Describe("#GetIssueLists", func() {
		prj2 := project.NewProject("issueTestProject2", "test project for testing issue")
		var i1 *issue.Issue
		var i2 *issue.Issue
		var i3 *issue.Issue
		var i4 *issue.Issue
		var i5 *issue.Issue
		var i6 *issue.Issue
		BeforeEach(func() {
			// Three issues per project; keys deliberately overlap across projects.
			// NOTE(review): CreateNewIssue errors are discarded — a failed
			// create leaves a nil i1..i6 and the AfterEach would then panic.
			prj2.Create()
			i1, _ = issue.GetIssueService().CreateNewIssue(prj.ID, "mock1")
			i2, _ = issue.GetIssueService().CreateNewIssue(prj.ID, "mock1")
			i3, _ = issue.GetIssueService().CreateNewIssue(prj.ID, "mock3")
			i4, _ = issue.GetIssueService().CreateNewIssue(prj2.ID, "mock1")
			i5, _ = issue.GetIssueService().CreateNewIssue(prj2.ID, "mock2")
			i6, _ = issue.GetIssueService().CreateNewIssue(prj2.ID, "mock3")
		})
		AfterEach(func() {
			repoimpl.GetProjectRepo().DeleteOne(context.Background(), bson.M{"_id": prj2.ID})
			repoimpl.GetIssueRepo().DeleteOne(context.Background(), bson.M{"_id": i1.ID})
			repoimpl.GetIssueRepo().DeleteOne(context.Background(), bson.M{"_id": i2.ID})
			repoimpl.GetIssueRepo().DeleteOne(context.Background(), bson.M{"_id": i3.ID})
			repoimpl.GetIssueRepo().DeleteOne(context.Background(), bson.M{"_id": i4.ID})
			repoimpl.GetIssueRepo().DeleteOne(context.Background(), bson.M{"_id": i5.ID})
			repoimpl.GetIssueRepo().DeleteOne(context.Background(), bson.M{"_id": i6.ID})
		})
		It("Should return project not found error if project id does not exist", func() {
			issues, err := issue.GetIssueService().GetIssueLists(primitive.NewObjectID(), "", issue.Init)
			Expect(issues).To(BeNil())
			Expect(err).To(MatchError(project.NotFoundError{}))
		})
		It("Should return issue list with specific status, and issue key", func() {
			// prj has two "mock1" issues in Init status.
			issues, err := issue.GetIssueService().GetIssueLists(prj.ID, "mock1", issue.Init)
			Expect(err).To(BeNil())
			Expect(len(issues)).To(Equal(2))
			// Resolving i4 drops it from prj2's Init listing (3 -> 2).
			i4.UpdateStatus(issue.Resolved, "test")
			issues, err = issue.GetIssueService().GetIssueLists(prj2.ID, "", issue.Init)
			Expect(err).To(BeNil())
			Expect(len(issues)).To(Equal(2))
			// Status -1 presumably means "any status" — all 3 come back.
			issues, err = issue.GetIssueService().GetIssueLists(prj2.ID, "", -1)
			Expect(err).To(BeNil())
			Expect(len(issues)).To(Equal(3))
		})
	})
	Describe("#GetIssueByID", func() {
		It("should return not found error when issue id doesn't exist", func() {
			i, e := issue.GetIssueService().GetIssueByID(primitive.NewObjectID())
			Expect(e).To(MatchError(issue.NotFoundError{}))
			Expect(i).To(BeNil())
		})
		It("should return the issue instance if issue exist", func() {
			i0, _ := issue.GetIssueService().CreateNewIssue(prj.ID, "testService")
			i, e := issue.GetIssueService().GetIssueByID(i0.ID)
			Expect(e).To(BeNil())
			Expect(i.ID).To(Equal(i0.ID))
			repoimpl.GetIssueRepo().DeleteOne(context.Background(), bson.M{"_id": i0.ID})
		})
	})
})
|
// +build generic
package agrasta
// rand returns the next 64-bit word from the buffered SHAKE output stream,
// refilling rbuf from s.ShakeHash when the buffer is exhausted.
//
// NOTE(review): after a refill rpos is set to 16 and rbuf[16] is returned,
// so consumption runs over indices 16..0 (17 words). That requires
// len(rbuf) >= 17; if rbuf is a 16-element array this is an off-by-one
// (should be 15) — confirm against the State declaration.
// NOTE(review): the binary.Read error is ignored; a short read would leave
// rbuf partially stale.
func (s *State) rand() uint64 {
	s.rpos--
	if s.rpos < 0 {
		binary.Read(s.ShakeHash, binary.LittleEndian, &s.rbuf)
		s.rpos = 16
	}
	return s.rbuf[s.rpos]
}
|
// Copyright (c) 2012-2014 Jeremy Latt
// Copyright (c) 2014-2015 Edmund Huber
// Copyright (c) 2017 Daniel Oaks <daniel@danieloaks.net>
// released under the MIT license
package irc
import (
"fmt"
"net"
"github.com/oragono/oragono/irc/modes"
"github.com/oragono/oragono/irc/utils"
)
// webircConfig is a single WEBIRC gateway entry from the server configuration.
type webircConfig struct {
	// PasswordString is the operator-supplied legacy password hash from YAML.
	PasswordString string `yaml:"password"`
	// Password holds the decoded hash bytes, derived in Populate.
	Password []byte `yaml:"password-bytes"`
	// Fingerprint is a client-certificate fingerprint accepted from the gateway.
	Fingerprint string
	// Hosts lists the addresses the gateway may connect from
	// (presumably matched with isGatewayAllowed: IP, CIDR, or "localhost").
	Hosts []string
}
// Populate fills out our password or fingerprint.
// At least one of the two must be configured; when a password is present it
// is decoded from its legacy hash representation into Password.
func (wc *webircConfig) Populate() (err error) {
	switch {
	case wc.PasswordString != "":
		wc.Password, err = decodeLegacyPasswordHash(wc.PasswordString)
		return err
	case wc.Fingerprint != "":
		// Fingerprint-only configuration needs no further processing.
		return nil
	default:
		return ErrNoFingerprintOrPassword
	}
}
// isGatewayAllowed reports whether a connection from addr matches the given
// gateway specification, which may be the literal "localhost", an exact IP
// address, or a CIDR range.
func isGatewayAllowed(addr net.Addr, gatewaySpec string) bool {
	// "localhost" includes any loopback IP or unix domain socket
	if gatewaySpec == "localhost" {
		return utils.AddrIsLocal(addr)
	}
	ip := utils.AddrToIP(addr)
	if ip == nil {
		return false
	}
	// Exact literal match first, then CIDR containment.
	if gatewaySpec == ip.String() {
		return true
	}
	if _, network, err := net.ParseCIDR(gatewaySpec); err == nil {
		return network.Contains(ip)
	}
	// Spec is neither a matching IP nor a parseable CIDR.
	return false
}
// ApplyProxiedIP applies the given IP to the client.
// It validates the gateway-supplied address, re-checks server bans against
// it, and then swaps the client's IP and hostname under the state mutex.
// tls records whether the gateway's connection used TLS. The returned
// exiting flag is true when the client was disconnected by this call.
func (client *Client) ApplyProxiedIP(proxiedIP string, tls bool) (exiting bool) {
	// ensure IP is sane
	parsedProxiedIP := net.ParseIP(proxiedIP)
	if parsedProxiedIP == nil {
		client.Quit(fmt.Sprintf(client.t("Proxied IP address is not valid: [%s]"), proxiedIP))
		return true
	}
	// Bans are evaluated against the real client address, not the gateway's.
	isBanned, banMsg := client.server.checkBans(parsedProxiedIP)
	if isBanned {
		client.Quit(banMsg)
		return true
	}
	// given IP is sane! override the client's current IP
	// Reverse lookup happens outside the lock; only the field writes are guarded.
	rawHostname := utils.LookupHostname(proxiedIP)
	client.stateMutex.Lock()
	client.proxiedIP = parsedProxiedIP
	client.rawHostname = rawHostname
	client.stateMutex.Unlock()
	// nickmask will be updated when the client completes registration
	// set tls info
	// NOTE(review): certfp is cleared here — presumably because any client
	// certificate seen belongs to the gateway, not the end user; confirm.
	client.certfp = ""
	client.SetMode(modes.TLS, tls)
	return false
}
|
/*
Package crypto "Every package should have a package comment, a block comment preceding the package clause.
For multi-file packages, the package comment only needs to be present in one file, and any
one will do. The package comment should introduce the package and provide information
relevant to the package as a whole. It will appear first on the godoc page and should set
up the detailed documentation that follows."
*/
package crypto
import (
"crypto/hmac"
"crypto/sha256"
"errors"
"fmt"
"io"
"os"
)
// Errors
var (
ErrInvalidFileType = errors.New("invalid file type to encrypt")
)
// GetMAC generates and returns a hmac as a string.
func GetMAC(i interface{}) (string, error) {
mac := hmac.New(sha256.New, []byte("someKey"))
switch v := i.(type) {
case string:
io.WriteString(mac, v)
case io.Reader:
io.Copy(mac, v)
default:
return "", ErrInvalidFileType
}
return fmt.Sprintf("%x", mac.Sum(nil)), nil
}
// CheckMAC reports whether messageMAC is a valid HMAC tag for message.
// The comparison is constant-time via hmac.Equal.
func CheckMAC(message interface{}, messageMAC string) bool {
	macString, err := GetMAC(message)
	if err != nil {
		// Unsupported or unreadable input can never verify
		// (the original silently compared against an empty tag).
		return false
	}
	return hmac.Equal([]byte(messageMAC), []byte(macString))
}
// GetSha generates and returns the hex-encoded SHA-256 digest of the given
// value. Supported inputs are string, *os.File, and any other io.Reader;
// anything else yields ErrInvalidFileType.
func GetSha(i interface{}) (string, error) {
	h := sha256.New()
	switch v := i.(type) {
	case string:
		if _, err := io.WriteString(h, v); err != nil {
			return "", err
		}
	case *os.File:
		// Must precede io.Reader: *os.File satisfies io.Reader, so in the
		// original ordering this case was unreachable dead code.
		if _, err := io.Copy(h, v); err != nil {
			return "", err
		}
	case io.Reader:
		if _, err := io.Copy(h, v); err != nil {
			return "", err
		}
	default:
		return "", ErrInvalidFileType
	}
	return fmt.Sprintf("%x", h.Sum(nil)), nil
}
|
package main
import (
"context"
"fmt"
"os"
"sort"
"github.com/google/go-github/v31/github"
"github.com/rotisserie/eris"
"github.com/solo-io/go-utils/versionutils"
"github.com/spf13/cobra"
"golang.org/x/oauth2"
)
// main builds the docs-util CLI and runs it, exiting non-zero on failure.
func main() {
	if err := rootApp(context.Background()).Execute(); err != nil {
		fmt.Printf("unable to run: %v\n", err)
		os.Exit(1)
	}
}
// options carries shared state for all docs-util subcommands.
type options struct {
	// ctx is the root context for the invocation.
	ctx              context.Context
	HugoDataSoloOpts HugoDataSoloOpts
}

// HugoDataSoloOpts holds the flags controlling how docs are scoped when rendered.
type HugoDataSoloOpts struct {
	product string
	version string
	// if set, will override the version when rendering: the string "latest"
	// is used in the scope instead of the particular release version.
	callLatest bool
	noScope    bool
}
// rootApp constructs the docs-util root command, registers the changelog
// subcommands, and wires the persistent flags into opts.
func rootApp(ctx context.Context) *cobra.Command {
	opts := &options{ctx: ctx}
	root := &cobra.Command{
		Use: "docs-util",
		RunE: func(cmd *cobra.Command, args []string) error {
			// The root command does nothing on its own.
			return nil
		},
	}
	flags := root.PersistentFlags()
	flags.StringVar(&opts.HugoDataSoloOpts.version, "version", "", "version of docs and code")
	flags.StringVar(&opts.HugoDataSoloOpts.product, "product", "gloo", "product to which the docs refer (defaults to gloo)")
	flags.BoolVar(&opts.HugoDataSoloOpts.noScope, "no-scope", false, "if set, will not nest the served docs by product or version")
	flags.BoolVar(&opts.HugoDataSoloOpts.callLatest, "call-latest", false, "if set, will use the string 'latest' in the scope, rather than the particular release version")
	root.AddCommand(changelogMdFromGithubCmd(opts))
	root.AddCommand(minorReleaseChangelogMdFromGithubCmd(opts))
	return root
}
// changelogMdFromGithubCmd builds the "gen-changelog-md" subcommand, which
// renders a markdown changelog from the GitHub Releases API. The command is
// a no-op when the skip env var is set.
func changelogMdFromGithubCmd(opts *options) *cobra.Command {
	run := func(cmd *cobra.Command, args []string) error {
		if os.Getenv(skipChangelogGeneration) != "" {
			return nil
		}
		return generateChangelogMd(args)
	}
	return &cobra.Command{
		Use:   "gen-changelog-md",
		Short: "generate a markdown file from Github Release pages API",
		RunE:  run,
	}
}
// minorReleaseChangelogMdFromGithubCmd builds the
// "gen-minor-releases-changelog-md" subcommand, which aggregates release
// notes per minor version. The command is a no-op when the skip env var is set.
func minorReleaseChangelogMdFromGithubCmd(opts *options) *cobra.Command {
	run := func(cmd *cobra.Command, args []string) error {
		if os.Getenv(skipChangelogGeneration) != "" {
			return nil
		}
		return generateMinorReleaseChangelog(args)
	}
	return &cobra.Command{
		Use:   "gen-minor-releases-changelog-md",
		Short: "generate an aggregated changelog markdown file for each minor release version",
		RunE:  run,
	}
}
// Path segment used when serving docs for the most recent release.
const (
	latestVersionPath = "latest"
)

// CLI argument values selecting which repo's changelog to generate, and the
// environment variable that short-circuits changelog generation entirely.
const (
	glooDocGen              = "gloo"
	glooEDocGen             = "glooe"
	skipChangelogGeneration = "SKIP_CHANGELOG_GENERATION"
)

// Error constructors.
// NOTE(review): Go convention would name these ErrInvalidInput /
// ErrMissingGithubToken, but renaming would break existing callers.
var (
	InvalidInputError = func(arg string) error {
		return eris.Errorf("invalid input, must provide exactly one argument, either '%v' or '%v', (provided %v)",
			glooDocGen,
			glooEDocGen,
			arg)
	}
	MissingGithubTokenError = func() error {
		return eris.Errorf("Must either set GITHUB_TOKEN or set %s environment variable to true", skipChangelogGeneration)
	}
)
// generateChangelogMd prints a markdown changelog section ("### <tag>" plus
// body) for every release of the repository selected by args[0] ("gloo" or
// "glooe"). The enterprise repo is private and requires GITHUB_TOKEN.
func generateChangelogMd(args []string) error {
	if len(args) != 1 {
		// Report the actual argument count (previously len(args)-1, off by one).
		return InvalidInputError(fmt.Sprintf("%v", len(args)))
	}
	client := github.NewClient(nil)
	target := args[0]
	var repo string
	switch target {
	case glooDocGen:
		repo = "gloo"
	case glooEDocGen:
		// solo-projects is private: an authenticated client is required.
		repo = "solo-projects"
		ctx := context.Background()
		if os.Getenv("GITHUB_TOKEN") == "" {
			return MissingGithubTokenError()
		}
		ts := oauth2.StaticTokenSource(
			&oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")},
		)
		tc := oauth2.NewClient(ctx, ts)
		client = github.NewClient(tc)
	default:
		return InvalidInputError(target)
	}
	allReleases, err := getAllReleases(client, repo)
	if err != nil {
		return err
	}
	for _, release := range allReleases {
		// GetTagName/GetBody are nil-safe; dereferencing the pointer fields
		// directly (as before) panics on a release with a missing body.
		fmt.Printf("### %v\n\n", release.GetTagName())
		fmt.Printf("%v", release.GetBody())
	}
	return nil
}
// generateMinorReleaseChangelog prints an aggregated changelog grouped by
// minor release version for the repository selected by args[0] ("gloo" or
// "glooe"). The enterprise repo is private and requires GITHUB_TOKEN.
func generateMinorReleaseChangelog(args []string) error {
	if len(args) != 1 {
		// Report the actual argument count (previously len(args)-1, off by one).
		return InvalidInputError(fmt.Sprintf("%v", len(args)))
	}
	client := github.NewClient(nil)
	target := args[0]
	var repo string
	switch target {
	case glooDocGen:
		repo = "gloo"
	case glooEDocGen:
		// solo-projects is private: an authenticated client is required.
		repo = "solo-projects"
		ctx := context.Background()
		if os.Getenv("GITHUB_TOKEN") == "" {
			return MissingGithubTokenError()
		}
		ts := oauth2.StaticTokenSource(
			&oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")},
		)
		tc := oauth2.NewClient(ctx, ts)
		client = github.NewClient(tc)
	default:
		return InvalidInputError(target)
	}
	allReleases, err := getAllReleases(client, repo)
	if err != nil {
		return err
	}
	// allReleases already has the right element type; the element-by-element
	// copy in the original was redundant.
	return parseReleases(allReleases)
}
// getAllReleases pages through the GitHub Releases API and returns every
// release of solo-io/<repo>.
//
// The GitHub API caps PerPage at 100, so the original single call with
// PerPage: 10000000 silently truncated the result to the first 100 releases;
// this version follows Response.NextPage until the listing is exhausted.
func getAllReleases(client *github.Client, repo string) ([]*github.RepositoryRelease, error) {
	var allReleases []*github.RepositoryRelease
	opts := &github.ListOptions{PerPage: 100}
	for {
		releases, resp, err := client.Repositories.ListReleases(context.Background(), "solo-io", repo, opts)
		if err != nil {
			return nil, err
		}
		allReleases = append(allReleases, releases...)
		if resp.NextPage == 0 {
			return allReleases, nil
		}
		opts.Page = resp.NextPage
	}
}
// parseReleases groups release notes by minor version and prints one
// "### vMAJOR.MINOR" section per minor release, ordered by Versions'
// sort.Interface (see Version.LessThan). Returns an error if any release
// tag fails to parse as a version.
func parseReleases(releases []*github.RepositoryRelease) error {
	minorReleaseMap := make(map[Version]string, len(releases))
	for _, release := range releases {
		version, err := versionutils.ParseVersion(release.GetTagName())
		if err != nil {
			return err
		}
		minorVersion := Version{
			Major: version.Major,
			Minor: version.Minor,
		}
		// Append this patch release's notes under its minor-version heading.
		minorReleaseMap[minorVersion] += fmt.Sprintf("##### %v\n", version.String()) + release.GetBody()
	}
	versions := make(Versions, 0, len(minorReleaseMap))
	for minorVersion := range minorReleaseMap { // was `minorVersion, _ :=` — redundant blank identifier
		versions = append(versions, minorVersion)
	}
	sort.Sort(versions)
	for _, version := range versions {
		fmt.Printf("### v%v.%v\n\n", version.Major, version.Minor)
		fmt.Printf("%v", minorReleaseMap[version])
	}
	return nil
}
// Version aliases versionutils.Version so local sorting helpers can be defined on it.
type Version versionutils.Version

// Versions implements sort.Interface over Version.
type Versions []Version

// The following functions are used to display the releases in order of release version
// NOTE(review): despite its name, LessThan delegates to
// IsGreaterThanOrEqualTo, so sort.Sort(Versions) yields DESCENDING order
// (newest release first). The inversion appears deliberate for changelog
// display — confirm before "fixing"; renaming would change the interface.
func (v Version) LessThan(version Version) bool {
	result, _ := versionutils.Version(v).IsGreaterThanOrEqualTo(versionutils.Version(version))
	return result
}
// Len returns the number of versions; part of sort.Interface.
func (s Versions) Len() int {
	return len(s)
}

// Swap exchanges two elements; part of sort.Interface.
func (s Versions) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less defers to Version.LessThan (which sorts descending — see note there).
func (s Versions) Less(i, j int) bool {
	return s[i].LessThan(s[j])
}
|
package main
import (
"exer9"
"fmt"
"math"
)
// main exercises the exer9 package end to end: point construction and
// formatting, norm/scale/rotate operations, random arrays, goroutine-based
// summation, chunked mean/stddev, and two sort implementations.
func main() {
	fmt.Println(exer9.Message)
	// Point formatting.
	pt := exer9.NewPoint(3, 4.5)
	fmt.Println(pt)                        // should print (3, 4.5)
	fmt.Println(pt.String() == "(3, 4.5)") // should print true
	// 3-4-5 triangle: norm should be exactly 5.
	p1 := exer9.NewPoint(3, 4)
	fmt.Println(p1.Norm() == 5.0)
	p1.Scale(5)
	fmt.Println(p1)
	// Two quarter-turn rotations: (1,0) -> (0,1) -> (-1,0).
	p2 := exer9.NewPoint(1, 0)
	p2.Rotate(math.Pi / 2)
	fmt.Println(p2)
	p2.Rotate(math.Pi / 2)
	fmt.Println(p2)
	// Three independent random arrays of 5 values in [0,10).
	fmt.Println(exer9.RandomArray(5, 10))
	fmt.Println(exer9.RandomArray(5, 10))
	fmt.Println(exer9.RandomArray(5, 10))
	// Sum 1..10 in a goroutine and receive the result over a channel.
	arr := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	results := make(chan int)
	go exer9.GenerateSum(arr, results)
	sum := <-results
	fmt.Println("Sum of ", arr, " is ", sum)
	// Mean and standard deviation computed over 5 chunks.
	chunks := 5
	mean, stddev := exer9.MeanStddev(arr, chunks)
	fmt.Println("MeanStddev(", arr, chunks, ") = ", mean, ", ", stddev)
	// Both sorts operate in place on a descending slice.
	arr2 := []float64{10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
	fmt.Println(arr2)
	exer9.InsertionSort(arr2)
	fmt.Println(arr2)
	arr3 := []float64{10, 9, 8, 7, 6, 5, 4, 3, 2, 1}
	fmt.Println(arr3)
	exer9.QuickSort(arr3)
	fmt.Println(arr3)
}
|
/*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package netutil
import (
"io/ioutil"
"log"
"net"
"strings"
"testing"
)
var _ = log.Printf
// TODO: test IPv6. probably not working.

// TestIdent4 checks that uidFromReader can extract the owning uid of an IPv4
// connection from /proc/net/tcp-formatted data. The local/remote pair below
// corresponds to the hex row "816EDA43:A9AC C39407CF:0050" in tcpstat4,
// whose uid column holds 61652.
func TestIdent4(t *testing.T) {
	lip := net.ParseIP("67.218.110.129")
	lport := 43436
	rip := net.ParseIP("207.7.148.195")
	rport := 80
	// 816EDA43:A9AC C39407CF:0050
	// 43436 80
	uid, err := uidFromReader(lip, lport, rip, rport, ioutil.NopCloser(strings.NewReader(tcpstat4)))
	if err != nil {
		t.Error(err)
	}
	if e, g := 61652, uid; e != g {
		t.Errorf("expected uid %d, got %d", e, g)
	}
}
// tcpstat4 is a captured /proc/net/tcp table (IPv4): addresses and ports are
// hex, and the uid is the 8th whitespace-separated column after the state.
// NOTE(review): the row numbered "6:" is fused onto the end of row "5:" —
// apparently a paste artifact; confirm the parser tolerates (or never hits) it.
var tcpstat4 = ` sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
0: 0100007F:C204 00000000:0000 0A 00000000:00000000 00:00000000 00000000 61652 0 8722922 1 ffff880036b36180 300 0 0 2 -1
1: 0100007F:0CEA 00000000:0000 0A 00000000:00000000 00:00000000 00000000 120 0 5714729 1 ffff880036b35480 300 0 0 2 -1
2: 0100007F:2BCB 00000000:0000 0A 00000000:00000000 00:00000000 00000000 65534 0 7381 1 ffff880136370000 300 0 0 2 -1
3: 0100007F:13AD 00000000:0000 0A 00000000:00000000 00:00000000 00000000 61652 0 4846349 1 ffff880123eb5480 300 0 0 2 -1
4: 00000000:0050 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 8307 1 ffff880123eb0d00 300 0 0 2 -1
5: 00000000:0071 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 8558503 1 ffff88001a242080 300 0 0 2 -1 6: 0100007F:7533 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 8686 1 ffff880136371380 300 0 0 2 -1
7: 017AA8C0:0035 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 6015 1 ffff880123eb0680 300 0 0 2 -1
8: 0100007F:0277 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 8705543 1 ffff88001a242d80 300 0 0 2 -1
9: 816EDA43:D4DC 35E07D4A:01BB 01 00000000:00000000 02:00000E25 00000000 61652 0 8720744 2 ffff88001a243a80 346 4 24 3 2
10: 0100007F:C204 0100007F:D981 01 00000000:00000000 00:00000000 00000000 61652 0 8722934 1 ffff88006712a700 21 4 30 5 -1
11: 816EDA43:A9AC C39407CF:0050 01 00000000:00000000 00:00000000 00000000 61652 0 8754873 1 ffff88006712db00 27 0 0 3 -1
12: 816EDA43:AFEF 51357D4A:01BB 01 00000000:00000000 02:00000685 00000000 61652 0 8752937 2 ffff880136375480 87 4 2 4 -1
13: 0100007F:D981 0100007F:C204 01 00000000:00000000 00:00000000 00000000 61652 0 8722933 1 ffff880036b30d00 21 4 0 3 -1
`
|
package db_analyze
import (
"database/sql"
"encoding/csv"
"encoding/json"
"fmt"
_ "github.com/go-sql-driver/mysql"
"log"
"os"
"strconv"
"time"
)
// DbWorker wraps the connection settings for the MySQL analysis database.
type DbWorker struct {
	// mysql data source name, e.g. "user:pass@tcp(host:port)/dbname"
	Dsn string
}
func checkErr(err error) {
if err != nil {
panic(err)
}
}
// raiingTCMSUser mirrors one row of the TCMS user (patient) table.
type raiingTCMSUser struct {
	id             int64
	uuid           string
	caseNum        string // patient case number
	bedNum         int
	name           string
	sex            int // 1 = male, 2 = female (see AnalyzePostOperationData)
	birthday       interface{}
	model          int
	height         int
	weight         int
	inHospitalTime int64
	pacing         int
	hospital       interface{}
	department     interface{}
	status         int
	addTime        int64
	addId          int
	lastUpdateTime int64
	lastUpdateId   int64
}

// raiingTCMSTempData mirrors one row of the TCMS temperature-data table;
// data holds a JSON array of temperatureData samples.
type raiingTCMSTempData struct {
	id               int
	uuid             string
	userUuid         string
	time             int64
	timeZone         int
	hardwareSn       string
	hardwareVersion  string
	firmwareVersion  string
	b2wSn            string
	b2wVersion       string
	algorithmVersion string
	appVersion       string
	dataVersion      string
	tempMin          int
	tempMax          int
	tempAvg          int
	tempValid        int
	addTime          int64
	lastUpdateTime   int64
	data             string
}

// temperatureData is one sample inside raiingTCMSTempData.data, e.g.:
// {"time":1540859820,"temp":36842,"wear_quality":81,"stable":3,"wear_stage":1,"add_time":1540859881}
// temp appears to be in thousandths of a degree Celsius (36842 = 36.842℃).
type temperatureData struct {
	Time        int64 `json:"time"`
	Temp        int   `json:"temp"`
	WearQuality int   `json:"wear_quality"`
	Stable      int   `json:"stable"`
	WearStage   int   `json:"wear_stage"`
	AddTime     int64 `json:"add_time"`
}

// raiingTCMSEventData mirrors one row of the TCMS clinical-event table.
type raiingTCMSEventData struct {
	id             int
	uuid           string
	casesId        string // patient case number
	userUuid       string
	eventUuid      string
	eventType      int
	timezone       int
	startTime      int64
	endTime        int64
	createTime     int64
	updateTime     int64
	detail         string
	addTime        int64
	lastUpdateTime int64
}

// PostSt accumulates post-operative statistics for one patient.
type PostSt struct {
	sex              bool // true = male, false = female
	sexInt           int  // raw sex code from the user table (1 = male, 2 = female)
	below360         bool
	between375And380 bool
	between380And385 bool
	exceed385        bool
	isHanzhan        bool  // chills (shivering) observed
	isZhanwang       bool  // delirium observed
	inHospitalTime   int64 // time the patient entered the ward
}
// AnalyzePostOperationData loads every patient from the user table, aggregates
// their post-admission ward temperature readings into banded durations
// (below 36℃, >37.5℃, 37.5–38℃, 38–38.5℃, >38.5℃), counts chill and
// delirium events, and streams one StData per patient into ch. The channel
// is closed when all patients have been processed. Temperatures are in
// thousandths of a degree (36000 = 36.0℃); durations advance by
// TemperatureInterval per accepted sample.
//
// NOTE(review): SQL is built by string concatenation — an injection risk if
// uuids/case ids are ever attacker-controlled; prefer placeholders. Also the
// temperature query concatenates `"<uuid>"` directly against `AND` with no
// separating space — confirm MySQL tolerates this.
func AnalyzePostOperationData(user, password, ip, dbName string, ch chan<- StData) {
	if user == "" || password == "" || ip == "" || dbName == "" {
		fmt.Println("传入的用户名等信息为空")
		return
	}
	dbw := DbWorker{
		//Dsn: "root:123456@tcp(127.0.0.1:3306)/raiing_tcms_v6_temp",
		Dsn: user + ":" + password + "@tcp(" + ip + ")/" + dbName,
	}
	db, err := sql.Open("mysql",
		dbw.Dsn)
	if err != nil {
		panic(err)
		return
	}
	fmt.Println("数据库打开成功!")
	defer func() {
		err = db.Close()
		checkErr(err)
	}()
	// Load all patients and seed the per-patient statistics map.
	rows3, err := db.Query("SELECT * FROM " + UserTableName)
	if err != nil {
		log.Fatal(err)
	}
	var userData raiingTCMSUser
	userUUIDS := make(map[string]string, 10) // user uuid -> case number
	postST := make(map[string]*PostSt, 10)
	for rows3.Next() {
		err := rows3.Scan(&userData.id, &userData.uuid, &userData.caseNum, &userData.bedNum, &userData.name,
			&userData.sex, &userData.birthday, &userData.model, &userData.height, &userData.weight,
			&userData.inHospitalTime, &userData.pacing, &userData.hospital, &userData.department, &userData.status,
			&userData.addTime, &userData.addId, &userData.lastUpdateTime, &userData.lastUpdateId)
		checkErr(err)
		userUUIDS[userData.uuid] = userData.caseNum
		if userData.sex == 1 { // 1 = male
			postST[userData.uuid] = &PostSt{sex: true, sexInt: userData.sex, inHospitalTime: userData.inHospitalTime}
		} else { // 2 = female
			postST[userData.uuid] = &PostSt{sex: false, sexInt: userData.sex, inHospitalTime: userData.inHospitalTime}
		}
	}
	fmt.Println("用户UUID: ", len(userUUIDS), userUUIDS)
	fmt.Println("查询时间: ", time.Now().Format("2006-01-02 15:04:05"))
	//userTempST := make(chan UserTempDistribution, 20)
	// Asynchronously write the temperature distribution to a file (disabled).
	//go func() {
	//	saveUserTempDistribution(userTempST)
	//}()
	userUUIDCount := 0
	for k, v := range userUUIDS {
		//stmt, err := db.Prepare("SELECT * FROM " + TEMP_TABLE_NAME + "WHERE hardware_sn=?" + " ORDER BY time ASC")
		// Only query readings taken after the patient entered the ward.
		rows, err := db.Query("SELECT * FROM " + TempTableName + " WHERE user_uuid =" + "\"" + k + "\"" + "AND time >" +
			strconv.Itoa(int(postST[k].inHospitalTime)) + " ORDER BY time ASC")
		//rows, err := stmt.Exec(k)
		//if err != nil {
		//	log.Fatal(err)
		//}
		//column, err := rows.Columns()
		checkErr(err)
		//for _, name := range column {
		//	fmt.Print(name, " ")
		//}
		//fmt.Println()
		fmt.Println("查询开始时间: ", time.Now().Format("2006-01-02 15:04:05"))
		var tcmsData raiingTCMSTempData
		var tempDataArray []temperatureData
		var count int
		var continueTime int64     // total monitored duration
		var below360 int64         // below 36.0℃
		var between350And360 int64 // above 35.0℃, up to and including 36.0℃
		var exceed375Time int64    // above 37.5℃
		var between375And380 int64 // above 37.5℃, up to and including 38.0℃
		var between380And385 int64 // above 38.0℃, up to and including 38.5℃
		var exceed385Time int64    // above 38.5℃
		var maxTemperature int     // highest temperature seen
		startTime := time.Now().Unix() // earliest reading timestamp,
		endTime := int64(0)            // latest reading timestamp
		for rows.Next() {
			err := rows.Scan(&tcmsData.id, &tcmsData.uuid, &tcmsData.userUuid, &tcmsData.time, &tcmsData.timeZone, &tcmsData.hardwareSn, &tcmsData.hardwareVersion,
				&tcmsData.firmwareVersion, &tcmsData.b2wSn, &tcmsData.b2wVersion, &tcmsData.algorithmVersion, &tcmsData.appVersion, &tcmsData.dataVersion,
				&tcmsData.tempMin, &tcmsData.tempMax, &tcmsData.tempAvg, &tcmsData.tempValid, &tcmsData.addTime, &tcmsData.lastUpdateTime, &tcmsData.data)
			checkErr(err)
			if startTime > tcmsData.time {
				startTime = tcmsData.time
			}
			if endTime < tcmsData.time {
				endTime = tcmsData.time
			}
			err = json.Unmarshal([]byte(tcmsData.data), &tempDataArray)
			for _, data := range tempDataArray {
				// Only count temperatures taken after the reading stabilized.
				if data.Stable < 2 {
					continue
				}
				temperatureValue := data.Temp
				// Note the bands below 37.5 and above 37.5 are not mutually
				// exclusive with their sub-bands by design (exceed375Time
				// also accumulates for the 37.5-38 and 38-38.5 samples).
				if temperatureValue < 36000 {
					below360 += TemperatureInterval
				}
				if temperatureValue > 35000 && temperatureValue <= 36000 {
					between350And360 += TemperatureInterval
				}
				if temperatureValue > 37500 {
					exceed375Time += TemperatureInterval
				}
				if temperatureValue > 37500 && temperatureValue <= 38000 {
					between375And380 += TemperatureInterval
				}
				if temperatureValue > 38000 && temperatureValue <= 38500 {
					between380And385 += TemperatureInterval
				}
				if temperatureValue > 38500 {
					exceed385Time += TemperatureInterval
				}
				continueTime += TemperatureInterval
				if maxTemperature < temperatureValue {
					maxTemperature = temperatureValue
				}
			}
			//fmt.Println(tempDataArray)
			checkErr(err)
			count ++
		}
		fmt.Println("查询体温数据结束时间: ", time.Now().Format("2006-01-02 15:04:05"))
		fmt.Println("数据库记录条数: ", count)
		if err != nil {
			panic(err)
		}
		// Query clinical events for this patient's case.
		rows2, err := db.Query("SELECT * FROM " + EventTableName + " WHERE cases_id =" + "\"" + v + "\"")
		if err != nil {
			log.Fatal(err)
		}
		var event raiingTCMSEventData
		var hangzhanCount int // chills (shivering)
		var zhanwangCount int // delirium
		for rows2.Next() {
			err := rows2.Scan(&event.id, &event.uuid, &event.casesId, &event.userUuid,
				&event.eventUuid, &event.eventType, &event.timezone, &event.startTime,
				&event.endTime, &event.createTime, &event.updateTime, &event.detail,
				&event.addTime, &event.lastUpdateTime)
			checkErr(err)
			if event.eventType == 1018 || event.eventType == 3006 { // chills event observed
				hangzhanCount ++
			} else if event.eventType == 1019 || event.eventType == 3009 { // delirium event observed
				zhanwangCount++
			}
		}
		//userTempDistribution := UserTempDistribution{v,
		//	between350And360,
		//	below360,
		//	exceed375Time,
		//	between375And380,
		//	between380And385,
		//	exceed385Time,
		//	maxTemperature,
		//	continueTime,
		//	hangzhanCount,
		//	zhanwangCount}
		//userTempST <- userTempDistribution
		var post = postST[k]
		stData := StData{caseID: v,
			sex:                           post.sexInt,
			between350And360PostOperation: between350And360,
			below360PostOperation:         below360,
			exceed375TimePostOperation:    exceed375Time,
			between375And380PostOperation: between375And380,
			between380And385PostOperation: between380And385,
			exceed385TimePostOperation:    exceed385Time,
			maxTemperaturePostOperation:   maxTemperature,
			continueTimePostOperation:     continueTime,
			chillCountPostOperation:       hangzhanCount,
			deliriumCountPostOperation:    zhanwangCount,
		}
		// Emit this patient's aggregated statistics.
		ch <- stData
		//var post = postST[k]
		//if below360 > 0 {
		//	post.below360 = true
		//}
		//if between375And380 > 0 {
		//	post.between375And380 = true
		//}
		//if between380And385 > 0 {
		//	post.between380And385 = true
		//}
		//if exceed385Time > 0 {
		//	post.exceed385 = true
		//}
		//if hangzhanCount > 0 {
		//	post.isHanzhan = true
		//}
		//if zhanwangCount > 0 {
		//	post.isZhanwang = true
		//}
		userUUIDCount ++
		//if userUUIDCount > 10 {
		//	break
		//}
		fmt.Println("用户个数: ", userUUIDCount, " ,查询结束时间: ", time.Now().Format("2006-01-02 15:04:05"))
	}
	fmt.Println("结束时间: ", time.Now().Format("2006-01-02 15:04:05"))
	// Close the (disabled) distribution channel.
	//close(userTempST)
	// Produce the summary spreadsheet (disabled).
	//integratedAnalyze(postST)
	close(ch) // signal consumers that all patients have been emitted
}
// UserTempDistribution holds one patient's post-operative temperature
// statistics: time spent in each temperature band, the peak value, the
// total monitored duration, and clinical event counts.
type UserTempDistribution struct {
	caseID           string // patient case ID (病例号)
	between350And360 int64  // samples with 35.0 < T <= 36.0 ℃ (reported as minutes: value/60)
	below360         int64  // samples below 36.0 ℃
	exceed375Time    int64  // samples above 37.5 ℃
	between375And380 int64  // samples with 37.5 < T <= 38.0 ℃
	between380And385 int64  // samples with 38.0 < T <= 38.5 ℃
	exceed385Time    int64  // samples above 38.5 ℃
	maxTemperature   int    // peak temperature — presumably scaled (e.g. 375 == 37.5℃); TODO confirm
	continueTime     int64  // total measurement time; reported in hours as value/3600, so presumably seconds — confirm
	hangzhanCount    int    // chill (寒战) event count
	zhanwangCount    int    // delirium (谵妄) event count
}
// saveUserTempDistribution drains UserTempDistribution records from ch and
// appends one CSV row per record to a newly created file
// ("手术后温度分布统计_<timestamp>.csv", UTF-8 with BOM so Excel renders
// the Chinese headers correctly). It returns once the producer closes ch.
func saveUserTempDistribution(ch chan UserTempDistribution) {
	// Create the CSV file that stores the post-operative temperature
	// distribution statistics.
	csvFile, err := os.Create("手术后温度分布统计_" + GetTimeNow() + ".csv")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer csvFile.Close()
	_, _ = csvFile.WriteString("\xEF\xBB\xBF") // write UTF-8 BOM
	w := csv.NewWriter(csvFile)
	header := []string{
		"病例号", // patient ID
		"35<T≤36℃时长",
		"T>37.5℃时长",
		"37.5<T≤38.0℃时长",
		"38.0<T≤38.5℃时长",
		"T>38.5℃时长",
		"最高体温",
		"术后总测量时长",
		"是否寒战",
		"是否谵妄",
	}
	if err := w.Write(header); err != nil {
		checkErr(err)
	}
	w.Flush()
	// BUG FIX: the original used `for { data, ok := <-ch ... }` and never
	// exited when ch was closed, spinning forever while printing an error
	// on every iteration. Ranging over the channel terminates cleanly once
	// the producer calls close(ch). The csv.Writer is also reused instead
	// of being recreated for every record.
	for data := range ch {
		row := []string{
			data.caseID, // patient ID
			fmt.Sprintf("%.2f", float64(data.between350And360)/60),
			fmt.Sprintf("%.2f", float64(data.exceed375Time)/60),
			fmt.Sprintf("%.2f", float64(data.between375And380)/60),
			fmt.Sprintf("%.2f", float64(data.between380And385)/60),
			fmt.Sprintf("%.2f", float64(data.exceed385Time)/60),
			strconv.Itoa(data.maxTemperature),
			fmt.Sprintf("%.2f", float64(data.continueTime)/3600), // hours
			strconv.Itoa(data.hangzhanCount),
			strconv.Itoa(data.zhanwangCount),
		}
		if err := w.Write(row); err != nil {
			checkErr(err)
		}
		w.Flush()
	}
}
// integratedAnalyze aggregates the per-patient post-operative flags in
// postST into incidence rates by sex and writes them to a CSV file
// ("手术后温度概率统计_<timestamp>.csv", UTF-8 with BOM).
func integratedAnalyze(postST map[string]*PostSt) {
	maleCount := 0   // number of male patients
	femaleCount := 0 // number of female patients
	below360MaleCount := 0
	between375And380MaleCount := 0
	between380And385MaleCount := 0
	exceed385MaleCount := 0
	hanzhanMaleCount := 0  // chill (寒战) occurrences, male
	zhanwangMaleCount := 0 // delirium (谵妄) occurrences, male
	below360FemaleCount := 0
	between375And380FemaleCount := 0
	between380And385FemaleCount := 0
	exceed385FemaleCount := 0
	hanzhanFemaleCount := 0
	zhanwangFemaleCount := 0
	for _, v := range postST {
		if v.sex {
			maleCount++
			if v.below360 {
				below360MaleCount++
			}
			if v.between375And380 {
				between375And380MaleCount++
			}
			if v.between380And385 {
				between380And385MaleCount++
			}
			if v.exceed385 {
				exceed385MaleCount++
			}
			if v.isHanzhan {
				hanzhanMaleCount++
			}
			if v.isZhanwang {
				zhanwangMaleCount++
			}
		} else {
			femaleCount++
			if v.below360 {
				below360FemaleCount++
			}
			if v.between375And380 {
				between375And380FemaleCount++
			}
			if v.between380And385 {
				between380And385FemaleCount++
			}
			if v.exceed385 {
				exceed385FemaleCount++
			}
			if v.isHanzhan {
				hanzhanFemaleCount++
			}
			if v.isZhanwang {
				zhanwangFemaleCount++
			}
		}
	}
	fmt.Println("男个数: ", maleCount,
		"低于360个数: ", below360MaleCount,
		"大于375小于380个数: ", between375And380MaleCount,
		"大于380小于385个数: ", between380And385MaleCount,
		"超过385个数: ", exceed385MaleCount,
		"寒战个数: ", hanzhanMaleCount,
		"谵妄个数: ", zhanwangMaleCount)
	fmt.Println("女个数: ", femaleCount,
		"低于360个数: ", below360FemaleCount,
		"大于375小于380个数: ", between375And380FemaleCount,
		"大于380小于385个数: ", between380And385FemaleCount,
		"超过385个数: ", exceed385FemaleCount,
		"寒战个数: ", hanzhanFemaleCount,
		"谵妄个数: ", zhanwangFemaleCount)
	// Create the CSV file holding the post-operative probability statistics.
	csvFile1, err := os.Create("手术后温度概率统计_" + GetTimeNow() + ".csv")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer csvFile1.Close()
	_, _ = csvFile1.WriteString("\xEF\xBB\xBF") // write UTF-8 BOM
	w1 := csv.NewWriter(csvFile1)
	// ratio formats n/d to two decimals. ROBUSTNESS FIX: an empty cohort
	// previously produced "NaN" (division by zero); report "0.00" instead.
	ratio := func(n, d int) string {
		if d == 0 {
			return "0.00"
		}
		return fmt.Sprintf("%.2f", float64(n)/float64(d))
	}
	total := maleCount + femaleCount
	data1 := [][]string{
		{"性别",
			"低于36℃发生率",
			"37.5-38.0℃发生率",
			"38.0-38.5℃发生率",
			"高于38.5℃发生率",
			"寒战发生率",
			"谵妄发生率"},
		{
			"男",
			ratio(below360MaleCount, maleCount),
			ratio(between375And380MaleCount, maleCount),
			ratio(between380And385MaleCount, maleCount),
			ratio(exceed385MaleCount, maleCount),
			ratio(hanzhanMaleCount, maleCount),
			ratio(zhanwangMaleCount, maleCount),
		},
		{
			"女",
			ratio(below360FemaleCount, femaleCount),
			ratio(between375And380FemaleCount, femaleCount),
			ratio(between380And385FemaleCount, femaleCount),
			ratio(exceed385FemaleCount, femaleCount),
			ratio(hanzhanFemaleCount, femaleCount),
			ratio(zhanwangFemaleCount, femaleCount),
		},
		{
			// BUG FIX: the overall row previously SUMMED the male and
			// female rates (which can exceed 1.0). The pooled incidence is
			// (male occurrences + female occurrences) / all patients.
			"全部",
			ratio(below360MaleCount+below360FemaleCount, total),
			ratio(between375And380MaleCount+between375And380FemaleCount, total),
			ratio(between380And385MaleCount+between380And385FemaleCount, total),
			ratio(exceed385MaleCount+exceed385FemaleCount, total),
			ratio(hanzhanMaleCount+hanzhanFemaleCount, total),
			ratio(zhanwangMaleCount+zhanwangFemaleCount, total),
		},
	}
	// WriteAll flushes internally; checkErr reports any write failure.
	if err := w1.WriteAll(data1); err != nil {
		checkErr(err)
	}
}
|
//
// Copyright (C) 2019-2021 vdaas.org vald team <vald@vdaas.org>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package sidecar
import (
context "context"
fmt "fmt"
math "math"
_ "github.com/danielvladco/go-proto-gql/pb"
proto "github.com/gogo/protobuf/proto"
_ "github.com/vdaas/vald/apis/grpc/payload"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// init registers the gzipped file descriptor with the gogo/protobuf
// registry at package load time. Generated code — do not edit by hand.
func init() { proto.RegisterFile("sidecar/sidecar.proto", fileDescriptor_a79f12d5eccb8a6a) }

// fileDescriptor_a79f12d5eccb8a6a is the gzip-compressed serialized
// FileDescriptorProto for sidecar/sidecar.proto. Generated — do not edit.
var fileDescriptor_a79f12d5eccb8a6a = []byte{
	// 188 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2d, 0xce, 0x4c, 0x49,
	0x4d, 0x4e, 0x2c, 0xd2, 0x87, 0xd2, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xec, 0x50, 0xae,
	0x14, 0x6f, 0x41, 0x62, 0x65, 0x4e, 0x7e, 0x62, 0x0a, 0x44, 0x5c, 0x4a, 0x26, 0x3d, 0x3f, 0x3f,
	0x3d, 0x27, 0x55, 0x3f, 0xb1, 0x20, 0x53, 0x3f, 0x31, 0x2f, 0x2f, 0xbf, 0x24, 0xb1, 0x24, 0x33,
	0x3f, 0xaf, 0x18, 0x2a, 0xcb, 0x53, 0x90, 0xa4, 0x9f, 0x5e, 0x98, 0x03, 0xe1, 0x19, 0x71, 0x72,
	0xb1, 0x07, 0x43, 0x4c, 0x71, 0xca, 0x3d, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07,
	0x8f, 0xe4, 0x18, 0xb9, 0x64, 0xf2, 0x8b, 0xd2, 0xf5, 0xca, 0x52, 0x12, 0x13, 0x8b, 0xf5, 0xca,
	0x12, 0x73, 0x52, 0xf4, 0x12, 0xd3, 0x53, 0xf3, 0x4a, 0xf4, 0xa0, 0x36, 0x3a, 0x09, 0x84, 0x25,
	0xe6, 0xa4, 0x38, 0x82, 0x84, 0xa0, 0xba, 0x03, 0x18, 0xa3, 0x74, 0xd3, 0x33, 0x4b, 0x32, 0x4a,
	0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xc1, 0x1a, 0xf5, 0x41, 0x1a, 0x41, 0xae, 0x28, 0xd6, 0x4f,
	0x2f, 0x2a, 0x48, 0xd6, 0x07, 0x1b, 0x01, 0xf3, 0x43, 0x12, 0x1b, 0xd8, 0x01, 0xc6, 0x80, 0x00,
	0x00, 0x00, 0xff, 0xff, 0x1c, 0x2f, 0x76, 0x1a, 0xdd, 0x00, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4

// SidecarClient is the client API for Sidecar service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SidecarClient interface {
}

// sidecarClient is the generated SidecarClient implementation; it only
// holds the connection because the service declares no RPC methods.
type sidecarClient struct {
	cc *grpc.ClientConn
}

// NewSidecarClient returns a SidecarClient backed by the given connection.
func NewSidecarClient(cc *grpc.ClientConn) SidecarClient {
	return &sidecarClient{cc}
}

// SidecarServer is the server API for Sidecar service.
type SidecarServer interface {
}

// UnimplementedSidecarServer can be embedded to have forward compatible implementations.
type UnimplementedSidecarServer struct {
}

// RegisterSidecarServer registers srv with the gRPC server s under the
// Sidecar service descriptor.
func RegisterSidecarServer(s *grpc.Server, srv SidecarServer) {
	s.RegisterService(&_Sidecar_serviceDesc, srv)
}

// _Sidecar_serviceDesc describes the (currently method-less) Sidecar service.
var _Sidecar_serviceDesc = grpc.ServiceDesc{
	ServiceName: "sidecar.Sidecar",
	HandlerType: (*SidecarServer)(nil),
	Methods:     []grpc.MethodDesc{},
	Streams:     []grpc.StreamDesc{},
	Metadata:    "sidecar/sidecar.proto",
}
|
package group
import "titan-auth/types"
// CreateGroupReq is the request body for creating a group (组织).
type CreateGroupReq struct {
	Name        string `json:"name"`
	Parent      string `json:"parent_id"` // parent group ID — presumably empty for a root group; confirm
	Description string `json:"description"`
}

// CreateGroupResp is the response for group creation; it embeds the
// created types.Group record.
type CreateGroupResp struct {
	types.Group
}

// QueryGroupsReq is the (empty) request for listing all groups.
type QueryGroupsReq struct {
}

// QueryGroupsResp is the response for listing all groups.
type QueryGroupsResp struct {
	Groups []types.Group `json:"groups,omitempty"`
}
// 更新组织请求
type UpdateGroupReq struct {
Name string `json:"name,omptempty"`
Description string `json:"description,omptempty"`
}
// AddGroupUsersReq is the request for adding members to a group.
type AddGroupUsersReq struct {
	Users []struct {
		UserID  string `json:"user_id"`
		IsAdmin bool   `json:"is_admin"` // whether the member is a group administrator
	} `json:"users,omitempty"`
}

// AddGroupUsersResp is the (empty) response for adding group members.
type AddGroupUsersResp struct {
}

// QueryGroupUsersReq is the request for listing a group's members.
type QueryGroupUsersReq struct {
	Group string `json:"group"` // group identifier — presumably the group ID; verify against caller
}

// QueryGroupUsersResp is the response listing a group's members.
type QueryGroupUsersResp struct {
	Users []types.GroupUser `json:"users,omitempty"`
}

// DelGroupUserReq is the request for removing a user from a group.
type DelGroupUserReq struct {
	Group string `json:"group"`
	User  string `json:"user"`
}
|
package main
import (
"flag"
"fmt"
"net"
"strings"
"syscall"
"github.com/syossan27/tebata"
)
var (
address string
protocol string
ports string
)
// listen opens a listener on address:port for the given protocol and logs
// each accepted peer. The listener's Close is registered with t so a
// SIGINT/SIGTERM tears it down. Blocks until Accept fails (e.g. when the
// listener is closed), then returns the Accept error.
func listen(address, port, protocol string, t *tebata.Tebata) error {
	listenAddress := address + ":" + port
	fmt.Println(listenAddress, protocol)
	lis, err := net.Listen(protocol, listenAddress)
	if err != nil {
		fmt.Println(err)
		return err
	}
	defer lis.Close()
	t.Reserve(lis.Close)
	for {
		conn, err := lis.Accept()
		if err != nil {
			fmt.Println(err)
			return err
		}
		fmt.Printf("from: %v to: %s\n", conn.RemoteAddr(), port)
		// BUG FIX: accepted connections were never closed, leaking one
		// file descriptor per connection. This tool only logs the peer,
		// so the connection can be closed immediately after logging.
		conn.Close()
	}
}
// main parses the flags, spawns a listener goroutine for every requested
// port except the last, and serves the final port on the main goroutine so
// the process stays alive.
func main() {
	flag.StringVar(&address, "address", "0.0.0.0", "Address to listen on")
	flag.StringVar(&ports, "ports", "8121,8122,8123", "Ports to listen on, comma separated, no spaces.")
	flag.StringVar(&protocol, "protocol", "tcp", "Protocol use")
	flag.Parse()

	t := tebata.New(syscall.SIGINT, syscall.SIGTERM)

	portList := strings.Split(ports, ",")
	last := len(portList) - 1
	for _, p := range portList[:last] {
		go func(p string) {
			if err := listen(address, p, protocol, t); err != nil {
				fmt.Println("Error: ", err)
			}
		}(p)
	}
	// The final port blocks here, keeping main (and the goroutines) alive.
	if err := listen(address, portList[last], protocol, t); err != nil {
		fmt.Println(err)
	}
}
|
//go:build browsertest
package checkbox_test
import (
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/shurcooL/frontend/checkbox"
"github.com/shurcooL/go/gopherjs_http"
"github.com/shurcooL/go/open"
"github.com/shurcooL/httpfs/httputil"
)
// Test serves the checkbox frontend (compiled to JavaScript by
// gopherjs_http) on a throwaway HTTP server and opens it in the local
// browser for manual inspection. It deliberately never returns: the final
// select{} blocks forever so the server stays up until the test process is
// killed (the file is guarded by the browsertest build tag).
func Test(t *testing.T) {
	http.Handle("/script.js", httputil.FileHandler{gopherjs_http.Package("github.com/shurcooL/frontend/checkbox")})
	{
		defaultValue := false
		queryParameter := "some-optional-thing"
		http.HandleFunc("/index.html", func(w http.ResponseWriter, req *http.Request) {
			// Render the checkbox reflecting the current query parameter.
			query := req.URL.Query()
			checkboxHtml := checkbox.New(defaultValue, query, queryParameter)
			io.WriteString(w, `<html><head><script type="text/javascript" src="/script.js"></script></head><body>`+string(checkboxHtml)+"</body></html>")
		})
	}
	// A nil handler makes the test server use http.DefaultServeMux, where
	// the handlers above were registered.
	ts := httptest.NewServer(nil)
	defer ts.Close()
	open.Open(ts.URL + "/index.html")
	select {}
}
|
package 一维子序列问题
// findNumberOfLIS returns the number of longest strictly increasing
// subsequences in nums (LeetCode 673).
//
// dp[i]    — length of the longest increasing subsequence ending at nums[i].
// count[i] — how many such longest subsequences end at nums[i].
func findNumberOfLIS(nums []int) int {
	if len(nums) == 0 {
		return 0
	}
	// BUG FIX: the original used a fixed [2005]int array for dp (panicking
	// for inputs longer than 2005 elements) and a map for count; both are
	// now slices sized to the input.
	dp := make([]int, len(nums))
	count := make([]int, len(nums))
	maxLength := 0 // length of the longest increasing subsequence so far
	for i := range nums {
		dp[i] = 1    // the element alone is a subsequence of length 1
		count[i] = 1 // ...and there is exactly one such subsequence
		for t := 0; t < i; t++ {
			// Only strictly smaller earlier values may extend (equal values
			// do not, since the subsequence must be strictly increasing).
			if nums[i] <= nums[t] {
				continue
			}
			if dp[t]+1 == dp[i] {
				count[i] += count[t] // another way to reach the same length
			} else if dp[t]+1 > dp[i] {
				dp[i] = dp[t] + 1
				count[i] = count[t]
			}
		}
		if dp[i] > maxLength {
			maxLength = dp[i]
		}
	}
	ans := 0 // number of longest increasing subsequences in nums
	for i := range nums {
		if dp[i] == maxLength {
			ans += count[i]
		}
	}
	return ans
}
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
/*
题目链接:
https://leetcode-cn.com/problems/number-of-longest-increasing-subsequence/ 最长递增子序列的个数
*/
/*
总结
1. 之前就在纠结,当nums[i]==nums[t]时是否要拓展,纠结好久后就去看答案了,发现等于时不用拓展。
只在nums[i] > nums[t]时拓展。
*/ |
// Copyright 2019-2023 The sakuracloud_exporter Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector
import (
"context"
"fmt"
"log/slog"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/sacloud/iaas-api-go"
"github.com/sacloud/iaas-api-go/types"
"github.com/sacloud/packages-go/newsfeed"
"github.com/sacloud/sakuracloud_exporter/platform"
)
// VPCRouterCollector collects metrics about all VPC routers.
type VPCRouterCollector struct {
	ctx    context.Context
	logger *slog.Logger
	errors *prometheus.CounterVec // collector error counter, incremented under the "vpc_router" label
	client platform.VPCRouterClient

	// Metric descriptors; all carry the base id/name/zone labels, some with
	// extra labels (NIC index, peer address, session-analysis type, ...).
	Up                   *prometheus.Desc
	SessionCount         *prometheus.Desc
	VPCRouterInfo        *prometheus.Desc
	Receive              *prometheus.Desc
	Send                 *prometheus.Desc
	CPUTime              *prometheus.Desc
	DHCPLeaseCount       *prometheus.Desc
	L2TPSessionCount     *prometheus.Desc
	PPTPSessionCount     *prometheus.Desc
	SiteToSitePeerStatus *prometheus.Desc
	SessionAnalysis      *prometheus.Desc
	MaintenanceScheduled *prometheus.Desc
	MaintenanceInfo      *prometheus.Desc
	MaintenanceStartTime *prometheus.Desc
	MaintenanceEndTime   *prometheus.Desc
}
// NewVPCRouterCollector returns a new VPCRouterCollector.
// It pre-registers the "vpc_router" error-counter label (so the series
// exists at value 0) and builds all metric descriptors up front.
func NewVPCRouterCollector(ctx context.Context, logger *slog.Logger, errors *prometheus.CounterVec, client platform.VPCRouterClient) *VPCRouterCollector {
	errors.WithLabelValues("vpc_router").Add(0)

	// Base labels shared by every metric, plus extended label sets for the
	// info, per-NIC, site-to-site peer and session-analysis metrics.
	vpcRouterLabels := []string{"id", "name", "zone"}
	vpcRouterInfoLabels := append(vpcRouterLabels, "plan", "ha", "vrid", "vip", "ipaddress1", "ipaddress2", "nw_mask_len", "internet_connection", "tags", "description")
	nicLabels := append(vpcRouterLabels, "nic_index", "vip", "ipaddress1", "ipaddress2", "nw_mask_len")
	s2sPeerLabels := append(vpcRouterLabels, "peer_address", "peer_index")
	sessionAnalysisLabels := append(vpcRouterLabels, "type", "label")
	return &VPCRouterCollector{
		ctx:    ctx,
		logger: logger,
		errors: errors,
		client: client,
		Up: prometheus.NewDesc(
			"sakuracloud_vpc_router_up",
			"If 1 the vpc_router is up and running, 0 otherwise",
			vpcRouterLabels, nil,
		),
		SessionCount: prometheus.NewDesc(
			"sakuracloud_vpc_router_session",
			"Current session count",
			vpcRouterLabels, nil,
		),
		VPCRouterInfo: prometheus.NewDesc(
			"sakuracloud_vpc_router_info",
			"A metric with a constant '1' value labeled by vpc_router information",
			vpcRouterInfoLabels, nil,
		),
		CPUTime: prometheus.NewDesc(
			"sakuracloud_vpc_router_cpu_time",
			"VPCRouter's CPU time(unit: ms)",
			vpcRouterLabels, nil,
		),
		DHCPLeaseCount: prometheus.NewDesc(
			"sakuracloud_vpc_router_dhcp_lease",
			"Current DHCPServer lease count",
			vpcRouterLabels, nil,
		),
		L2TPSessionCount: prometheus.NewDesc(
			"sakuracloud_vpc_router_l2tp_session",
			"Current L2TP-IPsec session count",
			vpcRouterLabels, nil,
		),
		PPTPSessionCount: prometheus.NewDesc(
			"sakuracloud_vpc_router_pptp_session",
			"Current PPTP session count",
			vpcRouterLabels, nil,
		),
		SiteToSitePeerStatus: prometheus.NewDesc(
			"sakuracloud_vpc_router_s2s_peer_up",
			"If 1 the vpc_router's site to site peer is up, 0 otherwise",
			s2sPeerLabels, nil,
		),
		Receive: prometheus.NewDesc(
			"sakuracloud_vpc_router_receive",
			"VPCRouter's receive bytes(unit: Kbps)",
			nicLabels, nil,
		),
		Send: prometheus.NewDesc(
			"sakuracloud_vpc_router_send",
			// BUG FIX: the help text previously said "receive bytes",
			// copy-pasted from the Receive descriptor above.
			"VPCRouter's send bytes(unit: Kbps)",
			nicLabels, nil,
		),
		SessionAnalysis: prometheus.NewDesc(
			"sakuracloud_vpc_router_session_analysis",
			"Session statistics for VPC routers",
			sessionAnalysisLabels, nil,
		),
		MaintenanceScheduled: prometheus.NewDesc(
			"sakuracloud_vpc_router_maintenance_scheduled",
			"If 1 the vpc router has scheduled maintenance info, 0 otherwise",
			vpcRouterLabels, nil,
		),
		MaintenanceInfo: prometheus.NewDesc(
			"sakuracloud_vpc_router_maintenance_info",
			"A metric with a constant '1' value labeled by maintenance information",
			append(vpcRouterLabels, "info_url", "info_title", "description", "start_date", "end_date"), nil,
		),
		MaintenanceStartTime: prometheus.NewDesc(
			"sakuracloud_vpc_router_maintenance_start",
			"Scheduled maintenance start time in seconds since epoch (1970)",
			vpcRouterLabels, nil,
		),
		MaintenanceEndTime: prometheus.NewDesc(
			"sakuracloud_vpc_router_maintenance_end",
			"Scheduled maintenance end time in seconds since epoch (1970)",
			vpcRouterLabels, nil,
		),
	}
}
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector.
func (c *VPCRouterCollector) Describe(ch chan<- *prometheus.Desc) {
	for _, desc := range []*prometheus.Desc{
		c.Up,
		c.VPCRouterInfo,
		c.CPUTime,
		c.SessionCount,
		c.DHCPLeaseCount,
		c.L2TPSessionCount,
		c.PPTPSessionCount,
		c.SiteToSitePeerStatus,
		c.Receive,
		c.Send,
		c.SessionAnalysis,
		c.MaintenanceScheduled,
		c.MaintenanceInfo,
		c.MaintenanceStartTime,
		c.MaintenanceEndTime,
	} {
		ch <- desc
	}
}
// Collect is called by the Prometheus registry when collecting metrics.
// It lists all VPC routers and emits up/info metrics for each; for routers
// that are up it additionally fans out goroutines to fetch CPU, status,
// per-NIC and maintenance metrics, waiting for all of them before
// returning.
func (c *VPCRouterCollector) Collect(ch chan<- prometheus.Metric) {
	vpcRouters, err := c.client.Find(c.ctx)
	if err != nil {
		c.errors.WithLabelValues("vpc_router").Add(1)
		c.logger.Warn(
			"can't list vpc routers",
			slog.Any("err", err),
		)
		// Intentionally no return: vpcRouters is empty/nil on error, so
		// the loop below simply does nothing.
	}
	var wg sync.WaitGroup
	wg.Add(len(vpcRouters))
	for i := range vpcRouters {
		// The closure is invoked synchronously; it exists to scope the
		// deferred wg.Done and the captured per-router pointer for the
		// goroutines spawned inside.
		func(vpcRouter *platform.VPCRouter) {
			defer wg.Done()
			vpcRouterLabels := c.vpcRouterLabels(vpcRouter)
			var up float64
			if vpcRouter.InstanceStatus.IsUp() {
				up = 1.0
			}
			ch <- prometheus.MustNewConstMetric(
				c.Up,
				prometheus.GaugeValue,
				up,
				vpcRouterLabels...,
			)
			ch <- prometheus.MustNewConstMetric(
				c.VPCRouterInfo,
				prometheus.GaugeValue,
				float64(1.0),
				c.vpcRouterInfoLabels(vpcRouter)...,
			)
			// Detailed metrics are only collected while the router is
			// available and running.
			if vpcRouter.Availability.IsAvailable() && vpcRouter.InstanceStatus.IsUp() {
				// collect metrics per resources under server
				now := time.Now()
				// CPU-TIME
				wg.Add(1)
				go func() {
					c.collectCPUTime(ch, vpcRouter, now)
					wg.Done()
				}()
				if len(vpcRouter.Interfaces) > 0 {
					wg.Add(1)
					go func() {
						defer wg.Done()
						status, err := c.client.Status(c.ctx, vpcRouter.ZoneName, vpcRouter.ID)
						if err != nil {
							c.errors.WithLabelValues("vpc_router").Add(1)
							c.logger.Warn(
								"can't fetch vpc_router's status",
								slog.Any("err", err),
							)
							return
						}
						if status == nil {
							return
						}
						// Session Count
						ch <- prometheus.MustNewConstMetric(
							c.SessionCount,
							prometheus.GaugeValue,
							float64(status.SessionCount),
							c.vpcRouterLabels(vpcRouter)...,
						)
						// DHCP Server Leases
						ch <- prometheus.MustNewConstMetric(
							c.DHCPLeaseCount,
							prometheus.GaugeValue,
							float64(len(status.DHCPServerLeases)),
							c.vpcRouterLabels(vpcRouter)...,
						)
						// L2TP/IPsec Sessions
						ch <- prometheus.MustNewConstMetric(
							c.L2TPSessionCount,
							prometheus.GaugeValue,
							float64(len(status.L2TPIPsecServerSessions)),
							c.vpcRouterLabels(vpcRouter)...,
						)
						// PPTP Sessions
						ch <- prometheus.MustNewConstMetric(
							c.PPTPSessionCount,
							prometheus.GaugeValue,
							float64(len(status.PPTPServerSessions)),
							c.vpcRouterLabels(vpcRouter)...,
						)
						// Site to Site Peer: one gauge per peer, labeled by
						// peer address and positional index.
						for i, peer := range status.SiteToSiteIPsecVPNPeers {
							up := float64(0)
							if strings.ToLower(peer.Status) == "up" {
								up = float64(1)
							}
							labels := append(c.vpcRouterLabels(vpcRouter),
								peer.Peer,
								fmt.Sprintf("%d", i),
							)
							ch <- prometheus.MustNewConstMetric(
								c.SiteToSitePeerStatus,
								prometheus.GaugeValue,
								up,
								labels...,
							)
						}
						// Session analysis: one gauge per (type, entry name).
						if status.SessionAnalysis != nil {
							sessionAnalysis := map[string][]*iaas.VPCRouterStatisticsValue{
								"SourceAndDestination": status.SessionAnalysis.SourceAndDestination,
								"DestinationAddress":   status.SessionAnalysis.DestinationAddress,
								"DestinationPort":      status.SessionAnalysis.DestinationPort,
								"SourceAddress":        status.SessionAnalysis.SourceAddress,
							}
							for typeName, analysis := range sessionAnalysis {
								for _, v := range analysis {
									labels := append(c.vpcRouterLabels(vpcRouter), typeName, v.Name)
									ch <- prometheus.MustNewConstMetric(
										c.SessionAnalysis,
										prometheus.GaugeValue,
										float64(v.Count),
										labels...,
									)
								}
							}
						}
					}()
					// collect metrics
					for _, nic := range vpcRouter.Interfaces {
						// NIC(Receive/Send)
						wg.Add(1)
						go func(nic *iaas.VPCRouterInterface) {
							c.collectNICMetrics(ch, vpcRouter, nic.Index, now)
							wg.Done()
						}(nic)
					}
				}
				// maintenance info
				var maintenanceScheduled float64
				if vpcRouter.InstanceHostInfoURL != "" {
					maintenanceScheduled = 1.0
					wg.Add(1)
					go func() {
						c.collectMaintenanceInfo(ch, vpcRouter)
						wg.Done()
					}()
				}
				ch <- prometheus.MustNewConstMetric(
					c.MaintenanceScheduled,
					prometheus.GaugeValue,
					maintenanceScheduled,
					vpcRouterLabels...,
				)
			}
		}(vpcRouters[i])
	}
	wg.Wait()
}
// vpcRouterLabels returns the base label values (id, name, zone) shared by
// every vpc-router metric.
func (c *VPCRouterCollector) vpcRouterLabels(vpcRouter *platform.VPCRouter) []string {
	labels := make([]string, 0, 3)
	labels = append(labels, vpcRouter.ID.String(), vpcRouter.Name, vpcRouter.ZoneName)
	return labels
}
// vpcRouterPlanMapping maps VPC router plan IDs to the human-readable plan
// name used as the "plan" label value in the info metric.
var vpcRouterPlanMapping = map[types.ID]string{
	types.VPCRouterPlans.Standard: "standard",
	types.VPCRouterPlans.Premium:  "premium",
	types.VPCRouterPlans.HighSpec: "highspec",
}
// vpcRouterInfoLabels returns the label values for the *_info metric: the
// base labels plus plan name, HA flag, VRID, the VIP/IP addresses and mask
// length of interface index 0, internet-connection flag, tags and
// description.
func (c *VPCRouterCollector) vpcRouterInfoLabels(vpcRouter *platform.VPCRouter) []string {
	labels := c.vpcRouterLabels(vpcRouter)
	// Any non-Standard plan is reported as HA — presumably those plans run
	// redundant instances; confirm against the plan documentation.
	isHA := "0"
	if vpcRouter.PlanID != types.VPCRouterPlans.Standard {
		isHA = "1"
	}
	internetConn := "0"
	if vpcRouter.Settings.InternetConnectionEnabled {
		internetConn = "1"
	}
	// A negative VRID is reported as an empty label value.
	vrid := vpcRouter.Settings.VRID
	strVRID := fmt.Sprintf("%d", vrid)
	if vrid < 0 {
		strVRID = ""
	}
	// Address details come from the first (index 0) interface when present;
	// otherwise the mask length stays "-" and the addresses stay empty.
	var vip, ipaddress1, ipaddress2 string
	var nwMaskLen = "-"
	if nicSetting := findVPCRouterInterfaceSettingByIndex(vpcRouter.Settings.Interfaces, 0); nicSetting != nil {
		vip = nicSetting.VirtualIPAddress
		if len(nicSetting.IPAddress) > 0 {
			ipaddress1 = nicSetting.IPAddress[0]
		}
		if len(nicSetting.IPAddress) > 1 {
			ipaddress2 = nicSetting.IPAddress[1]
		}
		nwMaskLen = fmt.Sprintf("%d", nicSetting.NetworkMaskLen)
	}
	return append(labels,
		vpcRouterPlanMapping[vpcRouter.GetPlanID()],
		isHA,
		strVRID,
		vip,
		ipaddress1,
		ipaddress2,
		nwMaskLen,
		internetConn,
		flattenStringSlice(vpcRouter.Tags),
		vpcRouter.Description,
	)
}
// findVPCRouterInterfaceSettingByIndex returns the interface setting whose
// Index equals index, or nil when there is no such interface.
func findVPCRouterInterfaceSettingByIndex(settings []*iaas.VPCRouterInterfaceSetting, index int) *iaas.VPCRouterInterfaceSetting {
	for i := range settings {
		if settings[i].Index == index {
			return settings[i]
		}
	}
	return nil
}
// getInterfaceByIndex returns the interface setting with the given Index,
// or nil when none matches.
// NOTE(review): duplicates findVPCRouterInterfaceSettingByIndex — consider
// consolidating the two helpers.
func getInterfaceByIndex(interfaces []*iaas.VPCRouterInterfaceSetting, index int) *iaas.VPCRouterInterfaceSetting {
	for i := range interfaces {
		if interfaces[i].Index == index {
			return interfaces[i]
		}
	}
	return nil
}
// nicLabels returns the label values for per-NIC metrics: the base labels
// plus the NIC index and its VIP/IP addresses and mask length taken from
// the router settings. Returns nil when index is beyond the router's
// interface list.
func (c *VPCRouterCollector) nicLabels(vpcRouter *platform.VPCRouter, index int) []string {
	if len(vpcRouter.Interfaces) <= index {
		return nil
	}
	var vip, ipaddress1, ipaddress2 string
	nwMaskLen := ""
	labels := c.vpcRouterLabels(vpcRouter)
	// The interface may exist without a matching settings entry, in which
	// case the address labels stay empty.
	nic := getInterfaceByIndex(vpcRouter.Settings.Interfaces, index)
	if nic != nil {
		vip = nic.VirtualIPAddress
		if len(nic.IPAddress) > 0 {
			ipaddress1 = nic.IPAddress[0]
		}
		if len(nic.IPAddress) > 1 {
			ipaddress2 = nic.IPAddress[1]
		}
		nwMaskLen = fmt.Sprintf("%d", nic.NetworkMaskLen)
	}
	return append(labels,
		fmt.Sprintf("%d", index),
		vip,
		ipaddress1,
		ipaddress2,
		nwMaskLen,
	)
}
// collectNICMetrics fetches the monitor values for one NIC and emits
// receive/send gauges timestamped with the sample time. Positive raw
// values are multiplied by 8/1000 — presumably converting bytes/s to Kbps
// to match the descriptors' "unit: Kbps" help text; confirm against the
// monitor API.
func (c *VPCRouterCollector) collectNICMetrics(ch chan<- prometheus.Metric, vpcRouter *platform.VPCRouter, index int, now time.Time) {
	values, err := c.client.MonitorNIC(c.ctx, vpcRouter.ZoneName, vpcRouter.ID, index, now)
	if err != nil {
		c.errors.WithLabelValues("vpc_router").Add(1)
		c.logger.Warn(
			fmt.Sprintf("can't get vpc_router's receive bytes: ID=%d, NICIndex=%d", vpcRouter.ID, index),
			slog.Any("err", err),
		)
		return
	}
	// No sample available for this window: emit nothing.
	if values == nil {
		return
	}
	receive := values.Receive
	if receive > 0 {
		receive = receive * 8 / 1000
	}
	m := prometheus.MustNewConstMetric(
		c.Receive,
		prometheus.GaugeValue,
		receive,
		c.nicLabels(vpcRouter, index)...,
	)
	ch <- prometheus.NewMetricWithTimestamp(values.Time, m)
	send := values.Send
	if send > 0 {
		send = send * 8 / 1000
	}
	m = prometheus.MustNewConstMetric(
		c.Send,
		prometheus.GaugeValue,
		send,
		c.nicLabels(vpcRouter, index)...,
	)
	ch <- prometheus.NewMetricWithTimestamp(values.Time, m)
}
// collectCPUTime fetches the vpc router's CPU-time monitor values and
// emits them as a timestamped gauge, scaled by 1000 to match the
// descriptor's "unit: ms" help text.
func (c *VPCRouterCollector) collectCPUTime(ch chan<- prometheus.Metric, vpcRouter *platform.VPCRouter, now time.Time) {
	values, err := c.client.MonitorCPU(c.ctx, vpcRouter.ZoneName, vpcRouter.ID, now)
	if err != nil {
		// CONSISTENCY FIX: the error counter was incremented under the
		// "server" label and the log message referred to a server — every
		// other method in this collector reports under "vpc_router".
		c.errors.WithLabelValues("vpc_router").Add(1)
		c.logger.Warn(
			fmt.Sprintf("can't get vpc_router's CPU-TIME: ID=%d", vpcRouter.ID),
			slog.Any("err", err),
		)
		return
	}
	// No sample available for this window: emit nothing.
	if values == nil {
		return
	}
	m := prometheus.MustNewConstMetric(
		c.CPUTime,
		prometheus.GaugeValue,
		values.CPUTime*1000,
		c.vpcRouterLabels(vpcRouter)...,
	)
	ch <- prometheus.NewMetricWithTimestamp(values.Time, m)
}
// maintenanceInfoLabels returns the base vpc-router labels extended with
// the maintenance feed item's URL, title, description, and its event
// start/end times formatted as unix-epoch strings.
func (c *VPCRouterCollector) maintenanceInfoLabels(resource *platform.VPCRouter, info *newsfeed.FeedItem) []string {
	start := fmt.Sprintf("%d", info.EventStart().Unix())
	end := fmt.Sprintf("%d", info.EventEnd().Unix())
	return append(c.vpcRouterLabels(resource), info.URL, info.Title, info.Description, start, end)
}
// collectMaintenanceInfo fetches the newsfeed item referenced by the
// router's InstanceHostInfoURL and emits the maintenance info gauge plus
// the start/end epoch gauges. No-op when no maintenance is scheduled.
func (c *VPCRouterCollector) collectMaintenanceInfo(ch chan<- prometheus.Metric, resource *platform.VPCRouter) {
	if resource.InstanceHostInfoURL == "" {
		return
	}
	info, err := c.client.MaintenanceInfo(resource.InstanceHostInfoURL)
	if err != nil {
		c.errors.WithLabelValues("vpc_router").Add(1)
		c.logger.Warn(
			fmt.Sprintf("can't get vpc router's maintenance info: ID=%d", resource.ID),
			slog.Any("err", err),
		)
		return
	}
	infoLabels := c.maintenanceInfoLabels(resource, info)
	// info: constant-1 gauge carrying the descriptive labels
	ch <- prometheus.MustNewConstMetric(
		c.MaintenanceInfo,
		prometheus.GaugeValue,
		1.0,
		infoLabels...,
	)
	// start
	ch <- prometheus.MustNewConstMetric(
		c.MaintenanceStartTime,
		prometheus.GaugeValue,
		float64(info.EventStart().Unix()),
		c.vpcRouterLabels(resource)...,
	)
	// end
	ch <- prometheus.MustNewConstMetric(
		c.MaintenanceEndTime,
		prometheus.GaugeValue,
		float64(info.EventEnd().Unix()),
		c.vpcRouterLabels(resource)...,
	)
}
|
package connector
import (
"sync"
"github.com/mayflower/docker-ls/lib/auth"
)
// tokenCache is a concurrency-safe map from a hint string to the
// auth.Token most recently stored for it.
type tokenCache struct {
	entries map[string]auth.Token // guarded by mutex
	mutex   sync.RWMutex
}
// Get returns the token cached under hint, or the zero Token when the
// hint has never been stored.
func (t *tokenCache) Get(hint string) (token auth.Token) {
	t.mutex.RLock()
	defer t.mutex.RUnlock()
	// A missing key yields the map value type's zero value, which is
	// exactly what the zero-initialized named return already holds.
	token = t.entries[hint]
	return
}
// Set stores token under hint, replacing any previous entry.
func (t *tokenCache) Set(hint string, token auth.Token) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.entries[hint] = token
}
// newTokenCache returns an empty, ready-to-use token cache.
func newTokenCache() *tokenCache {
	cache := new(tokenCache)
	cache.entries = make(map[string]auth.Token)
	return cache
}
|
/*
Copyright 2018 Mark DeNeve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"flag"
"fmt"
"net/http"
"os"
"runtime"
)
var (
	// buf — NOTE(review): nothing in this file reads or writes this
	// buffer; it may be dead code left over from an earlier version.
	buf bytes.Buffer
	// myHostname falls back silently when os.Hostname fails (error ignored).
	myHostname, _ = os.Hostname()
	myOS          = runtime.GOOS
)

// init parses command-line flags before main runs.
// NOTE(review): flag.Parse in init is generally discouraged, and this file
// defines no flags, so parsing here only rejects unknown arguments.
func init() {
	flag.Parse()
}
// rootHandler writes a small static HTML page reporting the server's
// operating system and hostname.
func rootHandler(w http.ResponseWriter, r *http.Request) {
	// Opening tag, then the static head/style section.
	w.Write([]byte(`<html>`))
	w.Write([]byte(`<head>
	<title>Simple Hello World API</title>
	<style>
	label{
	display:inline-block;
	width:75px;
	}
	form label {
	margin: 10px;
	}
	form input {
	margin: 10px;
	}
	</style>
	</head><body>`))
	// Dynamic body lines.
	fmt.Fprintf(w, "<h3>Hello from: %s operating system</h3>", myOS)
	fmt.Fprintf(w, "<h3>My hostname: %s</h3>", myHostname)
	// Closing tags.
	w.Write([]byte(`</body></html>`))
}
// main starts a minimal HTTP server on :8080 serving rootHandler at "/".
func main() {
	fmt.Println("Starting App")
	http.HandleFunc("/", rootHandler)
	listenPort := ":8080"
	fmt.Printf("Listening on port: %v\n", listenPort)
	// BUG FIX: the ListenAndServe error (e.g. port already in use) was
	// silently discarded, so startup failures were invisible.
	if err := http.ListenAndServe(listenPort, nil); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
|
package alg
import (
"algutil"
"math/rand"
"testing"
)
// TestHuffmanCode1 generates Huffman codes for the classic CLRS frequency
// example (a:45 b:13 c:12 d:16 e:9 f:5) and verifies two properties of the
// output: a higher-frequency symbol never receives a longer code than a
// lower-frequency one, and no code is a prefix of another (the codes are
// prefix-free).
func TestHuffmanCode1(t *testing.T) {
	input1 := make([]*HuffmanCodeInput, 0)
	a := new(HuffmanCodeInput)
	a.count = 45
	a.Value = 'a'
	input1 = append(input1, a)
	// CLEANUP: the original re-assigned input1[0] = a here, which was a
	// no-op immediately after the append above; the line has been removed.
	b := new(HuffmanCodeInput)
	b.count = 13
	b.Value = 'b'
	input1 = append(input1, b)
	c := new(HuffmanCodeInput)
	c.count = 12
	c.Value = 'c'
	input1 = append(input1, c)
	d := new(HuffmanCodeInput)
	d.count = 16
	d.Value = 'd'
	input1 = append(input1, d)
	e := new(HuffmanCodeInput)
	e.count = 9
	e.Value = 'e'
	input1 = append(input1, e)
	f := new(HuffmanCodeInput)
	f.count = 5
	f.Value = 'f'
	input1 = append(input1, f)
	output := GenHuffmanCode(input1)
	//answer a = 0, c = 100, b = 101, f = 1100, e = 1101, d = 111
	for i, v1 := range output {
		for j, v2 := range output {
			// Look up the source frequencies of v1 and v2 by value.
			v1Count := 0
			v2Count := 0
			for _, v3 := range input1 {
				if v3.Value == v1.Value {
					v1Count = v3.count
				} else if v3.Value == v2.Value {
					v2Count = v3.count
				}
			}
			if i != j {
				// A more frequent symbol must not get a longer code.
				if v1Count > v2Count {
					algutil.AssertTrue(t, len(v1.code) <= len(v2.code))
				} else if v2Count > v1Count {
					algutil.AssertTrue(t, len(v2.code) <= len(v1.code))
				}
				// The shorter code must not be a prefix of the longer one.
				if len(v1.code) <= len(v2.code) {
					algutil.AssertTrue(t, v1.code != v2.code[:len(v1.code)])
				} else {
					algutil.AssertTrue(t, v2.code != v1.code[:len(v2.code)])
				}
			}
		}
	}
}
// TestHuffmanCode2 repeats the ordering and prefix-free checks of
// TestHuffmanCode1 on 100 randomized inputs: 26 distinct symbols
// ('a'+i) with counts drawn from [0, 20).
func TestHuffmanCode2(t *testing.T) {
	for ti := 0; ti < 100; ti++ {
		input := make([]*HuffmanCodeInput, 0)
		for i := 0; i < 26; i++ {
			a := new(HuffmanCodeInput)
			a.count = rand.Int() % 20
			a.Value = 'a' + i
			input = append(input, a)
		}
		output := GenHuffmanCode(input)
		for i, v1 := range output {
			for j, v2 := range output {
				// Look up the source frequencies of v1 and v2 by value;
				// values are distinct, so the lookup is unambiguous.
				v1Count := 0
				v2Count := 0
				for _, v3 := range input {
					if v3.Value == v1.Value {
						v1Count = v3.count
					} else if v3.Value == v2.Value {
						v2Count = v3.count
					}
				}
				if i != j {
					// A more frequent symbol must not get a longer code.
					if v1Count > v2Count {
						algutil.AssertTrue(t, len(v1.code) <= len(v2.code))
					} else if v2Count > v1Count {
						algutil.AssertTrue(t, len(v2.code) <= len(v1.code))
					}
					// The shorter code must not be a prefix of the longer one.
					if len(v1.code) <= len(v2.code) {
						algutil.AssertTrue(t, v1.code != v2.code[:len(v1.code)])
					} else {
						algutil.AssertTrue(t, v2.code != v1.code[:len(v2.code)])
					}
				}
			}
		}
	}
}
|
package skipList
import (
"math/rand"
"strconv"
"testing"
"time"
)
// BenchmarkSkipList_Insert_Ordered measures inserting sequentially
// increasing keys. The hash of a random value is computed and discarded —
// presumably to keep the per-iteration overhead identical to the
// _Randomly variant below so only the key ordering differs; confirm.
func BenchmarkSkipList_Insert_Ordered(b *testing.B) {
	skipList := NewSkipList(10)
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		// NOTE(review): constructing a freshly seeded rand.Source on every
		// iteration is expensive and dominates the measured cost.
		t := rand.New(rand.NewSource(time.Now().UnixNano())).Intn(100000)
		_ = Hash([]byte(strconv.Itoa(t)))
		skipList.Insert(uint64(i), i)
	}
}

// BenchmarkSkipList_Insert_Randomly measures inserting keys at positions
// derived from hashing a random value.
func BenchmarkSkipList_Insert_Randomly(b *testing.B) {
	skipList := NewSkipList(10)
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		t := rand.New(rand.NewSource(time.Now().UnixNano())).Intn(100000)
		index := Hash([]byte(strconv.Itoa(t)))
		skipList.Insert(index, i)
	}
}
func BenchmarkSkipList_Search_100000Elements(b *testing.B) {
skipList := NewSkipList(32)
for i := 0; i < 100000; i++ {
skipList.Insert(uint64(i), i)
}
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
t := rand.Intn(100000)
index := Hash([]byte(strconv.Itoa(t)))
skipList.Search(index)
}
}
// BenchmarkSkipList_Search_1000000Elements: as the 100000-element benchmark
// above, but with one million pre-inserted sequential keys.
// NOTE(review): probed keys are hashes, inserted keys are raw indices — most
// lookups will miss (see note on the 100000-element benchmark).
func BenchmarkSkipList_Search_1000000Elements(b *testing.B) {
	skipList := NewSkipList(32)
	for i := 0; i < 1000000; i++ {
		skipList.Insert(uint64(i), i)
	}
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		t := rand.Intn(1000000)
		index := Hash([]byte(strconv.Itoa(t)))
		skipList.Search(index)
	}
}
// BenchmarkSkipList_Search_10000000Elements: as above with ten million
// pre-inserted sequential keys on a 32-level list.
func BenchmarkSkipList_Search_10000000Elements(b *testing.B) {
	skipList := NewSkipList(32)
	for i := 0; i < 10000000; i++ {
		skipList.Insert(uint64(i), i)
	}
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		t := rand.Intn(10000000)
		index := Hash([]byte(strconv.Itoa(t)))
		skipList.Search(index)
	}
}
// BenchmarkSkipList_Search_12Level: same workload as the ten-million-element
// search benchmark, but on a 12-level list, to compare level impact.
func BenchmarkSkipList_Search_12Level(b *testing.B) {
	skipList := NewSkipList(12)
	for i := 0; i < 10000000; i++ {
		skipList.Insert(uint64(i), i)
	}
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		t := rand.Intn(10000000)
		index := Hash([]byte(strconv.Itoa(t)))
		skipList.Search(index)
	}
}
// BenchmarkSkipList_Search_24Level: same workload as the ten-million-element
// search benchmark, but on a 24-level list, to compare level impact.
func BenchmarkSkipList_Search_24Level(b *testing.B) {
	skipList := NewSkipList(24)
	for i := 0; i < 10000000; i++ {
		skipList.Insert(uint64(i), i)
	}
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		t := rand.Intn(10000000)
		index := Hash([]byte(strconv.Itoa(t)))
		skipList.Search(index)
	}
}
|
package cmd
import (
"fmt"
"os"
"strings"
"github.com/mattn/go-pipeline"
"github.com/urfave/cli"
)
// Commit lets the user pick exactly one commit-message template through the
// external filter command, resolves the template by its action keyword, asks
// for the commit subject and creates the git commit.
func Commit(c *cli.Context) error {
	cmd, opts := getCmdOpts()
	texts, err := filterMsgs(cmd, opts)
	if err != nil {
		fmt.Println(err)
		return err
	}
	// Nothing selected: silently succeed.
	if len(texts) == 0 {
		return nil
	}
	// NOTE(review): this exits the process instead of returning an error,
	// skipping any cleanup in callers — confirm this is intended.
	if len(texts) != 1 {
		fmt.Println("Can only select 1 message")
		os.Exit(1)
	}
	// NOTE(review): the two space arguments to Replace look identical here;
	// the first is presumably a non-ASCII/wide space being folded to a plain
	// space — verify the original bytes before touching this line.
	text := strings.Replace(texts[0], " ", " ", -1)
	// The second whitespace-separated token is the action keyword.
	a := strings.Split(text, " ")[1]
	msgs, err := getMsgs()
	if err != nil {
		return err
	}
	msg, ok := msgs.getMsgByAction(a)
	if !ok {
		// Unknown action: treated as a no-op, not an error.
		return nil
	}
	// Prompt the user for the commit object/subject.
	obj := scan("message", text)
	if err := commitMsg(msg, obj); err != nil {
		return err
	}
	return nil
}
// commitMsg assembles the final commit message from the template's emoji and
// action plus the user-provided object, then pipes it into
// `git commit --file=-`.
func commitMsg(msg Msg, object string) error {
	text := fmt.Sprintf("%s%s %s", msg.Emoji, msg.Action, object)
	_, err := pipeline.Output(
		[]string{"echo", text},
		[]string{"git", "commit", "--file=-"},
	)
	return err
}
|
// Copyright 2020 Bjerk AS
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tripletex
import (
"encoding/json"
"fmt"
"time"
apiclient "github.com/bjerkio/tripletex-go/client"
"github.com/bjerkio/tripletex-go/client/session"
"github.com/bjerkio/trippl/internal/pkg/db"
)
// tokenDataStruct is a Tripletex session token together with its expiration
// instant, persisted as JSON in the key/value store.
type tokenDataStruct struct {
	Token          string    `json:"token"`
	ExpirationDate time.Time `json:"expirationDate"`
}
// createTokenRequest requests a brand-new session token from the Tripletex
// API, valid until the next local midnight.
func createTokenRequest(consumerToken string, employeeToken string) (*tokenDataStruct, error) {
	fmt.Println("Getting a new token")
	// Expiration is the start of tomorrow in the local zone; time.Date
	// normalizes day+1 across month/year boundaries.
	year, month, day := time.Now().Date()
	expirationDate := time.Date(year, month, day+1, 0, 0, 0, 0, time.Now().Location())
	client := apiclient.Default
	sessionReq := &session.TokenSessionCreateCreateParams{
		ConsumerToken:  consumerToken,
		EmployeeToken:  employeeToken,
		ExpirationDate: expirationDate.Format("2006-01-02"),
	}
	res, err := client.Session.TokenSessionCreateCreate(sessionReq.WithTimeout(10 * time.Second))
	if err != nil {
		return nil, err
	}
	return &tokenDataStruct{
		Token:          res.Payload.Value.Token,
		ExpirationDate: expirationDate,
	}, nil
}
// getTokenData loads the cached Tripletex token from the key/value store.
// It returns (nil, nil) when no token has been stored yet, and an error when
// the store lookup or the JSON decoding fails.
func getTokenData(db db.KeyValueStore) (*tokenDataStruct, error) {
	tokenRes, err := db.Get([]byte("tripletex-token"))
	if err != nil {
		// Previously a store error was silently treated as "no token";
		// surface it so callers can distinguish failure from absence.
		return nil, err
	}
	if tokenRes == nil {
		return nil, nil
	}
	var tokenData tokenDataStruct
	if err := json.Unmarshal(tokenRes, &tokenData); err != nil {
		// Do not hand back a partially populated struct on decode failure.
		return nil, err
	}
	return &tokenData, nil
}
// storeTokenData serializes the token data as JSON and persists it under the
// "tripletex-token" key.
func storeTokenData(db db.KeyValueStore, tokenData *tokenDataStruct) error {
	jsonData, err := json.Marshal(tokenData)
	if err != nil {
		return err
	}
	return db.Set([]byte("tripletex-token"), jsonData)
}
// CreateToken retrieves a Tripletex session token: the cached one when it is
// still valid, otherwise a freshly created token that is cached before being
// returned.
func CreateToken(consumerToken string, employeeToken string, db db.KeyValueStore) (*string, error) {
	tokenData, err := getTokenData(db)
	if err != nil {
		return nil, err
	}
	// Refresh when there is no cached token or the cached one has expired.
	// (A fresh token expires at the next midnight, so at most one refresh
	// is ever needed.)
	if tokenData == nil || tokenData.ExpirationDate.Before(time.Now()) {
		tokenData, err = createTokenRequest(consumerToken, employeeToken)
		if err != nil {
			return nil, err
		}
		// Previously a failed store was carried in err but still returned
		// alongside a token; fail explicitly so a broken cache is noticed.
		if err := storeTokenData(db, tokenData); err != nil {
			return nil, err
		}
	}
	return &tokenData.Token, nil
}
|
package main
// Client is anything that can establish a connection.
type Client interface {
	Conn()
}

// DBClient is a database-backed Client implementation.
type DBClient struct {
	timeout    int
	retryTimes int
}

// Conn establishes the database connection.
func (c DBClient) Conn() {
	// Do SOMETHING
}

// ClientConnOption mutates a ClientConnOptions value (functional option).
type ClientConnOption func(*ClientConnOptions)

// ClientConnOptions holds the tunable connection settings.
type ClientConnOptions struct {
	retryTimes int
	timeout    int
}

// WithRetryTimes overrides the number of connection retries.
func WithRetryTimes(retryTimes int) ClientConnOption {
	return func(o *ClientConnOptions) {
		o.retryTimes = retryTimes
	}
}

// WithTimeout overrides the connection timeout.
// (The returned closure captures the timeout value.)
func WithTimeout(timeout int) ClientConnOption {
	return func(o *ClientConnOptions) {
		o.timeout = timeout
	}
}

// NewClient builds a Client starting from the defaults (3 retries,
// timeout 5) and applying the given functional options in order.
func NewClient(opts ...ClientConnOption) Client {
	cfg := ClientConnOptions{
		retryTimes: 3,
		timeout:    5,
	}
	for _, apply := range opts {
		apply(&cfg)
	}
	return &DBClient{
		timeout:    cfg.timeout,
		retryTimes: cfg.retryTimes,
	}
}
|
/*
* This file is part of impacca. Copyright (C) 2013 and above Shogun <shogun@cowtech.it>.
* Licensed under the MIT license, which can be found at https://choosealicense.com/licenses/mit.
*/
package utils
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/Masterminds/semver"
"github.com/ShogunPanda/impacca/configuration"
)
// commitChecker matches a bare (lowercase hex) commit hash.
var commitChecker = regexp.MustCompile("^[a-f0-9]+$")

// updateChangelogCommitFilter matches housekeeping commits such as
// "Update changelog", "Updated changelog.md." etc. (case-insensitive).
var updateChangelogCommitFilter = regexp.MustCompile("(?i)^(?:(update(?:[ds])? changelog(?:\\.md)?(?:.)?))$")

// versionTagCommitFilter matches version-bump commits such as
// "Version 1.2.3" (case-insensitive).
var versionTagCommitFilter = regexp.MustCompile("(?i)^(?:version\\s+\\d+\\.\\d+\\.\\d+(?:.)?)$")

// Change represents a git commit
type Change struct {
	Hash    string // abbreviated commit hash, lowercased
	Message string // commit subject, without any leading "type:" prefix
	Type    string // change category, e.g. "feat" or "fix"
}

// filterCommit reports whether a commit is housekeeping noise (a changelog
// update, a version bump, or a message that parses as a bare semver version)
// and should be excluded from generated changelogs.
func filterCommit(change Change) bool {
	_, err := semver.NewVersion(change.Message)
	return updateChangelogCommitFilter.MatchString(change.Message) || versionTagCommitFilter.MatchString(change.Message) || err == nil
}
// GetFirstCommitHash returns the hash of the repository's very first commit.
func GetFirstCommitHash() string {
	result := Execute(false, "git", "log", "--reverse", "--format=%H")
	result.Verify("git", "Cannot get first GIT commit")
	// The log is oldest-first; only the first line is needed.
	lines := strings.SplitN(strings.TrimSpace(result.Stdout), "\n", 2)
	return lines[0]
}
// ListChanges lists changes since the last version or between specific
// versions.
//
// An empty version defaults to the latest known version; an empty
// previousVersion defaults to HEAD. Plain versions (not HEAD, not a commit
// hash) get a "v" prefix to match the repository's tag names. Each resulting
// Change carries the commit hash, the subject, and a type: the explicit
// "type:" prefix when present, "fix" when the subject mentions a fix,
// otherwise "feat".
func ListChanges(version, previousVersion string) []Change {
	// Resolve the current version when not given.
	if version == "" {
		versions := GetVersions()
		version = versions[len(versions)-1].String()
	}
	if previousVersion == "" {
		previousVersion = "HEAD"
	}
	if version != "HEAD" && !commitChecker.MatchString(version) {
		version = fmt.Sprintf("v%s", version)
	}
	if previousVersion != "HEAD" && !commitChecker.MatchString(previousVersion) {
		previousVersion = fmt.Sprintf("v%s", previousVersion)
	}
	// Get the list of changes, one "<hash> <subject>" line per commit.
	executionArgs := []string{"log", "--format=%h %s"}
	// NOTE(review): version was prefixed with "v" above, so this comparison
	// can never match "0.0.0" — confirm whether "v0.0.0" was intended.
	if version != "0.0.0" {
		executionArgs = append(executionArgs, fmt.Sprintf("%s...%s", previousVersion, version))
	}
	result := Execute(false, "git", executionArgs...)
	result.Verify("git", "Cannot list GIT changes")
	rawChanges := strings.Split(strings.TrimSpace(result.Stdout), "\n")
	changes := make([]Change, 0, len(rawChanges))
	for _, change := range rawChanges {
		if change == "" {
			continue
		}
		changeTokens := strings.SplitN(change, " ", 2)
		// Default type is "feat" unless an explicit prefix or "fix" is found.
		messageComponents := []string{"feat", changeTokens[1]}
		if strings.Contains(messageComponents[1], ":") {
			messageComponents = strings.SplitN(changeTokens[1], ":", 2)
		} else if strings.Contains(messageComponents[1], "fix") {
			messageComponents[0] = "fix"
		}
		changes = append(
			changes,
			Change{
				strings.ToLower(strings.TrimSpace(changeTokens[0])),
				strings.TrimSpace(messageComponents[1]),
				strings.ToLower(strings.TrimSpace(messageComponents[0])),
			},
		)
	}
	return changes
}
// FormatChanges formats changes to the CHANGELOG.md file format: a dated
// version header followed by "- type: message" bullets, with the previous
// changelog content appended after a blank line. The header is omitted when
// date is the zero time.
func FormatChanges(previous string, version *semver.Version, changes []Change, date time.Time) string {
	// Create the new entry
	var builder strings.Builder
	if !date.IsZero() {
		builder.WriteString(fmt.Sprintf("### %s / %s\n\n", date.Format("2006-01-02"), version.String()))
	}
	for _, change := range changes {
		// Skip housekeeping commits (changelog updates, version bumps).
		if filterCommit(change) {
			continue
		}
		builder.WriteString(fmt.Sprintf("- %s: %s\n", change.Type, change.Message))
	}
	// Append the existing changelog. WriteString avoids the []byte copy the
	// previous Write([]byte(previous)) call paid for.
	builder.WriteString("\n")
	builder.WriteString(previous)
	return builder.String()
}
// FormatReleaseChanges formats changes for a GitHub release, linking each
// entry to its commit page on the given repository.
func FormatReleaseChanges(repository string, changes []Change) string {
	var builder strings.Builder
	for _, change := range changes {
		// Skip housekeeping commits.
		if filterCommit(change) {
			continue
		}
		fmt.Fprintf(
			&builder,
			"- %s: %s ([%s](https://github.com/%s/commit/%s))\n",
			change.Type, change.Message, change.Hash, repository, change.Hash,
		)
	}
	return builder.String()
}
// SaveChanges persists changes from GIT to the CHANGELOG.md file and commits
// the result. When changes is empty the list is recomputed from git since
// currentVersion. In dry-run mode NotifyExecution only announces the steps.
// Fatal aborts the process on read/write failures.
func SaveChanges(newVersion, currentVersion *semver.Version, changes []Change, dryRun bool) {
	cwd, _ := os.Getwd()
	changelog := ""
	var err error
	// Load the existing changelog, if any. (The err inside the if-init
	// shadows the outer err on purpose: only existence is checked here.)
	if _, err := os.Stat(filepath.Join(cwd, "CHANGELOG.md")); !os.IsNotExist(err) {
		rawChangelog, err := ioutil.ReadFile(filepath.Join(cwd, "CHANGELOG.md"))
		if err != nil {
			Fatal("Cannot read file {errorPrimary}CHANGELOG.md{-}: {errorPrimary}%s{-}", err.Error())
		}
		changelog = string(rawChangelog)
	}
	if len(changes) == 0 {
		changes = ListChanges(currentVersion.String(), "")
	}
	if NotifyExecution(dryRun, "Will append", "Appending", " {primary}%d{-} entries to the CHANGELOG.md file ...", len(changes)) {
		// Prepend the new entry to the existing content.
		newChangelog := FormatChanges(changelog, newVersion, changes, time.Now())
		// Save the new file
		err = ioutil.WriteFile(filepath.Join(cwd, "CHANGELOG.md"), []byte(newChangelog), 0644)
		if err != nil {
			Fatal("Cannot update file {errorPrimary}CHANGELOG.md{-}: {errorPrimary}%s{-}", err.Error())
		}
	}
	// Commit changes
	message := strings.TrimSpace(configuration.Current.CommitMessages.Changelog)
	if NotifyExecution(dryRun, "Will execute", "Executing", ": {primary}git commit --all --message \"%s\"{-} ...", message) {
		result := Execute(true, "git", "add", "CHANGELOG.md")
		result.Verify("git", "Cannot add CHANGELOG.md update to git stage area")
		result = Execute(true, "git", "commit", "--all", fmt.Sprintf("--message=%s", message))
		result.Verify("git", "Cannot commit CHANGELOG.md update")
	}
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//146. LRU Cache
//Design and implement a data structure for Least Recently Used (LRU) cache. It should support the following operations: get and put.
//get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.
//put(key, value) - Set or insert the value if the key is not already present. When the cache reaches its capacity, it should invalidate the least recently used item before inserting a new item.
//Follow up:
//Could you do both operations in O(1) time complexity?
//Example:
//LRUCache cache = new LRUCache( 2 /* capacity */ );
//cache.put(1, 1);
//cache.put(2, 2);
//cache.get(1); // returns 1
//cache.put(3, 3); // evicts key 2
//cache.get(2); // returns -1 (not found)
//cache.put(4, 4); // evicts key 1
//cache.get(1); // returns -1 (not found)
//cache.get(3); // returns 3
//cache.get(4); // returns 4
//type LRUCache struct {
//}
//func Constructor(capacity int) LRUCache {
//}
//func (this *LRUCache) Get(key int) int {
//}
//func (this *LRUCache) Put(key int, value int) {
//}
///**
// * Your LRUCache object will be instantiated and called as such:
// * obj := Constructor(capacity);
// * param_1 := obj.Get(key);
// * obj.Put(key,value);
// */
// Time Is Money |
package requests
import (
"encoding/json"
"testing"
"github.com/mitchellh/mapstructure"
"github.com/stretchr/testify/assert"
)
// TestDecodePasswordChangeRequest checks that PasswordChangeRequest decodes
// correctly from JSON, including an explicitly empty password.
func TestDecodePasswordChangeRequest(t *testing.T) {
	encoded := `{"action":"password_change","password":"1234","wallet":"1234"}`
	var decoded PasswordChangeRequest
	// Fail fast on malformed JSON instead of asserting on a zero value.
	if err := json.Unmarshal([]byte(encoded), &decoded); err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, "password_change", decoded.Action)
	assert.Equal(t, "1234", decoded.Password)
	assert.Equal(t, "1234", decoded.Wallet)
	encoded = `{"action":"password_change","password":"","wallet":"1234"}`
	if err := json.Unmarshal([]byte(encoded), &decoded); err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, "password_change", decoded.Action)
	assert.Equal(t, "", decoded.Password)
	assert.Equal(t, "1234", decoded.Wallet)
}
// TestMapStructureDecodePasswordChangeRequest checks that
// PasswordChangeRequest decodes correctly from a generic map via
// mapstructure.
func TestMapStructureDecodePasswordChangeRequest(t *testing.T) {
	request := map[string]interface{}{
		"action":   "password_change",
		"password": "1234",
		"wallet":   "1234",
	}
	var decoded PasswordChangeRequest
	// Fail fast on a decode error instead of asserting on a zero value.
	if err := mapstructure.Decode(request, &decoded); err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, "password_change", decoded.Action)
	assert.Equal(t, "1234", decoded.Password)
	assert.Equal(t, "1234", decoded.Wallet)
}
|
package medias
import (
"encoding/json"
"errors"
"net/http"
"os"
"path/filepath"
"strconv"
"github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
"github.com/Zenika/marcel/api/auth"
"github.com/Zenika/marcel/api/clients"
"github.com/Zenika/marcel/api/commons"
"github.com/Zenika/marcel/api/db/medias"
"github.com/Zenika/marcel/config"
)
// Service exposes the media HTTP handlers and pushes change notifications
// to connected clients.
type Service struct {
	clientsService *clients.Service
}

// NewService builds a media Service wired to the given clients service.
func NewService(clientsService *clients.Service) *Service {
	return &Service{clientsService: clientsService}
}
// GetAllHandler returns the information of every media as JSON.
func (m *Service) GetAllHandler(w http.ResponseWriter, r *http.Request) {
	// Only users and admins may list medias.
	if !auth.CheckPermissions(r, nil, "user", "admin") {
		commons.WriteResponse(w, http.StatusForbidden, "")
		return
	}
	// (Local renamed from "medias", which shadowed the package name.)
	all, err := medias.List()
	if err != nil {
		commons.WriteResponse(w, http.StatusInternalServerError, err.Error())
		return
	}
	commons.WriteJsonResponse(w, all)
}
// GetHandler returns the information of a single media as JSON.
func (m *Service) GetHandler(w http.ResponseWriter, r *http.Request) {
	if !auth.CheckPermissions(r, nil, "user", "admin") {
		commons.WriteResponse(w, http.StatusForbidden, "")
		return
	}
	media := m.getMediaFromRequest(w, r)
	if media == nil {
		// getMediaFromRequest already wrote the error response.
		return
	}
	commons.WriteJsonResponse(w, media)
}
// SaveHandler saves a media posted as JSON, redeploys its plugin files and
// marks it active, then notifies connected clients.
// NOTE(review): unlike DeactivateHandler, saving always re-activates the
// media (activate() runs and IsActive is forced to true) — confirm that a
// save is meant to activate.
func (m *Service) SaveHandler(w http.ResponseWriter, r *http.Request) {
	media := &medias.Media{}
	if err := json.NewDecoder(r.Body).Decode(media); err != nil {
		commons.WriteResponse(w, http.StatusBadRequest, err.Error())
		return
	}
	// Load the stored media to verify existence and ownership before
	// overwriting it with the payload.
	tmpMedia, err := medias.Get(media.ID)
	if err != nil {
		commons.WriteResponse(w, http.StatusInternalServerError, err.Error())
		return
	}
	if tmpMedia == nil {
		commons.WriteResponse(w, http.StatusNotFound, "")
		return
	}
	// Only the stored owner or an admin may overwrite the media.
	if !auth.CheckPermissions(r, []string{tmpMedia.Owner}, "admin") {
		commons.WriteResponse(w, http.StatusForbidden, "")
		return
	}
	// Deploy the plugin frontend files for the updated media.
	if err := activate(media); err != nil {
		commons.WriteResponse(w, http.StatusInternalServerError, err.Error())
		return
	}
	media.IsActive = true
	if err := medias.Update(media); err != nil {
		commons.WriteResponse(w, http.StatusInternalServerError, err.Error())
		return
	}
	commons.WriteJsonResponse(w, media)
	// Notify clients currently displaying this media.
	m.clientsService.SendByMedia(media.ID, "update")
}
// CreateHandler creates a new empty media owned by the authenticated user
// and returns it as JSON.
func (m *Service) CreateHandler(w http.ResponseWriter, r *http.Request) {
	// Any authenticated principal may create a media.
	if !auth.CheckPermissions(r, nil) {
		commons.WriteResponse(w, http.StatusForbidden, "")
		return
	}
	// The creator (token subject) becomes the owner.
	media := medias.New(auth.GetAuth(r).Subject)
	if err := medias.Insert(media); err != nil {
		commons.WriteResponse(w, http.StatusInternalServerError, err.Error())
		return
	}
	commons.WriteJsonResponse(w, media)
}
// ActivateHandler activates a media: deploys its plugin files, persists the
// active flag and notifies connected clients.
func (m *Service) ActivateHandler(w http.ResponseWriter, r *http.Request) {
	media := m.getMediaFromRequest(w, r)
	if media == nil {
		// getMediaFromRequest already wrote the error response.
		return
	}
	// Only the owner or an admin may activate.
	if !auth.CheckPermissions(r, []string{media.Owner}, "admin") {
		commons.WriteResponse(w, http.StatusForbidden, "")
		return
	}
	// Deploy the plugin frontend files before flipping the flag.
	if err := activate(media); err != nil {
		commons.WriteResponse(w, http.StatusInternalServerError, err.Error())
		return
	}
	media.IsActive = true
	if err := medias.Update(media); err != nil {
		commons.WriteResponse(w, http.StatusInternalServerError, err.Error())
		return
	}
	commons.WriteResponse(w, http.StatusOK, "Media is active")
	// Notify clients currently displaying this media.
	m.clientsService.SendByMedia(media.ID, "update")
}
// DeactivateHandler deactivates a media (clears the active flag; deployed
// plugin files are left in place) and notifies connected clients.
func (m *Service) DeactivateHandler(w http.ResponseWriter, r *http.Request) {
	media := m.getMediaFromRequest(w, r)
	if media == nil {
		// getMediaFromRequest already wrote the error response.
		return
	}
	// Only the owner or an admin may deactivate.
	if !auth.CheckPermissions(r, []string{media.Owner}, "admin") {
		commons.WriteResponse(w, http.StatusForbidden, "")
		return
	}
	media.IsActive = false
	if err := medias.Update(media); err != nil {
		commons.WriteResponse(w, http.StatusInternalServerError, err.Error())
		return
	}
	commons.WriteResponse(w, http.StatusOK, "Media has been deactivated")
	// Notify clients currently displaying this media.
	m.clientsService.SendByMedia(media.ID, "update")
}
// DeleteHandler deletes a media from the database and removes its deployed
// plugin files from disk.
func (m *Service) DeleteHandler(w http.ResponseWriter, r *http.Request) {
	media := m.getMediaFromRequest(w, r)
	if media == nil {
		// getMediaFromRequest already wrote the error response.
		return
	}
	// Only the owner or an admin may delete.
	if !auth.CheckPermissions(r, []string{media.Owner}, "admin") {
		commons.WriteResponse(w, http.StatusForbidden, "")
		return
	}
	if err := medias.Delete(media.ID); err != nil {
		commons.WriteResponse(w, http.StatusInternalServerError, err.Error())
		return
	}
	// Remove "mediasDir/<mediaID>" with all deployed plugin instances.
	if err := os.RemoveAll(filepath.Join(config.Default().API().MediasDir(), strconv.Itoa(media.ID))); err != nil {
		commons.WriteResponse(w, http.StatusInternalServerError, err.Error())
		return
	}
	commons.WriteResponse(w, http.StatusOK, "Media has been correctly deleted")
}
// GetPluginFilesHandler serves static frontend files of the given plugin
// instance for the given media, defaulting to index.html.
// NOTE(review): filePath comes straight from the URL and is joined into the
// served path; filepath.Join cleans ".." segments, which can resolve above
// the plugin directory — verify the router cannot deliver a traversal path.
func (m *Service) GetPluginFilesHandler(w http.ResponseWriter, r *http.Request) {
	// Any authenticated principal may fetch plugin frontend files.
	if !auth.CheckPermissions(r, nil) {
		commons.WriteResponse(w, http.StatusForbidden, "")
		return
	}
	vars := mux.Vars(r)
	eltName := vars["eltName"]
	instanceID := vars["instanceId"]
	filePath := vars["filePath"]
	if filePath == "" {
		filePath = "index.html"
	}
	if media := m.getMediaFromRequest(w, r); media != nil {
		pluginDirectoryPath := getPluginDirectory(media, eltName, instanceID)
		pluginFilePath := filepath.Join(pluginDirectoryPath, "frontend", filePath)
		http.ServeFile(w, r, pluginFilePath)
	}
}
// getMediaFromRequest resolves the "idMedia" URL variable into a media.
// On any failure it writes the appropriate error response and returns nil.
func (m *Service) getMediaFromRequest(w http.ResponseWriter, r *http.Request) *medias.Media {
	idMedia, err := strconv.Atoi(mux.Vars(r)["idMedia"])
	if err != nil {
		commons.WriteResponse(w, http.StatusBadRequest, err.Error())
		return nil
	}
	media, err := medias.Get(idMedia)
	if err != nil {
		commons.WriteResponse(w, http.StatusInternalServerError, err.Error())
		return nil
	}
	if media == nil {
		commons.WriteResponse(w, http.StatusNotFound, "")
		return nil
	}
	return media
}
// activate deploys the frontend files of every plugin instance attached to
// the media. Individual copy failures are logged and collected so the
// remaining plugins still get deployed; a single aggregated error is
// returned at the end.
func activate(media *medias.Media) error {
	errorMessages := ""
	for _, mp := range media.Plugins {
		// duplicate plugin files into "medias/{idMedia}/{plugins_EltName}/{idInstance}"
		mpPath := getPluginDirectory(media, mp.EltName, mp.InstanceID)
		// &mp points at the loop variable; this is safe because the callee
		// uses it synchronously within this iteration.
		if err := copyNewInstanceOfPlugin(media, &mp, mpPath); err != nil {
			log.Errorln(err.Error())
			//Don't return an error now, we need to activate the other plugins
			errorMessages += err.Error() + "\n"
		}
	}
	if errorMessages != "" {
		return errors.New(errorMessages)
	}
	return nil
}
// copyNewInstanceOfPlugin copies the plugin's frontend files from the global
// plugins directory into the media instance directory. The media argument is
// currently unused.
func copyNewInstanceOfPlugin(media *medias.Media, mp *medias.MediaPlugin, path string) error {
	return commons.CopyDir(filepath.Join(config.Default().API().PluginsDir(), mp.EltName, "frontend"), filepath.Join(path, "frontend"))
}

// getPluginDirectory returns "<mediasDir>/<mediaID>/<eltName>/<instanceID>".
func getPluginDirectory(media *medias.Media, eltName string, instanceID string) string {
	return filepath.Join(config.Default().API().MediasDir(), strconv.Itoa(media.ID), eltName, instanceID)
}
|
/////////////////////////////////////////////////////////////////////
// arataca89@gmail.com
// 20210417
//
// func Split(s, sep string) []string
//
// Retorna um slice dos tokens de s separados por sep.
// Se s não contém sep e sep não é vazio, retorna um slice de tamanho
// 1 cujo único elemento é s.
// Se sep é "" retorna um slice separando cada caracter de s.
// Se s e sep são iguais a "" retorna uma slice vazia.
// É equivalente a SplitN com n < 0
package main
import (
"fmt"
"strings"
)
// main demonstrates strings.Split with various separators; the expected
// output is listed in the comment at the bottom of the file.
func main() {
	cases := []struct{ s, sep string }{
		{"Dividindo em tokens", " "},
		{"a,e,i,o,u", ","},
		{"I;II;III;IV;V", ";"},
		{"Dividindo em tokens", "#"},
		{"a,e,i,o,u", "#"},
		{"Dividindo em tokens", ""},
		{"", ""},
	}
	for _, c := range cases {
		fmt.Println(strings.Split(c.s, c.sep))
	}
}
// Saída:
// [Dividindo em tokens]
// [a e i o u]
// [I II III IV V]
// [Dividindo em tokens]
// [a,e,i,o,u]
// [D i v i d i n d o e m t o k e n s]
// []
|
package plik
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"github.com/root-gg/utils"
"github.com/root-gg/plik/server/common"
)
// create creates a new empty upload on the Plik Server and returns the
// upload metadata. (Comment fixed: the method is unexported.)
func (c *Client) create(uploadParams *common.Upload) (uploadMetadata *common.Upload, err error) {
	if uploadParams == nil {
		return nil, errors.New("missing upload params")
	}
	var j []byte
	j, err = json.Marshal(uploadParams)
	if err != nil {
		return nil, err
	}
	req, err := c.UploadRequest(uploadParams, "POST", c.URL+"/upload", bytes.NewBuffer(j))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	// MakeRequest handles non-200 statuses and debug logging.
	resp, err := c.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	defer func() { _ = resp.Body.Close() }()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	// Parse json response
	uploadMetadata = &common.Upload{}
	err = json.Unmarshal(body, uploadMetadata)
	if err != nil {
		return nil, err
	}
	if c.Debug {
		fmt.Printf("upload created : %s\n", utils.Sdump(uploadMetadata))
	}
	return uploadMetadata, nil
}
// uploadFile uploads a data stream to the Plik Server and returns the file
// metadata. The multipart body is produced by a goroutine writing into an
// io.Pipe, so the file content is streamed rather than buffered in memory.
func (c *Client) uploadFile(upload *common.Upload, fileParams *common.File, reader io.Reader) (fileInfo *common.File, err error) {
	pipeReader, pipeWriter := io.Pipe()
	multipartWriter := multipart.NewWriter(pipeWriter)
	if upload == nil || fileParams == nil || reader == nil {
		return nil, errors.New("missing file upload parameter")
	}
	// The writer goroutine reports exactly one value (possibly nil) on
	// errCh; closing the pipe with the error unblocks the request body
	// reader on the HTTP side.
	errCh := make(chan error)
	go func(errCh chan error) {
		writer, err := multipartWriter.CreateFormFile("file", fileParams.Name)
		if err != nil {
			err = fmt.Errorf("unable to create multipartWriter : %s", err)
			_ = pipeWriter.CloseWithError(err)
			errCh <- err
			return
		}
		// Stream the caller's reader into the multipart part.
		_, err = io.Copy(writer, reader)
		if err != nil {
			_ = pipeWriter.CloseWithError(err)
			errCh <- err
			return
		}
		// Write the multipart trailer.
		err = multipartWriter.Close()
		if err != nil {
			err = fmt.Errorf("unable to close multipartWriter : %s", err)
			errCh <- err
			return
		}
		_ = pipeWriter.CloseWithError(err)
		errCh <- err
	}(errCh)
	// Stream mode uses a different endpoint prefix.
	mode := "file"
	if upload.Stream {
		mode = "stream"
	}
	var URL *url.URL
	if fileParams.ID != "" {
		URL, err = url.Parse(c.URL + "/" + mode + "/" + upload.ID + "/" + fileParams.ID + "/" + fileParams.Name)
	} else {
		// Old method without file id that can also be used to add files to an existing upload
		if upload.Stream {
			return nil, fmt.Errorf("files must be added to upload before creation for stream mode to work")
		}
		URL, err = url.Parse(c.URL + "/" + mode + "/" + upload.ID)
	}
	if err != nil {
		return nil, err
	}
	req, err := c.UploadRequest(upload, "POST", URL.String(), pipeReader)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", multipartWriter.FormDataContentType())
	resp, err := c.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	// Wait for the writer goroutine; its error takes precedence.
	err = <-errCh
	if err != nil {
		return nil, err
	}
	defer func() { _ = resp.Body.Close() }()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	// Parse json response
	fileInfo = &common.File{}
	err = json.Unmarshal(body, fileInfo)
	if err != nil {
		return nil, err
	}
	if c.Debug {
		fmt.Printf("File uploaded : %s\n", utils.Sdump(fileInfo))
	}
	return fileInfo, nil
}
// UploadRequest creates a new HTTP request carrying the authentication
// headers derived from the given upload params.
func (c *Client) UploadRequest(upload *common.Upload, method, URL string, body io.Reader) (*http.Request, error) {
	req, err := http.NewRequest(method, URL, body)
	if err != nil {
		return nil, err
	}
	// Attach whichever credentials the upload carries.
	if token := upload.Token; token != "" {
		req.Header.Set("X-PlikToken", token)
	}
	if uploadToken := upload.UploadToken; uploadToken != "" {
		req.Header.Set("X-UploadToken", uploadToken)
	}
	if upload.Login != "" && upload.Password != "" {
		// HTTP basic auth: base64 of "login:password".
		req.Header.Set("Authorization", "Basic "+common.EncodeAuthBasicHeader(upload.Login, upload.Password))
	}
	return req, nil
}
// getUploadWithParams returns the remote upload info for the given upload
// params.
func (c *Client) getUploadWithParams(uploadParams *common.Upload) (*Upload, error) {
	req, err := c.UploadRequest(uploadParams, "GET", c.URL+"/upload/"+uploadParams.ID, nil)
	if err != nil {
		return nil, err
	}
	resp, err := c.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	defer func() { _ = resp.Body.Close() }()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	// Decode the server-side metadata and wrap it in a client Upload.
	params := &common.Upload{}
	if err := json.Unmarshal(body, params); err != nil {
		return nil, err
	}
	return newUploadFromMetadata(c, params), nil
}
// downloadFile downloads the remote file from the server. The caller owns
// the returned body and must close it.
func (c *Client) downloadFile(uploadParams *common.Upload, fileParams *common.File) (io.ReadCloser, error) {
	fileURL := c.URL + "/file/" + uploadParams.ID + "/" + fileParams.ID + "/" + fileParams.Name
	req, err := c.UploadRequest(uploadParams, "GET", fileURL, nil)
	if err != nil {
		return nil, err
	}
	resp, err := c.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	return resp.Body, nil
}
// downloadArchive downloads the remote upload files as a zip archive. The
// caller owns the returned body and must close it.
func (c *Client) downloadArchive(uploadParams *common.Upload) (io.ReadCloser, error) {
	archiveURL := c.URL + "/archive/" + uploadParams.ID + "/archive.zip"
	req, err := c.UploadRequest(uploadParams, "GET", archiveURL, nil)
	if err != nil {
		return nil, err
	}
	resp, err := c.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	return resp.Body, nil
}
// removeFile removes the remote file from the server.
func (c *Client) removeFile(uploadParams *common.Upload, fileParams *common.File) error {
	fileURL := c.URL + "/file/" + uploadParams.ID + "/" + fileParams.ID + "/" + fileParams.Name
	req, err := c.UploadRequest(uploadParams, "DELETE", fileURL, nil)
	if err != nil {
		return err
	}
	resp, err := c.MakeRequest(req)
	if err != nil {
		return err
	}
	// The response content is not needed; just release the connection.
	_ = resp.Body.Close()
	return nil
}
// removeUpload removes the remote upload and all the associated files from
// the server.
func (c *Client) removeUpload(uploadParams *common.Upload) error {
	req, err := c.UploadRequest(uploadParams, "DELETE", c.URL+"/upload/"+uploadParams.ID, nil)
	if err != nil {
		return err
	}
	resp, err := c.MakeRequest(req)
	if err != nil {
		return err
	}
	// The response content is not needed; just release the connection.
	_ = resp.Body.Close()
	return nil
}
// MakeRequest performs an HTTP request to a Plik Server HTTP API.
//   - Manages the X-ClientApp / X-ClientVersion / User-Agent headers
//   - Logs the request and response when the client is in Debug mode
//   - Converts non-OK responses into Go errors (the body is closed then)
//
// On success the caller owns resp.Body and must close it.
func (c *Client) MakeRequest(req *http.Request) (resp *http.Response, err error) {
	// Set client version headers
	if c.ClientName != "" {
		req.Header.Set("X-ClientApp", c.ClientName)
	}
	if c.ClientVersion != "" {
		req.Header.Set("X-ClientVersion", c.ClientVersion)
	}
	if c.ClientUserAgent != "" {
		req.Header.Set("User-Agent", c.ClientUserAgent)
	}
	// Log request
	if c.Debug {
		// Skip dumping file/stream upload bodies (potentially huge).
		dumpBody := true
		if req.Method == "POST" && (strings.Contains(req.URL.String(), "/file") || strings.Contains(req.URL.String(), "/stream")) {
			dumpBody = false
		}
		dump, err := httputil.DumpRequest(req, dumpBody)
		if err != nil {
			return nil, fmt.Errorf("unable to dump HTTP request : %s", err)
		}
		fmt.Println(string(dump))
	}
	// Make request
	resp, err = c.HTTPClient.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		// parseErrorResponse closes the body.
		return nil, parseErrorResponse(resp)
	}
	// Log response
	if c.Debug {
		// Skip dumping file/archive download bodies (potentially huge).
		dumpBody := true
		if req.Method == "GET" && (strings.Contains(req.URL.String(), "/file") || strings.Contains(req.URL.String(), "/archive")) {
			dumpBody = false
		}
		dump, err := httputil.DumpResponse(resp, dumpBody)
		if err != nil {
			// Bug fix: the body was previously leaked on this error path.
			_ = resp.Body.Close()
			return nil, fmt.Errorf("unable to dump HTTP response : %s", err)
		}
		fmt.Println(string(dump))
	}
	return resp, nil
}
func parseErrorResponse(resp *http.Response) (err error) {
defer func() { _ = resp.Body.Close() }()
// Reade response body
var body []byte
body, err = io.ReadAll(resp.Body)
if err != nil {
return err
}
if len(body) > 0 {
return fmt.Errorf("%s : %s", resp.Status, string(body))
}
return fmt.Errorf("%s", resp.Status)
}
|
package server
import (
"net/http"
"strconv"
"github.com/empirefox/esecend/cerr"
"github.com/empirefox/esecend/front"
"github.com/gin-gonic/gin"
)
// PostWishlistAdd adds a product to the authenticated user's wishlist and
// returns the saved data as JSON.
func (s *Server) PostWishlistAdd(c *gin.Context) {
	var payload front.WishlistSavePayload
	// Abort presumably writes an error response and reports true when err is
	// non-nil, so each guard ends the handler on failure — confirm against
	// its definition.
	if err := c.BindJSON(&payload); Abort(c, err) {
		return
	}
	data, err := s.DB.WishlistSave(s.TokenUser(c).ID, &payload)
	if Abort(c, err) {
		return
	}
	c.JSON(http.StatusOK, data)
}
// DeleteWishlistItems removes the products whose ids are given in the "s"
// query parameters from the user's wishlist. The single value "all" leaves
// ids nil — presumably WishlistDel treats nil as "delete everything";
// confirm against its definition.
func (s *Server) DeleteWishlistItems(c *gin.Context) {
	rawids := c.Request.URL.Query()["s"]
	var ids []uint
	if !(len(rawids) == 1 && rawids[0] == "all") {
		for _, rawid := range rawids {
			// Malformed ids parse to 0 and are deliberately skipped.
			id, _ := strconv.ParseUint(rawid, 10, 64)
			if id != 0 {
				ids = append(ids, uint(id))
			}
		}
		// No valid id survived: the request parameters were unusable.
		if len(ids) == 0 {
			front.NewCodev(cerr.InvalidUrlParam).Abort(c, http.StatusBadRequest)
			return
		}
	}
	err := s.DB.WishlistDel(s.TokenUser(c).ID, ids)
	if Abort(c, err) {
		return
	}
	c.AbortWithStatus(http.StatusOK)
}
|
// len.
package main
import (
"fmt"
)
// main demonstrates how len and cap behave for slices, empty subslices and
// string suffixes.
func main() {
	ss := []string{"hello world"}
	dump := func(v []string) {
		fmt.Printf("%q\n\tlen:%d\n\n", v, len(v))
	}
	dump(ss)
	ss = append(ss, "foo", "bar")
	dump(ss)
	sub := ss[1:1]
	fmt.Printf("ss[1:1]:%q len:%d, cap:%d\n\n", sub, len(sub), cap(sub))
	s := "hello world"
	// s[len(s):] is the empty suffix of s.
	fmt.Printf("max:%s\n", string(s[len(s):]))
}
|
package friend
import (
"Open_IM/pkg/common/config"
"Open_IM/pkg/common/db/mysql_model/im_mysql_model"
"Open_IM/pkg/common/log"
pbFriend "Open_IM/pkg/proto/friend"
"Open_IM/pkg/utils"
"context"
)
// GetBlacklist returns the user info of every user the token's owner has
// blocked, enriched with the friendship comment when one exists.
func (s *friendServer) GetBlacklist(ctx context.Context, req *pbFriend.GetBlacklistReq) (*pbFriend.GetBlacklistResp, error) {
	log.Info(req.Token, req.OperationID, "rpc get blacklist is server,args=%s", req.String())
	var userInfoList []*pbFriend.UserInfo
	//Parse token, to find current user information
	claims, err := utils.ParseToken(req.Token)
	if err != nil {
		log.Error(req.Token, req.OperationID, "err=%s,parse token failed", err.Error())
		return &pbFriend.GetBlacklistResp{ErrorCode: config.ErrParseToken.ErrCode, ErrorMsg: config.ErrParseToken.ErrMsg}, nil
	}
	blackListInfo, err := im_mysql_model.GetBlackListByUID(claims.UID)
	if err != nil {
		log.Error(req.Token, req.OperationID, "err=%s get blacklist failed", err.Error())
		return &pbFriend.GetBlacklistResp{ErrorCode: config.ErrGetBlackList.ErrCode, ErrorMsg: config.ErrGetBlackList.ErrMsg}, nil
	}
	for _, blackUser := range blackListInfo {
		var blackUserInfo pbFriend.UserInfo
		//Find black user information
		us, err := im_mysql_model.FindUserByUID(blackUser.BlockId)
		if err != nil {
			log.Error(req.Token, req.OperationID, "err=%s search black list userInfo failed", err.Error())
			continue
		}
		// Bug fix: comment is now scoped per iteration. It was previously
		// declared outside the loop, so when the relationship lookup failed
		// for one user the comment of the previous user leaked into their
		// entry.
		var comment string
		friendShip, err := im_mysql_model.FindFriendRelationshipFromFriend(claims.UID, blackUser.BlockId)
		if err == nil {
			comment = friendShip.Comment
		}
		blackUserInfo.Uid = us.UID
		blackUserInfo.Icon = us.Icon
		blackUserInfo.Name = us.Name
		blackUserInfo.Gender = us.Gender
		blackUserInfo.Mobile = us.Mobile
		blackUserInfo.Birth = us.Birth
		blackUserInfo.Email = us.Email
		blackUserInfo.Ex = us.Ex
		blackUserInfo.Comment = comment
		userInfoList = append(userInfoList, &blackUserInfo)
	}
	log.Info(req.Token, req.OperationID, "rpc get blacklist success return")
	return &pbFriend.GetBlacklistResp{Data: userInfoList}, nil
}
|
package helper
import (
"bytes"
"fmt"
"io"
"strings"
)
// NOTE: the functions in this file were borrowed from an external MySQL escaping implementation.
// escapeString returns txt with MySQL special characters backslash-escaped:
// NUL, newline, carriage return, backslash, single quote, double quote and
// Ctrl-Z (0x1A, which MySQL treats as end-of-file on Windows).
func escapeString(txt string) string {
	var out bytes.Buffer
	start := 0
	for idx, r := range txt {
		var repl string
		switch r {
		case 0:
			repl = `\0`
		case '\n':
			repl = `\n`
		case '\r':
			repl = `\r`
		case '\\':
			repl = `\\`
		case '\'':
			repl = `\'`
		case '"':
			repl = `\"`
		case '\032':
			repl = `\Z`
		default:
			continue
		}
		// Copy the clean run since the previous escape, then the escape.
		io.WriteString(&out, txt[start:idx])
		io.WriteString(&out, repl)
		start = idx + 1
	}
	io.WriteString(&out, txt[start:])
	return out.String()
}
// escapeQuotes doubles every single quote in txt (ANSI-SQL style quoting,
// used when the server runs in NO_BACKSLASH_ESCAPES mode).
func escapeQuotes(txt string) string {
	var out bytes.Buffer
	start := 0
	for idx, r := range txt {
		if r != '\'' {
			continue
		}
		io.WriteString(&out, txt[start:idx])
		io.WriteString(&out, `''`)
		start = idx + 1
	}
	io.WriteString(&out, txt[start:])
	return out.String()
}
// MySqlEscape escapes special characters in txt, so it is safe to place the
// returned string into a Query method.
func MySqlEscape(txt string) string {
	// NOTE(review): quote-only escaping for NO_BACKSLASH_ESCAPES server mode
	// is intentionally disabled because the connection status is not
	// consulted here; backslash escaping is always used.
	//if c.Status()&SERVER_STATUS_NO_BACKSLASH_ESCAPES != 0 {
	//	return escapeQuotes(txt)
	//}
	return escapeString(txt)
}
// SqlManyDollars builds the VALUES placeholder list for a bulk INSERT of
// `repeat` rows with `colSize` columns each.
//
// MySQL form:      (?,?),(?,?),...
// PostgreSQL form: ($1,$2),($3,$4),...
//
// Returns "" when colSize or repeat is not positive; the previous
// implementation panicked on an out-of-range slice (s[0:len(s)-1] with
// s == "") in the MySQL branch for colSize == 0.
func SqlManyDollars(colSize, repeat int, isMysql bool) string {
	if colSize <= 0 || repeat <= 0 {
		return ""
	}
	if isMysql {
		row := "(" + strings.Repeat("?,", colSize-1) + "?)"
		return strings.Repeat(row+",", repeat-1) + row
	}
	var b strings.Builder
	arg := 1
	for r := 0; r < repeat; r++ {
		if r > 0 {
			b.WriteString(",")
		}
		b.WriteString("(")
		for c := 0; c < colSize; c++ {
			if c > 0 {
				b.WriteString(",")
			}
			// Postgres placeholders are numbered globally across all rows.
			fmt.Fprintf(&b, "$%d", arg)
			arg++
		}
		b.WriteString(")")
	}
	return b.String()
}
|
// Copyright 2019 The go-interpreter Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package compile
import (
ops "github.com/go-interpreter/wagon/wasm/operators"
)
// scanner walks a function's bytecode looking for runs that a native
// backend can compile. supportedOpcodes is the allow-list of opcodes the
// backend understands; anything outside it breaks a candidate run.
type scanner struct {
	supportedOpcodes map[byte]bool
}
// InstructionMetadata describes a bytecode instruction.
type InstructionMetadata struct {
	// Op is the wasm opcode byte for this instruction.
	Op byte
	// Start represents the byte offset of this instruction
	// in the function's instruction stream.
	Start int
	// Size is the number of bytes in the instruction stream
	// needed to represent this instruction.
	Size int
}
// CompilationCandidate describes a range of bytecode that can
// be translated to native code. End is exclusive (see Bounds).
type CompilationCandidate struct {
	Start uint // Bytecode index of the first opcode.
	End uint // Bytecode index of the last byte in the instruction.
	StartInstruction int // InstructionMeta index of the first instruction.
	EndInstruction int // InstructionMeta index of the last instruction.
	Metrics Metrics // Metrics about the instructions between first & last index.
}
// reset clears the candidate so a new run can be tracked. Note that
// EndInstruction is reset to 1 (not 0), preserving the original behavior;
// all other fields return to their zero values.
func (s *CompilationCandidate) reset() {
	*s = CompilationCandidate{EndInstruction: 1}
}
// Bounds returns the beginning & end index in the bytecode which
// this candidate would replace. The end index is not inclusive.
func (s *CompilationCandidate) Bounds() (uint, uint) {
	return s.Start, s.End
}
// Metrics describes the heuristics of an instruction sequence.
// AllOps counts every instruction in the run, including ones the
// per-opcode heuristic table in ScanFunc does not match.
type Metrics struct {
	MemoryReads, MemoryWrites uint
	StackReads, StackWrites uint
	AllOps int
	IntegerOps int
	FloatOps int
}
// ScanFunc scans the given function information, emitting selections of
// bytecode which could be compiled into function code.
//
// A candidate run is broken at any unsupported opcode and at any inbound
// branch target (a native section cannot be entered halfway through).
// Only runs of at least three instructions are emitted; shorter runs are
// discarded. The returned error is currently always nil.
func (s *scanner) ScanFunc(bytecode []byte, meta *BytecodeMetadata) ([]CompilationCandidate, error) {
	var finishedCandidates []CompilationCandidate
	inProgress := CompilationCandidate{}
	for i, inst := range meta.Instructions {
		// Except for the first instruction, we cant emit a native section
		// where other parts of code try and call into us halfway. Maybe we
		// can support that in the future.
		_, hasInboundTarget := meta.InboundTargets[int64(inst.Start)]
		isInsideBranchTarget := hasInboundTarget && inst.Start > 0 && inProgress.Metrics.AllOps > 0
		if !s.supportedOpcodes[inst.Op] || isInsideBranchTarget {
			// See if the candidate can be emitted.
			if inProgress.Metrics.AllOps > 2 {
				finishedCandidates = append(finishedCandidates, inProgress)
			}
			inProgress.reset()
			continue
		}
		// Still a supported run.
		if inProgress.Metrics.AllOps == 0 {
			// First instruction of the candidate - setup structure.
			inProgress.Start = uint(inst.Start)
			inProgress.StartInstruction = i
		}
		inProgress.EndInstruction = i + 1
		inProgress.End = uint(inst.Start) + uint(inst.Size)
		// Accumulate per-opcode heuristics (memory traffic, stack traffic,
		// integer/float op counts) that later rank this candidate.
		// TODO: Add to this table as backends support more opcodes.
		switch inst.Op {
		case ops.I64Load, ops.I32Load, ops.F64Load, ops.F32Load:
			// The backend knows the operand width for each memory opcode.
			fakeBE := &AMD64Backend{}
			memSize, _ := fakeBE.paramsForMemoryOp(inst.Op)
			inProgress.Metrics.MemoryReads += memSize
			inProgress.Metrics.StackWrites++
		case ops.I64Store, ops.I32Store, ops.F64Store, ops.F32Store:
			fakeBE := &AMD64Backend{}
			memSize, _ := fakeBE.paramsForMemoryOp(inst.Op)
			inProgress.Metrics.MemoryWrites += memSize
			inProgress.Metrics.StackReads += 2
		case ops.I64Const, ops.I32Const, ops.GetLocal, ops.GetGlobal:
			inProgress.Metrics.IntegerOps++
			inProgress.Metrics.StackWrites++
		case ops.F64Const, ops.F32Const:
			inProgress.Metrics.FloatOps++
			inProgress.Metrics.StackWrites++
		case ops.SetLocal, ops.SetGlobal:
			inProgress.Metrics.IntegerOps++
			inProgress.Metrics.StackReads++
		case ops.I64Eqz:
			inProgress.Metrics.IntegerOps++
			inProgress.Metrics.StackReads++
			inProgress.Metrics.StackWrites++
		case ops.I64Eq, ops.I64Ne, ops.I64LtU, ops.I64GtU, ops.I64LeU, ops.I64GeU,
			ops.I64Shl, ops.I64ShrU, ops.I64ShrS,
			ops.I64DivU, ops.I32DivU, ops.I64RemU, ops.I32RemU, ops.I64DivS, ops.I32DivS, ops.I64RemS, ops.I32RemS,
			ops.I64Add, ops.I32Add, ops.I64Sub, ops.I32Sub, ops.I64Mul, ops.I32Mul,
			ops.I64And, ops.I32And, ops.I64Or, ops.I32Or, ops.I64Xor, ops.I32Xor:
			inProgress.Metrics.IntegerOps++
			inProgress.Metrics.StackReads += 2
			inProgress.Metrics.StackWrites++
		case ops.F64Add, ops.F32Add, ops.F64Sub, ops.F32Sub, ops.F64Div, ops.F32Div, ops.F64Mul, ops.F32Mul,
			ops.F64Min, ops.F32Min, ops.F64Max, ops.F32Max,
			ops.F64Eq, ops.F64Ne, ops.F64Lt, ops.F64Gt, ops.F64Le, ops.F64Ge,
			ops.F32Eq, ops.F32Ne, ops.F32Lt, ops.F32Gt, ops.F32Le, ops.F32Ge:
			inProgress.Metrics.FloatOps++
			inProgress.Metrics.StackReads += 2
			inProgress.Metrics.StackWrites++
		case ops.F64ConvertUI64, ops.F64ConvertSI64, ops.F32ConvertUI64, ops.F32ConvertSI64,
			ops.F64ConvertUI32, ops.F64ConvertSI32, ops.F32ConvertUI32, ops.F32ConvertSI32:
			inProgress.Metrics.FloatOps++
			inProgress.Metrics.StackReads++
			inProgress.Metrics.StackWrites++
		case ops.Drop:
			inProgress.Metrics.StackReads++
		case ops.Select:
			inProgress.Metrics.StackReads += 3
			inProgress.Metrics.StackWrites++
		case ops.F64ReinterpretI64, ops.F32ReinterpretI32, ops.I64ReinterpretF64, ops.I32ReinterpretF32:
			inProgress.Metrics.FloatOps++
			inProgress.Metrics.IntegerOps++
		}
		inProgress.Metrics.AllOps++
	}
	// End of instructions - emit the inProgress candidate if
	// its at least 3 instructions.
	if inProgress.Metrics.AllOps > 2 {
		finishedCandidates = append(finishedCandidates, inProgress)
	}
	return finishedCandidates, nil
}
|
package models
import (
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mysql"
"github.com/stretchr/testify/assert"
"strconv"
"strings"
"testing"
)
// TestHarvesterStatus_TableName exercises UpdateHarvesterStatus against a
// live local MySQL instance (integration test; requires the DSN below).
// NOTE(review): the DSN embeds a real-looking password — consider reading it
// from an environment variable instead of committing credentials.
func TestHarvesterStatus_TableName(t *testing.T) {
	dsn := "root:WcGsHDMBmcv7mc#QWkuR@tcp(127.0.0.1:3306)/tezos_index?charset=utf8mb4&parseTime=True&loc=Local"
	db, err := gorm.Open("mysql", dsn)
	// BUG FIX: check the error before deferring Close. Previously Close was
	// deferred first, so a failed Open (nil db) panicked in the deferred call
	// instead of reporting the real connection error.
	if err != nil {
		t.Fatalf("open mysql: %v", err)
	}
	defer db.Close()
	key := "AAAaaa"
	val := "13eed"
	err = UpdateHarvesterStatus(db, key, val)
	assert.NoError(t, err)
}
// TestUpdateHarvesterStatus currently only exercises redis-URL parsing:
// strip the scheme, then split "host:port" from the database index.
func TestUpdateHarvesterStatus(t *testing.T) {
	raw := "redis://127.0.0.1:6379/1"
	parts := strings.Split(strings.TrimPrefix(raw, "redis://"), "/")
	t.Log(parts[0])
	dbIndex, _ := strconv.Atoi(parts[1])
	t.Log(dbIndex)
}
|
package views
import (
"fmt"
"os"
"path/filepath"
"text/template"
)
var (
LayoutDir string = "views/layouts/"
TemplateDir string = "views/"
TemplateExt string = ".tmpl"
)
// View pairs a parsed template set with the name of the layout template
// that Render executes as the entry point.
type View struct {
	Template *template.Template
	Layout string
}
// NewView builds a View from the named template files (resolved relative to
// TemplateDir, with TemplateExt appended) plus every shared layout file found
// under LayoutDir. It panics on glob or parse failure, so it is intended for
// package-level initialization.
func NewView(layout string, files ...string) *View {
	// Single pass: the original looped over files twice (once to prepend the
	// directory, once to append the extension) — both rewrites combined here.
	for i, f := range files {
		files[i] = TemplateDir + f + TemplateExt
	}
	layoutFiles, err := filepath.Glob(LayoutDir + "*" + TemplateExt)
	if err != nil {
		panic(err)
	}
	files = append(files, layoutFiles...)
	t, err := template.ParseFiles(files...)
	if err != nil {
		panic(err)
	}
	return &View{
		Template: t,
		Layout:   layout,
	}
}
// Render executes the view's layout template against data, writing the
// result to os.Stdout. A failed render is printed rather than returned, so
// callers cannot detect the error (NOTE(review): consider returning it).
func (v *View) Render(data interface{}) {
	err := v.Template.ExecuteTemplate(os.Stdout, v.Layout, data)
	if err != nil {
		fmt.Println(err)
	}
}
|
package mock
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/pomerium/pomerium/internal/sessions"
)
// TestStore drives the mock session store through a full save / load / clear
// cycle. wantLoad is a fixed signed token (the header decodes as an HS256
// JWT — presumably produced by the mock's deterministic signer); update the
// fixture if the mock's signing key or claims shape changes.
func TestStore(t *testing.T) {
	tests := []struct {
		name string
		store *Store
		wantLoad string
		saveSession *sessions.State
		wantLoadErr bool
		wantSaveErr bool
	}{
		{
			"basic",
			&Store{
				ResponseSession: "test",
				Session: &sessions.State{Subject: "0101"},
				SaveError: nil,
				LoadError: nil,
			},
			"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIwMTAxIn0.Yfxj4xDTI0PHX7Mdi1wkY6S6Mn0dbROWNhS6xEe8LTc",
			&sessions.State{Subject: "0101"},
			false,
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ms := tt.store
			err := ms.SaveSession(nil, nil, tt.saveSession)
			if (err != nil) != tt.wantSaveErr {
				t.Errorf("mockstore.SaveSession() error = %v, wantSaveErr %v", err, tt.wantSaveErr)
				return
			}
			got, err := ms.LoadSession(nil)
			if (err != nil) != tt.wantLoadErr {
				t.Errorf("mockstore.LoadSession() error = %v, wantLoadErr %v", err, tt.wantLoadErr)
				return
			}
			assert.Equal(t, tt.wantLoad, got)
			// ClearSession must wipe the canned response.
			ms.ClearSession(nil, nil)
			if ms.ResponseSession != "" {
				t.Errorf("ResponseSession not empty! %s", ms.ResponseSession)
			}
		})
	}
}
|
package requests
import "time"
var _ = time.Time{}
// CreateProject is the request payload for creating a project.
type CreateProject struct {
	Status string
}
// UpdateProject is the request payload for updating a project.
type UpdateProject struct {
	Status string
}
// Valid runs struct-level validation on the request.
// `validate` is declared elsewhere in this package — presumably a shared
// validator instance (e.g. go-playground/validator); verify at its declaration.
func (c *CreateProject) Valid() error {
	return validate.Struct(c)
}
// Valid runs struct-level validation on the request via the package-level
// `validate` instance (declared elsewhere in this package).
func (c *UpdateProject) Valid() error {
	return validate.Struct(c)
}
|
package drivers
// ToOneRelationship describes a relationship between two tables where the local
// table has no id, and the foreign table has an id that matches a column in the
// local table; that column can also be unique, which changes the dynamic into a
// one-to-one style, not a to-many.
type ToOneRelationship struct {
	Name string `json:"name"`
	Table string `json:"table"`
	Column string `json:"column"`
	Nullable bool `json:"nullable"`
	Unique bool `json:"unique"`
	ForeignTable string `json:"foreign_table"`
	ForeignColumn string `json:"foreign_column"`
	ForeignColumnNullable bool `json:"foreign_column_nullable"`
	ForeignColumnUnique bool `json:"foreign_column_unique"`
}
// ToManyRelationship describes a relationship between two tables where the
// local table has no id, and the foreign table has an id that matches a column
// in the local table. When the relationship passes through a join table,
// ToJoinTable is set and the Join* fields describe both sides of the join.
type ToManyRelationship struct {
	Name string `json:"name"`
	Table string `json:"table"`
	Column string `json:"column"`
	Nullable bool `json:"nullable"`
	Unique bool `json:"unique"`
	ForeignTable string `json:"foreign_table"`
	ForeignColumn string `json:"foreign_column"`
	ForeignColumnNullable bool `json:"foreign_column_nullable"`
	ForeignColumnUnique bool `json:"foreign_column_unique"`
	ToJoinTable bool `json:"to_join_table"`
	JoinTable string `json:"join_table"`
	JoinLocalFKeyName string `json:"join_local_fkey_name"`
	JoinLocalColumn string `json:"join_local_column"`
	JoinLocalColumnNullable bool `json:"join_local_column_nullable"`
	JoinLocalColumnUnique bool `json:"join_local_column_unique"`
	JoinForeignFKeyName string `json:"join_foreign_fkey_name"`
	JoinForeignColumn string `json:"join_foreign_column"`
	JoinForeignColumnNullable bool `json:"join_foreign_column_nullable"`
	JoinForeignColumnUnique bool `json:"join_foreign_column_unique"`
}
// ToOneRelationships relationship lookups.
// Input should be the sql name of a table like: videos
func ToOneRelationships(table string, tables []Table) []ToOneRelationship {
	localTable := GetTable(tables, table)
	return toOneRelationships(localTable, tables)
}
// ToManyRelationships relationship lookups.
// Input should be the sql name of a table like: videos
func ToManyRelationships(table string, tables []Table) []ToManyRelationship {
	localTable := GetTable(tables, table)
	return toManyRelationships(localTable, tables)
}
// toOneRelationships scans every foreign key in the schema and collects the
// ones that reference table with a unique constraint, skipping join tables —
// i.e. the to-one side of each relationship.
func toOneRelationships(table Table, tables []Table) []ToOneRelationship {
	var rels []ToOneRelationship
	for _, other := range tables {
		if other.IsJoinTable {
			continue
		}
		for _, fk := range other.FKeys {
			if fk.ForeignTable == table.Name && fk.Unique {
				rels = append(rels, buildToOneRelationship(table, fk, other, tables))
			}
		}
	}
	return rels
}
// toManyRelationships collects every foreign key referencing table that is
// either non-unique or lives on a join table — the to-many side of each
// relationship.
func toManyRelationships(table Table, tables []Table) []ToManyRelationship {
	var rels []ToManyRelationship
	for _, other := range tables {
		for _, fk := range other.FKeys {
			if fk.ForeignTable != table.Name {
				continue
			}
			// Skip unique FKs on non-join tables: those are to-one.
			if !other.IsJoinTable && fk.Unique {
				continue
			}
			rels = append(rels, buildToManyRelationship(table, fk, other, tables))
		}
	}
	return rels
}
// buildToOneRelationship describes foreignKey from localTable's perspective:
// the FK's local/foreign columns are deliberately swapped, since the FK lives
// on the foreign table but the relationship is viewed from the local side.
// tables is unused; kept for signature parity with buildToManyRelationship.
func buildToOneRelationship(localTable Table, foreignKey ForeignKey, foreignTable Table, tables []Table) ToOneRelationship {
	return ToOneRelationship{
		Name: foreignKey.Name,
		Table: localTable.Name,
		Column: foreignKey.ForeignColumn,
		Nullable: foreignKey.ForeignColumnNullable,
		Unique: foreignKey.ForeignColumnUnique,
		ForeignTable: foreignTable.Name,
		ForeignColumn: foreignKey.Column,
		ForeignColumnNullable: foreignKey.Nullable,
		ForeignColumnUnique: foreignKey.Unique,
	}
}
// buildToManyRelationship describes foreignKey from localTable's perspective.
// For a direct relationship the FK's local/foreign columns are swapped (the
// FK lives on the foreign table). For a join table, the Join* fields record
// the near side from foreignKey and the far side from the join table's
// *other* foreign key. tables is unused; kept for signature symmetry.
func buildToManyRelationship(localTable Table, foreignKey ForeignKey, foreignTable Table, tables []Table) ToManyRelationship {
	if !foreignTable.IsJoinTable {
		return ToManyRelationship{
			Name: foreignKey.Name,
			Table: localTable.Name,
			Column: foreignKey.ForeignColumn,
			Nullable: foreignKey.ForeignColumnNullable,
			Unique: foreignKey.ForeignColumnUnique,
			ForeignTable: foreignTable.Name,
			ForeignColumn: foreignKey.Column,
			ForeignColumnNullable: foreignKey.Nullable,
			ForeignColumnUnique: foreignKey.Unique,
			ToJoinTable: false,
		}
	}
	relationship := ToManyRelationship{
		Table: localTable.Name,
		Column: foreignKey.ForeignColumn,
		Nullable: foreignKey.ForeignColumnNullable,
		Unique: foreignKey.ForeignColumnUnique,
		ToJoinTable: true,
		JoinTable: foreignTable.Name,
		JoinLocalFKeyName: foreignKey.Name,
		JoinLocalColumn: foreignKey.Column,
		JoinLocalColumnNullable: foreignKey.Nullable,
		JoinLocalColumnUnique: foreignKey.Unique,
	}
	// Fill the far side from the join table's other FK; if the join table has
	// more than two FKs, the last one wins (every non-matching FK overwrites).
	for _, fk := range foreignTable.FKeys {
		if fk.Name == foreignKey.Name {
			continue
		}
		relationship.JoinForeignFKeyName = fk.Name
		relationship.JoinForeignColumn = fk.Column
		relationship.JoinForeignColumnNullable = fk.Nullable
		relationship.JoinForeignColumnUnique = fk.Unique
		relationship.ForeignTable = fk.ForeignTable
		relationship.ForeignColumn = fk.ForeignColumn
		relationship.ForeignColumnNullable = fk.ForeignColumnNullable
		relationship.ForeignColumnUnique = fk.ForeignColumnUnique
	}
	return relationship
}
|
/*
Today your goal is to find integers a and b given non-negative integer n such that:
(3 + sqrt(5))^n = a + b * sqrt(5)
You should write a program or a function that takes parameter n and outputs a and b in a format of your choice.
Standard loopholes apply. Additionally, it's intended that you implement the above problem using basic arithmetic yourself.
So you may not use built-in exact algebra functionality, rationals, or functions implementing non-trivial mathematical constructs (for example the Lucas sequence).
Shortest code in bytes wins.
Example input/output:
0 → 1, 0
1 → 3, 1
2 → 14, 6
3 → 72, 32
4 → 376, 168
5 → 1968, 880
6 → 10304, 4608
7 → 53952, 24128
8 → 282496, 126336
9 → 1479168, 661504
*/
package main
// main cross-checks A and B against the known table of
// (3+sqrt(5))^n = a + b*sqrt(5) values for n = 0..9.
func main() {
	cases := [][3]int{
		{0, 1, 0},
		{1, 3, 1},
		{2, 14, 6},
		{3, 72, 32},
		{4, 376, 168},
		{5, 1968, 880},
		{6, 10304, 4608},
		{7, 53952, 24128},
		{8, 282496, 126336},
		{9, 1479168, 661504},
	}
	for _, row := range cases {
		n, wantA, wantB := row[0], row[1], row[2]
		assert(A(n) == wantA)
		assert(B(n) == wantB)
	}
}
// assert panics when its condition is false — a minimal test harness for main.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
/*
https://oeis.org/A098648
https://oeis.org/A084326
https://oeis.org/A000032
*/
// A returns the coefficient a in (3+sqrt(5))^n = a + b*sqrt(5),
// computed as 2^(n-1) * L(2n) where L is the Lucas sequence (OEIS A098648).
// Negative n yields 0.
func A(n int) int {
	if n < 0 {
		return 0
	}
	if n == 0 {
		return 1
	}
	// NOTE: << and * share precedence in Go and associate left-to-right, so
	// this parses as (1 << (n-1)) * lucas(2*n), i.e. 2^(n-1) * L(2n).
	return 1 << (n - 1) * lucas(2*n)
}
// B returns the coefficient b of sqrt(5) in (3+sqrt(5))^n = a + b*sqrt(5),
// by iterating the recurrence b(k+1) = 6*b(k) - 4*b(k-1) with b(0)=0, b(1)=1
// (OEIS A084326). Negative n yields 0.
func B(n int) int {
	if n < 0 {
		return 0
	}
	prev, cur := 0, 1
	for step := 0; step < n; step++ {
		prev, cur = cur, 6*cur-4*prev
	}
	return prev
}
// lucas returns the n-th Lucas number: 2, 1, 3, 4, 7, 11, ... (OEIS A000032).
// Negative n yields 0.
func lucas(n int) int {
	if n < 0 {
		return 0
	}
	prev, cur := 2, 1
	for ; n > 0; n-- {
		prev, cur = cur, prev+cur
	}
	return prev
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//33. Search in Rotated Sorted Array
//Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
//(i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).
//You are given a target value to search. If found in the array return its index, otherwise return -1.
//You may assume no duplicate exists in the array.
//Your algorithm's runtime complexity must be in the order of O(log n).
//Example 1:
//Input: nums = [4,5,6,7,0,1,2], target = 0
//Output: 4
//Example 2:
//Input: nums = [4,5,6,7,0,1,2], target = 3
//Output: -1
//func search(nums []int, target int) int {
//}
// Time Is Money |
package main
import (
"encoding/json"
"flag"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"os"
"regexp"
"strings"
)
var (
proxyFlag = flag.String("p", "", "valid proxy ip address with port (ex: 176.107.17.129:8080")
receiverTargetURLFlag = flag.String("t", "", "Target URL that has the receiver deployed to analyzes the request and send back results")
)
// main sends a probe request to the receiver through the configured proxy,
// decodes what the receiver observed (headers + remote address), classifies
// the proxy's anonymity level, and prints the JSON report to stdout.
func main() {
	flag.Parse()
	client := NewProxiedClient(*proxyFlag)
	resp := client.pingTarget(*receiverTargetURLFlag)
	defer resp.Body.Close()
	received := Received{}
	errdec := json.NewDecoder(resp.Body).Decode(&received)
	if errdec != nil {
		log.Fatalf("Error decoding incoming json: %s", errdec)
	}
	// Compare what the receiver saw against our real public IP.
	myip := myIP()
	ipdetected := received.containsIP(myip)
	proxyinfodetected := received.containsProxyInfo()
	outcome := NewOutcome(myip, *proxyFlag, ipdetected, proxyinfodetected)
	j, _ := json.MarshalIndent(outcome, "", " ")
	os.Stdout.Write(j)
}
type Outcome struct {
Score int
Level string
MyIP string
Proxy string
IPdetection map[string]string
Proxydetection map[string]string
}
func NewOutcome(ip net.IP, proxy string, ipdetection map[string]string, proxydetection map[string]string) Outcome {
ipdetected, proxydetected := true, true
score, level := 3, "transparent"
if ipdetection == nil || len(ipdetection) == 0 {
ipdetected = false
}
if proxydetection == nil || len(proxydetection) == 0 {
proxydetected = false
}
if !ipdetected {
if proxydetected {
level, score = "anonymous", 2
} else {
level, score = "elite", 1
}
}
return Outcome{
Proxy: proxy, Level: level,
Score: score,
MyIP: ip.String(), IPdetection: ipdetection,
Proxydetection: proxydetection,
}
}
// ProxiedClient is an http.Client that routes every request through a fixed
// HTTP proxy.
type ProxiedClient struct {
	*http.Client
}

// NewProxiedClient validates proxy ("ip:port") and returns a client whose
// transport tunnels through it. Any validation failure is fatal.
func NewProxiedClient(proxy string) *ProxiedClient {
	host, _, err := net.SplitHostPort(proxy)
	if err != nil {
		log.Fatalf("cannot split proxy address into ip and port: %s", err)
	}
	if net.ParseIP(host) == nil {
		log.Fatalf("invalid ip address provided")
	}
	proxyURL, err := url.Parse("http://" + proxy)
	if err != nil {
		log.Fatalf("Cannot parse proxy url %s", err)
	}
	return &ProxiedClient{&http.Client{
		Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)},
	}}
}
// pingTarget GETs the receiver URL through the proxy and returns the raw
// response for the caller to decode. Transport errors and non-2xx statuses
// are fatal. The caller owns the response body and must close it.
func (pc *ProxiedClient) pingTarget(url string) *http.Response {
	resp, errget := pc.Get(url)
	if errget != nil {
		log.Fatalf("Error contacting target at %s: %s", url, errget)
	}
	if status := resp.StatusCode; status < 200 || status > 299 {
		log.Fatalf("HTTP NOK - Received %d\n", status)
	}
	return resp
}
type Received struct {
Header map[string][]string
RemoteAddr string
}
func (r *Received) containsIP(myip net.IP) map[string]string {
detected := make(map[string]string)
for k, v := range r.Header {
for _, vv := range v {
if ip := net.ParseIP(vv); ip != nil && myip.Equal(ip) {
detected[k] = ip.String()
}
}
}
remoteip, _, _ := net.SplitHostPort(r.RemoteAddr)
if ip := net.ParseIP(remoteip); ip != nil && myip.Equal(ip) {
detected["RemoteAddr"] = ip.String()
}
return detected
}
var proxyinforeg = regexp.MustCompile(`(?i)forw|via|prox|client|ip`)
func (r *Received) containsProxyInfo() map[string]string {
detected := make(map[string]string)
for k, v := range r.Header {
if proxyinforeg.MatchString(k) {
detected[k] = v[0]
}
}
return detected
}
// myIP asks checkip.amazonaws.com for this machine's public IP address.
// Any failure is fatal, since the whole report depends on knowing the real IP.
func myIP() net.IP {
	service := "http://checkip.amazonaws.com/"
	resp, err := http.Get(service)
	if err != nil {
		log.Fatalf("cannot get your ip from service %s: %s", service, err)
	}
	// Convention fix: defer Close immediately after the error check rather
	// than after the body has already been read.
	defer resp.Body.Close()
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("cannot parse your ip '%q' from service %s: %s", b, service, err)
	}
	return net.ParseIP(strings.TrimSpace(string(b)))
}
|
package jsonrpc
import (
"bufio"
"net"
)
// DECL> OMIT
// Channel represents some abstract packet channel over net.Conn: reads are
// consumed by a dedicated reader goroutine and writes are queued on out for
// a dedicated writer goroutine.
type Channel struct {
	conn net.Conn
	out chan Packet
}
// NewChannel wraps conn in a Channel with an N-buffered outbound queue and
// starts its reader and writer goroutines.
func NewChannel(conn net.Conn) *Channel {
	c := &Channel{conn, make(chan Packet, N)}
	go c.reader()
	go c.writer()
	// BUG FIX: the function is declared to return *Channel but previously had
	// no return statement at all (a "missing return" compile error).
	return c
}
// DECL< OMIT
// IMPL> OMIT
// reader owns the read side of the connection: one buffered reader is
// allocated per Channel and reused for every packet. The loop has no exit;
// NOTE(review): there is no shutdown path visible here — confirm lifecycle.
func (c *Channel) reader() {
	buf := bufio.NewReader(c.conn) // Allocation.
	for {
		readPacket(buf) // Possibly allocations too.
		// ...
	}
}
// writer drains c.out, encoding each packet through a single buffered writer
// that is flushed after every packet. The loop ends when c.out is closed.
func (c *Channel) writer() {
	buf := bufio.NewWriter(c.conn) // Allocation.
	for pkt := range c.out {
		writePacket(buf, pkt)
		// ...
		buf.Flush()
	}
}
// IMPL< OMIT
|
package api_test
import (
"net/http"
"github.com/odpf/stencil/config"
"github.com/odpf/stencil/server"
"github.com/odpf/stencil/server/api"
"github.com/odpf/stencil/server/api/mocks"
)
// setup wires an API instance backed by mock store/metadata services into the
// server router, returning everything a test needs to issue requests and
// assert on the mocks' expectations.
func setup() (http.Handler, *mocks.StoreService, *mocks.MetadataService, *api.API) {
	store := &mocks.StoreService{}
	metadata := &mocks.MetadataService{}
	apiV1 := &api.API{
		Store: store,
		Metadata: metadata,
	}
	handler := server.Router(apiV1, &config.Config{})
	return handler, store, metadata, apiV1
}
|
package main
import "fmt"
// Human holds the fields shared by Student and Employee through embedding.
type Human struct {
	name string
	age int
	phone string
}
// Student embeds Human, promoting its fields and SayHi method.
type Student struct {
	Human
	school string
}
// Employee embeds Human, promoting its fields and SayHi method.
type Employee struct {
	Human
	company string
}
// SayHi prints a greeting with the human's name and phone number to stdout.
func (h *Human) SayHi() {
	fmt.Printf("Hi, I am %s u can call me on %s\n", h.name, h.phone)
}
// main demonstrates struct embedding: both Student and Employee promote
// Human's SayHi method.
func main() {
	student := Student{Human{"Mark", 19, "0911235468"}, "NKUST"}
	worker := Employee{Human{"Leo", 20, "0988163254"}, "TSMC"}
	student.SayHi()
	worker.SayHi()
}
|
package main
import (
"fmt"
"log"
"os"
_ "restaurantManageAPI/init/runtime"
"restaurantManageAPI/pkg/router"
)
// main starts the Gin HTTP server on the port given by GIN_SERVER_PORT.
// Side-effect initialization (config, DB, etc.) happens in the blank import
// of init/runtime above — presumably; confirm at that package.
func main() {
	err := router.Router.Run(fmt.Sprintf(":%s", os.Getenv("GIN_SERVER_PORT")))
	if err != nil {
		log.Fatal(err)
	}
}
|
package cache
import (
"fmt"
"github.com/pilillo/igovium/utils"
)
// NewDMCacheFromConfig builds the distributed-map cache implementation
// selected by config.Type: "olric" or "redis".
// todo: convert to a map
func NewDMCacheFromConfig(config *utils.DMCacheConfig) (DMCache, error) {
	switch dmType := config.Type; dmType {
	case "olric":
		return NewOlricDMCache(), nil
	case "redis":
		return NewRedisDMCache(), nil
	default:
		// Error strings are lowercase per Go convention (staticcheck ST1005);
		// %q quotes the offending value so an empty type is visible.
		return nil, fmt.Errorf("unknown dm type %q", dmType)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.