text stringlengths 11 4.05M |
|---|
package main
import (
"fmt"
"github.com/lingdor/midlog"
"github.com/lingdor/midlog-examples/library2"
"os"
)
var Logger = midlog.New("routeLog")
// main installs the custom MyWriter as midlog's writer, then emits
// sample records from both the library2 logger and this package's
// Logger before exiting.
func main() {
	// config log writer: all records are routed through MyWriter.Write
	midlog.SetWriter(&MyWriter{})
	library2.DumpLog("library2 log")
	library2.DumpError("library2 error log")
	Logger.Info("hello world!")
	// NOTE(review): "errror" typo is in a runtime log string; left as-is.
	Logger.Error1("logger errror log")
	fmt.Println("done")
}
// MyWriter is a midlog writer that fans records out to files under
// /tmp (see Write for the routing rules).
type MyWriter struct {
}
// Write formats one log record and routes it: library2 records also go
// to /tmp/library2.log, error-level records also go to /tmp/error.log,
// and every record goes to /tmp/run.log.
func (MyWriter) Write(logger midlog.Midlog, level midlog.LogLevel, caller *midlog.Caller, pams ...interface{}) {
	// NOTE(review): a formatter is rebuilt on every call; if
	// midlog.NewFormater is pure this could be hoisted — confirm.
	formater, err := midlog.NewFormater(midlog.FormatTextFull)
	if err != nil {
		panic(err)
	}
	bs, err := formater.Format(logger.GetModule(), level, caller, logger.GetExt(), pams...)
	if err != nil {
		panic(err)
	}
	// records from the library2 logger get their own file
	if logger == library2.Logger {
		writelog("/tmp/library2.log", bs)
	}
	// error-level (and above) records are duplicated into the error log
	if level >= midlog.LevelError1 {
		writelog("/tmp/error.log", bs)
	}
	// every record ends up in the main run log
	writelog("/tmp/run.log", bs)
}
func writelog(fpath string, bs []byte) error {
file, err := os.OpenFile(fpath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0777)
if err != nil {
return err
}
defer file.Close()
file.Write(bs)
return nil
}
|
package isogram
import (
"unicode"
)
// IsIsogram reports whether word contains no repeated letters.
// The check is case-insensitive; spaces and hyphens are ignored, since
// they do not affect isogram status.
func IsIsogram(word string) bool {
	seen := map[rune]bool{}
	for _, r := range word {
		r = unicode.ToUpper(r)
		// Rune literals replace the old magic numbers 45 ('-') and 32 (' ').
		if r == '-' || r == ' ' {
			continue
		}
		if seen[r] {
			// Early exit: the old code kept scanning after the first duplicate.
			return false
		}
		seen[r] = true
	}
	return true
}
|
package main
import "github.com/robertsmieja-templates/golang-cli-template/cmd"
// main is the CLI entry point; it delegates to the command tree defined
// in the cmd package.
func main() {
	cmd.Execute()
}
|
package utils
import "time"
// TODO: needs optimization (translated from the original note "待优化")
// SimpleCache is a minimal in-memory key/value cache with a cache-wide
// TTL. NOTE(review): not safe for concurrent use — the map has no lock.
type SimpleCache struct {
	cache        map[string]*element // entries keyed by caller-supplied string
	globalExpire int64               // TTL in seconds applied to each new entry
}
// CreateSimpleCache returns a cache using defaultExpire (30 minutes).
func CreateSimpleCache() SimpleCache {
	return CreateSimpleCacheExpire(defaultExpire)
}
// CreateSimpleCacheExpire returns a cache whose entries expire after
// globalExpire seconds.
func CreateSimpleCacheExpire(globalExpire int64) SimpleCache {
	return SimpleCache{
		cache: make(map[string]*element),
		// Bug fix: the globalExpire argument was previously ignored and
		// defaultExpire was always stored instead.
		globalExpire: globalExpire,
	}
}
// Put stores value under key with the cache-wide TTL, stamping the entry
// with the current time.
func (p *SimpleCache) Put(key string, value interface{}) {
	p.cache[key] = &element{
		value:      value,
		expire:     p.globalExpire,
		createTime: time.Now(),
	}
}
// Get returns the value stored under key, or nil when the key is absent
// or its entry has expired. Expired entries are evicted on access.
func (p *SimpleCache) Get(key string) interface{} {
	ele, ok := p.cache[key]
	if !ok {
		return nil
	}
	if ele.isExpire() {
		delete(p.cache, key)
		return nil
	}
	return ele.value
}
const (
	defaultExpire = 60 * 30 // default TTL in seconds (half an hour)
)
// element is a single cache entry plus its expiry bookkeeping.
type element struct {
	value      interface{} // cached payload
	expire     int64       // TTL in seconds for this entry
	createTime time.Time   // when the entry was stored
}
// isExpire reports whether the entry's age (in whole seconds) has
// exceeded its TTL.
func (e *element) isExpire() bool {
	ageSeconds := time.Now().Unix() - e.createTime.Unix()
	return ageSeconds > e.expire
}
|
package main
import (
"fmt"
"io/ioutil"
"os"
"regexp"
)
// main renames every .js/.jsx file in the directories passed as command
// line arguments to .ts/.tsx respectively.
func main() {
	// Compile once, anchored at the end of the name. The old patterns
	// ".js"/".jsx" had an unescaped dot and were unanchored, so ".js"
	// also matched "foo.jsx", which was then renamed by BOTH branches
	// (the second of which mistakenly reused the .js regexp, producing
	// "foo.tsxx" and a rename of an already-renamed file).
	jsRe := regexp.MustCompile(`\.js$`)
	jsxRe := regexp.MustCompile(`\.jsx$`)
	for _, dir := range os.Args[1:] {
		fileInfos, err := ioutil.ReadDir(dir)
		if err != nil {
			// Skip unreadable directories instead of ranging over a nil slice.
			fmt.Println(err)
			continue
		}
		for _, info := range fileInfos {
			name := info.Name()
			var out string
			switch {
			case jsxRe.MatchString(name):
				out = jsxRe.ReplaceAllString(name, ".tsx")
			case jsRe.MatchString(name):
				out = jsRe.ReplaceAllString(name, ".ts")
			default:
				continue
			}
			fmt.Println("info", dir, name, "->", out)
			if err := os.Rename(dir+"/"+name, dir+"/"+out); err != nil {
				fmt.Println(err)
			}
		}
	}
}
package main
import (
"fmt"
"github.com/Densuke-fitness/Practice4forGoCi/fizzbuzz"
)
// main prints fizzbuzz.Convert(15) — presumably "FizzBuzz", since 15 is
// divisible by both 3 and 5 (confirm against the fizzbuzz package).
func main() {
	fmt.Println(fizzbuzz.Convert(15))
}
package core
/*
** Defined Transfer Syntax UIDs
*/
// These UID strings are fixed identifiers and are never reassigned, so
// they are declared as constants (was a var block).
const (
	// UIDLittleEndianImplicitTransferSyntax : Implicit VR Little Endian: Default Transfer Syntax for DICOM
	UIDLittleEndianImplicitTransferSyntax = "1.2.840.10008.1.2"
	// UIDLittleEndianExplicitTransferSyntax : Explicit VR Little Endian
	UIDLittleEndianExplicitTransferSyntax = "1.2.840.10008.1.2.1"
	// UIDBigEndianExplicitTransferSyntax : Explicit VR Big Endian
	UIDBigEndianExplicitTransferSyntax = "1.2.840.10008.1.2.2"
	// UIDDeflatedExplicitVRLittleEndianTransferSyntax : Deflated Explicit VR Little Endian
	UIDDeflatedExplicitVRLittleEndianTransferSyntax = "1.2.840.10008.1.2.1.99"
	// UIDJPEGProcess1TransferSyntax : JPEG Baseline (Process 1): Default Transfer Syntax
	// for Lossy JPEG 8 Bit Image Compression
	UIDJPEGProcess1TransferSyntax = "1.2.840.10008.1.2.4.50"
	// UIDJPEGProcess24TransferSyntax : JPEG Extended (Process 2 & 4): Default Transfer Syntax
	// for Lossy JPEG 12 Bit Image Compression (Process 4 only)
	UIDJPEGProcess24TransferSyntax = "1.2.840.10008.1.2.4.51"
	// UIDJPEGProcess35TransferSyntax : JPEG Extended (Process 3 & 5) - RETIRED
	UIDJPEGProcess35TransferSyntax = "1.2.840.10008.1.2.4.52"
	// UIDJPEGProcess68TransferSyntax : JPEG Spectral Selection, Non-Hierarchical (Process 6 & 8) - RETIRED
	UIDJPEGProcess68TransferSyntax = "1.2.840.10008.1.2.4.53"
	// UIDJPEGProcess79TransferSyntax : JPEG Spectral Selection, Non-Hierarchical (Process 7 & 9) - RETIRED
	UIDJPEGProcess79TransferSyntax = "1.2.840.10008.1.2.4.54"
	// UIDJPEGProcess1012TransferSyntax : JPEG Full Progression, Non-Hierarchical (Process 10 & 12) - RETIRED
	UIDJPEGProcess1012TransferSyntax = "1.2.840.10008.1.2.4.55"
	// UIDJPEGProcess1113TransferSyntax : JPEG Full Progression, Non-Hierarchical (Process 11 & 13) - RETIRED
	UIDJPEGProcess1113TransferSyntax = "1.2.840.10008.1.2.4.56"
	// UIDJPEGProcess14TransferSyntax : JPEG Lossless, Non-Hierarchical (Process 14)
	UIDJPEGProcess14TransferSyntax = "1.2.840.10008.1.2.4.57"
	// UIDJPEGProcess15TransferSyntax : JPEG Lossless, Non-Hierarchical (Process 15) - RETIRED
	UIDJPEGProcess15TransferSyntax = "1.2.840.10008.1.2.4.58"
	// UIDJPEGProcess1618TransferSyntax : JPEG Extended, Hierarchical (Process 16 & 18) - RETIRED
	UIDJPEGProcess1618TransferSyntax = "1.2.840.10008.1.2.4.59"
	// UIDJPEGProcess1719TransferSyntax : JPEG Extended, Hierarchical (Process 17 & 19) - RETIRED
	UIDJPEGProcess1719TransferSyntax = "1.2.840.10008.1.2.4.60"
	// UIDJPEGProcess2022TransferSyntax : JPEG Spectral Selection, Hierarchical (Process 20 & 22) - RETIRED
	UIDJPEGProcess2022TransferSyntax = "1.2.840.10008.1.2.4.61"
	// UIDJPEGProcess2123TransferSyntax : JPEG Spectral Selection, Hierarchical (Process 21 & 23) - RETIRED
	UIDJPEGProcess2123TransferSyntax = "1.2.840.10008.1.2.4.62"
	// UIDJPEGProcess2426TransferSyntax : JPEG Full Progression, Hierarchical (Process 24 & 26) - RETIRED
	UIDJPEGProcess2426TransferSyntax = "1.2.840.10008.1.2.4.63"
	// UIDJPEGProcess2527TransferSyntax : JPEG Full Progression, Hierarchical (Process 25 & 27) - RETIRED
	UIDJPEGProcess2527TransferSyntax = "1.2.840.10008.1.2.4.64"
	// UIDJPEGProcess28TransferSyntax : JPEG Lossless, Hierarchical (Process 28) - RETIRED
	UIDJPEGProcess28TransferSyntax = "1.2.840.10008.1.2.4.65"
	// UIDJPEGProcess29TransferSyntax : JPEG Lossless, Hierarchical (Process 29) - RETIRED
	UIDJPEGProcess29TransferSyntax = "1.2.840.10008.1.2.4.66"
	// UIDJPEGProcess14SV1TransferSyntax : JPEG Lossless, Non-Hierarchical, First-Order Prediction (Process 14
	// [Selection Value 1]): Default Transfer Syntax for Lossless JPEG Image Compression
	UIDJPEGProcess14SV1TransferSyntax = "1.2.840.10008.1.2.4.70"
	// UIDJPEGLSLosslessTransferSyntax : JPEG-LS Lossless Image Compression
	UIDJPEGLSLosslessTransferSyntax = "1.2.840.10008.1.2.4.80"
	// UIDJPEGLSLossyTransferSyntax : JPEG-LS Lossy (Near-Lossless) Image Compression
	UIDJPEGLSLossyTransferSyntax = "1.2.840.10008.1.2.4.81"
	// UIDJPEG2000LosslessOnlyTransferSyntax : JPEG 2000 Image Compression (Lossless Only)
	UIDJPEG2000LosslessOnlyTransferSyntax = "1.2.840.10008.1.2.4.90"
	// UIDJPEG2000TransferSyntax : JPEG 2000 Image Compression (Lossless or Lossy)
	UIDJPEG2000TransferSyntax = "1.2.840.10008.1.2.4.91"
	// UIDJPEG2000Part2MulticomponentImageCompressionLosslessOnlyTransferSyntax : JPEG 2000 Part 2 Multi-component Image Compression (Lossless Only)
	UIDJPEG2000Part2MulticomponentImageCompressionLosslessOnlyTransferSyntax = "1.2.840.10008.1.2.4.92"
	// UIDJPEG2000Part2MulticomponentImageCompressionTransferSyntax : JPEG 2000 Part 2 Multi-component Image Compression (Lossless or Lossy)
	UIDJPEG2000Part2MulticomponentImageCompressionTransferSyntax = "1.2.840.10008.1.2.4.93"
	// UIDJPIPReferencedTransferSyntax : JPIP Referenced
	UIDJPIPReferencedTransferSyntax = "1.2.840.10008.1.2.4.94"
	// UIDJPIPReferencedDeflateTransferSyntax : JPIP Referenced Deflate
	UIDJPIPReferencedDeflateTransferSyntax = "1.2.840.10008.1.2.4.95"
	// UIDMPEG2MainProfileAtMainLevelTransferSyntax : MPEG2 Main Profile @ Main Level
	UIDMPEG2MainProfileAtMainLevelTransferSyntax = "1.2.840.10008.1.2.4.100"
	// UIDMPEG2MainProfileAtHighLevelTransferSyntax : MPEG2 Main Profile @ High Level
	UIDMPEG2MainProfileAtHighLevelTransferSyntax = "1.2.840.10008.1.2.4.101"
	// UIDRLELosslessTransferSyntax : RLE Lossless
	UIDRLELosslessTransferSyntax = "1.2.840.10008.1.2.5"
)
|
package hello
import (
"fmt"
"time"
)
// SupportedLangs maps a language tag to its hello-world greeting.
var SupportedLangs = map[string]string{
	"en-US": "Hello, World!",
	"ru-RU": "Здравствуй, Мир!",
	"zh-CN": "你好,世界!",
	"fr-FR": "Bonjour le Monde!",
}
// World prints the greeting in the default language (en-US).
func World() error {
	return WorldIn("en-US")
}
// WorldIn prints the greeting for lang to stdout. It returns an error
// when lang is not in SupportedLangs or the write fails; on success it
// sleeps one second (kept from the original demo behavior) and returns nil.
func WorldIn(lang string) error {
	greeting, found := SupportedLangs[lang]
	if !found {
		return fmt.Errorf("failed to hello: lang '%s' is not supported", lang)
	}
	// Check the print error before sleeping; the old code slept for a
	// second first and only then inspected the error.
	if _, err := fmt.Println(greeting); err != nil {
		return err
	}
	time.Sleep(1 * time.Second)
	return nil
}
|
package model
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestConvBoolToBytes verifies that true and false each encode to a
// single byte, 1 and 0 respectively.
func TestConvBoolToBytes(t *testing.T) {
	cases := []struct {
		in   bool
		want byte
	}{
		{true, 1},
		{false, 0},
	}
	for _, tc := range cases {
		got := ConvBoolToBytes(tc.in)
		assert.Len(t, got, 1)
		assert.Equal(t, tc.want, got[0])
	}
}
|
package main
import "fmt"
// Account is a named bank account with a floating-point balance.
type Account struct {
	Name   string
	Amount float64
}
// Add deposits amt into the account balance.
func (a *Account) Add(amt float64) {
	a.Amount += amt
}
// Withdraw removes amt from the balance and reports whether it
// succeeded; the balance is untouched when funds are insufficient.
func (a *Account) Withdraw(amt float64) bool {
	if a.Amount >= amt {
		a.Amount -= amt
		return true
	}
	return false
}
// Print writes the account name and balance to stdout.
// Switched to a pointer receiver for consistency with Add and Withdraw
// (mixed receiver kinds on one type are a common review flag); callers
// all hold *Account, so this is call-compatible.
func (a *Account) Print() {
	fmt.Printf("Account Name: %s, Amount: %f\n", a.Name, a.Amount)
}
// main runs an interactive teller loop over one checking and one
// savings account, both seeded with starting balances read from stdin.
func main() {
	fmt.Println("Starting balance for your checking account: ")
	var startCheck float64
	fmt.Scanf("%f", &startCheck)
	checking := &Account{
		"Checking",
		startCheck,
	}
	fmt.Println("Starting balance for your savings account: ")
	var startSaving float64
	fmt.Scanf("%f", &startSaving)
	savings := &Account{
		"Saving",
		startSaving,
	}
	for {
		fmt.Println("Enter option:")
		fmt.Println("1: Add amount to account")
		fmt.Println("2: Withdraw an amount from account")
		fmt.Println("3: Print account information")
		fmt.Println("4: Exit")
		var choice int
		fmt.Scanf("%d", &choice)
		switch choice {
		case 1:
			fmt.Println("How much?")
			var amt float64
			fmt.Scanf("%f", &amt)
			// account selection was triplicated across the cases;
			// now factored into chooseAccount
			if acct := chooseAccount(checking, savings); acct != nil {
				acct.Add(amt)
			}
		case 2:
			fmt.Println("How much?")
			var amt float64
			fmt.Scanf("%f", &amt)
			if acct := chooseAccount(checking, savings); acct != nil {
				if acct.Withdraw(amt) {
					fmt.Println("Withdraw successful!")
				} else {
					fmt.Println("Insufficient funds")
				}
			}
		case 3:
			if acct := chooseAccount(checking, savings); acct != nil {
				acct.Print()
			}
		case 4:
			return
		default:
			// typo fix: was "Invalud option"
			fmt.Println("Invalid option")
		}
	}
}

// chooseAccount prompts for "Checking" or "Savings" and returns the
// matching account, or nil (after printing an error) for any other input.
func chooseAccount(checking, savings *Account) *Account {
	fmt.Println("Checking or Savings?")
	var name string
	fmt.Scanf("%s", &name)
	switch name {
	case "Checking":
		return checking
	case "Savings":
		return savings
	default:
		// typo fix: was "choosen"
		fmt.Println("Invalid option chosen! Please choose Checking or Savings")
		return nil
	}
}
|
package main
import (
"bytes"
"fmt"
"image"
"image/color"
"image/draw"
"image/jpeg"
"github.com/enjoy-web/ehttp"
"github.com/gin-gonic/gin"
)
// ErrorMessage is the JSON error payload returned by the demo handlers.
// NOTE(review): the field is named Details but serializes as "detail".
type ErrorMessage struct {
	Message string `json:"message" desc:"the error message"`
	Details string `json:"detail" desc:"the error detail"`
}
// DocDownloadText is the OpenAPI documentation for the download demo
// route: a JPEG body on 200, an ErrorMessage payload on 400.
var DocDownloadText = &ehttp.APIDocCommon{
	Summary:  "A download file demo",
	Produces: []string{ehttp.Image_Jpeg},
	Parameters: map[string]ehttp.Parameter{
		"fileName": ehttp.Parameter{InPath: &ehttp.ValueInfo{Type: "string", Desc: "the fileName"}},
	},
	Responses: map[int]ehttp.Response{
		200: ehttp.Response{
			Description: "successful operation",
		},
		400: ehttp.Response{
			Description: "failed operation",
			Model:       &ErrorMessage{},
		},
	},
}
// HandleDownloadText serves a freshly generated JPEG as an attachment
// named after the fileName path parameter. Parameter or image-generation
// errors are reported as a 400 with an ErrorMessage body.
func HandleDownloadText(c *gin.Context, err error) {
	if err != nil {
		c.JSON(400, &ErrorMessage{"parameter error", err.Error()})
		return
	}
	fileName := c.Param("fileName")
	jpgData, err := newImage()
	if err != nil {
		c.JSON(400, &ErrorMessage{"newImage error", err.Error()})
		return
	}
	length := len(jpgData)
	c.Writer.Header().Set("Content-Type", ehttp.Image_Jpeg)
	c.Writer.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", fileName))
	// Bug fix: Content-Range uses "bytes first-last/total" (RFC 7233)
	// with an inclusive last byte. The old value "bytes=0-len/len" used
	// the wrong separator and overstated the range end by one.
	c.Writer.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", 0, length-1, length))
	c.Writer.Header().Set("Content-Length", fmt.Sprintf("%d", length))
	c.Writer.Write(jpgData)
}
// newImage renders a 200x200 solid blue image and returns it
// JPEG-encoded at quality 60.
func newImage() ([]byte, error) {
	m := image.NewRGBA(image.Rect(0, 0, 200, 200))
	blue := color.RGBA{R: 0, G: 0, B: 255, A: 255}
	draw.Draw(m, m.Bounds(), &image.Uniform{C: blue}, image.ZP, draw.Src)
	buf := &bytes.Buffer{}
	// Keyed literal: the old unkeyed jpeg.Options{60} trips go vet's
	// composites check and breaks if the struct ever gains fields.
	if err := jpeg.Encode(buf, m, &jpeg.Options{Quality: 60}); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// main configures the ehttp engine, registers the download demo route,
// and serves on :8000.
func main() {
	conf := &ehttp.Config{
		Schemes:            []ehttp.Scheme{ehttp.SchemeHTTP},
		BasePath:           "/demo",
		Version:            "v1",
		Title:              "book store APIS",
		Description:        "APIs of book",
		AllowOrigin:        true,
		OpenAPIDocumentURL: true,
	}
	router := ehttp.NewEngine(conf)
	// route registration can fail (e.g. bad doc/parameter spec), hence the error check
	err := router.GET("/text/:fileName", DocDownloadText, HandleDownloadText)
	if err != nil {
		panic(err)
	}
	router.Run(":8000")
}
|
package main
import (
"ch9/formatter"
"ch9/math"
"fmt"
)
// main doubles 2 via the math package, formats the result with the
// formatter package, and prints it.
func main() {
	num := math.Double(2)
	output := formatter.Format(num)
	fmt.Println(output)
}
|
package article
import (
"github.com/go-chi/chi"
"github.com/go-chi/render"
"github.com/hardstylez72/bblog/internal/api/controller"
ma "github.com/hardstylez72/bblog/internal/api/model/article"
"github.com/hardstylez72/bblog/internal/storage/user"
"net/http"
)
// GetArticleByIdHandler responds with the article matching the "id" URL
// parameter: 404 when storage reports not-found, 500 for other errors.
func (c articleController) GetArticleByIdHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	id := chi.URLParam(r, "id")
	art, err := c.articleStorage.GetArticleById(ctx, id)
	if err == nil {
		render.JSON(w, r, ma.NewGetArticleByIdResponse(art))
		return
	}
	if err == user.ErrNotFound {
		controller.ResponseWithError(ErrArticleNotFound(err), http.StatusNotFound, w)
		return
	}
	controller.ResponseWithError(controller.ErrInternal(err), http.StatusInternalServerError, w)
}
|
package main
import (
"fmt"
"github.com/jlarusso/gonads/interactors"
)
// main runs MakeSauce over three ingredient sets and prints each result.
func main() {
	// Same three attempts as before, expressed as a table; expected
	// outputs are noted per row.
	pots := []map[string]int{
		{"tomatoes": 1, "heat": 100, "salt": 2},  // => Failure(Not enough tomatoes)
		{"tomatoes": 10, "heat": 20, "salt": 25}, // => Failure(Turn up the heat)
		{"tomatoes": 5, "heat": 100, "salt": 2},  // => Success(30)
	}
	for _, pot := range pots {
		fmt.Println(interactors.MakeSauce(pot))
	}
}
|
package fitbit
import (
"context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/fitbit"
)
// AuthConfig carries the OAuth2 client credentials and redirect URL used
// to build fitbit authorization requests.
type AuthConfig struct {
	ClientID     string
	ClientSecret string
	RedirectURL  string
}
// newConfig returns an oauth2.Config preset with the fitbit endpoint and
// the full scope list; client credentials are filled in by the callers.
func newConfig() *oauth2.Config {
	return &oauth2.Config{
		Endpoint: fitbit.Endpoint,
		Scopes:   []string{"activity", "location", "social", "heartrate", "settings", "sleep", "weight", "profile", "nutrition"},
	}
}
// GenerateToken exchanges an authorization code for a Token using the
// authorization code grant flow.
func GenerateToken(c *AuthConfig, code string) (Token, error) {
	cfg := newConfig()
	cfg.ClientID = c.ClientID
	cfg.ClientSecret = c.ClientSecret
	cfg.RedirectURL = c.RedirectURL
	// route the exchange through the package's DefaultHttpClient
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, DefaultHttpClient)
	token, err := cfg.Exchange(ctx, code)
	if err != nil {
		return Token{}, err
	}
	return fromToken(token), nil
}
// AuthCodeURL returns the URL to the fitbit auth page for the
// authorization code grant flow. The expires_in parameter defaults to
// 8 hours.
func AuthCodeURL(c *AuthConfig) string {
	cfg := newConfig()
	cfg.ClientID = c.ClientID
	cfg.ClientSecret = c.ClientSecret
	cfg.RedirectURL = c.RedirectURL
	// empty state parameter; prompt=consent forces the consent screen
	return cfg.AuthCodeURL("", oauth2.SetAuthURLParam("prompt", "consent"))
}
// ImplicitURL returns the URL to the fitbit auth page for the implicit
// grant flow. expires must be one of 86400 (1 day), 604800 (1 week),
// 2592000 (30 days) or 31536000 (1 year); any other value falls back to
// 86400.
func ImplicitURL(c *AuthConfig, expires string) string {
	switch expires {
	case "86400", "604800", "2592000", "31536000":
	default:
		expires = "86400"
	}
	cfg := newConfig()
	cfg.ClientID = c.ClientID
	cfg.ClientSecret = c.ClientSecret
	cfg.RedirectURL = c.RedirectURL
	// response_type is token instead of code for the implicit flow.
	return cfg.AuthCodeURL("",
		oauth2.SetAuthURLParam("response_type", "token"),
		oauth2.SetAuthURLParam("prompt", "consent"),
		oauth2.SetAuthURLParam("expires_in", expires),
	)
}
|
package levigo
// #cgo LDFLAGS: -lleveldb
// #include "levigo.h"
import "C"
// CompressionOpt is a value for Options.SetCompression.
type CompressionOpt int
// Known compression arguments for Options.SetCompression.
const (
NoCompression = CompressionOpt(0)
SnappyCompression = CompressionOpt(1)
)
// Options represent all of the available options when opening a database with
// Open(). Options should be created with NewOptions().
//
// It is usually wise to call SetCache() with a cache object. Otherwise, all
// data will be read off disk.
//
// To prevent memory leaks, DestroyOptions() must be called on an Options when
// the program no longer needs it.
//
type Options struct {
Opt *C.leveldb_options_t
}
// ReadOptions represent all of the available options when reading from a
// database.
//
// To prevent memory leaks, DestroyReadOptions() must be called on a ReadOptions
// when the program no longer needs it
type ReadOptions struct {
Opt *C.leveldb_readoptions_t
}
// WriteOptions represent all of the available options when writing to a
// database.
//
// To prevent memory leaks, DestroyWriteOptions() must be called on a
// WriteOptions when the program no longer needs it
type WriteOptions struct {
Opt *C.leveldb_writeoptions_t
}
// NewOptions allocates a new Options object.
//
// To prevent memory leaks, the *Options returned must have DestroyOptions()
// called on it when it is no longer needed by the program.
func NewOptions() *Options {
opt := C.leveldb_options_create()
return &Options{opt}
}
// NewReadOptions allocates a new ReadOptions object.
//
// To prevent memory leaks, the *ReadOptions returned must have Close() called
// on it when it is no longer needed by the program.
func NewReadOptions() *ReadOptions {
opt := C.leveldb_readoptions_create()
return &ReadOptions{opt}
}
// NewWriteOptions allocates a new WriteOptions object.
//
// To prevent memory leaks, the *WriteOptions returned must have Close()
// called on it when it is no longer needed by the program.
func NewWriteOptions() *WriteOptions {
opt := C.leveldb_writeoptions_create()
return &WriteOptions{opt}
}
// Close deallocates the Options, freeing its underlying C struct.
func (o *Options) Close() {
C.leveldb_options_destroy(o.Opt)
}
// SetComparator sets the comparator to be used for all read and write
// operations.
//
// The comparator that created a database must be the same one (technically,
// one with the same name string) that is used to perform read and write
// operations.
//
// The default *C.leveldb_comparator_t is usually sufficient.
func (o *Options) SetComparator(cmp *C.leveldb_comparator_t) {
C.leveldb_options_set_comparator(o.Opt, cmp)
}
// SetErrorIfExists, if passed true, will cause the opening of a database that
// already exists to throw an error.
func (o *Options) SetErrorIfExists(error_if_exists bool) {
eie := boolToUchar(error_if_exists)
C.leveldb_options_set_error_if_exists(o.Opt, eie)
}
// SetCache places a cache object in the database when a database is opened.
//
// This is usually wise to use.
func (o *Options) SetCache(cache *Cache) {
C.leveldb_options_set_cache(o.Opt, cache.Cache)
}
// SetEnv sets the Env object for the new database handle.
func (o *Options) SetEnv(env *Env) {
C.leveldb_options_set_env(o.Opt, env.Env)
}
// SetInfoLog sets a *C.leveldb_logger_t object as the informational logger
// for the database.
func (o *Options) SetInfoLog(log *C.leveldb_logger_t) {
C.leveldb_options_set_info_log(o.Opt, log)
}
// SetWriteBufferSize sets the number of bytes the database will build up in
// memory (backed by an unsorted log on disk) before converting to a sorted
// on-disk file.
func (o *Options) SetWriteBufferSize(s int) {
C.leveldb_options_set_write_buffer_size(o.Opt, C.size_t(s))
}
// SetParanoidChecks, when called with true, will cause the database to do
// aggressive checking of the data it is processing and will stop early if it
// detects errors.
//
// See the LevelDB C++ documentation docs for details.
func (o *Options) SetParanoidChecks(pc bool) {
C.leveldb_options_set_paranoid_checks(o.Opt, boolToUchar(pc))
}
// SetMaxOpenFiles sets the number of files than can be used at once by the
// database.
//
// See the LevelDB C++ documentation for details.
func (o *Options) SetMaxOpenFiles(n int) {
C.leveldb_options_set_max_open_files(o.Opt, C.int(n))
}
// SetBlockSize sets the approximate size of user data packed per block.
//
// See the LevelDB C++ documentation for details.
func (o *Options) SetBlockSize(s int) {
C.leveldb_options_set_block_size(o.Opt, C.size_t(s))
}
// SetBlockRestartInterval is the number of keys between restarts points for
// delta encoding keys.
//
// Most clients should leave this parameter alone.
func (o *Options) SetBlockRestartInterval(n int) {
C.leveldb_options_set_block_restart_interval(o.Opt, C.int(n))
}
// SetCompression sets whether to compress blocks using the specified
// compression algorithm.
//
// The default value is SnappyCompression and it is fast
// enough that it is unlikely you want to turn it off. The other option is
// NoCompression.
//
// If the LevelDB library was built without Snappy compression enabled, the
// SnappyCompression setting will be ignored.
func (o *Options) SetCompression(t CompressionOpt) {
C.leveldb_options_set_compression(o.Opt, C.int(t))
}
// SetCreateIfMissing causes Open to create a new database on disk if it does
// not already exist.
func (o *Options) SetCreateIfMissing(b bool) {
C.leveldb_options_set_create_if_missing(o.Opt, boolToUchar(b))
}
// Close deallocates the ReadOptions, freeing its underlying C struct.
func (ro *ReadOptions) Close() {
C.leveldb_readoptions_destroy(ro.Opt)
}
// SetVerifyChecksums, when called with true, will cause all data read from
// underlying storage to verified against corresponding checksums.
//
// See the LevelDB C++ documentation for details.
func (ro *ReadOptions) SetVerifyChecksums(b bool) {
C.leveldb_readoptions_set_verify_checksums(ro.Opt, boolToUchar(b))
}
// SetFillCache, when called with true, will cause all data read from
// underlying storage to be placed in the database cache, if the cache exists.
//
// It is useful to turn this off on ReadOptions that are used for
// *DB.Iterator(), as it will prevent bulk scans from flushing out live user
// data in the cache.
func (ro *ReadOptions) SetFillCache(b bool) {
C.leveldb_readoptions_set_fill_cache(ro.Opt, boolToUchar(b))
}
// SetSnapshot causes reads to be provided as they were when the passed in
// Snapshot was created by *DB.NewSnapshot().
//
// See the LevelDB C++ documentation for details.
func (ro *ReadOptions) SetSnapshot(snap *C.leveldb_snapshot_t) {
C.leveldb_readoptions_set_snapshot(ro.Opt, snap)
}
// Close deallocates the WriteOptions, freeing its underlying C struct.
func (wo *WriteOptions) Close() {
C.leveldb_writeoptions_destroy(wo.Opt)
}
// SetSync, when called with true, will cause each write to be flushed from
// the operating system buffer cache before the write is considered complete.
//
// If called with true, this will significantly slow down writes. If called with
// false, and the machine crashes, some recent writes may be lost. Note that
// if it is just the process that crashes (i.e., the machine does not reboot),
// no writes will be lost even when SetSync is called with false.
// See the LevelDB C++ documentation for details.
func (wo *WriteOptions) SetSync(b bool) {
C.leveldb_writeoptions_set_sync(wo.Opt, boolToUchar(b))
}
|
package account
import (
"bytes"
"crypto/sha256"
"errors"
"time"
jwtLib "github.com/dgrijalva/jwt-go"
)
// RegisterAndSign stores a new account (with a hashed password and the
// requested permission mask) in the database and returns a signed JWT
// for it. The returned jwt is empty on any error.
func (acct *AccountService) RegisterAndSign(name string, password string, isAdmin bool) (jwt string, err error) {
	db := acct.DB
	var hash []byte
	// validate password format first
	if err = validatePassword(password); err != nil {
		return
	}
	// generate password hash
	if hash, err = genHashPassword(password); err != nil {
		return
	}
	var mask PermMask
	if isAdmin { // idiom fix: was "isAdmin == true"
		mask = Admin
	} else {
		mask = None
	}
	newAccount := Account{
		Name:           name,
		PasswordHash:   hash,
		PermissionMask: mask,
	}
	// insert into db
	if err = db.Create(&newAccount).Error; err != nil {
		jwt = ""
		return
	}
	// build the JWT payload from the freshly assigned record ID
	payload := map[string]interface{}{
		"accountId": newAccount.ID,
	}
	if jwt, err = signJWT(payload, acct.JWTConfig); err != nil {
		return
	}
	return
}
// VerifyAccount checks name/password against the stored hash and
// returns a signed JWT on success.
func (acct *AccountService) VerifyAccount(name string, password string) (jwt string, err error) {
	db := acct.DB
	var hash []byte
	// hash the candidate password the same way it was stored
	if hash, err = genHashPassword(password); err != nil {
		return
	}
	var nameAccount Account
	if err = db.Find(&nameAccount, "name = ?", name).Error; err != nil {
		return
	}
	// Idiom fix: bytes.Equal replaces "bytes.Compare(...) != 0".
	// NOTE(review): this is not a constant-time comparison; consider
	// crypto/subtle.ConstantTimeCompare for password hashes.
	if !bytes.Equal(nameAccount.PasswordHash, hash) {
		err = errors.New("invalid password")
		return
	}
	// construct payload
	payload := map[string]interface{}{
		"accountId": nameAccount.ID,
	}
	if jwt, err = signJWT(payload, acct.JWTConfig); err != nil {
		return
	}
	return
}
// internal function
// signJWT builds an HS256-signed JWT whose claims are the given payload
// plus iss (config.Issuer) and exp (now + config.ExpireIn), signed with
// config.Secret.
func signJWT(payload map[string]interface{}, config *AccountJWTConfig) (jwt string, err error) {
	claims := jwtLib.MapClaims{}
	var expTime int64
	for key, val := range payload {
		claims[key] = val
	}
	claims["iss"] = config.Issuer
	expTime = time.Now().Add(config.ExpireIn).Unix()
	claims["exp"] = expTime
	token := jwtLib.NewWithClaims(jwtLib.SigningMethodHS256, claims)
	jwt, err = token.SignedString([]byte(config.Secret))
	return
}
// genHashPassword hashes the password with a single unsalted SHA-256
// pass; the error result is always nil today.
// TODO: use pdkdf2 — a salted KDF with a work factor is needed for
// production-strength password storage.
func genHashPassword(password string) (hash []byte, err error) {
	sum := sha256.Sum256([]byte(password))
	return sum[:], nil
}
// validatePassword checks whether the password format fits the
// requirement, e.g. length within 6-10 digits, must include numbers,
// etc.
// TODO: add logic — currently every password is accepted.
func validatePassword(password string) error {
	return nil
}
|
package test
// ========== Unit tests for the init and run other tests ================
|
package oidc_test
import (
"context"
"io"
"net/http"
"net/http/httptest"
"net/url"
"regexp"
"testing"
"github.com/ory/fosite"
"github.com/ory/fosite/token/jwt"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/valyala/fasthttp"
"github.com/authelia/authelia/v4/internal/oidc"
"github.com/authelia/authelia/v4/internal/templates"
)
// TestOpenIDConnectProvider_ResponseModeHandler checks that the provider
// falls back to fosite's default response-mode handler when none is
// configured, and returns the configured handler otherwise.
func TestOpenIDConnectProvider_ResponseModeHandler(t *testing.T) {
	testCases := []struct {
		name     string
		have     fosite.ResponseModeHandler // handler injected into the config (nil = unset)
		expected any                        // expected dynamic type of the result
	}{
		{
			"ShouldReturnDefaultFosite",
			nil,
			&fosite.DefaultResponseModeHandler{},
		},
		{
			"ShouldReturnInternal",
			&oidc.ResponseModeHandler{},
			&oidc.ResponseModeHandler{},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			config := &oidc.Config{Handlers: oidc.HandlersConfig{ResponseMode: tc.have}}
			provider := &oidc.OpenIDConnectProvider{Config: config}
			// only the dynamic type matters here, hence assert.IsType
			actual := provider.ResponseModeHandler(context.TODO())
			assert.IsType(t, tc.expected, actual)
		})
	}
}
// TestOpenIDConnectProvider_WriteAuthorizeResponse exercises every supported
// response mode (query, fragment, form_post and their JWT-secured variants)
// and asserts the exact status code, headers and body the provider writes.
func TestOpenIDConnectProvider_WriteAuthorizeResponse(t *testing.T) {
	testCases := []struct {
		name       string
		requester  fosite.AuthorizeRequester
		responder  fosite.AuthorizeResponder
		setup      func(t *testing.T, config *oidc.Config) // optional per-case config mutation
		code       int                                     // expected HTTP status
		header     http.Header                             // exact expected headers (nil to skip)
		headerFunc func(t *testing.T, header http.Header)  // custom header assertions (nil to skip)
		body       string                                  // exact expected body (used when bodyRegexp is nil)
		bodyRegexp *regexp.Regexp                          // body pattern (overrides body when non-nil)
	}{
		{
			"ShouldHandleResponseModeQuery",
			&fosite.AuthorizeRequest{
				ResponseMode: oidc.ResponseModeQuery,
				Request: fosite.Request{
					Client: &oidc.BaseClient{
						ID: "example",
						RedirectURIs: []string{
							"https://app.example.com/callback",
						},
					},
				},
				RedirectURI: &url.URL{Scheme: "https", Host: "app.example.com", Path: "/callback"},
			},
			&fosite.AuthorizeResponse{
				Parameters: url.Values{
					oidc.FormParameterAuthorizationCode: []string{"1234"},
					oidc.FormParameterIssuer:            []string{"https://auth.example.com"},
				},
			},
			nil,
			fasthttp.StatusSeeOther,
			http.Header{fasthttp.HeaderLocation: []string{"https://app.example.com/callback?code=1234&iss=https%3A%2F%2Fauth.example.com"}},
			nil,
			"",
			nil,
		},
		{
			// Headers pre-set on the responder must be copied to the response.
			"ShouldWriteHeaders",
			&fosite.AuthorizeRequest{
				ResponseMode: oidc.ResponseModeQuery,
				Request: fosite.Request{
					Client: &oidc.BaseClient{
						ID: "example",
						RedirectURIs: []string{
							"https://app.example.com/callback",
						},
					},
				},
				RedirectURI: &url.URL{Scheme: "https", Host: "app.example.com", Path: "/callback"},
			},
			&fosite.AuthorizeResponse{
				Header: http.Header{
					fasthttp.HeaderAccept: []string{"123"},
				},
				Parameters: url.Values{
					oidc.FormParameterAuthorizationCode: []string{"1234"},
					oidc.FormParameterIssuer:            []string{"https://auth.example.com"},
				},
			},
			nil,
			fasthttp.StatusSeeOther,
			http.Header{
				fasthttp.HeaderLocation: []string{"https://app.example.com/callback?code=1234&iss=https%3A%2F%2Fauth.example.com"},
				fasthttp.HeaderAccept:   []string{"123"},
			},
			nil,
			"",
			nil,
		},
		{
			// Uses *fosite.DefaultClient rather than the oidc client type;
			// presumably this is what forces the server_error path — confirm.
			"ShouldHandleBadClient",
			&fosite.AuthorizeRequest{
				ResponseMode: oidc.ResponseModeQuery,
				Request: fosite.Request{
					Client: &fosite.DefaultClient{
						ID: "example",
						RedirectURIs: []string{
							"https://app.example.com/callback",
						},
					},
				},
				RedirectURI: &url.URL{Scheme: "https", Host: "app.example.com", Path: "/callback"},
			},
			&fosite.AuthorizeResponse{
				Header: http.Header{
					fasthttp.HeaderAccept: []string{"123"},
				},
				Parameters: url.Values{
					oidc.FormParameterAuthorizationCode: []string{"1234"},
					oidc.FormParameterIssuer:            []string{"https://auth.example.com"},
				},
			},
			nil,
			fasthttp.StatusInternalServerError,
			http.Header{
				fasthttp.HeaderContentType: []string{"application/json; charset=utf-8"},
				fasthttp.HeaderAccept:      []string{"123"},
			},
			nil,
			"{\"error\":\"server_error\",\"error_description\":\"The authorization server encountered an unexpected condition that prevented it from fulfilling the request.\"}",
			nil,
		},
		{
			// Existing query parameters on the redirect URI must be preserved.
			"ShouldHandleResponseModeQueryWithExistingQuery",
			&fosite.AuthorizeRequest{
				ResponseMode: oidc.ResponseModeQuery,
				Request: fosite.Request{
					Client: &oidc.BaseClient{
						ID: "example",
						RedirectURIs: []string{
							"https://app.example.com/callback?abc=true",
						},
					},
				},
				RedirectURI: &url.URL{Scheme: "https", Host: "app.example.com", Path: "/callback", RawQuery: "abc=true"},
			},
			&fosite.AuthorizeResponse{
				Parameters: url.Values{
					oidc.FormParameterAuthorizationCode: []string{"1234"},
					oidc.FormParameterIssuer:            []string{"https://auth.example.com"},
				},
			},
			nil,
			fasthttp.StatusSeeOther,
			http.Header{fasthttp.HeaderLocation: []string{"https://app.example.com/callback?abc=true&code=1234&iss=https%3A%2F%2Fauth.example.com"}},
			nil,
			"",
			nil,
		},
		{
			"ShouldHandleResponseModeFragment",
			&fosite.AuthorizeRequest{
				ResponseMode: oidc.ResponseModeFragment,
				Request: fosite.Request{
					Client: &oidc.BaseClient{
						ID: "example",
						RedirectURIs: []string{
							"https://app.example.com/callback",
						},
					},
				},
				RedirectURI: &url.URL{Scheme: "https", Host: "app.example.com", Path: "/callback"},
			},
			&fosite.AuthorizeResponse{
				Parameters: url.Values{
					oidc.FormParameterAuthorizationCode: []string{"1234"},
					oidc.FormParameterIssuer:            []string{"https://auth.example.com"},
				},
			},
			nil,
			fasthttp.StatusSeeOther,
			http.Header{fasthttp.HeaderLocation: []string{"https://app.example.com/callback#code=1234&iss=https%3A%2F%2Fauth.example.com"}},
			nil,
			"",
			nil,
		},
		{
			// form_post renders a self-submitting HTML form instead of redirecting.
			"ShouldHandleResponseModeFormPost",
			&fosite.AuthorizeRequest{
				ResponseMode: oidc.ResponseModeFormPost,
				Request: fosite.Request{
					Client: &oidc.BaseClient{
						ID: "example",
						RedirectURIs: []string{
							"https://app.example.com/callback",
						},
					},
				},
				RedirectURI: &url.URL{Scheme: "https", Host: "app.example.com", Path: "/callback"},
			},
			&fosite.AuthorizeResponse{
				Parameters: url.Values{
					oidc.FormParameterAuthorizationCode: []string{"1234"},
					oidc.FormParameterIssuer:            []string{"https://auth.example.com"},
				},
			},
			nil,
			fasthttp.StatusOK,
			http.Header{fasthttp.HeaderContentType: []string{"text/html; charset=utf-8"}},
			nil,
			"<!DOCTYPE html>\n<html lang=\"en\">\n\t<head>\n\t\t<title>Submit This Form</title>\n\t\t<script type=\"text/javascript\">\n\t\t\twindow.onload = function() {\n\t\t\t\tdocument.forms[0].submit();\n\t\t\t};\n\t\t</script>\n\t</head>\n\t<body>\n\t\t<form method=\"post\" action=\"https://app.example.com/callback\">\n\t\t\t\n\t\t\t\n\t\t\t<input type=\"hidden\" name=\"code\" value=\"1234\"/>\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t<input type=\"hidden\" name=\"iss\" value=\"https://auth.example.com\"/>\n\t\t\t\n\t\t\t\n\t\t</form>\n\t</body>\n</html>\n",
			nil,
		},
		{
			// The three cases below nil out the Signer so JWT encoding must fail.
			"ShouldReturnEncoderErrorResponseModeJWT",
			&fosite.AuthorizeRequest{
				ResponseMode:  oidc.ResponseModeJWT,
				ResponseTypes: fosite.Arguments{oidc.ResponseTypeAuthorizationCodeFlow},
				Request: fosite.Request{
					Client: &oidc.BaseClient{
						ID: "example",
						RedirectURIs: []string{
							"https://app.example.com/callback",
						},
					},
				},
				RedirectURI: &url.URL{Scheme: "https", Host: "app.example.com", Path: "/callback"},
			},
			&fosite.AuthorizeResponse{
				Parameters: url.Values{
					oidc.FormParameterAuthorizationCode: []string{"1234"},
					oidc.FormParameterIssuer:            []string{"https://auth.example.com"},
				},
			},
			func(t *testing.T, config *oidc.Config) {
				config.Signer = nil
			},
			fasthttp.StatusInternalServerError,
			http.Header{fasthttp.HeaderContentType: []string{"application/json; charset=utf-8"}},
			nil,
			"{\"error\":\"server_error\",\"error_description\":\"The authorization server encountered an unexpected condition that prevented it from fulfilling the request.\"}",
			nil,
		},
		{
			"ShouldReturnEncoderErrorResponseModeFormPostJWT",
			&fosite.AuthorizeRequest{
				ResponseMode:  oidc.ResponseModeFormPostJWT,
				ResponseTypes: fosite.Arguments{oidc.ResponseTypeAuthorizationCodeFlow},
				Request: fosite.Request{
					Client: &oidc.BaseClient{
						ID: "example",
						RedirectURIs: []string{
							"https://app.example.com/callback",
						},
					},
				},
				RedirectURI: &url.URL{Scheme: "https", Host: "app.example.com", Path: "/callback"},
			},
			&fosite.AuthorizeResponse{
				Parameters: url.Values{
					oidc.FormParameterAuthorizationCode: []string{"1234"},
					oidc.FormParameterIssuer:            []string{"https://auth.example.com"},
				},
			},
			func(t *testing.T, config *oidc.Config) {
				config.Signer = nil
			},
			fasthttp.StatusInternalServerError,
			http.Header{fasthttp.HeaderContentType: []string{"application/json; charset=utf-8"}},
			nil,
			"{\"error\":\"server_error\",\"error_description\":\"The authorization server encountered an unexpected condition that prevented it from fulfilling the request.\"}",
			nil,
		},
		{
			"ShouldReturnEncoderErrorResponseModeFragmentJWT",
			&fosite.AuthorizeRequest{
				ResponseMode:  oidc.ResponseModeFragmentJWT,
				ResponseTypes: fosite.Arguments{oidc.ResponseTypeAuthorizationCodeFlow},
				Request: fosite.Request{
					Client: &oidc.BaseClient{
						ID: "example",
						RedirectURIs: []string{
							"https://app.example.com/callback",
						},
					},
				},
				RedirectURI: &url.URL{Scheme: "https", Host: "app.example.com", Path: "/callback"},
			},
			&fosite.AuthorizeResponse{
				Parameters: url.Values{
					oidc.FormParameterAuthorizationCode: []string{"1234"},
					oidc.FormParameterIssuer:            []string{"https://auth.example.com"},
				},
			},
			func(t *testing.T, config *oidc.Config) {
				config.Signer = nil
			},
			fasthttp.StatusInternalServerError,
			http.Header{fasthttp.HeaderContentType: []string{"application/json; charset=utf-8"}},
			nil,
			"{\"error\":\"server_error\",\"error_description\":\"The authorization server encountered an unexpected condition that prevented it from fulfilling the request.\"}",
			nil,
		},
		{
			// JWT responses can't be pinned byte-for-byte, so these cases use
			// headerFunc/bodyRegexp to match the three-segment JWT shape.
			"ShouldEncodeJWTResponseModeJWTResponseTypesCode",
			&fosite.AuthorizeRequest{
				ResponseMode:  oidc.ResponseModeJWT,
				ResponseTypes: fosite.Arguments{oidc.ResponseTypeAuthorizationCodeFlow},
				Request: fosite.Request{
					Client: &oidc.BaseClient{
						ID: "example",
						RedirectURIs: []string{
							"https://app.example.com/callback",
						},
					},
				},
				RedirectURI: &url.URL{Scheme: "https", Host: "app.example.com", Path: "/callback"},
			},
			&fosite.AuthorizeResponse{
				Parameters: url.Values{
					oidc.FormParameterAuthorizationCode: []string{"1234"},
					oidc.FormParameterIssuer:            []string{"https://auth.example.com"},
				},
			},
			nil,
			fasthttp.StatusSeeOther,
			nil,
			func(t *testing.T, header http.Header) {
				uri, err := url.ParseRequestURI(header.Get(fasthttp.HeaderLocation))
				assert.NoError(t, err)
				require.NotNil(t, uri)
				assert.Equal(t, "https", uri.Scheme)
				assert.Equal(t, "app.example.com", uri.Host)
				assert.Equal(t, "/callback", uri.Path)
				assert.Regexp(t, regexp.MustCompile(`^[a-zA-Z0-9_-]+\.[a-zA-Z0-9_-]+\.[a-zA-Z0-9_-]+$`), uri.Query().Get(oidc.FormParameterResponse))
			},
			"",
			nil,
		},
		{
			// Non-code response types carry the JWT in the fragment, not the query.
			"ShouldEncodeJWTResponseModeJWTResponseTypesNotCode",
			&fosite.AuthorizeRequest{
				ResponseMode:  oidc.ResponseModeJWT,
				ResponseTypes: fosite.Arguments{oidc.ResponseTypeImplicitFlowBoth},
				Request: fosite.Request{
					Client: &oidc.BaseClient{
						ID: "example",
						RedirectURIs: []string{
							"https://app.example.com/callback",
						},
					},
				},
				RedirectURI: &url.URL{Scheme: "https", Host: "app.example.com", Path: "/callback"},
			},
			&fosite.AuthorizeResponse{
				Parameters: url.Values{
					oidc.FormParameterAuthorizationCode: []string{"1234"},
					oidc.FormParameterIssuer:            []string{"https://auth.example.com"},
				},
			},
			nil,
			fasthttp.StatusSeeOther,
			nil,
			func(t *testing.T, header http.Header) {
				uri, err := url.Parse(header.Get(fasthttp.HeaderLocation))
				assert.NoError(t, err)
				require.NotNil(t, uri)
				assert.Equal(t, "https", uri.Scheme)
				assert.Equal(t, "app.example.com", uri.Host)
				assert.Equal(t, "/callback", uri.Path)
				assert.Regexp(t, regexp.MustCompile(`^response=[a-zA-Z0-9_-]+\.[a-zA-Z0-9_-]+\.[a-zA-Z0-9_-]+$`), uri.Fragment)
			},
			"",
			nil,
		},
		{
			"ShouldEncodeJWTResponseModeFormPost",
			&fosite.AuthorizeRequest{
				ResponseMode:  oidc.ResponseModeFormPostJWT,
				ResponseTypes: fosite.Arguments{oidc.ResponseTypeAuthorizationCodeFlow},
				Request: fosite.Request{
					Client: &oidc.BaseClient{
						ID: "example",
						RedirectURIs: []string{
							"https://app.example.com/callback",
						},
					},
				},
				RedirectURI: &url.URL{Scheme: "https", Host: "app.example.com", Path: "/callback"},
			},
			&fosite.AuthorizeResponse{
				Parameters: url.Values{
					oidc.FormParameterAuthorizationCode: []string{"1234"},
					oidc.FormParameterIssuer:            []string{"https://auth.example.com"},
				},
			},
			nil,
			fasthttp.StatusOK,
			http.Header{fasthttp.HeaderContentType: []string{"text/html; charset=utf-8"}},
			nil,
			"",
			regexp.MustCompile(`<input type="hidden" name="response" value="[a-zA-Z0-9_-]+\.[a-zA-Z0-9_-]+\.[a-zA-Z0-9_-]+"/>`),
		},
	}

	// Shared template renderer for the form_post cases.
	tp, err := templates.New(templates.Config{})
	require.NoError(t, err)

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			config := &oidc.Config{
				Signer: &jwt.DefaultSigner{
					GetPrivateKey: func(ctx context.Context) (interface{}, error) {
						return keyRSA2048, nil
					},
				},
				Templates: tp,
				Issuers: oidc.IssuersConfig{
					AuthorizationServerIssuerIdentification: "https://auth.example.com",
					JWTSecuredResponseMode:                  "https://auth.example.com",
				},
			}

			mock := httptest.NewRecorder()

			handler := &oidc.ResponseModeHandler{
				Config: config,
			}

			config.Handlers.ResponseMode = handler

			provider := &oidc.OpenIDConnectProvider{Config: config}

			// Apply the per-case config mutation (e.g. removing the signer)
			// after the defaults are in place.
			if tc.setup != nil {
				tc.setup(t, config)
			}

			provider.WriteAuthorizeResponse(context.TODO(), mock, tc.requester, tc.responder)

			result := mock.Result()

			assert.Equal(t, tc.code, result.StatusCode)

			if tc.header != nil {
				assert.Equal(t, tc.header, result.Header)
			}

			if tc.headerFunc != nil {
				tc.headerFunc(t, result.Header)
			}

			data, err := io.ReadAll(result.Body)

			require.NoError(t, err)

			if tc.bodyRegexp == nil {
				assert.Equal(t, tc.body, string(data))
			} else {
				assert.Regexp(t, tc.bodyRegexp, string(data))
			}
		})
	}
}
// TestOpenIDConnectProvider_WriteAuthorizeError verifies the error-writing
// path: errors redirect back to a valid redirect URI with error parameters
// (and the state when present), while an invalid redirect URI produces a
// JSON server_error response instead of a redirect.
func TestOpenIDConnectProvider_WriteAuthorizeError(t *testing.T) {
	testCases := []struct {
		name       string
		requester  fosite.AuthorizeRequester
		setup      func(t *testing.T, config *oidc.Config) // optional per-case config mutation
		error      error                                   // error handed to WriteAuthorizeError
		code       int                                     // expected HTTP status
		header     http.Header                             // exact expected headers (nil to skip)
		headerFunc func(t *testing.T, header http.Header)  // custom header assertions (nil to skip)
		body       string                                  // exact expected body (used when bodyRegexp is nil)
		bodyRegexp *regexp.Regexp                          // body pattern (overrides body when non-nil)
	}{
		{
			"ShouldHandleErrorResponse",
			&fosite.AuthorizeRequest{
				ResponseMode: oidc.ResponseModeQuery,
				Request: fosite.Request{
					Client: &oidc.BaseClient{
						ID: "example",
						RedirectURIs: []string{
							"https://app.example.com/callback",
						},
					},
				},
				RedirectURI: &url.URL{Scheme: "https", Host: "app.example.com", Path: "/callback"},
			},
			nil,
			fosite.ErrServerError.WithDebug("The Debug."),
			fasthttp.StatusSeeOther,
			http.Header{
				fasthttp.HeaderLocation: []string{"https://app.example.com/callback?error=server_error&error_description=The+authorization+server+encountered+an+unexpected+condition+that+prevented+it+from+fulfilling+the+request.&iss=https%3A%2F%2Fauth.example.com"},
			},
			nil,
			"",
			nil,
		},
		{
			// The state must be echoed back in the error redirect.
			"ShouldHandleErrorResponseWithState",
			&fosite.AuthorizeRequest{
				ResponseMode: oidc.ResponseModeQuery,
				State:        "abc123state",
				Request: fosite.Request{
					Client: &oidc.BaseClient{
						ID: "example",
						RedirectURIs: []string{
							"https://app.example.com/callback",
						},
					},
				},
				RedirectURI: &url.URL{Scheme: "https", Host: "app.example.com", Path: "/callback"},
			},
			nil,
			fosite.ErrServerError.WithDebug("The Debug."),
			fasthttp.StatusSeeOther,
			http.Header{
				fasthttp.HeaderLocation: []string{"https://app.example.com/callback?error=server_error&error_description=The+authorization+server+encountered+an+unexpected+condition+that+prevented+it+from+fulfilling+the+request.&iss=https%3A%2F%2Fauth.example.com&state=abc123state"},
			},
			nil,
			"",
			nil,
		},
		{
			// A redirect URI not registered for the client must not be
			// redirected to; a JSON error body is written instead.
			"ShouldHandleErrorResponseWithInvalidRedirectURI",
			&fosite.AuthorizeRequest{
				ResponseMode: oidc.ResponseModeQuery,
				State:        "abc123state",
				Request: fosite.Request{
					Client: &oidc.BaseClient{
						ID: "example",
						RedirectURIs: []string{
							"https://app.example.com/callback",
						},
					},
				},
				RedirectURI: &url.URL{Scheme: "https", Host: "app.example.com", Path: "/invalid"},
			},
			nil,
			fosite.ErrServerError.WithDebug("The Debug."),
			fasthttp.StatusInternalServerError,
			http.Header{
				fasthttp.HeaderContentType: []string{"application/json; charset=utf-8"},
			},
			nil,
			"{\"error\":\"server_error\",\"error_description\":\"The authorization server encountered an unexpected condition that prevented it from fulfilling the request.\"}",
			nil,
		},
	}

	// Shared template renderer (needed by form_post-style rendering paths).
	tp, err := templates.New(templates.Config{})
	require.NoError(t, err)

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			config := &oidc.Config{
				Signer: &jwt.DefaultSigner{
					GetPrivateKey: func(ctx context.Context) (interface{}, error) {
						return keyRSA2048, nil
					},
				},
				Templates: tp,
				Issuers: oidc.IssuersConfig{
					AuthorizationServerIssuerIdentification: "https://auth.example.com",
					JWTSecuredResponseMode:                  "https://auth.example.com",
				},
			}

			mock := httptest.NewRecorder()

			handler := &oidc.ResponseModeHandler{
				Config: config,
			}

			config.Handlers.ResponseMode = handler

			if tc.setup != nil {
				tc.setup(t, config)
			}

			handler.WriteAuthorizeError(context.TODO(), mock, tc.requester, tc.error)

			result := mock.Result()

			assert.Equal(t, tc.code, result.StatusCode)

			if tc.header != nil {
				assert.Equal(t, tc.header, result.Header)
			}

			if tc.headerFunc != nil {
				tc.headerFunc(t, result.Header)
			}

			data, err := io.ReadAll(result.Body)

			require.NoError(t, err)

			if tc.bodyRegexp == nil {
				assert.Equal(t, tc.body, string(data))
			} else {
				assert.Regexp(t, tc.bodyRegexp, string(data))
			}
		})
	}
}
|
/*
Copyright 2021 CodeNotary, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package database
import (
"testing"
"github.com/codenotary/immudb/embedded/store"
"github.com/stretchr/testify/require"
)
// TestDefaultOptions verifies the database Options defaults and that each
// With* builder method sets its field correctly.
func TestDefaultOptions(t *testing.T) {
	op := DefaultOption()
	if op.GetDbName() != "db_name" {
		t.Errorf("default sysdb name not what expected")
	}
	if op.GetDbRootPath() != DefaultOption().dbRootPath {
		t.Errorf("default db rootpath not what expected")
	}
	if !op.GetCorruptionChecker() {
		t.Errorf("default corruption checker not what expected")
	}

	DbName := "Charles_Aznavour"
	rootpath := "rootpath"
	storeOpts := store.DefaultOptions()

	op = DefaultOption().
		WithDbName(DbName).
		WithDbRootPath(rootpath).
		WithCorruptionChecker(false).
		WithStoreOptions(storeOpts)

	if op.GetDbName() != DbName {
		// Fixed stray space before the comma in the diagnostic message.
		t.Errorf("db name not set correctly, expected %s got %s", DbName, op.GetDbName())
	}
	if op.GetDbRootPath() != rootpath {
		t.Errorf("rootpath not set correctly, expected %s got %s", rootpath, op.GetDbRootPath())
	}
	if op.GetCorruptionChecker() {
		// Fixed typo "coruuption" in the diagnostic message.
		t.Errorf("corruption checker not set correctly, expected %v got %v", false, op.GetCorruptionChecker())
	}
	require.Equal(t, storeOpts, op.storeOpts)
}
|
package charge
import "github.com/gucastiliao/special-case-pattern/pkg/model"
// CompleteCharge handles the "charge completed" case for a subscription.
type CompleteCharge struct {
	subscription model.Subscription // subscription whose charge is being completed
}
// NewCompleteCharge returns a CompleteCharge bound to the given subscription.
func NewCompleteCharge(subscription model.Subscription) CompleteCharge {
	charge := CompleteCharge{subscription: subscription}
	return charge
}
// Execute runs the charge-completion flow and reports any failure.
func (c CompleteCharge) Execute() error {
	// Propagate the helper's error instead of discarding it (previously the
	// error return of setCompleteCharge was ignored and nil always returned).
	return c.setCompleteCharge()
}
// setCompleteCharge marks the subscription's charge as completed.
// Currently a stub that always succeeds.
func (c CompleteCharge) setCompleteCharge() error {
	return nil
}
|
package persist_lib
// Testservice1UnaryExample1Query runs the single-row lookup for the
// UnaryExample1 RPC and wraps the resulting row in a *Result.
// NOTE(review): the SQL has a single placeholder ($1) but two arguments are
// bound (GetTableId and GetStartTime) — confirm whether the query should also
// filter on start_time (e.g. "AND start_time = $2") or the extra bind is a
// code-generation bug.
func Testservice1UnaryExample1Query(tx Runable, req Testservice1UnaryExample1QueryParams) *Result {
	row := tx.QueryRow(
		"SELECT id AS 'table_key', id, value, msg as inner_message, status as inner_enum FROM test_table WHERE id = $1 ",
		req.GetTableId(),
		req.GetStartTime(),
	)
	return newResultFromRow(row)
}
// Testservice1UnaryExample2Query fetches a single test_table row by id and
// returns it wrapped in a *Result.
func Testservice1UnaryExample2Query(tx Runable, req Testservice1UnaryExample2QueryParams) *Result {
	const query = "SELECT id AS 'table_id', key, value, msg as inner_message, status as inner_enum FROM test_table WHERE id = $1 "
	return newResultFromRow(tx.QueryRow(query, req.GetId()))
}
// Testservice1ServerStreamSelectQuery selects the rows streamed back by the
// ServerStreamSelect RPC, returning an error Result if the query fails.
func Testservice1ServerStreamSelectQuery(tx Runable, req Testservice1ServerStreamSelectQueryParams) *Result {
	const query = "SELECT id AS 'table_id', key, value, msg as inner_message, status as inner_enum FROM test_table WHERE id = $1 "
	rows, err := tx.Query(query, req.GetTableId())
	if err != nil {
		return newResultFromErr(err)
	}
	return newResultFromRows(rows)
}
// Testservice1ClientStreamingExampleQuery executes the ClientStreamingExample
// statement and wraps the sql result (or the error) in a *Result.
func Testservice1ClientStreamingExampleQuery(tx Runable, req Testservice1ClientStreamingExampleQueryParams) *Result {
	const query = "SELECT id AS 'table_id', key, value, msg as inner_message, status as inner_enum FROM test_table WHERE id = $1 "
	sqlRes, err := tx.Exec(query, req.GetTableId())
	if err != nil {
		return newResultFromErr(err)
	}
	return newResultFromSqlResult(sqlRes)
}
// Testservice1UnaryExample1QueryParams supplies the bind arguments for
// Testservice1UnaryExample1Query.
type Testservice1UnaryExample1QueryParams interface {
	GetTableId() int32
	GetStartTime() interface{}
}

// Testservice1UnaryExample2QueryParams supplies the bind arguments for
// Testservice1UnaryExample2Query.
type Testservice1UnaryExample2QueryParams interface {
	GetId() int32
}

// Testservice1ServerStreamSelectQueryParams supplies the bind arguments for
// Testservice1ServerStreamSelectQuery.
type Testservice1ServerStreamSelectQueryParams interface {
	GetTableId() int32
}

// Testservice1ClientStreamingExampleQueryParams supplies the bind arguments
// for Testservice1ClientStreamingExampleQuery.
type Testservice1ClientStreamingExampleQueryParams interface {
	GetTableId() int32
}
|
package urlshort
import "net/http"
// MapHandler returns an http.HandlerFunc that redirects any request whose
// path is a key in pathsToUrls (301 Moved Permanently) and delegates every
// other request to the fallback handler.
func MapHandler(pathsToUrls map[string]string, fallback http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		dest, found := pathsToUrls[r.URL.Path]
		if !found {
			fallback.ServeHTTP(w, r)
			return
		}
		http.Redirect(w, r, dest, http.StatusMovedPermanently)
	}
}
|
package binance
import (
"context"
bin "github.com/adshao/go-binance"
"github.com/google/uuid"
"github.com/mhereman/cryptotrader/logger"
"github.com/mhereman/cryptotrader/types"
)
// GetOrder executes the get order request against Binance, looking the order
// up by its original client order id (the order's UserReference UUID), and
// maps the exchange's response fields onto a types.OrderInfo.
// Every failure is logged and returned with info left incomplete.
func (b Binance) GetOrder(ctx context.Context, order types.Order) (info types.OrderInfo, err error) {
	var gos *bin.GetOrderService
	var response *bin.Order
	var binanceSymbol string

	// Translate the internal symbol into Binance's symbol notation.
	if binanceSymbol, err = b.symbolToBinance(order.Symbol); err != nil {
		logger.Errorf("Binance::GetOrder Error %v\n", err)
		return
	}

	gos = b.client.NewGetOrderService()
	gos.Symbol(binanceSymbol)
	// The order is identified by the client order id we supplied at creation.
	gos.OrigClientOrderID(order.UserReference.String())
	if response, err = gos.Do(ctx); err != nil {
		logger.Errorf("Binance::GetOrder Error %v\n", err)
		return
	}

	// Recover our UUID reference from the exchange's echo of the client id.
	if info.UserReference, err = uuid.Parse(response.ClientOrderID); err != nil {
		logger.Errorf("Binance::GetOrder Error %v\n", err)
		return
	}
	info.ExchangeOrderID = response.OrderID
	if info.Symbol, err = b.toSymbol(response.Symbol); err != nil {
		logger.Errorf("Binance::GetOrder Error %v\n", err)
		return
	}
	// Remaining fields are converted via the adapter helpers (toTime/toFloat/
	// toStatus/...); presumably Binance reports numeric values as strings —
	// confirm against the go-binance API types.
	info.TransactionTime = b.toTime(response.Time)
	info.OriginalQuantity = b.toFloat(response.OrigQuantity)
	info.ExecutedQuantity = b.toFloat(response.ExecutedQuantity)
	info.Price = b.toFloat(response.Price)
	info.StopPrice = b.toFloat(response.StopPrice)
	info.Status = b.toStatus(response.Status)
	info.TimeInForce = b.toTimeInForce(response.TimeInForce)
	info.OrderType = b.toOrderType(response.Type)
	info.Side = b.toSide(response.Side)
	return
}
|
package handlers
import (
"fmt"
"net/http"
"github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
"../kvstore"
)
var (
	// Store is the shared KV store used by all HTTP handlers in this package.
	Store kvstore.Store
)
// init creates the process-wide KV store before any handler runs.
func init() {
	Store = kvstore.Initialize()
}
// Route defines the Mux
// router individual route
type Route struct {
	Path    string           // URL path pattern registered with mux
	Method  string           // HTTP method the route responds to
	Handler http.HandlerFunc // handler invoked for matching requests
}
// Routes is a collection
// of individual Routes
// NOTE(review): the set/prepare/accept/learn endpoints mutate state but are
// registered as GET — confirm this is intentional (simple curl-driven demo)
// rather than an oversight.
var Routes = []Route{
	{"/", "GET", Index},
	{"/store/get/{key}", "GET", GetValue},
	{"/store/set/{key}/{value}", "GET", PaxosSetValue},
	{"/prepare/{id}", "GET", PrepareReceiveHandler},
	{"/accept/{id}", "GET", AcceptReceiveHandler},
	{"/learn/{key}/{value}", "GET", Learn},
}
// Index is the handler for the path "/"; it writes a fixed greeting so the
// server can be probed for liveness.
func Index(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, "Hello World Paxos Server\n")
}
// Logger is middleware that logs the path and method of every incoming
// request before delegating to the next handler.
func Logger(next http.Handler) http.Handler {
	logAndServe := func(w http.ResponseWriter, r *http.Request) {
		fields := log.Fields{
			"path":   r.URL,
			"method": r.Method,
		}
		log.WithFields(fields).Info("incoming request")
		next.ServeHTTP(w, r)
	}
	return http.HandlerFunc(logAndServe)
}
// Router returns a mux router with every entry of Routes registered and the
// request-logging middleware installed.
func Router() *mux.Router {
	r := mux.NewRouter()
	for _, rt := range Routes {
		r.HandleFunc(rt.Path, rt.Handler).Methods(rt.Method)
	}
	r.Use(Logger)
	return r
}
|
package main
import (
"bufio"
"encoding/csv"
"fmt"
"os"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/sqlite"
)
// GradientCSVFilePath path to gradient stock grid csv
const GradientCSVFilePath = "./data/sp500_grid.csv"
// DBFilename SQLite Database filename
const DBFilename = "db/development.db"
// DBFilenameTest SQLite Database filename
const DBFilenameTest = "db/test.db"
// var db *gorm.DB
var gradient Gradient
// main wires the demo together: rebuilds the SQLite schema, seeds the stock
// grid from CSV, loads and prints the gradient, imports sample quotes, then
// looks up AAPL and prints its symbol and value.
func main() {
	db := loadDatabase(DBFilename)
	seedData(db)
	gradient.Load(db)
	gradient.Print()
	loadQuoteSample(db)
	stock := FindStock(db, "AAPL")
	fmt.Println(stock.Symbol)
	fmt.Println(stock.Value(db))
}
// loadDatabase opens (or creates) the SQLite database at filename, drops any
// pre-existing stocks/quotes tables, and recreates them empty so every run
// starts from a clean slate. It panics if the database cannot be opened.
func loadDatabase(filename string) *gorm.DB {
	db, err := gorm.Open("sqlite3", filename)
	if err != nil {
		// Include the underlying error so connection failures are diagnosable
		// (previously the cause was discarded).
		panic(fmt.Sprintf("failed to connect database: %v", err))
	}

	clearTables := []string{"stocks", "quotes"}
	for _, table := range clearTables {
		if db.HasTable(table) {
			db.DropTable(table)
		}
	}

	db.CreateTable(&Stock{})
	db.CreateTable(&Quote{})
	return db
}
// seedData reads the gradient grid CSV and inserts one Stock row per cell,
// recording each symbol's row/column position. It returns the parsed grid.
// It panics when the CSV cannot be opened or parsed; previously both errors
// were silently ignored, producing an empty database with no indication why.
func seedData(db *gorm.DB) [][]string {
	f, err := os.Open(GradientCSVFilePath)
	if err != nil {
		panic(fmt.Sprintf("opening gradient csv: %v", err))
	}
	// The file handle was previously never closed.
	defer f.Close()

	r := csv.NewReader(bufio.NewReader(f))
	// Named grid so it no longer shadows the package-level `gradient`.
	grid, err := r.ReadAll()
	if err != nil {
		panic(fmt.Sprintf("parsing gradient csv: %v", err))
	}

	for rowID, row := range grid {
		for colID, symbol := range row {
			db.Create(&Stock{Symbol: symbol, RowID: uint(rowID), ColID: uint(colID)})
		}
	}
	return grid
}
|
package main
import (
"encoding/json"
"fmt"
"shuxiang/common/mq"
"github.com/streadway/amqp"
)
var Client mq.MessagingClient
// init eagerly opens the RabbitMQ connection for the package-level Client.
// NOTE(review): the broker address is hard-coded; consider moving it to
// configuration.
func init() {
	Client.Conn = Client.ConnectToRabbitmq("amqp://guest:guest@192.168.10.252:5672")
	// fmt.Println("************")
}
// getBooking consumes a "getBooking" message: it decodes the JSON body into
// a string map and prints it.
func getBooking(delivery amqp.Delivery) {
	m := make(map[string]string)
	if err := json.Unmarshal(delivery.Body, &m); err != nil {
		// Malformed payloads were previously ignored silently.
		fmt.Println("getBooking: decode message:", err)
		return
	}
	fmt.Println(m)
}
// InsertUser consumes an "InsertUser" message: it decodes the JSON body into
// a string map and prints it.
func InsertUser(delivery amqp.Delivery) {
	m := make(map[string]string)
	if err := json.Unmarshal(delivery.Body, &m); err != nil {
		// Malformed payloads were previously ignored silently.
		fmt.Println("InsertUser: decode message:", err)
		return
	}
	fmt.Println(m)
	fmt.Println("*********************woshi")
}
// UserLendMQ is the message payload exchanged over the lending queues.
type UserLendMQ struct {
	UserID   string `json:"userid"`
	Bookcode string `json:"bookcode"`
	IsBN     string `json:"ISBN"`
	Status   int    `json:"status"`
}
// main publishes one sample UserLendMQ message to each of the two queues and
// then consumes from both.
func main() {
	m := &UserLendMQ{
		UserID:   "111",
		IsBN:     "222",
		Bookcode: "222",
		Status:   0,
	}
	// NOTE(review): the Marshal error is discarded — confirm the struct can
	// never fail to encode, or handle the error explicitly.
	body, _ := json.Marshal(m)
	Client.PublishOnQueue(body, "getBooking", "getBooking")
	Client.PublishOnQueue(body, "InsertUser", "InsertUser")
	for {
		// NOTE(review): if ConsumeFromQueue returns immediately this is a
		// busy loop re-registering consumers forever — verify it blocks, or
		// register once and block with select{}.
		Client.ConsumeFromQueue("InsertUser", "InsertUser", InsertUser)
		Client.ConsumeFromQueue("getBooking", "getBooking", getBooking)
	}
}
|
package main
import "fmt"
// main reads a case count from stdin, then for each case reads n and prints
// the n-th Fibonacci number together with the number of recursive calls a
// naive implementation would make.
func main() {
	var numOfCases int
	fmt.Scanf("%d", &numOfCases)

	for i := 0; i < numOfCases; i++ {
		var x int
		fmt.Scanf("%d", &x)
		result, calls := fib(x)
		// Bug fix: result and calls were swapped in the original Printf,
		// printing the call count as the Fibonacci value and vice versa.
		fmt.Printf("fib(%d) = %d calls = %d\n", x, result, calls)
	}
}
// fib returns the n-th Fibonacci number (fib(0)=0, fib(1)=1) together with
// the number of recursive calls a naive two-branch recursion would make to
// compute it (each non-base invocation adds two calls).
func fib(n int) (int64, int64) {
	switch n {
	case 0:
		return 0, 0
	case 1:
		return 1, 0
	}

	prevFib, currFib := int64(0), int64(1)
	prevCalls, currCalls := int64(0), int64(0)
	for i := 2; i <= n; i++ {
		prevFib, currFib = currFib, prevFib+currFib
		prevCalls, currCalls = currCalls, prevCalls+currCalls+2
	}
	return currFib, currCalls
}
|
package dto
// Exercise is the data-transfer object for a single exercise record, mapped
// both to JSON (snake_case tags) and to database columns (PascalCase tags).
type Exercise struct {
	ExerciseId   int    `json:"exercise_id" db:"ExerciseId"`
	Name         string `json:"name" db:"Name"`
	ExerciseTime int    `json:"exercise_time" db:"ExerciseTime"` // duration of the exercise; unit not stated here — confirm against callers
}
|
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// isPalindrome reports whether the values of a singly-linked list read the
// same forwards and backwards. An empty list is a palindrome.
// Uses O(n) extra space: values are copied to a slice, then compared with a
// two-pointer sweep from both ends.
func isPalindrome(head *ListNode) bool {
	var vals []int
	for node := head; node != nil; node = node.Next {
		vals = append(vals, node.Val)
	}

	for i, j := 0, len(vals)-1; i < j; i, j = i+1, j-1 {
		if vals[i] != vals[j] {
			return false
		}
	}
	return true
}
|
package main
import (
"practice/studySort/bucketSort/dataStruct"
)
/**
桶排序
桶排序的动效 https://www.cs.usfca.edu/~galles/visualization/BucketSort.html
JavaScript的实现 http://bubkoo.com/2014/01/15/sort-algorithm/bucket-sort/
先预备好固定数量的桶,每个待排的数都通过一个函数计算得出对应的桶编号
每个桶中可以放多个数,以链表的数据结构进行存储
*/
const BucketNum = 10
// Entry point: runs BucketSort on a fixed sample slice.
func main() {
	var numArr = []int{21, 32, 19, 56, 29, 37, 16, 91, 126, 69}
	BucketSort(numArr)
}
// BucketSort takes a slice of numbers, distributes them into a fixed number
// of buckets (linked lists) and prints a bucket's contents.
// NOTE(review): `min` is computed but never used — getBucketIndex only uses
// the maximum; either drop the min scan or use it in the index calculation.
// NOTE(review): only buckets[1] is printed at the end, so the output is not
// the full sorted sequence — confirm whether all buckets should be printed
// in order.
func BucketSort(numArr []int) {
	numLength := len(numArr)
	var max, min int;
	max = numArr[0];
	min = numArr[0];
	// find the largest and smallest values
	for i := 0; i < numLength; i++ {
		if numArr[i] > max {
			max = numArr[i]
		}
		if numArr[i] < min {
			min = numArr[i]
		}
	}
	// prepare a fixed number of buckets up front
	var buckets [BucketNum]*dataStruct.LinkNode
	for i := 0; i < BucketNum; i++ {
		buckets[i] = dataStruct.GetNode(0)
	}
	for _, value := range numArr {
		bucketIndex := getBucketIndex(value, max, numLength)
		var newNode *dataStruct.LinkNode = new(dataStruct.LinkNode)
		newNode.Data = value
		buckets[bucketIndex] = buckets[bucketIndex].Push(newNode)
	}
	// print output
	dataStruct.PrintLink(buckets[1])
}
/*
getBucketIndex maps a value to a bucket slot: values are spread
proportionally over numberCount buckets by their size relative to the
maximum; dividing by maximum+1 keeps the largest value inside the last
bucket rather than one past it.
*/
func getBucketIndex(num, maximum, numberCount int) int {
	scaled := num * numberCount
	return scaled / (maximum + 1)
}
|
package mongo
import "go.mongodb.org/mongo-driver/bson/primitive"
// DBRef is a MongoDB DBRef type: a document reference consisting of the
// target collection name and the referenced document's ObjectID, serialized
// with MongoDB's reserved $ref/$id keys.
type DBRef struct {
	// A reference collection
	Ref string `bson:"$ref"`
	// A reference identifier
	ID primitive.ObjectID `bson:"$id"`
}
|
package constant
const (
	/****************************************** mongo ****************************************/
	/****************************************** feedback ****************************************/
	FeedbackUnReadStatus = 0
	FeedbackReadedStatus = 1
	/****************************************** redis ****************************************/
	RedisUserLoc      = "user:loc:%s"       // format: user:loc:<id>
	RedisUserLocValue = "%f,%f"             // value: comma-separated coordinates; type: string
	RedisUserEQ       = "user:eq:%s"        // format: user:eq:<id>
	RedisUserStatus   = "user:status:%s"    // format: user:status:<id>; 0 = offline, 1 = online
	RedisUserMsgQuene = "user:msg:quene:%s" // format: user:msg:quene:<id>; message queue key ("quene" typo is baked into the stored key — do not "fix" without a data migration)
	/****************************************** user ****************************************/
	UserStatusUnActive = 0
	UserStatusActive   = 1
)
|
package hamming
import "fmt"
const testVersion = 5

// Distance returns the Hamming distance between two equal-length strings:
// the number of positions at which their bytes differ. When the lengths
// differ the distance is undefined, so -1 and an error are returned.
func Distance(a, b string) (int, error) {
	if len(a) != len(b) {
		// Idiomatic Go error strings are lowercase and unpunctuated
		// (previously started with a capital letter).
		return -1, fmt.Errorf("string lengths %d and %d are not equal", len(a), len(b))
	}

	// Removed else-after-return: the happy path now reads straight down.
	count := 0
	for i := 0; i < len(a); i++ {
		if a[i] != b[i] {
			count++
		}
	}
	return count, nil
}
|
package typematch
import (
"fmt"
"go/ast"
"go/parser"
"go/token"
"go/types"
"strconv"
"strings"
)
// patternOp discriminates the node kinds of a compiled type pattern.
type patternOp int

const (
	opType    patternOp = iota // concrete type, compared with types.Identical
	opPointer                  // pointer to a sub-pattern element
	opVar                      // named placeholder ($name) that binds a type
	opSlice                    // slice of a sub-pattern element
	opArray                    // array of a sub-pattern element (length may itself be a placeholder)
	opMap                      // map from sub-pattern key to sub-pattern value
)
// Pattern is a compiled type expression that can be matched against
// types.Type values; placeholder bindings are recorded per match.
type Pattern struct {
	typeMatches  map[string]types.Type // $name -> type bound during the current match
	int64Matches map[string]int64      // $name -> array length bound during the current match
	root         *pattern              // root node of the compiled pattern tree
}
// pattern is a single node of the compiled pattern tree.
type pattern struct {
	value interface{} // op-specific payload: types.Type, placeholder name, or array length
	op    patternOp   // node kind
	subs  []*pattern  // child patterns (element, or key/value for maps)
}
// Parse compiles a type-pattern expression (with $name placeholders) into a
// Pattern that can later be matched against types.Type values.
func Parse(s string) (*Pattern, error) {
	// Rewrite $name placeholders into valid Go identifiers (__name) so the
	// stdlib expression parser accepts the input.
	expr, err := parser.ParseExpr(strings.ReplaceAll(s, "$", "__"))
	if err != nil {
		return nil, err
	}

	root := parseExpr(expr)
	if root == nil {
		return nil, fmt.Errorf("can't convert %s type expression", s)
	}

	return &Pattern{
		typeMatches:  map[string]types.Type{},
		int64Matches: map[string]int64{},
		root:         root,
	}, nil
}
// basicTypeByName maps predeclared type names (plus the universe "error")
// to their go/types representations for direct use in patterns.
var basicTypeByName = map[string]types.Type{
	"bool":       types.Typ[types.Bool],
	"int":        types.Typ[types.Int],
	"int8":       types.Typ[types.Int8],
	"int16":      types.Typ[types.Int16],
	"int32":      types.Typ[types.Int32],
	"int64":      types.Typ[types.Int64],
	"uint":       types.Typ[types.Uint],
	"uint8":      types.Typ[types.Uint8],
	"uint16":     types.Typ[types.Uint16],
	"uint32":     types.Typ[types.Uint32],
	"uint64":     types.Typ[types.Uint64],
	"uintptr":    types.Typ[types.Uintptr],
	"float32":    types.Typ[types.Float32],
	"float64":    types.Typ[types.Float64],
	"complex64":  types.Typ[types.Complex64],
	"complex128": types.Typ[types.Complex128],
	"string":     types.Typ[types.String],
	"error":      types.Universe.Lookup("error").Type(),
}
// parseExpr converts a parsed Go type expression into a pattern tree.
// It returns nil for any expression form it does not understand; callers
// treat nil as a parse failure.
func parseExpr(e ast.Expr) *pattern {
	switch e := e.(type) {
	case *ast.Ident:
		// A bare identifier is either a predeclared basic type...
		basic, ok := basicTypeByName[e.Name]
		if ok {
			return &pattern{op: opType, value: basic}
		}
		// ...or a placeholder: "$name" was rewritten to "__name" by Parse.
		if strings.HasPrefix(e.Name, "__") {
			name := strings.TrimPrefix(e.Name, "__")
			return &pattern{op: opVar, value: name}
		}
	case *ast.StarExpr:
		elem := parseExpr(e.X)
		if elem == nil {
			return nil
		}
		return &pattern{op: opPointer, subs: []*pattern{elem}}
	case *ast.ArrayType:
		elem := parseExpr(e.Elt)
		if elem == nil {
			return nil
		}
		// No length expression means a slice type.
		if e.Len == nil {
			return &pattern{
				op:   opSlice,
				subs: []*pattern{elem},
			}
		}
		// The array length may itself be a placeholder identifier...
		if id, ok := e.Len.(*ast.Ident); ok && strings.HasPrefix(id.Name, "__") {
			name := strings.TrimPrefix(id.Name, "__")
			return &pattern{
				op:    opArray,
				value: name,
				subs:  []*pattern{elem},
			}
		}
		// ...or a literal integer constant.
		lit, ok := e.Len.(*ast.BasicLit)
		if !ok || lit.Kind != token.INT {
			return nil
		}
		length, err := strconv.ParseInt(lit.Value, 10, 64)
		if err != nil {
			return nil
		}
		return &pattern{
			op:    opArray,
			value: length,
			subs:  []*pattern{elem},
		}
	case *ast.MapType:
		keyType := parseExpr(e.Key)
		if keyType == nil {
			return nil
		}
		valType := parseExpr(e.Value)
		if valType == nil {
			return nil
		}
		return &pattern{
			op:   opMap,
			subs: []*pattern{keyType, valType},
		}
	case *ast.ParenExpr:
		// Parentheses are transparent.
		return parseExpr(e.X)
	case *ast.InterfaceType:
		// Only the empty interface is supported as a concrete type.
		if len(e.Methods.List) == 0 {
			return &pattern{op: opType, value: types.NewInterfaceType(nil, nil)}
		}
	}
	return nil
}
// MatchIdentical reports whether typ is identical (in the go/types sense)
// to the pattern, binding any $name placeholders along the way. Bindings
// from a previous match are discarded first.
func (p *Pattern) MatchIdentical(typ types.Type) bool {
	p.reset()

	return p.matchIdentical(p.root, typ)
}
// reset drops all placeholder bindings accumulated by a previous match,
// allocating fresh maps only when the old ones actually hold entries.
func (p *Pattern) reset() {
	if len(p.typeMatches) > 0 {
		p.typeMatches = make(map[string]types.Type)
	}
	if len(p.int64Matches) > 0 {
		p.int64Matches = make(map[string]int64)
	}
}
// matchIdentical reports whether typ matches the pattern node sub,
// binding "__name" type variables in p.typeMatches and "[__name]" array
// lengths in p.int64Matches as it goes.
func (p *Pattern) matchIdentical(sub *pattern, typ types.Type) bool {
	switch sub.op {
	case opVar:
		name := sub.value.(string)
		if name == "_" {
			// Wildcard matches anything without binding.
			return true
		}
		y, ok := p.typeMatches[name]
		if !ok {
			// First occurrence binds the variable to typ.
			p.typeMatches[name] = typ
			return true
		}
		if y == nil {
			return typ == nil
		}
		// Later occurrences must be identical to the bound type.
		return types.Identical(typ, y)
	case opType:
		return types.Identical(typ, sub.value.(types.Type))
	case opPointer:
		typ, ok := typ.(*types.Pointer)
		if !ok {
			return false
		}
		return p.matchIdentical(sub.subs[0], typ.Elem())
	case opSlice:
		typ, ok := typ.(*types.Slice)
		if !ok {
			return false
		}
		return p.matchIdentical(sub.subs[0], typ.Elem())
	case opArray:
		typ, ok := typ.(*types.Array)
		if !ok {
			return false
		}
		var wantLen int64
		switch v := sub.value.(type) {
		case string:
			// Length is a pattern variable; "_" matches any length.
			if v == "_" {
				wantLen = typ.Len()
				break
			}
			length, ok := p.int64Matches[v]
			if ok {
				wantLen = length
			} else {
				// First occurrence binds the length variable.
				p.int64Matches[v] = typ.Len()
				wantLen = typ.Len()
			}
		case int64:
			// Literal array length from the pattern source.
			wantLen = v
		}
		return wantLen == typ.Len() && p.matchIdentical(sub.subs[0], typ.Elem())
	case opMap:
		typ, ok := typ.(*types.Map)
		if !ok {
			return false
		}
		return p.matchIdentical(sub.subs[0], typ.Key()) &&
			p.matchIdentical(sub.subs[1], typ.Elem())
	default:
		return false
	}
}
|
package model
// Camera mirrors the base `camera` database table.
//
// NOTE(review): underscore field names (Camera_address, ...) are
// non-idiomatic Go; renaming would break callers, so they are kept.
type Camera struct {
	ID int `gorm:"primary_key:AUTO_INCREMENT;column:id;not null" json:"id"`
	Camera_address string `gorm:"column:camera_address" json:"camera_address"`
	// Camera_status: numeric state flag — semantics defined by callers.
	Camera_status int `gorm:"column:camera_status" json:"camera_status"`
	Camera_position string `gorm:"column:camera_position" json:"camera_position"`
	Camera_type string `gorm:"column:camera_type" json:"camera_type"`
	// Camera_RTSP holds the camera's RTSP endpoint (column camera_rtsp).
	Camera_RTSP string `gorm:"column:camera_rtsp" json:"camera_rtsp"`
	Camera_token string `gorm:"column:camera_token" json:"camera_token"`
}
// Account mirrors the base `account` database table.
type Account struct {
	ID int `gorm:"primary_key:AUTO_INCREMENT;column:id;not null" json:"id"`
	Ip_address string `gorm:"column:ip_address" json:"ip_address"`
	Account string `gorm:"column:account" json:"account"`
	// NOTE(review): Password is serialized to JSON as-is; confirm it is
	// hashed upstream before this struct is exposed.
	Password string `gorm:"column:password" json:"password"`
	Activation int `gorm:"column:activation" json:"activation"`
}
package minnow
import (
"log"
"os"
"time"
)
// IngestDirInfo describes a single ingest request: scan IngestPath for
// metadata/data file pairs older than MinAge, tag them with ProcessedBy,
// and optionally remove the directory once ingested.
type IngestDirInfo struct {
	IngestPath Path
	MinAge time.Duration
	ProcessedBy []ProcessorId
	RemoveOnceIngested bool
}
// DirectoryIngester turns IngestDirInfo requests into DispatchInfo
// messages, staging file pairs in random directories under workPath.
type DirectoryIngester struct {
	workPath Path
	ingestDirChan chan IngestDirInfo
	dispatchChan chan DispatchInfo
	logger *log.Logger
}
// NewDirectoryIngester builds a DirectoryIngester that reads ingest
// requests from ingestDirChan, emits ready file pairs on dispatchChan,
// and logs to stdout.
func NewDirectoryIngester(workPath Path, ingestDirChan chan IngestDirInfo, dispatchChan chan DispatchInfo) *DirectoryIngester {
	return &DirectoryIngester{
		workPath:      workPath,
		ingestDirChan: ingestDirChan,
		dispatchChan:  dispatchChan,
		logger:        log.New(os.Stdout, "DirectoryIngester: ", 0),
	}
}
// moveToRandomPath relocates a metadata/data file pair into a freshly
// created random subdirectory of workPath. On success it returns the new
// locations; on failure it returns the original paths with the error.
func moveToRandomPath(workPath, metadataPath, dataPath Path) (Path, Path, error) {
	destDir, err := makeRandomPath(workPath, "dispatch")
	if err != nil {
		return metadataPath, dataPath, err
	}
	movedMetadata := destDir.JoinPath(Path(metadataPath.Name()))
	if err = metadataPath.Rename(movedMetadata); err != nil {
		return metadataPath, dataPath, err
	}
	movedData := destDir.JoinPath(Path(dataPath.Name()))
	if err = dataPath.Rename(movedData); err != nil {
		return metadataPath, dataPath, err
	}
	return movedMetadata, movedData, nil
}
// Run services ingest requests until ingester.ingestDirChan is closed.
//
// For each request it scans the ingest directory for "*"+PropertiesExtension
// metadata files, pairs each with its data file (same name, extension
// stripped), and — once both files are older than MinAge — moves the pair
// to a random work directory and forwards it on dispatchChan.
func (ingester *DirectoryIngester) Run() {
	for ingestDirInfo := range ingester.ingestDirChan {
		// BUG FIX: the error from Glob was previously discarded with _,
		// silently skipping the whole directory on failure.
		metadataPaths, err := ingestDirInfo.IngestPath.Glob("*" + PropertiesExtension)
		if err != nil {
			ingester.logger.Print(err.Error())
			continue
		}
		for _, metadataPath := range metadataPaths {
			if !ValidPropertiesFile(metadataPath) {
				ingester.logger.Printf("Invalid properties file at %s. Skipping...", metadataPath)
				continue
			}
			dataPath := metadataPath.WithSuffix("") // lop off the extension
			if !dataPath.Exists() {
				ingester.logger.Printf("%s does not have corresponding data file", metadataPath)
				continue
			}
			now := time.Now()
			metadataAge, err := metadataPath.Age(now)
			if err != nil {
				ingester.logger.Print(err.Error())
				continue
			}
			dataAge, err := dataPath.Age(now)
			if err != nil {
				ingester.logger.Print(err.Error())
				continue
			}
			// Only ingest files quiescent for MinAge so we don't grab
			// files that are still being written.
			if metadataAge > ingestDirInfo.MinAge && dataAge > ingestDirInfo.MinAge {
				// Move things to a random path in case we're ingesting from the
				// main ingest directory. Files that have already been processed
				// are already in a random directory, but we're moving them anyway
				// just to be consistent.
				metadataPath, dataPath, err := moveToRandomPath(ingester.workPath, metadataPath, dataPath)
				if err != nil {
					ingester.logger.Print(err.Error())
					continue
				}
				dispatchInfo := DispatchInfo{metadataPath, dataPath, ingestDirInfo.ProcessedBy}
				ingester.dispatchChan <- dispatchInfo
				if ingestDirInfo.RemoveOnceIngested {
					// NOTE(review): this removes the whole ingest directory while
					// the loop may still hold unprocessed entries from the same
					// directory — confirm this is intentional.
					err := ingestDirInfo.IngestPath.RmdirRecursive()
					if err != nil {
						ingester.logger.Printf("Could not remove ingest dir: %s", err.Error())
					}
				}
			}
		}
	}
}
|
package ratecounter
import (
"testing"
"time"
)
// TestAvgRateCounter verifies basic Incr/Rate/Hits accounting and that
// both decay to zero after the interval elapses.
//
// NOTE(review): timing-based; may be flaky on a heavily loaded machine.
func TestAvgRateCounter(t *testing.T) {
	interval := 50 * time.Millisecond
	r := NewAvgRateCounter(interval)
	// check asserts the current average rate and hit count.
	check := func(expectedRate float64, expectedHits int64) {
		rate, hits := r.Rate(), r.Hits()
		if rate != expectedRate {
			t.Error("Expected rate ", rate, " to equal ", expectedRate)
		}
		if hits != expectedHits {
			t.Error("Expected hits ", hits, " to equal ", expectedHits)
		}
	}
	check(0, 0)
	r.Incr(1) // counter = 1, hits = 1
	check(1.0, 1)
	r.Incr(3) // counter = 4, hits = 2
	check(2.0, 2)
	// After the full interval both samples must have expired.
	time.Sleep(2 * interval)
	check(0, 0)
}
// TestAvgRateCounterAdvanced verifies partial expiry: after the first
// sample ages out, only the second contributes to the average.
//
// NOTE(review): timing-based; may be flaky on a heavily loaded machine.
func TestAvgRateCounterAdvanced(t *testing.T) {
	interval := 50 * time.Millisecond
	almost := 45 * time.Millisecond
	gap := 1 * time.Millisecond
	r := NewAvgRateCounter(interval)
	// check asserts the current average rate and hit count.
	check := func(expectedRate float64, expectedHits int64) {
		rate, hits := r.Rate(), r.Hits()
		if rate != expectedRate {
			t.Error("Expected rate ", rate, " to equal ", expectedRate)
		}
		if hits != expectedHits {
			t.Error("Expected hits ", hits, " to equal ", expectedHits)
		}
	}
	check(0, 0)
	r.Incr(1) // counter = 1, hits = 1
	check(1.0, 1)
	time.Sleep(interval - almost)
	r.Incr(3) // counter = 4, hits = 2
	check(2.0, 2)
	// Sleep past the first sample's expiry but not the second's.
	time.Sleep(almost + gap)
	check(3.0, 1) // counter = 3, hits = 1
	time.Sleep(2 * interval)
	check(0, 0)
}
// TestAvgRateCounterMinResolution verifies that configuring a resolution
// below 1 panics.
func TestAvgRateCounterMinResolution(t *testing.T) {
	defer func() {
		if r := recover(); r == nil {
			t.Errorf("Resolution < 1 did not panic")
		}
	}()
	NewAvgRateCounter(500 * time.Millisecond).WithResolution(0)
}
// TestAvgRateCounterNoResolution verifies that with resolution 1 both
// samples fall into a single bucket and expire together.
//
// NOTE(review): timing-based; may be flaky on a heavily loaded machine.
func TestAvgRateCounterNoResolution(t *testing.T) {
	interval := 50 * time.Millisecond
	almost := 45 * time.Millisecond
	gap := 1 * time.Millisecond
	r := NewAvgRateCounter(interval).WithResolution(1)
	// check asserts the current average rate and hit count.
	check := func(expectedRate float64, expectedHits int64) {
		rate, hits := r.Rate(), r.Hits()
		if rate != expectedRate {
			t.Error("Expected rate ", rate, " to equal ", expectedRate)
		}
		if hits != expectedHits {
			t.Error("Expected hits ", hits, " to equal ", expectedHits)
		}
	}
	check(0, 0)
	r.Incr(1) // counter = 1, hits = 1
	check(1.0, 1)
	time.Sleep(interval - almost)
	r.Incr(3) // counter = 4, hits = 2
	check(2.0, 2)
	// With a single bucket, both samples expire at once.
	time.Sleep(almost + gap)
	check(0, 0) // counter = 0, hits = 0
	time.Sleep(2 * interval)
	check(0, 0)
}
// TestAvgRateCounter_String verifies the scientific-notation rendering
// of the average rate.
func TestAvgRateCounter_String(t *testing.T) {
	r := NewAvgRateCounter(1 * time.Second)
	if r.String() != "0.00000e+00" {
		t.Error("Expected ", r.String(), " to equal ", "0.00000e+00")
	}
	r.Incr(1)
	if r.String() != "1.00000e+00" {
		t.Error("Expected ", r.String(), " to equal ", "1.00000e+00")
	}
}
// TestAvgRateCounter_Incr_ReturnsImmediately guards against Incr
// blocking for the counter's interval.
func TestAvgRateCounter_Incr_ReturnsImmediately(t *testing.T) {
	interval := 1 * time.Second
	r := NewAvgRateCounter(interval)
	start := time.Now()
	r.Incr(-1)
	duration := time.Since(start)
	if duration >= 1*time.Second {
		t.Error("incr took", duration, "to return")
	}
}
// BenchmarkAvgRateCounter measures the combined cost of Incr plus Rate.
//
// NOTE(review): the 0 interval means every sample expires immediately —
// confirm that is the intended benchmark configuration.
func BenchmarkAvgRateCounter(b *testing.B) {
	interval := 0 * time.Millisecond
	r := NewAvgRateCounter(interval)
	for i := 0; i < b.N; i++ {
		r.Incr(1)
		r.Rate()
	}
}
|
package bytesutil
import (
"bytes"
"compress/zlib"
"crypto/rand"
"fmt"
"io"
"math/big"
)
// Constants for byte sizes in decimal (SI) and binary (IEC) formats.
// The *I names are powers of 1024, the others powers of 1000.
const (
	KILO int64 = 1000 // 1000 power 1 (10 power 3)
	KIBI int64 = 1024 // 1024 power 1 (2 power 10)
	MEGA = KILO * KILO // 1000 power 2 (10 power 6)
	MEBI = KIBI * KIBI // 1024 power 2 (2 power 20)
	GIGA = MEGA * KILO // 1000 power 3 (10 power 9)
	GIBI = MEBI * KIBI // 1024 power 3 (2 power 30)
	TERA = GIGA * KILO // 1000 power 4 (10 power 12)
	TEBI = GIBI * KIBI // 1024 power 4 (2 power 40)
	PETA = TERA * KILO // 1000 power 5 (10 power 15)
	PEBI = TEBI * KIBI // 1024 power 5 (2 power 50)
	EXA = PETA * KILO // 1000 power 6 (10 power 18)
	EXBI = PEBI * KIBI // 1024 power 6 (2 power 60)
)
// BinaryFormat formats a byte size to a human readable string using
// binary (IEC) prefixes: B, KiB, MiB, ... EiB. Negative sizes yield "".
// See: https://en.m.wikipedia.org/wiki/Binary_prefix
//
// For example,
//	fmt.Println(BinaryFormat(2140))
// prints
//	2.09 KiB
func BinaryFormat(size int64) string {
	switch {
	case size < 0:
		return ""
	case size < KIBI:
		return fmt.Sprintf("%d B", size)
	case size < MEBI:
		return fmt.Sprintf("%.2f KiB", float64(size)/float64(KIBI))
	case size < GIBI:
		return fmt.Sprintf("%.2f MiB", float64(size)/float64(MEBI))
	case size < TEBI:
		return fmt.Sprintf("%.2f GiB", float64(size)/float64(GIBI))
	case size < PEBI:
		return fmt.Sprintf("%.2f TiB", float64(size)/float64(TEBI))
	case size < EXBI:
		return fmt.Sprintf("%.2f PiB", float64(size)/float64(PEBI))
	default:
		return fmt.Sprintf("%.2f EiB", float64(size)/float64(EXBI))
	}
}
// DecimalFormat formats a byte size to a human readable string using
// metric (SI) prefixes: B, KB, MB, ... EB. Negative sizes yield "".
// See: https://en.m.wikipedia.org/wiki/Metric_prefix
//
// For example,
//	fmt.Println(DecimalFormat(2140))
// prints
//	2.14 KB
func DecimalFormat(size int64) string {
	switch {
	case size < 0:
		return ""
	case size < KILO:
		return fmt.Sprintf("%d B", size)
	case size < MEGA:
		return fmt.Sprintf("%.2f KB", float64(size)/float64(KILO))
	case size < GIGA:
		return fmt.Sprintf("%.2f MB", float64(size)/float64(MEGA))
	case size < TERA:
		return fmt.Sprintf("%.2f GB", float64(size)/float64(GIGA))
	case size < PETA:
		return fmt.Sprintf("%.2f TB", float64(size)/float64(TERA))
	case size < EXA:
		return fmt.Sprintf("%.2f PB", float64(size)/float64(PETA))
	default:
		return fmt.Sprintf("%.2f EB", float64(size)/float64(EXA))
	}
}
// GenerateRandomBytes returns n cryptographically secure random bytes.
//
// BUG FIX: it panics if the secure random source fails; the previous
// version ignored rand.Read's error and could silently return zeroed
// (non-random) bytes.
func GenerateRandomBytes(n int) []byte {
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	return b
}
// CopyBytes returns an independent copy of b. A nil input yields nil;
// a non-nil empty slice yields a non-nil empty copy.
func CopyBytes(b []byte) (copiedBytes []byte) {
	if b == nil {
		return
	}
	copiedBytes = make([]byte, len(b))
	copy(copiedBytes, b)
	return
}
// RandInt returns a cryptographically random integer uniformly
// distributed in [0, max). It panics if the secure random source fails
// (and rand.Int itself panics when max <= 0).
func RandInt(max int) int {
	v, err := rand.Int(rand.Reader, big.NewInt(int64(max)))
	if err != nil {
		panic(err)
	}
	return int(v.Int64())
}
// Compress zlib-compresses input at the default compression level and
// returns the compressed bytes.
func Compress(input []byte) ([]byte, error) {
	var compressed bytes.Buffer
	zw := zlib.NewWriter(&compressed)
	if _, err := zw.Write(input); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil {
		return nil, err
	}
	return compressed.Bytes(), nil
}
// Decompress inflates zlib-compressed input and returns the original
// bytes. It fails if input is not a well-formed zlib stream.
func Decompress(input []byte) ([]byte, error) {
	zr, err := zlib.NewReader(bytes.NewReader(input))
	if err != nil {
		return nil, err
	}
	var out bytes.Buffer
	if _, err := io.Copy(&out, zr); err != nil {
		return nil, err
	}
	if err := zr.Close(); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}
|
package main
import (
"log"
"ukor/cmd/ukor"
)
// main runs the ukor root command and exits non-zero on error.
func main() {
	if err := ukor.RootCommand().Execute(); err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"fmt"
"log"
"math/big"
"path/filepath"
"strings"
)
// GenerateProductOfNs multiplies together the moduli (N) of every RSA
// public key found in files and returns the product.
func GenerateProductOfNs(files []string) (*big.Int, error) {
	result := big.NewInt(1)
	for _, path := range files {
		pub, err := ReadPublicKey(path)
		if err != nil {
			return nil, err
		}
		result.Mul(result, pub.N)
	}
	return result, nil
}
// main scans the current directory for RSA public keys (*.pem) that share
// a prime factor with another key, recovers p and q for each such key via
// a batch-GCD attack, and writes the reconstructed private keys to *.pk.
func main() {
	foundKeys := 0
	pemFilePattern := "*.pem"
	// BUG FIX: the error from Glob was previously discarded with _.
	// (Glob only fails on a malformed pattern, but fail loudly anyway.)
	files, err := filepath.Glob(pemFilePattern)
	if err != nil {
		log.Fatalf("could not list %s files: %v", pemFilePattern, err)
	}
	// Get the product of the Ns in all the PEM files.
	prodNs, err := GenerateProductOfNs(files)
	if err != nil {
		log.Fatalf("could not generate product of Ns: %v", err)
	}
	for _, file := range files {
		publicKey, err := ReadPublicKey(file)
		if err != nil {
			log.Fatalf("could not read %s: %v", file, err)
		}
		// Divide prodNs by the current N, otherwise the greatest common
		// divisor below would trivially be N itself.
		otherNs := new(big.Int)
		otherNs.Div(prodNs, publicKey.N)
		// Calculate the greatest common divisor of otherNs and N.
		p := new(big.Int)
		p.GCD(nil, nil, otherNs, publicKey.N)
		// As the factors of N are primes, a GCD greater than 1 means this
		// key shares a prime factor with one of the other keys.
		if p.Cmp(big.NewInt(1)) < 1 {
			continue
		}
		foundKeys++
		q := new(big.Int)
		q.Div(publicKey.N, p)
		// With both p and q known we can reconstruct the private key.
		privateKey := BuildPrivateKey(publicKey, p, q)
		pkFile := strings.Replace(file, ".pem", ".pk", 1)
		err = WritePrivateKey(privateKey, pkFile)
		if err != nil {
			log.Fatalf("error writing %s", pkFile)
		}
	}
	fmt.Printf("Generated %d private keys for %d public keys\n", foundKeys, len(files))
}
|
package main
import (
"math"
)
/**
跳跃游戏 II
给定一个非负整数数组,你最初位于数组的第一个位置。
数组中的每个元素代表你在该位置可以跳跃的最大长度。
你的目标是使用最少的跳跃次数到达数组的最后一个位置。
示例 1:
```
输入: [2, 3, 1, 1, 4]
输出: 2
解释: 跳到最后一个位置的最小跳跃数是 2。
从下标为 0 跳到下标为 1 的位置,跳 1 步,然后跳 3 步到达数组的最后一个位置。
```
说明:
假设你总是可以到达数组的最后一个位置。
*/
/**
循环到 len(nums) - 1, 最后一个元素不访问,因为到达最后一个元素之后,就不会再往后跳跃了
*/
// Jump returns the minimum number of jumps needed to reach the last
// index of nums, where nums[i] is the maximum jump length from index i.
//
// Greedy scan: `reach` is the farthest index reachable so far and
// `boundary` the end of the current jump's range; crossing the boundary
// costs one jump. The last index is never visited because no jump is
// needed once it is reached.
func Jump(nums []int) int {
	var steps, reach, boundary int
	for idx := 0; idx < len(nums)-1; idx++ {
		if idx+nums[idx] > reach {
			reach = idx + nums[idx]
		}
		if idx == boundary {
			boundary = reach
			steps++
		}
	}
	return steps
}
/**
没做出来,继续努力
*/
// 从最后一个元素往前跳,走到最开始的位置
// toJump walks backwards from the end of nums toward index 0, exploring
// take/skip choices exhaustively. nextStep is the jump length the current
// index would need to supply; countJump accumulates jumps taken so far.
// math.MaxInt32 marks a dead end.
//
// NOTE(review): exponential brute force kept for reference (original
// author's note: "didn't get it working, keep trying"); Jump above is
// the linear greedy solution.
func toJump(nums []int, nextStep int, idx int, countJump int) int {
	// Walked past the start without completing a chain: dead end.
	if idx < 0 {
		return math.MaxInt32
	}
	if idx == 0 {
		// At the start: valid only if it supplies exactly nextStep.
		if nums[idx] == nextStep {
			countJump++
			return countJump
		} else {
			return math.MaxInt32
		}
	}
	sel := math.MaxInt32
	// "Take" branch: usable only when nums[idx] == nextStep.
	if nums[idx] == nextStep {
		countJump++
		idx--
		sel = toJump(nums, 1, idx, countJump) // take this index
	}
	nextStep++
	idx--
	nosel := toJump(nums, nextStep, idx, countJump) // skip this index
	return int(math.Min(float64(sel), float64(nosel)))
}
|
/*
Bytelandian Currency is made of coins with integers on them. There is a coin for each non-negative integer (including 0).
You have access to a peculiar money changing machine.
If you insert a N-valued coin, with N positive, It pays back 3 coins of the value N/2,N/3 and N/4, rounded down.
For example, if you insert a 19-valued coin, you get three coins worth 9, 6, and 4.
If you insert a 2-valued coin, you get three coins worth 1, 0, and 0. 0-valued coins cannot be used in this machine.
One day you're bored so you insert a 7-valued coin. You get three coins back, and you then insert each of these back into the machine.
You continue to do this with every positive-valued coin you get back, until finally you're left with nothing but 0-valued coins.
You count them up and see you have 15 coins.
How many 0-valued coins could you get starting with a single 1000-valued coin?
Author: Thomas1122
Formal Inputs & Outputs
Input Description
The value N of the coin you start with
Output Description
The number of 0-valued coins you wind up with after putting every positive-valued coin you have through the machine.
Sample Inputs & Outputs
Sample Input
7
Sample Output
15
Challenge Input
1000
Challenge Input Solution
???
Note
Hint: use recursion!
Please direct questions about this challenge to /u/Cosmologicon
*/
package main
// main sanity-checks exchange against the sample input (7 -> 15) and
// the challenge input (1000 -> 3263).
func main() {
	assert(exchange(7) == 15)
	assert(exchange(1000) == 3263)
}
// assert panics when x is false.
func assert(x bool) {
	if !x {
		panic("assertion failed")
	}
}
// exchange reports how many 0-valued coins remain after repeatedly
// feeding every positive-valued coin, starting from a single n-valued
// coin, through the machine (which returns n/2, n/3 and n/4, rounded
// down).
func exchange(n uint) uint {
	queue := []uint{n}
	var zeroCoins uint
	for len(queue) > 0 {
		coin := queue[0]
		queue = queue[1:]
		// Each coin yields three coins; zero-valued ones are counted,
		// positive ones go back into the machine.
		for _, div := range []uint{2, 3, 4} {
			if piece := coin / div; piece > 0 {
				queue = append(queue, piece)
			} else {
				zeroCoins++
			}
		}
	}
	return zeroCoins
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
"text/template"
)
// tmp holds the parsed upload page template, loaded once at startup.
var tmp *template.Template

// init parses index.gohtml, panicking if it is missing or invalid.
func init() {
	tmp = template.Must(template.ParseFiles("index.gohtml"))
}
// main starts the upload server on the address given as the first CLI
// argument, defaulting to ":8080".
//
// NOTE(review): the argument is passed to ListenAndServe verbatim, so it
// must include the leading colon (e.g. ":9090") — confirm intended.
func main() {
	var port string
	var arg string
	if len(os.Args) > 1 {
		arg = os.Args[1]
	}
	if arg != "" {
		port = arg
	} else {
		port = ":8080"
	}
	fmt.Println("Server started at port ", port)
	http.Handle("/favicon.ico", http.NotFoundHandler())
	http.HandleFunc("/", defaultpagehandler)
	err := http.ListenAndServe(port, nil)
	if err != nil {
		log.Fatalln(err)
	}
}
// defaultpagehandler serves the upload form and handles uploads.
//
// On POST it reads the multipart file field "ufile", saves it under
// ./uploads/, and echoes the file contents back into the rendered
// template.
func defaultpagehandler(rew http.ResponseWriter, req *http.Request) {
	var s string
	if req.Method == http.MethodPost {
		f, fh, err := req.FormFile("ufile")
		if handleerror(rew, err) {
			return
		}
		defer f.Close()
		bs, err := ioutil.ReadAll(f)
		if handleerror(rew, err) {
			return
		}
		// SECURITY FIX: fh.Filename is client-controlled; take only its
		// base name so "../../etc/x" cannot escape the uploads directory.
		ptf, err := os.Create(filepath.Join("./uploads/", filepath.Base(fh.Filename)))
		if handleerror(rew, err) {
			return
		}
		defer ptf.Close()
		_, err = ptf.Write(bs)
		if handleerror(rew, err) {
			return
		}
		s = string(bs)
	}
	// BUG FIX: the header previously read "text/html; charset " with no
	// value, which is a malformed Content-Type parameter.
	rew.Header().Set("Content-Type", "text/html; charset=utf-8")
	// BUG FIX: the Execute error was silently discarded; headers are
	// already sent at this point, so logging is all we can do.
	if err := tmp.Execute(rew, s); err != nil {
		log.Println(err)
	}
}
// handleerror writes a 500 response carrying err's message and reports
// whether an error was present. Callers should return immediately when
// it yields true.
func handleerror(rew http.ResponseWriter, err error) bool {
	if err == nil {
		return false
	}
	http.Error(rew, err.Error(), http.StatusInternalServerError)
	return true
}
|
package main
import "fmt"
import "strconv"
import "math/rand"
const minkeysize = 16
// Generateloads returns a generator of presorted, unique, zero-padded
// decimal keys and matching values, returning nil after `n` keys.
// klen/vlen are the key/value byte widths; value generation is skipped
// when the passed value buffer is nil.
func Generateloads(klen, vlen, n int64) func(k, v []byte) ([]byte, []byte) {
	var textint [1024]byte
	keynum := int64(0)
	return func(key, value []byte) ([]byte, []byte) {
		if keynum >= n {
			// Load phase complete.
			return nil, nil
		}
		ascii := strconv.AppendInt(textint[:0], int64(keynum), 10)
		// create key: '0'-pad to klen with the digits right-aligned.
		key = Fixbuffer(key, int64(klen))
		copy(key, zeros)
		copy(key[klen-int64(len(ascii)):klen], ascii)
		if value != nil { // create value
			value = Fixbuffer(value, int64(vlen))
			copytovalue(value, ascii, klen, vlen)
		}
		keynum++
		return key, value
	}
}
// Generateloadr returns a generator of unique keys in random order,
// returning nil after `n` keys. Keys are drawn from [0, n*rndscale)
// with uniqueness tracked in a bitmap; seed fixes the sequence.
func Generateloadr(
	klen, vlen, n, seed int64) func(k, v []byte) ([]byte, []byte) {
	var text [1024]byte
	intn := n * rndscale
	rnd := rand.New(rand.NewSource(seed))
	// One bit per candidate key number to guarantee uniqueness.
	bitmap := make([]byte, ((intn / 8) + 1))
	count := int64(0)
	return func(key, value []byte) ([]byte, []byte) {
		if count >= n {
			return nil, nil
		}
		ascii, key := makeuniquekey(rnd, bitmap, 0, intn, klen, text[:0], key)
		//fmt.Printf("load %q\n", key)
		value = makevalue(vlen, ascii, value)
		count++
		return key, value
	}
}
// Generatecreate returns a generator of unique "create" keys strictly
// above the load-phase key space (offset by loadn*rndscale); seed fixes
// the sequence. Unlike the load generators it never returns nil.
func Generatecreate(
	klen, vlen, loadn, insertn,
	seed int64) func(k, v []byte) ([]byte, []byte) {
	var text [1024]byte
	loadn = int64(loadn * rndscale)
	intn := (insertn * rndscale)
	rnd := rand.New(rand.NewSource(seed))
	bitmap := make([]byte, ((intn / 8) + 1))
	return func(key, value []byte) ([]byte, []byte) {
		ascii, key := makeuniquekey(rnd, bitmap, loadn, intn, klen, text[:0], key)
		//fmt.Printf("create %q\n", key)
		value = makevalue(vlen, ascii, value)
		return key, value
	}
}
// Generateupdate returns a generator of keys to update, mixing keys from
// the load pool with (every third pick, once the load headstart is done)
// keys from the create pool. mod >= 0 restricts output to keys with
// keynum%2 == mod. seedl/seedc seed the two pools; the load RNG is
// re-seeded every loadn picks so load keys repeat in the same order.
func Generateupdate(
	klen, vlen, loadn, insertn,
	seedl, seedc, mod int64) func(k, v []byte) ([]byte, []byte) {
	var textint [1024]byte
	var getkey func() int64
	loadn1 := loadn * rndscale
	intn := insertn * rndscale
	rndl := rand.New(rand.NewSource(seedl))
	rndc := rand.New(rand.NewSource(seedc))
	lcount := int64(0)
	getkey = func() (keynum int64) {
		if lcount < loadn { // from load pool, headstart
			keynum = int64(rndl.Intn(int(loadn1)))
		} else if (lcount % 3) == 0 { // from create pool
			keynum = loadn1 + int64(rndc.Intn(int(intn)))
		} else { // from load pool
			keynum = int64(rndl.Intn(int(loadn1)))
		}
		lcount++
		if lcount >= loadn && (lcount%loadn) == 0 {
			// Restart the load sequence so earlier keys come around again.
			rndl = rand.New(rand.NewSource(seedl))
		}
		if mod >= 0 && (keynum%2) != mod {
			// Wrong parity: recurse for another candidate.
			return getkey()
		}
		return keynum
	}
	return func(key, value []byte) ([]byte, []byte) {
		keynum := getkey()
		ascii, key := makekey(keynum, klen, textint[:0], key)
		//fmt.Printf("update %q\n", key)
		value = makevalue(vlen, ascii, value)
		return key, value
	}
}
// Generateread returns a generator of keys to read. The caller passes
// ncreates (creates so far); ncreates/loadn acts as `mod`: while it is
// > 0, all but every mod-th pick comes from the create pool.
func Generateread(
	klen, loadn, insertn, seedl, seedc int64) func([]byte, int64) []byte {
	var textint [1024]byte
	var getkey func(int64) int64
	loadn1 := loadn * rndscale
	intn := insertn * rndscale
	rndl := rand.New(rand.NewSource(seedl))
	rndc := rand.New(rand.NewSource(seedc))
	lcount := int64(0)
	getkey = func(mod int64) (keynum int64) {
		if lcount < loadn { // from load pool, headstart
			keynum = int64(rndl.Intn(int(loadn1)))
		} else if mod > 0 && (lcount%mod) != 0 { // from create pool
			keynum = loadn1 + int64(rndc.Intn(int(intn)))
		} else { // from load pool
			keynum = int64(rndl.Intn(int(loadn1)))
		}
		lcount++
		if lcount >= loadn && (lcount%loadn) == 0 {
			// Restart both sequences so previously returned keys repeat.
			rndl = rand.New(rand.NewSource(seedl))
			rndc = rand.New(rand.NewSource(seedc))
		}
		return keynum
	}
	return func(key []byte, ncreates int64) []byte {
		keynum := getkey(ncreates / loadn)
		_, key = makekey(keynum, klen, textint[:0], key)
		//fmt.Printf("read %q\n", key)
		return key
	}
}
// Generatereadseq returns a generator of read keys drawn uniformly from
// the load pool only.
//
// NOTE(review): getkey's `mod` parameter and lcount are unused here;
// they appear to be leftovers from Generateread.
func Generatereadseq(klen, loadn, seedl int64) func([]byte, int64) []byte {
	var textint [1024]byte
	var getkey func(int64) int64
	rndl := rand.New(rand.NewSource(seedl))
	lcount := int64(0)
	getkey = func(mod int64) (keynum int64) {
		keynum = int64(rndl.Intn(int(loadn)))
		lcount++
		return keynum
	}
	return func(key []byte, ncreates int64) []byte {
		keynum := getkey(ncreates / loadn)
		_, key = makekey(keynum, klen, textint[:0], key)
		return key
	}
}
// Generatedelete returns a generator of keys to delete; key selection is
// identical to Generateupdate (load pool with a headstart, every third
// pick from the create pool, parity filter via mod, load RNG re-seeded
// every loadn picks).
func Generatedelete(
	klen, vlen,
	loadn, insertn,
	seedl, seedc, mod int64) func(k, v []byte) ([]byte, []byte) {
	var textint [1024]byte
	var getkey func() int64
	loadn1 := loadn * rndscale
	intn := insertn * rndscale
	rndl := rand.New(rand.NewSource(seedl))
	rndc := rand.New(rand.NewSource(seedc))
	lcount := int64(0)
	getkey = func() (keynum int64) {
		if lcount < loadn { // from load pool, headstart
			keynum = int64(rndl.Intn(int(loadn1)))
		} else if (lcount % 3) == 0 { // from create pool
			keynum = loadn1 + int64(rndc.Intn(int(intn)))
		} else { // from load pool
			keynum = int64(rndl.Intn(int(loadn1)))
		}
		lcount++
		if lcount >= loadn && (lcount%loadn) == 0 {
			// Restart the load sequence so earlier keys come around again.
			rndl = rand.New(rand.NewSource(seedl))
		}
		if mod >= 0 && (keynum%2) != mod {
			// Wrong parity: recurse for another candidate.
			return getkey()
		}
		return keynum
	}
	return func(key, value []byte) ([]byte, []byte) {
		keynum := getkey()
		ascii, key := makekey(keynum, klen, textint[:0], key)
		//fmt.Printf("delete %q\n", key)
		value = makevalue(vlen, ascii, value)
		return key, value
	}
}
// rndscale widens the random key space relative to the requested count.
var rndscale = int64(3)

// bitmask selects the bit for keynum%8 within a bitmap byte.
var bitmask = [8]byte{1, 2, 4, 8, 16, 32, 64, 128}

// zeros is a pool of '0' bytes (filled in init) used to left-pad keys.
var zeros = make([]byte, 4096)
// makeuniquekey draws random key numbers in [0, intn) until it finds one
// not yet recorded in bitmap, marks it used, adds offset, and renders it
// as a '0'-padded decimal of width klen into key. It returns the bare
// ASCII digits and the formatted key.
//
// NOTE(review): once all intn numbers are used the loop never exits —
// callers must not request more keys than the space holds.
func makeuniquekey(
	rnd *rand.Rand, bitmap []byte, offset, intn int64,
	klen int64, textint, key []byte) ([]byte, []byte) {
	// IDIOM FIX: `for true` replaced with the idiomatic bare `for`.
	for {
		keynum := int64(rnd.Intn(int(intn)))
		if (bitmap[keynum/8] & bitmask[keynum%8]) == 0 {
			bitmap[keynum/8] |= bitmask[keynum%8]
			keynum += offset
			ascii := strconv.AppendInt(textint[:0], keynum, 10)
			// create key
			key = Fixbuffer(key, int64(klen))
			copy(key, zeros)
			copy(key[klen-int64(len(ascii)):klen], ascii)
			return ascii, key
		}
	}
	// Unreachable; kept so the file's fmt import stays referenced.
	panic(fmt.Errorf("unreachable code"))
}
// makekey renders keynum as a '0'-padded decimal of width klen into key,
// returning the bare ASCII digits and the padded key.
func makekey(keynum, klen int64, textint, key []byte) ([]byte, []byte) {
	ascii := strconv.AppendInt(textint[:0], keynum, 10)
	// create key
	key = Fixbuffer(key, int64(klen))
	copy(key, zeros)
	copy(key[klen-int64(len(ascii)):klen], ascii)
	return ascii, key
}
// makevalue builds a value of width vlen with ascii right-aligned over
// '0' padding. vlen == 0 leaves value untouched; a vlen smaller than the
// digit count is bumped to minkeysize.
func makevalue(vlen int64, ascii, value []byte) []byte {
	if vlen == 0 {
		return value
	}
	if vlen < int64(len(ascii)) {
		vlen = minkeysize
	}
	value = Fixbuffer(value, vlen)
	copy(value, zeros)
	copy(value[vlen-int64(len(ascii)):vlen], ascii)
	return value
}
// copytovalue writes ascii right-aligned into value[0:vlen], zero-padding
// only the region beyond klen when vlen > klen (the first klen bytes were
// already padded via the key path).
//
// NOTE(review): assumes len(value) >= vlen — confirm at call sites.
func copytovalue(value, ascii []byte, klen, vlen int64) []byte {
	if vlen <= klen {
		copy(value, zeros)
	} else {
		copy(value[vlen-klen:vlen], zeros)
	}
	copy(value[vlen-int64(len(ascii)):vlen], ascii)
	return value
}
// init fills the shared zeros buffer with ASCII '0' padding bytes.
func init() {
	for i := range zeros {
		zeros[i] = '0'
	}
}
|
package domain
// Dealership holds a dealership object: a named dealership at an
// embedded Address with its ground-vehicle inventory.
type Dealership struct {
	Address
	Name string
	DealershipID string
	// GroundInventory maps a string key to a vehicle record — presumably
	// keyed by vehicle ID; confirm against callers.
	GroundInventory map[string]*GroundTransportation
}
|
// Package clientserverpair provides a buffered, connected pair of dialers and
// listeners.
//
// This pair of objects differs from the net.Pipe implementation in that reads
// and writes are buffered and operations on them do not block, unless the
// respective internal buffer(s) is/are full.
package clientserverpair
import (
"context"
"errors"
"fmt"
"net"
"sync"
"sync/atomic"
"github.com/rwool/ex/log"
)
// Sentinel errors returned by connections and listeners in this package.
var (
	// ErrClosed indicates that there was an attempt to use a closed connection.
	ErrClosed = errors.New("conn: use of closed connection")
	// ErrListenerClosed indicates that there was an attempt to use a closed
	// listener.
	ErrListenerClosed = errors.New("listener closed")
)
// nextID is the next connection-pair ID handed out by getNextID;
// idMu guards it.
var (
	nextID int
	idMu sync.Mutex
)
// getNextID returns a process-wide unique, monotonically increasing
// connection-pair identifier. Safe for concurrent use.
func getNextID() int {
	idMu.Lock()
	defer idMu.Unlock()
	nextID++
	return nextID - 1
}
// attempts counts read/write calls per side of a connection pair
// (c = client, s = server); mu guards the counters.
type attempts struct {
	mu sync.Mutex
	cRead, cWrite, sRead, sWrite int
}
// String renders the counters as "(C: r, w; S r, w)".
//
// NOTE(review): reads the counters without taking a.mu — racy when
// called concurrently with updates.
func (a *attempts) String() string {
	return fmt.Sprintf("(C: %d, %d; S %d, %d)", a.cRead, a.cWrite, a.sRead, a.sWrite)
}
var accessAttempts = map[int]*attempts{}
// DebugConn is a net.Conn implementation that can log its input and output.
// readDebug/writeDebug, when non-nil, are invoked after each Read/Write
// with the transferred bytes and result.
type DebugConn struct {
	net.Conn
	isClient bool
	id int
	logger log.Logger
	readDebug RWDebugger
	writeDebug RWDebugger
	// closed is 1 once the conn is marked closed (accessed atomically).
	closed uint32
}
// isClosed atomically reports whether the connection is marked closed.
func (dc *DebugConn) isClosed() bool {
	return atomic.LoadUint32(&dc.closed) == 1
}
// Read reads up to len(p) bytes from the connection.
//
// Returns ErrClosed once the connection has been closed; otherwise it
// forwards to the wrapped Conn and reports the call to readDebug.
func (dc *DebugConn) Read(p []byte) (int, error) {
	if dc.isClosed() {
		return 0, ErrClosed
	}
	n, err := dc.Conn.Read(p)
	if dc.readDebug != nil {
		dc.readDebug(dc.logger, true, dc.isClient, dc.id, p, n, err)
	}
	return n, err
}
// Write writes len(p) bytes from p to the connection.
//
// Returns ErrClosed once the connection has been closed; otherwise it
// forwards to the wrapped Conn and reports the call to writeDebug.
func (dc *DebugConn) Write(p []byte) (int, error) {
	if dc.isClosed() {
		return 0, ErrClosed
	}
	n, err := dc.Conn.Write(p)
	if dc.writeDebug != nil {
		// BUG FIX: previously invoked dc.readDebug here after checking
		// writeDebug for nil, so write traffic went to the read debugger
		// (and could panic if only writeDebug was set).
		dc.writeDebug(dc.logger, false, dc.isClient, dc.id, p, n, err)
	}
	return n, err
}
// Addr holds the information for the connection. This typically does not hold
// much meaning because this is not for a "real" connection.
type Addr struct {
	// NetworkStr is returned by Network (e.g. "pipe").
	NetworkStr string
	// StringStr is returned by String (e.g. "127.0.0.1:22").
	StringStr string
}
// Network returns the type of network that is used for the connection
// (net.Addr interface).
func (a *Addr) Network() string {
	return a.NetworkStr
}
// String returns the string form of the address (net.Addr interface).
func (a *Addr) String() string {
	return a.StringStr
}
// PipeListener is a net.Listener implementation that is paired up with a
// corresponding dialer.
//
// Accepting connections with this listener returns a net.Conn object that
// reads and writes from and to buffers that are shared with another net.Conn
// object that is used by client side of the connection.
type PipeListener struct {
	// connC receives the server half of each dialed pair.
	connC <-chan net.Conn
	// doneC is closed exactly once by Close to unblock Accept.
	doneC chan struct{}
	addr *Addr
	closeOnce *sync.Once
}
// Accept accepts a connection. The returned net.Conn object is paired up with
// a corresponding client side net.Conn object that share a pair of read and
// write buffers.
//
// Blocks until a dialer connects, or returns ErrListenerClosed once the
// listener is closed.
func (pl *PipeListener) Accept() (c net.Conn, e error) {
	select {
	case conn := <-pl.connC:
		return conn, nil
	case <-pl.doneC:
		return nil, ErrListenerClosed
	}
}
// Close closes the listener. Safe to call multiple times; always
// returns nil.
func (pl *PipeListener) Close() error {
	pl.closeOnce.Do(func() { close(pl.doneC) })
	return nil
}
// Addr returns the address that is being listened on.
func (pl *PipeListener) Addr() net.Addr {
	return pl.addr
}
// Dialer is the interface that wraps the dial method.
//
// Primarily used for abstracting out possible dialer implementations as there
// is no dialer interface in the standard library.
type Dialer interface {
	DialContext(ctx context.Context, network, address string) (net.Conn, error)
}
// PipeDialer is an in-memory dialer that opens net.Conn pipes in conjunction
// with Accept calls from the corresponding PipeListener.
type PipeDialer struct {
	// connC delivers the server half of each pair to the listener.
	connC chan<- net.Conn
	logger log.Logger
	clientReadDebug RWDebugger
	clientWriteDebug RWDebugger
	serverReadDebug RWDebugger
	serverWriteDebug RWDebugger
}
// DialContext creates a client side connection that is paired with the server
// side connection.
//
// The network and address arguments are ignored, and ctx is not
// consulted. The server half is handed to the paired listener via
// pd.connC, blocking until Accept receives it.
//
// NOTE(review): the write to accessAttempts is unsynchronized — racy if
// DialContext is called from multiple goroutines.
func (pd *PipeDialer) DialContext(_ context.Context, network, address string) (net.Conn, error) {
	// 1 KiB buffered pipe in each direction.
	c, s := newConnPair(1 << 10)
	id := getNextID()
	accessAttempts[id] = &attempts{}
	clientConn := &DebugConn{
		Conn: c,
		logger: pd.logger,
		isClient: true,
		id: id,
		readDebug: pd.clientReadDebug,
		writeDebug: pd.clientWriteDebug,
	}
	serverConn := &DebugConn{
		Conn: s,
		logger: pd.logger,
		isClient: false,
		id: id,
		readDebug: pd.serverReadDebug,
		writeDebug: pd.serverWriteDebug,
	}
	pd.connC <- serverConn
	return clientConn, nil
}
// RWDebugger is a function that can be used to debug read and write calls.
// It receives the direction (isRead), the side (isClient), the pair ID,
// the buffer involved, the byte count processed, and the call's error.
type RWDebugger func(logger log.Logger, isRead, isClient bool, pairID int, data []byte, processed int, err error)
// Example connection debugger.
//
//func updateAttempt(a *attempts, isRead, isClient bool) {
// a.mu.Lock()
// defer a.mu.Unlock()
//
// if isRead {
// if isClient {
// a.cRead++
// } else {
// a.sRead++
// }
// } else {
// if isClient {
// a.cWrite++
// } else {
// a.sWrite++
// }
// }
//}
//
//func BasicDebugger(logger log.Logger, isRead, isClient bool, pairID int, data []byte, processed int, err error) {
// updateAttempt(accessAttempts[pairID], isRead, isClient)
//
// var clientServer string
// if isClient {
// clientServer = "client"
// } else {
// clientServer = "server"
// }
//
// var readWrite string
// if isRead {
// readWrite = "read"
// } else {
// readWrite = "write"
// }
//
// logger.Debugf("(%d) %s %s: %s %d bytes, err: %v, data:\n%s, Stack:\n%s",
// pairID, clientServer, readWrite, accessAttempts[pairID].String(), processed, err, spew.Sdump(data), string(debug.Stack()))
//}
// PipeCSPairConfig contains configuration information for the creation of a
// Dialer/Listener pipe pair. Nil debuggers are replaced with no-ops by New.
type PipeCSPairConfig struct {
	Logger log.Logger
	ClientReadDebug RWDebugger
	ClientWriteDebug RWDebugger
	ServerReadDebug RWDebugger
	ServerWriteDebug RWDebugger
}
// New creates a new dialer and listener pipe pair.
//
// Connections returned from the dialer and listener will be connected via
// shared buffers. Nil debuggers in pcpc are replaced with no-ops, so the
// resulting DebugConns always have non-nil hooks.
func New(pcpc *PipeCSPairConfig) (*PipeDialer, *PipeListener) {
	connC := make(chan net.Conn)
	// noOpIfNil substitutes a do-nothing debugger for nil entries.
	noOpIfNil := func(rwd RWDebugger) RWDebugger {
		if rwd == nil {
			return func(log.Logger, bool, bool, int, []byte, int, error) {}
		}
		return rwd
	}
	pd := &PipeDialer{
		connC: connC,
		logger: pcpc.Logger,
		clientReadDebug: noOpIfNil(pcpc.ClientReadDebug),
		clientWriteDebug: noOpIfNil(pcpc.ClientWriteDebug),
		serverReadDebug: noOpIfNil(pcpc.ServerReadDebug),
		serverWriteDebug: noOpIfNil(pcpc.ServerWriteDebug),
	}
	pl := &PipeListener{
		connC: connC,
		doneC: make(chan struct{}),
		addr: &Addr{
			NetworkStr: "pipe",
			StringStr: "127.0.0.1:22",
		},
		closeOnce: &sync.Once{},
	}
	return pd, pl
}
|
package main
import (
"runtime"
"github.com/therecipe/qt/androidextras"
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/qml"
)
// Application is the process-wide singleton, assigned in init.
var Application *application

// application wires the QML engine to Android permission signals.
// The blank fields declare Qt meta-object hooks via struct tags.
type application struct {
	core.QObject
	_ func() `constructor:"init"`
	_ func() `signal:"onPermissionsGranted"`
	_ func() `signal:"onPermissionsDenied"`
	engine *qml.QQmlApplicationEngine
}
// init registers the singleton, creates the QML engine, and arranges for
// the UI to load once permissions are either granted or denied.
func (a *application) init() {
	Application = a
	a.engine = qml.NewQQmlApplicationEngine(nil)
	a.ConnectOnPermissionsGranted(a.initializeQML)
	// The UI is loaded even when permissions are denied.
	a.ConnectOnPermissionsDenied(a.initializeQML)
}
// initializeQML loads the main QML document from embedded resources.
func (a *application) initializeQML() {
	a.engine.Load(core.NewQUrl3("qrc:/main.qml", 0))
}
// checkPermissions requests the QZXing runtime permissions on Android;
// on other platforms it immediately emits OnPermissionsGranted.
func (a *application) checkPermissions() {
	if runtime.GOOS == "android" {
		//intentionally called in the C++ thread since it is blocking and will continue after the check
		println("About to request permissions")
		androidextras.QAndroidJniObject_CallStaticMethodVoid2("org/ftylitak/qzxing/Utilities", "requestQZXingPermissions", "(Landroid/app/Activity;)V", androidextras.QtAndroid_AndroidActivity().Object())
		// NOTE(review): reaching this line only means the blocking call
		// returned; it does not confirm the permissions were granted.
		println("Permissions granted")
	} else {
		a.OnPermissionsGranted()
	}
}
|
package main
import (
"fmt"
"github.com/joho/godotenv"
"log"
"os"
"github.com/sendgrid/sendgrid-go"
"github.com/sendgrid/sendgrid-go/helpers/mail"
)
func main() {
err := godotenv.Load(".env")
if err != nil {
log.Println(err)
}
from := mail.NewEmail("Example User", "soichi.sumi@gmail.com")
subject := "Sending with SendGrid is Fun"
to := mail.NewEmail("Example User", "atom.soichi0407@gmail.com")
plainTextContent := "and easy to do anywhere, even with Go"
htmlContent := "<strong>and easy to do anywhere, even with Go</strong>"
message := mail.NewSingleEmail(from, subject, to, plainTextContent, htmlContent)
fmt.Println(os.Getenv("SENDGRID_API_KEY"))
client := sendgrid.NewSendClient(os.Getenv("SENDGRID_API_KEY"))
response, err := client.Send(message)
if err != nil {
println("error")
log.Println(err)
return
}
println("succeeded")
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
} |
package sim
import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"
	"errors"
	"io"
	"math/rand"
	"os"
)
type SeedPod struct {
src *os.File
n int
offset int64
size int64
}
func NewSeedPod(src string, offset int64) (*SeedPod, error) {
f, err := os.Open(src)
if err != nil {
return nil, err
}
info, err := f.Stat()
if err != nil {
return nil, err
}
return &SeedPod{
src: f,
n: -1,
offset: offset,
size: info.Size(),
}, nil
}
func (s *SeedPod) Get(i int) (*rand.Rand, error) {
pos := (int64(i / 4) * 32 + s.offset) % s.size
s.src.Seek(pos, os.SEEK_SET)
buf := make([]byte, 32)
n, err := s.src.Read(buf)
if err != nil {
return nil, err
}
if n < 32 {
s.src.Seek(0, os.SEEK_SET)
xbuf := make([]byte, 32 - n)
_, err := s.src.Read(xbuf)
if err != nil {
return nil, err
}
for j, v := range xbuf {
buf[n+j] = v
}
}
m := (i % 4) * 8
sum := sha256.Sum256(buf)
seedBytes := bytes.NewReader(sum[m:m + 8])
var seed int64
err = binary.Read(seedBytes, binary.BigEndian, &seed)
if err != nil {
return nil, err
}
source := rand.NewSource(seed)
return rand.New(source), nil
}
func (s *SeedPod) Next() (*rand.Rand, error) {
s.n += 1
return s.Get(s.n)
}
func (s *SeedPod) Reset() {
s.n = -1
}
|
package server
import (
"io/fs"
"net/url"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/valyala/fasthttp"
"github.com/authelia/authelia/v4/internal/configuration/schema"
"github.com/authelia/authelia/v4/internal/mocks"
"github.com/authelia/authelia/v4/internal/session"
"github.com/authelia/authelia/v4/internal/templates"
)
const (
	// assetsOpenAPIPath is the path of the OpenAPI spec inside the embedded assets.
	assetsOpenAPIPath = "public_html/api/openapi.yml"
	// localOpenAPIPath is the same spec on disk, relative to this package.
	localOpenAPIPath = "../../api/openapi.yml"
)

// ReadFileOpenAPI is a filesystem wrapper used by tests: it serves the
// repository's on-disk OpenAPI spec in place of the embedded copy and
// delegates every other path to the embedded assets.
type ReadFileOpenAPI struct{}

// Open returns the local spec for the embedded OpenAPI path and falls back to
// the embedded assets for any other name.
func (lfs *ReadFileOpenAPI) Open(name string) (fs.File, error) {
	switch name {
	case assetsOpenAPIPath:
		return os.Open(localOpenAPIPath)
	default:
		return assets.Open(name)
	}
}
// ReadFile mirrors Open for whole-file reads: the embedded OpenAPI path is
// read from the local working tree, everything else from the embedded assets.
func (lfs *ReadFileOpenAPI) ReadFile(name string) ([]byte, error) {
	switch name {
	case assetsOpenAPIPath:
		return os.ReadFile(localOpenAPIPath)
	default:
		return assets.ReadFile(name)
	}
}
// TestShouldTemplateOpenAPI checks that the OpenAPI spec asset is rendered
// through the template provider with values derived from the configured
// session cookie: the output must contain the Authelia redirection example URL.
func TestShouldTemplateOpenAPI(t *testing.T) {
	provider, err := templates.New(templates.Config{})
	require.NoError(t, err)

	// Renamed from "fs" so the local variable no longer shadows the io/fs
	// package import used by the ReadFileOpenAPI method signatures.
	openAPIFS := &ReadFileOpenAPI{}
	require.NoError(t, provider.LoadTemplatedAssets(openAPIFS))

	mock := mocks.NewMockAutheliaCtx(t)

	mock.Ctx.Configuration.Server = schema.DefaultServerConfiguration
	mock.Ctx.Configuration.Session = schema.Session{
		Cookies: []schema.SessionCookie{
			{
				Domain:      "example.com",
				AutheliaURL: &url.URL{Scheme: "https", Host: "auth.example.com", Path: "/"},
			},
		},
	}
	mock.Ctx.Providers.SessionProvider = session.NewProvider(mock.Ctx.Configuration.Session, nil)

	opts := NewTemplatedFileOptions(&mock.Ctx.Configuration)

	handler := ServeTemplatedOpenAPI(provider.GetAssetOpenAPISpecTemplate(), opts)

	// Simulate a proxied request for the spec.
	mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedProto, "https")
	mock.Ctx.Request.Header.Set(fasthttp.HeaderXForwardedHost, "example.com")
	mock.Ctx.Request.Header.Set("X-Forwarded-URI", "/api/openapi.yml")

	handler(mock.Ctx)

	assert.Equal(t, fasthttp.StatusOK, mock.Ctx.Response.StatusCode())

	body := string(mock.Ctx.Response.Body())
	assert.NotEqual(t, "", body)
	assert.Contains(t, body, "example: 'https://auth.example.com/?rd=https%3A%2F%2Fexample.com%2F&rm=GET'")
}
|
package e2e
import (
"context"
"fmt"
"path/filepath"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned"
"github.com/operator-framework/operator-lifecycle-manager/test/e2e/ctx"
)
const (
	// failForwardTestDataBaseDir holds the declarative catalog fixtures
	// shared by every fail-forward scenario below.
	failForwardTestDataBaseDir = "fail-forward/base/"
)

// This suite verifies OLM's UpgradeStrategyUnsafeFailForward behavior: after
// an upgrade fails (either at the InstallPlan or CSV stage), publishing a
// newer fixed version should let the operator recover via skips/skipRange,
// while plain "replaces" chains remain blocked.
var _ = Describe("Fail Forward Upgrades", func() {
	var (
		ns       corev1.Namespace
		crclient versioned.Interface
		c        client.Client
		ogName   string
	)

	BeforeEach(func() {
		crclient = newCRClient()
		c = ctx.Ctx().Client()

		By("creating the testing namespace with an OG that enabled fail forward behavior")
		namespaceName := genName("ff-e2e-")
		og := operatorsv1.OperatorGroup{
			ObjectMeta: metav1.ObjectMeta{
				Name:      fmt.Sprintf("%s-operatorgroup", namespaceName),
				Namespace: namespaceName,
			},
			Spec: operatorsv1.OperatorGroupSpec{
				UpgradeStrategy: operatorsv1.UpgradeStrategyUnsafeFailForward,
			},
		}
		ns = SetupGeneratedTestNamespaceWithOperatorGroup(namespaceName, og)
		ogName = og.GetName()
	})

	AfterEach(func() {
		By("deleting the testing namespace")
		TeardownNamespace(ns.GetName())
	})

	When("an InstallPlan is reporting a failed state", func() {
		var (
			magicCatalog           *MagicCatalog
			catalogSourceName      string
			subscription           *operatorsv1alpha1.Subscription
			originalInstallPlanRef *corev1.ObjectReference
			failedInstallPlanRef   *corev1.ObjectReference
		)

		BeforeEach(func() {
			By("creating a service account with no permission")
			saNameWithNoPerms := genName("scoped-sa-")
			newServiceAccount(ctx.Ctx().KubeClient(), ns.GetName(), saNameWithNoPerms)

			By("deploying the testing catalog")
			provider, err := NewFileBasedFiledBasedCatalogProvider(filepath.Join(testdataDir, failForwardTestDataBaseDir, "example-operator.v0.1.0.yaml"))
			Expect(err).To(BeNil())
			catalogSourceName = genName("mc-ip-failed-")
			magicCatalog = NewMagicCatalog(c, ns.GetName(), catalogSourceName, provider)
			Expect(magicCatalog.DeployCatalog(context.Background())).To(BeNil())

			By("creating the testing subscription")
			subscription = &operatorsv1alpha1.Subscription{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-sub", catalogSourceName),
					Namespace: ns.GetName(),
				},
				Spec: &operatorsv1alpha1.SubscriptionSpec{
					CatalogSource:          catalogSourceName,
					CatalogSourceNamespace: ns.GetName(),
					Channel:                "stable",
					Package:                "packageA",
				},
			}
			Expect(c.Create(context.Background(), subscription)).To(BeNil())

			By("waiting until the subscription has an IP reference")
			// NOTE(review): ":=" declares a NEW "subscription" that shadows
			// the closure's captured variable for the rest of this
			// BeforeEach; the outer variable keeps pointing at the object
			// created above (same name/namespace), so lookups in the It
			// blocks still resolve — confirm the shadowing is intentional.
			subscription, err := fetchSubscription(crclient, subscription.GetNamespace(), subscription.GetName(), subscriptionHasInstallPlanChecker)
			Expect(err).Should(BeNil())
			originalInstallPlanRef = subscription.Status.InstallPlanRef

			By("waiting for the v0.1.0 CSV to report a succeeded phase")
			_, err = fetchCSV(crclient, subscription.Status.CurrentCSV, ns.GetName(), buildCSVConditionChecker(operatorsv1alpha1.CSVPhaseSucceeded))
			Expect(err).ShouldNot(HaveOccurred())

			By("updating the operator group to use the service account without required permissions to simulate InstallPlan failure")
			Eventually(operatorGroupServiceAccountNameSetter(crclient, ns.GetName(), ogName, saNameWithNoPerms)).Should(Succeed())

			By("updating the catalog with v0.2.0 bundle image")
			brokenProvider, err := NewFileBasedFiledBasedCatalogProvider(filepath.Join(testdataDir, failForwardTestDataBaseDir, "example-operator.v0.2.0.yaml"))
			Expect(err).To(BeNil())
			err = magicCatalog.UpdateCatalog(context.Background(), brokenProvider)
			Expect(err).To(BeNil())

			By("verifying the subscription is referencing a new installplan")
			subscription, err = fetchSubscription(crclient, subscription.GetNamespace(), subscription.GetName(), subscriptionHasInstallPlanDifferentChecker(originalInstallPlanRef.Name))
			Expect(err).Should(BeNil())

			By("waiting for the bad InstallPlan to report a failed installation state")
			failedInstallPlanRef = subscription.Status.InstallPlanRef
			_, err = fetchInstallPlan(GinkgoT(), crclient, failedInstallPlanRef.Name, failedInstallPlanRef.Namespace, buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseFailed))
			Expect(err).To(BeNil())

			By("updating the operator group remove service account without permissions")
			Eventually(operatorGroupServiceAccountNameSetter(crclient, ns.GetName(), ogName, "")).Should(Succeed())
		})

		AfterEach(func() {
			By("removing the testing catalog resources")
			Expect(magicCatalog.UndeployCatalog(context.Background())).To(BeNil())
		})

		It("eventually reports a successful state when multiple bad versions are rolled forward", func() {
			By("patching the OperatorGroup to reduce the bundle unpacking timeout")
			addBundleUnpackTimeoutOGAnnotation(context.Background(), c, types.NamespacedName{Name: ogName, Namespace: ns.GetName()}, "1s")

			By("patching the catalog with a bad bundle version")
			badProvider, err := NewFileBasedFiledBasedCatalogProvider(filepath.Join(testdataDir, "fail-forward/multiple-bad-versions", "example-operator.v0.2.1-non-existent-tag.yaml"))
			Expect(err).To(BeNil())
			err = magicCatalog.UpdateCatalog(context.Background(), badProvider)
			Expect(err).To(BeNil())

			By("waiting for the subscription to maintain the example-operator.v0.2.0 status.currentCSV")
			Consistently(subscriptionCurrentCSVGetter(crclient, subscription.GetNamespace(), subscription.GetName())).Should(Equal("example-operator.v0.2.0"))

			By("patching the OperatorGroup to increase the bundle unpacking timeout")
			addBundleUnpackTimeoutOGAnnotation(context.Background(), c, types.NamespacedName{Name: ogName, Namespace: ns.GetName()}, "5m")

			By("patching the catalog with a fixed version")
			fixedProvider, err := NewFileBasedFiledBasedCatalogProvider(filepath.Join(testdataDir, "fail-forward/multiple-bad-versions", "example-operator.v0.3.0.yaml"))
			Expect(err).To(BeNil())
			err = magicCatalog.UpdateCatalog(context.Background(), fixedProvider)
			Expect(err).To(BeNil())

			By("waiting for the subscription to have the example-operator.v0.3.0 status.currentCSV")
			_, err = fetchSubscription(crclient, subscription.GetNamespace(), subscription.GetName(), subscriptionHasCurrentCSV("example-operator.v0.3.0"))
			Expect(err).Should(BeNil())

			By("verifying the subscription is referencing a new InstallPlan")
			subscription, err = fetchSubscription(crclient, subscription.GetNamespace(), subscription.GetName(), subscriptionHasInstallPlanDifferentChecker(originalInstallPlanRef.Name))
			Expect(err).Should(BeNil())

			By("waiting for the fixed v0.3.0 InstallPlan to report a successful state")
			ref := subscription.Status.InstallPlanRef
			_, err = fetchInstallPlan(GinkgoT(), crclient, ref.Name, ref.Namespace, buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete))
			Expect(err).To(BeNil())
		})

		It("eventually reports a successful state when using skip ranges", func() {
			By("patching the catalog with a fixed version")
			fixedProvider, err := NewFileBasedFiledBasedCatalogProvider(filepath.Join(testdataDir, "fail-forward/skip-range", "example-operator.v0.3.0.yaml"))
			Expect(err).To(BeNil())
			err = magicCatalog.UpdateCatalog(context.Background(), fixedProvider)
			Expect(err).To(BeNil())

			By("waiting for the subscription to have the example-operator.v0.3.0 status.currentCSV")
			_, err = fetchSubscription(crclient, subscription.GetNamespace(), subscription.GetName(), subscriptionHasCurrentCSV("example-operator.v0.3.0"))
			Expect(err).Should(BeNil())

			By("verifying the subscription is referencing a new InstallPlan")
			subscription, err = fetchSubscription(crclient, subscription.GetNamespace(), subscription.GetName(), subscriptionHasInstallPlanDifferentChecker(originalInstallPlanRef.Name))
			Expect(err).Should(BeNil())

			By("waiting for the fixed v0.3.0 InstallPlan to report a successful state")
			ref := subscription.Status.InstallPlanRef
			_, err = fetchInstallPlan(GinkgoT(), crclient, ref.Name, ref.Namespace, buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete))
			Expect(err).To(BeNil())
		})

		It("eventually reports a successful state when using skips", func() {
			By("patching the catalog with a fixed version")
			fixedProvider, err := NewFileBasedFiledBasedCatalogProvider(filepath.Join(testdataDir, "fail-forward/skips", "example-operator.v0.3.0.yaml"))
			Expect(err).To(BeNil())
			err = magicCatalog.UpdateCatalog(context.Background(), fixedProvider)
			Expect(err).To(BeNil())

			By("waiting for the subscription to have the example-operator.v0.3.0 status.currentCSV")
			_, err = fetchSubscription(crclient, subscription.GetNamespace(), subscription.GetName(), subscriptionHasCurrentCSV("example-operator.v0.3.0"))
			Expect(err).Should(BeNil())

			By("verifying the subscription is referencing a new InstallPlan")
			subscription, err = fetchSubscription(crclient, subscription.GetNamespace(), subscription.GetName(), subscriptionHasInstallPlanDifferentChecker(originalInstallPlanRef.Name))
			Expect(err).Should(BeNil())

			By("waiting for the fixed v0.3.0 InstallPlan to report a successful state")
			ref := subscription.Status.InstallPlanRef
			_, err = fetchInstallPlan(GinkgoT(), crclient, ref.Name, ref.Namespace, buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete))
			Expect(err).To(BeNil())
		})

		It("eventually reports a failed state when using replaces", func() {
			// A plain replaces chain cannot skip the failed version, so the
			// subscription must stay pinned to the failed InstallPlan.
			By("patching the catalog with a fixed version")
			fixedProvider, err := NewFileBasedFiledBasedCatalogProvider(filepath.Join(testdataDir, "fail-forward/replaces", "example-operator.v0.3.0.yaml"))
			Expect(err).To(BeNil())
			err = magicCatalog.UpdateCatalog(context.Background(), fixedProvider)
			Expect(err).To(BeNil())

			By("waiting for the subscription to maintain the example-operator.v0.2.0 status.currentCSV")
			Consistently(subscriptionCurrentCSVGetter(crclient, subscription.GetNamespace(), subscription.GetName())).Should(Equal("example-operator.v0.2.0"))

			By("verifying the subscription is referencing the same InstallPlan")
			subscription, err = fetchSubscription(crclient, subscription.GetNamespace(), subscription.GetName(), subscriptionHasInstallPlanChecker)
			Expect(err).Should(BeNil())
			Expect(subscription.Status.InstallPlanRef.Name).To(Equal(failedInstallPlanRef.Name))
		})
	})

	When("a CSV resource is in a failed state", func() {
		var (
			magicCatalog      *MagicCatalog
			catalogSourceName string
			subscription      *operatorsv1alpha1.Subscription
		)

		BeforeEach(func() {
			By("deploying the testing catalog")
			provider, err := NewFileBasedFiledBasedCatalogProvider(filepath.Join(testdataDir, failForwardTestDataBaseDir, "example-operator.v0.1.0.yaml"))
			Expect(err).To(BeNil())
			catalogSourceName = genName("mc-csv-failed-")
			magicCatalog = NewMagicCatalog(c, ns.GetName(), catalogSourceName, provider)
			Expect(magicCatalog.DeployCatalog(context.Background())).To(BeNil())

			By("creating the testing subscription")
			subscription = &operatorsv1alpha1.Subscription{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-sub", catalogSourceName),
					Namespace: ns.GetName(),
				},
				Spec: &operatorsv1alpha1.SubscriptionSpec{
					CatalogSource:          catalogSourceName,
					CatalogSourceNamespace: ns.GetName(),
					Channel:                "stable",
					Package:                "packageA",
				},
			}
			Expect(c.Create(context.Background(), subscription)).To(BeNil())

			By("waiting until the subscription has an IP reference")
			// NOTE(review): same ":=" shadowing of "subscription" as in the
			// InstallPlan scenario above — confirm intentional.
			subscription, err := fetchSubscription(crclient, subscription.GetNamespace(), subscription.GetName(), subscriptionHasInstallPlanChecker)
			Expect(err).Should(BeNil())

			By("waiting for the v0.1.0 CSV to report a succeeded phase")
			_, err = fetchCSV(crclient, subscription.Status.CurrentCSV, ns.GetName(), buildCSVConditionChecker(operatorsv1alpha1.CSVPhaseSucceeded))
			Expect(err).ShouldNot(HaveOccurred())

			By("updating the catalog with a broken v0.2.0 csv")
			brokenProvider, err := NewFileBasedFiledBasedCatalogProvider(filepath.Join(testdataDir, failForwardTestDataBaseDir, "example-operator.v0.2.0-invalid-csv.yaml"))
			Expect(err).To(BeNil())
			err = magicCatalog.UpdateCatalog(context.Background(), brokenProvider)
			Expect(err).To(BeNil())

			badCSV := "example-operator.v0.2.0"
			By("verifying the subscription has installed the current csv")
			subscription, err = fetchSubscription(crclient, subscription.GetNamespace(), subscription.GetName(), subscriptionHasCurrentCSV(badCSV))
			Expect(err).Should(BeNil())

			By("waiting for the bad CSV to report a failed state")
			_, err = fetchCSV(crclient, subscription.Status.CurrentCSV, ns.GetName(), csvFailedChecker)
			Expect(err).To(BeNil())
		})

		AfterEach(func() {
			By("removing the testing catalog resources")
			Expect(magicCatalog.UndeployCatalog(context.Background())).To(BeNil())
		})

		It("eventually reports a successful state when using skip ranges", func() {
			By("patching the catalog with a fixed version")
			fixedProvider, err := NewFileBasedFiledBasedCatalogProvider(filepath.Join(testdataDir, "fail-forward/skip-range", "example-operator.v0.3.0.yaml"))
			Expect(err).To(BeNil())
			err = magicCatalog.UpdateCatalog(context.Background(), fixedProvider)
			Expect(err).To(BeNil())

			By("waiting for the subscription to have the example-operator.v0.3.0 status.currentCSV")
			subscription, err = fetchSubscription(crclient, subscription.GetNamespace(), subscription.GetName(), subscriptionHasCurrentCSV("example-operator.v0.3.0"))
			Expect(err).Should(BeNil())
		})

		It("eventually reports a successful state when using skips", func() {
			By("patching the catalog with a fixed version")
			fixedProvider, err := NewFileBasedFiledBasedCatalogProvider(filepath.Join(testdataDir, "fail-forward/skips", "example-operator.v0.3.0.yaml"))
			Expect(err).To(BeNil())
			err = magicCatalog.UpdateCatalog(context.Background(), fixedProvider)
			Expect(err).To(BeNil())

			By("waiting for the subscription to have the example-operator.v0.3.0 status.currentCSV")
			subscription, err = fetchSubscription(crclient, subscription.GetNamespace(), subscription.GetName(), subscriptionHasCurrentCSV("example-operator.v0.3.0"))
			Expect(err).Should(BeNil())
		})

		It("eventually reports a successful state when using replaces", func() {
			By("patching the catalog with a fixed version")
			fixedProvider, err := NewFileBasedFiledBasedCatalogProvider(filepath.Join(testdataDir, "fail-forward/replaces", "example-operator.v0.3.0.yaml"))
			Expect(err).To(BeNil())
			err = magicCatalog.UpdateCatalog(context.Background(), fixedProvider)
			Expect(err).To(BeNil())

			By("waiting for the subscription to have the example-operator.v0.3.0 status.currentCSV")
			subscription, err = fetchSubscription(crclient, subscription.GetNamespace(), subscription.GetName(), subscriptionHasCurrentCSV("example-operator.v0.3.0"))
			Expect(err).Should(BeNil())
		})
	})
})
|
// isPowerOfThree reports whether n is a power of three (1, 3, 9, 27, ...).
func isPowerOfThree(n int) bool {
	return sol1(n)
}

// sol1 strips factors of three: n is a power of three exactly when dividing
// out every factor of 3 leaves 1. Non-positive inputs are rejected up front.
func sol1(n int) bool {
	if n < 1 {
		return false
	}
	for n%3 == 0 {
		n /= 3
	}
	return n == 1
}
|
package main
// main is intentionally empty; this file only defines countBinarySubstrings
// (and its min helper) so the package compiles as a standalone program.
func main() {
}
// countBinarySubstrings counts the non-empty substrings of s whose 0s and 1s
// are grouped consecutively and appear in equal number ("01", "0011", ...).
// Substrings at different positions count separately. Each adjacent pair of
// same-character runs of lengths a and b contributes min(a, b), so a single
// left-to-right scan over the runs suffices (O(len(s)) time).
func countBinarySubstrings(s string) int {
	total := 0
	prevRun := 0
	i := 0
	for i < len(s) {
		// Measure the run of identical characters starting at i.
		j := i
		for j < len(s) && s[j] == s[i] {
			j++
		}
		curRun := j - i
		if prevRun < curRun {
			total += prevRun
		} else {
			total += curRun
		}
		prevRun = curRun
		i = j
	}
	return total
}
// min returns the smaller of x and y.
func min(x, y int) int {
	if x > y {
		return y
	}
	return x
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dataplex
import (
"context"
"fmt"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
dclService "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataplex/beta"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured"
)
// Lake adapts the dataplex Lake resource to the unstructured resource
// registry interface (see the methods below and the init registration).
type Lake struct{}

// LakeToUnstructured converts a typed dataplex Lake into the generic
// unstructured representation, copying only the fields that are set
// (nil pointers and empty sub-messages are omitted from the output map).
func LakeToUnstructured(r *dclService.Lake) *unstructured.Resource {
	u := &unstructured.Resource{
		STV: unstructured.ServiceTypeVersion{
			Service: "dataplex",
			Version: "beta",
			Type:    "Lake",
		},
		Object: make(map[string]interface{}),
	}
	if r.AssetStatus != nil && r.AssetStatus != dclService.EmptyLakeAssetStatus {
		rAssetStatus := make(map[string]interface{})
		if r.AssetStatus.ActiveAssets != nil {
			rAssetStatus["activeAssets"] = *r.AssetStatus.ActiveAssets
		}
		if r.AssetStatus.SecurityPolicyApplyingAssets != nil {
			rAssetStatus["securityPolicyApplyingAssets"] = *r.AssetStatus.SecurityPolicyApplyingAssets
		}
		if r.AssetStatus.UpdateTime != nil {
			rAssetStatus["updateTime"] = *r.AssetStatus.UpdateTime
		}
		u.Object["assetStatus"] = rAssetStatus
	}
	if r.CreateTime != nil {
		u.Object["createTime"] = *r.CreateTime
	}
	if r.Description != nil {
		u.Object["description"] = *r.Description
	}
	if r.DisplayName != nil {
		u.Object["displayName"] = *r.DisplayName
	}
	if r.Labels != nil {
		rLabels := make(map[string]interface{})
		for k, v := range r.Labels {
			rLabels[k] = v
		}
		u.Object["labels"] = rLabels
	}
	if r.Location != nil {
		u.Object["location"] = *r.Location
	}
	if r.Metastore != nil && r.Metastore != dclService.EmptyLakeMetastore {
		rMetastore := make(map[string]interface{})
		if r.Metastore.Service != nil {
			rMetastore["service"] = *r.Metastore.Service
		}
		u.Object["metastore"] = rMetastore
	}
	if r.MetastoreStatus != nil && r.MetastoreStatus != dclService.EmptyLakeMetastoreStatus {
		rMetastoreStatus := make(map[string]interface{})
		if r.MetastoreStatus.Endpoint != nil {
			rMetastoreStatus["endpoint"] = *r.MetastoreStatus.Endpoint
		}
		if r.MetastoreStatus.Message != nil {
			rMetastoreStatus["message"] = *r.MetastoreStatus.Message
		}
		if r.MetastoreStatus.State != nil {
			// Enum values are stored as their string form.
			rMetastoreStatus["state"] = string(*r.MetastoreStatus.State)
		}
		if r.MetastoreStatus.UpdateTime != nil {
			rMetastoreStatus["updateTime"] = *r.MetastoreStatus.UpdateTime
		}
		u.Object["metastoreStatus"] = rMetastoreStatus
	}
	if r.Name != nil {
		u.Object["name"] = *r.Name
	}
	if r.Project != nil {
		u.Object["project"] = *r.Project
	}
	if r.ServiceAccount != nil {
		u.Object["serviceAccount"] = *r.ServiceAccount
	}
	if r.State != nil {
		u.Object["state"] = string(*r.State)
	}
	if r.Uid != nil {
		u.Object["uid"] = *r.Uid
	}
	if r.UpdateTime != nil {
		u.Object["updateTime"] = *r.UpdateTime
	}
	return u
}
// UnstructuredToLake converts a generic unstructured resource back into a
// typed dataplex Lake. Every field present in u.Object is type-asserted; a
// value of the wrong dynamic type yields a descriptive error rather than a
// silent drop. Absent fields are left as nil on the result.
func UnstructuredToLake(u *unstructured.Resource) (*dclService.Lake, error) {
	r := &dclService.Lake{}
	if _, ok := u.Object["assetStatus"]; ok {
		if rAssetStatus, ok := u.Object["assetStatus"].(map[string]interface{}); ok {
			r.AssetStatus = &dclService.LakeAssetStatus{}
			if _, ok := rAssetStatus["activeAssets"]; ok {
				if i, ok := rAssetStatus["activeAssets"].(int64); ok {
					r.AssetStatus.ActiveAssets = dcl.Int64(i)
				} else {
					return nil, fmt.Errorf("r.AssetStatus.ActiveAssets: expected int64")
				}
			}
			if _, ok := rAssetStatus["securityPolicyApplyingAssets"]; ok {
				if i, ok := rAssetStatus["securityPolicyApplyingAssets"].(int64); ok {
					r.AssetStatus.SecurityPolicyApplyingAssets = dcl.Int64(i)
				} else {
					return nil, fmt.Errorf("r.AssetStatus.SecurityPolicyApplyingAssets: expected int64")
				}
			}
			if _, ok := rAssetStatus["updateTime"]; ok {
				if s, ok := rAssetStatus["updateTime"].(string); ok {
					r.AssetStatus.UpdateTime = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.AssetStatus.UpdateTime: expected string")
				}
			}
		} else {
			return nil, fmt.Errorf("r.AssetStatus: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["createTime"]; ok {
		if s, ok := u.Object["createTime"].(string); ok {
			r.CreateTime = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.CreateTime: expected string")
		}
	}
	if _, ok := u.Object["description"]; ok {
		if s, ok := u.Object["description"].(string); ok {
			r.Description = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Description: expected string")
		}
	}
	if _, ok := u.Object["displayName"]; ok {
		if s, ok := u.Object["displayName"].(string); ok {
			r.DisplayName = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.DisplayName: expected string")
		}
	}
	if _, ok := u.Object["labels"]; ok {
		if rLabels, ok := u.Object["labels"].(map[string]interface{}); ok {
			// Non-string label values are silently skipped.
			m := make(map[string]string)
			for k, v := range rLabels {
				if s, ok := v.(string); ok {
					m[k] = s
				}
			}
			r.Labels = m
		} else {
			return nil, fmt.Errorf("r.Labels: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["location"]; ok {
		if s, ok := u.Object["location"].(string); ok {
			r.Location = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Location: expected string")
		}
	}
	if _, ok := u.Object["metastore"]; ok {
		if rMetastore, ok := u.Object["metastore"].(map[string]interface{}); ok {
			r.Metastore = &dclService.LakeMetastore{}
			if _, ok := rMetastore["service"]; ok {
				if s, ok := rMetastore["service"].(string); ok {
					r.Metastore.Service = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.Metastore.Service: expected string")
				}
			}
		} else {
			return nil, fmt.Errorf("r.Metastore: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["metastoreStatus"]; ok {
		if rMetastoreStatus, ok := u.Object["metastoreStatus"].(map[string]interface{}); ok {
			r.MetastoreStatus = &dclService.LakeMetastoreStatus{}
			if _, ok := rMetastoreStatus["endpoint"]; ok {
				if s, ok := rMetastoreStatus["endpoint"].(string); ok {
					r.MetastoreStatus.Endpoint = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.MetastoreStatus.Endpoint: expected string")
				}
			}
			if _, ok := rMetastoreStatus["message"]; ok {
				if s, ok := rMetastoreStatus["message"].(string); ok {
					r.MetastoreStatus.Message = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.MetastoreStatus.Message: expected string")
				}
			}
			if _, ok := rMetastoreStatus["state"]; ok {
				if s, ok := rMetastoreStatus["state"].(string); ok {
					r.MetastoreStatus.State = dclService.LakeMetastoreStatusStateEnumRef(s)
				} else {
					return nil, fmt.Errorf("r.MetastoreStatus.State: expected string")
				}
			}
			if _, ok := rMetastoreStatus["updateTime"]; ok {
				if s, ok := rMetastoreStatus["updateTime"].(string); ok {
					r.MetastoreStatus.UpdateTime = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.MetastoreStatus.UpdateTime: expected string")
				}
			}
		} else {
			return nil, fmt.Errorf("r.MetastoreStatus: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["name"]; ok {
		if s, ok := u.Object["name"].(string); ok {
			r.Name = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Name: expected string")
		}
	}
	if _, ok := u.Object["project"]; ok {
		if s, ok := u.Object["project"].(string); ok {
			r.Project = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Project: expected string")
		}
	}
	if _, ok := u.Object["serviceAccount"]; ok {
		if s, ok := u.Object["serviceAccount"].(string); ok {
			r.ServiceAccount = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.ServiceAccount: expected string")
		}
	}
	if _, ok := u.Object["state"]; ok {
		if s, ok := u.Object["state"].(string); ok {
			r.State = dclService.LakeStateEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.State: expected string")
		}
	}
	if _, ok := u.Object["uid"]; ok {
		if s, ok := u.Object["uid"].(string); ok {
			r.Uid = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Uid: expected string")
		}
	}
	if _, ok := u.Object["updateTime"]; ok {
		if s, ok := u.Object["updateTime"].(string); ok {
			r.UpdateTime = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.UpdateTime: expected string")
		}
	}
	return r, nil
}
// GetLake fetches the Lake identified by u from the service and returns it in
// unstructured form.
func GetLake(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) {
	cl := dclService.NewClient(config)
	lake, err := UnstructuredToLake(u)
	if err != nil {
		return nil, err
	}
	fetched, err := cl.GetLake(ctx, lake)
	if err != nil {
		return nil, err
	}
	return LakeToUnstructured(fetched), nil
}
// ListLake returns every Lake in the given project and location, walking all
// pages of the list response.
func ListLake(ctx context.Context, config *dcl.Config, project string, location string) ([]*unstructured.Resource, error) {
	cl := dclService.NewClient(config)
	page, err := cl.ListLake(ctx, project, location)
	if err != nil {
		return nil, err
	}
	var out []*unstructured.Resource
	for {
		for _, item := range page.Items {
			out = append(out, LakeToUnstructured(item))
		}
		if !page.HasNext() {
			return out, nil
		}
		if err := page.Next(ctx, cl); err != nil {
			return nil, err
		}
	}
}
// ApplyLake creates or updates the Lake described by u, forwarding any
// unstructured state hint to the typed apply, and returns the applied state.
func ApplyLake(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	cl := dclService.NewClient(config)
	desired, err := UnstructuredToLake(u)
	if err != nil {
		return nil, err
	}
	if hint := unstructured.FetchStateHint(opts); hint != nil {
		state, err := UnstructuredToLake(hint)
		if err != nil {
			return nil, err
		}
		opts = append(opts, dcl.WithStateHint(state))
	}
	applied, err := cl.ApplyLake(ctx, desired, opts...)
	if err != nil {
		return nil, err
	}
	return LakeToUnstructured(applied), nil
}
// LakeHasDiff reports whether applying u would change anything: it runs a
// fully-blocked apply and interprets an "infeasible" error as a pending diff.
func LakeHasDiff(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	cl := dclService.NewClient(config)
	desired, err := UnstructuredToLake(u)
	if err != nil {
		return false, err
	}
	if hint := unstructured.FetchStateHint(opts); hint != nil {
		state, err := UnstructuredToLake(hint)
		if err != nil {
			return false, err
		}
		opts = append(opts, dcl.WithStateHint(state))
	}
	// Block every lifecycle action so the apply can only report whether a
	// change WOULD have happened.
	opts = append(opts, dcl.WithLifecycleParam(dcl.BlockDestruction), dcl.WithLifecycleParam(dcl.BlockCreation), dcl.WithLifecycleParam(dcl.BlockModification))
	if _, err := cl.ApplyLake(ctx, desired, opts...); err != nil {
		if _, infeasible := err.(dcl.ApplyInfeasibleError); infeasible {
			return true, nil
		}
		return false, err
	}
	return false, nil
}
// DeleteLake deletes the Lake identified by u.
func DeleteLake(ctx context.Context, config *dcl.Config, u *unstructured.Resource) error {
	cl := dclService.NewClient(config)
	lake, err := UnstructuredToLake(u)
	if err != nil {
		return err
	}
	return cl.DeleteLake(ctx, lake)
}
// LakeID returns the canonical identifier of the Lake described by u.
func LakeID(u *unstructured.Resource) (string, error) {
	lake, err := UnstructuredToLake(u)
	if err != nil {
		return "", err
	}
	return lake.ID()
}
// STV returns the service/type/version triple that identifies this resource
// kind in the unstructured registry.
func (r *Lake) STV() unstructured.ServiceTypeVersion {
	// Named fields instead of the positional literal: this pins the intent
	// (Type is the resource kind "Lake", Version is "beta") regardless of
	// the struct's field order, matching the literal in LakeToUnstructured.
	return unstructured.ServiceTypeVersion{
		Service: "dataplex",
		Type:    "Lake",
		Version: "beta",
	}
}
// The IAM policy surface is not implemented for dataplex Lake resources;
// every policy method reports unstructured.ErrNoSuchMethod.

func (r *Lake) SetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Lake) GetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, role, member string) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Lake) DeletePolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) error {
	return unstructured.ErrNoSuchMethod
}

func (r *Lake) SetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Lake) SetPolicyWithEtag(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}

func (r *Lake) GetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}
// The methods below adapt the package-level Lake functions to the
// unstructured resource handler interface.

func (r *Lake) Get(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return GetLake(ctx, config, resource)
}

func (r *Lake) Apply(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	return ApplyLake(ctx, config, resource, opts...)
}

func (r *Lake) HasDiff(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	return LakeHasDiff(ctx, config, resource, opts...)
}

func (r *Lake) Delete(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) error {
	return DeleteLake(ctx, config, resource)
}

func (r *Lake) ID(resource *unstructured.Resource) (string, error) {
	return LakeID(resource)
}
// init registers the Lake handler with the unstructured resource registry.
func init() {
	unstructured.Register(&Lake{})
}
|
package tmpl
import "strings"
// FillTmpl substitutes every occurrence of each "{{key}}" placeholder in tmpl
// with the corresponding value from values. Placeholders with no matching key
// are left untouched; a nil map returns tmpl unchanged.
func FillTmpl(tmpl string, values map[string]string) string {
	if values == nil {
		return tmpl
	}
	for k, v := range values {
		// strings.ReplaceAll fixes the original behavior of replacing
		// only the FIRST occurrence (strings.Replace with count 1),
		// which left repeated placeholders unfilled.
		tmpl = strings.ReplaceAll(tmpl, "{{"+k+"}}", v)
	}
	return tmpl
}
|
package endpoints
import (
"encoding/json"
"github.com/valyala/fasthttp"
"log"
"strconv"
"strings"
"technodb-final/app/dbhandlers"
"technodb-final/app/models"
)
//var PostErrors = map[string]error{
// "conflict": errors.New("Post already exists"),
// "none": errors.New("Post not found"),
// "parent":errors.New("Parent error"),
//}
// PostInfo returns the post with the given id, optionally joined with the
// related entities listed in the comma-separated "related" query argument.
// Responds 200 with the post, 404/409 with the storage layer's sentinel
// errors, or 500 on any other storage error.
func PostInfo(ctx *fasthttp.RequestCtx) {
	// The router supplies the id segment; on a parse failure id stays 0,
	// which the storage layer reports as not found.
	id, _ := strconv.Atoi(ctx.UserValue("id").(string))
	related := string(ctx.QueryArgs().Peek("related"))

	// Leading empty element preserved from the original call contract —
	// presumably expected by dbhandlers.PostById; confirm before changing.
	items := []string{""}
	items = append(items, strings.Split(related, ",")...)

	res, err := dbhandlers.PostById(id, &items)
	resp, _ := json.Marshal(res)
	switch {
	case err == dbhandlers.PostErrors["conflict"]:
		ctx.SetBody(resp)
		ctx.SetStatusCode(fasthttp.StatusConflict)
	case err == dbhandlers.PostErrors["none"]:
		resp, _ = json.Marshal(err)
		ctx.SetBody(resp)
		ctx.SetStatusCode(fasthttp.StatusNotFound)
	case err == nil:
		ctx.SetBody(resp)
		ctx.SetStatusCode(fasthttp.StatusOK)
	default:
		// Unknown storage errors previously fell through with an implicit
		// 200 and empty body; surface them as a 500 instead.
		log.Print(err)
		ctx.SetStatusCode(fasthttp.StatusInternalServerError)
	}
}
// PostUpdate handles a post-edit request: it decodes the new post payload
// from the body and applies it to the post identified by the "id" parameter.
func PostUpdate(ctx *fasthttp.RequestCtx) {
	id, _ := strconv.Atoi(ctx.UserValue("id").(string))
	var post models.Post
	err := json.Unmarshal(ctx.PostBody(), &post)
	if err != nil {
		ctx.SetStatusCode(fasthttp.StatusBadRequest)
		ctx.WriteString(err.Error())
		return
	}
	res, err := dbhandlers.UpdatePostById(id, &post)
	resp, _ := json.Marshal(res)
	if err == dbhandlers.PostErrors["conflict"] {
		ctx.SetBody(resp)
		ctx.SetStatusCode(fasthttp.StatusConflict)
	} else if err == dbhandlers.PostErrors["none"] {
		resp, _ = json.Marshal(err)
		ctx.SetBody(resp)
		ctx.SetStatusCode(fasthttp.StatusNotFound)
	} else if err == nil {
		ctx.SetBody(resp)
		ctx.SetStatusCode(fasthttp.StatusOK)
	} else {
		// Fix: unexpected errors were previously only logged, leaving the
		// client with an implicit 200 and empty body; report a 500 instead.
		log.Print(err)
		ctx.SetStatusCode(fasthttp.StatusInternalServerError)
	}
}
|
package controllers
import (
"encoding/json"
"fmt"
"log"
"net/http"
"github.com/BolajiOlajide/go-api/database"
"github.com/gorilla/mux"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// CreatePerson endpoint for creating a person
// CreatePerson decodes a Person from the request body and inserts it into the
// collection, echoing the insert result back as JSON.
//
// Fix: decode/insert failures previously called log.Fatal, which terminated
// the entire server on a single bad request; they now return HTTP errors.
func CreatePerson(response http.ResponseWriter, request *http.Request) {
	collection, ctx := database.GetDB()
	response.Header().Add("content-type", "application/json")
	var person Person
	if err := json.NewDecoder(request.Body).Decode(&person); err != nil {
		response.WriteHeader(http.StatusBadRequest)
		response.Write([]byte(`{"message": "` + err.Error() + `"}`))
		return
	}
	// Debug dump of the decoded payload (kept from the original).
	fmt.Printf("%+v\n", person)
	result, err := collection.InsertOne(ctx, person)
	if err != nil {
		response.WriteHeader(http.StatusInternalServerError)
		response.Write([]byte(`{"message": "` + err.Error() + `"}`))
		return
	}
	json.NewEncoder(response).Encode(result)
	log.Print("Done creating person!") // fixed "persoN" typo
}
// GetPeople fetch everyone in the DB
// GetPeople returns every person document in the collection as a JSON array.
func GetPeople(response http.ResponseWriter, request *http.Request) {
	response.Header().Add("content-type", "application/json")
	// Fix: start with an empty (non-nil) slice so an empty collection encodes
	// as [] instead of null.
	people := []Person{}
	collection, ctx := database.GetDB()
	cursor, err := collection.Find(ctx, bson.M{})
	if err != nil {
		response.WriteHeader(http.StatusBadRequest)
		response.Write([]byte(`{"message": "` + err.Error() + `"}`))
		return
	}
	defer cursor.Close(ctx)
	for cursor.Next(ctx) {
		var person Person
		// Fix: the Decode error was previously ignored, which could silently
		// append zero-valued entries on corrupt documents.
		if err := cursor.Decode(&person); err != nil {
			response.WriteHeader(http.StatusInternalServerError)
			response.Write([]byte(`{"message": "` + err.Error() + `"}`))
			return
		}
		people = append(people, person)
	}
	if err := cursor.Err(); err != nil {
		response.WriteHeader(http.StatusBadRequest)
		response.Write([]byte(`{"message": "` + err.Error() + `"}`))
		return
	}
	json.NewEncoder(response).Encode(people)
}
// GetPerson fetch a single person details
func GetPerson(response http.ResponseWriter, request *http.Request) {
response.Header().Add("content-type", "application/json")
params := mux.Vars(request)
id, _ := primitive.ObjectIDFromHex(params["id"])
var person Person
collection, ctx := database.GetDB()
err := collection.FindOne(ctx, Person{ID: id}).Decode(&person)
if err != nil {
response.WriteHeader(http.StatusBadRequest)
response.Write([]byte(`{"message": "` + err.Error() + `"}`))
return
}
json.NewEncoder(response).Encode(person)
}
// Person structure for a person instance
// All fields are optional in both JSON and BSON; ID is the Mongo ObjectID.
type Person struct {
	ID        primitive.ObjectID `json:"_id,omitempty" bson:"_id,omitempty"`
	Firstname string             `json:"firstname,omitempty" bson:"firstname,omitempty"`
	Lastname  string             `json:"lastname,omitempty" bson:"lastname,omitempty"`
}
|
package easypost_test
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/EasyPost/easypost-go/v3"
)
// Fixture mirrors the shared client-library fixtures JSON file; each field
// maps a fixture name to pre-built request/option data used by the tests.
type Fixture struct {
	Addresses       map[string]*easypost.Address           `json:"addresses,omitempty"`
	CarrierAccounts map[string]*easypost.CarrierAccount    `json:"carrier_accounts,omitempty"`
	CarrierStrings  map[string]string                      `json:"carrier_strings,omitempty"`
	CustomsInfos    map[string]*easypost.CustomsInfo       `json:"customs_infos,omitempty"`
	CustomsItems    map[string]*easypost.CustomsItem       `json:"customs_items,omitempty"`
	CreditCards     map[string]*easypost.CreditCardOptions `json:"credit_cards,omitempty"`
	FormOptions     map[string]map[string]interface{}      `json:"form_options,omitempty"`
	Insurances      map[string]*easypost.Insurance         `json:"insurances,omitempty"`
	Orders          map[string]*easypost.Order             `json:"orders,omitempty"`
	PageSizes       map[string]int                         `json:"page_sizes,omitempty"`
	Parcels         map[string]*easypost.Parcel            `json:"parcels,omitempty"`
	Pickups         map[string]*easypost.Pickup            `json:"pickups,omitempty"`
	ReportTypes     map[string]string                      `json:"report_types,omitempty"`
	ServiceNames    map[string]map[string]string           `json:"service_names,omitempty"`
	Shipments       map[string]*easypost.Shipment          `json:"shipments,omitempty"`
	TaxIdentifiers  map[string]*easypost.TaxIdentifier     `json:"tax_identifiers,omitempty"`
	Users           map[string]*easypost.UserOptions       `json:"users,omitempty"`
	WebhookURL      string                                 `json:"webhook_url,omitempty"`
}
// readFixtureData reads the shared fixture JSON file (located relative to the
// parent of the working directory) and unmarshals it into a Fixture. It exits
// the process if the file cannot be read.
func readFixtureData() Fixture {
	currentDir, _ := os.Getwd()
	parentDir := filepath.Dir(currentDir)
	// filepath.Join builds an OS-correct path instead of string concatenation.
	filePath := filepath.Join(parentDir, "examples", "official", "fixtures", "client-library-fixtures.json")
	/* #nosec */
	// ioutil.ReadFile replaces the Open/ReadAll/Close dance and avoids the
	// redundant []byte(...) conversion the previous version performed.
	byteData, err := ioutil.ReadFile(filePath)
	if err != nil {
		fmt.Fprintln(os.Stderr, "error opening fixture file:", err)
		os.Exit(1)
	}
	var fixtures Fixture
	if err := json.Unmarshal(byteData, &fixtures); err != nil {
		// Fix: surface malformed fixture data instead of silently returning
		// zero values (still best-effort: callers get the zero Fixture).
		fmt.Fprintln(os.Stderr, "error parsing fixture file:", err)
	}
	return fixtures
}
// We keep the page_size of retrieving `all` records small so cassettes stay small
func (fixture *Fixture) pageSize() int {
	return readFixtureData().PageSizes["five_results"]
}

// This is the USPS carrier account ID that comes with your EasyPost account by default and should be used for all tests
func (fixture *Fixture) USPSCarrierAccountID() string {
	uspsCarrierAccountID := os.Getenv("USPS_CARRIER_ACCOUNT_ID")
	// Fallback to the EasyPost Go Client Library Test User USPS carrier account ID
	if len(uspsCarrierAccountID) == 0 {
		return "ca_e606176ddb314afe896733636dba2f3b"
	}
	return uspsCarrierAccountID
}

// USPS returns the fixture carrier string for USPS.
func (fixture *Fixture) USPS() string {
	return readFixtureData().CarrierStrings["usps"]
}

// USPSService returns the first USPS service name from the fixtures.
func (fixture *Fixture) USPSService() string {
	return readFixtureData().ServiceNames["usps"]["first_service"]
}

// PickupService returns the USPS pickup service name from the fixtures.
func (fixture *Fixture) PickupService() string {
	return readFixtureData().ServiceNames["usps"]["pickup_service"]
}

// ReportType returns the shipment report type from the fixtures.
func (fixture *Fixture) ReportType() string {
	return readFixtureData().ReportTypes["shipment"]
}

// ReportDate returns a fixed date used when generating reports in tests.
func (fixture *Fixture) ReportDate() string {
	return "2022-04-11"
}

// WebhookUrl returns the webhook URL from the fixtures.
func (fixture *Fixture) WebhookUrl() string {
	return readFixtureData().WebhookURL
}
// CaAddress1 returns the first California test address fixture.
func (fixture *Fixture) CaAddress1() *easypost.Address {
	return readFixtureData().Addresses["ca_address_1"]
}

// CaAddress2 returns the second California test address fixture.
func (fixture *Fixture) CaAddress2() *easypost.Address {
	return readFixtureData().Addresses["ca_address_2"]
}

// IncorrectAddress returns an intentionally invalid address fixture.
func (fixture *Fixture) IncorrectAddress() *easypost.Address {
	return readFixtureData().Addresses["incorrect"]
}

// BasicParcel returns the basic parcel fixture.
func (fixture *Fixture) BasicParcel() *easypost.Parcel {
	return readFixtureData().Parcels["basic"]
}

// BasicCustomsItem returns the basic customs item fixture with its monetary
// value restored in code.
func (fixture *Fixture) BasicCustomsItem() *easypost.CustomsItem {
	customsItem := readFixtureData().CustomsItems["basic"]
	// Json unmarshalling doesn't handle float64 well, need to manually set the value
	customsItem.Value = 23.25
	return customsItem
}

// BasicCustomsInfo returns the basic customs info fixture with the value of
// every contained customs item restored in code.
func (fixture *Fixture) BasicCustomsInfo() *easypost.CustomsInfo {
	customsInfo := readFixtureData().CustomsInfos["basic"]
	// Json unmarshalling doesn't handle float64 well, need to manually set the value
	for _, customsItem := range customsInfo.CustomsItems {
		customsItem.Value = 23.25
	}
	return customsInfo
}

// TaxIdentifier returns the basic tax identifier fixture.
func (fixture *Fixture) TaxIdentifier() *easypost.TaxIdentifier {
	return readFixtureData().TaxIdentifiers["basic"]
}

// BasicShipment returns the basic domestic shipment fixture.
func (fixture *Fixture) BasicShipment() *easypost.Shipment {
	return readFixtureData().Shipments["basic_domestic"]
}

// FullShipment returns the fully-populated shipment fixture.
func (fixture *Fixture) FullShipment() *easypost.Shipment {
	return readFixtureData().Shipments["full"]
}

// OneCallBuyShipment assembles a shipment that can be created and bought in a
// single call, from the individual address/parcel/service fixtures.
func (fixture *Fixture) OneCallBuyShipment() *easypost.Shipment {
	return &easypost.Shipment{
		ToAddress:         fixture.CaAddress1(),
		FromAddress:       fixture.CaAddress2(),
		Parcel:            fixture.BasicParcel(),
		Service:           fixture.USPSService(),
		CarrierAccountIDs: []string{fixture.USPSCarrierAccountID()},
		Carrier:           fixture.USPS(),
	}
}
// This fixture will require you to add a `shipment` key with a Shipment object from a test.
// If you need to re-record cassettes, increment the date below and ensure it is one day in the future,
// USPS only does "next-day" pickups including Saturday but not Sunday or Holidays.
func (fixture *Fixture) BasicPickup() *easypost.Pickup {
	pickupDate := easypost.NewDateTime(2023, time.March, 8, 0, 0, 0, 0, time.UTC)
	pickupData := readFixtureData().Pickups["basic"]
	// Min and max window are set to the same instant for a single-day pickup.
	pickupData.MinDatetime = &pickupDate
	pickupData.MaxDatetime = &pickupDate
	return pickupData
}

// BasicCarrierAccount returns the basic carrier account fixture.
func (fixture *Fixture) BasicCarrierAccount() *easypost.CarrierAccount {
	return readFixtureData().CarrierAccounts["basic"]
}

// This fixture will require you to add a `tracking_code` key with a tracking code from a shipment
func (fixture *Fixture) BasicInsurance() *easypost.Insurance {
	return readFixtureData().Insurances["basic"]
}

// BasicOrder returns the basic order fixture.
func (fixture *Fixture) BasicOrder() *easypost.Order {
	return readFixtureData().Orders["basic"]
}

// EventBody returns the raw JSON of the event-body fixture file (its last
// line, since the scanner overwrites eventBody on each iteration).
func (fixture *Fixture) EventBody() []byte {
	currentDir, _ := os.Getwd()
	parentDir := filepath.Dir(currentDir)
	filePath := fmt.Sprintf("%s%s", parentDir, "/examples/official/fixtures/event-body.json")
	/* #nosec */
	data, err := os.Open(filePath)
	if err != nil {
		fmt.Fprintln(os.Stderr, "error opening fixture file:", err)
		os.Exit(1)
	}
	defer func() { _ = data.Close() }()
	scanner := bufio.NewScanner(data)
	var eventBody []byte
	for scanner.Scan() {
		eventBody = []byte(scanner.Text())
	}
	return eventBody
}

// RmaFormOptions returns the RMA form options fixture.
func (fixture *Fixture) RmaFormOptions() map[string]interface{} {
	return readFixtureData().FormOptions["rma"]
}

// ReferralUser returns the referral user options fixture.
func (fixture *Fixture) ReferralUser() *easypost.UserOptions {
	return readFixtureData().Users["referral"]
}

// TestCreditCard returns the test credit card options fixture.
func (fixture *Fixture) TestCreditCard() *easypost.CreditCardOptions {
	return readFixtureData().CreditCards["test"]
}

// PlannedShipDate returns a fixed planned ship date used in tests.
func (fixture *Fixture) PlannedShipDate() string {
	return "2023-04-28"
}
|
/*
* Copyright 2017 StreamSets Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package store
import (
"bytes"
"encoding/json"
"github.com/streamsets/datacollector-edge/container/common"
"github.com/streamsets/datacollector-edge/container/util"
"io"
"io/ioutil"
"os"
"time"
)
const (
PIPELINE_STATE_FILE = "pipelineState.json"
PIPELINE_STATE_HISTORY_FILE = "pipelineStateHistory.json"
IS_REMOTE_PIPELINE = "IS_REMOTE_PIPELINE"
ISSUES = "issues"
)
// checkFileExists reports whether the file at filePath exists. A "not exist"
// stat result is not an error; any other stat failure is returned as-is.
//
// Idiom fix: flattened the if/else-if/else chain into early returns so the
// happy path stays left-aligned.
func checkFileExists(filePath string) (bool, error) {
	_, err := os.Stat(filePath)
	if os.IsNotExist(err) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}
// GetState returns the persisted state for pipelineId. If no state file
// exists yet it synthesizes a default EDITED state (flagged as a local,
// non-remote pipeline), persists it, and returns it — so a first call has
// the side effect of creating the run-info directory and state file.
func GetState(pipelineId string) (*common.PipelineState, error) {
	fileExists, err := checkFileExists(getPipelineStateFile(pipelineId))
	if err != nil {
		return nil, err
	}
	if !fileExists {
		// First access: build the default state for a freshly edited pipeline.
		pipelineState := &common.PipelineState{
			PipelineId: pipelineId,
			Status:     common.EDITED,
			Message:    "",
			TimeStamp:  util.ConvertTimeToLong(time.Now()),
		}
		pipelineState.Attributes = make(map[string]interface{})
		pipelineState.Attributes[IS_REMOTE_PIPELINE] = false
		// Make sure the run-info directory exists before writing the file.
		err = os.MkdirAll(getRunInfoDir(pipelineId), os.ModePerm)
		if err == nil {
			err = SaveState(pipelineId, pipelineState)
		}
		// Note: the synthesized state is returned even if persisting failed.
		return pipelineState, err
	} else {
		file, readError := ioutil.ReadFile(getPipelineStateFile(pipelineId))
		if readError != nil {
			return nil, readError
		}
		var pipelineState common.PipelineState
		err := json.Unmarshal(file, &pipelineState)
		return &pipelineState, err
	}
}
// Edited records that the pipeline was edited; when the edit originated
// remotely, the state's attributes are reset to mark it as a remote pipeline.
func Edited(pipelineId string, isRemote bool) error {
	state, err := GetState(pipelineId)
	if err != nil {
		return err
	}
	if isRemote {
		attrs := make(map[string]interface{})
		attrs[IS_REMOTE_PIPELINE] = isRemote
		state.Attributes = attrs
	}
	return SaveState(pipelineId, state)
}
// SaveState persists pipelineState to the pipeline's state file and appends
// the same JSON document as one line to the pipeline's state-history file.
//
// Idiom fix: the deeply nested "if err == nil" ladder is replaced with early
// returns; behavior is unchanged.
func SaveState(pipelineId string, pipelineState *common.PipelineState) error {
	pipelineStateJson, err := json.Marshal(pipelineState)
	if err != nil {
		return err
	}
	if err := ioutil.WriteFile(getPipelineStateFile(pipelineId), pipelineStateJson, 0644); err != nil {
		return err
	}
	// Append to the history file as well, creating it if it does not exist.
	openFlag := os.O_APPEND | os.O_CREATE | os.O_WRONLY
	historyFile, err := os.OpenFile(getPipelineStateHistoryFile(pipelineId), openFlag, 0666)
	if err != nil {
		return err
	}
	defer historyFile.Close()
	if _, err := historyFile.Write(pipelineStateJson); err != nil {
		return err
	}
	// One state per line makes the history parseable with a JSON decoder.
	_, err = historyFile.WriteString("\n")
	return err
}
// GetHistory returns every state ever saved for pipelineId, oldest first, by
// decoding the newline-delimited JSON history file. A missing history file
// yields an empty (non-nil) slice.
//
// Fixes: the local was renamed from snake_case `history_of_states` to `history`
// (Go naming convention), and the decode loop now breaks on io.EOF directly
// instead of driving the loop off a shared err variable.
func GetHistory(pipelineId string) ([]*common.PipelineState, error) {
	fileExists, err := checkFileExists(getPipelineStateHistoryFile(pipelineId))
	if err != nil {
		return nil, err
	}
	history := []*common.PipelineState{}
	if !fileExists {
		return history, nil
	}
	fileBytes, err := ioutil.ReadFile(getPipelineStateHistoryFile(pipelineId))
	if err != nil {
		return nil, err
	}
	decoder := json.NewDecoder(bytes.NewReader(fileBytes))
	for {
		var pipelineState common.PipelineState
		if err := decoder.Decode(&pipelineState); err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}
		history = append(history, &pipelineState)
	}
	return history, nil
}
// getPipelineStateFile returns the path of the pipeline's state file.
// NOTE(review): plain concatenation assumes getRunInfoDir returns a path with
// a trailing separator — confirm, or switch to filepath.Join.
func getPipelineStateFile(pipelineId string) string {
	return getRunInfoDir(pipelineId) + PIPELINE_STATE_FILE
}

// getPipelineStateHistoryFile returns the path of the pipeline's
// state-history file (same trailing-separator assumption as above).
func getPipelineStateHistoryFile(pipelineId string) string {
	return getRunInfoDir(pipelineId) + PIPELINE_STATE_HISTORY_FILE
}
|
package format
import (
"testing"
)
// duplicate the map so individual tests dont mess with things
func getTestMap() map[string]interface{} {
m := map[string]interface{}{
"int": 1,
"negativeInt": -2,
"answer to life": 42,
"string": "test",
"emptyString": "",
"true": true,
"false": false,
}
nm := make(map[string]interface{})
for k, v := range m {
nm[k] = v
}
return nm
}
// makeTestStorage returns a Storage pre-populated with the shared test map.
func makeTestStorage() *Storage {
	return &Storage{
		data: getTestMap(),
	}
}
// TestStorage_Delete verifies that deleted keys no longer resolve via get.
func TestStorage_Delete(t *testing.T) {
	storage := makeTestStorage()
	cases := []struct {
		testName   string
		targetName string
	}{
		{"int", "int"},
		{"bool1", "true"},
		{"bool2", "false"},
	}
	for _, tc := range cases {
		tc := tc // capture for the subtest closure
		t.Run(tc.testName, func(t *testing.T) {
			storage.Delete(tc.targetName)
			_, ok := storage.get(tc.targetName)
			if ok {
				t.Errorf("Delete(): expected %q not to exist in %#v", tc.targetName, storage)
			}
		})
	}
}
// TestStorage_GetBool covers stored true/false values and the default path
// for missing keys.
func TestStorage_GetBool(t *testing.T) {
	s := makeTestStorage()
	tests := []struct {
		testName   string
		targetName string
		default_   bool //nolint:golint // It's to avoid the default keyword
		want       bool
	}{
		{
			"get true",
			"true",
			false,
			true,
		},
		{
			"get false",
			"false",
			true,
			false,
		},
		{
			"get nonexistent false default",
			"thisDoesn'tExist",
			false,
			false,
		},
		{
			"get nonexistent true default",
			"thisDoesn'tExist",
			true,
			true,
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.testName, func(t *testing.T) {
			if got := s.GetBool(tt.targetName, tt.default_); got != tt.want {
				t.Errorf("GetBool() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestStorage_GetInt covers positive, negative, and missing-key (default)
// lookups.
func TestStorage_GetInt(t *testing.T) {
	s := makeTestStorage()
	tests := []struct {
		testName   string
		targetName string
		default_   int //nolint:golint // It's to avoid the default keyword
		want       int
	}{
		{
			"get positive",
			"int",
			1337,
			1,
		},
		{
			"get negative",
			"negativeInt",
			1337,
			-2,
		},
		{
			"life",
			"answer to life",
			-1337,
			42,
		},
		{
			"default",
			"no exist",
			1337,
			1337,
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.testName, func(t *testing.T) {
			if got := s.GetInt(tt.targetName, tt.default_); got != tt.want {
				t.Errorf("GetInt() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestStorage_GetString covers normal values, the empty string (which must
// NOT fall back to the default), and missing keys.
func TestStorage_GetString(t *testing.T) {
	s := makeTestStorage()
	tests := []struct {
		testName   string
		targetName string
		default_   string //nolint:golint // It's to avoid the default keyword
		want       string
	}{
		{
			"get",
			"string",
			"asd",
			"test",
		},
		{
			"get empty string",
			"emptyString",
			"this isn't empty",
			"",
		},
		{
			"get not exist",
			"not exist",
			"this doesn't exist",
			"this doesn't exist",
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.testName, func(t *testing.T) {
			if got := s.GetString(tt.targetName, tt.default_); got != tt.want {
				t.Errorf("GetString() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestStorage_SetBool verifies SetBool stores a bool retrievable via get.
//
// Fix: the type-mismatch error used the %t verb (bool formatting) on an
// interface{} value; %T (type) is what the message intends.
func TestStorage_SetBool(t *testing.T) {
	s := new(Storage)
	tests := []struct {
		testName   string
		targetName string
		setting    bool
	}{
		{
			"true",
			"true",
			true,
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.testName, func(t *testing.T) {
			s.SetBool(tt.targetName, tt.setting)
			if res, exists := s.get(tt.targetName); exists {
				if b, ok := res.(bool); !ok {
					t.Errorf("s.SetBool() set a type that was not a bool: %T", res)
				} else if ok && (b != tt.setting) {
					t.Errorf("s.setbool set an incorrect value. got %v, want %v", b, tt.setting)
				}
			}
		})
	}
}
// TestStorage_SetInt verifies SetInt stores an int retrievable via get.
func TestStorage_SetInt(t *testing.T) {
	s := new(Storage)
	cases := []struct {
		testName   string
		targetName string
		setting    int
	}{
		{
			"true",
			"true",
			1337,
		},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			s.SetInt(tc.targetName, tc.setting)
			res, exists := s.get(tc.targetName)
			if !exists {
				return
			}
			got, ok := res.(int)
			if !ok {
				t.Errorf("s.SetInt() set an invalid type: got %T, want Int", res)
				return
			}
			if got != tc.setting {
				t.Errorf("s.SetInt() set an incorrect value. got %v, want %v", got, tc.setting)
			}
		})
	}
}
// TestStorage_SetString verifies SetString stores a string retrievable via get.
//
// Fix: the type-mismatch message said "want Int" — a copy-paste from the
// SetInt test; it now says "want string".
func TestStorage_SetString(t *testing.T) {
	s := new(Storage)
	tests := []struct {
		testName   string
		targetName string
		setting    string
	}{
		{
			"true",
			"true",
			"so I heard ",
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.testName, func(t *testing.T) {
			s.SetString(tt.targetName, tt.setting)
			if res, exists := s.get(tt.targetName); exists {
				if b, ok := res.(string); !ok {
					t.Errorf("s.SetString() set an invalid type: got %T, want string", res)
				} else if ok && (b != tt.setting) {
					t.Errorf("s.SetString() set an incorrect value. got %v, want %v", b, tt.setting)
				}
			}
		})
	}
}
|
package renter
// skyfilefanout.go implements the encoding and decoding of skyfile fanouts. A
// fanout is a description of all of the Merkle roots in a file, organized by
// chunk. Each chunk has N pieces, and each piece has a Merkle root which is a
// 32 byte hash.
//
// The fanout is encoded such that the first 32 bytes are chunk 0 index 0, the
// second 32 bytes are chunk 0 index 1, etc... and then the second chunk is
// appended immediately after, and so on.
import (
"fmt"
"io"
"sync"
"time"
"gitlab.com/NebulousLabs/Sia/build"
"gitlab.com/NebulousLabs/Sia/crypto"
"gitlab.com/NebulousLabs/Sia/modules"
"gitlab.com/NebulousLabs/Sia/modules/renter/filesystem"
"gitlab.com/NebulousLabs/Sia/modules/renter/filesystem/siafile"
"gitlab.com/NebulousLabs/Sia/skykey"
"gitlab.com/NebulousLabs/errors"
)
// fanoutStreamBufferDataSource implements streamBufferDataSource with the
// skyfile so that it can be used to open a stream from the streamBufferSet.
type fanoutStreamBufferDataSource struct {
	// Each chunk is an array of sector hashes that correspond to pieces which
	// can be fetched.
	staticChunks       [][]crypto.Hash
	staticChunkSize    uint64
	staticErasureCoder modules.ErasureCoder
	staticLayout       modules.SkyfileLayout
	staticMasterKey    crypto.CipherKey
	staticMetadata     modules.SkyfileMetadata
	staticStreamID     modules.DataSourceID
	// staticTimeout defines a timeout that is applied to every chunk download
	staticTimeout time.Duration

	// Utils. (static* fields are set once at construction and never mutated;
	// mu guards the remaining mutable state.)
	staticRenter *Renter
	mu           sync.Mutex
}
// newFanoutStreamer will create a modules.Streamer from the fanout of a
// skyfile. The streamer is created by implementing the streamBufferDataSource
// interface on the skyfile, and then passing that to the stream buffer set.
func (r *Renter) newFanoutStreamer(link modules.Skylink, sl modules.SkyfileLayout, metadata modules.SkyfileMetadata, fanoutBytes []byte, timeout time.Duration, sk skykey.Skykey) (modules.Streamer, error) {
	masterKey, err := r.deriveFanoutKey(&sl, sk)
	if err != nil {
		// Fix: error context typo — "count not" -> "could not".
		return nil, errors.AddContext(err, "could not recover siafile fanout because cipher key was unavailable")
	}

	// Create the erasure coder
	ec, err := modules.NewRSSubCode(int(sl.FanoutDataPieces), int(sl.FanoutParityPieces), crypto.SegmentSize)
	if err != nil {
		return nil, errors.New("unable to initialize erasure code")
	}

	// Build the base streamer object.
	fs := &fanoutStreamBufferDataSource{
		staticChunkSize:    modules.SectorSize * uint64(sl.FanoutDataPieces),
		staticErasureCoder: ec,
		staticLayout:       sl,
		staticMasterKey:    masterKey,
		staticMetadata:     metadata,
		staticStreamID:     link.DataSourceID(),
		staticTimeout:      timeout,

		staticRenter: r,
	}
	err = fs.decodeFanout(fanoutBytes)
	if err != nil {
		return nil, errors.AddContext(err, "unable to decode fanout of skyfile")
	}

	// Grab and return the stream.
	stream := r.staticStreamBufferSet.callNewStream(fs, 0)
	return stream, nil
}
// decodeFanout will take the fanout bytes from a skyfile and decode them in to
// the staticChunks filed of the fanoutStreamBufferDataSource.
func (fs *fanoutStreamBufferDataSource) decodeFanout(fanoutBytes []byte) error {
	// Decode piecesPerChunk, chunkRootsSize, and numChunks
	piecesPerChunk, chunkRootsSize, numChunks, err := modules.DecodeFanout(fs.staticLayout, fanoutBytes)
	if err != nil {
		return err
	}

	// Decode the fanout data into the list of chunks for the
	// fanoutStreamBufferDataSource. The layout is row-major: chunk i's roots
	// start at i*chunkRootsSize, with one HashSize-wide root per piece.
	fs.staticChunks = make([][]crypto.Hash, 0, numChunks)
	for i := uint64(0); i < numChunks; i++ {
		chunk := make([]crypto.Hash, piecesPerChunk)
		for j := uint64(0); j < piecesPerChunk; j++ {
			fanoutOffset := (i * chunkRootsSize) + (j * crypto.HashSize)
			// copy reads exactly HashSize bytes into the fixed-size array.
			copy(chunk[j][:], fanoutBytes[fanoutOffset:])
		}
		fs.staticChunks = append(fs.staticChunks, chunk)
	}
	return nil
}
// skyfileEncodeFanout will create the serialized fanout for a fileNode: the
// concatenated list of Merkle roots, piece 0 of chunk 0 first, then piece 1
// of chunk 0, and so on, with the full set of erasure coded pieces included.
//
// Special case: an unencrypted 1-of-N file stores every piece identically, so
// only the first piece of each chunk is included.
//
// NOTE: This method should not be called unless the fileNode is available,
// meaning that all the dataPieces have been uploaded.
func skyfileEncodeFanout(fileNode *filesystem.FileNode, reader io.Reader) ([]byte, error) {
	// Determine whether the compact 1-of-N plaintext encoding applies.
	plaintext := fileNode.MasterKey().Type() == crypto.TypePlain
	onlyOnePieceNeeded := plaintext && fileNode.ErasureCode().MinPieces() == 1

	// The compact form — and any call without a reader — can be derived from
	// the fileNode alone.
	if onlyOnePieceNeeded || reader == nil {
		return skyfileEncodeFanoutFromFileNode(fileNode, onlyOnePieceNeeded)
	}

	// Otherwise derive the fanout from the raw data, because not every parity
	// piece is guaranteed to have been uploaded yet.
	return skyfileEncodeFanoutFromReader(fileNode, reader)
}
// skyfileEncodeFanoutFromFileNode will create the serialized fanout for
// a fileNode. The encoded fanout is just the list of hashes that can be used to
// retrieve a file concatenated together, where piece 0 of chunk 0 is first,
// piece 1 of chunk 0 is second, etc. This method assumes the special case for
// unencrypted 1-of-N files. Because every piece is identical for an unencrypted
// 1-of-N file, only the first piece of each chunk is included.
func skyfileEncodeFanoutFromFileNode(fileNode *filesystem.FileNode, onePiece bool) ([]byte, error) {
	// Allocate the memory for the fanout.
	fanout := make([]byte, 0, fileNode.NumChunks()*crypto.HashSize)

	// findPieceInPieceSet will scan through a piece set and return the first
	// non-empty piece in the set. If the set is empty, or every piece in the
	// set is empty, then the emptyHash is returned.
	var emptyHash crypto.Hash
	findPieceInPieceSet := func(pieceSet []siafile.Piece) crypto.Hash {
		for _, piece := range pieceSet {
			if piece.MerkleRoot != emptyHash {
				return piece.MerkleRoot
			}
		}
		return emptyHash
	}

	// Build the fanout one chunk at a time.
	for i := uint64(0); i < fileNode.NumChunks(); i++ {
		// Get the pieces for this chunk.
		allPieces, err := fileNode.Pieces(i)
		if err != nil {
			return nil, errors.AddContext(err, "unable to get sector roots from file")
		}

		// Special case: if only one piece is needed, only use the first piece
		// that is available. This is because 1-of-N files are encoded more
		// compactly in the fanout.
		if onePiece {
			root := emptyHash
			for _, pieceSet := range allPieces {
				root = findPieceInPieceSet(pieceSet)
				if root != emptyHash {
					fanout = append(fanout, root[:]...)
					break
				}
			}
			// If root is still equal to emptyHash it means that we didn't add a piece
			// root for this chunk. That violates the availability precondition,
			// so it is treated as a critical (developer) error.
			if root == emptyHash {
				err = fmt.Errorf("No piece root encoded for chunk %v", i)
				build.Critical(err)
				return nil, err
			}
			continue
		}

		// Generate all the piece roots
		for pi, pieceSet := range allPieces {
			root := findPieceInPieceSet(pieceSet)
			if root == emptyHash {
				err = fmt.Errorf("Empty piece root at index %v found for chunk %v", pi, i)
				build.Critical(err)
				return nil, err
			}
			fanout = append(fanout, root[:]...)
		}
	}
	return fanout, nil
}
// skyfileEncodeFanoutFromReader will create the serialized fanout for
// a fileNode. The encoded fanout is just the list of hashes that can be used to
// retrieve a file concatenated together, where piece 0 of chunk 0 is first,
// piece 1 of chunk 0 is second, etc. The full set of erasure coded pieces are
// included.
func skyfileEncodeFanoutFromReader(fileNode *filesystem.FileNode, reader io.Reader) ([]byte, error) {
	// Safety check
	if reader == nil {
		err := errors.New("skyfileEncodeFanoutFromReader called with nil reader")
		build.Critical(err)
		return nil, err
	}

	// Generate the remaining pieces of the each chunk to build the fanout bytes
	numPieces := fileNode.ErasureCode().NumPieces()
	fanout := make([]byte, 0, fileNode.NumChunks()*uint64(numPieces)*crypto.HashSize)
	for chunkIndex := uint64(0); chunkIndex < fileNode.NumChunks(); chunkIndex++ {
		// Allocate data pieces and fill them with data from the reader.
		dataPieces, _, err := readDataPieces(reader, fileNode.ErasureCode(), fileNode.PieceSize())
		if err != nil {
			return nil, errors.AddContext(err, "unable to get dataPieces from chunk")
		}

		// Encode the data pieces, forming the chunk's logical data.
		logicalChunkData, _ := fileNode.ErasureCode().EncodeShards(dataPieces)
		for pieceIndex := range logicalChunkData {
			// Encrypt and pad the piece with the given index, then derive the
			// root from the exact bytes a host would store.
			padAndEncryptPiece(chunkIndex, uint64(pieceIndex), logicalChunkData, fileNode.MasterKey())
			root := crypto.MerkleRoot(logicalChunkData[pieceIndex])
			// Unlike in skyfileEncodeFanoutFromFileNode we don't check for an
			// emptyHash here since if MerkleRoot returned an emptyHash it would mean
			// that an emptyHash is a valid MerkleRoot and a host should be able to
			// return the corresponding data.
			fanout = append(fanout, root[:]...)
		}
	}
	return fanout, nil
}
// DataSize returns the amount of file data in the underlying skyfile.
func (fs *fanoutStreamBufferDataSource) DataSize() uint64 {
	return fs.staticLayout.Filesize
}

// ID returns the id of the skylink being fetched, this is just the hash of the
// skylink.
func (fs *fanoutStreamBufferDataSource) ID() modules.DataSourceID {
	return fs.staticStreamID
}

// Metadata returns the metadata of the skylink being fetched.
func (fs *fanoutStreamBufferDataSource) Metadata() modules.SkyfileMetadata {
	return fs.staticMetadata
}
// ReadAt will fetch data from the siafile at the provided offset.
//
// Requests must be non-negative, at most one chunk long, chunk-aligned, and
// fully inside the file; the guards below enforce that contract in order.
func (fs *fanoutStreamBufferDataSource) ReadAt(b []byte, offset int64) (int, error) {
	if offset < 0 {
		return 0, errors.New("cannot read from a negative offset")
	}
	reqOff := uint64(offset)
	reqLen := uint64(len(b))
	if reqLen > fs.staticChunkSize {
		return 0, errors.New("request needs to be no more than RequestSize()")
	}
	if reqOff%fs.staticChunkSize != 0 {
		return 0, errors.New("request needs to be aligned to RequestSize()")
	}
	if reqOff+reqLen > fs.staticLayout.Filesize {
		return 0, errors.New("making a read request that goes beyond the boundaries of the file")
	}

	// An aligned offset maps directly to a chunk index; download that chunk.
	chunkIndex := reqOff / fs.staticChunkSize
	chunkData, err := fs.managedFetchChunk(chunkIndex)
	if err != nil {
		return 0, errors.AddContext(err, "unable to fetch chunk in ReadAt call on fanout streamer")
	}
	return copy(b, chunkData), nil
}
// RequestSize implements streamBufferDataSource and will return the size of a
// logical data chunk (the unit ReadAt requests must be aligned to).
func (fs *fanoutStreamBufferDataSource) RequestSize() uint64 {
	return fs.staticChunkSize
}
// SilentClose will clean up any resources that the fanoutStreamBufferDataSource
// keeps open. The data source holds no open handles, so this is a no-op.
//
// Idiom fix: removed the redundant bare return at the end of a void function
// (staticcheck S1023).
func (fs *fanoutStreamBufferDataSource) SilentClose() {
	// Nothing to clean up.
}
|
package domain
// BindRequest encapsulates the request payload information
// for a bind request.
type BindRequest struct {
	// BindingID is the ID value for the service binding
	// represented by this bind request.
	BindingID string
	// InstanceID is the ID value for the service instance
	// to be bound in this bind request.
	InstanceID string
	// ServiceID is the ID value of the service provided in
	// the service catalog. This service was specified when
	// the service instance was provisioned.
	ServiceID string
	// PlanID is the ID value of the plan provided in the
	// service catalog. This plan was specified when the
	// service instance was provisioned.
	PlanID string
	// AppGUID is the GUID value of the application that the
	// service instance is to be bound to in this bind request.
	AppGUID string
}

// BindResponse encapsulates the response payload information
// for a bind request.
type BindResponse struct {
	// Credentials is an open set of key-value fields used to
	// indicate credential information for this service binding.
	Credentials BindingCredentials
	// SyslogDrainURL is a URL to which CloudFoundry should
	// drain logs for the bound application.
	SyslogDrainURL string
}

// BindingCredentials is an open set of key-value fields used
// to indicate credential information for a service binding.
// The map is schema-free by design; keys and value types are
// defined by the individual service broker.
type BindingCredentials map[string]interface{}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"github.com/julienschmidt/httprouter"
)
var (
	// answer holds the expected answer text, loaded at startup by loadAnswer
	// from private/answer.txt.
	answer = ""
	// webRoot names the static-content directory; presumably used by
	// serveStaticFilesOr404 (defined elsewhere in this file) — confirm.
	webRoot = "web-root"
	// whitelistFile is where the successful caller's IP address is written.
	whitelistFile = "tmp/whitelist"
)
// main loads the expected answer, wires up the single POST endpoint plus the
// static-file fallback, and serves on port 8090 until a fatal error.
func main() {
	loadAnswer()
	router := httprouter.New()
	router.POST("/answer", answerPost)
	router.NotFound = http.HandlerFunc(serveStaticFilesOr404)
	log.Fatal(http.ListenAndServe(":8090", router))
}
// loadAnswer reads the expected answer from private/answer.txt into the
// package-level answer variable; a read failure is fatal via panicOnError.
//
// Idiom fix: the local was named "error", shadowing the builtin error type;
// it is now the conventional "err".
func loadAnswer() {
	loadedAnswer, err := ioutil.ReadFile("private/answer.txt")
	panicOnError(err)
	answer = string(loadedAnswer)
}
// answerPost checks the submitted "answer" form value; on a match it writes
// the caller's client IP (first entry of x-forwarded-for) to the whitelist
// file and responds "true", otherwise "false".
//
// Idiom fix: the local was named "error", shadowing the builtin error type.
func answerPost(responseWriter http.ResponseWriter, request *http.Request, requestParameters httprouter.Params) {
	// NOTE(review): this equality check is not constant-time; if the answer is
	// a secret worth protecting, consider crypto/subtle.ConstantTimeCompare.
	if answer == "" || answer != request.PostFormValue("answer") {
		fmt.Fprint(responseWriter, "false")
		return
	}
	ipAddresses := strings.Split(request.Header.Get("x-forwarded-for"), ", ")
	// NOTE(review): mode 0000 creates a file nobody can read without elevated
	// privileges — presumably a privileged consumer reads it; confirm intent.
	err := ioutil.WriteFile(whitelistFile, []byte(ipAddresses[0]), 0000)
	if err != nil {
		fmt.Fprint(responseWriter, "false")
		return
	}
	fmt.Fprint(responseWriter, "true")
}
|
package consensus
import (
"context"
"encoding/hex"
"math/big"
"time"
"github.com/Secured-Finance/dione/ethclient"
"github.com/asaskevich/EventBus"
"github.com/ethereum/go-ethereum/common"
"github.com/Secured-Finance/dione/config"
"github.com/Secured-Finance/dione/cache"
"github.com/ethereum/go-ethereum/event"
types2 "github.com/Secured-Finance/dione/blockchain/types"
"github.com/Secured-Finance/dione/types"
"github.com/fxamacker/cbor/v2"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/sha3"
"github.com/Secured-Finance/dione/blockchain"
"github.com/Secured-Finance/dione/contracts/dioneDispute"
"github.com/Secured-Finance/dione/contracts/dioneOracle"
)
// DisputeManager reacts to on-chain oracle events: it validates new oracle
// submissions against locally stored task payloads, opens disputes for
// mismatching submissions, and votes on disputes raised by other nodes.
type DisputeManager struct {
	ctx       context.Context
	bus       EventBus.Bus
	ethClient ethclient.EthereumSideAPI
	// voteWindow is how long to wait before finishing a dispute we opened.
	voteWindow time.Duration
	blockchain *blockchain.BlockChain
	// Submission event stream, its Ethereum subscription, and a cache of
	// submissions keyed by request ID.
	submissionChan            chan *dioneOracle.DioneOracleSubmittedOracleRequest
	submissionEthSubscription event.Subscription
	submissionCache           cache.Cache
	// Dispute event stream, its Ethereum subscription, and a cache of
	// disputes keyed by request ID.
	disputesChan           chan *dioneDispute.DioneDisputeNewDispute
	disputeEthSubscription event.Subscription
	disputeCache           cache.Cache
}
// Dispute is the locally cached state of an on-chain dispute.
type Dispute struct {
	Dhash            [32]byte       // on-chain dispute hash
	RequestID        *big.Int       // oracle request the dispute refers to
	Miner            common.Address // miner whose submission is disputed
	DisputeInitiator common.Address // address that opened the dispute
	Timestamp        int64          // unix time when the dispute was first seen
	Voted            bool           // whether this node has already voted
	Finished         bool           // if we are dispute initiator
}
// Submission is the locally cached state of an on-chain oracle submission.
type Submission struct {
	ReqID     *big.Int // oracle request ID
	Data      []byte   // submitted payload
	Timestamp int64    // unix time when the submission was first seen
	Checked   bool     // whether the submission was validated locally
}
// NewDisputeManager subscribes to new-submission and new-dispute events on
// the Ethereum side and returns a manager wired to the given blockchain,
// config and caches. It returns an error if either subscription fails.
// Call Run to start processing events.
func NewDisputeManager(bus EventBus.Bus, ethClient ethclient.EthereumSideAPI, bc *blockchain.BlockChain, cfg *config.Config, cm cache.CacheManager) (*DisputeManager, error) {
	ctx := context.TODO()
	submissionChan, submSubscription, err := ethClient.SubscribeOnNewSubmissions(ctx)
	if err != nil {
		return nil, err
	}
	disputesChan, dispSubscription, err := ethClient.SubscribeOnNewDisputes(ctx)
	if err != nil {
		return nil, err
	}
	dm := &DisputeManager{
		ethClient:                 ethClient,
		ctx:                       ctx,
		bus:                       bus,
		voteWindow:                time.Duration(cfg.Ethereum.DisputeVoteWindow) * time.Second,
		blockchain:                bc,
		submissionChan:            submissionChan,
		submissionEthSubscription: submSubscription,
		submissionCache:           cm.Cache("submissions"),
		disputesChan:              disputesChan,
		disputeEthSubscription:    dispSubscription,
		disputeCache:              cm.Cache("disputes"),
	}
	return dm, nil
}
// Run launches the event loop in a background goroutine. It dispatches
// incoming submissions and disputes to their handlers and unsubscribes
// from both Ethereum subscriptions when ctx is cancelled.
func (dm *DisputeManager) Run(ctx context.Context) {
	go func() {
		for {
			select {
			case <-ctx.Done():
				dm.submissionEthSubscription.Unsubscribe()
				dm.disputeEthSubscription.Unsubscribe()
				return
			case submission := <-dm.submissionChan:
				dm.onNewSubmission(submission)
			case dispute := <-dm.disputesChan:
				dm.onNewDispute(dispute)
			}
		}
	}()
}
// onNewSubmission validates a fresh on-chain oracle submission against the
// task payload stored in the local blockchain. If the payload hashes
// differ, it opens a dispute against the block proposer and schedules the
// dispute to be finished once the vote window elapses. In all cases the
// submission is cached and marked as checked.
func (dm *DisputeManager) onNewSubmission(submission *dioneOracle.DioneOracleSubmittedOracleRequest) {
	s := wrapSubmission(submission)
	s.Timestamp = time.Now().Unix()
	dm.submissionCache.Store(submission.ReqID.String(), s)
	// find a block that contains the dione task with specified request id
	task, block, err := dm.findTaskAndBlockWithRequestID(submission.ReqID.String())
	if err != nil {
		logrus.Error(err)
		return
	}
	submHashBytes := sha3.Sum256(s.Data)
	localHashBytes := sha3.Sum256(task.Payload)
	submHash := hex.EncodeToString(submHashBytes[:])
	localHash := hex.EncodeToString(localHashBytes[:])
	if submHash != localHash {
		logrus.Debugf("submission of request id %s isn't valid - beginning dispute", s.ReqID)
		// Fixed: logrus.Error instead of logrus.Errorf(err.Error()) — the
		// latter treats '%' characters inside the error text as verbs.
		if err := dm.ethClient.BeginDispute(block.Header.ProposerEth, s.ReqID); err != nil {
			logrus.Error(err)
			return
		}
		disputeFinishTimer := time.NewTimer(dm.voteWindow)
		go func() {
			// Every branch below exits, so a single select suffices; the
			// deferred Stop releases the timer on the ctx.Done path.
			defer disputeFinishTimer.Stop()
			select {
			case <-dm.ctx.Done():
				return
			case <-disputeFinishTimer.C:
				var d Dispute
				if err := dm.disputeCache.Get(s.ReqID.String(), &d); err != nil {
					logrus.Error(err)
					return
				}
				if err := dm.ethClient.FinishDispute(d.Dhash); err != nil {
					logrus.Error(err)
					return
				}
				d.Finished = true
				dm.disputeCache.Store(d.RequestID.String(), d)
			}
		}()
	}
	s.Checked = true
	dm.submissionCache.Store(s.ReqID.String(), s)
}
// findTaskAndBlockWithRequestID walks the local chain backwards from the
// latest block and returns the first Dione task (and its containing block)
// whose RequestID matches.
//
// NOTE(review): termination relies on FetchBlockByHeight returning an
// error once height runs below the lowest stored block — confirm, since a
// missing request ID would otherwise loop indefinitely.
func (dm *DisputeManager) findTaskAndBlockWithRequestID(requestID string) (*types.DioneTask, *types2.Block, error) {
	height, err := dm.blockchain.GetLatestBlockHeight()
	if err != nil {
		return nil, nil, err
	}
	for {
		block, err := dm.blockchain.FetchBlockByHeight(height)
		if err != nil {
			return nil, nil, err
		}
		for _, v := range block.Data {
			var task types.DioneTask
			// Transactions that fail to decode are skipped, not fatal.
			err := cbor.Unmarshal(v.Data, &task)
			if err != nil {
				logrus.Error(err)
				continue
			}
			if task.RequestID == requestID {
				return &task, block, nil
			}
		}
		height--
	}
}
// onNewDispute handles a dispute opened on-chain: it caches the dispute,
// compares the disputed submission against the locally computed task
// payload, and votes in support of the dispute exactly when the hashes
// differ. Disputes opened by this node are only marked as voted.
func (dm *DisputeManager) onNewDispute(dispute *dioneDispute.DioneDisputeNewDispute) {
	d := wrapDispute(dispute)
	d.Timestamp = time.Now().Unix()
	dm.disputeCache.Store(d.RequestID.String(), d)
	task, _, err := dm.findTaskAndBlockWithRequestID(d.RequestID.String())
	if err != nil {
		logrus.Error(err)
		return
	}
	var s Submission
	err = dm.submissionCache.Get(d.RequestID.String(), &s)
	if err != nil {
		logrus.Warnf("submission of request id %s isn't found in cache", d.RequestID.String())
		return
	}
	// Never vote on our own dispute; just record it as handled.
	if dispute.DisputeInitiator.Hex() == dm.ethClient.GetEthAddress().Hex() {
		d.Voted = true
		dm.disputeCache.Store(d.RequestID.String(), d)
		return
	}
	submHashBytes := sha3.Sum256(s.Data)
	localHashBytes := sha3.Sum256(task.Payload)
	submHash := hex.EncodeToString(submHashBytes[:])
	localHash := hex.EncodeToString(localHashBytes[:])
	// Support the dispute (vote true) iff the submission does NOT match
	// our local result. The two previous branches only differed in this
	// boolean, so they are folded into one call.
	if err := dm.ethClient.VoteDispute(d.Dhash, submHash != localHash); err != nil {
		// Fixed: logrus.Error instead of logrus.Errorf(err.Error()) — the
		// latter treats '%' characters inside the error text as verbs.
		logrus.Error(err)
		return
	}
	d.Voted = true
	dm.disputeCache.Store(dispute.RequestID.String(), d)
}
// wrapDispute converts a raw contract event into the locally cached
// Dispute representation; Timestamp, Voted and Finished start at their
// zero values and are filled in by the caller.
func wrapDispute(d *dioneDispute.DioneDisputeNewDispute) *Dispute {
	return &Dispute{
		Dhash:            d.Dhash,
		RequestID:        d.RequestID,
		Miner:            d.Miner,
		DisputeInitiator: d.DisputeInitiator,
	}
}
// wrapSubmission converts a raw contract event into the locally cached
// Submission representation; Timestamp and Checked are set by the caller.
func wrapSubmission(s *dioneOracle.DioneOracleSubmittedOracleRequest) *Submission {
	return &Submission{
		ReqID: s.ReqID,
		Data:  s.Data,
	}
}
|
package main
import "sync"
// merge fans in the values from every channel in ch onto a single output
// channel, which is closed once all inputs are drained. Closing done
// unblocks and terminates every forwarding goroutine early so none of
// them leak when the consumer stops reading.
func merge(done <-chan struct{}, ch ...<-chan int) <-chan int {
	var wg sync.WaitGroup
	out := make(chan int)
	// output forwards values from c until c closes or done fires.
	output := func(c <-chan int) {
		defer wg.Done()
		for n := range c {
			select {
			case out <- n:
			case <-done:
				// Fixed: the original fell through here and kept ranging
				// over c; returning is the canonical cancellation pattern
				// so the goroutine exits promptly.
				return
			}
		}
	}
	wg.Add(len(ch))
	for _, c := range ch {
		go output(c)
	}
	go func() {
		wg.Wait()
		close(out)
	}()
	return out
}
// gen emits each of nums on a fresh channel and closes the channel when
// all values have been sent.
func gen(nums ...int) <-chan int {
	out := make(chan int)
	go func() {
		defer close(out)
		for _, v := range nums {
			out <- v
		}
	}()
	return out
}
// sq squares every value received from ch and emits the results on a new
// channel, closing it once ch is exhausted.
func sq(ch <-chan int) <-chan int {
	out := make(chan int)
	go func() {
		defer close(out)
		for v := range ch {
			out <- v * v
		}
	}()
	return out
}
|
//自定义中间件 recorder记录
package main
import (
"net/http"
"net/http/httptest"
)
type MiddleWare struct {
http.Handler
}
func (self *MiddleWare)ServeHTTP(w http.ResponseWriter, r *http.Request) {
rec := httptest.NewRecorder()
self.Handler.ServeHTTP(rec,r)
for k,v := range rec.Header(){
w.Header()[k] = v
}
w.Header().Set("go-web-foundation","vip")
w.WriteHeader(418)
w.Write([]byte("hey!")
w.Write(rec.Body.Bytes())
}
// myHandler is the demo inner handler; it writes a fixed greeting body.
func myHandler(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("hello world!"))
}
// main wraps myHandler in the recording middleware and serves it on :8080.
// NOTE(review): the error from ListenAndServe is ignored, so startup
// failures (e.g. port in use) are silent.
func main() {
	mid := &MiddleWare{
		http.HandlerFunc(myHandler),
	}
	http.ListenAndServe(":8080", mid)
}
package ber
import (
"bytes"
"io"
"math"
"testing"
)
// TestReadIdentifier table-tests readIdentifier over the BER identifier
// octet formats: single-byte (low-tag) identifiers for each class/type
// combination, and multi-byte high-tag-number encodings including
// truncation, invalid-first-byte, overflow and trailing-data cases.
// ExpectedBytesRead is asserted even for error cases.
func TestReadIdentifier(t *testing.T) {
	testCases := map[string]struct {
		Data               []byte
		ExpectedIdentifier Identifier
		ExpectedBytesRead  int
		ExpectedError      string
	}{
		"empty": {
			Data:              []byte{},
			ExpectedBytesRead: 0,
			ExpectedError:     io.EOF.Error(),
		},
		"universal primitive eoc": {
			Data: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagEOC)},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypePrimitive,
				Tag:       TagEOC,
			},
			ExpectedBytesRead: 1,
		},
		"universal primitive character string": {
			Data: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagCharacterString)},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypePrimitive,
				Tag:       TagCharacterString,
			},
			ExpectedBytesRead: 1,
		},
		"universal constructed bit string": {
			Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagBitString)},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       TagBitString,
			},
			ExpectedBytesRead: 1,
		},
		"universal constructed character string": {
			Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagCharacterString)},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       TagCharacterString,
			},
			ExpectedBytesRead: 1,
		},
		"application constructed object descriptor": {
			Data: []byte{byte(ClassApplication) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
			ExpectedIdentifier: Identifier{
				ClassType: ClassApplication,
				TagType:   TypeConstructed,
				Tag:       TagObjectDescriptor,
			},
			ExpectedBytesRead: 1,
		},
		"context constructed object descriptor": {
			Data: []byte{byte(ClassContext) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
			ExpectedIdentifier: Identifier{
				ClassType: ClassContext,
				TagType:   TypeConstructed,
				Tag:       TagObjectDescriptor,
			},
			ExpectedBytesRead: 1,
		},
		"private constructed object descriptor": {
			Data: []byte{byte(ClassPrivate) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
			ExpectedIdentifier: Identifier{
				ClassType: ClassPrivate,
				TagType:   TypeConstructed,
				Tag:       TagObjectDescriptor,
			},
			ExpectedBytesRead: 1,
		},
		// High-tag-number forms: first byte's tag bits are all ones and
		// the tag continues in subsequent base-128 bytes.
		"high-tag-number tag missing bytes": {
			Data:              []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag)},
			ExpectedError:     io.ErrUnexpectedEOF.Error(),
			ExpectedBytesRead: 1,
		},
		"high-tag-number tag invalid first byte": {
			Data:              []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag), 0x0},
			ExpectedError:     "invalid first high-tag-number tag byte",
			ExpectedBytesRead: 2,
		},
		"high-tag-number tag invalid first byte with continue bit": {
			Data:              []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag), byte(HighTagContinueBitmask)},
			ExpectedError:     "invalid first high-tag-number tag byte",
			ExpectedBytesRead: 2,
		},
		"high-tag-number tag continuation missing bytes": {
			Data:              []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag), byte(HighTagContinueBitmask | 0x1)},
			ExpectedError:     io.ErrUnexpectedEOF.Error(),
			ExpectedBytesRead: 2,
		},
		"high-tag-number tag overflow": {
			// Ten continuation bytes exceed what a 64-bit tag can hold.
			Data: []byte{
				byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(0x1),
			},
			ExpectedError:     "high-tag-number tag overflow",
			ExpectedBytesRead: 11,
		},
		"max high-tag-number tag": {
			Data: []byte{
				byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(0x7f),
			},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       Tag(0x7FFFFFFFFFFFFFFF), // 01111111...(63)...11111b
			},
			ExpectedBytesRead: 10,
		},
		"high-tag-number encoding of low-tag value": {
			Data: []byte{
				byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
				byte(TagObjectDescriptor),
			},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       TagObjectDescriptor,
			},
			ExpectedBytesRead: 2,
		},
		"max high-tag-number tag ignores extra data": {
			Data: []byte{
				byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(0x7f),
				byte(0x01), // extra data, shouldn't be read
				byte(0x02), // extra data, shouldn't be read
				byte(0x03), // extra data, shouldn't be read
			},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       Tag(0x7FFFFFFFFFFFFFFF), // 01111111...(63)...11111b
			},
			ExpectedBytesRead: 10,
		},
	}
	for k, tc := range testCases {
		reader := bytes.NewBuffer(tc.Data)
		identifier, read, err := readIdentifier(reader)
		if err != nil {
			if tc.ExpectedError == "" {
				t.Errorf("%s: unexpected error: %v", k, err)
			} else if err.Error() != tc.ExpectedError {
				t.Errorf("%s: expected error %v, got %v", k, tc.ExpectedError, err)
			}
		} else if tc.ExpectedError != "" {
			t.Errorf("%s: expected error %v, got none", k, tc.ExpectedError)
			continue
		}
		// Bytes-read and identifier fields are checked even on expected
		// errors (the expected identifier is the zero value there).
		if read != tc.ExpectedBytesRead {
			t.Errorf("%s: expected read %d, got %d", k, tc.ExpectedBytesRead, read)
		}
		if identifier.ClassType != tc.ExpectedIdentifier.ClassType {
			t.Errorf("%s: expected class type %d (%s), got %d (%s)", k,
				tc.ExpectedIdentifier.ClassType,
				ClassMap[tc.ExpectedIdentifier.ClassType],
				identifier.ClassType,
				ClassMap[identifier.ClassType],
			)
		}
		if identifier.TagType != tc.ExpectedIdentifier.TagType {
			t.Errorf("%s: expected tag type %d (%s), got %d (%s)", k,
				tc.ExpectedIdentifier.TagType,
				TypeMap[tc.ExpectedIdentifier.TagType],
				identifier.TagType,
				TypeMap[identifier.TagType],
			)
		}
		if identifier.Tag != tc.ExpectedIdentifier.Tag {
			t.Errorf("%s: expected tag %d (%s), got %d (%s)", k,
				tc.ExpectedIdentifier.Tag,
				tagMap[tc.ExpectedIdentifier.Tag],
				identifier.Tag,
				tagMap[identifier.Tag],
			)
		}
	}
}
// TestEncodeIdentifier table-tests encodeIdentifier: single-byte low-tag
// identifiers for each class/type combination, the low/high tag boundary
// around TagBMPString, and the maximum 64-bit high-tag-number encoding.
func TestEncodeIdentifier(t *testing.T) {
	testCases := map[string]struct {
		Identifier    Identifier
		ExpectedBytes []byte
	}{
		"universal primitive eoc": {
			Identifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypePrimitive,
				Tag:       TagEOC,
			},
			ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagEOC)},
		},
		"universal primitive character string": {
			Identifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypePrimitive,
				Tag:       TagCharacterString,
			},
			ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagCharacterString)},
		},
		"universal constructed bit string": {
			Identifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       TagBitString,
			},
			ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagBitString)},
		},
		"universal constructed character string": {
			Identifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       TagCharacterString,
			},
			ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagCharacterString)},
		},
		"application constructed object descriptor": {
			Identifier: Identifier{
				ClassType: ClassApplication,
				TagType:   TypeConstructed,
				Tag:       TagObjectDescriptor,
			},
			ExpectedBytes: []byte{byte(ClassApplication) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
		},
		"context constructed object descriptor": {
			Identifier: Identifier{
				ClassType: ClassContext,
				TagType:   TypeConstructed,
				Tag:       TagObjectDescriptor,
			},
			ExpectedBytes: []byte{byte(ClassContext) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
		},
		"private constructed object descriptor": {
			Identifier: Identifier{
				ClassType: ClassPrivate,
				TagType:   TypeConstructed,
				Tag:       TagObjectDescriptor,
			},
			ExpectedBytes: []byte{byte(ClassPrivate) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
		},
		// TagBMPString is the largest tag that still fits the single-byte
		// (low-tag) form; TagBMPString+1 must switch to the high-tag form.
		"max low-tag-number tag": {
			Identifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       TagBMPString,
			},
			ExpectedBytes: []byte{
				byte(ClassUniversal) | byte(TypeConstructed) | byte(TagBMPString),
			},
		},
		"min high-tag-number tag": {
			Identifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       TagBMPString + 1,
			},
			ExpectedBytes: []byte{
				byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
				byte(TagBMPString + 1),
			},
		},
		"max high-tag-number tag": {
			Identifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       Tag(math.MaxInt64),
			},
			ExpectedBytes: []byte{
				byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(0x7f),
			},
		},
	}
	for k, tc := range testCases {
		b := encodeIdentifier(tc.Identifier)
		if !bytes.Equal(tc.ExpectedBytes, b) {
			t.Errorf("%s: Expected\n\t%#v\ngot\n\t%#v", k, tc.ExpectedBytes, b)
		}
	}
}
// TestEncodeHighTag checks the base-128 continuation-byte encoding of
// high-tag-number values against hand-computed expectations.
func TestEncodeHighTag(t *testing.T) {
	for _, c := range []struct {
		tag  Tag
		want []byte
	}{
		{134, []byte{0x80 + 0x01, 0x06}},
		{123456, []byte{0x80 + 0x07, 0x80 + 0x44, 0x40}},
		{0xFF, []byte{0x81, 0x7F}},
	} {
		if got := encodeHighTag(c.tag); !bytes.Equal(c.want, got) {
			t.Errorf("tag: %d want: %#v got: %#v", c.tag, c.want, got)
		}
	}
}
|
package feed
import (
"api/factory"
"encoding/json"
"net/http"
)
// Create handles the feed-creation HTTP endpoint: it decodes a Feed from
// the request body, inserts a row into the feed table inside a database
// transaction, and writes the stored feed (including generated fields)
// back to the client as JSON.
func Create(response http.ResponseWriter, request *http.Request) {
	feed := New()
	var feedRequest Feed
	defer request.Body.Close()
	{
		// Decode the incoming JSON payload.
		// NOTE(review): a malformed body answers 500; 400 (Bad Request)
		// would describe a client error better.
		if err := json.NewDecoder(request.Body).Decode(&feedRequest); err != nil {
			response.WriteHeader(http.StatusInternalServerError)
			response.Write([]byte("Algo errado com o json, revise!!"))
			return
		}
	}
	{
		db := factory.GetConnection()
		defer db.Close()
		tx, err := db.Begin()
		e, isEr := factory.CheckErr(err)
		if isEr {
			response.WriteHeader(http.StatusInternalServerError)
			response.Write(e.ReturnError())
			return
		}
		{
			// Insert the feed and read the full row back (RETURNING *).
			stmt, err := tx.Prepare(`INSERT INTO feed (profile_id, image_id, description) VALUES ($1, $2, $3) returning *;`)
			e, isEr := factory.CheckErr(err)
			if isEr {
				tx.Rollback()
				response.WriteHeader(http.StatusInternalServerError)
				response.Write(e.ReturnError())
				return
			}
			err = stmt.QueryRow(feedRequest.Profile.ID, feedRequest.Image.ID, feedRequest.Description).Scan(
				&feed.ID,
				&feed.Profile.ID,
				&feed.Image.ID,
				&feed.CreationDate,
				&feed.Description,
			)
			e, isEr = factory.CheckErr(err)
			if isEr {
				tx.Rollback()
				response.WriteHeader(http.StatusInternalServerError)
				response.Write(e.ReturnError())
				return
			}
		}
		// NOTE(review): tx.Commit's error is ignored; a failed commit would
		// still report success to the client.
		tx.Commit()
	}
	// Marshal error is ignored; feed was built from scanned DB values.
	payload, _ := json.Marshal(feed)
	response.Write(payload)
}
|
package main
import (
"bytes"
"encoding/json"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"testing"
)
// Shared fixtures for the route tests in this file.
var drinkList = []string{"beer", "wine", "coke"}

// user and playlistUser are valid credentials used by the happy-path tests.
const user = "marvin"
const playlistUser = "paranoid"
// TestPingPongRoute verifies the health-check endpoint answers "pong".
func TestPingPongRoute(t *testing.T) {
	router := InitRoutes()
	// Disable the drink-queueing side effect during tests.
	queueDrinks = false
	w := performGetRequest(router, "/api/ping")
	assert.Equal(t, http.StatusOK, w.Code)
	assert.Equal(t, "pong", w.Body.String())
}
// TestRandomizeRoute posts a valid request and expects a 200 response whose
// result is one of the submitted drinks.
func TestRandomizeRoute(t *testing.T) {
	router := InitRoutes()
	queueDrinks = false
	requestBody := RandomizeRequest{drinkList, playlistUser, user}
	body, _ := json.Marshal(requestBody)
	w := performPostRequest(router, "/api/v1/randomize", body)
	var result ResultModel
	err := json.Unmarshal(w.Body.Bytes(), &result)
	assert.Nil(t, err)
	assert.Contains(t, drinkList, result.Result)
	assert.Equal(t, 200, w.Code)
}
// TestRandomizeRouteWrongMethod sends a GET to the POST-only endpoint and
// expects a 405 whose payload names the allowed and the offending methods.
func TestRandomizeRouteWrongMethod(t *testing.T) {
	router := InitRoutes()
	queueDrinks = false
	w := performGetRequest(router, "/api/v1/randomize")
	var result MethodError
	err := json.Unmarshal(w.Body.Bytes(), &result)
	assert.Nil(t, err)
	assert.Equal(t, 405, w.Code)
	assert.Equal(t, "POST", result.Messages[0].Methods)
	assert.Contains(t, result.Messages[0].Message, "GET")
}
// TestRandomizeRouteInvalidUser expects a 400 validation error naming the
// invalid username field and value.
func TestRandomizeRouteInvalidUser(t *testing.T) {
	router := InitRoutes()
	queueDrinks = false
	requestBody := RandomizeRequest{drinkList, playlistUser, invalidUser}
	body, _ := json.Marshal(requestBody)
	w := performPostRequest(router, "/api/v1/randomize", body)
	var result ValidationError
	err := json.Unmarshal(w.Body.Bytes(), &result)
	assert.Nil(t, err)
	assert.Equal(t, 400, w.Code)
	assert.Equal(t, usernameField, result.Messages[0].Field)
	assert.Contains(t, result.Messages[0].Message, invalidUser)
}
// TestRandomizeRouteInvalidPlaylist expects a 400 validation error naming
// the invalid playlist field and value.
func TestRandomizeRouteInvalidPlaylist(t *testing.T) {
	router := InitRoutes()
	// Consistency fix: every sibling test disables drink queueing, but this
	// one left the global flag at whatever a previous test set, making it
	// order-dependent.
	queueDrinks = false
	requestBody := RandomizeRequest{drinkList, invalidPlayList, user}
	body, _ := json.Marshal(requestBody)
	w := performPostRequest(router, "/api/v1/randomize", body)
	var result ValidationError
	err := json.Unmarshal(w.Body.Bytes(), &result)
	assert.Nil(t, err)
	assert.Equal(t, 400, w.Code)
	assert.Equal(t, playlistField, result.Messages[0].Field)
	assert.Contains(t, result.Messages[0].Message, invalidPlayList)
}
func performGetRequest(r http.Handler, path string) *httptest.ResponseRecorder {
req, _ := http.NewRequest("GET", path, nil)
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
return w
}
func performPostRequest(r http.Handler, path string, body []byte) *httptest.ResponseRecorder {
req, _ := http.NewRequest("POST", path, bytes.NewBuffer(body))
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
return w
} |
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-09-18 08:55
# @File : lt_1171_Remove_Zero_Sum_Consecutive_Nodes_from_Linked_List.go
# @Description :
# @Attention :
*/
package v0
/*
Remove consecutive node sequences whose values sum to zero.

Key idea: record the running prefix sum at every node; if the same prefix
sum appears at positions x and y, the nodes in (x, y] sum to zero and can
be skipped.
*/
func removeZeroSumSublists(head *ListNode) *ListNode {
	if nil == head {
		return nil
	}
	// Dummy head simplifies removal of a zero-sum prefix.
	dummy := &ListNode{
		Next: head,
	}
	// First pass: map each prefix sum to the LAST node where it occurs.
	flagMap := make(map[int]*ListNode, 0)
	sum := 0
	for node := dummy; nil != node; node = node.Next {
		sum += node.Val
		flagMap[sum] = node
	}
	// Second pass: link every node to the successor of the last node that
	// shares its prefix sum, skipping the zero-sum span in between.
	sum = 0
	for node := dummy; nil != node; node = node.Next {
		sum += node.Val
		node.Next = flagMap[sum].Next
	}
	return dummy.Next
}
|
package ptrace
import (
"context"
"os"
"github.com/criyle/go-sandbox/pkg/forkexec"
"github.com/criyle/go-sandbox/ptracer"
"github.com/criyle/go-sandbox/runner"
)
// Run starts the tracing process: it builds a forkexec runner from this
// Runner's configuration (with ptrace enabled), wraps the user handler in
// a tracer handler, and delegates supervision to the ptrace tracer.
func (r *Runner) Run(c context.Context) runner.Result {
	ch := &forkexec.Runner{
		Args:     r.Args,
		Env:      r.Env,
		ExecFile: r.ExecFile,
		RLimits:  r.RLimits,
		Files:    r.Files,
		WorkDir:  r.WorkDir,
		Seccomp:  r.Seccomp.SockFprog(),
		Ptrace:   true,
		SyncFunc: r.SyncFunc,
		// Only enabled when running as root — presumably because cgroup
		// unsharing needs privileges; confirm against forkexec docs.
		UnshareCgroupAfterSync: os.Getuid() == 0,
	}
	th := &tracerHandler{
		ShowDetails: r.ShowDetails,
		Unsafe:      r.Unsafe,
		Handler:     r.Handler,
	}
	tracer := ptracer.Tracer{
		Handler: th,
		Runner:  ch,
		Limit:   r.Limit,
	}
	return tracer.Trace(c)
}
|
package repo
import "fmt"
// Repo uniquely identifies a GitHub repository.
type Repo struct {
	Owner string
	Name  string
}

// String renders the repository as "owner/name"; a fully zero Repo renders
// as the empty string.
func (r *Repo) String() string {
	if r.Owner != "" || r.Name != "" {
		return fmt.Sprintf("%s/%s", r.Owner, r.Name)
	}
	return ""
}
|
package cmd
import (
"github.com/spf13/cobra"
)
// Create the cancel command.
// cmdCancel implements "sw cancel WORKFLOW", which forwards to the
// external swif tool.
var cmdCancel = &cobra.Command{
	Use:   "cancel WORKFLOW",
	Short: "Cancel a workflow",
	Long: `Cancel a Swif workflow.
Use "sw rm WORKFLOW" to delete a workflow.`,
	Example: `1. sw cancel my-workflow
2. sw cancel ana`,
	Run: runCancel,
}
// init registers the cancel command on the root command.
func init() {
	cmdSW.AddCommand(cmdCancel)
}
// runCancel validates that exactly one workflow name was given, then
// invokes the external swif tool to cancel it.
func runCancel(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		// NOTE(review): assumes exitNoWorkflow terminates the process;
		// otherwise args[0] below would panic when args is empty.
		exitNoWorkflow(cmd)
	}
	run("swif", "cancel", "-workflow", args[0])
}
|
package car
import (
"github.com/shanghuiyang/rpi-devices/dev"
)
// Config bundles the hardware devices a car build may use; fields for
// devices that are absent may be left nil (presumably checked by the
// consumer — not visible here).
type Config struct {
	Engine     *dev.L298N
	Servo      *dev.SG90
	GY25       *dev.GY25
	Horn       *dev.Buzzer
	Led        *dev.Led
	Light      *dev.Led
	Camera     *dev.Camera
	GPS        *dev.GPS
	LC12S      *dev.LC12S
	Collisions []*dev.Collision
	DistMeter  dev.DistMeter
}
|
package main
// main is intentionally empty; this file only hosts the solution function.
func main() {
}
// smallerNumbersThanCurrent returns, for each element, how many other
// elements are strictly smaller. Values are counted into a fixed bucket
// array (inputs are assumed to lie in [0, 100]) whose prefix sums give the
// answer in O(n).
func smallerNumbersThanCurrent(nums []int) []int {
	var freq [101]int
	for _, n := range nums {
		freq[n]++
	}
	// Prefix sums: freq[v] becomes the count of values <= v.
	for i := 1; i <= 100; i++ {
		freq[i] += freq[i-1]
	}
	res := make([]int, len(nums))
	for i, n := range nums {
		if n > 0 {
			res[i] = freq[n-1]
		}
	}
	return res
}
|
/*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bufio"
"errors"
"fmt"
"io"
"os"
"strings"
"github.com/moby/term"
"github.com/spf13/cobra"
"helm.sh/helm/v3/cmd/helm/require"
"helm.sh/helm/v3/pkg/action"
)
const registryLoginDesc = `
Authenticate to a remote registry.
`

// registryLoginOptions collects the flag values for "helm registry login".
type registryLoginOptions struct {
	username             string
	password             string
	passwordFromStdinOpt bool // --password-stdin: read the secret from stdin
	certFile             string
	keyFile              string
	caFile               string
	insecure             bool // allow TLS registries without verified certs
}
// newRegistryLoginCmd builds the "registry login [host]" cobra command. It
// resolves credentials from flags/stdin/prompt via getUsernamePassword and
// delegates the actual login to the action package.
func newRegistryLoginCmd(cfg *action.Configuration, out io.Writer) *cobra.Command {
	o := &registryLoginOptions{}
	cmd := &cobra.Command{
		Use:               "login [host]",
		Short:             "login to a registry",
		Long:              registryLoginDesc,
		Args:              require.MinimumNArgs(1),
		ValidArgsFunction: noCompletions,
		RunE: func(cmd *cobra.Command, args []string) error {
			hostname := args[0]
			username, password, err := getUsernamePassword(o.username, o.password, o.passwordFromStdinOpt)
			if err != nil {
				return err
			}
			return action.NewRegistryLogin(cfg).Run(out, hostname, username, password,
				action.WithCertFile(o.certFile),
				action.WithKeyFile(o.keyFile),
				action.WithCAFile(o.caFile),
				action.WithInsecure(o.insecure))
		},
	}
	f := cmd.Flags()
	f.StringVarP(&o.username, "username", "u", "", "registry username")
	f.StringVarP(&o.password, "password", "p", "", "registry password or identity token")
	f.BoolVarP(&o.passwordFromStdinOpt, "password-stdin", "", false, "read password or identity token from stdin")
	f.BoolVarP(&o.insecure, "insecure", "", false, "allow connections to TLS registry without certs")
	f.StringVar(&o.certFile, "cert-file", "", "identify registry client using this SSL certificate file")
	f.StringVar(&o.keyFile, "key-file", "", "identify registry client using this SSL key file")
	f.StringVar(&o.caFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
	return cmd
}
// Adapted from https://github.com/oras-project/oras
//
// getUsernamePassword resolves registry credentials:
//   - with passwordFromStdinOpt, the password/token is read from stdin and
//     trailing CR/LF are stripped;
//   - otherwise, when no password was supplied, the user is prompted — for
//     a username first if missing; leaving the username empty switches to
//     token-based login;
//   - supplying --password on the command line triggers a warning, since
//     it leaks into shell history and process listings.
func getUsernamePassword(usernameOpt string, passwordOpt string, passwordFromStdinOpt bool) (string, string, error) {
	var err error
	username := usernameOpt
	password := passwordOpt
	if passwordFromStdinOpt {
		passwordFromStdin, err := io.ReadAll(os.Stdin)
		if err != nil {
			return "", "", err
		}
		// Strip one trailing newline and, for Windows input, the CR.
		password = strings.TrimSuffix(string(passwordFromStdin), "\n")
		password = strings.TrimSuffix(password, "\r")
	} else if password == "" {
		if username == "" {
			username, err = readLine("Username: ", false)
			if err != nil {
				return "", "", err
			}
			username = strings.TrimSpace(username)
		}
		// An empty username means token authentication; a token/password
		// must then be entered (echo disabled).
		if username == "" {
			password, err = readLine("Token: ", true)
			if err != nil {
				return "", "", err
			} else if password == "" {
				return "", "", errors.New("token required")
			}
		} else {
			password, err = readLine("Password: ", true)
			if err != nil {
				return "", "", err
			} else if password == "" {
				return "", "", errors.New("password required")
			}
		}
	} else {
		warning("Using --password via the CLI is insecure. Use --password-stdin.")
	}
	return username, password, nil
}
// Copied/adapted from https://github.com/oras-project/oras
//
// readLine prints prompt and reads a single line from stdin. With silent
// set, terminal echo is disabled while typing and restored afterwards,
// and a newline is printed to compensate for the suppressed one.
func readLine(prompt string, silent bool) (string, error) {
	fmt.Print(prompt)
	if silent {
		fd := os.Stdin.Fd()
		state, err := term.SaveState(fd)
		if err != nil {
			return "", err
		}
		// NOTE(review): DisableEcho's error is ignored; if it fails the
		// secret is typed with echo on.
		term.DisableEcho(fd, state)
		defer term.RestoreTerminal(fd, state)
	}
	reader := bufio.NewReader(os.Stdin)
	line, _, err := reader.ReadLine()
	if err != nil {
		return "", err
	}
	if silent {
		fmt.Println()
	}
	return string(line), nil
}
|
package proxy
import (
"fmt"
"net"
"net/http"
"net/url"
"time"
)
// Proxy describes a proxy endpoint plus the connection time measured by
// the most recent successful Test call.
type Proxy struct {
	Scheme   string // e.g. "http" or "socks5"
	IP       string
	Port     string
	ConnTime time.Duration // duration of the last Test request
}
// Test routes a GET for URL through this proxy using the supplied client,
// records how long the request took in p.ConnTime, and validates the
// response with check (or DefaultCheck when check is nil).
func (p *Proxy) Test(client *http.Client, URL string, check func(resp *http.Response) error) error {
	transport, err := p.Transport(client.Timeout)
	if err != nil {
		return err
	}
	client.Transport = transport
	before := time.Now()
	resp, err := client.Get(URL)
	connTime := time.Since(before)
	if err != nil {
		return err
	}
	// Fixed: the response body was never closed, leaking the underlying
	// connection on every test.
	defer resp.Body.Close()
	p.ConnTime = connTime
	if check == nil {
		check = DefaultCheck
	}
	if err = check(resp); err != nil {
		return err
	}
	return nil
}
// Transport builds an http.Transport that routes all requests through this
// proxy, applying the same timeout to dialing and the TLS handshake.
func (p *Proxy) Transport(timeout time.Duration) (*http.Transport, error) {
	URL, err := url.Parse(p.String())
	if err != nil {
		return nil, fmt.Errorf("can't parse proxy url %s,%v", p.String(), err)
	}
	return &http.Transport{
		Proxy: http.ProxyURL(URL),
		// NOTE(review): Dial is deprecated in newer Go in favor of
		// DialContext; kept for compatibility with the file's style.
		Dial: (&net.Dialer{
			Timeout: timeout,
		}).Dial,
		TLSHandshakeTimeout: timeout,
	}, nil
}
// String renders the proxy as a URL of the form scheme://ip:port.
func (p Proxy) String() string {
	return p.Scheme + "://" + p.IP + ":" + p.Port
}
|
package main
import (
"fmt"
"net/http"
"os"
)
func Handler_header(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "write header! ")
fmt.Println(r.Header.Get("Accept-Language"))
w.Header().Set("Accept-Language", r.Header.Get("Accept-Language"))
fmt.Printf("ClientIP: %s \n", r.RemoteAddr)
fmt.Printf("VERSION: %s \n", os.Getenv("VERSION"))
}
func Handler_healthz(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "healthz")
//task1
fmt.Printf("########################HTTP Req Header##########################")
for headerKey, headerValues := range r.Header {
fmt.Println(headerKey)
fmt.Println(headerValues)
for _, value := range headerValues {
w.Header().Add(headerKey, value)
}
}
//task2
fmt.Printf("########################VERSION##########################\n")
w.Header().Add("VERSION", os.Getenv("VERSION"))
fmt.Printf("VERSION: %s \n", os.Getenv("VERSION"))
//task3
fmt.Printf("########################ClientIP##########################\n")
fmt.Printf("ClientIP: %s \n", r.RemoteAddr)
//task 4
w.WriteHeader(200)
fmt.Printf("HTTP return:%d \n", 200)
fmt.Printf("*************************HTTP Res Header***********************************\n")
for headerKey, headerValues := range w.Header().Clone() {
fmt.Println(headerKey)
fmt.Println(headerValues)
}
}
// main registers both handlers and serves HTTP on port 80.
// NOTE(review): the error from ListenAndServe is ignored, so binding
// failures are silent.
func main() {
	http.HandleFunc("/", Handler_header)
	http.HandleFunc("/healthz", Handler_healthz)
	http.ListenAndServe("0.0.0.0:80", nil)
	//fmt.Printf("Web end! \n")
}
|
package backends
import (
//"fmt"
"io"
//"github.com/lioneagle/abnf/src/basic"
//"github.com/lioneagle/goutil/src/chars"
"github.com/lioneagle/goutil/src/code_gen/backends"
//"github.com/lioneagle/goutil/src/code_gen/model"
)
// CGeneratorH generates C header (.h) output; all behavior currently comes
// from the embedded CGeneratorBase.
type CGeneratorH struct {
	CGeneratorBase
}
// NewCGeneratorH builds a header-file generator that writes to w using the
// given C code-generation config.
func NewCGeneratorH(w io.Writer, config backends.CConfig) *CGeneratorH {
	gen := &CGeneratorH{}
	gen.Init(w, config)
	return gen
}
// Init initializes the embedded base generator with the output writer and
// configuration.
func (this *CGeneratorH) Init(w io.Writer, config backends.CConfig) {
	this.CGeneratorBase.Init(w, config)
}
|
package snow
import (
"fmt"
"github.com/HuiOnePos/flysnow/models"
"github.com/HuiOnePos/flysnow/utils"
"gopkg.in/mgo.v2/bson"
)
// ClearReq is the request payload for Clear.
type ClearReq struct {
	// TagTerms maps each tag to the statistic terms that should be cleared.
	TagTerms map[string][]string `json:"tag_terms" `
	// Query holds index key/value filters; every key must belong to the
	// term's configured key set or the whole request is rejected.
	Query bson.M `json:"query"`
	// STime and ETime optionally bound the time range to clear.
	STime int64 `json:"s_time"`
	ETime int64 `json:"e_time"`
}
// clearList is one resolved clear operation for a tag/term pair: the Redis
// key pattern to delete and the Mongo query to purge.
type clearList struct {
	Tag, Term  string
	RdsKey     string
	MongoQuery bson.M
}
// Clear removes cached statistics for the tag/term combinations named in
// the JSON request body: matching Redis keys are deleted and the
// corresponding Mongo index/object collections are purged. It returns a
// nil error and 0 on success, or the error plus an application error code.
func Clear(body []byte) (error, int) {
	req := ClearReq{}
	list := []clearList{}
	err := utils.JsonDecode(body, &req)
	if err != nil {
		return err, models.ErrData
	}
	// Resolve the statistics entries that need to be cleared, building a
	// Redis key pattern and a Mongo query per tag/term.
	var find bool
	var rdskey string
	var query bson.M
	for tag, terms := range req.TagTerms {
		for _, term := range terms {
			if termconfig, ok := models.TermConfigMap[tag][term]; ok {
				rdskey = fmt.Sprintf("%s_%s_*", utils.RDSPrefix, tag)
				query = bson.M{}
				// Every query key must be part of the term's key set.
				for key, value := range req.Query {
					find = false
					for _, k := range termconfig.Key {
						if "@"+key == k {
							find = true
							rdskey += fmt.Sprintf("@%s_%s", key, value)
							query["index."+key] = value
						}
					}
					if !find {
						return models.ErrNew(fmt.Sprintf("%s-%s key:%s not found", tag, term, key)), models.ErrClear
					}
				}
				rdskey += "*"
				if req.STime > 0 {
					query["s_time"] = bson.M{"$gte": req.STime}
				}
				if req.ETime > 0 {
					if req.ETime <= req.STime {
						// Fixed: the original format string had no verbs but
						// passed two arguments, producing "%!(EXTRA ...)"
						// noise instead of the offending values.
						return models.ErrNew(fmt.Sprintf("s_time(%d) must be less than e_time(%d)", req.STime, req.ETime)), models.ErrClear
					}
					query["e_time"] = bson.M{"$lte": req.ETime}
				}
				list = append(list, clearList{tag, term, rdskey, query})
			} else {
				return models.ErrNew(fmt.Sprintf("%s-%s not found", tag, term)), models.ErrClear
			}
		}
	}
	var key string
	for _, clear := range list {
		session := utils.MgoSessionDupl()
		// clear redis
		rdsconn := utils.NewRedisConn()
		keys, _ := rdsconn.Dos("KEYS", clear.RdsKey)
		for _, k := range keys.([]interface{}) {
			key = string(k.([]byte))
			rdsconn.Dos("DEL", key)
		}
		rdsconn.Close()
		// clear mongo
		session.DB(utils.MongoPrefix + clear.Tag).C(utils.MongoIndex + clear.Term).RemoveAll(clear.MongoQuery)
		session.DB(utils.MongoPrefix + clear.Tag).C(utils.MongoOBJ + clear.Term).RemoveAll(clear.MongoQuery)
		session.Close()
	}
	return nil, 0
}
|
package log
import (
"github.com/MuratSs/assert"
"testing"
)
// TestLevel_String checks the string rendering of every log level and
// that an out-of-range level panics.
func TestLevel_String(t *testing.T) {
	var actual string
	var assert = assert.With(t)
	actual = DEBUG.String()
	assert.That(actual).IsEqualTo("DEBUG")
	actual = INFO.String()
	assert.That(actual).IsEqualTo("INFO")
	actual = WARN.String()
	assert.That(actual).IsEqualTo("WARN")
	actual = ERROR.String()
	assert.That(actual).IsEqualTo("ERROR")
	assert.ThatPanics(func() { Level(-1).String() })
}
// TestParse checks parsing of every level name and that an unknown name
// panics.
func TestParse(t *testing.T) {
	var actual Level
	var assert = assert.With(t)
	actual = Parse("DEBUG")
	assert.That(actual).IsEqualTo(DEBUG)
	actual = Parse("INFO")
	assert.That(actual).IsEqualTo(INFO)
	actual = Parse("WARN")
	assert.That(actual).IsEqualTo(WARN)
	actual = Parse("ERROR")
	assert.That(actual).IsEqualTo(ERROR)
	assert.ThatPanics(func() { Parse("foobar") })
}
|
package main
import (
"fmt"
"time"
)
// main launches one goroutine per person and waits for each of them to
// signal completion.
//
// Fixes: the original slept a fixed 5 seconds and received from the
// channel only once, so the second sender stayed blocked on the
// unbuffered channel forever. Receiving once per spawned goroutine lets
// every sender finish and removes the arbitrary sleep.
func main() {
	fmt.Println("==START")
	c := make(chan bool)
	people := [2]string{"A", "B"}
	for _, person := range people {
		go print(person, c)
	}
	// One receive per goroutine: no leaked senders, no fixed sleep.
	for range people {
		fmt.Println(<-c)
	}
	fmt.Println("==END")
}
// print simulates a slow worker: it sleeps five seconds, then signals
// completion on c. The person argument is currently unused.
// NOTE(review): the name shadows the predeclared built-in print.
func print(person string, c chan bool) {
	time.Sleep(time.Second * 5)
	c <- true
}
|
package pkg
import (
"net/http"
"regexp"
"strings"
"sync"
"github.com/davecgh/go-spew/spew"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const keyAuthDefaultHTTPBasicUser = "gte"
const keyAuthApiKeyQuery = "__gteApiKey"
const keyArgsHeadersKey = "__gteHeaders"
// MountRoutes compiles every configured listener and registers its
// handler on the gin engine. A listener that declares no methods is
// mounted for both GET and POST.
func MountRoutes(engine *gin.Engine, config *Config) {
	storageCache := new(sync.Map)
	for route, listenerConfig := range config.Listeners {
		log := logrus.WithField("listener", route)
		listener := compileListener(&config.Defaults, listenerConfig, route, false, storageCache)
		handler := getGinListenerHandler(listener)
		methods := listener.config.Methods
		if len(methods) == 0 {
			engine.GET(route, handler)
			engine.POST(route, handler)
		} else {
			for _, method := range methods {
				engine.Handle(method, route, handler)
			}
		}
		// Dump the full compiled config only when debug logging is enabled.
		if logrus.IsLevelEnabled(logrus.DebugLevel) {
			log.WithFields(logrus.Fields{
				"config": spew.Sdump(listener.config),
			}).Debug("added listener")
		} else {
			log.Info("added listener")
		}
	}
}
// ListenerResponse is the JSON body returned to HTTP callers: the
// command's result, plus an optional storage entry, error text, and the
// result of the error handler when one ran.
type ListenerResponse struct {
	*ExecCommandResult
	Storage            *StorageEntry     `json:"storage,omitempty"`
	Error              *string           `json:"error,omitempty"`
	ErrorHandlerResult *ListenerResponse `json:"errorHandlerResult,omitempty"`
}

// regexListenerRouteCleaner matches any non-word character.
// NOTE(review): not referenced in this section — presumably used
// elsewhere to sanitize route names; confirm before removing.
var regexListenerRouteCleaner = regexp.MustCompile(`[\W]`)
// getGinListenerHandler builds the gin handler for a compiled listener.
// The handler: authenticates the request, collects args from route
// params, headers, body and query, executes the listener's command, and
// replies with a ListenerResponse. On command failure it optionally runs
// the listener's error handler and returns HTTP 500.
func getGinListenerHandler(listener *CompiledListener) gin.HandlerFunc {
	return func(c *gin.Context) {
		if err := verifyAuth(c, listener); err != nil {
			c.AbortWithError(http.StatusUnauthorized, err)
			return
		}
		// Keep track of what to store
		toStore := make(map[string]interface{})
		args := make(map[string]interface{})
		// Use route params, if any
		for _, param := range c.Params {
			args[param.Key] = param.Value
		}
		// Add headers to args
		{
			headerMap := make(map[string]interface{})
			for k := range c.Request.Header {
				// Lower-cased key, single value via GetHeader.
				headerMap[strings.ToLower(k)] = c.GetHeader(k)
			}
			args[keyArgsHeadersKey] = headerMap
		}
		if c.Request.Method != http.MethodGet {
			b := binding.Default(c.Request.Method, c.ContentType())
			if b == binding.Form || b == binding.FormMultipart {
				queryMap := make(map[string][]string)
				if err := c.ShouldBindWith(&queryMap, b); err != nil {
					c.AbortWithError(http.StatusBadRequest, errors.WithMessage(err, "failed to parse request form body"))
					return
				}
				// Last value wins for the plain key; the full slice is
				// kept under a "_form_" prefix.
				for key, vals := range queryMap {
					if len(vals) > 0 {
						args[key] = vals[len(vals)-1]
					} else {
						args[key] = true
					}
					args["_form_"+key] = vals
				}
			} else {
				if err := c.ShouldBindWith(&args, b); err != nil {
					c.AbortWithError(http.StatusBadRequest, errors.WithMessage(err, "failed to parse request body"))
					return
				}
			}
		}
		// Always bind query
		{
			queryMap := make(map[string][]string)
			if err := c.ShouldBindQuery(&queryMap); err != nil {
				c.AbortWithError(http.StatusBadRequest, errors.WithMessage(err, "failed to parse request query"))
				return
			}
			// Same convention as form values, under a "_query_" prefix.
			for key, vals := range queryMap {
				if len(vals) > 0 {
					args[key] = vals[len(vals)-1]
				} else {
					args[key] = true
				}
				args["_query_"+key] = vals
			}
		}
		if listener.storager != nil && listener.config.Storage.StoreArgs() {
			toStore["args"] = args
		}
		out, err := listener.ExecCommand(args, toStore)
		if err != nil {
			err := errors.WithMessagef(err, "failed to execute listener %s", listener.route)
			response := &ListenerResponse{
				ExecCommandResult: out,
				Error:             stringPtr(err.Error()),
			}
			var errorHandlerResult *ListenerResponse
			if listener.errorHandler != nil {
				errorHandler := listener.errorHandler
				errorHandlerResult = &ListenerResponse{}
				toStoreOnError := make(map[string]interface{})
				// Trigger a command on error
				onErrorArgs := map[string]interface{}{
					"route":  listener.route,
					"error":  err.Error(),
					"output": out,
					"args":   args,
				}
				if errorHandler.storager != nil && errorHandler.config.Storage.StoreArgs() {
					toStoreOnError["args"] = args
				}
				// Inner err deliberately shadows the listener error: a
				// handler failure must not overwrite the original.
				errorHandlerExecCommandResult, err := errorHandler.ExecCommand(onErrorArgs, toStoreOnError)
				errorHandlerResult.ExecCommandResult = errorHandlerExecCommandResult
				if err != nil {
					errorHandlerResult.Error = stringPtr(err.Error())
					errorHandler.log.WithError(err).Error("failed to execute error listener")
				} else {
					errorHandler.log.Info("executed error listener")
				}
				if errorHandler.storager != nil && len(toStoreOnError) > 0 {
					if entry := storePayload(
						errorHandler,
						toStoreOnError,
					); entry != nil {
						if errorHandler.config.ReturnStorage() {
							errorHandlerResult.Storage = entry
						}
					}
				}
				toStore["errorHandler"] = toStoreOnError
				// NOTE(review): when there is no errorHandler, toStore is
				// never persisted on the failure path — confirm intended.
				if listener.storager != nil && len(toStore) > 0 {
					if entry := storePayload(
						listener,
						toStore,
					); entry != nil {
						if listener.config.ReturnStorage() {
							response.Storage = entry
						}
					}
				}
				response.ErrorHandlerResult = errorHandlerResult
			}
			c.JSON(http.StatusInternalServerError, response)
			return
		}
		response := &ListenerResponse{
			ExecCommandResult: out,
		}
		if listener.storager != nil && len(toStore) > 0 {
			if entry := storePayload(
				listener,
				toStore,
			); entry != nil {
				if listener.config.ReturnStorage() {
					response.Storage = entry
				}
			}
		}
		c.JSON(http.StatusOK, response)
	}
}
|
/*
* @lc app=leetcode.cn id=31 lang=golang
*
* [31] 下一个排列
*/
// @lc code=start
package main
import "fmt"
// main demonstrates nextPermutation by printing a slice before and
// after the in-place transformation.
func main() {
	original := []int{1, 2, 3, 4, 5}
	snapshot := make([]int, len(original))
	copy(snapshot, original)
	nextPermutation(original)
	fmt.Printf("%v, %v\n", snapshot, original)
}
// nextPermutation rearranges nums in place into the lexicographically
// next greater permutation; if nums is already the highest permutation
// it wraps around to the lowest (sorted ascending). O(n) time, O(1)
// extra space.
//
// Fixes: the previous body was a stub that merely set nums[0] = 0 and
// did not compute a permutation at all.
func nextPermutation(nums []int) {
	// Find the rightmost ascent nums[i] < nums[i+1].
	i := len(nums) - 2
	for i >= 0 && nums[i] >= nums[i+1] {
		i--
	}
	if i >= 0 {
		// Swap nums[i] with the rightmost element greater than it.
		j := len(nums) - 1
		for nums[j] <= nums[i] {
			j--
		}
		nums[i], nums[j] = nums[j], nums[i]
	}
	// The suffix after i is descending; reverse it to minimize it.
	for l, r := i+1, len(nums)-1; l < r; l, r = l+1, r-1 {
		nums[l], nums[r] = nums[r], nums[l]
	}
}
// @lc code=end
|
package stack
import "errors"
// Stack is a LIFO collection of arbitrary values backed by a slice.
type Stack []interface{}

// Push appends val on top of the stack. It never fails; the error
// return exists for interface symmetry with Pop.
func (s *Stack) Push(val interface{}) error {
	*s = append(*s, val)
	return nil
}

// Pop removes and returns the top value, or an error when the stack is
// empty.
func (s *Stack) Pop() (interface{}, error) {
	items := *s
	n := len(items)
	if n == 0 {
		return nil, errors.New("nothing in this stack")
	}
	top := items[n-1]
	*s = items[:n-1]
	return top, nil
}

// Len reports how many values the stack currently holds.
func (s Stack) Len() int {
	return len(s)
}

// Top returns the value on top without removing it, or an error when
// the stack is empty.
func (s Stack) Top() (interface{}, error) {
	n := len(s)
	if n == 0 {
		return nil, errors.New("nothing in this stack")
	}
	return s[n-1], nil
}
|
package ical
import (
"fmt"
"io"
"strings"
)
const timeFormat = "20060102T150405Z"
// NewEncoder returns an Encoder that writes iCalendar output to w.
func NewEncoder(w io.Writer) *Encoder {
	return &Encoder{w: w}
}
// Encode writes cal to the encoder's writer as an iCalendar document.
// Optional event fields are emitted only when set, and newlines inside
// text fields are escaped as the literal two-character sequence \n.
func (ec *Encoder) Encode(cal VCalendar) {
	escape := func(s string) string { return strings.ReplaceAll(s, "\n", `\n`) }
	fmt.Fprintln(ec.w, "BEGIN:VCALENDAR\nVERSION:2.0\nMETHOD:PUBLISH")
	for _, e := range cal.Events {
		fmt.Fprintf(ec.w, "BEGIN:VEVENT\nUID:%s@%s\nCLASS:PUBLIC\n", e.UID, cal.URL)
		if e.Location != "" {
			fmt.Fprintf(ec.w, "LOCATION:%s\n", escape(e.Location))
		}
		if e.Summary != "" {
			fmt.Fprintf(ec.w, "SUMMARY:%s\n", escape(e.Summary))
		}
		if e.Description != "" {
			fmt.Fprintf(ec.w, "DESCRIPTION:%s\n", escape(e.Description))
		}
		if e.DTSTAMP != nil {
			fmt.Fprintf(ec.w, "DTSTAMP:%s\n", e.DTSTAMP.Format(timeFormat))
		}
		if e.DTSTART != nil {
			fmt.Fprintf(ec.w, "DTSTART:%s\n", e.DTSTART.Format(timeFormat))
		}
		if e.DTEND != nil {
			fmt.Fprintf(ec.w, "DTEND:%s\n", e.DTEND.Format(timeFormat))
		}
		if e.CREATED != nil {
			fmt.Fprintf(ec.w, "CREATED:%s\n", e.CREATED.Format(timeFormat))
		}
		if e.URL != nil {
			fmt.Fprintf(ec.w, "URL:%s\n", e.URL.String())
		}
		fmt.Fprintln(ec.w, "END:VEVENT")
	}
	fmt.Fprintln(ec.w, "END:VCALENDAR")
}
|
package Problem0389
// findTheDifference returns the one letter present in t but not in s,
// where t is a permutation of s plus exactly one extra lowercase letter.
// It balances per-letter counts over the common length, adds t's final
// byte, and the letter left with count 1 is the answer.
func findTheDifference(s string, t string) byte {
	counts := make([]int, 26)
	for i := 0; i < len(s); i++ {
		counts[s[i]-'a']--
		counts[t[i]-'a']++
	}
	counts[t[len(t)-1]-'a']++
	i := 0
	for ; i < 26 && counts[i] != 1; i++ {
	}
	return byte('a' + i)
}
|
package slice
// rotate shifts the elements of x right by r positions in place;
// a negative r rotates left. Rotating by any multiple of len(x) is a
// no-op.
//
// Fixes: the original computed r % len(x) before checking the length,
// so a negative r with an empty slice panicked with a division by zero.
// The rotation amount is now normalized once up front.
func rotate(x []int, r int) {
	n := len(x)
	if n == 0 {
		return
	}
	// Map r into [0, n) for both positive and negative inputs.
	r = ((r % n) + n) % n
	if r == 0 {
		return
	}
	y := make([]int, n)
	copy(y, x)
	for i, v := range y {
		x[(i+r)%n] = v
	}
}
|
package log
import (
	"bytes"
	"crypto/tls"
	"fmt"
	"io"
	"net"
	"os"
	"path/filepath"
	"time"

	// raft and proto are referenced throughout this file but were
	// missing from the import block; paths follow the libraries the
	// file already depends on (hashicorp/raft-boltdb, protobuf API).
	"github.com/hashicorp/raft"
	raftboltdb "github.com/hashicorp/raft-boltdb"
	"google.golang.org/protobuf/proto"

	api "github.com/alexeyqian/proglog/api/v1"
)
// Compile-time check that fsm satisfies raft.FSM.
var (
	_ raft.FSM = (*fsm)(nil)
)

// fsm applies committed Raft entries to the local log.
type fsm struct {
	log *Log
}

// DistributedLog is a log replicated across servers through Raft.
type DistributedLog struct {
	config Config
	log    *Log
	raft   *raft.Raft
}
// NewDistributedLog creates the local log and the Raft machinery that
// replicates it, both rooted under dataDir.
func NewDistributedLog(dataDir string, config Config) (*DistributedLog, error) {
	dl := &DistributedLog{config: config}
	if err := dl.setupLog(dataDir); err != nil {
		return nil, err
	}
	if err := dl.setupRaft(dataDir); err != nil {
		return nil, err
	}
	return dl, nil
}
// setupLog creates the directory for the user-facing log and opens it.
func (l *DistributedLog) setupLog(dataDir string) error {
	dir := filepath.Join(dataDir, "log")
	if err := os.MkdirAll(dir, 0755); err != nil {
		return err
	}
	var err error
	l.log, err = NewLog(dir, l.config)
	return err
}
func (l *DistributedLog) setupRaft(dataDir string) error {
// finite state machine
fsm := &fsm{log: l.log}
logDir := filepath.Join(dataDir, "raft", "log")
if err := os.MkdirAll(logDir, 0755); err != nil {
return err
}
logConfig := l.config
logConfig.Segment.InitialOffset = 1
logStore, err := newLogStore(logDir, logConfig)
if err != nil {
return err
}
// stable store is a key-value store where Raft stores important metadata.
// like server's current term or the candidate the server voted for.
// Bolt is an embeded and persisted key-value database for Go
stableStore, err := raftboltdb.NewBoltStore(
filepath.join(dataDir, "raft", "stable"),
)
if err != nil {
return err
}
retain := 1
snapshotStore, err := raft.NewFileSnapshotStore(
filepath.Join(dataDir, "raft"),
retain,
o.Stderr,
)
if err != nil {
return err
}
maxPool := 5
timeout := 10 * time.Second
transport := raft.NewNetworkTransport(
l.config.Raft.StreamLayer,
maxPool,
timeout,
os.Stderr,
)
// localID is the unique ID for this server
config := raft.DefaultConfig()
config.LocalID = l.config.Raft.LocalID
if l.config.Raft.HeartbeatTimeout != 0 {
config.HeartbeatTimeout = l.config.Raft.HeartbeatTimeout
}
if l.config.Raft.ElectionTimeout != 0 {
config.ElectionTimeout = l.config.Raft.ElectionTimeout
}
if l.config.Raft.LeaderLeaseTimeout != 0 {
config.LeaderLeaseTimeout = l.config.Raft.LeaderLeaseTimeout
}
if l.config.Raft.CommitTimeout != 0 {
config.CommitTimeout = l.config.Raft.CommitTimeout
}
// create raft instance and bootstrap the cluster
l.raft, err = raft.NewRaft(
config,
fsm,
logStore,
stableStore,
snapshotStore,
transport,
)
if err != nil {
return err
}
hasState, err := raft.HasExistingState(logStore, stableStore, snapshotStore)
if err != nil{
return err
}
if l.config.Raft.Bootstrap && !hasState{
config := raft.Configuration{
Servers: []raft.Server{{ID: config.LocalID, Address: transport.LocalAddr()}}
}
err = l.raft.BootstrapCluster(config).Error()
}
return err
}
// Append replicates the record through Raft: the command is applied on
// a majority of servers before the record's offset is returned.
func (l *DistributedLog) Append(record *api.Record) (uint64, error) {
	resp, err := l.apply(AppendRequestType, &api.ProduceRequest{Record: record})
	if err != nil {
		return 0, err
	}
	produce := resp.(*api.ProduceResponse)
	return produce.Offset, nil
}
func (l *DistributedLog) apply(reqType RequestType, req proto.Message) (interface{}, error){
var buf bytes.Buffer
_, err := buf.Write([]byte{byte(reqType)})
if err != nil {
return nil, err
}
b, err := proto.Marshal(req)
if err != nil{
return nil, err
}
_, err = buf.Write(b)
if err != nil {
return nil, err
}
timeout := 10 * time.Second
future := l.raft.Apply(buf.Bytes(), timeout)
if future.Error() != nil{
return nil, future.Error()
}
res := future.Response()
// check if the res is an error
if err, ok := res.(error): ok{
return nil, err
}
return res, nil
}
// Read returns the record at offset from the local log.
// Reads are served locally (relaxed consistency) rather than going
// through Raft (strong consistency).
func (l *DistributedLog) Read(offset uint64) (*api.Record, error) {
	// using relaxed consistency instead of strong consistency here.
	return l.log.Read(offset)
}
// Join adds the server with the given id and address to the Raft
// cluster as a voter. The distributed log acts as the Serf membership
// handler, so Join/Leave keep Raft in sync with membership events.
// A server already present with the same id AND address is a no-op; a
// conflicting entry (same id or same address, but not both) is removed
// before the new voter is added.
//
// Fixes: l.raft.addVoter is unexported and does not exist; the raft
// API method is AddVoter.
func (l *DistributedLog) Join(id, addr string) error {
	configFuture := l.raft.GetConfiguration()
	if err := configFuture.Error(); err != nil {
		return err
	}
	serverID := raft.ServerID(id)
	serverAddr := raft.ServerAddress(addr)
	// Duplication check.
	for _, srv := range configFuture.Configuration().Servers {
		if srv.ID == serverID || srv.Address == serverAddr {
			if srv.ID == serverID && srv.Address == serverAddr {
				return nil // server has already joined
			}
			// Remove the conflicting existing server.
			removeFuture := l.raft.RemoveServer(serverID, 0, 0)
			if err := removeFuture.Error(); err != nil {
				return err
			}
		}
	}
	addFuture := l.raft.AddVoter(serverID, serverAddr, 0, 0)
	if err := addFuture.Error(); err != nil {
		return err
	}
	return nil
}
// Leave removes the server with the given id from the Raft cluster.
// Removing the current leader triggers a new election.
func (l *DistributedLog) Leave(id string) error {
	removeFuture := l.raft.RemoveServer(raft.ServerID(id), 0, 0)
	return removeFuture.Error()
}
// WaitForLeader blocks until the cluster has elected a leader, polling
// once per second, or fails after timeout. Most operations must run on
// the leader, so callers typically wait before issuing writes.
func (l *DistributedLog) WaitForLeader(timeout time.Duration) error {
	deadline := time.After(timeout)
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-deadline:
			return fmt.Errorf("time out")
		case <-ticker.C:
			if addr := l.raft.Leader(); addr != "" {
				return nil
			}
		}
	}
}
// Close shuts down the Raft node and then closes the local log,
// returning the first error encountered.
func (l *DistributedLog) Close() error {
	f := l.raft.Shutdown()
	if err := f.Error(); err != nil {
		return err
	}
	return l.log.Close()
}
// RequestType tags the first byte of every Raft log entry so the FSM
// knows how to decode the remaining payload.
type RequestType uint8

const (
	// AppendRequestType marks an api.ProduceRequest payload.
	AppendRequestType RequestType = 0
)
// Apply is invoked by Raft after an entry is committed. It dispatches
// on the leading request-type byte and applies the payload that
// follows; unknown types yield nil.
func (l *fsm) Apply(record *raft.Log) interface{} {
	data := record.Data
	switch RequestType(data[0]) {
	case AppendRequestType:
		return l.applyAppend(data[1:])
	}
	return nil
}
// applyAppend decodes a ProduceRequest from b, appends its record to
// the local log, and returns either the resulting ProduceResponse or
// the error (errors travel back to apply() via the future's response).
//
// Fixes: req.Reocrd was a typo for req.Record.
func (l *fsm) applyAppend(b []byte) interface{} {
	var req api.ProduceRequest
	if err := proto.Unmarshal(b, &req); err != nil {
		return err
	}
	offset, err := l.log.Append(req.Record)
	if err != nil {
		return err
	}
	return &api.ProduceResponse{Offset: offset}
}
// Snapshot returns an FSMSnapshot that streams the log's entire
// contents. Raft calls it per the configured SnapshotInterval and
// SnapshotThreshold (how many logs since the last snapshot before a
// new one is taken).
func (f *fsm) Snapshot() (raft.FSMSnapshot, error) {
	return &snapshot{reader: f.log.Reader()}, nil
}
// Compile-time check that snapshot satisfies raft.FSMSnapshot.
var _ raft.FSMSnapshot = (*snapshot)(nil)

// snapshot streams a point-in-time copy of the log's data to Raft.
type snapshot struct {
	reader io.Reader
}

// Persist copies the snapshot data into sink. On failure the sink is
// cancelled so Raft discards the partial snapshot; otherwise the sink
// is closed to finalize it.
func (s *snapshot) Persist(sink raft.SnapshotSink) error {
	if _, err := io.Copy(sink, s.reader); err != nil {
		_ = sink.Cancel()
		return err
	}
	return sink.Close()
}

// Release is a no-op: the snapshot holds no resources to free.
func (s *snapshot) Release() {}
// Restore rebuilds the FSM's log from a snapshot stream. Records are
// length-prefixed: a fixed-size header (lenWidth bytes, decoded with
// enc — both defined elsewhere in this package) followed by a
// proto-encoded api.Record. On the first record the log is reset so its
// initial offset matches the snapshot's first offset.
func (f *fsm) Restore(r io.ReadCloser) error {
	b := make([]byte, lenWidth)
	var buf bytes.Buffer
	for i := 0; ; i++ {
		_, err := io.ReadFull(r, b)
		if err == io.EOF {
			// Clean end of stream: all records restored.
			break
		} else if err != nil {
			return err
		}
		size := int64(enc.Uint64(b))
		if _, err = io.CopyN(&buf, r, size); err != nil {
			return err
		}
		record := &api.Record{}
		if err = proto.Unmarshal(buf.Bytes(), record); err != nil {
			return err
		}
		if i == 0 {
			// Align the log's starting offset with the snapshot and drop
			// any existing data before replaying.
			f.log.Config.Segment.InitialOffset = record.Offset
			if err := f.log.Reset(); err != nil {
				return err
			}
		}
		if _, err = f.log.Append(record); err != nil {
			return err
		}
		// Reuse the buffer for the next record.
		buf.Reset()
	}
	return nil
}
var _ raft.LogStore = (*logStore)(nil)
type logStore struct {
*Log
}
func newLogStore(dir string, c Config) (*logStore, error){
log, err := NewLog(dir, c)
if err != nil{
return nil, err
}
return &logStore{log}, nil
}
func (l *logStore) FirstIndex() (uint64, error){
return l.LowestOffset()
}
func (l *logStore) LastIndex()(uint64, error){
return l.HighestOffset()
}
func (l *logStore) GetLog(index uint64, out *raft.Log) error {
in, err := l.Read(index)
if err != nil{
return err
}
out.Data = in.Value
out.index = in.Offset
out.Type = raft.LogType(in.Type)
out.Term = in.Term
return nil
}
func (l *logStore) StoreLog(record *raft.Log) error{
return l.StoreLogs([]*raft.Log{record})
}
func (l *logStore) StoreLogs(records []*raft.Log) error{
for _, record := range records {
if _, err := l.Append(&api.Record{
Value: record.Data
Term: record.Term
Type: uint32(record.Type)
}); err != nil{
return err
}
}
return nil
}
func (l *logStore) DeleteRange(min, max uint64) error {
return l.Trancate(max)
}
// Compile-time check that StreamLayer satisfies raft.StreamLayer.
var _ raft.StreamLayer = (*StreamLayer)(nil)

// StreamLayer carries Raft traffic over a shared listener, optionally
// wrapping connections in TLS.
type StreamLayer struct {
	ln net.Listener
	// serverTLSConfig secures accepted (incoming) connections.
	serverTLSConfig *tls.Config
	// peerTLSConfig secures dialed (outgoing) connections.
	peerTLSConfig *tls.Config
}

// NewStreamLayer builds a StreamLayer over ln; either TLS config may be
// nil to use plaintext in that direction.
func NewStreamLayer(
	ln net.Listener,
	serverTLSConfig,
	peerTLSConfig *tls.Config,
) *StreamLayer {
	return &StreamLayer{
		ln:              ln,
		serverTLSConfig: serverTLSConfig,
		peerTLSConfig:   peerTLSConfig,
	}
}

// RaftRPC is the first byte written on an outgoing connection so a mux
// can tell Raft traffic apart from the Log gRPC requests sharing the
// same port.
const RaftRPC = 1

// Dial opens a TCP connection to addr, writes the RaftRPC marker byte,
// and upgrades to TLS when a peer TLS config is set.
//
// Fixes: the connection is now closed when writing the marker byte
// fails, instead of being leaked.
func (s *StreamLayer) Dial(
	addr raft.ServerAddress,
	timeout time.Duration,
) (net.Conn, error) {
	dialer := &net.Dialer{Timeout: timeout}
	conn, err := dialer.Dial("tcp", string(addr))
	if err != nil {
		return nil, err
	}
	if _, err = conn.Write([]byte{byte(RaftRPC)}); err != nil {
		_ = conn.Close()
		return nil, err
	}
	if s.peerTLSConfig != nil {
		conn = tls.Client(conn, s.peerTLSConfig)
	}
	return conn, err
}

// Accept mirrors Dial: it accepts an incoming connection, reads the
// identifying byte, rejects non-Raft traffic, and upgrades to TLS when
// a server TLS config is set.
//
// Fixes: bytes.Compare(...) != 0 replaced with the idiomatic
// !bytes.Equal(...) (staticcheck S1004).
func (s *StreamLayer) Accept() (net.Conn, error) {
	conn, err := s.ln.Accept()
	if err != nil {
		return nil, err
	}
	b := make([]byte, 1)
	if _, err = conn.Read(b); err != nil {
		return nil, err
	}
	if !bytes.Equal([]byte{byte(RaftRPC)}, b) {
		return nil, fmt.Errorf("not a raft rpc")
	}
	if s.serverTLSConfig != nil {
		return tls.Server(conn, s.serverTLSConfig), nil
	}
	return conn, nil
}

// Close closes the underlying listener.
func (s *StreamLayer) Close() error {
	return s.ln.Close()
}

// Addr returns the listener's address.
func (s *StreamLayer) Addr() net.Addr {
	return s.ln.Addr()
}
|
package viewmodel
// Signup is the view model backing the signup page.
type Signup struct {
	Title                string
	Active               string
	Email                string
	Password             string
	PasswordConfirmation string
	FirstName            string
	LastName             string
	Alert                string
	AlertMessage         string
	AlertDanger          string
	AlertSuccess         string
}

// NewSignup returns a Signup preset with the site title, the "home"
// navigation item active, and the alert box hidden.
func NewSignup() Signup {
	vm := Signup{}
	vm.Title = "Lemonade Stand Supply"
	vm.Active = "home"
	vm.Alert = "invisible"
	return vm
}
|
package dshelp
import (
"testing"
cid "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
)
// TestKey round-trips a CID through the datastore-key helpers and
// verifies the result parses back to the same CID.
func TestKey(t *testing.T) {
	original, _ := cid.Decode("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq")
	key := CidToDsKey(original)
	parsed, err := DsKeyToCid(key)
	if err != nil {
		t.Fatal(err)
	}
	if original.String() != parsed.String() {
		t.Fatal("should have parsed the same key")
	}
}
|
package redis
import (
"fmt"
"testing"
"time"
"github.com/garyburd/redigo/redis"
)
//var srv *disposable_redis.Server
// TestBatch pipelines a SET and a GET through a Batch against a live
// redis server and checks both promises resolve after Execute.
// NOTE(review): relies on the package-level srv fixture whose setup
// (disposable_redis + TestMain) is commented out at the bottom of the
// file — this test cannot compile/run until that is restored.
func TestBatch(t *testing.T) {
	//t.SkipNow()
	conn, e := redis.Dial("tcp", srv.Addr())
	if e != nil {
		t.Fatal("Could not connect to server:", e)
	}
	var b *Batch = NewBatch(conn)
	// Unique value so reruns against a shared server can't false-pass.
	val := fmt.Sprintf("Whatever %v", time.Now().UnixNano())
	p1, e := b.Send("SET", "foo", val)
	if e != nil {
		t.Fatal("Could not send batched command: ", e)
	}
	if p1 == nil {
		t.Fatal("Got a nil promise")
	}
	p2, e := b.Send("GET", "foo")
	if e != nil {
		t.Fatal("Could not send batched command: ", e)
	}
	if p2 == nil {
		t.Fatal("Got a nil promise")
	}
	results, e := b.Execute()
	if e != nil {
		t.Fatal("Error executing batch: ", e)
	}
	if len(results) != 2 {
		t.Fatal("Expected 2 results, got ", len(results))
	}
	// Execute must clear pending promises so the batch is reusable.
	if len(b.promises) != 0 {
		t.Fatal("Did not reset batch correctly")
	}
	if s, _ := redis.String(p1.Reply()); s != "OK" {
		t.Fatal("Invalid reply: ", s)
	}
	if s, _ := redis.String(p2.Reply()); s != val {
		t.Fatal("Invalid reply: ", s)
	}
}
// TestTransaction exercises a MULTI/EXEC-style Transaction: a SET+GET
// pair resolves after Execute, and an aborted transaction leaves both
// its promises and the stored value untouched.
// NOTE(review): like TestBatch, this depends on the commented-out srv
// fixture at the bottom of the file.
func TestTransaction(t *testing.T) {
	//t.SkipNow()
	conn, e := redis.Dial("tcp", srv.Addr())
	if e != nil {
		t.Fatal("Could not connect to server:", e)
	}
	var b *Transaction
	b = NewTransaction(conn)
	val := fmt.Sprintf("Whatever %v", time.Now().UnixNano())
	p1, e := b.Send("SET", "foo", val)
	if e != nil {
		t.Fatal("Could not send batched command: ", e)
	}
	if p1 == nil {
		t.Fatal("Got a nil promise")
	}
	p2, e := b.Send("GET", "foo")
	if e != nil {
		t.Fatal("Could not send batched command: ", e)
	}
	if p2 == nil {
		t.Fatal("Got a nil promise")
	}
	results, e := b.Execute()
	if e != nil {
		t.Fatal("Error executing batch: ", e)
	}
	if len(results) != 2 {
		t.Fatal("Expected 2 results, got ", len(results))
	}
	if len(b.promises) != 0 {
		t.Fatal("Did not reset batch correctly")
	}
	if s, _ := redis.String(p1.Reply()); s != "OK" {
		t.Fatal("Invalid reply: ", s)
	}
	if s, _ := redis.String(p2.Reply()); s != val {
		t.Fatal("Invalid reply: ", s)
	}
	// now we check that aborting really does nothing
	p3, e := b.Send("SET", "foo", "not val")
	if e != nil {
		t.Fatal("Could not send batched command: ", e)
	}
	if e = b.Abort(); e != nil {
		t.Fatal("Could not abort transaction: ", e)
	}
	if p3.Value != nil {
		t.Fatal("Promise of aborted transaction has value")
	}
	// Fresh connection: Abort may leave the old one mid-transaction.
	conn, _ = redis.Dial("tcp", srv.Addr())
	b = NewTransaction(conn)
	p3, e = b.Send("GET", "foo")
	if e != nil {
		t.Fatal("Could not get value:", e)
	}
	if _, e = b.Execute(); e != nil {
		t.Error("Could not execute transaction", e)
	}
	if s, _ := redis.String(p3.Reply()); s != val {
		t.Fatal("Aborting transaction changed value to", s)
	}
}
// ExampleBatch demonstrates the Batch API: queue commands, Execute,
// then read each promise's reply.
// NOTE(review): the immediate return below deliberately disables the
// body — it needs the srv fixture that is commented out at the bottom
// of the file. The misspelled "Outpux" likewise keeps `go test` from
// treating the final comment as a verified example output. Restore
// both together.
func ExampleBatch() {
	return
	conn, e := redis.Dial("tcp", srv.Addr())
	if e != nil {
		panic(e)
	}
	// create a new batch from the connection
	b := NewBatch(conn)
	// send a SET command
	if _, e := b.Send("SET", "foo", "BAR"); e != nil {
		panic(e)
	}
	// Send a GET command and keep a promise that will contain its value after execution
	promise, e := b.Send("GET", "foo")
	if e != nil {
		panic(e)
	}
	// execute also returns the promises, but we're not interested in this right now
	if _, e := b.Execute(); e != nil {
		panic(e)
	}
	s, _ := redis.String(promise.Reply())
	fmt.Println(s)
	// Outpux: BAR
}
//func TestMain(m *testing.M) {
// srv, _ = disposable_redis.NewServerRandomPort()
// rc := m.Run()
// srv.Stop()
// os.Exit(rc)
//}
|
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestMemoryStorage exercises Meme/Unmeme bookkeeping on a fresh
// in-memory storage, including Unmeme calls for unknown users.
func TestMemoryStorage(t *testing.T) {
	ms := newMemoryStorage()
	imageRanks := ms.GetImageRanks()
	assert.Equal(t, len(imageRanks), 0, "New MemoryStorage is empty.")
	ms.Meme("me", "http://foo.bar/z.gif")
	imageRanks = ms.GetImageRanks()
	assert.Equal(t, len(imageRanks), 1, "Meme adds an image to the list.")
	ms.Unmeme("me", "http://foo.bar/z.gif")
	imageRanks = ms.GetImageRanks()
	// NOTE(review): the message claims Unmeme removes the image, but the
	// assertion still expects length 1 — confirm whether Unmeme should
	// drop the entry (expected 0) or only decrement its rank (message
	// wrong). One of the two needs fixing.
	assert.Equal(t, len(imageRanks), 1, "Unmeme removes an image from the list.")
	/* don't crash */
	ms.Unmeme("me-bad", "http://foo.bar/z.gif")
	ms.Unmeme("me-bad", "http://foo.bar/z2.gif")
}
// TestMemeFactor spot-checks the ordering behavior of
// calculateMemeFactor(count, timestamp): more votes at the same time
// rank higher, and a newer timestamp can outweigh a large count.
func TestMemeFactor(t *testing.T) {
	assert.True(t, calculateMemeFactor(2, 1134028003) > calculateMemeFactor(1, 1134028003), "math")
	assert.True(t, calculateMemeFactor(3, 1134028003) > calculateMemeFactor(1, 1134028003), "math")
	assert.True(t, calculateMemeFactor(300, 1134028003) < calculateMemeFactor(1, 1144028003), "math")
}
|
// Package cmd contains definitions for executable commands and is responsible
// for the validation of flags and arguments.
package cmd
import (
"github.com/urfave/cli"
"github.com/davidsbond/mona/internal/command"
"github.com/davidsbond/mona/internal/config"
)
// The ActionFunc type is a command action that receives the CLI context
// and the loaded project configuration, and returns a single error.
// Wrap an ActionFunc with withProject to adapt it to cli.ActionFunc.
type ActionFunc func(ctx *cli.Context, cfg command.Config) error
// withProject adapts fn to a cli.ActionFunc: before fn runs, the mona
// project root is located from the global "wd" flag and the project is
// loaded, so every command action receives a ready command.Config.
func withProject(fn ActionFunc) cli.ActionFunc {
	return func(ctx *cli.Context) error {
		workDir := ctx.GlobalString("wd")
		root, err := config.GetProjectRoot(workDir)
		if err != nil {
			return err
		}
		project, err := config.LoadProject(root)
		if err != nil {
			return err
		}
		cfg := command.Config{
			Project:  project,
			FailFast: ctx.GlobalBool("fail-fast"),
		}
		return fn(ctx, cfg)
	}
}
|
package utils
import (
"context"
"github.com/mojocn/base64Captcha"
"time"
"github.com/go-redis/redis/v8"
)
// CaptchaConfig carries the Redis key prefix and TTL used when storing
// captcha answers.
type CaptchaConfig struct {
	KeyPrefix string
	Expire    time.Duration
}

// Captcha generates image captchas and validates submitted answers
// against values stored in Redis.
type Captcha struct {
	redis     *redis.Client
	keyPrefix string
	expire    time.Duration
}

// NewCaptcha builds a Captcha backed by redisClient using config.
func NewCaptcha(redisClient *redis.Client, config CaptchaConfig) *Captcha {
	return &Captcha{
		redis:     redisClient,
		keyPrefix: config.KeyPrefix,
		expire:    config.Expire,
	}
}

// GenerateCaptcha draws a 4-digit image captcha, stores the expected
// answer in Redis under keyPrefix+id, and returns the captcha id along
// with the base64-encoded image.
func (r *Captcha) GenerateCaptcha() (id string, b64s string, err error) {
	driver := &base64Captcha.DriverDigit{
		Height:   80,
		Width:    240,
		Length:   4,
		MaxSkew:  0.7,
		DotCount: 80,
	}
	store := base64Captcha.DefaultMemStore
	c := base64Captcha.NewCaptcha(driver, store)
	id, content, answer := c.Driver.GenerateIdQuestionAnswer()
	item, err := c.Driver.DrawCaptcha(content)
	if err != nil {
		return "", "", err
	}
	// Persist the expected answer so VerifyCaptcha can check it later.
	if _, setErr := r.redis.Set(context.Background(), r.keyPrefix+id, answer, r.expire).Result(); setErr != nil {
		return "", "", setErr
	}
	b64s = item.EncodeB64string()
	return id, b64s, err
}

// VerifyCaptcha compares answer with the value stored for id. On a
// match the key is deleted so each captcha can be used only once.
func (r *Captcha) VerifyCaptcha(id, answer string) (bool, error) {
	key := r.keyPrefix + id
	stored, err := r.redis.Get(context.Background(), key).Result()
	if err != nil {
		return false, err
	}
	if stored != answer {
		return false, nil
	}
	r.redis.Del(context.Background(), key)
	return true, nil
}
|
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/http/httptest"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"gopkg.in/redis.v3"
)
// TestProxy is an integration test: it stands up a slow fake backend
// and a caching proxy backed by a live Redis (127.0.0.1:6379, DB 2),
// then hammers the proxy with concurrent clients and checks every
// response matches its request path.
// NOTE(review): requires a reachable local Redis; panics otherwise.
func TestProxy(t *testing.T) {
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	client := redis.NewClient(&redis.Options{
		Addr:     "127.0.0.1:6379",
		Password: "",
		DB:       2,
	})
	if err := client.Ping().Err(); err != nil {
		log.Panic(err)
	}
	// Random key suffix isolates this run's topic/value/lock namespaces.
	testKey := fmt.Sprintf("%v", rnd.Intn(10000))
	// Backend answers after a 1-2s delay with a body echoing the URI.
	backend := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
		time.Sleep(time.Millisecond * time.Duration(1000+rnd.Intn(1000)))
		writer.WriteHeader(200)
		writer.Write([]byte("response=" + request.URL.RequestURI()))
	}))
	proxy := NewProxy(client, &ProxyOptions{
		BackendURL:    backend.URL,
		CookieName:    "foo",
		StatsInterval: time.Millisecond * 100,
		RenderTimeout: time.Second * 2,
		CacheDuration: time.Second * 4,
		TopicName:     "cachier-test-" + testKey,
		ValuePrefix:   "value-test-" + testKey,
		LockPrefix:    "lock-test-" + testKey,
	})
	server := httptest.NewServer(proxy)
	defer proxy.Close()
	defer server.Close()
	defer backend.Close()
	// 400 clients across ~25 distinct URLs, staggered over 2 seconds, so
	// many requests collide on the same cache entry.
	clients := 400
	var wg sync.WaitGroup
	wg.Add(clients)
	for i := 0; i < clients; i++ {
		url := fmt.Sprintf("/%v", rnd.Intn(clients>>4))
		sleep := time.Millisecond * time.Duration(rnd.Intn(2000))
		go func() {
			defer wg.Done()
			time.Sleep(sleep)
			client := &http.Client{
				Timeout: time.Second * 5,
			}
			request, err := http.NewRequest(http.MethodGet, server.URL+url, bytes.NewBuffer([]byte{}))
			if err != nil {
				log.Panic(err)
			}
			// A non-matching cookie ("foo2" vs configured "foo") must not
			// bypass the cache.
			request.AddCookie(&http.Cookie{
				Name:  "foo2",
				Value: "bar",
			})
			res, httpErr := client.Do(request)
			if httpErr == nil {
				body, readErr := ioutil.ReadAll(res.Body)
				if readErr == nil {
					assert.Equal(t, "response="+url, string(body), "response")
				} else {
					assert.Nil(t, readErr, "read error")
				}
			} else {
				assert.Nil(t, httpErr, "http client error")
			}
		}()
	}
	wg.Wait()
	assert.Empty(t, proxy.GetStats(), "no waiting handlers")
}
|
package service
import (
"context"
"culture/cloud/base/server/rpc/proto"
"errors"
"log"
"time"
)
// DemoService exposes the demo user RPC endpoints.
type DemoService struct{}

// UserInfo logs the call with a timestamp, then returns a canned user
// for any positive uid and an error otherwise.
func (s *DemoService) UserInfo(ctx context.Context, req *proto.Request) (*proto.Response, error) {
	log.Println("DemoService UserInfo " + time.Now().Format("2006-01-02 15:04:05"))
	if req.Uid <= 0 {
		return nil, errors.New("id 不能小于1")
	}
	return &proto.Response{
		Id:       req.Uid,
		Username: "dds",
		Nickname: "栖枝",
	}, nil
}
|
package storage
import (
"sync"
"github.com/appootb/substratum/storage"
)
// Init registers this package's Manager as the storage implementor,
// unless an implementor has already been registered.
func Init() {
	if storage.Implementor() != nil {
		return
	}
	storage.RegisterImplementor(&Manager{})
}
// Manager maps component names to their Storage instances on top of a
// concurrency-safe sync.Map.
type Manager struct {
	sync.Map
}

// New registers a fresh, empty Storage for component, replacing any
// existing entry.
func (m *Manager) New(component string) {
	m.Store(component, &Storage{})
}

// Get returns the Storage registered for component, or nil when none
// has been registered.
func (m *Manager) Get(component string) storage.Storage {
	if s, ok := m.Load(component); ok {
		return s.(storage.Storage)
	}
	return nil
}
|
// Licensed to SolID under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. SolID licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package main
import (
"encoding/json"
"fmt"
"time"
"github.com/dchest/uniuri"
"github.com/square/go-jose/v3"
jwt "github.com/square/go-jose/v3/jwt"
)
// clientPrivateKey is a hard-coded EC P-384 signing key in JWK form
// (alg ES384), used below to sign the demo client assertion.
// NOTE(review): acceptable for a demo binary only — never commit real
// private keys to source control.
var clientPrivateKey = []byte(`{
"kty": "EC",
"d": "Uwq56PhVB6STB8MvLQWcOsKQlZbBvWFQba8D6Uhb2qDunpzqvoNyFsnAHKS_AkQB",
"use": "sig",
"crv": "P-384",
"x": "m2NDaWfRRGlCkUa4FK949uLtMqitX1lYgi8UCIMtsuR60ux3d00XBlsC6j_YDOTe",
"y": "6vxuUq3V1aoWi4FQ_h9ZNwUsmcGP8Uuqq_YN5dhP0U8lchdmZJbLF9mPiimo_6p4",
"alg": "ES384"
}`)
// privateJWTClaims is the claim set serialized into the signed client
// assertion produced by generateAssertion. Expires and IssuedAt are
// Unix timestamps in seconds.
type privateJWTClaims struct {
	JTI      string `json:"jti"`
	Subject  string `json:"sub"`
	Issuer   string `json:"iss"`
	Audience string `json:"aud"`
	Expires  uint64 `json:"exp"`
	IssuedAt uint64 `json:"iat"`
}
// generateAssertion signs claims as a compact JWT using the embedded
// ES384 client key and returns the serialized assertion. Any failure
// panics, which is acceptable for this demo binary.
func generateAssertion(claims *privateJWTClaims) string {
	var key jose.JSONWebKey
	if err := json.Unmarshal(clientPrivateKey, &key); err != nil {
		panic(err)
	}
	opts := (&jose.SignerOptions{}).WithType("JWT")
	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES384, Key: key}, opts)
	if err != nil {
		panic(err)
	}
	assertion, err := jwt.Signed(signer).Claims(claims).CompactSerialize()
	if err != nil {
		panic(err)
	}
	return assertion
}
func main() {
fmt.Printf("%s\n", generateAssertion(&privateJWTClaims{
JTI: uniuri.NewLen(8),
Subject: "6779ef20e75817b79602",
Issuer: "6779ef20e75817b79602",
Audience: "http://127.0.0.1:8080",
Expires: uint64(time.Now().Add(2 * time.Hour).Unix()),
IssuedAt: uint64(time.Now().Unix()),
}))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.