text
stringlengths 11
4.05M
|
|---|
package main
import "fmt"
// main solves Project Euler problem 9: find the Pythagorean triple
// (a, b, c) with a + b + c = 1000 and print the product a*b*c.
func main() {
	for a := 1; a <= 1000; a++ {
		for b := 1; a+b <= 1000; b++ {
			// c is forced by the sum constraint; only the Pythagorean
			// condition remains to be checked.
			c := 1000 - a - b
			if a*a+b*b != c*c {
				continue
			}
			fmt.Println(a * b * c)
			return
		}
	}
}
|
package internal_test
import (
"archive/tar"
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/paketo-buildpacks/packit/cargo/jam/internal"
"github.com/paketo-buildpacks/packit/scribe"
"github.com/sclevine/spec"
. "github.com/onsi/gomega"
)
// testTarBuilder exercises internal.TarBuilder.Build: the happy path of
// producing a tarball (regular files, the implicitly created "bin"
// directory, and a symlink), plus failure cases for an unwritable
// destination, a short write, and a file with no FileInfo.
func testTarBuilder(t *testing.T, context spec.G, it spec.S) {
	var (
		Expect = NewWithT(t).Expect

		tempFile string
		tempDir  string
		output   *bytes.Buffer
		builder  internal.TarBuilder
	)

	it.Before(func() {
		var err error
		tempDir, err = os.MkdirTemp("", "output")
		Expect(err).NotTo(HaveOccurred())

		tempFile = filepath.Join(tempDir, "buildpack.tgz")

		output = bytes.NewBuffer(nil)
		builder = internal.NewTarBuilder(scribe.NewLogger(output))
	})

	it.After(func() {
		Expect(os.RemoveAll(tempDir)).To(Succeed())
	})

	context("Build", func() {
		context("given a destination and a list of files", func() {
			it("constructs a tarball", func() {
				err := builder.Build(tempFile, []internal.File{
					{
						Name:       "buildpack.toml",
						Info:       internal.NewFileInfo("buildpack.toml", len("buildpack-toml-contents"), 0644, time.Now()),
						ReadCloser: io.NopCloser(strings.NewReader("buildpack-toml-contents")),
					},
					{
						Name:       "bin/build",
						Info:       internal.NewFileInfo("build", len("build-contents"), 0755, time.Now()),
						ReadCloser: io.NopCloser(strings.NewReader("build-contents")),
					},
					{
						Name:       "bin/detect",
						Info:       internal.NewFileInfo("detect", len("detect-contents"), 0755, time.Now()),
						ReadCloser: io.NopCloser(strings.NewReader("detect-contents")),
					},
					{
						Name: "bin/link",
						Info: internal.NewFileInfo("link", len("./build"), os.ModeSymlink|0755, time.Now()),
						Link: "./build",
					},
				})
				Expect(err).NotTo(HaveOccurred())

				// The builder logs the destination and each entry it writes.
				Expect(output.String()).To(ContainSubstring(fmt.Sprintf("Building tarball: %s", tempFile)))
				Expect(output.String()).To(ContainSubstring("bin/build"))
				Expect(output.String()).To(ContainSubstring("bin/detect"))
				Expect(output.String()).To(ContainSubstring("bin/link"))
				Expect(output.String()).To(ContainSubstring("buildpack.toml"))

				file, err := os.Open(tempFile)
				Expect(err).NotTo(HaveOccurred())

				contents, hdr, err := ExtractFile(file, "buildpack.toml")
				Expect(err).NotTo(HaveOccurred())
				Expect(string(contents)).To(Equal("buildpack-toml-contents"))
				Expect(hdr.Mode).To(Equal(int64(0644)))

				// The "bin" directory entry is created implicitly from the
				// entry paths.
				contents, hdr, err = ExtractFile(file, "bin")
				Expect(err).NotTo(HaveOccurred())
				Expect(string(contents)).To(BeEmpty())
				Expect(hdr.Mode).To(Equal(int64(0777)))
				Expect(hdr.Typeflag).To(Equal(uint8(tar.TypeDir)))

				contents, hdr, err = ExtractFile(file, "bin/build")
				Expect(err).NotTo(HaveOccurred())
				Expect(string(contents)).To(Equal("build-contents"))
				Expect(hdr.Mode).To(Equal(int64(0755)))

				contents, hdr, err = ExtractFile(file, "bin/detect")
				Expect(err).NotTo(HaveOccurred())
				Expect(string(contents)).To(Equal("detect-contents"))
				Expect(hdr.Mode).To(Equal(int64(0755)))

				_, hdr, err = ExtractFile(file, "bin/link")
				Expect(err).NotTo(HaveOccurred())
				Expect(hdr.Typeflag).To(Equal(byte(tar.TypeSymlink)))
				Expect(hdr.Linkname).To(Equal("./build"))
				Expect(hdr.Mode).To(Equal(int64(0755)))
			})
		})

		context("failure cases", func() {
			context("when it is unable to create the destination file", func() {
				it.Before(func() {
					Expect(os.Chmod(tempDir, 0000)).To(Succeed())
				})

				// BUG FIX: this hook was registered as a second it.Before,
				// which restored permissions before the test body ran and
				// defeated the 0000 chmod above. It must run after the test
				// so the top-level cleanup can still remove tempDir.
				it.After(func() {
					Expect(os.Chmod(tempDir, 0644)).To(Succeed())
				})

				it("returns an error", func() {
					err := builder.Build(tempFile, []internal.File{
						{
							Name:       "bin/build",
							Info:       internal.NewFileInfo("build", len("build-contents"), 0755, time.Now()),
							ReadCloser: io.NopCloser(strings.NewReader("build-contents")),
						},
					})
					Expect(err).To(MatchError(ContainSubstring("failed to create tarball")))
					Expect(err).To(MatchError(ContainSubstring("permission denied")))
				})
			})

			context("when one of the files cannot be written", func() {
				it("returns an error", func() {
					// Declared size (1) is smaller than the content, so the
					// tar writer reports a short-write error.
					err := builder.Build(tempFile, []internal.File{
						{
							Name:       "bin/build",
							Info:       internal.NewFileInfo("build", 1, 0755, time.Now()),
							ReadCloser: io.NopCloser(strings.NewReader("build-contents")),
						},
					})
					Expect(err).To(MatchError(ContainSubstring("failed to write file to tarball")))
					Expect(err).To(MatchError(ContainSubstring("write too long")))
				})
			})

			context("when one of the files cannot have its header created", func() {
				it("returns an error", func() {
					err := builder.Build(tempFile, []internal.File{
						{
							Name:       "bin/build",
							ReadCloser: io.NopCloser(strings.NewReader("build-contents")),
						},
					})
					Expect(err).To(MatchError(ContainSubstring("failed to create header for file \"bin/build\":")))
					Expect(err).To(MatchError(ContainSubstring("FileInfo is nil")))
				})
			})
		})
	})
}
|
package solutions
func plusOne(digits []int) []int {
for i := len(digits); i > 0; i-- {
if digits[i - 1] < 9 {
digits[i - 1] += 1
return digits
}
digits[i - 1] = 0
if i == 1 {
digits = append([]int{1}, digits...)
}
}
return digits
}
|
package wsqueue
import (
"encoding/json"
"os"
"reflect"
"strconv"
"time"
"github.com/satori/go.uuid"
)
// Header carries message metadata as string key/value pairs (content-type,
// application-type, id, date, host — see newMessage).
type Header map[string]string

// Message is the wire representation of a queued payload: metadata headers
// plus a string-serialized body.
type Message struct {
	Header Header `json:"metadata,omitempty"`
	Body   string `json:"data"`
}
// newMessage wraps arbitrary data in a Message. Strings, ints, and bools
// (and pointers to them) are stored directly with a matching content-type
// header; anything else is JSON-marshaled with its concrete Go type
// recorded in the "application-type" header. Provenance headers (id, date,
// host) are always set.
//
// BUG FIX: the original switched on the type set (e.g. `case int, *int,
// int32, ...`) but then asserted a single concrete type (`data.(int)`),
// which panics for every other type in the case list. Binding the switch
// variable and handling each type explicitly fixes that.
func newMessage(data interface{}) (*Message, error) {
	m := Message{
		Header: make(map[string]string),
		Body:   "",
	}
	switch v := data.(type) {
	case string:
		m.Header["content-type"] = "string"
		m.Body = v
	case *string:
		m.Header["content-type"] = "string"
		m.Body = *v
	case int:
		m.Header["content-type"] = "int"
		m.Body = strconv.Itoa(v)
	case *int:
		m.Header["content-type"] = "int"
		m.Body = strconv.Itoa(*v)
	case int32:
		m.Header["content-type"] = "int"
		m.Body = strconv.FormatInt(int64(v), 10)
	case *int32:
		m.Header["content-type"] = "int"
		m.Body = strconv.FormatInt(int64(*v), 10)
	case int64:
		m.Header["content-type"] = "int"
		m.Body = strconv.FormatInt(v, 10)
	case *int64:
		m.Header["content-type"] = "int"
		m.Body = strconv.FormatInt(*v, 10)
	case bool:
		m.Header["content-type"] = "bool"
		m.Body = strconv.FormatBool(v)
	case *bool:
		m.Header["content-type"] = "bool"
		m.Body = strconv.FormatBool(*v)
	default:
		m.Header["content-type"] = "application/json"
		// Record the pointee's type for pointers so the consumer sees the
		// logical payload type either way.
		if reflect.TypeOf(data).Kind() == reflect.Ptr {
			m.Header["application-type"] = reflect.ValueOf(data).Elem().Type().String()
		} else {
			m.Header["application-type"] = reflect.ValueOf(data).Type().String()
		}
		b, err := json.Marshal(data)
		if err != nil {
			return nil, err
		}
		m.Body = string(b)
	}
	m.Header["id"] = uuid.NewV1().String()
	m.Header["date"] = time.Now().String()
	m.Header["host"], _ = os.Hostname()
	return &m, nil
}
// String renders the message for debugging: a header section with one
// "key:value" line per header entry, followed by the raw body.
func (m *Message) String() string {
	out := "\n---HEADER---"
	for key, value := range m.Header {
		out += "\n" + key + ":" + value
	}
	out += "\n---BODY---"
	out += "\n" + m.Body
	return out
}
// ID returns the message id header (a v1 UUID assigned in newMessage).
func (m *Message) ID() string {
	return m.Header["id"]
}

// ContentType returns the content-type header ("string", "int", "bool" or
// "application/json" — see newMessage).
func (m *Message) ContentType() string {
	return m.Header["content-type"]
}

// ApplicationType returns application-type. Empty if content-type is not application/json.
func (m *Message) ApplicationType() string {
	return m.Header["application-type"]
}
|
package tree
import (
"math"
"testing"
)
// minDepth returns the minimum depth of a binary tree (LeetCode 111): the
// number of nodes along the shortest path from the root down to a leaf.
func minDepth(root *TreeNode) int {
	if root == nil {
		return 0
	}
	if root.Left == nil && root.Right == nil {
		return 1
	}
	// Only recurse into children that exist: a missing child must not be
	// treated as a leaf at depth 0.
	best := math.MaxInt32
	if root.Left != nil {
		if d := minDepth(root.Left); d < best {
			best = d
		}
	}
	if root.Right != nil {
		if d := minDepth(root.Right); d < best {
			best = d
		}
	}
	return best + 1
}
// Test_111 builds the two-node tree {1, left child 2} and logs its minimum
// depth. The shortest root-to-leaf path goes through the left child, so the
// expected value is 2 (note: only logged, not asserted).
func Test_111(t *testing.T) {
	var t1 = new(TreeNode)
	t1.Val, t1.Left, t1.Right = 2, nil, nil
	var t2 = new(TreeNode)
	t2.Val, t2.Left, t2.Right = 1, t1, nil
	t.Log(minDepth(t2))
}
|
package main
import (
"net/http"
"github.com/gorilla/mux"
)
// Route describes a specific route to handle unique requests
type Route struct {
	Name        string           // mux route name, used for named lookups
	Method      string           // HTTP method, e.g. "POST"
	Pattern     string           // URL path pattern
	HandlerFunc http.HandlerFunc // handler invoked for matching requests
}

// Routes is a collection of Route types
type Routes []Route
// NewRouter creates a mux.Router from the constant `routes` struct
func NewRouter() *mux.Router {
	// StrictSlash(true) makes the router redirect e.g. "/recieve/" to
	// "/recieve" instead of returning 404.
	router := mux.NewRouter().StrictSlash(true)
	for _, route := range routes {
		router.
			Methods(route.Method).
			Path(route.Pattern).
			Name(route.Name).
			Handler(route.HandlerFunc)
	}
	return router
}
// routes lists every HTTP endpoint served by this application.
// NOTE(review): "Recieve"/"/recieve" are misspelled, but the handler is
// defined elsewhere and the path may be a public contract — renaming here
// alone would break callers; fix across the codebase if at all.
var routes = Routes{
	Route{
		"RecieveMessageHandler",
		"POST",
		"/recieve",
		RecieveMessageHandler,
	},
}
|
package main
import "net/http"
// main builds the application router and serves HTTP on port 3456.
func main() {
	r := SetRouter()
	// ListenAndServe only returns on failure (e.g. the port is already in
	// use); the original silently discarded that error.
	if err := http.ListenAndServe(":3456", r); err != nil {
		panic(err)
	}
}
|
package api
import (
"net/http"
"testing"
)
// TestUpdateAlertPolicy verifies that UpdateAlertPolicy returns the policy
// exactly as the (stubbed) API reports it back: the name and incident
// preference come from the response body, not from the request payload.
func TestUpdateAlertPolicy(t *testing.T) {
	// Stub server: always answer 200 with a fixed "updated" policy.
	c := newTestAPIClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		w.Write([]byte(`
{
"policy": {
"id": 12345,
"incident_preference": "PER_POLICY",
"name": "New Name",
"created_at": 12345678900000,
"updated_at": 12345678900000
}
}
`))
	}))
	policy := AlertPolicy{
		ID:                 123,
		IncidentPreference: "PER_CONDITION",
		Name:               "Old Name",
	}
	policyResp, err := c.UpdateAlertPolicy(policy)
	if err != nil {
		t.Log(err)
		t.Fatal("UpdateAlertPolicy error")
	}
	// err is nil in this branch; the Log mirrors the branch above.
	if policyResp == nil {
		t.Log(err)
		t.Fatal("UpdateAlertPolicy error")
	}
	if policyResp.Name != "New Name" {
		t.Fatal("Failed to change policy name")
	}
	if policyResp.IncidentPreference != "PER_POLICY" {
		t.Fatal("Failed to change incident preference")
	}
}
|
package local
import (
"k8s.io/apimachinery/pkg/types"
"github.com/tilt-dev/tilt/internal/store"
)
// CmdCreateAction is the store action dispatched when a new Cmd appears.
type CmdCreateAction struct {
	Cmd *Cmd
}

// NewCmdCreateAction deep-copies cmd so the action carries an immutable
// snapshot, independent of later mutations by the caller.
func NewCmdCreateAction(cmd *Cmd) CmdCreateAction {
	return CmdCreateAction{Cmd: cmd.DeepCopy()}
}

// Compile-time check that CmdCreateAction implements store.Summarizer.
var _ store.Summarizer = CmdCreateAction{}

func (CmdCreateAction) Action() {}

// Summarize records the created Cmd's name in the change summary.
func (a CmdCreateAction) Summarize(s *store.ChangeSummary) {
	s.CmdSpecs.Add(types.NamespacedName{Name: a.Cmd.Name})
}
// CmdUpdateStatusAction is the store action dispatched when a Cmd's status
// changes. Unlike the create/delete actions it has no Summarize method —
// NOTE(review): presumably status-only changes are not summarized; confirm
// against how the store consumes these actions.
type CmdUpdateStatusAction struct {
	Cmd *Cmd
}

// NewCmdUpdateStatusAction deep-copies cmd so the action carries an
// immutable snapshot.
func NewCmdUpdateStatusAction(cmd *Cmd) CmdUpdateStatusAction {
	return CmdUpdateStatusAction{Cmd: cmd.DeepCopy()}
}

func (CmdUpdateStatusAction) Action() {}

// CmdDeleteAction is the store action dispatched when a Cmd is removed.
type CmdDeleteAction struct {
	Name string
}

func (CmdDeleteAction) Action() {}

// Summarize records the deleted Cmd's name in the change summary.
func (a CmdDeleteAction) Summarize(s *store.ChangeSummary) {
	s.CmdSpecs.Add(types.NamespacedName{Name: a.Name})
}
|
package main
import (
"fmt"
"bufio"
"os"
"strings"
"time"
"sync"
"github.com/go-errors/errors"
"gomfc/models"
"gomfc/ws_client"
"gomfc/rtmpdump"
)
// stateChanCap bounds StateChan so a stalled consumer cannot block the
// websocket reader; SendState fails fast when the buffer is full.
const stateChanCap = 10000

// ModelState is a model's data plus the time its state last changed.
type ModelState struct {
	models.MFCModel
	ChangeStateTime time.Time
}

// ModelMapType is a mutex-guarded map of model uid -> last known state,
// plus a buffered channel on which state transitions are published.
type ModelMapType struct {
	sync.RWMutex
	Data      map[uint64]ModelState
	StateChan chan ModelState
}
// Get returns the stored state for uid; ok reports whether an entry
// exists. Safe for concurrent use.
func (m *ModelMapType) Get(uid uint64) (state ModelState, ok bool) {
	m.RLock()
	state, ok = m.Data[uid]
	m.RUnlock()
	return state, ok
}
// Set stores state under uid. Safe for concurrent use.
func (m *ModelMapType) Set(uid uint64, state ModelState) {
	m.Lock()
	m.Data[uid] = state
	m.Unlock()
}
// SendState publishes state on StateChan without blocking; it returns an
// error when the channel buffer is full.
func (m *ModelMapType) SendState(state ModelState) error {
	select {
	case m.StateChan <- state:
		return nil
	default:
		return errors.New("state channel is blocked")
	}
}
var ModelMap ModelMapType
// stateHandle consumes model state updates from ModelMap.StateChan and, for
// the watched model (name compared case-insensitively), repeatedly invokes
// the recorder for as long as the model's current state allows recording.
// Returns when StateChan is closed.
func stateHandle(modelName string) {
	// Ranging over the channel replaces the original single-case select
	// plus labeled break: the loop ends when the channel is closed.
	for state := range ModelMap.StateChan {
		// strings.EqualFold is the idiomatic case-insensitive comparison
		// (avoids two ToLower allocations per message; staticcheck SA6005).
		if !strings.EqualFold(state.Nm, modelName) {
			continue
		}
		for {
			currentState, _ := ModelMap.Get(state.Uid)
			if !currentState.RecordEnable() {
				break
			}
			// Record blocks for the duration of a recording session; loop
			// to restart it while the model remains recordable.
			rtmpdump.Record(modelName, "")
		}
	}
}
// exitProgram is deferred from main. It recovers from any panic, reporting
// the error (with a stack trace) and switching the exit code to -1, then
// optionally waits for the user to press enter before exiting.
func exitProgram(waitEnter bool) {
	var exitCode = 0
	if r := recover(); r != nil {
		exitCode = -1
		e, ok := r.(error)
		if !ok {
			// BUG FIX: the original used `e, _ := r.(error)`, so a panic
			// with a non-error value (e.g. panic("msg")) printed
			// "Error: <nil>" and lost the payload. Wrap such values.
			e = fmt.Errorf("%v", r)
		}
		fmt.Println("Error:", e)
		fmt.Println(errors.Wrap(e, 2).ErrorStack())
	}
	if waitEnter {
		fmt.Print("Press enter to continue... ")
		_, _ = bufio.NewReader(os.Stdin).ReadString('\n')
	}
	os.Exit(exitCode)
}
// modelMapper is the websocket message handler. It parses each raw message
// into model data and, for models at the tracked level, publishes a state
// change on ModelMap.StateChan the first time a model is seen or whenever
// its video state (Vs) changes, then records the new state in ModelMap.
//
// Ordering note: the state is stored only after a successful send, so a
// blocked channel leaves the old state in place and the transition is
// re-detected on the next message.
func modelMapper(msg string) (err error){
	model, err := models.GetModelData(msg)
	// Service/info messages carry no model payload; skip them silently.
	if err == models.ServiceInfoError {
		err = nil
		return
	}
	if err != nil {
		return
	}
	if model.Lv == models.ModelLv {
		newState := ModelState{model, time.Now()}
		oldState, ok := ModelMap.Get(model.Uid)
		if ok {
			// Known model: publish only when the video state changed.
			if oldState.Vs != newState.Vs {
				err = ModelMap.SendState(newState)
				if err != nil {
					return
				}
			}
		} else {
			// First sighting of this model: always publish.
			err = ModelMap.SendState(newState)
			if err != nil {
				return
			}
		}
		ModelMap.Set(model.Uid, newState)
	}
	return
}
// init prepares the global ModelMap (empty state map plus the buffered
// state channel) before main runs.
func init() {
	ModelMap = ModelMapType{
		Data:      make(map[uint64]ModelState),
		StateChan: make(chan ModelState, stateChanCap),
	}
}
// main resolves the model name (interactively when run with no arguments,
// otherwise from the first CLI argument), opens the site's websocket, and
// feeds messages to modelMapper while stateHandle drives the recorder.
// exitProgram is deferred to report panics and, in interactive mode, wait
// for enter before exiting.
func main() {
	var waitEnter bool
	var modelName string
	if len(os.Args) == 1 {
		// Interactive mode: prompt for the name and strip the trailing
		// newline (plus carriage return on Windows).
		waitEnter = true
		reader := bufio.NewReader(os.Stdin)
		fmt.Print("Enter model name: ")
		modelName, _ = reader.ReadString('\n')
		modelName = strings.Replace(modelName, "\n", "", 1)
		modelName = strings.Replace(modelName, "\r", "", 1)
	} else {
		waitEnter = false
		modelName = os.Args[1]
	}
	defer exitProgram(waitEnter)
	wsConn, err := ws_client.CreateConnection(modelName, true)
	if err != nil {
		panic(err)
	}
	go stateHandle(modelName)
	wsConn.SetMsgHdlr(modelMapper)
	err = wsConn.ReadForever()
	if err != nil {
		if err == models.NotFoundError {
			// Unknown model: report plainly, no stack trace needed.
			fmt.Println(err)
		} else {
			panic(err)
		}
	}
}
|
package middleware
import (
"fmt"
"net/http"
"time"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
)
func MyTimeMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
now := time.Now()
next.ServeHTTP(writer, request)
processTime := time.Since(now)
fmt.Println("process time %v", processTime)
})
}
func helloworld(writer http.ResponseWriter, request *http.Request) {
writer.Write([]byte("hello world"))
}
// ChiApp wires up a chi router with the custom timing middleware and chi's
// built-in request logger, registers GET /hello, and serves on :8080.
func ChiApp() {
	r := chi.NewRouter()
	r.Use(MyTimeMiddleware) // custom timing middleware defined in this package
	r.Use(middleware.Logger) // chi's bundled request-logging middleware
	r.Get("/hello", helloworld)
	// ListenAndServe only returns on failure; the original silently
	// discarded that error.
	if err := http.ListenAndServe(":8080", r); err != nil {
		panic(err)
	}
}
|
package main
import "github.com/gin-gonic/gin"
// routerEntry registers every HTTP route on the gin engine: versioned
// wallet endpoints (/v1-/v4, each backed by a different handler strategy),
// a websocket endpoint (/ws), and fake-data helpers (/tools). All handlers
// are defined elsewhere in this package.
func routerEntry(router *gin.Engine) {
	v1 := router.Group("/v1")
	{
		v1.POST("/api", chinSelectCaseFunc)
	}
	v2 := router.Group("/v2")
	{
		v2.POST("/api", chinSelectFunc)
	}
	v3 := router.Group("/v3")
	{
		v3.POST("/api/deduct", DeductWalletController)
		v3.POST("/api/store", StoreWalletController)
	}
	// v4 mirrors v3 but with the DB-backed controller variants.
	v4 := router.Group("/v4")
	{
		v4.POST("/api/deduct", DeductWalletControllerDB)
		v4.POST("/api/store", StoreWalletControllerDB)
	}
	ws := router.Group("/ws")
	{
		// NOTE(review): "WsWallte" looks misspelled ("Wallet"); the handler
		// is declared elsewhere, so any rename must happen there as well.
		ws.GET("", WsWallte)
	}
	tools := router.Group("/tools")
	{
		tools.POST("/fakedata", CreateRedisData)
		tools.POST("/fakedatadb", CreateDBData)
	}
}
|
package solutions
// findMin returns the minimum element of a rotated sorted array that may
// contain duplicates (LeetCode 154). Binary search narrows the window while
// the middle element is distinguishable from the boundaries; when middle,
// left, and right are all equal it falls back to a linear scan of the
// remaining window.
func findMin(nums []int) int {
	left, right := 0, len(nums)-1
	for left < right {
		middle := left + (right-left)/2
		switch {
		case nums[middle] < nums[right]:
			// Right half is sorted; the minimum is at or left of middle.
			right = middle
		case nums[middle] > nums[right]:
			// Rotation point is to the right of middle.
			left = middle + 1
		case nums[middle] != nums[left]:
			right = middle
		default:
			// nums[left] == nums[middle] == nums[right]: indistinguishable,
			// scan the window linearly.
			return searchMin(nums[left : right+1])
		}
	}
	return nums[left]
}
// searchMin returns the smallest value in nums via a linear scan.
// nums must be non-empty.
func searchMin(nums []int) int {
	smallest := nums[0]
	for _, v := range nums[1:] {
		if v < smallest {
			smallest = v
		}
	}
	return smallest
}
|
package lifxlan
// ProductMap is the map of all known hardwares.
//
// If a new product is added and this file is not updated yet,
// you can add it to the map by yourself, for example:
//
// func init() {
// key := lifxlan.ProductMapKey(newVID, newPID)
// lifxlan.ProductMap[key] = ParsedHardwareVersion{
// // Fill in values
// }
// }
//
// The content of this map was fetched from
// https://github.com/LIFX/products/blob/master/products.json
// and generated by
// https://github.com/fishy/lifxlan/tree/master/cmd/gen-product-map
var ProductMap = map[uint64]ParsedHardwareVersion{
ProductMapKey(1, 1): {
VendorName: "LIFX",
ProductName: "LIFX Original 1000",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 3): {
VendorName: "LIFX",
ProductName: "LIFX Color 650",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 10): {
VendorName: "LIFX",
ProductName: "LIFX White 800 (Low Voltage)",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2700,
MaxKelvin: 6500,
},
ProductMapKey(1, 11): {
VendorName: "LIFX",
ProductName: "LIFX White 800 (High Voltage)",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2700,
MaxKelvin: 6500,
},
ProductMapKey(1, 15): {
VendorName: "LIFX",
ProductName: "LIFX Color 1000",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 18): {
VendorName: "LIFX",
ProductName: "LIFX White 900 BR30 (Low Voltage)",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 19): {
VendorName: "LIFX",
ProductName: "LIFX White 900 BR30 (High Voltage)",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 20): {
VendorName: "LIFX",
ProductName: "LIFX Color 1000 BR30",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 22): {
VendorName: "LIFX",
ProductName: "LIFX Color 1000",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 27): {
VendorName: "LIFX",
ProductName: "LIFX A19",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 28): {
VendorName: "LIFX",
ProductName: "LIFX BR30",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 29): {
VendorName: "LIFX",
ProductName: "LIFX A19 Night Vision",
Color: true,
Infrared: true,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 30): {
VendorName: "LIFX",
ProductName: "LIFX BR30 Night Vision",
Color: true,
Infrared: true,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 31): {
VendorName: "LIFX",
ProductName: "LIFX Z",
Color: true,
Infrared: false,
MultiZone: true,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 32): {
VendorName: "LIFX",
ProductName: "LIFX Z",
Color: true,
Infrared: false,
MultiZone: true,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 36): {
VendorName: "LIFX",
ProductName: "LIFX Downlight",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 37): {
VendorName: "LIFX",
ProductName: "LIFX Downlight",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 38): {
VendorName: "LIFX",
ProductName: "LIFX Beam",
Color: true,
Infrared: false,
MultiZone: true,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 39): {
VendorName: "LIFX",
ProductName: "LIFX Downlight White To Warm",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 1500,
MaxKelvin: 9000,
},
ProductMapKey(1, 40): {
VendorName: "LIFX",
ProductName: "LIFX Downlight",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 43): {
VendorName: "LIFX",
ProductName: "LIFX A19",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 44): {
VendorName: "LIFX",
ProductName: "LIFX BR30",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 45): {
VendorName: "LIFX",
ProductName: "LIFX A19 Night Vision",
Color: true,
Infrared: true,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 46): {
VendorName: "LIFX",
ProductName: "LIFX BR30 Night Vision",
Color: true,
Infrared: true,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 49): {
VendorName: "LIFX",
ProductName: "LIFX Mini Color",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 50): {
VendorName: "LIFX",
ProductName: "LIFX Mini White To Warm",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 1500,
MaxKelvin: 4000,
},
ProductMapKey(1, 51): {
VendorName: "LIFX",
ProductName: "LIFX Mini White",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2700,
MaxKelvin: 2700,
},
ProductMapKey(1, 52): {
VendorName: "LIFX",
ProductName: "LIFX GU10",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 53): {
VendorName: "LIFX",
ProductName: "LIFX GU10",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 55): {
VendorName: "LIFX",
ProductName: "LIFX Tile",
Color: true,
Infrared: false,
MultiZone: false,
Chain: true,
Matrix: true,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 57): {
VendorName: "LIFX",
ProductName: "LIFX Candle",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: true,
MinKelvin: 1500,
MaxKelvin: 9000,
},
ProductMapKey(1, 59): {
VendorName: "LIFX",
ProductName: "LIFX Mini Color",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 60): {
VendorName: "LIFX",
ProductName: "LIFX Mini White To Warm",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 1500,
MaxKelvin: 4000,
},
ProductMapKey(1, 61): {
VendorName: "LIFX",
ProductName: "LIFX Mini White",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2700,
MaxKelvin: 2700,
},
ProductMapKey(1, 62): {
VendorName: "LIFX",
ProductName: "LIFX A19",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 63): {
VendorName: "LIFX",
ProductName: "LIFX BR30",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 64): {
VendorName: "LIFX",
ProductName: "LIFX A19 Night Vision",
Color: true,
Infrared: true,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 65): {
VendorName: "LIFX",
ProductName: "LIFX BR30 Night Vision",
Color: true,
Infrared: true,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 66): {
VendorName: "LIFX",
ProductName: "LIFX Mini White",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2700,
MaxKelvin: 2700,
},
ProductMapKey(1, 68): {
VendorName: "LIFX",
ProductName: "LIFX Candle",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: true,
MinKelvin: 1500,
MaxKelvin: 9000,
},
ProductMapKey(1, 70): {
VendorName: "LIFX",
ProductName: "LIFX Switch",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
},
ProductMapKey(1, 81): {
VendorName: "LIFX",
ProductName: "LIFX Candle White To Warm",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2200,
MaxKelvin: 6500,
},
ProductMapKey(1, 82): {
VendorName: "LIFX",
ProductName: "LIFX Filament Clear",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2100,
MaxKelvin: 2100,
},
ProductMapKey(1, 85): {
VendorName: "LIFX",
ProductName: "LIFX Filament Amber",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2000,
MaxKelvin: 2000,
},
ProductMapKey(1, 87): {
VendorName: "LIFX",
ProductName: "LIFX Mini White",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2700,
MaxKelvin: 2700,
},
ProductMapKey(1, 88): {
VendorName: "LIFX",
ProductName: "LIFX Mini White",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2700,
MaxKelvin: 2700,
},
ProductMapKey(1, 89): {
VendorName: "LIFX",
ProductName: "LIFX Switch",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
},
ProductMapKey(1, 90): {
VendorName: "LIFX",
ProductName: "LIFX Clean",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 91): {
VendorName: "LIFX",
ProductName: "LIFX Color",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 92): {
VendorName: "LIFX",
ProductName: "LIFX Color",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 94): {
VendorName: "LIFX",
ProductName: "LIFX BR30",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 96): {
VendorName: "LIFX",
ProductName: "LIFX Candle White To Warm",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2200,
MaxKelvin: 6500,
},
ProductMapKey(1, 97): {
VendorName: "LIFX",
ProductName: "LIFX A19",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 98): {
VendorName: "LIFX",
ProductName: "LIFX BR30",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 99): {
VendorName: "LIFX",
ProductName: "LIFX Clean",
Color: true,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 100): {
VendorName: "LIFX",
ProductName: "LIFX Filament Clear",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2100,
MaxKelvin: 2100,
},
ProductMapKey(1, 101): {
VendorName: "LIFX",
ProductName: "LIFX Filament Amber",
Color: false,
Infrared: false,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2000,
MaxKelvin: 2000,
},
ProductMapKey(1, 109): {
VendorName: "LIFX",
ProductName: "LIFX A19 Night Vision",
Color: true,
Infrared: true,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 110): {
VendorName: "LIFX",
ProductName: "LIFX BR30 Night Vision",
Color: true,
Infrared: true,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
ProductMapKey(1, 111): {
VendorName: "LIFX",
ProductName: "LIFX A19 Night Vision",
Color: true,
Infrared: true,
MultiZone: false,
Chain: false,
Matrix: false,
MinKelvin: 2500,
MaxKelvin: 9000,
},
}
|
package handlers
import (
"encoding/json"
"net/http"
"strconv"
"bitbucket.org/Sanny_Lebedev/test6/fibb"
"github.com/satori/go.uuid"
)
type (
	// answer is the JSON response returned to the client: the UID assigned
	// to the calculation job plus status flags and result metadata.
	answer struct {
		UID     string `json:"UID"`
		Success bool   `json:"success"`
		Done    bool   `json:"done"`
		Meta    meta   `json:"meta"`
	}
	// meta carries the Fibonacci results: the last number computed and the
	// full sequence.
	meta struct {
		Last int64   `json:"last"`
		Nums []int64 `json:"nums"`
	}
)
// calculate handles a request to start a Fibonacci calculation: it assigns
// a fresh UUID, immediately responds with {UID, success:true, done:false},
// and launches the computation asynchronously. The "n" form value selects
// the calculation size; it defaults to 0 when missing or malformed.
// (The previous comment, "home is a simple HTTP handler", was stale.)
func calculate(w http.ResponseWriter, r *http.Request) {
	u1 := uuid.Must(uuid.NewV4()).String()
	w.Header().Set("Content-type", "application/json")
	json.NewEncoder(w).Encode(answer{UID: u1, Success: true, Done: false})
	i, err := strconv.ParseInt(r.FormValue("n"), 10, 64)
	if err != nil {
		// Absent or malformed "n": fall back to 0 rather than erroring.
		i = 0
	}
	// Fire-and-forget: the client polls for completion using the UID.
	go fibb.Calc(u1, i)
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ports provides PortManager that manages allocating, reserving and
// releasing ports.
package ports
import (
"math"
"math/rand"
"gvisor.dev/gvisor/pkg/atomicbitops"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
)
const (
firstEphemeral = 16000
)
var (
anyIPAddress = tcpip.Address{}
)
// Reservation describes a port reservation.
type Reservation struct {
// Networks is a list of network protocols to which the reservation
// applies. Can be IPv4, IPv6, or both.
Networks []tcpip.NetworkProtocolNumber
// Transport is the transport protocol to which the reservation applies.
Transport tcpip.TransportProtocolNumber
// Addr is the address of the local endpoint.
Addr tcpip.Address
// Port is the local port number.
Port uint16
// Flags describe features of the reservation.
Flags Flags
// BindToDevice is the NIC to which the reservation applies.
BindToDevice tcpip.NICID
// Dest is the destination address.
Dest tcpip.FullAddress
}
// dst returns the reservation's destination endpoint as a comparable
// (address, port) key.
func (rs Reservation) dst() destination {
	return destination{
		rs.Dest.Addr,
		rs.Dest.Port,
	}
}
// portDescriptor identifies the (network, transport, port) tuple under
// which reservations are tracked.
type portDescriptor struct {
	network   tcpip.NetworkProtocolNumber
	transport tcpip.TransportProtocolNumber
	port      uint16
}

// destination is a comparable (address, port) pair identifying the remote
// endpoint of a reservation (see Reservation.dst).
type destination struct {
	addr tcpip.Address
	port uint16
}
// destToCounter maps each destination to the FlagCounter that represents
// endpoints to that destination.
//
// destToCounter is never empty. When it has no elements, it is removed from
// the map that references it.
type destToCounter map[destination]FlagCounter
// intersectionFlags calculates the intersection of flag bit values which affect
// the specified destination.
//
// If no destinations are present, all flag values are returned as there are no
// entries to limit possible flag values of a new entry.
//
// In addition to the intersection, the number of intersecting refs is
// returned.
func (dc destToCounter) intersectionFlags(res Reservation) (BitFlags, int) {
	// Start from all flags set; each matching counter narrows the set.
	intersection := FlagMask
	var count int
	for dest, counter := range dc {
		if dest == res.dst() {
			// Exact destination match: every shared flag constrains us.
			intersection &= counter.SharedFlags()
			count++
			continue
		}
		// Wildcard destinations affect all destinations for TupleOnly.
		if dest.addr == anyIPAddress || res.Dest.Addr == anyIPAddress {
			// Only bitwise and the TupleOnlyFlag.
			intersection &= (^TupleOnlyFlag) | counter.SharedFlags()
			count++
		}
	}
	return intersection, count
}
// deviceToDest maps NICs to destinations for which there are port reservations.
//
// deviceToDest is never empty. When it has no elements, it is removed from the
// map that references it.
type deviceToDest map[tcpip.NICID]destToCounter
// isAvailable checks whether binding is possible by device. If not binding to
// a device, check against all FlagCounters. If binding to a specific device,
// check against the unspecified device and the provided device.
//
// If either of the port reuse flags is enabled on any of the nodes, all nodes
// sharing a port must share at least one reuse flag. This matches Linux's
// behavior.
func (dd deviceToDest) isAvailable(res Reservation, portSpecified bool) bool {
	flagBits := res.Flags.Bits()
	if res.BindToDevice == 0 {
		// Not binding to a device: the reservation must be compatible with
		// existing reservations on every device.
		intersection := FlagMask
		for _, dest := range dd {
			flags, count := dest.intersectionFlags(res)
			if count == 0 {
				continue
			}
			intersection &= flags
			if intersection&flagBits == 0 {
				// Can't bind because the (addr,port) was
				// previously bound without reuse.
				return false
			}
		}
		// NOTE(review): an automatically chosen (ephemeral) TCP port is
		// rejected whenever any reservation map exists here, regardless of
		// reuse flags — confirm this matches upstream intent.
		if !portSpecified && res.Transport == header.TCPProtocolNumber {
			return false
		}
		return true
	}
	// Binding to a specific device: conflicts can come from reservations
	// made without a device (NIC 0)...
	intersection := FlagMask
	if dests, ok := dd[0]; ok {
		var count int
		intersection, count = dests.intersectionFlags(res)
		if count > 0 {
			if intersection&flagBits == 0 {
				return false
			}
			if !portSpecified && res.Transport == header.TCPProtocolNumber {
				return false
			}
		}
	}
	// ...and from reservations on the same device; the flag intersection
	// accumulates across both.
	if dests, ok := dd[res.BindToDevice]; ok {
		flags, count := dests.intersectionFlags(res)
		intersection &= flags
		if count > 0 {
			if intersection&flagBits == 0 {
				return false
			}
			if !portSpecified && res.Transport == header.TCPProtocolNumber {
				return false
			}
		}
	}
	return true
}
// addrToDevice maps IP addresses to NICs that have port reservations.
type addrToDevice map[tcpip.Address]deviceToDest
// isAvailable checks whether an IP address is available to bind to. If the
// address is the "any" address, check all other addresses. Otherwise, just
// check against the "any" address and the provided address.
func (ad addrToDevice) isAvailable(res Reservation, portSpecified bool) bool {
	if res.Addr == anyIPAddress {
		// If binding to the "any" address then check that there are no
		// conflicts with all addresses.
		for _, devices := range ad {
			if !devices.isAvailable(res, portSpecified) {
				return false
			}
		}
		return true
	}
	// Check that there is no conflict with the "any" address.
	if devices, ok := ad[anyIPAddress]; ok {
		if !devices.isAvailable(res, portSpecified) {
			return false
		}
	}
	// Check that this is no conflict with the provided address.
	if devices, ok := ad[res.Addr]; ok {
		if !devices.isAvailable(res, portSpecified) {
			return false
		}
	}
	return true
}
// PortManager manages allocating, reserving and releasing ports.
type PortManager struct {
	// mu protects allocatedPorts.
	// LOCK ORDERING: mu > ephemeralMu.
	mu sync.RWMutex
	// allocatedPorts is a nesting of maps that ultimately map Reservations
	// to FlagCounters describing whether the Reservation is valid and can
	// be reused.
	allocatedPorts map[portDescriptor]addrToDevice
	// ephemeralMu protects firstEphemeral and numEphemeral.
	ephemeralMu sync.RWMutex
	// firstEphemeral is the lowest port in the ephemeral range and
	// numEphemeral is the size of that range.
	firstEphemeral uint16
	numEphemeral uint16
	// hint is used to pick ephemeral ports in a stable order for
	// a given port offset.
	//
	// hint must be accessed using the portHint/incPortHint helpers.
	// TODO(gvisor.dev/issue/940): S/R this field.
	hint atomicbitops.Uint32
}
// NewPortManager creates new PortManager.
//
// The ephemeral range initially spans from the package-level firstEphemeral
// value through the maximum 16-bit port number.
func NewPortManager() *PortManager {
	return &PortManager{
		allocatedPorts: make(map[portDescriptor]addrToDevice),
		firstEphemeral: firstEphemeral,
		numEphemeral: math.MaxUint16 - firstEphemeral + 1,
	}
}
// PortTester indicates whether the passed in port is suitable. Returning an
// error causes the function to which the PortTester is passed to return that
// error.
type PortTester func(port uint16) (good bool, err tcpip.Error)
// PickEphemeralPort randomly chooses a starting point and iterates over all
// possible ephemeral ports, allowing the caller to decide whether a given port
// is suitable for its needs, and stopping when a port is found or an error
// occurs.
func (pm *PortManager) PickEphemeralPort(rng *rand.Rand, testPort PortTester) (port uint16, err tcpip.Error) {
	// Snapshot the ephemeral range under the read lock; the scan itself
	// runs without any lock held.
	pm.ephemeralMu.RLock()
	firstEphemeral := pm.firstEphemeral
	numEphemeral := pm.numEphemeral
	pm.ephemeralMu.RUnlock()
	offset := uint32(rng.Int31n(int32(numEphemeral)))
	return pickEphemeralPort(offset, firstEphemeral, numEphemeral, testPort)
}
// portHint atomically reads and returns the pm.hint value.
// Safe for concurrent use; the underlying value is atomic.
func (pm *PortManager) portHint() uint32 {
	return pm.hint.Load()
}
// incPortHint atomically increments pm.hint by 1.
// Safe for concurrent use; the underlying value is atomic.
func (pm *PortManager) incPortHint() {
	pm.hint.Add(1)
}
// PickEphemeralPortStable starts at the specified offset + pm.portHint and
// iterates over all ephemeral ports, allowing the caller to decide whether a
// given port is suitable for its needs and stopping when a port is found or an
// error occurs.
func (pm *PortManager) PickEphemeralPortStable(offset uint32, testPort PortTester) (port uint16, err tcpip.Error) {
	pm.ephemeralMu.RLock()
	firstEphemeral := pm.firstEphemeral
	numEphemeral := pm.numEphemeral
	pm.ephemeralMu.RUnlock()
	p, err := pickEphemeralPort(pm.portHint()+offset, firstEphemeral, numEphemeral, testPort)
	// The hint only advances on success, so a failed pick retries the same
	// sequence of ports next time.
	if err == nil {
		pm.incPortHint()
	}
	return p, err
}
// pickEphemeralPort scans the ephemeral range [first, first+count), starting
// at the given offset into the range and wrapping around, and hands each
// candidate to testPort. It stops at the first port the tester accepts, or
// returns the tester's error, or ErrNoPortAvailable once the whole range has
// been tried.
func pickEphemeralPort(offset uint32, first, count uint16, testPort PortTester) (port uint16, err tcpip.Error) {
	span := uint32(count)
	base := uint32(first)
	for tries := uint32(0); tries < span; tries++ {
		candidate := uint16(base + (offset+tries)%span)
		ok, err := testPort(candidate)
		if err != nil {
			return 0, err
		}
		if ok {
			return candidate, nil
		}
	}
	return 0, &tcpip.ErrNoPortAvailable{}
}
// ReservePort marks a port/IP combination as reserved so that it cannot be
// reserved by another endpoint. If port is zero, ReservePort will search for
// an unreserved ephemeral port and reserve it, returning its value in the
// "port" return value.
//
// An optional PortTester can be passed in which if provided will be used to
// test if the picked port can be used. The function should return true if the
// port is safe to use, false otherwise.
func (pm *PortManager) ReservePort(rng *rand.Rand, res Reservation, testPort PortTester) (reservedPort uint16, err tcpip.Error) {
	pm.mu.Lock()
	defer pm.mu.Unlock()
	// If a port is specified, just try to reserve it for all network
	// protocols.
	if res.Port != 0 {
		if !pm.reserveSpecificPortLocked(res, true /* portSpecified */) {
			return 0, &tcpip.ErrPortInUse{}
		}
		if testPort != nil {
			ok, err := testPort(res.Port)
			if err != nil {
				// Roll back the reservation if the tester
				// rejects the port or fails.
				pm.releasePortLocked(res)
				return 0, err
			}
			if !ok {
				pm.releasePortLocked(res)
				return 0, &tcpip.ErrPortInUse{}
			}
		}
		return res.Port, nil
	}
	// A port wasn't specified, so try to find one. The closure reserves
	// each candidate first and releases it again if the tester declines.
	return pm.PickEphemeralPort(rng, func(p uint16) (bool, tcpip.Error) {
		res.Port = p
		if !pm.reserveSpecificPortLocked(res, false /* portSpecified */) {
			return false, nil
		}
		if testPort != nil {
			ok, err := testPort(p)
			if err != nil {
				pm.releasePortLocked(res)
				return false, err
			}
			if !ok {
				pm.releasePortLocked(res)
				return false, nil
			}
		}
		return true, nil
	})
}
// reserveSpecificPortLocked tries to reserve the given port on all given
// protocols.
//
// Precondition: pm.mu must be held (see callers ReservePort/ReserveTuple).
func (pm *PortManager) reserveSpecificPortLocked(res Reservation, portSpecified bool) bool {
	// Phase 1: make sure the port is available for every requested network
	// protocol before mutating anything, so a failed attempt leaves the
	// maps untouched.
	for _, network := range res.Networks {
		desc := portDescriptor{network, res.Transport, res.Port}
		if addrs, ok := pm.allocatedPorts[desc]; ok {
			if !addrs.isAvailable(res, portSpecified) {
				return false
			}
		}
	}
	// Phase 2: reserve the port on all network protocols, creating the
	// intermediate maps on first use.
	flagBits := res.Flags.Bits()
	dst := res.dst()
	for _, network := range res.Networks {
		desc := portDescriptor{network, res.Transport, res.Port}
		addrToDev, ok := pm.allocatedPorts[desc]
		if !ok {
			addrToDev = make(addrToDevice)
			pm.allocatedPorts[desc] = addrToDev
		}
		devToDest, ok := addrToDev[res.Addr]
		if !ok {
			devToDest = make(deviceToDest)
			addrToDev[res.Addr] = devToDest
		}
		destToCntr := devToDest[res.BindToDevice]
		if destToCntr == nil {
			destToCntr = make(destToCounter)
		}
		counter := destToCntr[dst]
		counter.AddRef(flagBits)
		destToCntr[dst] = counter
		devToDest[res.BindToDevice] = destToCntr
	}
	return true
}
// ReserveTuple adds a port reservation for the tuple on all given protocol.
// It returns false (and leaves the maps unchanged) if the exact tuple is
// already reserved without a shared reuse flag.
func (pm *PortManager) ReserveTuple(res Reservation) bool {
	flagBits := res.Flags.Bits()
	dst := res.dst()
	pm.mu.Lock()
	defer pm.mu.Unlock()
	// It is easier to undo the entire reservation, so if we find that the
	// tuple can't be fully added, finish and undo the whole thing.
	undo := false
	// Reserve port on all network protocols.
	for _, network := range res.Networks {
		desc := portDescriptor{network, res.Transport, res.Port}
		addrToDev, ok := pm.allocatedPorts[desc]
		if !ok {
			addrToDev = make(addrToDevice)
			pm.allocatedPorts[desc] = addrToDev
		}
		devToDest, ok := addrToDev[res.Addr]
		if !ok {
			devToDest = make(deviceToDest)
			addrToDev[res.Addr] = devToDest
		}
		destToCntr := devToDest[res.BindToDevice]
		if destToCntr == nil {
			destToCntr = make(destToCounter)
		}
		counter := destToCntr[dst]
		if counter.TotalRefs() != 0 && counter.SharedFlags()&flagBits == 0 {
			// Tuple already exists.
			undo = true
		}
		counter.AddRef(flagBits)
		destToCntr[dst] = counter
		devToDest[res.BindToDevice] = destToCntr
	}
	if undo {
		// releasePortLocked decrements the counts (rather than setting
		// them to zero), so it will undo the incorrect incrementing
		// above.
		pm.releasePortLocked(res)
		return false
	}
	return true
}
// ReleasePort releases the reservation on a port/IP combination so that it can
// be reserved by other endpoints.
func (pm *PortManager) ReleasePort(res Reservation) {
	pm.mu.Lock()
	defer pm.mu.Unlock()
	pm.releasePortLocked(res)
}
// releasePortLocked drops one reference (per network protocol) for the
// reservation and prunes every map level that becomes empty as a result.
//
// Precondition: pm.mu must be held.
func (pm *PortManager) releasePortLocked(res Reservation) {
	dst := res.dst()
	for _, network := range res.Networks {
		desc := portDescriptor{network, res.Transport, res.Port}
		addrToDev, ok := pm.allocatedPorts[desc]
		if !ok {
			continue
		}
		devToDest, ok := addrToDev[res.Addr]
		if !ok {
			continue
		}
		destToCounter, ok := devToDest[res.BindToDevice]
		if !ok {
			continue
		}
		counter, ok := destToCounter[dst]
		if !ok {
			continue
		}
		counter.DropRef(res.Flags.Bits())
		if counter.TotalRefs() > 0 {
			// Still referenced; store the decremented counter back.
			destToCounter[dst] = counter
			continue
		}
		// Last reference gone: delete the counter and clean up any map
		// levels that are now empty.
		delete(destToCounter, dst)
		if len(destToCounter) > 0 {
			continue
		}
		delete(devToDest, res.BindToDevice)
		if len(devToDest) > 0 {
			continue
		}
		delete(addrToDev, res.Addr)
		if len(addrToDev) > 0 {
			continue
		}
		delete(pm.allocatedPorts, desc)
	}
}
// PortRange returns the UDP and TCP inclusive range of ephemeral ports used in
// both IPv4 and IPv6.
func (pm *PortManager) PortRange() (uint16, uint16) {
	pm.ephemeralMu.RLock()
	defer pm.ephemeralMu.RUnlock()
	return pm.firstEphemeral, pm.firstEphemeral + pm.numEphemeral - 1
}
// SetPortRange sets the UDP and TCP IPv4 and IPv6 ephemeral port range
// (inclusive). It returns ErrInvalidPortRange when start > end; existing
// reservations outside the new range are not affected.
func (pm *PortManager) SetPortRange(start uint16, end uint16) tcpip.Error {
	if start > end {
		return &tcpip.ErrInvalidPortRange{}
	}
	pm.ephemeralMu.Lock()
	defer pm.ephemeralMu.Unlock()
	pm.firstEphemeral = start
	pm.numEphemeral = end - start + 1
	return nil
}
|
// https://leetcode.com/problems/k-closest-points-to-origin/
package leetcode_go
import (
"container/heap"
"math"
)
// PointHeap is a min-heap of 2D points ordered by their Euclidean distance
// from the origin. It implements heap.Interface.
type PointHeap [][]int

// Len reports how many points are currently on the heap.
func (h PointHeap) Len() int { return len(h) }

// Less orders points by ascending distance from the origin.
func (h PointHeap) Less(i, j int) bool {
	di := math.Sqrt(float64(h[i][0]*h[i][0] + h[i][1]*h[i][1]))
	dj := math.Sqrt(float64(h[j][0]*h[j][0] + h[j][1]*h[j][1]))
	return di < dj
}

// Swap exchanges the points at indexes i and j.
func (h PointHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }

// Push appends a point to the heap's backing slice.
func (h *PointHeap) Push(point interface{}) {
	*h = append(*h, point.([]int))
}

// Pop removes and returns the final element of the backing slice.
func (h *PointHeap) Pop() interface{} {
	old := *h
	last := len(old) - 1
	point := old[last]
	*h = old[:last]
	return point
}

// kClosest returns the K points closest to the origin by heapifying the input
// and popping the K smallest entries. Note: the input slice is reordered in
// place because the heap shares its backing array.
func kClosest(points [][]int, K int) [][]int {
	ph := PointHeap(points)
	heap.Init(&ph)
	res := [][]int{}
	for picked := 0; picked < K; picked++ {
		res = append(res, heap.Pop(&ph).([]int))
	}
	return res
}
|
// Copyright 2014 Dirk Jablonowski. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package bricker is an API for Tinkerforge hardware based on the brick daemon (brickd).
A bricker is a manager.
It uses one or more connectors to send and receive packets from brick daemons (real hardware).
The connectors can send and receive events for a specific address.
The packets are encapsulated in events.
The bricker is also a producer of events.
To use these events, consumers (subscribers) are needed.
There is a fallback mechanism for events without a consumer (the default fallback subscriber).
To use this API you need a running brick daemon (brickd) or hardware with a brick daemon;
please use a current version of the daemon.
You can get the daemon from http://www.tinkerforge.com/en/doc/Software/Brickd.html#brickd as
a source or binary package.
This API is based on the documentation of the TCP/IP API from http://www.tinkerforge.com/en/doc/index.html#/software-tcpip-open, so that documentation is also useful.
*/
package bricker
import (
"github.com/dirkjabl/bricker/connector"
"github.com/dirkjabl/bricker/event"
"github.com/dirkjabl/bricker/util/hash"
)
// The bricker type.
// A Bricker manages connectors and subscribers.
type Bricker struct {
	connection map[string]connector.Connector // open connectors, keyed by name
	first string // NOTE(review): presumably the name of the first/default connection — set outside this chunk, confirm
	uids map[uint32]string // maps device UIDs to the connector name that owns them
	subscriber map[hash.Hash]map[string]Subscriber // subscribers keyed by packet hash, then subscriber name
	choosers []uint8 // hash choosers used by dispatch to match packets to subscribers
	defaultsubscriber Subscriber // fallback for events no subscriber matches
}
// New creates a Bricker ready for use.
// The returned bricker has no connections and no subscribers yet; all of its
// internal maps are initialized so they can be written to immediately.
func New() *Bricker {
	b := &Bricker{}
	b.connection = make(map[string]connector.Connector)
	b.first = ""
	b.uids = make(map[uint32]string)
	b.subscriber = make(map[hash.Hash]map[string]Subscriber)
	b.choosers = make([]uint8, 0)
	return b
}
// Done release all connections and subscriber and release all resources.
// After Done returns the bricker holds no subscriptions and no connections.
func (b *Bricker) Done() {
	// Unsubscribe all subscriber.
	for _, subs := range b.subscriber {
		for _, s := range subs {
			b.Unsubscribe(s)
		}
	}
	// Release all connections.
	for name := range b.connection {
		b.Release(name)
	}
}
// Internal method: read wait for a new event and forward it to the dispatcher.
// It blocks in a loop until the connector's Receive returns nil (no more
// packets); every received event is tagged with the connector name and
// dispatched on its own goroutine.
func (b *Bricker) read(c connector.Connector, n string) {
	var ev *event.Event
	for {
		ev = c.Receive()
		if ev == nil {
			return // done, no more packets
		}
		ev.ConnectorName = n
		go b.dispatch(ev)
	}
}
// Internal method: write takes a event and send it to the right bricker (dispatch).
// If the event names an unknown connector, the error is reported back to
// subscribers through the dispatcher instead of being sent.
func (b *Bricker) write(e *event.Event) {
	if e != nil {
		if conn, ok := b.connection[e.ConnectorName]; ok {
			conn.Send(e)
		} else {
			e.Err = NewError(ErrorConnectorNameNotExists)
			go b.dispatch(e)
		}
	}
}
// Internal method: dispatch routes the event to all matching subscribers.
//
// Matching hashes the packet's UID and function ID with every registered
// chooser. If the event has no packet, or no chooser produced a hash with at
// least one subscriber, the event falls through to the default subscriber.
func (b *Bricker) dispatch(e *event.Event) {
	if e.Packet == nil { // without a packet, no subscriber could be determined
		go b.process(e, b.defaultsubscriber)
		return
	}
	match := false
	for _, chooser := range b.choosers {
		h := hash.New(chooser, e.Packet.Head.Uid, e.Packet.Head.FunctionID)
		if s, ok := b.subscriber[h]; ok {
			// Accumulate with OR: previously this was overwritten on
			// every matching chooser, so a later match with an empty
			// subscriber map reset it to false and the default
			// subscriber was wrongly notified as well.
			match = match || len(s) > 0
			go func(ev *event.Event, subs map[string]Subscriber) {
				for _, sub := range subs {
					go b.process(ev, sub)
				}
			}(e, s)
		}
	}
	if !match { // no subscriber hash matched against packet hash
		go b.process(e, b.defaultsubscriber)
	}
}
// Internal method: process notifies the given subscriber about the event.
// One-shot subscriptions (non-callback) are unsubscribed after delivery.
func (b *Bricker) process(e *event.Event, sub Subscriber) {
	if sub == nil {
		return // no subscriber, nothing to notify
	}
	sub.Notify(e)
	if !sub.Subscription().Callback { // not a callback, deliver only once
		b.Unsubscribe(sub)
	}
}
|
package main
import (
"github.com/asim/go-micro/plugins/client/grpc/v3"
"github.com/asim/go-micro/plugins/server/http/v3"
"github.com/asim/go-micro/v3"
"github.com/asim/go-micro/v3/logger"
"github.com/gin-gonic/gin"
pb "github.com/xpunch/go-micro-example/v3/helloworld/proto"
)
// main wires a go-micro service that serves HTTP via gin on :80 and calls
// the "helloworld" service over gRPC.
func main() {
	srv := micro.NewService(
		micro.Server(http.NewServer()),
		micro.Client(grpc.NewClient()),
		micro.Name("web"),
		micro.Address(":80"),
	)
	srv.Init()
	// Fresh gin engine with panic recovery and request logging.
	router := gin.New()
	router.Use(gin.Recovery())
	router.Use(gin.Logger())
	helloworldService := pb.NewHelloworldService("helloworld", srv.Client())
	// POST /helloworld with {"user": "..."} proxies the name to the
	// helloworld service and returns its response as JSON.
	router.POST("/helloworld", func(ctx *gin.Context) {
		var req struct {
			User string `json:"user"`
		}
		if err := ctx.ShouldBindJSON(&req); err != nil {
			ctx.AbortWithStatusJSON(400, err)
			return
		}
		resp, err := helloworldService.Call(ctx, &pb.Request{Name: req.User})
		if err != nil {
			ctx.AbortWithStatusJSON(500, err)
			return
		}
		ctx.JSON(200, resp)
	})
	// Mount the gin router as the service's HTTP handler.
	if err := micro.RegisterHandler(srv.Server(), router); err != nil {
		logger.Fatal(err)
	}
	if err := srv.Run(); err != nil {
		logger.Fatal(err)
	}
}
|
package localcache
import (
"encoding/base64"
"fmt"
"os"
"path/filepath"
"sync"
"github.com/loft-sh/devspace/pkg/devspace/env"
"github.com/loft-sh/devspace/pkg/util/encryption"
"gopkg.in/yaml.v3"
)
// Cache is the thread-safe interface to the per-project local cache of image
// builds, config variables and arbitrary key/value data.
type Cache interface {
	// ListImageCache returns a copy of all image cache entries.
	ListImageCache() map[string]ImageCache
	// GetImageCache returns the entry for the named image config.
	GetImageCache(imageConfigName string) (ImageCache, bool)
	// SetImageCache stores the entry for the named image config.
	SetImageCache(imageConfigName string, imageCache ImageCache)
	// GetLastContext returns the last used kube context/namespace.
	GetLastContext() *LastContextConfig
	// SetLastContext records the last used kube context/namespace.
	SetLastContext(config *LastContextConfig)
	// GetData returns the arbitrary value stored under key.
	GetData(key string) (string, bool)
	// SetData stores an arbitrary key/value pair.
	SetData(key, value string)
	// GetVar returns the cached config variable varName.
	GetVar(varName string) (string, bool)
	// SetVar stores the config variable varName.
	SetVar(varName, value string)
	// ListVars returns a copy of all cached config variables.
	ListVars() map[string]string
	// ClearVars removes all cached config variables.
	ClearVars()
	// DeepCopy returns an independent copy of the cache.
	DeepCopy() Cache
	// Save persists changes to file
	Save() error
}
// LocalCache specifies the runtime cache
// persisted as YAML at cachePath. All exported fields are serialized; access
// goes through the accessor methods, which take accessMutex.
type LocalCache struct {
	Vars map[string]string `yaml:"vars,omitempty"`
	VarsEncrypted bool `yaml:"varsEncrypted,omitempty"`
	Images map[string]ImageCache `yaml:"images,omitempty"`
	LastContext *LastContextConfig `yaml:"lastContext,omitempty"`
	// Data is arbitrary key value cache
	Data map[string]string `yaml:"data,omitempty"`
	// config path is the path where the cache was loaded from
	cachePath string `yaml:"-" json:"-"`
	// accessMutex guards all fields above.
	accessMutex sync.Mutex `yaml:"-" json:"-"`
}
// LastContextConfig holds all the information about the last used kubernetes context
type LastContextConfig struct {
	Namespace string `yaml:"namespace,omitempty"`
	Context string `yaml:"context,omitempty"`
}
// ImageCache holds the cache related information about a certain image.
// The *Hash fields record the inputs of the last build so unchanged images
// can be skipped; ImageName/Tag identify the build result.
type ImageCache struct {
	ImageConfigHash string `yaml:"imageConfigHash,omitempty"`
	DockerfileHash string `yaml:"dockerfileHash,omitempty"`
	ContextHash string `yaml:"contextHash,omitempty"`
	EntrypointHash string `yaml:"entrypointHash,omitempty"`
	CustomFilesHash string `yaml:"customFilesHash,omitempty"`
	ImageName string `yaml:"imageName,omitempty"`
	LocalRegistryImageName string `yaml:"localRegistryImageName,omitempty"`
	Tag string `yaml:"tag,omitempty"`
}
// IsLocalRegistryImage reports whether this image was pushed to a local
// registry.
func (ic ImageCache) IsLocalRegistryImage() bool {
	return len(ic.LocalRegistryImageName) > 0
}

// ResolveImage returns the local registry image name when one is recorded and
// the regular image name otherwise.
func (ic ImageCache) ResolveImage() string {
	if len(ic.LocalRegistryImageName) > 0 {
		return ic.LocalRegistryImageName
	}
	return ic.ImageName
}
// ListImageCache returns a shallow copy of all image cache entries.
func (l *LocalCache) ListImageCache() map[string]ImageCache {
	l.accessMutex.Lock()
	defer l.accessMutex.Unlock()
	retMap := map[string]ImageCache{}
	for k, v := range l.Images {
		retMap[k] = v
	}
	return retMap
}

// GetImageCache returns the entry for the named image config.
func (l *LocalCache) GetImageCache(imageConfigName string) (ImageCache, bool) {
	l.accessMutex.Lock()
	defer l.accessMutex.Unlock()
	cache, ok := l.Images[imageConfigName]
	return cache, ok
}

// SetImageCache stores the entry for the named image config.
func (l *LocalCache) SetImageCache(imageConfigName string, imageCache ImageCache) {
	l.accessMutex.Lock()
	defer l.accessMutex.Unlock()
	// Guard against a nil map: a zero-value LocalCache or a cache file
	// without an images section would otherwise panic here.
	if l.Images == nil {
		l.Images = map[string]ImageCache{}
	}
	l.Images[imageConfigName] = imageCache
}

// GetLastContext returns the last used kube context/namespace.
func (l *LocalCache) GetLastContext() *LastContextConfig {
	l.accessMutex.Lock()
	defer l.accessMutex.Unlock()
	return l.LastContext
}

// SetLastContext records the last used kube context/namespace.
func (l *LocalCache) SetLastContext(config *LastContextConfig) {
	l.accessMutex.Lock()
	defer l.accessMutex.Unlock()
	l.LastContext = config
}

// GetData returns the arbitrary value stored under key.
func (l *LocalCache) GetData(key string) (string, bool) {
	l.accessMutex.Lock()
	defer l.accessMutex.Unlock()
	cache, ok := l.Data[key]
	return cache, ok
}

// SetData stores an arbitrary key/value pair.
func (l *LocalCache) SetData(key, value string) {
	l.accessMutex.Lock()
	defer l.accessMutex.Unlock()
	// Writing to a nil map panics; initialize lazily.
	if l.Data == nil {
		l.Data = map[string]string{}
	}
	l.Data[key] = value
}

// GetVar returns the cached config variable varName.
func (l *LocalCache) GetVar(varName string) (string, bool) {
	l.accessMutex.Lock()
	defer l.accessMutex.Unlock()
	cache, ok := l.Vars[varName]
	return cache, ok
}

// SetVar stores the config variable varName.
func (l *LocalCache) SetVar(varName, value string) {
	l.accessMutex.Lock()
	defer l.accessMutex.Unlock()
	// Writing to a nil map panics; initialize lazily.
	if l.Vars == nil {
		l.Vars = map[string]string{}
	}
	l.Vars[varName] = value
}

// ListVars returns a copy of all cached config variables.
func (l *LocalCache) ListVars() map[string]string {
	l.accessMutex.Lock()
	defer l.accessMutex.Unlock()
	listVars := map[string]string{}
	for k, v := range l.Vars {
		listVars[k] = v
	}
	return listVars
}

// ClearVars removes all cached config variables.
func (l *LocalCache) ClearVars() {
	l.accessMutex.Lock()
	defer l.accessMutex.Unlock()
	l.Vars = map[string]string{}
}
// DeepCopy creates a deep copy of the config
// by round-tripping it through YAML. Marshal/unmarshal errors are
// deliberately ignored; if the round trip fails the returned cache is empty
// apart from the copied cachePath.
func (l *LocalCache) DeepCopy() Cache {
	l.accessMutex.Lock()
	defer l.accessMutex.Unlock()
	o, _ := yaml.Marshal(l)
	n := &LocalCache{}
	_ = yaml.Unmarshal(o, n)
	// cachePath is unexported and not serialized, so carry it over.
	n.cachePath = l.cachePath
	return n
}
// Save saves the config to the filesystem.
//
// Variables are encrypted with EncryptionKey (declared elsewhere in this
// package) unless DevSpaceDisableVarsEncryptionEnv is set to "true". The
// in-memory cache keeps the plaintext values; only the serialized copy is
// encrypted. An entirely empty cache that has never been written to disk is
// not persisted at all.
func (l *LocalCache) Save() error {
	if l.cachePath == "" {
		return fmt.Errorf("no path specified where to save the local cache")
	}
	l.accessMutex.Lock()
	defer l.accessMutex.Unlock()
	data, err := yaml.Marshal(l)
	if err != nil {
		return err
	}
	// Round-trip into a throwaway copy so encryption below doesn't touch
	// the live cache.
	copiedConfig := &LocalCache{}
	err = yaml.Unmarshal(data, copiedConfig)
	if err != nil {
		return err
	}
	// encrypt variables
	if env.GlobalGetEnv(DevSpaceDisableVarsEncryptionEnv) != "true" && EncryptionKey != "" {
		for k, v := range copiedConfig.Vars {
			if len(v) == 0 {
				continue
			}
			encrypted, err := encryption.EncryptAES([]byte(EncryptionKey), []byte(v))
			if err != nil {
				return err
			}
			copiedConfig.Vars[k] = base64.StdEncoding.EncodeToString(encrypted)
		}
		copiedConfig.VarsEncrypted = true
	}
	// marshal again with the encrypted vars
	data, err = yaml.Marshal(copiedConfig)
	if err != nil {
		return err
	}
	_, err = os.Stat(l.cachePath)
	if err != nil {
		if os.IsNotExist(err) {
			// check if a save is really necessary
			if len(l.Data) == 0 && len(l.Vars) == 0 && len(l.Images) == 0 && l.LastContext == nil {
				return nil
			}
		}
	}
	err = os.MkdirAll(filepath.Dir(l.cachePath), 0755)
	if err != nil {
		return err
	}
	return os.WriteFile(l.cachePath, data, 0666)
}
|
// This file is part of CycloneDX GoMod
//
// Licensed under the Apache License, Version 2.0 (the “License”);
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an “AS IS” BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) OWASP Foundation. All Rights Reserved.
package mod
import (
"errors"
"flag"
"fmt"
"strings"
cdx "github.com/CycloneDX/cyclonedx-go"
"github.com/CycloneDX/cyclonedx-gomod/internal/cli/options"
)
// Options bundles all CLI options for the "mod" command: logging, output and
// SBOM generation settings plus the mod-specific flags below.
type Options struct {
	options.LogOptions
	options.OutputOptions
	options.SBOMOptions
	// ComponentType is the CycloneDX type of the main component (-type).
	ComponentType string
	// ModuleDir is the module directory to scan.
	ModuleDir string
	// IncludeTest includes test dependencies in the SBOM (-test).
	IncludeTest bool
}
// RegisterFlags registers all options (including the embedded option sets)
// on the given flag set.
func (m *Options) RegisterFlags(fs *flag.FlagSet) {
	m.LogOptions.RegisterFlags(fs)
	m.OutputOptions.RegisterFlags(fs)
	m.SBOMOptions.RegisterFlags(fs)
	fs.StringVar(&m.ComponentType, "type", "application", "Type of the main component")
	fs.BoolVar(&m.IncludeTest, "test", false, "Include test dependencies")
}
// allowedComponentTypes enumerates the component types accepted for -type.
// NOTE(review): this is a deliberate subset of the types cyclonedx-go
// defines — confirm before extending.
var allowedComponentTypes = []cdx.ComponentType{
	cdx.ComponentTypeApplication,
	cdx.ComponentTypeFirmware,
	cdx.ComponentTypeFramework,
	cdx.ComponentTypeLibrary,
}
// Validate checks the output options, the SBOM options and the component
// type. Nested validation errors are merged into a single
// options.ValidationError; any other error aborts validation immediately.
func (m Options) Validate() error {
	var errs []error

	// collect folds nested ValidationErrors into errs and passes every
	// other error straight through.
	collect := func(err error) error {
		if err == nil {
			return nil
		}
		var verr *options.ValidationError
		if errors.As(err, &verr) {
			errs = append(errs, verr.Errors...)
			return nil
		}
		return err
	}

	if err := collect(m.OutputOptions.Validate()); err != nil {
		return err
	}
	if err := collect(m.SBOMOptions.Validate()); err != nil {
		return err
	}

	componentType := cdx.ComponentType(m.ComponentType)
	valid := false
	for _, allowedType := range allowedComponentTypes {
		if allowedType == componentType {
			valid = true
			break
		}
	}
	if !valid {
		allowed := make([]string, len(allowedComponentTypes))
		for i, allowedType := range allowedComponentTypes {
			allowed[i] = string(allowedType)
		}
		errs = append(errs, fmt.Errorf("component type: \"%s\" is invalid (allowed: %s)", m.ComponentType, strings.Join(allowed, ",")))
	}

	if len(errs) > 0 {
		return &options.ValidationError{Errors: errs}
	}
	return nil
}
|
package raw
import (
"time"
"github.com/docker/docker/api/types"
)
// Metrics holds containers raw metric values as they are extracted from the system
type Metrics struct {
	// Time is the moment the snapshot was taken.
	Time time.Time
	ContainerID string
	Memory Memory
	Network Network
	CPU CPU
	Pids Pids
	Blkio Blkio
}
// Memory usage snapshot
type Memory struct {
	UsageLimit uint64
	Cache uint64
	RSS uint64
	SwapUsage uint64
	// FuzzUsage — NOTE(review): semantics not visible here; confirm
	// against the cgroups fetcher that populates it.
	FuzzUsage uint64
}
// CPU usage snapshot
type CPU struct {
	TotalUsage uint64
	UsageInUsermode uint64
	UsageInKernelmode uint64
	PercpuUsage []uint64
	ThrottledPeriods uint64
	ThrottledTimeNS uint64
	SystemUsage uint64
	OnlineCPUs uint
}
// Pids inside the container
type Pids struct {
	Current uint64
	Limit uint64
}
// Blkio stores multiple entries of the Block I/O stats
type Blkio struct {
	IoServiceBytesRecursive []BlkioEntry
	IoServicedRecursive []BlkioEntry
}
// BlkioEntry stores basic information of a simple blkio operation
type BlkioEntry struct {
	Op string
	Value uint64
}
// Network transmission and receive metrics
type Network struct {
	RxBytes int64
	RxDropped int64
	RxErrors int64
	RxPackets int64
	TxBytes int64
	TxDropped int64
	TxErrors int64
	TxPackets int64
}
// MetricsFetcher fetches raw basic metrics from cgroups and the proc filesystem
type MetricsFetcher struct {
	cgroups *cgroupsFetcher
	network *networkFetcher
}
// Fetcher is the minimal abstraction of any raw metrics fetcher implementation
type Fetcher interface {
	Fetch(types.ContainerJSON) (Metrics, error)
}
// NewFetcher returns a raw MetricsFetcher that reads cgroup data relative to
// hostRoot using the given cgroups configuration and mounts file.
func NewFetcher(hostRoot, cgroups, mountsFilePath string) *MetricsFetcher {
	return &MetricsFetcher{
		cgroups: newCGroupsFetcher(hostRoot, cgroups, mountsFilePath),
		network: newNetworkFetcher(hostRoot),
	}
}
// Fetch returns a raw Metrics snapshot of a container, given its ID and its PID.
// Network metrics are gathered even though their error (if any) is returned;
// on a cgroups error the partially filled Metrics is returned as-is.
func (mf *MetricsFetcher) Fetch(c types.ContainerJSON) (Metrics, error) {
	metrics, err := mf.cgroups.fetch(c)
	if err != nil {
		return metrics, err
	}
	metrics.ContainerID = c.ID
	metrics.Network, err = mf.network.Fetch(c.State.Pid)
	return metrics, err
}
|
package services
import "github.com/jceatwell/bookstore_users-api/domain/users"
// CreateUser service method to create user.
//
// NOTE(review): currently a stub — it performs no work and returns
// (nil, nil), so callers receive neither a user nor an error.
func CreateUser(user users.User) (*users.User, error) {
	return nil, nil
}
|
package rbac
import (
"fmt"
//"errors"
m "cms_admin/admin/src/models"
"github.com/astaxie/beego/logs"
)
// ChannelController serves the admin CRUD endpoints for channels.
type ChannelController struct {
	CommonController
}
// Index renders the channel list page. It also exercises the logging and
// redis plumbing with throwaway writes.
func (this *ChannelController) Index() {
	// Write a test entry to the log.
	logs.Warn("json is a type of kv like", map[string]int{"key": 2016})
	// Smoke-test the redis connection with a simple SET.
	redis := m.GetRedis()
	defer redis.Close()
	_, err := redis.Do("SET", "test", "testredis")
	if err != nil {
		fmt.Println("redis set error")
	}
	channels, count := m.Getchannellist()
	this.Data["channels"] = &channels
	this.Data["count"] = &count
	userinfo := this.GetSession("userinfo")
	this.Data["userinfo"] = userinfo
	this.TplName = this.GetTemplatetype() + "/rbac/channel.tpl"
}
// Add creates a new channel from the posted form fields (name, weight,
// status) and responds with the result.
func (this *ChannelController) Add() {
	channel := m.Channel{}
	channel.Name = this.GetString("name")
	channel.Weight, _ = this.GetInt64("weight")
	channel.Status, _ = this.GetInt("status")
	// Reject empty names before touching the database. The previous code
	// responded but then fell through and inserted the empty channel.
	if len(channel.Name) == 0 {
		this.Rsp(false, "name is empty")
		return
	}
	id, err := m.AddChannel(&channel)
	if err != nil {
		this.Rsp(false, err.Error())
		return
	}
	if id > 0 {
		this.Rsp(true, "Success")
		return
	}
	// Previously this case called err.Error() with a nil err and panicked.
	this.Rsp(false, "add channel failed")
}
// Edit looks up a single channel by id and returns it in the response.
func (this *ChannelController) Edit() {
	id, _ := this.GetInt64("id")
	fmt.Println("id:", id)
	channel := m.GetChannelById(id)
	if channel.Id == 0 {
		this.Rsp(false, "频道不存在")
		return
	}
	this.Rsp(true, channel)
}
// Update overwrites an existing channel with the posted form fields and
// responds with the result.
func (this *ChannelController) Update() {
	channel := m.Channel{}
	channel.Name = this.GetString("name")
	channel.Weight, _ = this.GetInt64("weight")
	channel.Status, _ = this.GetInt("status")
	channel.Id, _ = this.GetInt64("id")
	id, err := m.UpdateChannel(&channel)
	if err != nil {
		this.Rsp(false, err.Error())
		return
	}
	if id > 0 {
		this.Rsp(true, "Success")
		return
	}
	// Previously this case called err.Error() with a nil err and panicked.
	this.Rsp(false, "update channel failed")
}
// Delete removes a channel by id and responds with the result.
func (this *ChannelController) Delete() {
	id, _ := this.GetInt64("id")
	fmt.Println("id:", id)
	res, err := m.DeleteChannelById(id)
	if err != nil {
		this.Rsp(false, err.Error())
		return
	}
	if res > 0 {
		this.Rsp(true, "Success")
		return
	}
	// Previously this case called err.Error() with a nil err and panicked.
	this.Rsp(false, "delete channel failed")
}
|
package scalars_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// TestSessions boots the Ginkgo spec suite for this package under `go test`.
func TestSessions(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Sessions Suite")
}
|
package config
import (
"os"
"strconv"
)
// config holds the process configuration resolved from environment variables
// during package initialization.
type config struct {
	GrpcPort uint16 // gRPC listen port (WALLET_PORT, default 50051)
	MongoDbHost string // WALLET_MONGODB_HOST
	MongoDbDatabase string // WALLET_MONGODB_DB_NAME
}
// configInstance is the package-wide singleton populated by init and exposed
// through GetConfig.
var configInstance config
// init loads the configuration from environment variables once at startup.
//
// WALLET_PORT must be a valid TCP port (1-65535); anything else — unset,
// non-numeric, or out of range — falls back to the default 50051. The
// previous code silently truncated out-of-range values through the uint16
// conversion (e.g. "70000" became port 4464).
func init() {
	port, err := strconv.Atoi(os.Getenv("WALLET_PORT"))
	if err != nil || port < 1 || port > 65535 {
		port = 50051
	}
	configInstance.GrpcPort = uint16(port)
	configInstance.MongoDbHost = os.Getenv("WALLET_MONGODB_HOST")
	configInstance.MongoDbDatabase = os.Getenv("WALLET_MONGODB_DB_NAME")
}
// GetConfig returns a copy of the configuration loaded during package
// initialization.
func GetConfig() config {
	return configInstance
}
|
package main
import (
"fmt"
ds "github.com/deepak-muley/golangexamples/prefix-transform"
)
/*
Input
{
	"1": "ABC",
	"2": "AB",
	"3": "B",
	"4": "ABCD"
}
* Transform into the below
Output:
{
	"1": "2C",
	"2": "AB",
	"3": "B",
	"4": "1D"
}
*/
// createGraph builds a trie-like graph from the input map: for every tag the
// sequence of values becomes a root-to-node path, and the final node on the
// path is labeled with the tag. Entries with no values are skipped.
//
// NOTE(review): the first value is used both to select the root and again as
// the first child along the path — confirm that matches ds.Graph's intended
// layout.
func createGraph(input map[string][]string) *ds.Graph {
	gr := ds.NewGraph()
	//Create the graph with given input
	for tag, vals := range input {
		var curNode, curRoot *ds.GraphNode
		var v string
		if len(vals) > 0 {
			v = vals[0]
		} else {
			continue
		}
		// Find or create the root node for the first value.
		var gnode *ds.GraphNode
		gnode = gr.GetRootNode(v)
		if gnode == nil {
			gnode = gr.AddRootNode(v)
		}
		curRoot = gnode
		curNode = curRoot
		// Walk/extend the path for every value of this entry.
		for _, val := range vals {
			childNode := curNode.GetChild(val)
			if childNode == nil {
				childNode = curNode.AddChild(val)
			}
			curNode = childNode
		}
		curNode.SetTag(tag)
	}
	return gr
}
// createOutputMap walks each input entry's path through the graph and emits a
// compressed form: a maximal shared prefix is replaced by the tag of another
// entry ending there, while unshared suffix values pass through unchanged.
//
// NOTE(review): the curTag/prevTag/history bookkeeping is intricate and its
// corner cases (multiple tagged prefixes, untagged interior nodes) are not
// obviously covered — verify against the expected Output in the file header
// before relying on it.
func createOutputMap(gr *ds.Graph, input map[string][]string) map[string][]string {
	output := make(map[string][]string)
	var curNode, curRoot *ds.GraphNode
	for tag, vals := range input {
		var v string
		if len(vals) > 0 {
			v = vals[0]
		} else {
			continue
		}
		// Locate (or create) the root for this entry's first value.
		var gnode *ds.GraphNode
		gnode = gr.GetRootNode(v)
		if gnode == nil {
			gnode = gr.AddRootNode(v)
		}
		curRoot = gnode
		curNode = curRoot
		var curTag, prevTag string
		var outputVals []string
		var history []string
		for _, val := range vals {
			childNode := curNode.GetChild(val)
			if childNode == nil {
				// No shared path: value passes through as-is.
				outputVals = append(outputVals, val)
				continue
			}
			if childNode.GetTag() != "" {
				curTag = childNode.GetTag()
			} else {
				// Untagged node: remember the raw value in case
				// no tagged prefix materializes.
				history = append(history, val)
			}
			//end of vals == input tag
			if curTag == tag {
				if prevTag != "" {
					outputVals = append(outputVals, prevTag)
					outputVals = append(outputVals, val)
				} else {
					outputVals = append(history, val)
				}
			} else {
				curTag = childNode.GetTag()
				prevTag = childNode.GetTag()
			}
			curNode = childNode
		}
		output[tag] = outputVals
	}
	return output
}
// main demonstrates the prefix transform: it prints the input map, builds the
// prefix graph, dumps it, and prints the compressed output map.
func main() {
	input := map[string][]string{
		"1": []string{"A", "B", "C"},
		"2": []string{"A", "B"},
		"3": []string{"B"},
		"4": []string{"A", "B", "C", "D"},
	}
	fmt.Println("Input:")
	for tag, vals := range input {
		fmt.Println(tag, vals)
	}
	//Create graph from input map
	gr := createGraph(input)
	//print the entire graph
	gr.Print()
	//Create Output from graph
	output := createOutputMap(gr, input)
	fmt.Println("Output:")
	for tag, vals := range output {
		fmt.Println(tag, vals)
	}
}
|
package utreexo
import (
"fmt"
"testing"
)
// Add 2. delete 1. Repeat.
//
// Each iteration appends two synthetic leaves to the forest, proves both, and
// verifies the second proof. (The deletion step is currently commented out in
// the upstream flow and remains disabled here.)
func Test2Fwd1Back(t *testing.T) {
	f := NewForest()
	var absidx uint32
	adds := make([]LeafTXO, 2)
	for i := 0; i < 100; i++ {
		for j := range adds {
			// Synthesize a unique, recognizable hash per leaf.
			adds[j].Hash[0] = uint8(absidx>>8) | 0xa0
			adds[j].Hash[1] = uint8(absidx)
			adds[j].Hash[3] = 0xaa
			absidx++
			// if i%30 == 0 {
			// 	utree.Track(adds[i])
			// 	trax = append(trax, adds[i])
			// }
		}
		// t.Logf("-------- block %d\n", i)
		fmt.Printf("\t\t\t########### block %d ##########\n\n", i)
		// add 2
		err := f.Modify(adds, nil)
		if err != nil {
			t.Fatal(err)
		}
		s := f.ToString()
		// Use Print, not Printf: the forest dump is data, not a format
		// string — Printf would mangle any '%' it contains and fails
		// `go vet`'s printf check.
		fmt.Print(s)
		// get proof for the first
		_, err = f.Prove(adds[0].Hash)
		if err != nil {
			t.Fatal(err)
		}
		// delete the first
		// err = f.Modify(nil, []Hash{p.Payload})
		// if err != nil {
		// 	t.Fatal(err)
		// }
		// s = f.ToString()
		// fmt.Print(s)
		// get proof for the 2nd
		keep, err := f.Prove(adds[1].Hash)
		if err != nil {
			t.Fatal(err)
		}
		// check proof
		worked := f.Verify(keep)
		if !worked {
			t.Fatalf("proof at position %d, length %d failed to verify\n",
				keep.Position, len(keep.Siblings))
		}
	}
}
// Add and delete variable numbers, repeat.
// deletions are all on the left side and contiguous.
// Exercises every (x adds, y deletes) pair with 0 <= y < x < 100.
func TestAddxDelyLeftFullBlockProof(t *testing.T) {
	for x := 0; x < 100; x++ {
		for y := 0; y < x; y++ {
			err := AddDelFullBlockProof(x, y)
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}
// Add x, delete y, construct & reconstruct blockproof.
// It builds a forest of nAdds synthetic leaves, proves inclusion of the first
// nDels of them as a block proof, and verifies that proof. Nothing is
// actually deleted.
func AddDelFullBlockProof(nAdds, nDels int) error {
	if nDels > nAdds-1 {
		return fmt.Errorf("too many deletes")
	}
	f := NewForest()
	adds := make([]LeafTXO, nAdds)
	for j := range adds {
		// Synthesize a unique, recognizable hash per leaf.
		adds[j].Hash[0] = uint8(j>>8) | 0xa0
		adds[j].Hash[1] = uint8(j)
		adds[j].Hash[3] = 0xaa
	}
	// add x
	err := f.Modify(adds, nil)
	if err != nil {
		return err
	}
	addHashes := make([]Hash, len(adds))
	for i, h := range adds {
		addHashes[i] = h.Hash
	}
	// get block proof
	bp, err := f.ProveBlock(addHashes[:nDels])
	if err != nil {
		return err
	}
	// check block proof. Note this doesn't delete anything, just proves inclusion
	// NOTE(review): the second return value of VerifyBlockProof is
	// discarded here — confirm it carries nothing that should fail the test.
	worked, _ := VerifyBlockProof(bp, f.GetTops(), f.numLeaves, f.height)
	// worked := f.VerifyBlockProof(bp)
	if !worked {
		return fmt.Errorf("VerifyBlockProof failed")
	}
	fmt.Printf("VerifyBlockProof worked\n")
	return nil
}
|
package storage
import (
"context"
"fmt"
"os"
"testing"
"github.com/databrickslabs/terraform-provider-databricks/access"
"github.com/databrickslabs/terraform-provider-databricks/compute"
"github.com/databrickslabs/terraform-provider-databricks/internal"
"github.com/databrickslabs/terraform-provider-databricks/qa"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/databrickslabs/terraform-provider-databricks/common"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// mountPointThroughReusedCluster skips the test unless the CLOUD_ENV
// environment variable is set, then returns a workspace client together with
// a MountPoint backed by a small cluster that is reused across acceptance
// tests. The mount name is a random 10-character alphanumeric string so
// concurrent runs do not collide.
func mountPointThroughReusedCluster(t *testing.T) (*common.DatabricksClient, MountPoint) {
	if _, ok := os.LookupEnv("CLOUD_ENV"); !ok {
		t.Skip("Acceptance tests skipped unless env 'CLOUD_ENV' is set")
	}
	ctx := context.Background()
	client := common.CommonEnvironmentClient()
	clusterInfo := compute.NewTinyClusterInCommonPoolPossiblyReused()
	randomName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
	return client, MountPoint{
		exec:      client.CommandExecutor(ctx),
		clusterID: clusterInfo.ClusterID,
		name:      randomName,
	}
}
// testWithNewSecretScope creates a temporary secret scope ("test"+suffix)
// holding a single secret under key "key"+suffix, invokes callback with the
// scope and key names, and deletes the scope again afterwards via defer.
func testWithNewSecretScope(t *testing.T, callback func(string, string),
	client *common.DatabricksClient, suffix, secret string) {
	randomScope := "test" + suffix
	randomKey := "key" + suffix
	ctx := context.Background()
	secretScopes := access.NewSecretScopesAPI(ctx, client)
	err := secretScopes.Create(access.SecretScope{
		Name:                   randomScope,
		InitialManagePrincipal: "users",
	})
	require.NoError(t, err)
	defer func() {
		// best-effort cleanup; an assert (not require) so the deferred
		// delete never panics the test goroutine
		err = secretScopes.Delete(randomScope)
		assert.NoError(t, err)
	}()
	secrets := access.NewSecretsAPI(ctx, client)
	err = secrets.Create(secret, randomScope, randomKey)
	require.NoError(t, err)
	callback(randomScope, randomKey)
}
// testMounting mounts m at mp, asserts that the reported source matches the
// mount's own Source(), re-reads the source back from the workspace, and
// always unmounts again via defer.
func testMounting(t *testing.T, mp MountPoint, m Mount) {
	client := common.CommonEnvironmentClient()
	source, err := mp.Mount(m, client)
	assert.Equal(t, m.Source(), source)
	assert.NoError(t, err)
	defer func() {
		err = mp.Delete()
		assert.NoError(t, err)
	}()
	source, err = mp.Source()
	require.Equalf(t, m.Source(), source, "Error: %v", err)
}
// TestAccSourceOnInvalidMountFails verifies that querying the source of a
// mount that was never created yields an empty string and a "Mount not
// found" error.
func TestAccSourceOnInvalidMountFails(t *testing.T) {
	_, mountPoint := mountPointThroughReusedCluster(t)
	src, err := mountPoint.Source()
	assert.Equal(t, "", src)
	qa.AssertErrorStartsWith(t, err, "Mount not found")
}
// TestAccInvalidSecretScopeFails verifies that mounting with a secret scope
// and key that do not exist fails with the expected error message.
func TestAccInvalidSecretScopeFails(t *testing.T) {
	_, mountPoint := mountPointThroughReusedCluster(t)
	workspaceClient := common.CommonEnvironmentClient()
	mount := AzureADLSGen1Mount{
		ClientID:        "abc",
		TenantID:        "bcd",
		PrefixType:      "dfs.adls",
		StorageResource: "def",
		Directory:       "/",
		SecretKey:       "key",
		SecretScope:     "y",
	}
	src, err := mountPoint.Mount(mount, workspaceClient)
	assert.Equal(t, "", src)
	qa.AssertErrorStartsWith(t, err, "Secret does not exist with scope: y and key: key")
}
// TestValidateMountDirectory checks that only empty or absolute directory
// paths pass schema validation.
func TestValidateMountDirectory(t *testing.T) {
	cases := []struct {
		dir        string
		wantErrors int
	}{
		{dir: "", wantErrors: 0},
		{dir: "/directory", wantErrors: 0},
		{dir: "directory", wantErrors: 1},
	}
	for _, c := range cases {
		_, errs := ValidateMountDirectory(c.dir, "key")
		assert.Lenf(t, errs, c.wantErrors, "directory '%s' does not generate the expected error count", c.dir)
	}
}
const expectedCommandResp = "done"
// testMountFuncHelper configures a Databricks client whose command executor
// is mocked, runs mountFunc against a MountPoint named mountName, and
// asserts that exactly the expected command (modulo leading whitespace) was
// submitted and that the canned response came back.
func testMountFuncHelper(t *testing.T, mountFunc func(mp MountPoint, mount Mount) (string, error), mount Mount,
	mountName, expectedCommand string) {
	// "." placeholders satisfy client configuration without reaching a real
	// workspace; all command traffic goes through the mock below.
	c := common.DatabricksClient{
		Host:  ".",
		Token: ".",
	}
	err := c.Configure()
	assert.NoError(t, err)
	var called bool
	c.WithCommandMock(func(commandStr string) common.CommandResults {
		called = true
		// compare with leading whitespace stripped so the indentation of
		// raw-string literals does not matter
		assert.Equal(t, internal.TrimLeadingWhitespace(expectedCommand), internal.TrimLeadingWhitespace(commandStr))
		return common.CommandResults{
			ResultType: "text",
			Data:       expectedCommandResp,
		}
	})
	ctx := context.Background()
	mp := MountPoint{
		exec:      c.CommandExecutor(ctx),
		clusterID: "random_cluster_id",
		name:      mountName,
	}
	resp, err := mountFunc(mp, mount)
	assert.NoError(t, err)
	assert.True(t, called, "mocked command was not invoked")
	assert.Equal(t, expectedCommandResp, resp)
}
// mockMount is a minimal Mount implementation used to exercise MountPoint
// command generation without touching a real workspace.
// All methods use the same receiver name `m`; the original mixed `t` and
// `m`, which is against Go convention (one consistent receiver per type)
// and confusing in a test file where `t` usually means *testing.T.
type mockMount struct{}

// Source returns a fixed fake source URI.
func (m mockMount) Source() string { return "fake-mount" }

// Name returns a fixed fake mount name.
func (m mockMount) Name() string { return "fake-mount" }

// Config returns a fixed single-entry extra-config map.
func (m mockMount) Config(client *common.DatabricksClient) map[string]string {
	return map[string]string{"fake-key": "fake-value"}
}

// ValidateAndApplyDefaults is a no-op for the mock.
func (m mockMount) ValidateAndApplyDefaults(d *schema.ResourceData, client *common.DatabricksClient) error {
	return nil
}
// TestMountPoint_Mount checks the Python command generated for mounting:
// safe_mount is idempotent (returns early if the mount already exists),
// unmounts again on failure, and exits the notebook with the mount source.
// The helper compares commands with leading whitespace stripped, so the
// indentation inside the raw string is not significant.
func TestMountPoint_Mount(t *testing.T) {
	mount := mockMount{}
	expectedMountSource := "fake-mount"
	expectedMountConfig := `{"fake-key":"fake-value"}`
	mountName := "this_mount"
	expectedCommand := fmt.Sprintf(`
		def safe_mount(mount_point, mount_source, configs, encryptionType):
			for mount in dbutils.fs.mounts():
				if mount.mountPoint == mount_point and mount.source == mount_source:
					return
			try:
				dbutils.fs.mount(mount_source, mount_point, extra_configs=configs, encryption_type=encryptionType)
				dbutils.fs.refreshMounts()
				dbutils.fs.ls(mount_point)
				return mount_source
			except Exception as e:
				try:
					dbutils.fs.unmount(mount_point)
				except Exception as e2:
					print("Failed to unmount", e2)
				raise e
		mount_source = safe_mount("/mnt/%s", %q, %s, "")
		dbutils.notebook.exit(mount_source)
	`, mountName, expectedMountSource, expectedMountConfig)
	testMountFuncHelper(t, func(mp MountPoint, mount Mount) (s string, e error) {
		client := common.DatabricksClient{
			Host:  ".",
			Token: ".",
		}
		return mp.Mount(mount, &client)
	}, mount, mountName, expectedCommand)
}
// TestMountPoint_Source checks the Python command generated for reading a
// mount's source: it scans dbutils.fs.mounts() for /mnt/<name> and raises
// "Mount not found" if the mount point is absent.
func TestMountPoint_Source(t *testing.T) {
	mountName := "this_mount"
	expectedCommand := fmt.Sprintf(`
		dbutils.fs.refreshMounts()
		for mount in dbutils.fs.mounts():
			if mount.mountPoint == "/mnt/%s":
				dbutils.notebook.exit(mount.source)
		raise Exception("Mount not found")
	`, mountName)
	testMountFuncHelper(t, func(mp MountPoint, mount Mount) (s string, e error) {
		return mp.Source()
	}, nil, mountName, expectedCommand)
}
// TestMountPoint_Delete checks the Python command generated for unmounting:
// it exits early with "success" when the mount point does not exist,
// otherwise unmounts and verifies the mount point is really gone.
func TestMountPoint_Delete(t *testing.T) {
	mountName := "this_mount"
	expectedCommand := fmt.Sprintf(`
		found = False
		mount_point = "/mnt/%s"
		dbutils.fs.refreshMounts()
		for mount in dbutils.fs.mounts():
			if mount.mountPoint == mount_point:
				found = True
		if not found:
			dbutils.notebook.exit("success")
		dbutils.fs.unmount(mount_point)
		dbutils.fs.refreshMounts()
		for mount in dbutils.fs.mounts():
			if mount.mountPoint == mount_point:
				raise Exception("Failed to unmount")
		dbutils.notebook.exit("success")
	`, mountName)
	testMountFuncHelper(t, func(mp MountPoint, mount Mount) (s string, e error) {
		return expectedCommandResp, mp.Delete()
	}, nil, mountName, expectedCommand)
}
// TestDeletedMountClusterRecreates verifies that getMountingClusterID
// creates a fresh mounting cluster ("bcd") when the remembered cluster
// ("abc") no longer exists (the GET returns 404).
func TestDeletedMountClusterRecreates(t *testing.T) {
	qa.HTTPFixturesApply(t, []qa.HTTPFixture{
		// the previously used cluster is gone
		{
			Method:   "GET",
			Resource: "/api/2.0/clusters/get?cluster_id=abc",
			Status:   404,
		},
		// no other clusters exist that could be reused
		{
			Method:       "GET",
			ReuseRequest: true,
			Resource:     "/api/2.0/clusters/list",
			Response:     map[string]interface{}{},
		},
		{
			Method:       "GET",
			ReuseRequest: true,
			Resource:     "/api/2.0/clusters/spark-versions",
			Response: compute.SparkVersionsList{
				SparkVersions: []compute.SparkVersion{
					{
						Version:     "7.1.x-cpu-ml-scala2.12",
						Description: "7.1 ML (includes Apache Spark 3.0.0, Scala 2.12)",
					},
				},
			},
		},
		{
			Method:       "GET",
			ReuseRequest: true,
			Resource:     "/api/2.0/clusters/list-node-types",
			Response: compute.NodeTypeList{
				NodeTypes: []compute.NodeType{
					{
						NodeTypeID:     "Standard_F4s",
						InstanceTypeID: "Standard_F4s",
						MemoryMB:       8192,
						NumCores:       4,
						NodeInstanceType: &compute.NodeInstanceType{
							LocalDisks:      1,
							InstanceTypeID:  "Standard_F4s",
							LocalDiskSizeGB: 16,
							LocalNVMeDisks:  0,
						},
					},
				},
			},
		},
		// a new single-node mounting cluster gets created.
		// NOTE(review): the expected request pins SparkVersion 7.3.x while
		// the spark-versions fixture above only lists 7.1.x — presumably the
		// mounting cluster hardcodes its version; confirm against
		// getMountingClusterID's implementation.
		{
			Method:       "POST",
			ReuseRequest: true,
			Resource:     "/api/2.0/clusters/create",
			ExpectedRequest: compute.Cluster{
				AutoterminationMinutes: 10,
				ClusterName:            "terraform-mount",
				NodeTypeID:             "Standard_F4s",
				SparkVersion:           "7.3.x-scala2.12",
				CustomTags: map[string]string{
					"ResourceClass": "SingleNode",
				},
				SparkConf: map[string]string{
					"spark.databricks.cluster.profile": "singleNode",
					"spark.master":                     "local[*]",
					"spark.scheduler.mode":             "FIFO",
				},
			},
			Response: compute.ClusterID{
				ClusterID: "bcd",
			},
		},
		// the new cluster reports RUNNING, so no waiting is required
		{
			Method:       "GET",
			ReuseRequest: true,
			Resource:     "/api/2.0/clusters/get?cluster_id=bcd",
			Response: compute.ClusterInfo{
				ClusterID: "bcd",
				State:     "RUNNING",
				SparkConf: map[string]string{
					"spark.databricks.acl.dfAclsEnabled": "true",
					"spark.databricks.cluster.profile":   "singleNode",
					"spark.scheduler.mode":               "FIFO",
				},
			},
		},
	}, func(ctx context.Context, client *common.DatabricksClient) {
		clusterID, err := getMountingClusterID(ctx, client, "abc")
		assert.NoError(t, err)
		assert.Equal(t, "bcd", clusterID)
	})
}
// TestOldMountImplementations covers the legacy mount types' Name and
// ValidateAndApplyDefaults implementations.
// assert.Equal takes (t, expected, actual); the original passed the actual
// value first, which inverts testify's failure messages.
func TestOldMountImplementations(t *testing.T) {
	n := "test"
	m1 := AzureADLSGen2Mount{ContainerName: n}
	assert.Equal(t, n, m1.Name())
	assert.Nil(t, m1.ValidateAndApplyDefaults(nil, nil))
	m2 := AzureBlobMount{ContainerName: n}
	assert.Equal(t, n, m2.Name())
	assert.Nil(t, m2.ValidateAndApplyDefaults(nil, nil))
	m3 := AzureADLSGen1Mount{StorageResource: n}
	assert.Equal(t, n, m3.Name())
	assert.Nil(t, m3.ValidateAndApplyDefaults(nil, nil))
	m4 := AWSIamMount{S3BucketName: n}
	assert.Equal(t, n, m4.Name())
	assert.Nil(t, m4.ValidateAndApplyDefaults(nil, nil))
}
|
package keptn
import (
"errors"
"fmt"
"github.com/keptn-contrib/dynatrace-service/internal/common"
keptnmodels "github.com/keptn/go-utils/pkg/api/models"
api "github.com/keptn/go-utils/pkg/api/utils"
log "github.com/sirupsen/logrus"
)
// ConfigResourceClientInterface defines the methods for interacting with resources of Keptn's configuration service
type ConfigResourceClientInterface interface {
	// GetResource returns the first matching resource found on service,
	// stage or project level (searched in that order).
	GetResource(project string, stage string, service string, resourceURI string) (string, error)
	// GetProjectResource returns a resource stored on project level.
	GetProjectResource(project string, resourceURI string) (string, error)
	// GetStageResource returns a resource stored on stage level.
	GetStageResource(project string, stage string, resourceURI string) (string, error)
	// GetServiceResource returns a resource stored on service level.
	GetServiceResource(project string, stage string, service string, resourceURI string) (string, error)
	// UploadResource stores contentToUpload under remoteResourceURI for the
	// given project, stage and service.
	UploadResource(contentToUpload []byte, remoteResourceURI string, project string, stage string, service string) error
}
// ResourceError holds the location (URI, project, stage, service) of a
// resource involved in an error; it is the shared basis for the concrete
// resource error types below.
type ResourceError struct {
	uri     string
	project string
	stage   string
	service string
}
// ResourceNotFoundError indicates that a resource could not be found.
type ResourceNotFoundError ResourceError

// Error returns a string representation of this error.
func (e *ResourceNotFoundError) Error() string {
	location := getLocation(e.service, e.stage, e.project)
	return fmt.Sprintf("could not find resource: '%s' %s", e.uri, location)
}
// ResourceEmptyError indicates that a resource was found but has no content.
type ResourceEmptyError ResourceError

// Error returns a string representation of this error.
func (e *ResourceEmptyError) Error() string {
	location := getLocation(e.service, e.stage, e.project)
	return fmt.Sprintf("found resource: '%s' %s, but it is empty", e.uri, location)
}
// ResourceUploadFailedError indicates that a resource could not be uploaded.
type ResourceUploadFailedError struct {
	ResourceError
	message string
}

// Error returns a string representation of this error.
func (e *ResourceUploadFailedError) Error() string {
	location := getLocation(e.service, e.stage, e.project)
	return fmt.Sprintf("could not upload resource: '%s' %s: %s", e.uri, location, e.message)
}
// ResourceRetrievalFailedError indicates that a resource could not be
// retrieved because of an underlying error.
type ResourceRetrievalFailedError struct {
	ResourceError
	message string
}

// Error returns a string representation of this error.
func (e *ResourceRetrievalFailedError) Error() string {
	location := getLocation(e.service, e.stage, e.project)
	return fmt.Sprintf("could not retrieve resource: '%s' %s: %s", e.uri, location, e.message)
}
// getLocation renders a human-readable " for service X at stage Y of
// project Z" suffix, omitting every empty component.
func getLocation(service string, stage string, project string) string {
	var location string
	for _, part := range []struct{ label, value string }{
		{"for service", service},
		{"at stage", stage},
		{"of project", project},
	} {
		if part.value != "" {
			location += fmt.Sprintf(" %s '%s'", part.label, part.value)
		}
	}
	return location
}
// ConfigResourceClient is the default implementation for the ConfigResourceClientInterface using a Keptn api.ResourceHandler
type ConfigResourceClient struct {
	// handler performs the actual HTTP calls against the configuration service.
	handler *api.ResourceHandler
}
// NewDefaultConfigResourceClient creates a new ResourceClient with a default
// Keptn resource handler pointing at the configured configuration service URL.
func NewDefaultConfigResourceClient() *ConfigResourceClient {
	return NewConfigResourceClient(
		api.NewResourceHandler(common.GetConfigurationServiceURL()))
}
// NewConfigResourceClient wraps the given Keptn resource handler in a
// ConfigResourceClient.
func NewConfigResourceClient(handler *api.ResourceHandler) *ConfigResourceClient {
	return &ConfigResourceClient{handler: handler}
}
// GetResource tries to find the first instance of a given resource on
// service, stage or project level, searched in that order. It returns a
// *ResourceNotFoundError when the resource exists on none of the levels and
// passes through any other retrieval error unchanged.
func (rc *ConfigResourceClient) GetResource(project string, stage string, service string, resourceURI string) (string, error) {
	var rnfErrorType *ResourceNotFoundError
	if project != "" && stage != "" && service != "" {
		keptnResourceContent, err := rc.GetServiceResource(project, stage, service, resourceURI)
		if errors.As(err, &rnfErrorType) {
			log.WithFields(
				log.Fields{
					"project": project,
					"stage":   stage,
					"service": service,
				}).Debugf("%s not available for service", resourceURI)
		} else if err != nil {
			return "", err
		} else {
			log.WithFields(
				log.Fields{
					"project": project,
					"stage":   stage,
					"service": service,
				}).Infof("Found %s for service", resourceURI)
			return keptnResourceContent, nil
		}
	}
	if project != "" && stage != "" {
		keptnResourceContent, err := rc.GetStageResource(project, stage, resourceURI)
		if errors.As(err, &rnfErrorType) {
			log.WithFields(
				log.Fields{
					"project": project,
					"stage":   stage,
				}).Debugf("%s not available for stage", resourceURI)
		} else if err != nil {
			return "", err
		} else {
			log.WithFields(
				log.Fields{
					"project": project,
					"stage":   stage,
				}).Infof("Found %s for stage", resourceURI)
			return keptnResourceContent, nil
		}
	}
	if project != "" {
		keptnResourceContent, err := rc.GetProjectResource(project, resourceURI)
		// GetProjectResource maps api.ResourceNotFoundError into a
		// *ResourceNotFoundError, so it must be matched with errors.As like
		// the levels above. The previous comparison with
		// api.ResourceNotFoundError was never true, which turned a missing
		// project resource into a hard error instead of falling through.
		if errors.As(err, &rnfErrorType) {
			log.WithField("project", project).Debugf("%s not available for project", resourceURI)
		} else if err != nil {
			return "", err
		} else {
			log.WithField("project", project).Infof("Found %s for project", resourceURI)
			return keptnResourceContent, nil
		}
	}
	log.Infof("%s not found", resourceURI)
	return "", &ResourceNotFoundError{uri: resourceURI, project: project, stage: stage, service: service}
}
// GetServiceResource tries to retrieve a resourceURI on service level.
// Outcomes are mapped to *ResourceNotFoundError,
// *ResourceRetrievalFailedError or *ResourceEmptyError by getResourceByFunc.
func (rc *ConfigResourceClient) GetServiceResource(project string, stage string, service string, resourceURI string) (string, error) {
	return getResourceByFunc(
		func() (*keptnmodels.Resource, error) {
			return rc.handler.GetServiceResource(project, stage, service, resourceURI)
		},
		func() *ResourceNotFoundError {
			return &ResourceNotFoundError{uri: resourceURI, project: project, stage: stage, service: service}
		},
		func(msg string) *ResourceRetrievalFailedError {
			return &ResourceRetrievalFailedError{ResourceError{uri: resourceURI, project: project, stage: stage, service: service}, msg}
		},
		func() *ResourceEmptyError {
			return &ResourceEmptyError{uri: resourceURI, project: project, stage: stage, service: service}
		})
}
// GetStageResource tries to retrieve a resourceURI on stage level.
func (rc *ConfigResourceClient) GetStageResource(project string, stage string, resourceURI string) (string, error) {
	return getResourceByFunc(
		func() (*keptnmodels.Resource, error) {
			return rc.handler.GetStageResource(project, stage, resourceURI)
		},
		func() *ResourceNotFoundError {
			return &ResourceNotFoundError{uri: resourceURI, project: project, stage: stage}
		},
		func(msg string) *ResourceRetrievalFailedError {
			resErr := ResourceError{uri: resourceURI, project: project, stage: stage}
			return &ResourceRetrievalFailedError{resErr, msg}
		},
		func() *ResourceEmptyError {
			return &ResourceEmptyError{uri: resourceURI, project: project, stage: stage}
		})
}
// GetProjectResource tries to retrieve a resourceURI on project level.
func (rc *ConfigResourceClient) GetProjectResource(project string, resourceURI string) (string, error) {
	return getResourceByFunc(
		func() (*keptnmodels.Resource, error) {
			return rc.handler.GetProjectResource(project, resourceURI)
		},
		func() *ResourceNotFoundError {
			return &ResourceNotFoundError{uri: resourceURI, project: project}
		},
		func(msg string) *ResourceRetrievalFailedError {
			resErr := ResourceError{uri: resourceURI, project: project}
			return &ResourceRetrievalFailedError{resErr, msg}
		},
		func() *ResourceEmptyError {
			return &ResourceEmptyError{uri: resourceURI, project: project}
		})
}
// getResourceByFunc retrieves a resource via resFunc and maps the possible
// outcomes onto the typed errors produced by the given constructors:
// rnfErrFunc for not-found, rrfErrFunc for any other retrieval failure, and
// reErrFunc when the resource exists but has empty content.
func getResourceByFunc(
	resFunc func() (*keptnmodels.Resource, error),
	rnfErrFunc func() *ResourceNotFoundError,
	rrfErrFunc func(msg string) *ResourceRetrievalFailedError,
	reErrFunc func() *ResourceEmptyError) (string, error) {
	resource, err := resFunc()
	if err != nil {
		// errors.Is also matches a wrapped api.ResourceNotFoundError,
		// whereas == only matched the bare sentinel value.
		if errors.Is(err, api.ResourceNotFoundError) {
			return "", rnfErrFunc()
		}
		return "", rrfErrFunc(err.Error())
	}
	if resource.ResourceContent == "" {
		return "", reErrFunc()
	}
	return resource.ResourceContent, nil
}
// UploadResource stores contentToUpload under remoteResourceURI on service
// level (project/stage/service). Any failure is wrapped in a
// *ResourceUploadFailedError.
func (rc *ConfigResourceClient) UploadResource(contentToUpload []byte, remoteResourceURI string, project string, stage string, service string) error {
	resources := []*keptnmodels.Resource{{ResourceContent: string(contentToUpload), ResourceURI: &remoteResourceURI}}
	_, err := rc.handler.CreateResources(project, stage, service, resources)
	if err != nil {
		// err is the Keptn models error type here, hence GetMessage()
		// rather than Error().
		return &ResourceUploadFailedError{
			ResourceError{
				uri:     remoteResourceURI,
				project: project,
				stage:   stage,
				service: service,
			},
			err.GetMessage(),
		}
	}
	log.WithField("remoteResourceURI", remoteResourceURI).Info("Uploaded file")
	return nil
}
|
package routes
import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"net"
"net/http"
"os"
"strings"
"ark/store"
)
const (
	// routesCmd and backendsCmd are the top-level CLI subcommands handled
	// by this package.
	routesCmd   = "routes"
	backendsCmd = "backends"
)

// errNotImplemented flags CLI paths that are not available yet.
// NOTE(review): not referenced in this part of the file — verify it is used
// elsewhere before removing.
var errNotImplemented = errors.New("not implemented")
// CanRun reports whether args names a subcommand handled by this package.
// An empty argument list is rejected instead of panicking on args[0].
func CanRun(args []string) bool {
	if len(args) == 0 {
		return false
	}
	return args[0] == routesCmd || args[0] == backendsCmd
}
// Run dispatches to the routes or backends subcommand. An unknown command is
// reported on stderr and terminates the process with status 1.
func Run(laddr net.Addr, args []string) {
	switch args[0] {
	case routesCmd:
		runRoutes(laddr, args)
	case backendsCmd:
		runBackends(laddr, args)
	default:
		// Report the token actually inspected (args[0]); the original
		// printed args[1], which is the wrong value and panics when only
		// one argument is given.
		fmt.Fprintf(os.Stderr, "'%s' is not a command", args[0])
		os.Exit(1)
	}
}
// routesUsage prints a placeholder usage message for the routes subcommand
// and exits with status 1.
func routesUsage() {
	// TODO(knorton): Fix this.
	fmt.Fprintln(os.Stderr, "routes usage")
	os.Exit(1)
}
// urlFor produces a URL from the address and the uri
func urlFor(laddr net.Addr, uri string) string {
return fmt.Sprintf("http://%s%s", laddr.String(), uri)
}
func decodeJSON(res *http.Response, data interface{}) error {
switch res.StatusCode {
case http.StatusOK:
return json.NewDecoder(res.Body).Decode(data)
case http.StatusNoContent:
return nil
}
var e struct {
Error string `json:"error"`
}
if err := json.NewDecoder(res.Body).Decode(&e); err != nil {
return fmt.Errorf("%d: %s", res.StatusCode,
http.StatusText(res.StatusCode))
}
return errors.New(e.Error)
}
// getJSON GETs uri from laddr and decodes the JSON response into dst.
func getJSON(laddr net.Addr, uri string, dst interface{}) error {
	resp, err := http.Get(urlFor(laddr, uri))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return decodeJSON(resp, dst)
}
func postJSON(laddr net.Addr, uri string, src, dst interface{}) error {
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(src); err != nil {
return err
}
res, err := http.Post(
fmt.Sprintf("http://%s%s", laddr.String(), uri),
"application/json",
&buf)
if err != nil {
return err
}
defer res.Body.Close()
return decodeJSON(res, dst)
}
// errorLn prints msg to stderr and terminates the process with status 1.
func errorLn(msg string) {
	fmt.Fprintln(os.Stderr, msg)
	os.Exit(1)
}
// errorf prints a formatted message to stderr (no newline is appended) and
// terminates the process with status 1.
func errorf(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, format, args...)
	os.Exit(1)
}
// deleteRoute issues DELETE /api/v1/routes/{name}; any failure is reported
// on stderr and exits the process.
func deleteRoute(laddr net.Addr, args []string) {
	url := urlFor(laddr, fmt.Sprintf("/api/v1/routes/%s", args[0]))
	req, err := http.NewRequest(http.MethodDelete, url, nil)
	if err != nil {
		errorLn(err.Error())
	}
	var client http.Client
	res, err := client.Do(req)
	if err != nil {
		errorLn(err.Error())
	}
	defer res.Body.Close()
	if err := decodeJSON(res, nil); err != nil {
		errorLn(err.Error())
	}
}
// createRoutes handles `routes create [-port N] <name> <host>...`: it builds
// a store.Route from the flags and arguments, POSTs it to the API, and
// prints the created route's name.
func createRoutes(laddr net.Addr, args []string) {
	// PanicOnError: flag parsing failures abort the process.
	f := flag.NewFlagSet("create-routes", flag.PanicOnError)
	flagPort := f.Int("port", 80, "tcp port")
	f.Parse(args)
	if f.NArg() < 2 {
		errorLn("routes create help")
	}
	rt := store.Route{
		Name:  f.Arg(0),
		Port:  int32(*flagPort),
		Hosts: f.Args()[1:],
	}
	// rt doubles as request and response target, so the server-assigned
	// fields come back on the same struct.
	if err := postJSON(laddr, "/api/v1/routes", &rt, &rt); err != nil {
		errorLn(err.Error())
	}
	fmt.Println(rt.Name)
}
// listRoutes handles `routes ls`: it fetches all routes and prints an
// aligned table of name, port, hosts and backends.
func listRoutes(laddr net.Addr, args []string) {
	var rts []*store.Route
	if err := getJSON(laddr, "/api/v1/routes", &rts); err != nil {
		errorLn(err.Error())
	}
	// fixed-width verbs keep the header aligned with the rows below
	fmt.Printf("%- 15s % 5s %- 30s %-30s\n", "NAME", "PORT", "HOSTS", "BACKENDS")
	for _, rt := range rts {
		fmt.Printf("%- 15s % 5d %- 30s %- 30s\n",
			rt.Name,
			rt.Port,
			strings.Join(rt.Hosts, ","),
			strings.Join(rt.Backends, ","))
	}
}
// runRoutes dispatches `routes <create|rm|ls> ...`. args still includes the
// leading "routes" token, so the action is args[1] and its own arguments
// start at args[2].
func runRoutes(laddr net.Addr, args []string) {
	if len(args) < 2 {
		routesUsage()
	}
	switch args[1] {
	case "create":
		createRoutes(laddr, args[2:])
	case "rm":
		deleteRoute(laddr, args[2:])
	case "ls":
		listRoutes(laddr, args[2:])
	default:
		errorf("'%s' is not a routes command.\n", args[1])
	}
}
// backendsUsage prints a placeholder usage message for the backends
// subcommand and exits with status 1.
func backendsUsage() {
	fmt.Fprintln(os.Stderr, "backends usage")
	os.Exit(1)
}
// setBackends replaces the backend list of the route called name with args
// and prints the resulting backends one per line.
func setBackends(laddr net.Addr, name string, args []string) {
	var backends []string
	uri := fmt.Sprintf("/api/v1/routes/%s/backends", name)
	if err := postJSON(laddr, uri, &args, &backends); err != nil {
		errorLn(err.Error())
	}
	for _, backend := range backends {
		fmt.Println(backend)
	}
}
// getBackends fetches the backend list of the route called name and prints
// the backends one per line.
func getBackends(laddr net.Addr, name string) {
	var backends []string
	uri := fmt.Sprintf("/api/v1/routes/%s/backends", name)
	if err := getJSON(laddr, uri, &backends); err != nil {
		errorLn(err.Error())
	}
	for _, backend := range backends {
		fmt.Println(backend)
	}
}
// runBackends dispatches `backends <name> <set|get> ...`. args still
// includes the leading "backends" token, so the route name is args[1], the
// action is args[2], and set's backend arguments start at args[3].
func runBackends(laddr net.Addr, args []string) {
	if len(args) < 3 {
		backendsUsage()
	}
	switch args[2] {
	case "set":
		setBackends(laddr, args[1], args[3:])
	case "get":
		getBackends(laddr, args[1])
	default:
		errorf("'%s' is not a backends command.\n", args[2])
	}
}
|
package partition_test
import (
"fmt"
"testing"
"time"
"github.com/Workiva/go-datastructures/queue"
"github.com/stretchr/testify/assert"
"github.com/zhuangzhi/go-programming/partition"
)
// TestPartitionTable completely fills a 1024x1024 partition table, checks
// that a further insert is rejected, and resets the table.
func TestPartitionTable(t *testing.T) {
	pt := partition.NewPartitionTable(1024)
	for row := 0; row < 1024; row++ {
		for col := 0; col < 1024; col++ {
			assert.True(t, pt.Add(row, col))
		}
	}
	assert.False(t, pt.Add(1, 1))
	pt.Reset()
}
// TestDistributeUnownedPartitions shuffles 271 partition ids, queues them
// alongside 30 nodes in ring buffers, distributes replica-0 ownership, and
// prints each node's partition count for manual inspection.
func TestDistributeUnownedPartitions(t *testing.T) {
	partitionNumber := 271
	nodes := queue.NewRingBuffer(1024)
	ids := make([]int, partitionNumber)
	for i := 0; i < partitionNumber; i++ {
		ids[i] = i
	}
	// shuffle so the distribution order is not correlated with id order
	partition.Shuffle(ids)
	partitionIDs := queue.NewRingBuffer(1024)
	for i := 0; i < 30; i++ {
		// NOTE(review): RingBuffer.Put's error is ignored here and below;
		// the buffers (capacity 1024) are larger than the item counts, so
		// Put should not fail — confirm against the queue package docs.
		nodes.Put(&partition.Node{
			Replica: partition.Replica{
				Address: fmt.Sprint(i),
				UUID:    fmt.Sprint(i),
			},
			Table: partition.NewPartitionTable(300),
		})
	}
	for i := 0; i < partitionNumber; i++ {
		partitionIDs.Put(ids[i])
	}
	partition.DistributeUnownedPartitions(nodes, partitionIDs, 0)
	// drain the node buffer; Poll times out once every node was consumed
	for {
		n, err := nodes.Poll(time.Millisecond)
		if err != nil {
			break
		}
		node := n.(*partition.Node)
		ids := node.Table.GetPartitions(0).Ids()
		fmt.Printf("Node: %v, Count: %v, partitions:%v\n", node.Replica.Address, len(ids), ids)
	}
}
|
package main
import (
"fmt"
"math"
)
// Point is a point in the 2D plane.
type Point struct {
	X, Y float64
}

// Distance returns the Euclidean distance from p to q.
func (p Point) Distance(q Point) float64 {
	dx, dy := q.X-p.X, q.Y-p.Y
	return math.Hypot(dx, dy)
}

// ScaleBy multiplies both coordinates of p by factor in place; the pointer
// receiver makes the modification visible to the caller.
func (p *Point) ScaleBy(factor float64) {
	p.X, p.Y = p.X*factor, p.Y*factor
}
// Line is a segment between two points.
type Line struct {
	Start Point
	End   Point
}

// Length returns the Euclidean length of the segment.
func (l Line) Length() float64 {
	return l.Start.Distance(l.End)
}
// Path is a polyline: consecutive points joined by straight segments.
type Path []Point

// Distance returns the total length of the path, i.e. the sum of the
// distances between consecutive points.
func (path Path) Distance() float64 {
	total := 0.0
	for i := 1; i < len(path); i++ {
		total += path[i-1].Distance(path[i])
	}
	return total
}
// main demonstrates Go method sets: value vs pointer receivers, automatic
// address-taking, method values, and method expressions.
func main() {
	p := Point{1, 2}
	q := Point{4, 6}
	fmt.Println(p.Distance(q))
	line := Line{p, q}
	fmt.Println(line.Length())
	// Compute the perimeter of a triangle (the path returns to its start).
	perim := Path{
		{1, 1},
		{5, 1},
		{5, 4},
		{1, 1},
	}
	fmt.Println(perim.Distance())
	// ScaleBy has a pointer receiver; Go takes &p automatically because p
	// is addressable.
	p.ScaleBy(200)
	fmt.Println(p)
	r := &Point{1, 2}
	r.ScaleBy(2)
	fmt.Println(*r)
	p = Point{1, 2}
	pptr := &p
	pptr.ScaleBy(2)
	fmt.Println(p) // {2, 4}
	p2 := Point{1, 2}
	(&p2).ScaleBy(2)
	fmt.Println(p2) // {2, 4}
	// Method values: distanceFromA binds the receiver a.
	a := Point{1, 2}
	b := Point{4, 6}
	distanceFromA := a.Distance
	fmt.Println(distanceFromA(b)) // 5
	origin := Point{0, 0}
	fmt.Println(distanceFromA(origin)) // 2.23606797749979
	scaleA := a.ScaleBy
	scaleA(2)
	fmt.Println(a)
	// Method expressions: the receiver becomes the first argument.
	distance := Point.Distance // method expression taken from Point's method set
	fmt.Println(distance(a, b))
	fmt.Printf("%T\n", distance)
	// scale := Point.ScaleBy // compile error: invalid method expression Point.ScaleBy (needs pointer receiver: (*Point).ScaleBy)
	scale := (*Point).ScaleBy
	scale(&a, 2)
	fmt.Println(a)
	fmt.Printf("%T\n", scale)
}
|
package leetcode
// isHappy reports whether n is a happy number: repeatedly replacing n by the
// sum of the squares of its decimal digits eventually reaches 1. Unhappy
// numbers instead enter a cycle, which is detected by remembering every
// complete digit-square sum seen so far.
//
// The original version converted digits via strconv (not imported in this
// package) and checked the seen-map against *partial* sums inside the digit
// loop, which could falsely report an unhappy number whenever a partial sum
// collided with a previously recorded complete sum. Digits are now extracted
// arithmetically and the cycle check runs once per complete sum.
func isHappy(n int) bool {
	seen := make(map[int]struct{})
	for n != 1 {
		if _, ok := seen[n]; ok {
			// we have been at this value before: a cycle, so never 1
			return false
		}
		seen[n] = struct{}{}
		sum := 0
		for n > 0 {
			d := n % 10
			sum += d * d
			n /= 10
		}
		n = sum
	}
	return true
}
|
package heaps
import (
"fmt"
"reflect"
"testing"
)
// TestHeap builds a max-heap from a scrambled array, prints it, then heap
// sorts the array and verifies the result is ascending.
func TestHeap(t *testing.T) {
	data := []int{9, 7, 8, 5, 6, 4, 3, 2, 0, 1}
	maxHeapify(data, 0, len(data))
	fmt.Printf("Max Heap:\n")
	String(data)
	want := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	HeapSort(data)
	if !reflect.DeepEqual(data, want) {
		t.Errorf("Expected %v but got %v\n", want, data)
	}
	fmt.Printf("Sorted array:\n")
	String(data)
}
|
package main
import (
"log"
"net/http"
"github.com/gorilla/mux"
controller "./controller"
help "./helper"
model "./model"
config "./config"
)
func main() {
model.InitBdd()
defer model.Bdd.Close()
router := mux.NewRouter()
/**
ENDPOINT /ping
*/
{
/**
* @api {get} /ping Get
* @apiDescription Ping le serveur pour vérifier qu'il fonctionne
* @apiGroup Ping
*/
router.HandleFunc("/ping", Ping).Methods("GET")
/**
* @api {post} /ping Post
* @apiDescription Ping le serveur pour vérifier qu'il fonctionne
* @apiGroup Ping
*/
router.HandleFunc("/ping", Ping).Methods("POST")
/**
* @api {put} /ping Put
* @apiDescription Ping le serveur pour vérifier qu'il fonctionne
* @apiGroup Ping
*/
router.HandleFunc("/ping", Ping).Methods("PUT")
/**
* @api {delete} /ping Delete
* @apiDescription Ping le serveur pour vérifier qu'il fonctionne
* @apiGroup Ping
*/
router.HandleFunc("/ping", Ping).Methods("DELETE")
/**
* @api {options} /ping Options
* @apiDescription Renvoie la liste des méthodes autorisées
* @apiGroup Ping
*/
router.HandleFunc("/ping", OptionsPing).Methods("OPTIONS")
}
/**
ENDPOINT /ressource
*/
{
/**
* @api {get} /ressource?id={id} Structure
* @apiDescription Récupère la structure complète d'une ressource
* @apiGroup Ressource
*
* @apiParam {Number} id L'id de la ressource
* @apiSuccess {Object} Ressource Les informations de la ressource
* @apiSuccessExample {json} Exemple de retour :
* {
* "id": 1,
* "nom": "user",
* "createur": "2alheure",
* "date_creation": "2018-05-20 15:27:38",
* "champs": [
* {
* "id": 3,
* "clef": "age",
* "regle": {
* "id": 11,
* "nom": "Compris entre",
* "parametres": [
* {
* "id": 13,
* "type": "float",
* "value": "0"
* },
* {
* "id": 10,
* "type": "float",
* "value": "100"
* }
* ]
* }
* },
* {
* "id": 1,
* "clef": "nom",
* "regle": {
* "id": 10,
* "nom": "Dictionnaire",
* "parametres": [
* {
* "id": 8,
* "type": "string",
* "value": "nom"
* }
* ]
* }
* },
* {
* "id": 2,
* "clef": "prenom",
* "regle": {
* "id": 10,
* "nom": "Dictionnaire",
* "parametres": [
* {
* "id": 9,
* "type": "string",
* "value": "prenom"
* }
* ]
* }
* },
* {
* "id": 4,
* "clef": "sexe",
* "regle": {
* "id": 1,
* "nom": "Regex",
* "parametres": [
* {
* "id": 11,
* "type": "string",
* "value": "[Homme|Femme]"
* }
* ]
* }
* },
* {
* "id": 5,
* "clef": "ville",
* "regle": {
* "id": 10,
* "nom": "Dictionnaire",
* "parametres": [
* {
* "id": 12,
* "type": "string",
* "value": "ville"
* }
* ]
* }
* }
* ]
* }
*/
router.HandleFunc("/ressource", controller.GetRessource).Methods("GET")
/**
* @api {post} /ressource Créer
* @apiDescription Crée une ressource
* @apiGroup Ressource
*
* @apiParam {String} nom Le nom de la ressource
* @apiParam {String} createur Le nom du créateur de la ressource
*
* @apiSuccess (Created 201) {Object} Ressource Les informations de la ressource nouvellement créée
* @apiSuccessExample {json} Exemple de retour :
* {
* "id": 6,
* "nom": "ressource",
* "createur": "frambur",
* "date_creation": "2018-05-28 22:52:26"
* }
*/
router.HandleFunc("/ressource", controller.CreateRessource).Methods("POST")
/**
* @api {put} /ressource Modifier
* @apiDescription Modifie une ressource
* @apiGroup Ressource
*
* @apiParam {String} id L'id de la ressource à modifier
* @apiParam {String} [nom] Le nouveau nom de la ressource
* @apiParam {Number} [createur] Le nom de la personne à qui réaffecter la création de la ressource
*/
router.HandleFunc("/ressource", controller.ModifyRessource).Methods("PUT")
/**
* @api {delete} /ressource Supprimer
* @apiDescription Supprime une ressource
* @apiGroup Ressource
*
* @apiParam {Number} id L'id de la ressource à supprimer
*/
router.HandleFunc("/ressource", controller.DeleteRessource).Methods("DELETE")
/**
* @api {options} /ressource Options
* @apiDescription Renvoie la liste des méthodes autorisées
* @apiGroup Ressource
*/
router.HandleFunc("/ressource", controller.OptionsRessource).Methods("OPTIONS")
}
/**
ENDPOINT /ressources
*/
{
/**
* @api {get} /ressources?max={max} Lister
* @apiDescription Récupère les informations minimales des ressources
* @apiGroup Ressources
*
* @apiParam {Number} max Le nombre maximum de ressources à récupérer
* @apiSuccess {Object} Regle[] Les informations minimales des ressources
* @apiSuccessExample {json} Exemple de retour :
* [
* {
* "id": 1,
* "nom": "user",
* "createur": "2alheure",
* "date_creation": "2018-05-20 15:27:38"
* },
* {
* "id": 2,
* "nom": "machin",
* "createur": "frambur",
* "date_creation": "2018-05-20 17:34:43"
* }
* ]
*/
router.HandleFunc("/ressources", controller.GetRessources).Methods("GET")
/**
* @api {options} /ressources Options
* @apiDescription Renvoie la liste des méthodes autorisées
* @apiGroup Ressources
*/
router.HandleFunc("/ressources", controller.OptionsRessources).Methods("OPTIONS")
}
/**
ENDPOINT /champs
*/
{
/**
* @api {get} /champs?max={max} Lister
* @apiDescription Récupère les informations minimales des champs
* @apiGroup Champs
*
* @apiParam {Number} max Le nombre maximum de champs à récupérer
* @apiSuccess {Object} Champ[] Les informations minimales des champs
* @apiSuccessExample {json} Exemple de retour :
* [
* {
* "id": 1,
* "clef": "nom"
* },
* {
* "id": 2,
* "clef": "prenom"
* },
* {
* "id": 3,
* "clef": "age"
* },
* {
* "id": 4,
* "clef": "sexe"
* },
* {
* "id": 5,
* "clef": "ville"
* }
* ]
*/
router.HandleFunc("/champs", controller.GetChamps).Methods("GET")
/**
* @api {options} /champs Options
* @apiDescription Renvoie la liste des méthodes autorisées
* @apiGroup Champs
*/
router.HandleFunc("/champs", controller.OptionsChamps).Methods("OPTIONS")
}
/**
ENDPOINT /champ
*/
{
/**
* @api {get} /champ?id={id} Structure
* @apiDescription Récupère la structure complète d'un champ
* @apiGroup Champ
*
* @apiParam {Number} id L'id du champ
* @apiSuccess {Object} Champ Les informations du champ
* @apiSuccessExample {json} Exemple de retour :
* {
* "id": 9,
* "clef": "machin",
* "ressource_id": null,
* "regle": {
* "id": 11,
* "nom": "Compris entre",
* "parametres": [
* {
* "id": 4,
* "type": "float"
* },
* {
* "id": 4,
* "type": "float"
* }
* ]
* }
* }
*/
router.HandleFunc("/champ", controller.GetChamp).Methods("GET")
/**
* @api {post} /champ Créer
* @apiDescription Crée un champ
* @apiGroup Champ
*
* @apiParam {String} clef La clef du champ
* @apiParam {Number} [ressource_id] L'id de la ressource à laquelle le champ se rattache
*
* @apiSuccess (Created 201) {Object} Champ Les informations du champ nouvellement créé
* @apiSuccessExample {json} Exemple de retour :
* {
* "id": 10,
* "clef": "champ",
* "ressource_id": null
* }
*/
router.HandleFunc("/champ", controller.CreateChamp).Methods("POST")
/**
* @api {put} /champ Modifier
* @apiDescription Modifie un champ
* @apiGroup Champ
*
* @apiParam {String} id L'id du champ à modifier
* @apiParam {String} [clef] La nouvelle clef du champ
* @apiParam {Number} [ressource_id] Le nouvel id de la ressource à laquelle le champ se rattache <br /><br />
* Si vous souhaitez ne plus attacher le champ à une ressource, renseignez la clef et laissez sa valeur vide.
*/
router.HandleFunc("/champ", controller.ModifyChamp).Methods("PUT")
/**
* @api {delete} /champ Supprimer
* @apiDescription Supprime un champ
* @apiGroup Champ
*
* @apiParam {Number} id L'id du champ à supprimer
*/
router.HandleFunc("/champ", controller.DeleteChamp).Methods("DELETE")
/**
* @api {options} /champ Options
* @apiDescription Renvoie la liste des méthodes autorisées
* @apiGroup Champ
*/
router.HandleFunc("/champ", controller.OptionsChamp).Methods("OPTIONS")
}
/**
ENDPOINT /regles
*/
{
/**
* @api {get} /regles?max={max} Lister
* @apiDescription Récupère les informations des règles
* @apiGroup Regles
*
* @apiParam {Number} max Le nombre maximum de regles à récupérer
* @apiSuccess {Object} Regle[] Les informations des règles
* @apiSuccessExample {json} Exemple de retour :
* [
* {
* "id": 7,
* "nom": "Pair"
* },
* {
* "id": 1,
* "nom": "Regex",
* "parametres": [
* {
* "id": 1,
* "type": "string"
* }
* ]
* },
* {
* "id": 2,
* "nom": "Inférieur",
* "parametres": [
* {
* "id": 3,
* "type": "int"
* }
* ]
* }
* ]
*/
router.HandleFunc("/regles", controller.GetRegles).Methods("GET")
/**
* @api {options} /regles Options
* @apiDescription Renvoie la liste des méthodes autorisées
* @apiGroup Regles
*/
router.HandleFunc("/regles", controller.OptionsRegles).Methods("OPTIONS")
}
/**
ENDPOINT /regle
*/
{
/**
* @api {get} /regle?id={id} Structure
* @apiDescription Récupère les informations des règles
* @apiGroup Regle
*
* @apiParam {Number} id L'id de la règle à récupérer
* @apiSuccess {Object} Regle Les informations de al règle
* @apiSuccessExample {json} Exemple de retour :
* {
* "id": 2,
* "nom": "Inférieur",
* "parametres": [
* {
* "id": 3,
* "type": "int"
* }
* ]
* }
*
*/
router.HandleFunc("/regle", controller.GetRegle).Methods("GET")
/**
* @api {post} /regle Assigner
* @apiDescription Assigne une règle à un champ
* @apiGroup Regle
*
* @apiParam {String} regle_id L'id de la règle à assigner
* @apiParam {String} champ_id L'id du champ auquel sera assignée la règle
*/
router.HandleFunc("/regle", controller.AttachRegle).Methods("POST")
/**
* @api {delete} /regle Désassigner
* @apiDescription Désssigne sa règle à un champ
* @apiGroup Regle
*
* @apiParam {String} champ_id L'id du champ duquel désassigner sa règle
*/
router.HandleFunc("/regle", controller.DetachRegle).Methods("DELETE")
/**
* @api {options} /regle Options
* @apiDescription Renvoie la liste des méthodes autorisées
* @apiGroup Regle
*/
router.HandleFunc("/regle", controller.OptionsRegle).Methods("OPTIONS")
}
/**
ENDPOINT /parametres
*/
{
/**
* @api {post} /parametres (Re)définir
* @apiDescription (Re)définit les paramètres de la règle d'un champ donné
* @apiGroup Parametres
*
* @apiParam {String} champ_id L'id du champ duquel les paramètres seront (re)définis
* @apiParam {String[]} parametres Les paramètres <br /><br />
* Les paramètres sont à mettre, en json, dans le corps de la requête.<br /><br />
* Ils doivent être sous forme de tableau de chaînes de caractères.
* @apiParamExample {json} Exemple d'envoi de paramètres :
* [
* "1.3",
* "true",
* "42",
* "Hello, World"
* ]
*/
router.HandleFunc("/parametres", controller.SetParametres).Methods("POST")
/**
* @api {put} /parametres (Re)définir
* @apiDescription (Re)définit les paramètres de la règle d'un champ donné
* @apiGroup Parametres
*
* @apiParam {String} champ_id L'id du champ duquel les paramètres seront (re)définis
* @apiParam {String[]} parametres Les paramètres <br /><br />
* Les paramètres sont à mettre, en json, dans le corps de la requête.<br /><br />
* Ils doivent être sous forme de tableau de chaînes de caractères.
* @apiParamExample {json} Exemple d'envoi de paramètres :
* [
* "1.3",
* "true",
* "42",
* "Hello, World"
* ]
*/
router.HandleFunc("/parametres", controller.SetParametres).Methods("PUT")
/**
* @api {delete} /parametres Réinitialiser
* @apiDescription Réinitialise les paramètres d'une règle d'un champ et remet leur valeur à <code>null</code>
* @apiGroup Parametres
*
* @apiParam {String} champ_id L'id du champ duquel les paramètres seront réinitialisés
*/
router.HandleFunc("/parametres", controller.ResetParametres).Methods("DELETE")
/**
* @api {options} /parametres Options
* @apiDescription Renvoie la liste des méthodes autorisées
* @apiGroup Parametres
*/
router.HandleFunc("/parametres", controller.OptionsParametres).Methods("OPTIONS")
}
/**
ENDPOINT /generate
*/
{
/**
* @api {get} /generate?ressource_id={ressource_id}&nombre={nombre} Générer
* @apiDescription Génère aléatoirement une ou plusieurs ressources
* @apiGroup Generate
*
* @apiParam {Number} ressource_id L'id de la ressource à générer
* @apiParam {Number} [nombre] Le nombre d'instances à renvoyer <br /><br />
* Doit être un nombre entier et positif.<br /><br />
* Si non renseigné, renvoie une seule instance.
* @apiSuccess {[]Ressource} Ressource Un tableau de {nombre} ressource
* @apiSuccessExample {json} Exemple de retour :
* [
* {"clef": 1.3, "key": true, "clef2": 42, "key2": "Hello, World"},
* {"clef": 3.1415, "key": false, "clef2": 1337, "key2": "SDfgmohgkf"}
* ]
*/
router.HandleFunc("/generate", controller.Generate).Methods("GET")
/**
* @api {options} /generate Options
* @apiDescription Renvoie la liste des méthodes autorisées
* @apiGroup Generate
*/
router.HandleFunc("/generate", controller.OptionsGenerate).Methods("OPTIONS")
}
/**
ENDPOINT /
*/
{
/**
* @api {get} / Home
* @apiDescription Renvoie le lien vers la <a href="http://2dtension.fr/alea-data-est">documentation de l'API</a>
* @apiGroup Home
* @apiSuccessExample {json} Retourne :
* {"documentation": "http://2dtension.fr/alea-data-est"}
*/
router.HandleFunc("/", Home).Methods("GET")
/**
* @api {options} / Options
* @apiDescription Renvoie la liste des méthodes autorisées
* @apiGroup Home
*/
router.HandleFunc("/", OptionsHome).Methods("OPTIONS")
}
log.Fatal(http.ListenAndServe(config.Port, router))
}
// Ping is a health-check handler: replies with the JSON string "Pong".
func Ping(w http.ResponseWriter, r *http.Request) {
    help.ReturnJson(w, `"Pong"`)
}

// OptionsPing answers an OPTIONS request with the list of HTTP methods
// the API supports globally.
func OptionsPing(w http.ResponseWriter, r *http.Request) {
    options := []string{
        "GET",
        "POST",
        "PUT",
        "DELETE",
        "OPTIONS",
    }
    help.ReturnOptions(w, options)
}

// Home returns a JSON document pointing at the API documentation.
func Home(w http.ResponseWriter, r *http.Request) {
    help.ReturnJson(w, `{"documentation": "http://2dtension.fr/alea-data-est"}`)
}

// OptionsHome answers OPTIONS / with the methods allowed on the root route.
func OptionsHome(w http.ResponseWriter, r *http.Request) {
    options := []string{
        "GET",
        "OPTIONS",
    }
    help.ReturnOptions(w, options)
}
|
//go:build stacktrace
// +build stacktrace
package ierrors
import (
"strings"
"testing"
"github.com/stretchr/testify/require"
)
// TestErrors verifies the stacktrace-enabled build of ierrors:
//   - New does not attach a stacktrace,
//   - Errorf / Wrap / Wrapf / WithStack do attach one,
//   - wrapping an error that already carries a stacktrace never adds a second one.
func TestErrors(t *testing.T) {
    var errWithStacktrace *errorWithStacktrace

    // check that there is no stacktrace included
    err1 := New("err1")
    require.False(t, Is(err1, &errorWithStacktrace{}))

    // check that there is a stacktrace included
    err2 := Errorf("err%d", 2)
    require.ErrorAs(t, err2, &errWithStacktrace)

    err3 := Wrap(err1, "err3")
    require.ErrorAs(t, err3, &errWithStacktrace)

    err4 := Wrapf(err1, "%s", "err4")
    require.ErrorAs(t, err4, &errWithStacktrace)

    err5 := WithStack(err1)
    require.ErrorAs(t, err5, &errWithStacktrace)

    // check that there is no duplicated stacktrace included:
    // this test function's frame must appear exactly once in the rendered error
    errStacktrace := WithStack(New("errStacktrace"))
    require.Equal(t, 1, strings.Count(errStacktrace.Error(), "github.com/iotaledger/hive.go/ierrors.TestErrors"))

    err6 := Errorf("err%d: %w", 6, errStacktrace)
    require.Equal(t, 1, strings.Count(err6.Error(), "github.com/iotaledger/hive.go/ierrors.TestErrors"))

    err7 := Wrap(errStacktrace, "err7")
    require.Equal(t, 1, strings.Count(err7.Error(), "github.com/iotaledger/hive.go/ierrors.TestErrors"))

    err8 := Wrapf(errStacktrace, "%s", "err8")
    require.Equal(t, 1, strings.Count(err8.Error(), "github.com/iotaledger/hive.go/ierrors.TestErrors"))

    err9 := WithStack(errStacktrace)
    require.Equal(t, 1, strings.Count(err9.Error(), "github.com/iotaledger/hive.go/ierrors.TestErrors"))
}
|
package typgo_test
import (
"flag"
"strings"
"testing"
"github.com/stretchr/testify/require"
"github.com/typical-go/typical-go/pkg/typgo"
"github.com/urfave/cli/v2"
)
// TestGoBuild_Command verifies the CLI wiring of the build task (name,
// aliases, usage) and that executing it runs the expected `go build`
// command line with ProjectName/ProjectVersion injected via -ldflags.
func TestGoBuild_Command(t *testing.T) {
    cmpl := &typgo.GoBuild{}
    command := cmpl.Task().CliCommand(&typgo.Descriptor{})
    require.Equal(t, "build", command.Name)
    require.Equal(t, []string{"b"}, command.Aliases)
    require.Equal(t, "build the project", command.Usage)

    var out strings.Builder
    c := &typgo.Context{
        Context: cli.NewContext(nil, &flag.FlagSet{}, nil),
        Logger:  typgo.Logger{Stdout: &out},
        Descriptor: &typgo.Descriptor{
            ProjectName:    "some-project",
            ProjectVersion: "0.0.1",
        },
    }
    // PatchBash intercepts shell execution; the mock's CommandLine is the
    // exact command the task is expected to run.
    defer c.PatchBash([]*typgo.MockCommand{
        {CommandLine: "go build -ldflags \"-X github.com/typical-go/typical-go/pkg/typgo.ProjectName=some-project -X github.com/typical-go/typical-go/pkg/typgo.ProjectVersion=0.0.1\" -o bin/some-project ./cmd/some-project"},
    })(t)
    require.NoError(t, cmpl.Execute(c))
    require.Equal(t, "> go build -ldflags \"-X github.com/typical-go/typical-go/pkg/typgo.ProjectName=some-project -X github.com/typical-go/typical-go/pkg/typgo.ProjectVersion=0.0.1\" -o bin/some-project ./cmd/some-project\n", out.String())
}

// TestGoBuild_Predefined verifies that explicitly set MainPackage, Output
// and Ldflags override the defaults derived from the descriptor.
func TestGoBuild_Predefined(t *testing.T) {
    cmpl := &typgo.GoBuild{
        MainPackage: "some-package",
        Output:      "some-output",
        Ldflags: typgo.BuildVars{
            "some-var": "some-value",
        },
    }
    c := &typgo.Context{
        Context: cli.NewContext(nil, &flag.FlagSet{}, nil),
    }
    defer c.PatchBash([]*typgo.MockCommand{
        {CommandLine: "go build -ldflags \"-X some-var=some-value\" -o some-output some-package"},
    })(t)
    require.NoError(t, cmpl.Execute(c))
}
|
package app
import "errors"
// InvalidRequestError is special error type returned when any request params are invalid.
type InvalidRequestError string
// Error implements error interface.
func (e InvalidRequestError) Error() string {
return string(e)
}
// IsInvalidRequest tells that this error is 'invalid request'.
// Returns always true.
func (InvalidRequestError) IsInvalidRequest() bool {
return true
}
// IsInvalidRequestError checks if given error is caused by invalid request.
func IsInvalidRequestError(err error) bool {
type invalidReqErr interface {
IsInvalidRequest() bool
}
var ie invalidReqErr
if errors.As(err, &ie) {
return ie.IsInvalidRequest()
}
return false
}
// TooManyRequestsError is special error type returned when there's too many request to handle at a time.
type TooManyRequestsError string
// Error implements error interface.
func (e TooManyRequestsError) Error() string {
return string(e)
}
// IsTooManyRequests tells that this error is 'too many requests'.
// Returns always true.
func (TooManyRequestsError) IsTooManyRequests() bool {
return true
}
// IsTooManyRequestsError checks if given error is caused by too many requests.
func IsTooManyRequestsError(err error) bool {
type tooManyReqErr interface {
IsTooManyRequests() bool
}
var ie tooManyReqErr
if errors.As(err, &ie) {
return ie.IsTooManyRequests()
}
return false
}
// ScheduledForLaterError is special error type returned request could not be immediately processed and is scheduled for later.
type ScheduledForLaterError string
// Error implements error interface.
func (e ScheduledForLaterError) Error() string {
return string(e)
}
// IsScheduledForLater tells that this error means 'scheduled for later processing'.
// Returns always true.
func (ScheduledForLaterError) IsScheduledForLater() bool {
return true
}
// IsScheduledForLaterError checks if given error means 'scheduled for later processing'.
func IsScheduledForLaterError(err error) bool {
type scheduledForLaterReqErr interface {
IsScheduledForLater() bool
}
var ie scheduledForLaterReqErr
if errors.As(err, &ie) {
return ie.IsScheduledForLater()
}
return false
}
|
package main
import (
"flag"
"fmt"
"log"
"os"
"text/template"
"github.com/dominicbarnes/terraform-provider-mongodb/mongodb"
"github.com/hashicorp/terraform/terraform"
)
// Command-line flags: the template to render and the file to write the result to.
var tmpl = flag.String("template", "", "template file to render with")
var output = flag.String("output", "", "destination file")

// init parses flags so *tmpl / *output are populated before main runs.
func init() {
    flag.Parse()
}
// main renders the provider's schema for the mongodb_document resource
// through the template given by -template and writes the result to -output.
// Any failure is fatal.
func main() {
    p := mongodb.Provider()

    // TODO: figure out if/how to avoid specifying all of this by hand
    req := terraform.ProviderSchemaRequest{
        ResourceTypes: []string{"mongodb_document"},
    }

    s, err := p.GetSchema(&req)
    if err != nil {
        log.Fatal(err)
    }

    t := template.Must(template.ParseFiles(*tmpl))

    f, err := os.Create(*output)
    if err != nil {
        log.Fatal(err)
    }

    if err := t.Execute(f, s); err != nil {
        f.Close() // best effort; we are exiting anyway
        log.Fatal(err)
    }
    // Close explicitly and check the error: write failures can surface
    // only when the file is closed. The original leaked the handle.
    if err := f.Close(); err != nil {
        log.Fatal(err)
    }

    fmt.Printf("rendered %s to %s\n", *tmpl, *output)
}
|
package entities
import (
"encoding/json"
"io/ioutil"
"os"
"testing"
)
// initialSymbol is the fixture expected to round-trip through
// testdata/symbol.json in the tests below.
var initialSymbol = Symbol{
    Text: "Lorum ipsum",
}
// TestItUnmarshalsSymbolJson decodes testdata/symbol.json and checks it
// matches the initialSymbol fixture.
func TestItUnmarshalsSymbolJson(t *testing.T) {
    data, err := ioutil.ReadFile("testdata/symbol.json")
    if err != nil {
        t.Fatal(err)
    }
    var symbol Symbol
    // The original silently ignored this error, which would make the test
    // pass on malformed JSON (zero-value Symbol vs zero-value fixture).
    if err := json.Unmarshal(data, &symbol); err != nil {
        t.Fatal(err)
    }
    assertSymbol(t, initialSymbol, symbol)
}
// TestItMarshalsSymbolJson marshals the fixture, writes it to a temp file,
// reads it back and checks the round trip preserves the content.
// Every I/O and codec error is now checked — the original ignored them all,
// and the first `err` (from Marshal) was silently shadowed by TempFile.
func TestItMarshalsSymbolJson(t *testing.T) {
    body, err := json.Marshal(initialSymbol)
    if err != nil {
        t.Fatal(err)
    }
    tmpfile, err := ioutil.TempFile("testdata/", "symbol_*.json")
    if err != nil {
        t.Fatal(err)
    }
    tmpName := tmpfile.Name()
    defer os.Remove(tmpName)
    if _, err := tmpfile.Write(body); err != nil {
        tmpfile.Close()
        t.Fatal(err)
    }
    if err := tmpfile.Close(); err != nil {
        t.Fatal(err)
    }
    data, err := ioutil.ReadFile(tmpName)
    if err != nil {
        t.Fatal(err)
    }
    var symbol Symbol
    if err := json.Unmarshal(data, &symbol); err != nil {
        t.Fatal(err)
    }
    assertSymbol(t, initialSymbol, symbol)
}
// assertSymbol fails the test when symbol's Text differs from the fixture's.
func assertSymbol(t *testing.T, initialSymbol Symbol, symbol Symbol) {
    if symbol.Text != initialSymbol.Text {
        t.Error("Text expected", initialSymbol.Text, "got", symbol.Text)
    }
}
|
package graph
import "testing"
// TestPathStringRepresentation checks Path.String for multi-node,
// two-node, single-node and empty paths.
func TestPathStringRepresentation(t *testing.T) {
    a := &Node{ID: "A"}
    b := &Node{ID: "B"}
    c := &Node{ID: "C"}

    cases := []struct {
        path Path
        want string
    }{
        {Path{a, b, c}, "A -> B -> C"},
        {Path{a, c, b}, "A -> C -> B"},
        {Path{b, a}, "B -> A"},
        {Path{c}, "C"},
        {Path{}, "<EMPTY PATH>"},
    }
    for _, tc := range cases {
        if got := tc.path.String(); got != tc.want {
            t.Errorf("Path string representation incorrect. Got: %v, Expected: %v", got, tc.want)
        }
    }
}
// TestPathEquality checks Path.Equal: paths are equal iff they hold the
// same nodes in the same order (symmetric; different length or order fails).
func TestPathEquality(t *testing.T) {
    a := &Node{ID: "A"}
    b := &Node{ID: "B"}
    c := &Node{ID: "C"}

    pOne := Path{a, b, c}
    pTwo := Path{a, b, c}
    pThree := Path{a, b}
    pFour := Path{a, c, b}

    if !pOne.Equal(pTwo) {
        t.Errorf("%v should have been Equal to %v", pOne, pTwo)
    }
    if !pTwo.Equal(pOne) {
        t.Errorf("%v should have been Equal to %v", pTwo, pOne)
    }
    if pOne.Equal(pThree) {
        t.Errorf("%v should not have been Equal to %v", pOne, pThree)
    }
    if pOne.Equal(pFour) {
        t.Errorf("%v should not have been Equal to %v", pOne, pFour)
    }
}
// TestPathContainsNode checks Path.Contains for every member node and one
// non-member.
func TestPathContainsNode(t *testing.T) {
    a := &Node{ID: "A"}
    b := &Node{ID: "B"}
    c := &Node{ID: "C"}
    d := &Node{ID: "D"}

    path := Path{a, b, c}

    if !path.Contains(a) {
        t.Errorf("%v should contain %v, but it didn't", path, a)
    }
    if !path.Contains(b) {
        t.Errorf("%v should contain %v, but it didn't", path, b)
    }
    if !path.Contains(c) {
        t.Errorf("%v should contain %v, but it didn't", path, c)
    }
    if path.Contains(d) {
        // Message fixed: this branch fires when d IS contained, so the old
        // text "but it didn't" was inverted.
        t.Errorf("%v should not contain %v, but it did", path, d)
    }
}
|
package env
import (
"fmt"
"os"
"strconv"
)
func Get(key string) string {
value := os.Getenv(key)
if len(value) == 0 {
fmt.Printf("Environment variable '%s' not set\n", key)
}
return value
}
// GetBool reports whether the environment variable key is set to exactly
// "yes". Any other value (including unset) yields false.
func GetBool(key string) bool {
    // Idiomatic single expression replaces the if/return true/return false chain.
    return Get(key) == "yes"
}
// GetInt reads the environment variable key and converts it to an int.
// On conversion failure the error is printed and 0 is returned.
func GetInt(key string) int {
    parsed, err := strconv.Atoi(Get(key))
    if err != nil {
        // Single Printf produces the same output as the original
        // Printf + Println(err) pair (Println formats err with %v).
        fmt.Printf("Error while setting env '%s': %v\n", key, err)
    }
    return parsed
}
|
package ravendb
import (
"encoding/json"
"io"
"strconv"
)
// Sentinel version values exchanged during TCP protocol negotiation.
const (
    // outOfRangeStatus tells the peer the requested version is unsupported.
    outOfRangeStatus = -1
    // dropStatus indicates the drop operation (see negotiateProtocolVersion).
    dropStatus = -2
)
// negotiateProtocolVersion agrees on a TCP protocol version with the peer:
// it repeatedly offers a version and reads the peer's answer until both
// sides match, then returns the supported feature set for that version.
// It returns an error when a version offer cannot be sent or the peer's
// version is out of our supported range.
func negotiateProtocolVersion(stream io.Writer, parameters *tcpNegotiateParameters) (*supportedFeatures, error) {
    v := parameters.version
    currentRef := &v
    for {
        // The original ignored this error and kept negotiating on a dead stream.
        if err := sendTcpVersionInfo(stream, parameters, *currentRef); err != nil {
            return nil, err
        }
        version := parameters.readResponseAndGetVersionCallback(parameters.destinationUrl)
        if version == *currentRef {
            break
        }
        // In this case we usually throw internally but for completeness we better handle it
        if version == dropStatus {
            return getSupportedFeaturesFor(operationDrop, dropBaseLine), nil
        }
        // operationVersionSupported may lower *currentRef to a mutually
        // supported version for the next round.
        status := operationVersionSupported(parameters.operation, version, currentRef)
        if status == supportedStatus_OUT_OF_RANGE {
            if err := sendTcpVersionInfo(stream, parameters, outOfRangeStatus); err != nil {
                return nil, err
            }
            // Message typo fixed: "out lowest" -> "our lowest".
            return nil, newIllegalArgumentError("The " + parameters.operation + " version " + strconv.Itoa(parameters.version) + " is out of range, our lowest version is " + strconv.Itoa(*currentRef))
        }
    }
    return getSupportedFeaturesFor(parameters.operation, *currentRef), nil
}
// sendTcpVersionInfo writes one negotiation message (database, operation,
// source node tag and the offered version) to the stream as JSON.
func sendTcpVersionInfo(stream io.Writer, parameters *tcpNegotiateParameters, currentVersion int) error {
    payload := map[string]interface{}{
        "DatabaseName":     parameters.database,
        "Operation":        parameters.operation,
        "SourceNodeTag":    parameters.sourceNodeTag,
        "OperationVersion": currentVersion,
    }
    return json.NewEncoder(stream).Encode(payload)
}
|
// sortgen holds the implementations of the most common sorting and permutation algorithms.
package sortgen
import (
"math/rand"
"time"
"github.com/paulidealiste/goalgs/datagen"
"github.com/paulidealiste/goalgs/rangen"
"github.com/paulidealiste/goalgs/utilgen"
)
// Bubblesort returns a sorted copy of inslice. Each outer pass bubbles the
// smallest element of the unsorted tail toward the front by swapping
// out-of-order adjacent pairs.
func Bubblesort(inslice []float64) []float64 {
    defer utilgen.Timetracker(time.Now(), "Bubblesort")
    sorted := make([]float64, len(inslice))
    copy(sorted, inslice)
    for boundary := 0; boundary < len(sorted); boundary++ {
        for k := len(sorted) - 1; k > boundary; k-- {
            if sorted[k] < sorted[k-1] {
                utilgen.Swapitems(sorted[k-1 : k+1])
            }
        }
    }
    return sorted
}
// Insertsort returns a sorted copy of inslice using insertion sort: each
// element is shifted left past every larger predecessor and dropped into
// its position within the already-sorted prefix.
func Insertsort(inslice []float64) []float64 {
    defer utilgen.Timetracker(time.Now(), "Insertsort")
    sorted := make([]float64, len(inslice))
    copy(sorted, inslice)
    for pos := 1; pos < len(sorted); pos++ {
        current := sorted[pos]
        insertAt := pos
        for insertAt > 0 && sorted[insertAt-1] > current {
            sorted[insertAt] = sorted[insertAt-1]
            insertAt--
        }
        sorted[insertAt] = current
    }
    return sorted
}
// Mergesort returns a sorted copy of inslice via top-down merge sort: the
// slice is recursively halved down to single elements and the halves are
// merged back in order (see msrunner / innermerge).
func Mergesort(inslice []float64) []float64 {
    defer utilgen.Timetracker(time.Now(), "Mergesort")
    work := make([]float64, len(inslice))
    copy(work, inslice)
    return msrunner(work)
}
// msrunner recursively merge-sorts inslice and returns the sorted result.
func msrunner(inslice []float64) []float64 {
    if len(inslice) < 2 {
        return inslice
    }
    left, right := mergesplit(inslice)
    return innermerge(msrunner(left), msrunner(right))
}

// mergesplit halves inslice at its midpoint.
func mergesplit(inslice []float64) ([]float64, []float64) {
    mid := len(inslice) / 2
    return inslice[:mid], inslice[mid:]
}

// innermerge merges two sorted slices into one sorted slice, preferring
// the left side on ties (stable merge).
func innermerge(left, right []float64) []float64 {
    merged := make([]float64, 0, len(left)+len(right))
    for len(left) > 0 && len(right) > 0 {
        if left[0] <= right[0] {
            merged = append(merged, left[0])
            left = left[1:]
        } else {
            merged = append(merged, right[0])
            right = right[1:]
        }
    }
    merged = append(merged, left...)
    return append(merged, right...)
}
// Heapsort uses the max-heap data structure and proceeds from the root node of the heap
// tree, which holds the largest element, while subsequently decreasing the heap.size
// property leaving only ever smaller sub-max-heaps, until all the elements appear in the
// sorted order reflecting the max-heap structure where each parent is larger than either
// of its children.
func Heapsort(inslice []float64) []float64 {
    defer utilgen.Timetracker(time.Now(), "Heapsort")
    innerslice := make([]float64, len(inslice))
    copy(innerslice, inslice)
    iheap := datagen.Heapgen(innerslice)
    // Repeatedly move the current maximum (root, index 0) to the end of the
    // shrinking unsorted region, then restore the heap property from the root.
    // NOTE(review): the loop starts at iheap.Length — this assumes Heapgen's
    // Length is the index of the LAST element (len-1); confirm against
    // datagen, otherwise the first swap would index out of range.
    for i := iheap.Length; i >= 1; i-- {
        iheap.Inslice[i], iheap.Inslice[0] = iheap.Inslice[0], iheap.Inslice[i]
        iheap.Heapsize--
        datagen.Maxheapmaintain(&iheap, 0)
    }
    return iheap.Inslice
}
// Quicksort returns a sorted copy of inslice using the quicksort
// divide-and-conquer scheme: a randomly chosen pivot partitions the slice
// and each side is then sorted recursively (see quicksortinner and
// quickpartition).
func Quicksort(inslice []float64) []float64 {
    defer utilgen.Timetracker(time.Now(), "Quicksort")
    result := make([]float64, len(inslice))
    copy(result, inslice)
    quicksortinner(result)
    return result
}
// quicksortinner sorts inslice in place and returns it for convenience.
func quicksortinner(inslice []float64) []float64 {
    if len(inslice) < 2 {
        return inslice
    }
    q := quickpartition(inslice)
    quicksortinner(inslice[:q])
    quicksortinner(inslice[q+1:])
    return inslice
}

// quickpartition partitions inslice in place around a randomly chosen pivot
// and returns the pivot's final index: every element left of it is strictly
// smaller, every element right of it is greater or equal.
func quickpartition(inslice []float64) int {
    // rand.Intn replaces the original rand.Int() % n: it is the idiomatic
    // form and avoids the slight modulo bias of a full-range Int.
    pivot := rand.Intn(len(inslice))
    l, r := 0, len(inslice)-1
    // Park the pivot at the end, sweep smaller elements to the front,
    // then drop the pivot between the two regions.
    inslice[pivot], inslice[r] = inslice[r], inslice[pivot]
    for n := range inslice {
        if inslice[n] < inslice[r] {
            inslice[n], inslice[l] = inslice[l], inslice[n]
            l++
        }
    }
    inslice[l], inslice[r] = inslice[r], inslice[l]
    return l
}
// Sortpermute performs randomization of input array elements by utilizing sorting
// of the original array elments according to the array of random priorities.
func Sortpermute(inslice []float64) []float64 {
    defer utilgen.Timetracker(time.Now(), "Sortpermute")
    outslice := make([]float64, len(inslice))
    // rangen.Gorpa presumably returns a random permutation of the indices
    // [0, len) — TODO confirm; duplicate or out-of-range values would drop
    // elements or panic in the loop below.
    innerpriority := rangen.Gorpa(len(outslice))
    for i, v := range innerpriority {
        outslice[i] = inslice[v]
    }
    return outslice
}
// Inplacepermute returns a uniformly shuffled copy of inslice using the
// Fisher-Yates algorithm over a single backward pass.
func Inplacepermute(inslice []float64) []float64 {
    defer utilgen.Timetracker(time.Now(), "Inplacepermute")
    outslice := make([]float64, len(inslice))
    copy(outslice, inslice)
    source := rand.NewSource(time.Now().UnixNano())
    randomer := rand.New(source)
    for i := len(outslice) - 1; i > 0; i-- {
        // BUG FIX: must draw from [0, i] inclusive. The original Intn(i)
        // excluded i itself, which never lets an element stay in place at
        // its step (Sattolo's variant) and biases the shuffle.
        j := randomer.Intn(i + 1)
        outslice[i], outslice[j] = outslice[j], outslice[i]
    }
    return outslice
}
|
package crawler
import (
"github.com/l-dandelion/cwgo/spider"
"sync"
)
// crawler/once back the process-wide singleton built by New.
var (
    crawler Crawler
    once    sync.Once
)

// Crawler manages a named collection of spiders and drives their lifecycle.
type Crawler interface {
    GetSpider(name string) spider.Spider
    AddSpider(sp spider.Spider) error
    DeleteSpider(name string) error
    InitSpider(name string) error
    StartSpider(name string) error
    StopSpider(name string) error
    PauseSpider(name string) error
    RecoverSpider(name string) error
}

// myCrawler is the default Crawler, backed by an RWMutex-guarded
// name -> spider map.
type myCrawler struct {
    spiderMapLock sync.RWMutex
    spiderMap     map[string]spider.Spider
}

/*
 * New returns the process-wide Crawler, creating it on first call.
 */
func New() Crawler {
    once.Do(func() {
        crawler = &myCrawler{
            spiderMap: map[string]spider.Spider{},
        }
    })
    return crawler
}

/*
 * GetSpider returns the spider registered under name, or nil if absent.
 */
func (crawler *myCrawler) GetSpider(name string) spider.Spider {
    crawler.spiderMapLock.RLock()
    defer crawler.spiderMapLock.RUnlock()
    return crawler.spiderMap[name]
}
/*
 * AddSpider registers sp under its name; returns ERR_SPIDER_NAME_REPEATED
 * if a spider with the same name is already registered.
 */
func (crawler *myCrawler) AddSpider(sp spider.Spider) error {
    crawler.spiderMapLock.Lock()
    defer crawler.spiderMapLock.Unlock()
    // BUG FIX: the original wrote `sp, ok := crawler.spiderMap[sp.Name()]`,
    // shadowing the parameter — on the non-duplicate path the looked-up
    // nil value replaced the spider being added, and sp.Name() below then
    // dereferenced a nil interface.
    _, ok := crawler.spiderMap[sp.Name()]
    if ok {
        return ERR_SPIDER_NAME_REPEATED
    }
    crawler.spiderMap[sp.Name()] = sp
    return nil
}
/*
 * DeleteSpider removes the spider registered under name; returns
 * ERR_SPIDER_NOT_FOUND if no such spider exists.
 */
func (crawler *myCrawler) DeleteSpider(name string) error {
    crawler.spiderMapLock.Lock()
    defer crawler.spiderMapLock.Unlock()
    _, ok := crawler.spiderMap[name]
    if !ok {
        return ERR_SPIDER_NOT_FOUND
    }
    delete(crawler.spiderMap, name)
    return nil
}

/*
 * InitSpider initializes the named spider.
 */
func (crawler *myCrawler) InitSpider(name string) error {
    sp := crawler.GetSpider(name)
    if sp == nil {
        return ERR_SPIDER_NOT_FOUND
    }
    return sp.Init()
}

/*
 * StartSpider starts the named spider.
 */
func (crawler *myCrawler) StartSpider(name string) error {
    sp := crawler.GetSpider(name)
    if sp == nil {
        return ERR_SPIDER_NOT_FOUND
    }
    return sp.Start()
}

/*
 * StopSpider stops the named spider.
 */
func (crawler *myCrawler) StopSpider(name string) error {
    sp := crawler.GetSpider(name)
    if sp == nil {
        return ERR_SPIDER_NOT_FOUND
    }
    return sp.Stop()
}
/*
 * PauseSpider pauses the named spider.
 */
func (crawler *myCrawler) PauseSpider(name string) error {
    sp := crawler.GetSpider(name)
    if sp == nil {
        return ERR_SPIDER_NOT_FOUND
    }
    // FIX: the original called sp.Stop() here — an apparent copy/paste slip
    // (compare RecoverSpider, which calls sp.Recover()). Assumes
    // spider.Spider declares Pause(); confirm against the spider package.
    return sp.Pause()
}
/*
 * RecoverSpider resumes the named spider.
 */
func (crawler *myCrawler) RecoverSpider(name string) error {
    sp := crawler.GetSpider(name)
    if sp == nil {
        return ERR_SPIDER_NOT_FOUND
    }
    return sp.Recover()
}
|
package main
import (
"beego_url/controllers"
"github.com/astaxie/beego"
)
// main wires the static asset paths and the two routes of the URL
// shortener — "/" serves the shorten controller and "/:shorturl" the
// redirect controller — then starts the beego server.
func main() {
    beego.SetStaticPath("/images", "static/images")
    beego.SetStaticPath("/css", "static/css")
    beego.SetStaticPath("/js", "static/js")
    // ":shorturl:string" is a beego route parameter constrained to strings.
    beego.Router("/:shorturl:string", &controllers.RedirectController{})
    beego.Router("/", &controllers.ShortenController{})
    beego.Run()
}
|
package durationdata
import (
"io"
"net/http"
"sync"
"github.com/BerryHub/helpers/request"
"github.com/BerryHub/config"
)
// WeatherRemoteData - the duration-data descriptor for the weather service;
// implements RemoteData against the OpenWeatherMap API.
type WeatherRemoteData struct{}

// weatherData / onceWeather back the lazily-built singleton below.
var weatherData *DurationData
var onceWeather sync.Once

// GetWeatherData returns the process-wide DurationData instance for the
// weather feed, creating it and starting its refresh daemon on first call.
func GetWeatherData() *DurationData {
    onceWeather.Do(func() {
        config := config.GetCacheConfig()
        weatherData = new(DurationData)
        weatherData.ddi = WeatherRemoteData{}
        weatherData.sleepMinute = config.OpenWeatherMapTimeToRefresh
        weatherData.Daemon()
    })
    return weatherData
}

// EncodeQueryString adds the OpenWeatherMap credentials, units and location
// parameters to the request's query string.
func (w WeatherRemoteData) EncodeQueryString(req *http.Request) {
    config := config.GetCacheConfig()
    q := req.URL.Query()
    q.Add("appid", config.OpenWeatherMapAPIToken)
    q.Add("units", config.OpenWeatherMapUnits)
    q.Add("lat", config.OpenWeatherMapLatitude)
    q.Add("lon", config.OpenWeatherMapLongitude)
    req.URL.RawQuery = q.Encode()
}

// GetBody returns the body to put in the request; weather requests send none.
func (w WeatherRemoteData) GetBody() io.Reader {
    return nil
}

// GetMethod returns the HTTP method of the remote request.
func (w WeatherRemoteData) GetMethod() string {
    return "GET"
}

// GetURL returns the remote request URL from configuration.
func (w WeatherRemoteData) GetURL() string {
    config := config.GetCacheConfig()
    return config.OpenWeatherMapURL
}

// HandlerData fetches the remote data described by this descriptor.
func (w WeatherRemoteData) HandlerData() (interface{}, error) {
    content, err := request.GetRemoteData(w)
    return content, err
}
|
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package inet
import (
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/nsfs"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
)
// Namespace represents a network namespace. See network_namespaces(7).
//
// +stateify savable
type Namespace struct {
    // inode is the nsfs inode backing this namespace.
    inode *nsfs.Inode

    // stack is the network stack implementation of this network namespace.
    stack Stack `state:"nosave"`

    // creator allows kernel to create new network stack for network namespaces.
    // If nil, no networking will function if network is namespaced.
    //
    // At afterLoad(), creator will be used to create network stack. Stateify
    // needs to wait for this field to be loaded before calling afterLoad().
    creator NetworkStackCreator `state:"wait"`

    // isRoot indicates whether this is the root network namespace.
    isRoot bool

    // userNS is the user namespace that owns this network namespace.
    userNS *auth.UserNamespace
}

// NewRootNamespace creates the root network namespace, with creator
// allowing new network namespaces to be created. If creator is nil, no
// networking will function if the network is namespaced.
//
// Note: init() is deliberately not called — the root namespace's stack is
// passed in directly (or restored later via RestoreRootStack).
func NewRootNamespace(stack Stack, creator NetworkStackCreator, userNS *auth.UserNamespace) *Namespace {
    n := &Namespace{
        stack:   stack,
        creator: creator,
        isRoot:  true,
        userNS:  userNS,
    }
    return n
}

// UserNamespace returns the user namespace associated with this namespace.
func (n *Namespace) UserNamespace() *auth.UserNamespace {
    return n.userNS
}

// SetInode sets the nsfs `inode` to the namespace.
func (n *Namespace) SetInode(inode *nsfs.Inode) {
    n.inode = inode
}

// GetInode returns the nsfs inode associated with this namespace.
func (n *Namespace) GetInode() *nsfs.Inode {
    return n.inode
}

// NewNamespace creates a new network namespace from the root.
func NewNamespace(root *Namespace, userNS *auth.UserNamespace) *Namespace {
    n := &Namespace{
        creator: root.creator,
        userNS:  userNS,
    }
    // init() creates the new namespace's stack via the inherited creator.
    n.init()
    return n
}

// Destroy implements nsfs.Namespace.Destroy.
func (n *Namespace) Destroy(ctx context.Context) {
    // stack may be nil when no creator was configured; nothing to tear down.
    if s := n.Stack(); s != nil {
        s.Destroy()
    }
}

// Type implements nsfs.Namespace.Type.
func (n *Namespace) Type() string {
    return "net"
}

// IncRef increments the Namespace's refcount.
func (n *Namespace) IncRef() {
    n.inode.IncRef()
}

// DecRef decrements the Namespace's refcount.
func (n *Namespace) DecRef(ctx context.Context) {
    n.inode.DecRef(ctx)
}

// Stack returns the network stack of n. Stack may return nil if no network
// stack is configured.
func (n *Namespace) Stack() Stack {
    return n.stack
}

// IsRoot returns whether n is the root network namespace.
func (n *Namespace) IsRoot() bool {
    return n.isRoot
}

// RestoreRootStack restores the root network namespace with stack. This should
// only be called when restoring kernel.
func (n *Namespace) RestoreRootStack(stack Stack) {
    if !n.isRoot {
        panic("RestoreRootStack can only be called on root network namespace")
    }
    if n.stack != nil {
        panic("RestoreRootStack called after a stack has already been set")
    }
    n.stack = stack
}

// init creates the namespace's stack via creator. It is a no-op for the
// root namespace, whose stack is assigned externally.
func (n *Namespace) init() {
    // Root network namespace will have stack assigned later.
    if n.isRoot {
        return
    }
    if n.creator != nil {
        var err error
        n.stack, err = n.creator.CreateStack()
        if err != nil {
            panic(err)
        }
    }
}

// afterLoad is invoked by stateify.
func (n *Namespace) afterLoad() {
    n.init()
}

// NetworkStackCreator allows new instances of a network stack to be created. It
// is used by the kernel to create new network namespaces when requested.
type NetworkStackCreator interface {
    // CreateStack creates a new network stack for a network namespace.
    CreateStack() (Stack, error)
}
|
package nebula
import (
"errors"
"net"
"sync"
"sync/atomic"
"time"
"github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/cidr"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
)
// const ProbeLen = 100
// PromoteEvery is how often (in calls to TryPromoteBest) a host considers
// promoting to a preferred remote.
const PromoteEvery = 1000
// ReQueryEvery is how often (in calls to TryPromoteBest) the lighthouses are
// re-queried for new remotes.
const ReQueryEvery = 5000
// MaxRemotes caps tracked remote addresses per host — TODO confirm against
// RemoteList usage; not referenced in this file.
const MaxRemotes = 10
// MaxHostInfosPerVpnIp is the max number of hostinfos we will track for a given vpn ip
// 5 allows for an initial handshake and each host pair re-handshaking twice
const MaxHostInfosPerVpnIp = 5
// How long we should prevent roaming back to the previous IP.
// This helps prevent flapping due to packets already in flight
const RoamingSuppressSeconds = 2
// Relay lifecycle states (see Relay.State); relays start Requested and become
// Established via CompleteRelayByIP/CompleteRelayByIdx.
const (
	Requested = iota
	PeerRequested
	Established
)
// Relay types (see Relay.Type).
const (
	Unknowntype = iota
	ForwardingType
	TerminalType
)
// Relay describes one relay entry: its type and state, the local/remote index
// ids used on the wire, and the VPN IP of the peer reached through it.
type Relay struct {
	Type int
	State int
	LocalIndex uint32
	RemoteIndex uint32
	PeerIp iputil.VpnIp
}
// HostMap is the set of all known hosts, indexed several ways: by local index
// id, by relay index id, by the remote side's index id, and by VPN IP.
type HostMap struct {
	sync.RWMutex //Because we concurrently read and write to our maps
	Indexes map[uint32]*HostInfo
	Relays map[uint32]*HostInfo // Maps a Relay IDX to a Relay HostInfo object
	RemoteIndexes map[uint32]*HostInfo
	Hosts map[iputil.VpnIp]*HostInfo
	preferredRanges []*net.IPNet
	vpnCIDR *net.IPNet
	metricsEnabled bool
	l *logrus.Logger
}
// For synchronization, treat the pointed-to Relay struct as immutable. To edit the Relay
// struct, make a copy of an existing value, edit the fields in the copy, and
// then store a pointer to the new copy in both relayForBy* maps.
type RelayState struct {
	sync.RWMutex
	relays map[iputil.VpnIp]struct{} // Set of VpnIp's of Hosts to use as relays to access this peer
	relayForByIp map[iputil.VpnIp]*Relay // Maps VpnIps of peers for which this HostInfo is a relay to some Relay info
	relayForByIdx map[uint32]*Relay // Maps a local index to some Relay info
}
// DeleteRelay removes ip from the set of relays used to reach this peer.
func (rs *RelayState) DeleteRelay(ip iputil.VpnIp) {
	rs.Lock()
	defer rs.Unlock()
	delete(rs.relays, ip)
}
// CopyAllRelayFor returns a snapshot of every Relay this host relays for.
func (rs *RelayState) CopyAllRelayFor() []*Relay {
	rs.RLock()
	defer rs.RUnlock()
	ret := make([]*Relay, 0, len(rs.relayForByIdx))
	for _, r := range rs.relayForByIdx {
		ret = append(ret, r)
	}
	return ret
}
// GetRelayForByIp returns the Relay entry for the peer with the given VPN IP.
func (rs *RelayState) GetRelayForByIp(ip iputil.VpnIp) (*Relay, bool) {
	rs.RLock()
	defer rs.RUnlock()
	r, ok := rs.relayForByIp[ip]
	return r, ok
}
// InsertRelayTo adds ip to the set of relays used to reach this peer.
func (rs *RelayState) InsertRelayTo(ip iputil.VpnIp) {
	rs.Lock()
	defer rs.Unlock()
	rs.relays[ip] = struct{}{}
}
// CopyRelayIps returns a snapshot of the VPN IPs usable as relays to this peer.
func (rs *RelayState) CopyRelayIps() []iputil.VpnIp {
	rs.RLock()
	defer rs.RUnlock()
	ret := make([]iputil.VpnIp, 0, len(rs.relays))
	for ip := range rs.relays {
		ret = append(ret, ip)
	}
	return ret
}
// CopyRelayForIps returns a snapshot of the peer VPN IPs this host relays for.
func (rs *RelayState) CopyRelayForIps() []iputil.VpnIp {
	rs.RLock()
	defer rs.RUnlock()
	currentRelays := make([]iputil.VpnIp, 0, len(rs.relayForByIp))
	for relayIp := range rs.relayForByIp {
		currentRelays = append(currentRelays, relayIp)
	}
	return currentRelays
}
// CopyRelayForIdxs returns a snapshot of the local indexes this host relays for.
func (rs *RelayState) CopyRelayForIdxs() []uint32 {
	rs.RLock()
	defer rs.RUnlock()
	ret := make([]uint32, 0, len(rs.relayForByIdx))
	for i := range rs.relayForByIdx {
		ret = append(ret, i)
	}
	return ret
}
// RemoveRelay deletes the relay entry for localIdx from both maps, returning
// the peer's VPN IP and whether an entry existed.
func (rs *RelayState) RemoveRelay(localIdx uint32) (iputil.VpnIp, bool) {
	rs.Lock()
	defer rs.Unlock()
	r, ok := rs.relayForByIdx[localIdx]
	if !ok {
		return iputil.VpnIp(0), false
	}
	delete(rs.relayForByIdx, localIdx)
	delete(rs.relayForByIp, r.PeerIp)
	return r.PeerIp, true
}
// CompleteRelayByIP transitions the relay for peer vpnIp to Established and
// records the remote index. Per the RelayState contract the stored Relay is
// immutable, so a modified copy replaces the entry in both maps. Returns
// false when no entry exists for vpnIp.
func (rs *RelayState) CompleteRelayByIP(vpnIp iputil.VpnIp, remoteIdx uint32) bool {
	rs.Lock()
	defer rs.Unlock()
	r, ok := rs.relayForByIp[vpnIp]
	if !ok {
		return false
	}
	newRelay := *r
	newRelay.State = Established
	newRelay.RemoteIndex = remoteIdx
	rs.relayForByIdx[r.LocalIndex] = &newRelay
	rs.relayForByIp[r.PeerIp] = &newRelay
	return true
}
// CompleteRelayByIdx is CompleteRelayByIP keyed by local index; it also
// returns the updated Relay copy.
func (rs *RelayState) CompleteRelayByIdx(localIdx uint32, remoteIdx uint32) (*Relay, bool) {
	rs.Lock()
	defer rs.Unlock()
	r, ok := rs.relayForByIdx[localIdx]
	if !ok {
		return nil, false
	}
	newRelay := *r
	newRelay.State = Established
	newRelay.RemoteIndex = remoteIdx
	rs.relayForByIdx[r.LocalIndex] = &newRelay
	rs.relayForByIp[r.PeerIp] = &newRelay
	return &newRelay, true
}
// QueryRelayForByIp returns the relay entry for peer vpnIp, if any.
func (rs *RelayState) QueryRelayForByIp(vpnIp iputil.VpnIp) (*Relay, bool) {
	rs.RLock()
	defer rs.RUnlock()
	r, ok := rs.relayForByIp[vpnIp]
	return r, ok
}
// QueryRelayForByIdx returns the relay entry for local index idx, if any.
func (rs *RelayState) QueryRelayForByIdx(idx uint32) (*Relay, bool) {
	rs.RLock()
	defer rs.RUnlock()
	r, ok := rs.relayForByIdx[idx]
	return r, ok
}
// InsertRelay stores r under both the peer's VPN IP and the local index.
func (rs *RelayState) InsertRelay(ip iputil.VpnIp, idx uint32, r *Relay) {
	rs.Lock()
	defer rs.Unlock()
	rs.relayForByIp[ip] = r
	rs.relayForByIdx[idx] = r
}
// HostInfo is the runtime state for one tunnel to a peer: the current remote,
// candidate remotes, crypto/connection state, handshake bookkeeping, relay
// state, and roaming history. Multiple HostInfos for the same vpn ip are
// chained via next/prev, with the primary stored in HostMap.Hosts.
type HostInfo struct {
	sync.RWMutex
	remote *udp.Addr
	remotes *RemoteList
	promoteCounter atomic.Uint32
	ConnectionState *ConnectionState
	handshakeStart time.Time //todo: this an entry in the handshake manager
	HandshakeReady bool //todo: being in the manager means you are ready
	HandshakeCounter int //todo: another handshake manager entry
	HandshakeLastRemotes []*udp.Addr //todo: another handshake manager entry, which remotes we sent to last time
	HandshakeComplete bool //todo: this should go away in favor of ConnectionState.ready
	HandshakePacket map[uint8][]byte
	packetStore []*cachedPacket //todo: this is other handshake manager entry
	remoteIndexId uint32
	localIndexId uint32
	vpnIp iputil.VpnIp
	recvError int
	remoteCidr *cidr.Tree4
	relayState RelayState
	// lastRebindCount is the other side of Interface.rebindCount, if these values don't match then we need to ask LH
	// for a punch from the remote end of this tunnel. The goal being to prime their conntrack for our traffic just like
	// with a handshake
	lastRebindCount int8
	// lastHandshakeTime records the time the remote side told us about at the stage when the handshake was completed locally
	// Stage 1 packet will contain it if I am a responder, stage 2 packet if I am an initiator
	// This is used to avoid an attack where a handshake packet is replayed after some time
	lastHandshakeTime uint64
	lastRoam time.Time
	lastRoamRemote *udp.Addr
	// Used to track other hostinfos for this vpn ip since only 1 can be primary
	// Synchronised via hostmap lock and not the hostinfo lock.
	next, prev *HostInfo
}
// ViaSender carries the relay context of a received, relayed packet.
type ViaSender struct {
	relayHI *HostInfo // relayHI is the host info object of the relay
	remoteIdx uint32 // remoteIdx is the index included in the header of the received packet
	relay *Relay // relay contains the rest of the relay information, including the PeerIP of the host trying to communicate with us.
}
// cachedPacket is one outbound packet held while a handshake is pending,
// along with the callback that will (re)send it (see HostInfo.cachePacket).
type cachedPacket struct {
	messageType header.MessageType
	messageSubType header.MessageSubType
	callback packetCallback
	packet []byte
}
// packetCallback replays a cached packet once the tunnel is ready.
type packetCallback func(t header.MessageType, st header.MessageSubType, h *HostInfo, p, nb, out []byte)
// cachedPacketMetrics counts cached packets that were eventually sent vs
// dropped because the store was full.
type cachedPacketMetrics struct {
	sent metrics.Counter
	dropped metrics.Counter
}
// NewHostMap creates a HostMap for the given VPN CIDR and preferred ranges,
// with empty host, index, remote-index, and relay tables.
func NewHostMap(l *logrus.Logger, vpnCIDR *net.IPNet, preferredRanges []*net.IPNet) *HostMap {
	return &HostMap{
		Indexes:         map[uint32]*HostInfo{},
		Relays:          map[uint32]*HostInfo{},
		RemoteIndexes:   map[uint32]*HostInfo{},
		Hosts:           map[iputil.VpnIp]*HostInfo{},
		preferredRanges: preferredRanges,
		vpnCIDR:         vpnCIDR,
		l:               l,
	}
}
// EmitStats reports host, index, and relay counts to the stats collection system
func (hm *HostMap) EmitStats() {
	// Snapshot the map sizes under the read lock, then publish the gauges
	// without holding it.
	hm.RLock()
	hostLen := len(hm.Hosts)
	indexLen := len(hm.Indexes)
	remoteIndexLen := len(hm.RemoteIndexes)
	relaysLen := len(hm.Relays)
	hm.RUnlock()
	metrics.GetOrRegisterGauge("hostmap.main.hosts", nil).Update(int64(hostLen))
	metrics.GetOrRegisterGauge("hostmap.main.indexes", nil).Update(int64(indexLen))
	metrics.GetOrRegisterGauge("hostmap.main.remoteIndexes", nil).Update(int64(remoteIndexLen))
	metrics.GetOrRegisterGauge("hostmap.main.relayIndexes", nil).Update(int64(relaysLen))
}
// RemoveRelay drops the relay hostinfo tracked under localIdx, if present.
func (hm *HostMap) RemoveRelay(localIdx uint32) {
	hm.Lock()
	_, ok := hm.Relays[localIdx]
	if !ok {
		hm.Unlock()
		return
	}
	delete(hm.Relays, localIdx)
	hm.Unlock()
}
// DeleteHostInfo will fully unlink the hostinfo and return true if it was the final hostinfo for this vpn ip
func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool {
	// Delete the host itself, ensuring it's not modified anymore
	hm.Lock()
	// If we have a previous or next hostinfo then we are not the last one for this vpn ip
	final := (hostinfo.next == nil && hostinfo.prev == nil)
	hm.unlockedDeleteHostInfo(hostinfo)
	hm.Unlock()
	return final
}
// MakePrimary moves hostinfo to the head of its vpn ip's list so it becomes
// the entry returned by Hosts lookups.
func (hm *HostMap) MakePrimary(hostinfo *HostInfo) {
	hm.Lock()
	defer hm.Unlock()
	hm.unlockedMakePrimary(hostinfo)
}
// unlockedMakePrimary re-links hostinfo to the head of the doubly linked list
// for its vpn ip. Caller must hold the write lock.
func (hm *HostMap) unlockedMakePrimary(hostinfo *HostInfo) {
	oldHostinfo := hm.Hosts[hostinfo.vpnIp]
	if oldHostinfo == hostinfo {
		// Already primary; nothing to do.
		return
	}
	// Unlink hostinfo from its current position.
	if hostinfo.prev != nil {
		hostinfo.prev.next = hostinfo.next
	}
	if hostinfo.next != nil {
		hostinfo.next.prev = hostinfo.prev
	}
	hm.Hosts[hostinfo.vpnIp] = hostinfo
	if oldHostinfo == nil {
		return
	}
	// Push the old primary behind the new one.
	hostinfo.next = oldHostinfo
	oldHostinfo.prev = hostinfo
	hostinfo.prev = nil
}
// unlockedDeleteHostInfo removes hostinfo from every map and repairs the
// per-vpn-ip linked list around it. Caller must hold the write lock.
func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
	primary, ok := hm.Hosts[hostinfo.vpnIp]
	if ok && primary == hostinfo {
		// The vpnIp pointer points to the same hostinfo as the local index id, we can remove it
		delete(hm.Hosts, hostinfo.vpnIp)
		if len(hm.Hosts) == 0 {
			// Recreate the map when it empties — presumably to release the old
			// map's bucket storage; TODO confirm intent.
			hm.Hosts = map[iputil.VpnIp]*HostInfo{}
		}
		if hostinfo.next != nil {
			// We had more than 1 hostinfo at this vpnip, promote the next in the list to primary
			hm.Hosts[hostinfo.vpnIp] = hostinfo.next
			// It is primary, there is no previous hostinfo now
			hostinfo.next.prev = nil
		}
	} else {
		// Relink if we were in the middle of multiple hostinfos for this vpn ip
		if hostinfo.prev != nil {
			hostinfo.prev.next = hostinfo.next
		}
		if hostinfo.next != nil {
			hostinfo.next.prev = hostinfo.prev
		}
	}
	hostinfo.next = nil
	hostinfo.prev = nil
	// The remote index uses index ids outside our control so lets make sure we are only removing
	// the remote index pointer here if it points to the hostinfo we are deleting
	hostinfo2, ok := hm.RemoteIndexes[hostinfo.remoteIndexId]
	if ok && hostinfo2 == hostinfo {
		delete(hm.RemoteIndexes, hostinfo.remoteIndexId)
		if len(hm.RemoteIndexes) == 0 {
			hm.RemoteIndexes = map[uint32]*HostInfo{}
		}
	}
	delete(hm.Indexes, hostinfo.localIndexId)
	if len(hm.Indexes) == 0 {
		hm.Indexes = map[uint32]*HostInfo{}
	}
	if hm.l.Level >= logrus.DebugLevel {
		hm.l.WithField("hostMap", m{"mapTotalSize": len(hm.Hosts),
			"vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
			Debug("Hostmap hostInfo deleted")
	}
	// Drop any relay entries whose traffic flowed through this hostinfo.
	for _, localRelayIdx := range hostinfo.relayState.CopyRelayForIdxs() {
		delete(hm.Relays, localRelayIdx)
	}
}
// QueryIndex looks up a HostInfo by its local index id, returning nil when the
// index is unknown. (A missing map key yields the zero value, which for a
// pointer is nil — exactly the original not-found result.)
func (hm *HostMap) QueryIndex(index uint32) *HostInfo {
	hm.RLock()
	defer hm.RUnlock()
	return hm.Indexes[index]
}
// QueryRelayIndex looks up a relay HostInfo by its local index id, returning
// nil when the index is unknown.
func (hm *HostMap) QueryRelayIndex(index uint32) *HostInfo {
	//TODO: we probably just want to return bool instead of error, or at least a static error
	hm.RLock()
	defer hm.RUnlock()
	return hm.Relays[index]
}
// QueryReverseIndex looks up a HostInfo by the remote side's index id,
// returning nil when the index is unknown.
func (hm *HostMap) QueryReverseIndex(index uint32) *HostInfo {
	hm.RLock()
	defer hm.RUnlock()
	return hm.RemoteIndexes[index]
}
// QueryVpnIp returns the primary HostInfo for vpnIp, or nil, without
// attempting remote promotion.
func (hm *HostMap) QueryVpnIp(vpnIp iputil.VpnIp) *HostInfo {
	return hm.queryVpnIp(vpnIp, nil)
}
// QueryVpnIpRelayFor walks the hostinfo chain for relayHostIp looking for an
// Established relay entry toward targetIp.
func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp iputil.VpnIp) (*HostInfo, *Relay, error) {
	hm.RLock()
	defer hm.RUnlock()
	h, ok := hm.Hosts[relayHostIp]
	if !ok {
		return nil, nil, errors.New("unable to find host")
	}
	for h != nil {
		r, ok := h.relayState.QueryRelayForByIp(targetIp)
		if ok && r.State == Established {
			return h, r, nil
		}
		h = h.next
	}
	return nil, nil, errors.New("unable to find host with relay")
}
// PromoteBestQueryVpnIp will attempt to lazily switch to the best remote every
// `PromoteEvery` calls to this function for a given host.
func (hm *HostMap) PromoteBestQueryVpnIp(vpnIp iputil.VpnIp, ifce *Interface) *HostInfo {
	return hm.queryVpnIp(vpnIp, ifce)
}
// queryVpnIp looks up the primary HostInfo for vpnIp. When promoteIfce is
// non-nil (and we are not a lighthouse) a hit is also given the chance to
// promote to a better remote.
func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) *HostInfo {
	hm.RLock()
	if h, ok := hm.Hosts[vpnIp]; ok {
		hm.RUnlock()
		// Do not attempt promotion if you are a lighthouse
		if promoteIfce != nil && !promoteIfce.lightHouse.amLighthouse {
			h.TryPromoteBest(hm.preferredRanges, promoteIfce)
		}
		return h
	}
	hm.RUnlock()
	return nil
}
// unlockedAddHostInfo assumes you have a write-lock and will add a hostinfo object to the hostmap Indexes and RemoteIndexes maps.
// If an entry exists for the Hosts table (vpnIp -> hostinfo) then the provided hostinfo will be made primary
func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
	if f.serveDns {
		remoteCert := hostinfo.ConnectionState.peerCert
		dnsR.Add(remoteCert.Details.Name+".", remoteCert.Details.Ips[0].IP.String())
	}
	existing := hm.Hosts[hostinfo.vpnIp]
	hm.Hosts[hostinfo.vpnIp] = hostinfo
	if existing != nil {
		// Chain the displaced primary behind the new one.
		hostinfo.next = existing
		existing.prev = hostinfo
	}
	hm.Indexes[hostinfo.localIndexId] = hostinfo
	hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo
	if hm.l.Level >= logrus.DebugLevel {
		// NOTE(review): "existing" is logged as a constant true even when no
		// prior entry was present — confirm this is intentional.
		hm.l.WithField("hostMap", m{"vpnIp": hostinfo.vpnIp, "mapTotalSize": len(hm.Hosts),
			"hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "hostId": hostinfo.vpnIp}}).
			Debug("Hostmap vpnIp added")
	}
	// Trim the chain to MaxHostInfosPerVpnIp entries. NOTE(review): deleting
	// an entry also nils its next pointer (see unlockedDeleteHostInfo), so at
	// most one over-limit entry is removed per call.
	i := 1
	check := hostinfo
	for check != nil {
		if i > MaxHostInfosPerVpnIp {
			hm.unlockedDeleteHostInfo(check)
		}
		check = check.next
		i++
	}
}
// GetPreferredRanges returns the configured preferred local ranges.
func (hm *HostMap) GetPreferredRanges() []*net.IPNet {
	return hm.preferredRanges
}
// ForEachVpnIp invokes f on the primary HostInfo of every vpn ip, holding the
// read lock for the duration.
func (hm *HostMap) ForEachVpnIp(f controlEach) {
	hm.RLock()
	defer hm.RUnlock()
	for _, v := range hm.Hosts {
		f(v)
	}
}
// ForEachIndex invokes f on every HostInfo keyed by local index, holding the
// read lock for the duration.
func (hm *HostMap) ForEachIndex(f controlEach) {
	hm.RLock()
	defer hm.RUnlock()
	for _, v := range hm.Indexes {
		f(v)
	}
}
// TryPromoteBest handles re-querying lighthouses and probing for better paths
// NOTE: It is an error to call this if you are a lighthouse since they should not roam clients!
//
// Every PromoteEvery calls it probes preferred candidate remotes (unless the
// current remote is already preferred); every ReQueryEvery calls it re-queries
// the lighthouses for fresh remotes.
func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface) {
	c := i.promoteCounter.Add(1)
	if c%PromoteEvery == 0 {
		// The lock here is currently protecting i.remote access
		i.RLock()
		remote := i.remote
		i.RUnlock()
		// return early if we are already on a preferred remote
		if remote != nil {
			rIP := remote.IP
			for _, l := range preferredRanges {
				if l.Contains(rIP) {
					return
				}
			}
		}
		i.remotes.ForEach(preferredRanges, func(addr *udp.Addr, preferred bool) {
			if remote != nil && (addr == nil || !preferred) {
				return
			}
			// Try to send a test packet to that host, this should
			// cause it to detect a roaming event and switch remotes
			// (make([]byte, 12): redundant capacity argument removed, S1019)
			ifce.sendTo(header.Test, header.TestRequest, i.ConnectionState, i, addr, []byte(""), make([]byte, 12), make([]byte, mtu))
		})
	}
	// Re query our lighthouses for new remotes occasionally
	if c%ReQueryEvery == 0 && ifce.lightHouse != nil {
		ifce.lightHouse.QueryServer(i.vpnIp, ifce)
	}
}
// cachePacket stores a packet while a handshake is pending so it can be
// replayed once the tunnel is ready (see handshakeComplete). The store is
// capped at 100 packets; overflow is counted in m.dropped.
func (i *HostInfo) cachePacket(l *logrus.Logger, t header.MessageType, st header.MessageSubType, packet []byte, f packetCallback, m *cachedPacketMetrics) {
	//TODO: return the error so we can log with more context
	if len(i.packetStore) < 100 {
		tempPacket := make([]byte, len(packet))
		copy(tempPacket, packet)
		//l.WithField("trace", string(debug.Stack())).Error("Caching packet", tempPacket)
		i.packetStore = append(i.packetStore, &cachedPacket{t, st, f, tempPacket})
		if l.Level >= logrus.DebugLevel {
			i.logger(l).
				WithField("length", len(i.packetStore)).
				WithField("stored", true).
				Debugf("Packet store")
		}
		return
	}
	// Fix: count the drop unconditionally. Previously the dropped metric was
	// only incremented when debug logging was enabled, undercounting drops in
	// normal operation.
	m.dropped.Inc(1)
	if l.Level >= logrus.DebugLevel {
		i.logger(l).
			WithField("length", len(i.packetStore)).
			WithField("stored", false).
			Debugf("Packet store")
	}
}
// handshakeComplete will set the connection as ready to communicate, as well as flush any stored packets
//
// Stored packets are replayed via their cached callbacks, blocked remotes are
// reset, and ConnectionState.ready is flipped — all under queueLock so new
// packets cannot race the flush.
func (i *HostInfo) handshakeComplete(l *logrus.Logger, m *cachedPacketMetrics) {
	//TODO: I'm not certain the distinction between handshake complete and ConnectionState being ready matters because:
	//TODO: HandshakeComplete means send stored packets and ConnectionState.ready means we are ready to send
	//TODO: if the transition from HandhsakeComplete to ConnectionState.ready happens all within this function they are identical
	i.ConnectionState.queueLock.Lock()
	i.HandshakeComplete = true
	//TODO: this should be managed by the handshake state machine to set it based on how many handshake were seen.
	// Clamping it to 2 gets us out of the woods for now
	i.ConnectionState.messageCounter.Store(2)
	if l.Level >= logrus.DebugLevel {
		i.logger(l).Debugf("Sending %d stored packets", len(i.packetStore))
	}
	if len(i.packetStore) > 0 {
		// make([]byte, 12): redundant capacity argument removed (S1019).
		nb := make([]byte, 12)
		out := make([]byte, mtu)
		for _, cp := range i.packetStore {
			cp.callback(cp.messageType, cp.messageSubType, i, cp.packet, nb, out)
		}
		m.sent.Inc(int64(len(i.packetStore)))
	}
	i.remotes.ResetBlockedRemotes()
	i.packetStore = make([]*cachedPacket, 0)
	i.ConnectionState.ready = true
	i.ConnectionState.queueLock.Unlock()
}
// GetCert returns the peer's certificate, or nil when no connection state
// exists yet.
func (i *HostInfo) GetCert() *cert.NebulaCertificate {
	if i.ConnectionState != nil {
		return i.ConnectionState.peerCert
	}
	return nil
}
// SetRemote records remote as the current address (if different) and teaches
// it to the remote list.
func (i *HostInfo) SetRemote(remote *udp.Addr) {
	// We copy here because we likely got this remote from a source that reuses the object
	if !i.remote.Equals(remote) {
		i.remote = remote.Copy()
		i.remotes.LearnRemote(i.vpnIp, remote.Copy())
	}
}
// SetRemoteIfPreferred returns true if the remote was changed. The lastRoam
// time on the HostInfo will also be updated.
func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote *udp.Addr) bool {
	if newRemote == nil {
		// relays have nil udp Addrs
		return false
	}
	currentRemote := i.remote
	if currentRemote == nil {
		i.SetRemote(newRemote)
		return true
	}
	// NOTE: We do this loop here instead of calling `isPreferred` in
	// remote_list.go so that we only have to loop over preferredRanges once.
	newIsPreferred := false
	for _, l := range hm.preferredRanges {
		// return early if we are already on a preferred remote
		if l.Contains(currentRemote.IP) {
			return false
		}
		if l.Contains(newRemote.IP) {
			newIsPreferred = true
		}
	}
	if newIsPreferred {
		// Consider this a roaming event
		i.lastRoam = time.Now()
		i.lastRoamRemote = currentRemote.Copy()
		i.SetRemote(newRemote)
		return true
	}
	return false
}
// RecvErrorExceeded counts a recv_error occurrence and reports whether the
// tolerance (3) has been exceeded. NOTE(review): recvError is mutated without
// taking the HostInfo lock — confirm callers serialize access.
func (i *HostInfo) RecvErrorExceeded() bool {
	if i.recvError < 3 {
		i.recvError += 1
		return false
	}
	return true
}
// CreateRemoteCIDR builds the tree of networks the peer's certificate covers
// (its IPs as /32s plus its subnets). The common single-IP, no-subnet case is
// skipped entirely and leaves remoteCidr nil.
func (i *HostInfo) CreateRemoteCIDR(c *cert.NebulaCertificate) {
	if len(c.Details.Ips) == 1 && len(c.Details.Subnets) == 0 {
		// Simple case, no CIDRTree needed
		return
	}
	remoteCidr := cidr.NewTree4()
	for _, ip := range c.Details.Ips {
		remoteCidr.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{})
	}
	for _, n := range c.Details.Subnets {
		remoteCidr.AddCIDR(n, struct{}{})
	}
	i.remoteCidr = remoteCidr
}
// logger returns a logrus entry pre-tagged with this host's vpn ip, local and
// remote indexes, and (when available) the peer certificate name. Safe to call
// on a nil HostInfo.
func (i *HostInfo) logger(l *logrus.Logger) *logrus.Entry {
	if i == nil {
		return logrus.NewEntry(l)
	}
	li := l.WithField("vpnIp", i.vpnIp).
		WithField("localIndex", i.localIndexId).
		WithField("remoteIndex", i.remoteIndexId)
	if connState := i.ConnectionState; connState != nil {
		if peerCert := connState.peerCert; peerCert != nil {
			li = li.WithField("certName", peerCert.Details.Name)
		}
	}
	return li
}
// Utility functions

// localIps gathers the IPs of all local interfaces that pass both the
// interface-name and IP allow lists, skipping loopback and link-local unicast
// addresses.
func localIps(l *logrus.Logger, allowList *LocalAllowList) *[]net.IP {
	//FIXME: This function is pretty garbage
	var ips []net.IP
	ifaces, _ := net.Interfaces()
	for _, i := range ifaces {
		allow := allowList.AllowName(i.Name)
		if l.Level >= logrus.TraceLevel {
			l.WithField("interfaceName", i.Name).WithField("allow", allow).Trace("localAllowList.AllowName")
		}
		if !allow {
			continue
		}
		addrs, _ := i.Addrs()
		for _, addr := range addrs {
			var ip net.IP
			switch v := addr.(type) {
			case *net.IPNet:
				//continue
				ip = v.IP
			case *net.IPAddr:
				ip = v.IP
			}
			//TODO: Filtering out link local for now, this is probably the most correct thing
			//TODO: Would be nice to filter out SLAAC MAC based ips as well
			// (was `ip.IsLoopback() == false`; use the negation operator, S1002)
			if !ip.IsLoopback() && !ip.IsLinkLocalUnicast() {
				allow := allowList.Allow(ip)
				if l.Level >= logrus.TraceLevel {
					l.WithField("localIp", ip).WithField("allow", allow).Trace("localAllowList.Allow")
				}
				if !allow {
					continue
				}
				ips = append(ips, ip)
			}
		}
	}
	return &ips
}
|
package logger
import (
"fmt"
"io"
"os"
"path"
"path/filepath"
"sync"
"time"
"github.com/mattn/go-colorable"
"github.com/rs/zerolog"
fallbacklog "github.com/rs/zerolog/log"
"github.com/urfave/cli/v2"
"golang.org/x/term"
"gopkg.in/natefinch/lumberjack.v2"
"github.com/cloudflare/cloudflared/features"
"github.com/cloudflare/cloudflared/management"
)
const (
	EnableTerminalLog = false
	DisableTerminalLog = true
	LogLevelFlag = "loglevel"
	LogFileFlag = "logfile"
	LogDirectoryFlag = "log-directory"
	LogTransportLevelFlag = "transport-loglevel"
	// NOTE(review): LogSSHDirectoryFlag shares the "log-directory" value with
	// LogDirectoryFlag — confirm the aliasing is intentional.
	LogSSHDirectoryFlag = "log-directory"
	LogSSHLevelFlag = "log-level"
	dirPermMode = 0744 // rwxr--r--
	filePermMode = 0644 // rw-r--r--
	consoleTimeFormat = time.RFC3339
)
var (
	// ManagementLogger captures log events for management; assigned in init
	// only when the management-logs feature is enabled.
	ManagementLogger *management.Logger
)
func init() {
	zerolog.TimeFieldFormat = time.RFC3339
	zerolog.TimestampFunc = utcNow
	if features.Contains(features.FeatureManagementLogs) {
		// Management logger needs to be initialized before any of the other loggers as to not capture
		// its own logging events.
		ManagementLogger = management.NewLogger()
	}
}
// utcNow is the zerolog timestamp source; it normalizes timestamps to UTC.
func utcNow() time.Time {
	return time.Now().UTC()
}
// fallbackLogger reports the setup failure on the package-default logger and
// returns that default so callers always receive a usable logger.
func fallbackLogger(err error) *zerolog.Logger {
	failLog := fallbacklog.With().Logger()
	fallbacklog.Error().Msgf("Falling back to a default logger due to logger setup failure: %s", err)
	return &failLog
}
// resilientMultiWriter is an alternative to zerolog's so that we can make it resilient to individual
// writer's errors. E.g., when running as a Windows service, the console writer fails, but we don't want to
// allow that to prevent all logging to fail due to breaking the for loop upon an error.
type resilientMultiWriter struct {
	level zerolog.Level
	writers []io.Writer
	managementWriter zerolog.LevelWriter
}
// Write fans p out to every writer, deliberately ignoring individual write
// errors, and always reports full success.
func (t resilientMultiWriter) Write(p []byte) (n int, err error) {
	for _, w := range t.writers {
		_, _ = w.Write(p)
	}
	if t.managementWriter != nil {
		_, _ = t.managementWriter.Write(p)
	}
	return len(p), nil
}
// WriteLevel fans p out like Write, but the normal writers only receive events
// at or above the configured level.
func (t resilientMultiWriter) WriteLevel(level zerolog.Level, p []byte) (n int, err error) {
	// Only write the event to normal writers if it exceeds the level, but always write to the
	// management logger and let it decide with the provided level of the log event.
	if t.level <= level {
		for _, w := range t.writers {
			_, _ = w.Write(p)
		}
	}
	if t.managementWriter != nil {
		_, _ = t.managementWriter.WriteLevel(level, p)
	}
	return len(p), nil
}
// levelErrorLogged ensures the bad-level warning below is emitted at most once
// per process, since newZerolog may be called several times.
var levelErrorLogged = false
// newZerolog assembles a zerolog.Logger from the console/file/rolling sections
// of loggerConfig plus the optional management writer. It falls back to the
// package-default logger when a file or rolling writer cannot be created.
func newZerolog(loggerConfig *Config) *zerolog.Logger {
	var writers []io.Writer
	if loggerConfig.ConsoleConfig != nil {
		writers = append(writers, createConsoleLogger(*loggerConfig.ConsoleConfig))
	}
	if loggerConfig.FileConfig != nil {
		fileLogger, err := createFileWriter(*loggerConfig.FileConfig)
		if err != nil {
			return fallbackLogger(err)
		}
		writers = append(writers, fileLogger)
	}
	if loggerConfig.RollingConfig != nil {
		rollingLogger, err := createRollingLogger(*loggerConfig.RollingConfig)
		if err != nil {
			return fallbackLogger(err)
		}
		writers = append(writers, rollingLogger)
	}
	var managementWriter zerolog.LevelWriter
	if features.Contains(features.FeatureManagementLogs) {
		managementWriter = ManagementLogger
	}
	level, levelErr := zerolog.ParseLevel(loggerConfig.MinLevel)
	if levelErr != nil {
		// Unparseable level: default to info and warn once below.
		level = zerolog.InfoLevel
	}
	multi := resilientMultiWriter{level, writers, managementWriter}
	log := zerolog.New(multi).With().Timestamp().Logger()
	if !levelErrorLogged && levelErr != nil {
		log.Error().Msgf("Failed to parse log level %q, using %q instead", loggerConfig.MinLevel, level)
		levelErrorLogged = true
	}
	return &log
}
// CreateTransportLoggerFromContext builds a logger from the transport log
// level flag and the shared log directory flag.
func CreateTransportLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger {
	return createFromContext(c, LogTransportLevelFlag, LogDirectoryFlag, disableTerminal)
}
// CreateLoggerFromContext builds the main logger from the standard CLI flags.
func CreateLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger {
	return createFromContext(c, LogLevelFlag, LogDirectoryFlag, disableTerminal)
}
// CreateSSHLoggerFromContext builds a logger from the SSH-specific flags.
func CreateSSHLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger {
	return createFromContext(c, LogSSHLevelFlag, LogSSHDirectoryFlag, disableTerminal)
}
// createFromContext reads the level/file/directory flags named by the caller,
// builds a Config, and returns the resulting logger. When both a log file and
// a log directory are set, the file takes precedence and an error is logged.
func createFromContext(
	c *cli.Context,
	logLevelFlagName,
	logDirectoryFlagName string,
	disableTerminal bool,
) *zerolog.Logger {
	logLevel := c.String(logLevelFlagName)
	logFile := c.String(LogFileFlag)
	logDirectory := c.String(logDirectoryFlagName)
	loggerConfig := CreateConfig(
		logLevel,
		disableTerminal,
		logDirectory,
		logFile,
	)
	log := newZerolog(loggerConfig)
	if incompatibleFlagsSet := logFile != "" && logDirectory != ""; incompatibleFlagsSet {
		log.Error().Msgf("Your config includes values for both %s (%s) and %s (%s), but they are incompatible. %s takes precedence.", LogFileFlag, logFile, logDirectoryFlagName, logDirectory, LogFileFlag)
	}
	return log
}
// Create returns a logger for the given config, or a console-only logger at
// the default level when loggerConfig is nil.
func Create(loggerConfig *Config) *zerolog.Logger {
	if loggerConfig == nil {
		// Use named fields (instead of a positional literal) so this stays
		// correct if Config's fields are ever reordered; only console output
		// and the minimum level are set by default.
		loggerConfig = &Config{
			ConsoleConfig: defaultConfig.ConsoleConfig,
			MinLevel:      defaultConfig.MinLevel,
		}
	}
	return newZerolog(loggerConfig)
}
// createConsoleLogger builds a human-readable console writer on stderr; color
// is disabled when configured off or when stderr is not a terminal.
func createConsoleLogger(config ConsoleConfig) io.Writer {
	consoleOut := os.Stderr
	return zerolog.ConsoleWriter{
		Out: colorable.NewColorable(consoleOut),
		NoColor: config.noColor || !term.IsTerminal(int(consoleOut.Fd())),
		TimeFormat: consoleTimeFormat,
	}
}
// fileInitializer lazily creates a log writer exactly once and remembers any
// creation error for subsequent callers.
type fileInitializer struct {
	once sync.Once
	writer io.Writer
	creationError error
}
var (
	singleFileInit fileInitializer
	rotatingFileInit fileInitializer
)
// createFileWriter opens (or creates) the single-file log destination.
// NOTE(review): the sync.Once means only the first config passed in takes
// effect; later calls return the first writer/error regardless of their
// config — confirm callers always pass the same config.
func createFileWriter(config FileConfig) (io.Writer, error) {
	singleFileInit.once.Do(func() {
		var logFile io.Writer
		fullpath := config.Fullpath()
		// Try to open the existing file
		logFile, err := os.OpenFile(fullpath, os.O_APPEND|os.O_WRONLY, filePermMode)
		if err != nil {
			// If the existing file wasn't found, or couldn't be opened, just ignore
			// it and recreate a new one.
			logFile, err = createDirFile(config)
			// If creating a new logfile fails, then we have no choice but to error out.
			if err != nil {
				singleFileInit.creationError = err
				return
			}
		}
		singleFileInit.writer = logFile
	})
	return singleFileInit.writer, singleFileInit.creationError
}
// createDirFile creates the log file (and its directory, when configured),
// opened for append-only writing with filePermMode permissions.
func createDirFile(config FileConfig) (io.Writer, error) {
	if config.Dirname != "" {
		err := os.MkdirAll(config.Dirname, dirPermMode)
		if err != nil {
			// %w (was %s) keeps the underlying error unwrappable with
			// errors.Is/errors.As; the rendered message is unchanged.
			return nil, fmt.Errorf("unable to create directories for new logfile: %w", err)
		}
	}
	mode := os.FileMode(filePermMode)
	fullPath := filepath.Join(config.Dirname, config.Filename)
	logFile, err := os.OpenFile(fullPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, mode)
	if err != nil {
		return nil, fmt.Errorf("unable to create a new logfile: %w", err)
	}
	return logFile, nil
}
// createRollingLogger lazily creates the size-rotated lumberjack writer.
// NOTE(review): like createFileWriter, the sync.Once means only the first
// config takes effect; later calls get the first writer/error back.
func createRollingLogger(config RollingConfig) (io.Writer, error) {
	rotatingFileInit.once.Do(func() {
		if err := os.MkdirAll(config.Dirname, dirPermMode); err != nil {
			rotatingFileInit.creationError = err
			return
		}
		rotatingFileInit.writer = &lumberjack.Logger{
			Filename: path.Join(config.Dirname, config.Filename),
			MaxBackups: config.maxBackups,
			MaxSize: config.maxSize,
			MaxAge: config.maxAge,
		}
	})
	return rotatingFileInit.writer, rotatingFileInit.creationError
}
|
package snailframe
import (
"github.com/CloudyKit/jet"
log "github.com/sirupsen/logrus"
"io"
"os"
"path/filepath"
)
// tpl wraps a jet template set together with the filename suffix appended on
// every template lookup.
type tpl struct {
	tplSet *jet.Set
	tplSuffix string
}
// tplConfig configures template loading: the directory (resolved relative to
// the executable), the template file suffix, and whether to reload templates
// on change (jet development mode).
type tplConfig struct {
	Dir string
	Suffix string
	Reload bool
}
// newTpl builds a tpl from cfg, defaulting Dir to "template" and Suffix to
// "jet". The template root is resolved relative to the executable's directory;
// cfg.Reload enables jet's development (auto-reload) mode.
func newTpl(cfg tplConfig) *tpl {
	if cfg.Dir == "" {
		cfg.Dir = "template"
	}
	if cfg.Suffix == "" {
		cfg.Suffix = "jet"
	}
	exeDir, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		panic(err.Error())
	}
	// filepath.Join instead of manual "/" concatenation so the path is correct
	// on every OS and redundant separators are normalized.
	dir := filepath.Join(exeDir, cfg.Dir)
	log.WithFields(log.Fields{
		"Dir":    dir,
		"Suffix": cfg.Suffix,
		"Debug":  cfg.Reload,
	}).Trace("Init Template")
	tplObj := new(tpl)
	tplObj.tplSet = jet.NewHTMLSet(dir)
	tplObj.tplSet.SetDevelopmentMode(cfg.Reload)
	tplObj.tplSuffix = cfg.Suffix
	return tplObj
}
// AddGlobal registers a value under key that is available to every template in
// the set; it returns the underlying jet.Set for chaining.
func (this *tpl)AddGlobal(key string, i interface{}) *jet.Set {
	log.WithFields(log.Fields{
		"Global": key,
	}).Trace("Template AddGlobal")
	return this.tplSet.AddGlobal(key, i)
}
/*
Convenience helper. Example:
var theVar = make(map[string]interface{})
theVar["xxx"] = "xxx"
snailrouter.Execute(w,"index.jet",theVar)
*/
// Execute renders the named template into w, exposing maps as template
// variables. Lookup failures panic via GetTemplate; execution failures are
// logged (previously the execution error was silently discarded).
func (this *tpl) Execute(w io.Writer, tplName string, maps map[string]interface{}) {
	obj := this.GetTemplate(tplName)
	vars := make(jet.VarMap)
	for mapk, mapv := range maps {
		vars.Set(mapk, mapv)
	}
	if err := obj.Execute(w, vars, nil); err != nil {
		log.WithFields(log.Fields{
			"tplName": tplName,
			"err":     err,
		}).Error("Template Execute Error")
	}
}
// GetTemplate loads the named template, appending the configured suffix.
// On lookup failure it logs at Panic level, which itself panics with the log
// message; the explicit panic(err) that used to follow was unreachable
// (logrus Panic always panics) and has been removed.
func (this *tpl) GetTemplate(Name string) *jet.Template {
	log.WithFields(log.Fields{
		"tplName": Name,
	}).Debug("GetTemplate")
	jt, err := this.tplSet.GetTemplate(Name + "." + this.tplSuffix)
	if err != nil {
		log.WithFields(log.Fields{
			"tplName": Name,
			"err":     err,
		}).Panic("GetTemplate Error")
	}
	return jt
}
// SetVars copies maps into a fresh jet.VarMap so it can be passed directly to
// jet.Template.Execute.
func (this *tpl) SetVars(maps map[string]interface{}) jet.VarMap {
	vars := jet.VarMap{}
	for key, value := range maps {
		vars.Set(key, value)
	}
	return vars
}
/*
templateName := "index.jet"
t := snailrouter.GetTemplate(templateName)
theVar := map[string]interface{}{}
theVar["xxx"] = "xxx"
vars := snailrouter.SetVars(theVar)
vars.Set("user", "xx")
data := r
t.Execute(w, vars, data.URL.Query());
*/
|
package template
import (
"fmt"
"net/mail"
"github.com/jrapoport/gothic/config"
"github.com/matcornic/hermes/v2"
)
// ChangeEmailAction confirm email action
const ChangeEmailAction = "change/email"
// ChangeEmail mail template
//
// Sent when a user requests an email-address change; newAddress is the
// address the account will switch to once confirmed (see instructions).
type ChangeEmail struct {
	MailTemplate
	newAddress string
}
// Compile-time check that ChangeEmail satisfies the Template interface.
var _ Template = (*ChangeEmail)(nil)
// NewChangeEmail returns a new change mail template.
func NewChangeEmail(c config.MailTemplate, to mail.Address, newAddress, token, referralURL string) *ChangeEmail {
	e := new(ChangeEmail)
	e.newAddress = newAddress
	e.Configure(c, to, token, referralURL)
	return e
}
// Action returns the action for the mail template.
func (e ChangeEmail) Action() string {
	return ChangeEmailAction
}
// Subject returns the subject for the mail.
// A subject configured on the underlying MailTemplate wins; otherwise the
// generated default subject is used.
func (e ChangeEmail) Subject() string {
	if e.MailTemplate.Subject() != "" {
		return e.MailTemplate.Subject()
	}
	return e.subject()
}
// LoadBody loads the body for the mail.
// After delegating to the embedded MailTemplate, it fills in any intro, action
// instructions, button text, and button link the configured template left
// empty, then prepends the standard outro.
func (e *ChangeEmail) LoadBody(action string, tc config.MailTemplate) error {
	err := e.MailTemplate.LoadBody(action, tc)
	if err != nil {
		return err
	}
	// len() is never negative, so compare with == 0 rather than <= 0 (S1009-style simplification).
	if len(e.Body.Intros) == 0 {
		e.Body.Intros = []string{e.intro()}
	}
	if len(e.Body.Actions) == 0 {
		e.Body.Actions = append(e.Body.Actions, hermes.Action{})
	}
	a := &e.Body.Actions[0]
	if a.Instructions == "" {
		a.Instructions = e.instructions()
	}
	if a.Button.Text == "" {
		a.Button.Text = e.buttonText()
	}
	if a.Button.Link == "" {
		a.Button.Link = e.Link()
	}
	e.Body.Outros = append([]string{e.outro()}, e.Body.Outros...)
	return nil
}
// subject returns the default subject line for the change-email mail.
func (e ChangeEmail) subject() string {
	return fmt.Sprintf("%s email change request", e.Service())
}

// intro returns the default intro paragraph for the change-email mail.
func (e ChangeEmail) intro() string {
	return fmt.Sprintf(
		"You received this message because there was a request "+
			"to change the email address you use to access your %s account.",
		e.Service())
}

// instructions returns the default action instructions, naming the address the
// account will switch to.
func (e ChangeEmail) instructions() string {
	return fmt.Sprintf(
		"Please click the button below to confirm and change"+
			" your email to: %s",
		e.newAddress)
}

// buttonText returns the default action button label.
func (e ChangeEmail) buttonText() string {
	return "Change Email"
}

// outro returns the default outro paragraph.
func (e ChangeEmail) outro() string {
	return "Once confirmed, your login email will change to the new address. If" +
		" you did not request this change, no further action is required. You " +
		"can safely ignore this message."
}
|
// ===================================== //
// author: gavingqf //
// == Please don't change me by hand == //
//====================================== //
/*you have defined the following interface:
type IConfig interface {
// load interface
Load(path string) bool
// clear interface
Clear()
}
*/
package base
import (
"shared/utility/glog"
"strings"
)
// CfgCharacterStar is one row of the character star table; field names mirror
// the sheet columns parsed in Load (id, charID, star, ratios, stats, cost...).
type CfgCharacterStar struct {
	Id int32
	CharID int32
	Star int32
	HpRatio float64
	PhyAtkRatio float64
	MagAtkRatio float64
	PhyDfsRatio float64
	MagDfsRatio float64
	HpMax int32
	PhyAtk int32
	MagAtk int32
	PhyDfs int32
	MagDfs int32
	RarityUp int32
	Cost []string
	BuildProducePercent int32
}
// CfgCharacterStarConfig holds all CfgCharacterStar rows, keyed by Id.
type CfgCharacterStarConfig struct {
	data map[int32]*CfgCharacterStar
}
// NewCfgCharacterStarConfig returns an empty table ready for Load.
func NewCfgCharacterStarConfig() *CfgCharacterStarConfig {
	return &CfgCharacterStarConfig{
		data: make(map[int32]*CfgCharacterStar),
	}
}
// Load implements IConfig.Load: it parses the table at filePath into
// c.data, keyed by Id, rejecting duplicate Ids.
// NOTE(review): integer-field parse errors abort with false, but the
// float ratio fields only log and keep their zero value — presumably a
// generator decision; confirm before "fixing". Rows start at i = 2,
// which presumably skips two header rows.
func (c *CfgCharacterStarConfig) Load(filePath string) bool {
	parse := NewParser()
	if err := parse.Load(filePath, true); err != nil {
		glog.Info("Load", filePath, "err: ", err)
		return false
	}
	// iterator all lines' content
	for i := 2; i < parse.GetAllCount(); i++ {
		data := new(CfgCharacterStar)
		/* parse Id field */
		vId, _ := parse.GetFieldByName(uint32(i), "id")
		var IdRet bool
		data.Id, IdRet = String2Int32(vId)
		if !IdRet {
			glog.Error("Parse CfgCharacterStar.Id field error,value:", vId)
			return false
		}
		/* parse CharID field */
		vCharID, _ := parse.GetFieldByName(uint32(i), "charID")
		var CharIDRet bool
		data.CharID, CharIDRet = String2Int32(vCharID)
		if !CharIDRet {
			glog.Error("Parse CfgCharacterStar.CharID field error,value:", vCharID)
			return false
		}
		/* parse Star field */
		vStar, _ := parse.GetFieldByName(uint32(i), "star")
		var StarRet bool
		data.Star, StarRet = String2Int32(vStar)
		if !StarRet {
			glog.Error("Parse CfgCharacterStar.Star field error,value:", vStar)
			return false
		}
		/* parse HpRatio field (parse failure logged, zero value kept) */
		vHpRatio, _ := parse.GetFieldByName(uint32(i), "hpRatio")
		var HpRatioRet bool
		data.HpRatio, HpRatioRet = String2Float(vHpRatio)
		if !HpRatioRet {
			glog.Error("Parse CfgCharacterStar.HpRatio field error,value:", vHpRatio)
		}
		/* parse PhyAtkRatio field */
		vPhyAtkRatio, _ := parse.GetFieldByName(uint32(i), "phyAtkRatio")
		var PhyAtkRatioRet bool
		data.PhyAtkRatio, PhyAtkRatioRet = String2Float(vPhyAtkRatio)
		if !PhyAtkRatioRet {
			glog.Error("Parse CfgCharacterStar.PhyAtkRatio field error,value:", vPhyAtkRatio)
		}
		/* parse MagAtkRatio field */
		vMagAtkRatio, _ := parse.GetFieldByName(uint32(i), "magAtkRatio")
		var MagAtkRatioRet bool
		data.MagAtkRatio, MagAtkRatioRet = String2Float(vMagAtkRatio)
		if !MagAtkRatioRet {
			glog.Error("Parse CfgCharacterStar.MagAtkRatio field error,value:", vMagAtkRatio)
		}
		/* parse PhyDfsRatio field */
		vPhyDfsRatio, _ := parse.GetFieldByName(uint32(i), "phyDfsRatio")
		var PhyDfsRatioRet bool
		data.PhyDfsRatio, PhyDfsRatioRet = String2Float(vPhyDfsRatio)
		if !PhyDfsRatioRet {
			glog.Error("Parse CfgCharacterStar.PhyDfsRatio field error,value:", vPhyDfsRatio)
		}
		/* parse MagDfsRatio field */
		vMagDfsRatio, _ := parse.GetFieldByName(uint32(i), "magDfsRatio")
		var MagDfsRatioRet bool
		data.MagDfsRatio, MagDfsRatioRet = String2Float(vMagDfsRatio)
		if !MagDfsRatioRet {
			glog.Error("Parse CfgCharacterStar.MagDfsRatio field error,value:", vMagDfsRatio)
		}
		/* parse HpMax field */
		vHpMax, _ := parse.GetFieldByName(uint32(i), "hpMax")
		var HpMaxRet bool
		data.HpMax, HpMaxRet = String2Int32(vHpMax)
		if !HpMaxRet {
			glog.Error("Parse CfgCharacterStar.HpMax field error,value:", vHpMax)
			return false
		}
		/* parse PhyAtk field */
		vPhyAtk, _ := parse.GetFieldByName(uint32(i), "phyAtk")
		var PhyAtkRet bool
		data.PhyAtk, PhyAtkRet = String2Int32(vPhyAtk)
		if !PhyAtkRet {
			glog.Error("Parse CfgCharacterStar.PhyAtk field error,value:", vPhyAtk)
			return false
		}
		/* parse MagAtk field */
		vMagAtk, _ := parse.GetFieldByName(uint32(i), "magAtk")
		var MagAtkRet bool
		data.MagAtk, MagAtkRet = String2Int32(vMagAtk)
		if !MagAtkRet {
			glog.Error("Parse CfgCharacterStar.MagAtk field error,value:", vMagAtk)
			return false
		}
		/* parse PhyDfs field */
		vPhyDfs, _ := parse.GetFieldByName(uint32(i), "phyDfs")
		var PhyDfsRet bool
		data.PhyDfs, PhyDfsRet = String2Int32(vPhyDfs)
		if !PhyDfsRet {
			glog.Error("Parse CfgCharacterStar.PhyDfs field error,value:", vPhyDfs)
			return false
		}
		/* parse MagDfs field */
		vMagDfs, _ := parse.GetFieldByName(uint32(i), "magDfs")
		var MagDfsRet bool
		data.MagDfs, MagDfsRet = String2Int32(vMagDfs)
		if !MagDfsRet {
			glog.Error("Parse CfgCharacterStar.MagDfs field error,value:", vMagDfs)
			return false
		}
		/* parse RarityUp field */
		vRarityUp, _ := parse.GetFieldByName(uint32(i), "rarityUp")
		var RarityUpRet bool
		data.RarityUp, RarityUpRet = String2Int32(vRarityUp)
		if !RarityUpRet {
			glog.Error("Parse CfgCharacterStar.RarityUp field error,value:", vRarityUp)
			return false
		}
		/* parse Cost field: comma-separated list kept as raw strings */
		vecCost, _ := parse.GetFieldByName(uint32(i), "cost")
		arrayCost := strings.Split(vecCost, ",")
		for j := 0; j < len(arrayCost); j++ {
			v := arrayCost[j]
			data.Cost = append(data.Cost, v)
		}
		/* parse BuildProducePercent field */
		vBuildProducePercent, _ := parse.GetFieldByName(uint32(i), "buildProducePercent")
		var BuildProducePercentRet bool
		data.BuildProducePercent, BuildProducePercentRet = String2Int32(vBuildProducePercent)
		if !BuildProducePercentRet {
			glog.Error("Parse CfgCharacterStar.BuildProducePercent field error,value:", vBuildProducePercent)
			return false
		}
		// Duplicate primary keys abort the whole load.
		if _, ok := c.data[data.Id]; ok {
			glog.Errorf("Find %d repeated", data.Id)
			return false
		}
		c.data[data.Id] = data
	}
	return true
}
// Clear implements IConfig.Clear. Intentionally a no-op for this table.
func (c *CfgCharacterStarConfig) Clear() {
}

// Find returns the row with the given Id and whether it exists.
func (c *CfgCharacterStarConfig) Find(id int32) (*CfgCharacterStar, bool) {
	v, ok := c.data[id]
	return v, ok
}

// GetAllData exposes the internal map directly; callers must not mutate it.
func (c *CfgCharacterStarConfig) GetAllData() map[int32]*CfgCharacterStar {
	return c.data
}

// Traverse logs every row (debug aid); map iteration order is random.
func (c *CfgCharacterStarConfig) Traverse() {
	for _, v := range c.data {
		glog.Info(v.Id, ",", v.CharID, ",", v.Star, ",", v.HpRatio, ",", v.PhyAtkRatio, ",", v.MagAtkRatio, ",", v.PhyDfsRatio, ",", v.MagDfsRatio, ",", v.HpMax, ",", v.PhyAtk, ",", v.MagAtk, ",", v.PhyDfs, ",", v.MagDfs, ",", v.RarityUp, ",", v.Cost, ",", v.BuildProducePercent)
	}
}
|
package main
import (
"bytes"
"flag"
"fmt"
"image"
"image/color"
"os"
"github.com/marianina8/expression/azure"
"gocv.io/x/gocv"
)
func check(msg string, e error) {
if e != nil {
panic(fmt.Errorf("%s: %s", msg, e.Error()))
}
}
// main parses the -video flag, builds an Azure emotion client from the
// emotion_key / emotion_host environment variables, and plays the video
// with live emotion annotation.
func main() {
	video := flag.String("video", "", "video for emotion analysis")
	flag.Parse()
	if *video == "" {
		flag.Usage()
		return
	}
	// NOTE(review): empty env vars are passed through to azure.NewClient
	// unchecked — presumably NewClient validates them; confirm.
	key := os.Getenv("emotion_key")
	host := os.Getenv("emotion_host")
	azure, err := azure.NewClient(host, key)
	check("new azure client", err)
	displayVideo(azure, *video)
}
// displayVideo streams the file frame by frame in a GUI window,
// periodically sending a JPEG-encoded frame to the Azure client and
// overlaying the most recent dominant emotion on the video.
// Blocks until the stream ends or a key is pressed.
func displayVideo(client *azure.Client, filename string) {
	stream, err := gocv.VideoCaptureFile(filename)
	if err != nil {
		check("capture video file", err)
	}
	defer stream.Close()
	// open display window
	window := gocv.NewWindow("Detect")
	defer window.Close()
	// prepare image matrix
	img := gocv.NewMat()
	defer img.Close()
	framenum := 0
	black := color.RGBA{0, 0, 0, 0}
	dominantEmotion := ""
	for {
		if ok := stream.Read(&img); !ok {
			return
		}
		if img.Empty() {
			continue
		}
		framenum++
		// NOTE(review): frames are JPEG-encoded on every iteration even
		// though only 1 in 30 is analyzed — could be moved inside the if.
		b, err := gocv.IMEncode(".jpg", img)
		check("gocv image encode", err)
		// analyze every 30th frame (the old comment said 120 — stale)
		if framenum%30 == 0 {
			emotionData := client.FaceAnalysis(bytes.NewReader(b))
			if (emotionData.FaceRectangle != azure.FaceRectangle{}) {
				dominantEmotion = emotionData.Dominant()
			}
		}
		gocv.PutText(&img, dominantEmotion, image.Pt(40, 200), gocv.FontHersheyPlain, 3, black, 2)
		// show the image in the window, and wait 1 millisecond
		window.IMShow(img)
		if window.WaitKey(1) >= 0 {
			break
		}
	}
}
|
package core_test
import (
core "github.com/misostack/ezgo/core"
"testing"
"fmt"
)
// TestParseWebServerConfig is a smoke test: it only checks that
// ParseWebServerConfig runs without panicking and prints the result;
// it makes no assertions on the parsed values.
func TestParseWebServerConfig(t *testing.T) {
	cfg := core.WebServerConfig{}
	core.ParseWebServerConfig(&cfg)
	fmt.Printf("%v\n", cfg)
}
// func TestConfigStructToMap(t *testing.T) {
// // test with Config struct
// cfg := core.Config{}
// cfgm := make(map[string]interface{})
// cfg.StructToMap(&cfgm)
// expectedMap := map[string]string{
// "Port": "",
// "Address": "",
// }
// is_valid := true
// for k, _ := range expectedMap {
// _, ok := cfgm[k]
// if !ok {
// is_valid = false
// break
// }
// }
// if !is_valid {
// t.Errorf("Config.StructToMap(m) failed!")
// }
// }
// func TestSetStructField(t *testing.T) {
// s := core.Config{
// Address: "127.0.0.1",
// Port: "8081",
// }
// // reflect.ValueOf(&s).Elem().FieldByName("Address").Set(reflect.ValueOf("something"))
// core.SetStructField(s, "Address", "10.84.4.64")
// fmt.Printf("%v", s)
// }
|
package crypto
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"encoding/asn1"
"encoding/pem"
"errors"
"fmt"
"github.com/dgrijalva/jwt-go"
"io/ioutil"
"log"
"math/big"
"os"
)
// TEST_ENV switches key storage/lookup to test.pem when true.
// NOTE(review): mutable package-level state; must be set before any key use.
var TEST_ENV = false

// keyName is the PEM file name under the "keys" directory; it is mutated
// in place by getPrivateKey/storePrivateKey when TEST_ENV is set.
var keyName = "private.pem"
func newPrivateKey() *ecdsa.PrivateKey {
log.Println("creating a new private key")
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
log.Println(err)
}
log.Println("new private key created")
return privateKey
}
func encodePrivateKeyToPem(key *ecdsa.PrivateKey) string {
x509Encoded, _ := x509.MarshalECPrivateKey(key)
pemEncoded := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: x509Encoded})
log.Println("private key encoded to PEM")
return string(pemEncoded)
}
func decodePrivateKeyFromPem(encoded string) *ecdsa.PrivateKey {
block, _ := pem.Decode([]byte(encoded))
x509Encoded := block.Bytes
privateKey, err := x509.ParseECPrivateKey(x509Encoded)
if err != nil {
log.Print(err)
} else {
log.Println("private key decoded from PEM")
}
return privateKey
}
func EncodePublicKeyToPem(key *ecdsa.PublicKey) string {
x509EncodedPub, _ := x509.MarshalPKIXPublicKey(key)
pemEncodedPub := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: x509EncodedPub})
log.Println("public key encoded to PEM")
return string(pemEncodedPub)
}
func DecodePublicKeyFromPem(encoded string) (publicKey *ecdsa.PublicKey, err error) {
blockPub, _ := pem.Decode([]byte(encoded))
if blockPub == nil {
log.Printf("Invalid publicKey: %v", encoded)
}
x509EncodedPub := blockPub.Bytes
genericPublicKey, err := x509.ParsePKIXPublicKey(x509EncodedPub)
if err != nil {
log.Printf("error parsing x509: %s", err)
return
}
publicKey = genericPublicKey.(*ecdsa.PublicKey)
log.Println("public key decoded from PEM")
return
}
// Sign signs message with the stored private key, returning the raw
// (r, s) signature components. Panics raised by the inner sign helper
// are recovered and converted into the returned error.
func Sign(message string) (r *big.Int, s *big.Int, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = errors.New(r.(string))
			return
		}
	}()
	key, err := getPrivateKey()
	if err != nil {
		// Fix: the error was previously ignored and the nil key was passed
		// to sign, which then panicked inside ecdsa.Sign.
		return
	}
	r, s = sign(message, key)
	return
}
func sign(message string, key *ecdsa.PrivateKey) (r *big.Int, s *big.Int) {
byteMessage := []byte(message)
r, s, err := ecdsa.Sign(rand.Reader, key, byteMessage)
if err != nil {
log.Panicln(err)
}
log.Println("message signed")
return
}
func verify(key *ecdsa.PublicKey, message string, r *big.Int, s *big.Int) bool {
byteMessage := []byte(message)
check := ecdsa.Verify(key, byteMessage, r, s)
if check {
log.Println("signature verified")
} else {
log.Println("signature refused")
}
return check
}
// EncodeSignatureDER packs (r, s) into an ASN.1 DER-encoded ECDSASignature.
func EncodeSignatureDER(r *big.Int, s *big.Int) (der []byte, err error) {
	sig := ECDSASignature{R: r, S: s}
	der, err = asn1.Marshal(sig)
	return
}

// DecodeSignatureDER unpacks an ASN.1 DER-encoded ECDSASignature back
// into its (r, s) components.
func DecodeSignatureDER(der []byte) (r *big.Int, s *big.Int, err error) {
	sig := &ECDSASignature{}
	_, err = asn1.Unmarshal(der, sig)
	if err != nil {
		return
	}
	r = sig.R
	s = sig.S
	return
}
// getPrivateKey reads keys/<keyName> and decodes the PEM private key.
// When TEST_ENV is set it permanently switches the package-level
// keyName to "test.pem" (mutable global — see var declaration).
func getPrivateKey() (privateKey *ecdsa.PrivateKey, err error) {
	//var user string
	//if TEST_ENV {
	//	user = "test"
	//} else {
	//	user = "crypto"
	//}
	//// get signingKey
	//secret, err := keyring.Get(service, user)
	if TEST_ENV == true {
		keyName = "test.pem"
	}
	keyPath := "keys/" + keyName
	secret, err := ioutil.ReadFile(keyPath)
	if err != nil {
		log.Println(err)
		return
	}
	log.Printf("key retrieved from %s", keyPath)
	// NOTE(review): decodePrivateKeyFromPem can return nil for a corrupt
	// file while err stays nil — callers should tolerate a nil key.
	privateKey = decodePrivateKeyFromPem(string(secret))
	return
}
// GetPublicKey derives the public key from the stored private key.
func GetPublicKey() (publicKey *ecdsa.PublicKey, err error) {
	privKey, err := getPrivateKey()
	if err != nil {
		log.Println(err)
		return
	}
	publicKey = &privKey.PublicKey
	log.Printf("got public key")
	return
}
// storePrivateKey PEM-encodes key and writes it to keys/<keyName>,
// creating the "keys" directory if needed. Failures are logged and
// abort the write; success is only logged when the file was written.
func storePrivateKey(key *ecdsa.PrivateKey) {
	if TEST_ENV == true {
		keyName = "test.pem"
	}
	keyPath := "keys/" + keyName
	if _, err := os.Stat("keys"); os.IsNotExist(err) {
		log.Print("folder keys doesn't exists. Creating keys")
		if err := os.Mkdir("keys", os.FileMode(os.ModePerm)); err != nil {
			// Fix: the Mkdir error was previously assigned and dropped,
			// letting WriteFile fail with a more confusing message.
			log.Println(err)
			return
		}
	}
	data := []byte(encodePrivateKeyToPem(key))
	if err := ioutil.WriteFile(keyPath, data, os.FileMode(os.ModePerm)); err != nil {
		log.Println(err)
		// Fix: previously "key stored" was logged even when the write failed.
		return
	}
	//var user string
	//if TEST_ENV {
	//	user = "test"
	//} else {
	//	user = "crypto"
	//}
	//privatePem := encodePrivateKeyToPem(key)
	//err := keyring.Set(service, user, privatePem)
	log.Printf("key stored in %s", keyPath)
}
// SignJwt builds an ES256 JWT carrying claims and signs it with the
// stored private key, returning the compact encoded token.
func SignJwt(claims jwt.MapClaims) (encoded string, err error) {
	// Create a new token object, specifying signing method and the claims
	// you would like it to contain.
	token := jwt.NewWithClaims(jwt.SigningMethodES256, claims)
	privateKey, err := getPrivateKey()
	if err != nil {
		// Fix: previously the error was ignored and a nil key was handed
		// to SignedString.
		return
	}
	// Sign and get the complete encoded token as a string using the key.
	encoded, err = token.SignedString(privateKey)
	return
}
// ReadJWT decodes the claims of a JWT WITHOUT verifying its signature:
// jwt.Parse is called with a nil key function, so the parse error is
// deliberately tolerated and only a nil token is treated as failure.
// Use CheckJWTSignature when authenticity matters.
func ReadJWT(tokenString string) (claims jwt.MapClaims, err error) {
	// Parse takes the token string and a function for looking up the key. The latter is especially
	// useful if you use multiple keys for your application. The standard is to use 'kid' in the
	// head of the token to identify which key to use, but the parsed token (head and claims) is provided
	// to the callback, providing flexibility.
	token, _ := jwt.Parse(tokenString, nil)
	if token == nil {
		log.Printf("error reading jwt")
		err = errors.New("error reading jwt")
		return
	}
	claims, ok := token.Claims.(jwt.MapClaims)
	if !ok {
		log.Print("error extracting claims from jwt")
		err = errors.New("error extracting claims from jwt")
		return
	}
	log.Printf("read JWT")
	return
}
// CheckJWTSignature parses tokenString, verifies it was signed with an
// ECDSA method using key, and returns the claims. err is non-nil (or
// the token invalid) on any verification failure.
func CheckJWTSignature(tokenString string, key *ecdsa.PublicKey) (claims jwt.MapClaims, err error) {
	// Parse takes the token string and a function for looking up the key;
	// the callback also lets us reject unexpected signing algorithms.
	token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
		// Don't forget to validate the alg is what you expect:
		if _, ok := token.Method.(*jwt.SigningMethodECDSA); !ok {
			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
		}
		return key, nil
	})
	if token == nil {
		// Fix: jwt.Parse can return a nil token on malformed input; the
		// old code dereferenced it unconditionally and panicked.
		if err == nil {
			err = errors.New("error reading jwt")
		}
		log.Print(err)
		return
	}
	claims, ok := token.Claims.(jwt.MapClaims)
	if ok && token.Valid {
		log.Print("JWT validated")
	} else {
		log.Print(err)
	}
	return
}
// Init ensures a private key exists on disk: if none can be loaded, a
// fresh P-256 key is generated and stored.
// NOTE(review): logs key.D (the secret scalar) when a key is found —
// confirm this debug output is acceptable; it leaks key material to logs.
func Init() {
	key, err := getPrivateKey()
	if err != nil {
		log.Println("key not found, generating a new one")
		privateKey := newPrivateKey()
		storePrivateKey(privateKey)
	} else {
		log.Println("key found")
		log.Println(key.D)
	}
}
|
package confsvr
import (
"fmt"
"github.com/oceanho/gw/sdk/confsvr/param"
"sync"
"time"
)
// st is the package-wide singleton watcher state.
var st *state

// state wraps the singleton setting with a sync.Once so Initial can only
// ever configure it once.
type state struct {
	sync.Once
	state *setting
}

func init() {
	st = &state{}
}
// setting holds the credentials, callbacks and cached config/token
// state for the config-server watcher.
// NOTE(review): fields like currentConfigData and shutdown are written
// from the watcher goroutine and read elsewhere without using locker —
// verify the intended synchronization.
type setting struct {
	AccessKeyId          string
	AccessKeySecret      string
	OnChangedCallback    func(data []byte)
	Namespace            string
	Environment          string
	token                string
	client               *Client
	tokenExpiredAt       int64
	shutdown             bool
	currentConfigVersion int64
	currentConfigData    string
	locker               sync.Mutex
}
// Initial configures the singleton watcher state exactly once; later
// calls are no-ops thanks to the sync.Once guard.
func Initial(ak, aks, ns, env string, onDataChangedCallback func(data []byte)) {
	st.Once.Do(func() {
		st.state = &setting{
			AccessKeyId:       ak,
			AccessKeySecret:   aks,
			Namespace:         ns,
			Environment:       env,
			OnChangedCallback: onDataChangedCallback,
		}
	})
}
// Sync forces one token refresh plus config fetch and returns the
// current raw config bytes. Panics if Initial was never called.
// NOTE(review): it uses st.state.client, which is only assigned in
// StartWatcher — calling Sync before StartWatcher will nil-pointer
// panic inside updateToken; confirm the intended call order.
func Sync() ([]byte, error) {
	if st.state == nil {
		panic("should be call confsvr.Initial(...) At first.")
	}
	updateToken()
	updateConf()
	return []byte(st.state.currentConfigData), nil
}
// StartWatcher creates the HTTP client, performs an initial token +
// config fetch, then polls for new versions in a background goroutine
// until shutdownSignal fires. Panics if Initial was never called.
func StartWatcher(opts *Option, shutdownSignal chan struct{}) {
	if st.state == nil {
		panic("should be call confsvr.Initial(...) At first.")
	}
	// Initial vars.
	st.state.client = NewClient(opts)
	// 1. Got token At first.
	updateToken()
	// 2. Got configuration.
	updateConf()
	go watchWorker(opts)
	go func() {
		// wait for showdown server Signal.
		<-shutdownSignal
		st.state.shutdown = true
	}()
}
func updateToken() {
req := param.ReqGetAuth{
AccessKeyId: st.state.AccessKeyId,
AccessKeySecret: st.state.AccessKeySecret,
}
resp := ¶m.RspGetAuth{}
code, err := st.state.client.Do(req, resp)
if err != nil {
fmt.Printf("[configsvr-worker] - [WARNING, updateToken fail, code:%d, err: %v", code, err)
return
}
st.state.token = resp.Payload.Token
st.state.tokenExpiredAt = resp.Payload.ExpiredAt
}
func hasNewVersion() bool {
req := param.ReqCheckConfigVersion{
Token: st.state.token,
}
resp := ¶m.RspCheckConfigVersion{}
code, err := st.state.client.Do(req, resp)
if err != nil {
fmt.Printf("[configsvr-worker] - [WARNING, hasNewVersion fail, code:%d, err: %v", code, err)
}
return resp.Payload.Version > st.state.currentConfigVersion
}
func updateConf() {
req := param.ReqGetConf{
Token: st.state.token,
}
resp := ¶m.RespGetConf{}
code, err := st.state.client.Do(req, resp)
if err != nil {
fmt.Printf("[configsvr-worker] - [WARNING, updateConf fail, code:%d, err: %v", code, err)
return
}
st.state.currentConfigData = resp.Payload.Data
st.state.currentConfigVersion = resp.Payload.Version
}
// watchWorker polls the config server every QueryStateInterval seconds,
// refreshing the cached config whenever a newer version is reported,
// and exits once the shutdown flag has been raised.
func watchWorker(opts *Option) {
	interval := time.Duration(opts.QueryStateInterval) * time.Second
	for {
		time.Sleep(interval)
		if hasNewVersion() {
			updateConf()
		}
		if st.state.shutdown {
			return
		}
	}
}
|
// https://leetcode.com/problems/find-the-town-judge/
package leetcode_go
// findJudge returns the label (1..N) of the town judge — the person
// everyone else trusts who trusts nobody — or -1 if no such person
// exists. Each trust entry [a, b] means a trusts b.
func findJudge(N int, trust [][]int) int {
	trusts := make([]int, N+1)  // outgoing trust edges per person
	trusted := make([]int, N+1) // incoming trust edges per person
	for _, edge := range trust {
		trusts[edge[0]]++
		trusted[edge[1]]++
	}
	for candidate := 1; candidate <= N; candidate++ {
		if trusted[candidate] == N-1 && trusts[candidate] == 0 {
			return candidate
		}
	}
	return -1
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ecloud
import (
"context"
"fmt"
"yunion.io/x/pkg/errors"
api "yunion.io/x/onecloud/pkg/apis/compute"
"yunion.io/x/onecloud/pkg/cloudprovider"
"yunion.io/x/onecloud/pkg/multicloud"
)
// regionList maps ecloud region identifiers to their Chinese display
// names (area-city).
var regionList = map[string]string{
	"guangzhou-2": "华南-广州2",
	"beijing-1":   "华北-北京1",
	"hunan-1":     "华中-长沙1",
	"wuxi-1":      "华东-苏州",
	"dongguan-1":  "华南-广州3",
	"yaan-1":      "西南-成都",
	"zhengzhou-1": "华中-郑州",
	"beijing-2":   "华北-北京3",
	"zhuzhou-1":   "华中-长沙2",
	"jinan-1":     "华东-济南",
	"xian-1":      "西北-西安",
	"shanghai-1":  "华东-上海1",
	"chongqing-1": "西南-重庆",
	"ningbo-1":    "华东-杭州",
	"tianjin-1":   "天津-天津",
	"jilin-1":     "吉林-长春",
	"hubei-1":     "湖北-襄阳",
	"jiangxi-1":   "江西-南昌",
	"gansu-1":     "甘肃-兰州",
	"shanxi-1":    "山西-太原",
	"liaoning-1":  "辽宁-沈阳",
	"yunnan-2":    "云南-昆明2",
	"hebei-1":     "河北-石家庄",
	"fujian-1":    "福建-厦门",
	"guangxi-1":   "广西-南宁",
	"anhui-1":     "安徽-淮南",
	"huhehaote-1": "华北-呼和浩特",
	"guiyang-1":   "西南-贵阳",
}
// SRegion is an ecloud region, implementing the cloudprovider region
// interfaces; izones/ivpcs are lazily-populated caches.
// NOTE(review): the "Name" json tag is capitalized while "id" is not —
// presumably it mirrors the ecloud API response; confirm before normalizing.
type SRegion struct {
	cloudprovider.SFakeOnPremiseRegion
	multicloud.SRegion
	multicloud.SNoObjectStorageRegion

	client       *SEcloudClient
	storageCache *SStoragecache

	ID   string `json:"id"`
	Name string `json:"Name"`

	izones []cloudprovider.ICloudZone
	ivpcs  []cloudprovider.ICloudVpc
}
// GetId returns the provider-local region id.
func (r *SRegion) GetId() string {
	return r.ID
}

// GetName returns the region's display name.
func (r *SRegion) GetName() string {
	return r.Name
}

// GetGlobalId returns a globally unique id of the form "<env>/<id>".
func (r *SRegion) GetGlobalId() string {
	return fmt.Sprintf("%s/%s", r.client.GetAccessEnv(), r.ID)
}

// GetStatus always reports the region as in service.
func (r *SRegion) GetStatus() string {
	return api.CLOUD_REGION_STATUS_INSERVER
}
// Refresh is currently a no-op; re-fetching zones/vpcs is intentionally
// disabled (see commented code).
func (r *SRegion) Refresh() error {
	// err := r.fetchZones()
	// if err != nil {
	//	return err
	// }
	// return r.fetchVpcs()
	return nil
}

// IsEmulated reports that this is a real (non-emulated) region.
func (r *SRegion) IsEmulated() bool {
	return false
}

// GetI18n returns the region name's i18n table (Chinese name as-is,
// English name prefixed with the provider name).
func (r *SRegion) GetI18n() cloudprovider.SModelI18nTable {
	en := fmt.Sprintf("%s %s", CLOUD_PROVIDER_ECLOUD_EN, r.Name)
	table := cloudprovider.SModelI18nTable{}
	table["name"] = cloudprovider.NewSModelI18nEntry(r.GetName()).CN(r.GetName()).EN(en)
	return table
}
// GetLatitude() float32
// GetLongitude() float32

// GetGeographicInfo looks the region up in the static
// LatitudeAndLongitude table, falling back to the zero value.
func (r *SRegion) GetGeographicInfo() cloudprovider.SGeographicInfo {
	if info, ok := LatitudeAndLongitude[r.ID]; ok {
		return info
	}
	return cloudprovider.SGeographicInfo{}
}
// GetIZones returns the region's zones, fetching and caching them on
// first use.
func (r *SRegion) GetIZones() ([]cloudprovider.ICloudZone, error) {
	if r.izones == nil {
		err := r.fetchZones()
		if err != nil {
			return nil, err
		}
	}
	return r.izones, nil
}

// fetchZones lists the region's zones from the NOVA component API and
// caches them (wiring each zone back to this region and a host shell).
func (r *SRegion) fetchZones() error {
	request := NewNovaRequest(NewApiRequest(r.ID, "/api/v2/region",
		map[string]string{"component": "NOVA"}, nil))
	zones := make([]SZone, 0)
	err := r.client.doList(context.Background(), request, &zones)
	if err != nil {
		return err
	}
	izones := make([]cloudprovider.ICloudZone, len(zones))
	for i := range zones {
		zones[i].region = r
		zones[i].host = &SHost{
			zone: &zones[i],
		}
		izones[i] = &zones[i]
	}
	r.izones = izones
	return nil
}
// fetchVpcs lists the region's VPCs and caches them as interface values.
func (r *SRegion) fetchVpcs() error {
	vpcs, err := r.getVpcs()
	if err != nil {
		return err
	}
	ivpcs := make([]cloudprovider.ICloudVpc, len(vpcs))
	for i := range vpcs {
		ivpcs[i] = &vpcs[i]
	}
	r.ivpcs = ivpcs
	return nil
}

// getVpcs lists VPCs from the netcenter API, wiring each back to this
// region.
func (r *SRegion) getVpcs() ([]SVpc, error) {
	request := NewConsoleRequest(r.ID, "/api/v2/netcenter/vpc", nil, nil)
	vpcs := make([]SVpc, 0)
	err := r.client.doList(context.Background(), request, &vpcs)
	if err != nil {
		return nil, err
	}
	for i := range vpcs {
		vpcs[i].region = r
	}
	return vpcs, err
}
// getVpcById fetches a single VPC by its id.
func (r *SRegion) getVpcById(id string) (*SVpc, error) {
	request := NewConsoleRequest(r.ID, fmt.Sprintf("/api/v2/netcenter/vpc/%s", id), nil, nil)
	vpc := SVpc{}
	err := r.client.doGet(context.Background(), request, &vpc)
	if err != nil {
		return nil, err
	}
	vpc.region = r
	return &vpc, err
}

// getVpcByRouterId fetches the VPC that owns the given router.
func (r *SRegion) getVpcByRouterId(id string) (*SVpc, error) {
	request := NewConsoleRequest(r.ID, fmt.Sprintf("/api/v2/netcenter/vpc/router/%s", id), nil, nil)
	vpc := SVpc{}
	err := r.client.doGet(context.Background(), request, &vpc)
	if err != nil {
		return nil, err
	}
	vpc.region = r
	return &vpc, err
}
// GetIVpcs returns the region's VPCs, fetching and caching them on
// first use.
func (r *SRegion) GetIVpcs() ([]cloudprovider.ICloudVpc, error) {
	if r.ivpcs == nil {
		err := r.fetchVpcs()
		if err != nil {
			return nil, err
		}
	}
	return r.ivpcs, nil
}

// GetIEips: listing EIPs is not supported by this provider.
func (r *SRegion) GetIEips() ([]cloudprovider.ICloudEIP, error) {
	return nil, cloudprovider.ErrNotSupported
}

// GetIVpcById fetches one VPC directly by id (bypassing the cache).
func (r *SRegion) GetIVpcById(id string) (cloudprovider.ICloudVpc, error) {
	vpc, err := r.getVpcById(id)
	if err != nil {
		return nil, err
	}
	vpc.region = r
	return vpc, nil
}
// GetIZoneById scans the cached zone list for the zone whose global id
// matches, returning cloudprovider.ErrNotFound when absent.
func (r *SRegion) GetIZoneById(id string) (cloudprovider.ICloudZone, error) {
	izones, err := r.GetIZones()
	if err != nil {
		return nil, err
	}
	for _, izone := range izones {
		if izone.GetGlobalId() == id {
			return izone, nil
		}
	}
	return nil, cloudprovider.ErrNotFound
}
// GetIEipById: EIP lookup is not implemented for this provider.
func (r *SRegion) GetIEipById(id string) (cloudprovider.ICloudEIP, error) {
	return nil, cloudprovider.ErrNotImplemented
}

// GetIVMById fetches an instance and attaches a host shell for its zone.
// NOTE(review): FindZone can return (nil, nil) when no zone matches, in
// which case the SHost gets a nil zone — confirm downstream tolerates that.
func (r *SRegion) GetIVMById(id string) (cloudprovider.ICloudVM, error) {
	vm, err := r.GetInstanceById(id)
	if err != nil {
		return nil, err
	}
	zone, err := r.FindZone(vm.Region)
	if err != nil {
		return nil, err
	}
	vm.host = &SHost{
		zone: zone,
	}
	return vm, nil
}

// GetIDiskById delegates to the region's disk lookup.
func (r *SRegion) GetIDiskById(id string) (cloudprovider.ICloudDisk, error) {
	return r.GetDisk(id)
}
// GetIHosts flattens every zone's hosts into a single slice.
func (r *SRegion) GetIHosts() ([]cloudprovider.ICloudHost, error) {
	izones, err := r.GetIZones()
	if err != nil {
		return nil, err
	}
	iHosts := make([]cloudprovider.ICloudHost, 0, len(izones))
	for i := range izones {
		hosts, err := izones[i].GetIHosts()
		if err != nil {
			return nil, err
		}
		iHosts = append(iHosts, hosts...)
	}
	return iHosts, nil
}

// GetIHostById linearly scans all hosts for a matching global id.
func (r *SRegion) GetIHostById(id string) (cloudprovider.ICloudHost, error) {
	hosts, err := r.GetIHosts()
	if err != nil {
		return nil, err
	}
	for i := range hosts {
		if hosts[i].GetGlobalId() == id {
			return hosts[i], nil
		}
	}
	return nil, cloudprovider.ErrNotFound
}
// GetIStorages flattens every zone's storages into a single slice.
func (r *SRegion) GetIStorages() ([]cloudprovider.ICloudStorage, error) {
	izones, err := r.GetIZones()
	if err != nil {
		return nil, err
	}
	stores := make([]cloudprovider.ICloudStorage, 0)
	for _, izone := range izones {
		zoneStores, err := izone.GetIStorages()
		if err != nil {
			return nil, err
		}
		stores = append(stores, zoneStores...)
	}
	return stores, nil
}
// GetIStorageById linearly scans all storages for a matching global id.
func (r *SRegion) GetIStorageById(id string) (cloudprovider.ICloudStorage, error) {
	istores, err := r.GetIStorages()
	if err != nil {
		return nil, err
	}
	for i := range istores {
		if istores[i].GetGlobalId() == id {
			return istores[i], nil
		}
	}
	return nil, cloudprovider.ErrNotFound
}

// GetIStoragecaches returns the region's single storage cache.
func (r *SRegion) GetIStoragecaches() ([]cloudprovider.ICloudStoragecache, error) {
	sc := r.getStoragecache()
	return []cloudprovider.ICloudStoragecache{sc}, nil
}

// GetIStoragecacheById matches against the single region cache only.
func (r *SRegion) GetIStoragecacheById(id string) (cloudprovider.ICloudStoragecache, error) {
	storageCache := r.getStoragecache()
	if storageCache.GetGlobalId() == id {
		return storageCache, nil
	}
	return nil, cloudprovider.ErrNotFound
}

// GetProvider identifies this region's cloud provider constant.
func (r *SRegion) GetProvider() string {
	return api.CLOUD_PROVIDER_ECLOUD
}

// GetCapabilities delegates to the client's capability list.
func (r *SRegion) GetCapabilities() []string {
	return r.client.GetCapabilities()
}

// GetClient exposes the underlying ecloud API client.
func (r *SRegion) GetClient() *SEcloudClient {
	return r.client
}
// FindZone returns the zone whose Region field equals zoneRegion.
// When no zone matches it returns (nil, nil) — callers must nil-check.
func (r *SRegion) FindZone(zoneRegion string) (*SZone, error) {
	izones, err := r.GetIZones()
	if err != nil {
		return nil, errors.Wrap(err, "unable to GetZones")
	}
	for i := range izones {
		if zone := izones[i].(*SZone); zone.Region == zoneRegion {
			return zone, nil
		}
	}
	return nil, nil
}
|
package main
import (
"crypto/sha256"
"fmt"
)
// popCount returns the number of set bits in x (0..8).
func popCount(x byte) int {
	// Idiomatic counted loop; the original relied on a separately declared
	// zero-valued `var i uint` feeding a header-less for statement.
	count := 0
	for i := 0; i < 8; i++ {
		count += int((x >> uint(i)) & 1)
	}
	return count
}

// countDifference returns the Hamming distance (number of differing
// bits, 0..256) between the SHA-256 digests of c1 and c2.
func countDifference(c1 string, c2 string) int {
	h1 := sha256.Sum256([]byte(c1))
	h2 := sha256.Sum256([]byte(c2))
	diff := 0
	for i, b := range h1 {
		diff += popCount(h2[i] ^ b)
	}
	return diff
}
// main prints the SHA-256 bit distance between "x" and "X",
// illustrating the avalanche effect of a one-bit input change.
func main() {
	fmt.Println(countDifference("x", "X"))
}
|
package main
import (
"fmt"
"net/http"
Controllers "./controllerClasses"
)
// handleRequests registers the hash-service routes on the default mux
// and serves on :8080, blocking until the server stops.
func handleRequests() {
	fmt.Println("Server started on: http://localhost:8080")
	http.HandleFunc("/hash/", Controllers.GetHashedValue)
	http.HandleFunc("/hash", Controllers.SetHashedValue)
	http.HandleFunc("/stats", Controllers.ReadStats)
	http.HandleFunc("/shutdown", Controllers.PrepShutdown)
	// Fix: http.ListenAndServe always returns a non-nil error when it
	// stops (e.g. port already in use); it was previously discarded,
	// making startup failures silent.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Println("server stopped:", err)
	}
}
// main starts the HTTP hash service.
func main() {
	handleRequests()
}
|
package Model
import (
_struct "1/struct"
)
// DeleteVideo deletes the video owned by uid with id vid, together with
// its associated info row.
// NOTE(review): chaining .Delete(Video{}).Delete(VideoInfo{}) on one
// gorm query reuses the same WHERE clause ("id=? and uid=?") for both
// tables, while the commented-out raw SQL below deleted video_info by
// "vid=?" — the second Delete likely targets the wrong column; verify
// against the schema. Also consider a transaction so the two deletes
// succeed or fail together.
func DeleteVideo(uid, vid string) error {
	err := DB.Where("id=? and uid=?", vid, uid).Delete(_struct.Video{}).Delete(_struct.VideoInfo{}).Error
	//stmt, err := DB.Prepare("DELETE FROM videos WHERE id=? and uid=?")
	//if err != nil {
	//	return err
	//}
	//defer stmt.Close().Error()
	//
	//stmt1, err := DB.Prepare("DELETE FROM video_info WHERE vid=? and uid=?")
	//if err != nil {
	//	return err
	//}
	//
	//res, err := stmt.Exec(vid, uid)
	//if err != nil {
	//	return err
	//}
	//num, err := res.RowsAffected()
	//if err != nil {
	//	return err
	//}
	//
	//res1, err := stmt1.Exec(vid, uid)
	//if err != nil {
	//	return err
	//}
	//num1, err := res1.RowsAffected()
	//if err != nil {
	//	return err
	//}
	//
	//fmt.Println(num, num1)
	//
	return err
}
|
package controllers
import (
"net/http"
m "github.com/fullstacktf/Narrativas-Backend/models"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
)
// Register binds the JSON body to a User and creates the account.
// Responds 400 for malformed input, 409 when registration fails
// (e.g. duplicate user), 201 on success.
func Register(context *gin.Context) {
	var newUser m.User
	if err := context.ShouldBindWith(&newUser, binding.JSON); err != nil {
		context.JSON(http.StatusBadRequest, gin.H{"error": "bad request"})
		return
	}
	if err := newUser.Register(); err != nil {
		context.JSON(http.StatusConflict, gin.H{"error": err.Error()})
		return
	}
	context.Status(http.StatusCreated)
}
// Login binds the JSON body to a User and attempts authentication.
// Responds 422 for malformed input, 400 when login fails, otherwise
// 200 with a session token.
func Login(context *gin.Context) {
	var userData m.User
	if err := context.ShouldBindWith(&userData, binding.JSON); err != nil {
		context.JSON(http.StatusUnprocessableEntity, gin.H{"error": "invalid json provided"})
		return
	}
	token, err := userData.Login()
	if err != nil {
		context.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	context.JSON(http.StatusOK, gin.H{"token": token})
}
|
package main
import (
"fmt"
)
// Phone provides calling capability.
type Phone struct{}

// Call simulates placing a call.
func (p *Phone) Call() string {
	return "a call"
}

// Camera provides photo capability.
type Camera struct{}

// TakeAPicture simulates taking a photo.
func (c *Camera) TakeAPicture() string {
	return "a picture"
}

// CameraPhone composes Phone and Camera via embedding, so it exposes
// both Call and TakeAPicture — a struct-embedding example.
type CameraPhone struct {
	Phone
	Camera
}
// main demonstrates that the embedded Phone and Camera methods are
// promoted onto CameraPhone.
func main() {
	cp := &CameraPhone{}
	fmt.Printf("give a call. %s \n", cp.Call())
	fmt.Printf("take a picture. %s \n", cp.TakeAPicture())
}
|
package httpbot
import (
"bytes"
"encoding/binary"
"fmt"
"log"
"os"
"strings"
"sync/atomic"
"util"
"webbot"
"golang.org/x/net/websocket"
)
// Client represents one connected web client of a Robot: it owns the
// outbound message queue, per-(group,id) control bookkeeping, and the
// client's identity/cookie.
type Client struct {
	r        *Robot
	msgChan  chan []byte       // outbound message queue (bounded; see sendMessage)
	errChan  chan error        // used to report a full msgChan to the handler
	groupMap map[uint64]uint64 // key is group<<32|id; see groupFilter
	name     string
	clientID uint64
	cookie   string
	debug    bool
	logger   *log.Logger
}
// NewClient builds a Client for robot r with an outbound queue of
// maxBuffer messages, logging to stderr.
func NewClient(r *Robot, name string, clientID uint64, cookie string, maxBuffer int, debug bool) *Client {
	return &Client{
		r:        r,
		msgChan:  make(chan []byte, maxBuffer),
		errChan:  make(chan error, 1),
		groupMap: make(map[uint64]uint64),
		name:     name,
		clientID: clientID,
		cookie:   cookie,
		debug:    debug,
		logger:   log.New(os.Stderr, "", log.LstdFlags),
	}
}
// Run drives the full lifecycle of one websocket client: it starts the
// in/out handler goroutines, performs the handshake (cookie, caps,
// chat backlog, join announcement, done marker), registers the client
// with the robot, then blocks until a handler reports an error and
// tears everything down in order (socket, registration, out-queue).
func (c *Client) Run(ws *websocket.Conn) {
	defer c.logf("Client finished!\n")
	errChan := make(chan error, 1)
	go c.messageInHandler(ws, errChan)
	go c.messageOutHandler(ws, errChan)
	c.sendCookie()
	// Send caps to client.
	capArray := c.r.getCaps()
	for _, msg := range capArray {
		//c.sendMessage(msg)
		// Caps are sent synchronously; a send failure triggers a quick
		// shutdown that drains both handler goroutines before returning.
		if err := websocket.Message.Send(ws, msg); err != nil {
			// Quick shutdown.
			ws.Close()
			close(c.msgChan)
			<-errChan
			<-errChan
			return
		}
	}
	c.r.addClient(c)
	c.sendOldChats()
	c.annouceChat(true)
	defer c.annouceChat(false)
	c.sendDone()
	err := <-errChan
	c.logf("Handler error: %v\n", err)
	c.logf("Closing socket.\n")
	ws.Close()
	c.logf("Deregistering client.\n")
	c.r.delClient(c)
	c.logf("Shutting down message channel.\n")
	close(c.msgChan)
	c.logf("Waiting for handler.\n")
	// The out handler signals clean exit with a nil error; anything else
	// is logged and we keep draining until that nil arrives.
	for err := range errChan {
		if err == nil {
			break
		} else {
			c.logf("Handler err: %v\n", err)
		}
	}
}
// sendCookie queues a COOK_CAP frame carrying the client's cookie,
// wrapped with a monotonically increasing cap revision.
// Encoding errors are silently dropped — presumably acceptable for this
// best-effort handshake; confirm.
func (c *Client) sendCookie() {
	msg := []byte(c.cookie)
	buf := make([]byte, 0, len(msg)+4)
	bb := bytes.NewBuffer(buf)
	t := webbot.COOK_CAP
	if err := binary.Write(bb, binary.BigEndian, &t); err != nil {
		return
	}
	bb.Write(msg)
	{
		revision := atomic.AddUint64(&c.r.capTime, 1)
		msg, err := util.Encode32TimeHeadBuf(t, revision, bb.Bytes())
		if err != nil {
			return
		}
		c.sendMessage(msg)
	}
}

// sendDone queues an empty DONE_CAP frame marking the end of the
// capability handshake.
func (c *Client) sendDone() {
	t := webbot.DONE_CAP
	buf := make([]byte, 0, 4)
	bb := bytes.NewBuffer(buf)
	revision := atomic.AddUint64(&c.r.capTime, 1)
	msg, err := util.Encode32TimeHeadBuf(t, revision, bb.Bytes())
	if err != nil {
		return
	}
	c.sendMessage(msg)
}
// handleClientMessage dispatches an inbound client frame by its leading
// big-endian uint32 type tag. It returns a rewritten frame plus true
// when the message should be forwarded to the robot; unknown types are
// logged and dropped.
func (c *Client) handleClientMessage(msg []byte) ([]byte, bool) {
	bb := bytes.NewBuffer(msg)
	var t uint32
	if err := binary.Read(bb, binary.BigEndian, &t); err != nil {
		return nil, false
	}
	switch t {
	case webbot.CTRL_CAP:
		return c.handleClientCtrlCap(bb.Bytes())
	case webbot.CHAT_CAP:
		return c.handleClientChatCap(bb.Bytes())
	default:
		c.logf("Received unknown message type (%v) from web client\n", t)
	}
	return nil, false
}
// sendOldChats replays the tail of the chat backlog, capped at half the
// remaining capacity of the outbound queue so the replay cannot fill it.
func (c *Client) sendOldChats() {
	c.r.chLock.RLock()
	log := c.r.ch.oldChats()
	c.r.chLock.RUnlock()
	n := (cap(c.msgChan) - len(c.msgChan)) / 2
	if n > len(log) {
		n = 0
	} else {
		n = len(log) - n
	}
	for _, m := range log[n:] {
		c.sendMessage(m)
	}
}

// annouceChat broadcasts a join (in=true) or part message for this
// client. (Name keeps the historical "annouce" typo — renaming would
// touch every caller.)
func (c *Client) annouceChat(in bool) {
	c.r.chLock.RLock()
	defer c.r.chLock.RUnlock()
	if in {
		c.r.ch.chat(false, "", c.name, " has joined.")
	} else {
		c.r.ch.chat(false, "", c.name, " has parted.")
	}
}
// handleClientChatCap decodes a UTF-16 chat payload. Lines starting
// with "/" are treated as commands; everything else is broadcast to the
// chat. Chat frames are never forwarded to the robot (always false).
func (c *Client) handleClientChatCap(msg []byte) ([]byte, bool) {
	str, err := util.DecodeUTF16(msg)
	if err != nil {
		c.logf("FIXME: handle this error: %v\n", err)
		return nil, false
	}
	if strings.HasPrefix(str, "/") {
		c.handleClientCommand(str)
		return nil, false
	}
	c.logf("CHAT: %v\n", str)
	c.r.chLock.RLock()
	defer c.r.chLock.RUnlock()
	c.r.ch.chat(true, c.r.name, c.name, str)
	return nil, false
}

// handleClientCommand dispatches slash commands; only /users is
// implemented, everything else is logged.
func (c *Client) handleClientCommand(cmd string) {
	if strings.HasPrefix(cmd, "/users") {
		c.usersCommand()
		return
	}
	log.Printf("UNKNOWN COMMAND: [%v]\n", cmd)
}
// usersCommand replies (privately, straight into this client's queue)
// with a deduplicated list of user names across all robots plus unique
// and total counts.
func (c *Client) usersCommand() {
	// Snapshot the robot list under the chat lock, then walk each robot's
	// client set under its own lock to avoid holding both at once.
	c.r.chLock.RLock()
	robots := make([]*Robot, 0, len(c.r.ch.clientMap))
	for k, _ := range c.r.ch.clientMap {
		robots = append(robots, k)
	}
	c.r.chLock.RUnlock()
	c.sendChatMessage(">", " /users")
	c.sendChatMessage(">", " Listing users.")
	total := 0
	uniqNames := make(map[string]bool)
	for _, r := range robots {
		r.clientLock.RLock()
		names := make([]string, 0, len(r.clients))
		for c, _ := range r.clients {
			names = append(names, c.name)
		}
		r.clientLock.RUnlock()
		for _, n := range names {
			if _, ok := uniqNames[n]; !ok {
				c.sendChatMessage("user>", fmt.Sprintf(" %v\n", n))
				uniqNames[n] = true
			}
			total++
		}
	}
	c.sendChatMessage(">", fmt.Sprintf(" %v unique users.\n", len(uniqNames)))
	c.sendChatMessage(">", fmt.Sprintf(" %v total users.\n", total))
}

// sendChatMessage queues a locally generated chat frame for this client
// only, stamped with the global chat ordering counter.
// NOTE(review): this writes to msgChan directly, bypassing the overflow
// guard in sendMessage — it can block if the queue is full.
func (c *Client) sendChatMessage(name, msg string) {
	chatOrder := atomic.AddUint64(&chatTime, 1)
	buf := NewChat(false, "", name, msg, chatOrder)
	c.msgChan <- buf
}
// handleClientCtrlCap decodes a control frame (id, down) from the
// client, runs it through the robot's group filter, and re-encodes it
// as [CTRL_CAP, group, id] for forwarding when the filter allows it.
func (c *Client) handleClientCtrlCap(msg []byte) ([]byte, bool) {
	t := webbot.CTRL_CAP
	bb := bytes.NewBuffer(msg)
	var id uint32
	if err := binary.Read(bb, binary.BigEndian, &id); err != nil {
		return nil, false
	}
	var down uint32
	if err := binary.Read(bb, binary.BigEndian, &down); err != nil {
		return nil, false
	}
	i, g, ok, err := c.r.groupFilter(c, id, down)
	if err != nil {
		c.logf("ERROR: %v\n", err)
		return nil, false
	} else if !ok {
		return nil, false
	}
	buf := make([]byte, 0, 12)
	bb = bytes.NewBuffer(buf)
	if err := binary.Write(bb, binary.BigEndian, &t); err != nil {
		return nil, false
	}
	if err := binary.Write(bb, binary.BigEndian, &g); err != nil {
		return nil, false
	}
	if err := binary.Write(bb, binary.BigEndian, &i); err != nil {
		return nil, false
	}
	c.logf("CTRL_CAP: Group: %v, Id: %v\n", g, i)
	return bb.Bytes(), true
}
// ReadMessage reads a single websocket message from ws, limited to max bytes.
// TODO Sanitize messages.
// 1) Convert simple chat message into complex one.
// size:string -> id:size:string
func (c *Client) ReadMessage(ws *websocket.Conn, max uint32) ([]byte, error) {
	// Fix: honor the caller-supplied limit; it was ignored in favor of a
	// hard-coded 1024.
	return util.ReadMessage(ws, max)
}
// groupFilter tracks per-(group,id) press counts for this client's control
// events. A down event (down > 0) increments the counter for the packed key,
// an up event decrements it; the event passes while the counter is <= 1.
// NOTE(review): an up event without a matching down drives the counter
// negative, and the <= 1 test admits the first two downs — confirm this is
// the intended debouncing behavior.
func (c *Client) groupFilter(group, id, down uint32) bool {
	// Pack group (high 32 bits) and id (low 32 bits) into one map key.
	key := uint64(group)
	key = key<<32 | uint64(id)
	if down > 0 {
		c.groupMap[key] += 1
	} else {
		c.groupMap[key] -= 1
	}
	if c.groupMap[key] <= 1 {
		return true
	}
	return false
}
// sendMessage queues msg for delivery to this client without ever blocking
// the caller. If the outgoing channel is full the message is dropped and the
// overflow is reported on errChan (or only logged when errChan is busy too).
// TODO: handle slow clients, we can't have them lagging us.
// NOTE: The entire server might just be slow not the client.
func (c *Client) sendMessage(msg []byte) {
	select {
	case c.msgChan <- msg:
	default:
		// Fix: the previous len()==cap() pre-check raced with concurrent
		// senders and could still block on the send; a non-blocking select
		// cannot. Error string also lowercased per Go convention.
		select {
		case c.errChan <- fmt.Errorf("message channel full"):
		default:
			c.logf("Message dropped\n")
		}
	}
}
// messageInHandler reads messages from the websocket until a read fails,
// passing each through the client message handler and forwarding accepted
// results to the robot. The terminating error is reported on errChan.
func (c *Client) messageInHandler(ws *websocket.Conn, errChan chan error) {
	c.logf("messageInHandler started.\n") // fix: log said "messageInHanlder"
	defer c.logf("messageInHandler ended.\n")
	for {
		msg, err := c.ReadMessage(ws, 1024)
		if err != nil {
			errChan <- err
			return
		}
		if msg, ok := c.handleClientMessage(msg); ok {
			c.r.sendMessage(msg)
		}
	}
}
// messageOutHandler drains msgChan to the websocket until the channel is
// closed, reporting the first send failure on errChan. After an error it
// keeps draining without sending so producers do not block.
func (c *Client) messageOutHandler(ws *websocket.Conn, errChan chan error) {
	c.logf("messageOutHandler started.\n") // fix: log said "messageOutHanlder"
	defer c.logf("messageOutHandler ended.\n")
	hadErr := false
	for {
		select {
		case msg, ok := <-c.msgChan:
			if !ok {
				// Channel closed: normal shutdown.
				errChan <- nil
				return
			}
			if !hadErr {
				if err := websocket.Message.Send(ws, msg); err != nil {
					errChan <- err
					hadErr = true
				}
			}
		case err := <-c.errChan:
			// Forward asynchronous client-side errors (e.g. overflow).
			errChan <- err
			hadErr = true
		}
	}
}
// logf writes a formatted, client-tagged line to the logger when debugging
// is enabled and a logger is configured.
func (c *Client) logf(format string, v ...interface{}) {
	if !c.debug || c.logger == nil {
		return
	}
	c.logger.Printf("C[%p:%v]: %v", c.r, c.name, fmt.Sprintf(format, v...))
}
|
package inmemory
import (
"context"
"github.com/go-redis/redis/v8"
"github.com/pkg/errors"
"os"
)
// MemStore is a simple string key/value store backed by Redis.
type MemStore interface {
	// init establishes the backing connection.
	init() error
	// Set stores value under key without expiry.
	Set(key, value string) error
	// Get fetches the value stored under key.
	Get(key string) (string, error)
}
// storage implements MemStore using a Redis client.
type storage struct {
	client *redis.Client
}
// NewStorage builds a MemStore connected to the Redis instance named by the
// REDIS_URL environment variable.
func NewStorage() (MemStore, error) {
	s := new(storage)
	if err := s.init(); err != nil {
		return nil, err
	}
	return s, nil
}
// init connects to Redis using the REDIS_URL environment variable and
// verifies the connection with a PING before keeping the client.
func (s *storage) init() error {
	opt, err := redis.ParseURL(os.Getenv("REDIS_URL"))
	if err != nil {
		return errors.Wrap(err, "invalid Redis url")
	}
	client := redis.NewClient(opt)
	// Fail fast if the server is unreachable.
	_, err = client.Ping(context.Background()).Result()
	if err != nil {
		return errors.Wrap(err, "unable to connect to Redis")
	}
	s.client = client
	return nil
}
// Set stores value under key with no expiration. Wrapf returns nil when the
// underlying error is nil, so success yields a nil error.
func (s *storage) Set(key, value string) error {
	ctx := context.Background()
	res := s.client.Set(ctx, key, value, 0)
	return errors.Wrapf(res.Err(), "could not set (%s: %s)", key, value)
}
// Get returns the value stored under key, wrapping any lookup failure
// (including a missing key) with context.
func (s *storage) Get(key string) (string, error) {
	ctx := context.Background()
	value, err := s.client.Get(ctx, key).Result()
	if err != nil {
		return "", errors.Wrapf(err, "could not get the value of the key: %s", key)
	}
	return value, nil
}
|
package provider
import (
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/evcc-io/evcc/util"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
)
// httpHandler is a test double that records the last request it served and
// answers with a fresh random body, remembered in val for assertions.
type httpHandler struct {
	val string
	req *http.Request
}
// ServeHTTP records req and writes a random 16-character letter string.
func (h *httpHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	h.req = req
	h.val = lo.RandomString(16, lo.LettersCharset)
	_, _ = w.Write([]byte(h.val))
}
// TestHttpGet verifies that StringGetter hits the configured path and
// returns the body the server produced.
func TestHttpGet(t *testing.T) {
	handler := new(httpHandler)
	server := httptest.NewServer(handler)
	defer server.Close()
	target := server.URL + "/foo/bar"
	prov := NewHTTP(util.NewLogger("foo"), http.MethodGet, target, false, 1, 0)
	parsed, _ := url.Parse(target)
	res, err := prov.StringGetter()()
	assert.NoError(t, err)
	assert.Equal(t, parsed.Path, handler.req.URL.Path)
	assert.Equal(t, handler.val, res)
}
// TestHttpSet verifies that StringSetter renders the template parameter into
// the query string and requests the expected path.
func TestHttpSet(t *testing.T) {
	handler := new(httpHandler)
	server := httptest.NewServer(handler)
	defer server.Close()
	target := server.URL + "/foo/bar?baz={{.baz}}"
	// NOTE(review): the method is GET even in the "set" test — presumably
	// intentional for this provider; confirm.
	prov := NewHTTP(util.NewLogger("foo"), http.MethodGet, target, false, 1, 0)
	parsed, _ := url.Parse(target)
	err := prov.StringSetter("baz")("4711")
	assert.NoError(t, err)
	assert.Equal(t, parsed.Path, handler.req.URL.Path)
	assert.Equal(t, "baz=4711", handler.req.URL.RawQuery)
}
|
package main
import "fmt"
//arrays are a number of elements
//of a specific type and cannot change their length(unlike slices)
//arrays are not commonly used
//they are an underlying datatype
//that are relied upon by other datatypes
//such as slices which are the more common option
// main demonstrates fixed-size arrays by filling one slot per ASCII code
// 65..122 ('A'..'z') and printing the array before and after.
func main() {
	//if you define a number
	//it's an array(limited amount of space)
	//if you leave the brackets empty
	//it's a slice
	var x [58]string
	fmt.Println(x)
	fmt.Println(len(x))
	fmt.Println(x[42])
	//65 is chosen because in ASCII
	//65 is the capital A character
	for i := 65; i <= 122; i++ {
		// Fix: string(i) on an int is flagged by go vet (stringintconv);
		// convert through rune to make the code-point conversion explicit.
		x[i-65] = string(rune(i)) //puts ascii char in position 0 through 57
	}
	fmt.Println(x)
	fmt.Println(len(x))
	fmt.Println(x[42])
}
|
package kth_missing_positive_number
import (
"testing"
"github.com/stretchr/testify/assert"
)
func Test_findKthPositive(t *testing.T) {
tests := []struct {
arr []int
k int
want int
}{
{
arr: []int{2, 3, 4, 7, 11},
k: 5,
want: 9,
},
{
arr: []int{1, 2, 3, 4},
k: 2,
want: 6,
},
{
arr: []int{9, 10},
k: 1,
want: 1,
},
{
arr: []int{1, 10},
k: 1,
want: 2,
},
{
arr: []int{1, 10},
k: 8,
want: 9,
},
{
arr: []int{1, 10},
k: 19,
want: 21,
},
{
arr: []int{100, 200},
k: 19,
want: 19,
},
{
arr: []int{100, 200},
k: 100,
want: 101,
},
}
for _, tt := range tests {
t.Run("", func(t *testing.T) {
got := findKthPositive(tt.arr, tt.k)
assert.Equal(t, tt.want, got)
})
}
}
|
package libldbrest
import (
"bytes"
"github.com/syndtr/goleveldb/leveldb/opt"
)
// iterate walks the database from start (or from the first/last key when
// start is empty), calling handle(key, value) for each entry until handle
// asks to stop, returns an error, or the iterator is exhausted.
// include_start controls whether an exact match on start is handed to handle.
func iterate(start []byte, include_start, backwards bool, handle func([]byte, []byte) (bool, error)) error {
	iter := db.NewIterator(
		nil,
		&opt.ReadOptions{
			DontFillCache: true,
		},
	)
	// Fix: goleveldb iterators must be released after use or the underlying
	// snapshot/resources leak.
	defer iter.Release()
	if bytes.Equal(start, []byte{}) {
		if backwards {
			iter.Last()
		} else {
			iter.First()
		}
	} else {
		iter.Seek(start)
	}
	var proceed func() bool
	if backwards {
		proceed = iter.Prev
		// Iterator.Seek() seeks to the first key >= its argument, but going
		// backwards we need the last key <= the arg, so adjust accordingly
		if !iter.Valid() {
			iter.Last()
		} else if !include_start && !bytes.Equal(iter.Key(), start) {
			iter.Prev()
		}
	} else {
		proceed = iter.Next
	}
	first := true
	for ; iter.Valid(); proceed() {
		// Skip the exact start key when the caller excluded it.
		if first && !include_start && bytes.Equal(iter.Key(), start) {
			first = false
			continue
		}
		first = false
		stop, err := handle(iter.Key(), iter.Value())
		if err != nil {
			return err
		}
		if stop {
			return nil
		}
	}
	return nil
}
// iterateUntil walks from start toward end (direction set by backwards),
// calling handle for at most max in-range entries. include_start and
// include_end control whether exact matches on the bounds are passed to
// handle. It reports whether more in-range entries remained when max was hit.
func iterateUntil(start, end []byte, max int, include_start, include_end, backwards bool, handle func([]byte, []byte) error) (bool, error) {
	var (
		i int
		more bool
	)
	// oob classifies key against the end bound: valid_now means the key is
	// still in range; check_more means iteration may continue past it.
	oob := func(key []byte) (bool, bool) { // returns (valid_now, check_more)
		cmp := bytes.Compare(key, end)
		switch {
		case cmp == 0:
			return include_end, false
		case cmp == -1 && backwards || cmp == 1 && !backwards:
			// Past the end bound for the direction of travel.
			return false, false
		default:
			return true, true
		}
	}
	err := iterate(start, include_start, backwards, func(key, value []byte) (bool, error) {
		if i >= max {
			// exceeded max count, indicate if there's more before "end"
			more, _ = oob(key)
			return true, nil
		}
		i++
		valid, next := oob(key)
		if !valid {
			return true, nil
		}
		if err := handle(key, value); err != nil {
			return true, err
		}
		return !next, nil
	})
	return more, err
}
// iterateN walks from start in the requested direction, passing at most max
// entries to handle.
func iterateN(start []byte, max int, include_start, backwards bool, handle func([]byte, []byte) error) error {
	count := 0
	return iterate(start, include_start, backwards, func(key, value []byte) (bool, error) {
		if count >= max {
			return true, nil
		}
		count++
		return false, handle(key, value)
	})
}
|
package fakes
import (
"sync"
"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
)
// ComputeClient is a hand-rolled fake of a compute API client. Each call
// records its invocation count and arguments, and can be driven either by a
// Stub function or by canned Returns values.
type ComputeClient struct {
	// DeleteCall captures state for the Delete method.
	DeleteCall struct {
		sync.Mutex
		CallCount int
		Receives struct {
			InstanceID string
		}
		Returns struct {
			Error error
		}
		Stub func(string) error
	}
	// ListCall captures state for the List method.
	ListCall struct {
		sync.Mutex
		CallCount int
		Returns struct {
			ServerSlice []servers.Server
			Error error
		}
		Stub func() ([]servers.Server, error)
	}
}
// Delete records the call and its argument, then delegates to Stub when set,
// otherwise returning the canned error.
func (f *ComputeClient) Delete(param1 string) error {
	f.DeleteCall.Lock()
	defer f.DeleteCall.Unlock()
	f.DeleteCall.CallCount++
	f.DeleteCall.Receives.InstanceID = param1
	if stub := f.DeleteCall.Stub; stub != nil {
		return stub(param1)
	}
	return f.DeleteCall.Returns.Error
}
// List records the call, then delegates to Stub when set, otherwise
// returning the canned slice and error.
func (f *ComputeClient) List() ([]servers.Server, error) {
	f.ListCall.Lock()
	defer f.ListCall.Unlock()
	f.ListCall.CallCount++
	if stub := f.ListCall.Stub; stub != nil {
		return stub()
	}
	return f.ListCall.Returns.ServerSlice, f.ListCall.Returns.Error
}
|
package virtualmachinevolume
import (
"context"
goerrors "errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
hc "kubevirt-image-service/pkg/apis/hypercloud/v1alpha1"
"kubevirt-image-service/pkg/util"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"time"
)
// ReconcileInterval is the interval after which a volume in the Pending state is reconciled again
const ReconcileInterval = 1 * time.Second
// Add creates a new VirtualMachineVolume Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
	// Wire the default reconciler into the manager's controller set.
	return add(mgr, newReconciler(mgr))
}
// newReconciler returns a new reconcile.Reconciler backed by the manager's
// cached client and scheme.
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
	return &ReconcileVirtualMachineVolume{client: mgr.GetClient(), scheme: mgr.GetScheme()}
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	c, err := controller.New("virtualmachinevolume-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	// Reconcile on changes to VirtualMachineVolume objects themselves...
	if err := c.Watch(&source.Kind{Type: &hc.VirtualMachineVolume{}}, &handler.EnqueueRequestForObject{}); err != nil {
		return err
	}
	// ...and on changes to PersistentVolumeClaims they own.
	if err := c.Watch(&source.Kind{Type: &corev1.PersistentVolumeClaim{}},
		&handler.EnqueueRequestForOwner{IsController: true, OwnerType: &hc.VirtualMachineVolume{}}); err != nil {
		return err
	}
	return nil
}
// blank assignment to verify that ReconcileVirtualMachineVolume implements reconcile.Reconciler
var _ reconcile.Reconciler = &ReconcileVirtualMachineVolume{}
// ReconcileVirtualMachineVolume reconciles a VirtualMachineVolume object
type ReconcileVirtualMachineVolume struct {
	// This client, initialized using mgr.Client() above, is a split client
	// that reads objects from the cache and writes to the apiserver
	client client.Client
	scheme *runtime.Scheme
	// volume holds the deep copy of the object currently being reconciled.
	volume *hc.VirtualMachineVolume
}
// Reconcile reads that state of the cluster for a VirtualMachineVolume object and makes changes based on the state read
// and what is in the VirtualMachineVolume.Spec
func (r *ReconcileVirtualMachineVolume) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	klog.Infof("Start sync VirtualMachineVolume %s", request.NamespacedName)
	defer func() {
		klog.Infof("End sync VirtualMachineVolume %s", request.NamespacedName)
	}()
	cachedVolume := &hc.VirtualMachineVolume{}
	if err := r.client.Get(context.TODO(), request.NamespacedName, cachedVolume); err != nil {
		if errors.IsNotFound(err) {
			return reconcile.Result{}, nil // Deleted Volume. Return and don't requeue.
		}
		return reconcile.Result{}, err
	}
	// Work on a copy so the cache-shared object is never mutated directly.
	r.volume = cachedVolume.DeepCopy()
	if err := r.validateVolumeSpec(); err != nil {
		// Spec not satisfiable yet (e.g. image missing/unready): mark
		// Pending and retry after ReconcileInterval.
		if err2 := r.updateStateWithReadyToUse(hc.VirtualMachineVolumeStatePending, corev1.ConditionFalse, "VmVolumeIsInPending", err.Error()); err2 != nil {
			return reconcile.Result{}, err2
		}
		return reconcile.Result{RequeueAfter: ReconcileInterval}, nil
	}
	if err := r.syncVolumePvc(); err != nil {
		// PVC sync failed: record Error state and requeue with the error.
		if err2 := r.updateStateWithReadyToUse(hc.VirtualMachineVolumeStateError, corev1.ConditionFalse, "VmVolumeIsInError", err.Error()); err2 != nil {
			return reconcile.Result{}, err2
		}
		return reconcile.Result{}, err
	}
	return reconcile.Result{}, nil
}
// validateVolumeSpec checks that the referenced VirtualMachineImage exists
// in the volume's namespace and is ready to use.
func (r *ReconcileVirtualMachineVolume) validateVolumeSpec() error {
	// Validate VirtualMachineImageName
	image := &hc.VirtualMachineImage{}
	if err := r.client.Get(context.TODO(), types.NamespacedName{Name: r.volume.Spec.VirtualMachineImage.Name, Namespace: r.volume.Namespace}, image); err != nil {
		if errors.IsNotFound(err) {
			// Fix: user-facing message grammar (was "is not exists").
			return goerrors.New("VirtualMachineImage does not exist")
		}
		return err
	}
	// Check virtualMachineImage state is available
	found, cond := util.GetConditionByType(image.Status.Conditions, hc.ConditionReadyToUse)
	if !found || cond.Status != corev1.ConditionTrue {
		klog.Info("VirtualMachineImage state is not available")
		return goerrors.New("VirtualMachineImage state is not available")
	}
	return nil
}
// updateStateWithReadyToUse updates readyToUse condition type and State,
// then writes the change back through the status subresource.
func (r *ReconcileVirtualMachineVolume) updateStateWithReadyToUse(state hc.VirtualMachineVolumeState, readyToUseStatus corev1.ConditionStatus,
	reason, message string) error {
	r.volume.Status.Conditions = util.SetConditionByType(r.volume.Status.Conditions, hc.VirtualMachineVolumeConditionReadyToUse, readyToUseStatus, reason, message)
	r.volume.Status.State = state
	return r.client.Status().Update(context.TODO(), r.volume)
}
|
/*
Copyright 2020 Huawei Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ymodem
import (
"fmt"
"io"
)
type controlByte byte
const (
asciiSOH = 0x01
asciiSTX = 0x02
asciiEOT = 0x04
asciiACK = 0x06
asciiNAK = 0x15
asciiCAN = 0x18
ymodemPOLL = 0x43
)
// String returns the name of the control byte.
func (b controlByte) String() string {
switch b {
case asciiSOH:
return "SOH"
case asciiSTX:
return "STX"
case asciiEOT:
return "EOT"
case asciiACK:
return "ACK"
case asciiNAK:
return "NAK"
case asciiCAN:
return "CAN"
case ymodemPOLL:
return "POLL"
default:
return fmt.Sprintf("%#x", b)
}
}
// readControlByte reads exactly one byte from reader and returns it as a
// controlByte.
func readControlByte(reader io.Reader) (controlByte, error) {
	var buf [1]byte
	var cb controlByte
	// Fix: a bare Read may legally return n == 0 with a nil error;
	// io.ReadFull guarantees the byte was actually read or reports an error.
	if _, err := io.ReadFull(reader, buf[:]); err != nil {
		return cb, fmt.Errorf("cannot read control byte: %w", err)
	}
	cb = controlByte(buf[0])
	return cb, nil
}
// writeControlByte sends the single control byte cb on writer.
func writeControlByte(writer io.Writer, cb controlByte) error {
	if _, err := writer.Write([]byte{byte(cb)}); err != nil {
		return fmt.Errorf("cannot write control byte: %w", err)
	}
	return nil
}
|
// Copyright 2018 NetApp, Inc. All Rights Reserved.
package rest
import (
"net/http"
"time"
"github.com/rs/xid"
log "github.com/sirupsen/logrus"
)
// Logger wraps inner so every request is logged on receipt and again on
// completion (with its duration), both lines tagged with a fresh request id.
func Logger(inner http.Handler, name string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		began := time.Now()
		reqID := xid.New()
		logRestCallInfo("REST API call received.", r, began, reqID, name)
		inner.ServeHTTP(w, r)
		logRestCallInfo("REST API call complete.", r, began, reqID, name)
	})
}
// logRestCallInfo emits a structured debug line for one REST call, including
// the elapsed time since start.
func logRestCallInfo(msg string, r *http.Request, start time.Time, requestId xid.ID, name string) {
	log.WithFields(log.Fields{
		"requestID": requestId,
		"method": r.Method,
		"uri": r.RequestURI,
		"route": name,
		"duration": time.Since(start),
	}).Debug(msg)
}
|
package solutions
// jump returns the minimum number of jumps needed to travel from index 0 to
// the last index, where nums[i] is the maximum jump length from index i.
// Greedy: from each position, pick the reachable index that extends the
// furthest (i + nums[i] maximal) until the end is within one jump.
func jump(nums []int) int {
	last := len(nums) - 1
	if last == 0 {
		return 0
	}
	pos, count := 0, 0
	for {
		reach := nums[pos]
		count++
		if reach >= last {
			// One more jump from here covers the remaining distance.
			return count
		}
		bestVal, bestIdx := 0, pos+1
		for i := pos + 1; i <= pos+reach && i < last; i++ {
			// Accept i when it extends the horizon (i + nums[i]) further.
			if nums[i] >= bestVal || i+nums[i] > bestVal+bestIdx {
				bestVal, bestIdx = nums[i], i
			}
		}
		if bestIdx+bestVal >= last {
			// Jump to bestIdx, then one final jump to the end.
			return count + 1
		}
		pos = bestIdx
	}
}
|
package kata
// Arithmetic applies the named operator ("add", "subtract", "multiply",
// "divide") to a and b. Unknown operators yield 0. Division is integer
// division and, like Go's / operator, panics when b is 0.
func Arithmetic(a int, b int, operator string) int {
	ops := map[string]func(int, int) int{
		"add":      func(x, y int) int { return x + y },
		"subtract": func(x, y int) int { return x - y },
		"multiply": func(x, y int) int { return x * y },
		"divide":   func(x, y int) int { return x / y },
	}
	op, known := ops[operator]
	if !known {
		return 0
	}
	return op(a, b)
}
|
package main
import (
	"io/ioutil"
	"net/http"
)
// main serves index.html on every path at :3000.
func main() {
	http.HandleFunc("/", handler)
	// Fix: the error returned by ListenAndServe was silently discarded;
	// route it through check so startup failures are visible.
	check(http.ListenAndServe(":3000", nil))
}
// handler serves the contents of index.html (re-read on every request) as
// HTML. NOTE(review): a read failure panics via check; net/http recovers the
// panic and aborts the connection — consider an error response instead.
func handler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html")
	dat, err := ioutil.ReadFile("index.html")
	check(err)
	w.Write(dat)
}
// check panics when e is non-nil; a nil error is a no-op.
func check(e error) {
	if e == nil {
		return
	}
	panic(e)
}
|
// Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package serviceadapter_test
import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/types"
"github.com/pivotal-cf/on-demand-service-broker/serviceadapter"
sdk "github.com/pivotal-cf/on-demand-services-sdk/serviceadapter"
)
// Exercises serviceadapter.ErrorForExitCode: each entry maps a service
// adapter exit code (and raw message) to the error type and message the
// broker should surface to its callers.
var _ = Describe("Adapter Client", func() {
	DescribeTable("ErrorForExitCode",
		func(code int, msg string, matchErr types.GomegaMatcher, matchMsg types.GomegaMatcher) {
			err := serviceadapter.ErrorForExitCode(code, msg)
			Expect(err).To(matchErr)
			if err != nil {
				Expect(err.Error()).To(matchMsg)
			}
		},
		Entry(
			"success",
			serviceadapter.SuccessExitCode, "",
			BeNil(),
			nil,
		),
		// For the well-known sdk exit codes the raw message is replaced by a
		// canonical one ("should not appear" must never surface).
		Entry(
			"not implemented",
			sdk.NotImplementedExitCode, "should not appear",
			BeAssignableToTypeOf(serviceadapter.NotImplementedError{}),
			Equal("command not implemented by service adapter"),
		),
		Entry(
			"app guid not provided",
			sdk.AppGuidNotProvidedErrorExitCode, "should not appear",
			BeAssignableToTypeOf(serviceadapter.AppGuidNotProvidedError{}),
			Equal("app GUID not provided"),
		),
		Entry(
			"binding already exists",
			sdk.BindingAlreadyExistsErrorExitCode, "should not appear",
			BeAssignableToTypeOf(serviceadapter.BindingAlreadyExistsError{}),
			Equal("binding already exists"),
		),
		Entry(
			"binding not found",
			sdk.BindingNotFoundErrorExitCode, "should not appear",
			BeAssignableToTypeOf(serviceadapter.BindingNotFoundError{}),
			Equal("binding not found"),
		),
		// Generic and unrecognized exit codes pass the message through.
		Entry(
			"standard error exit code",
			sdk.ErrorExitCode, "some error",
			BeAssignableToTypeOf(serviceadapter.UnknownFailureError{}),
			Equal("some error"),
		),
		Entry(
			"some other non-zero exit code",
			12345, "some other error",
			BeAssignableToTypeOf(serviceadapter.UnknownFailureError{}),
			Equal("some other error"),
		),
	)
})
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package job
import (
"context"
"reflect"
"sync"
"github.com/google/gapid/core/event"
"github.com/google/gapid/core/os/device"
"github.com/google/gapid/test/robot/record"
"github.com/google/gapid/test/robot/search"
"github.com/google/gapid/test/robot/search/eval"
)
// local is an in-memory, mutex-guarded implementation of Manager. Devices
// persist through the record library; workers live only in entries, with
// onChange broadcasting registrations to monitoring searches.
type local struct {
	mu sync.Mutex
	devices devices
	entries []*Worker
	onChange event.Broadcast
}
// NewLocal builds a new job manager that persists its data in the
// supplied library.
func NewLocal(ctx context.Context, library record.Library) (Manager, error) {
	m := &local{}
	if err := m.devices.init(ctx, library); err != nil {
		return nil, err
	}
	return m, nil
}
// SearchDevices implements Manager.SearchDevices
// It searches the set of persisted devices, and supports monitoring of new devices as they are added.
func (m *local) SearchDevices(ctx context.Context, query *search.Query, handler DeviceHandler) error {
	return m.devices.search(ctx, query, handler)
}
// SearchWorkers implements Manager.SearchWorkers
// It searches the set of persisted workers, and supports monitoring of workers as they are registered.
func (m *local) SearchWorkers(ctx context.Context, query *search.Query, handler WorkerHandler) error {
	// Wrap the caller's handler in the query's filter expression.
	filter := eval.Filter(ctx, query, reflect.TypeOf(&Worker{}), event.AsHandler(ctx, handler))
	initial := event.AsProducer(ctx, m.entries)
	if query.Monitor {
		// Replay current entries, then keep streaming changes under mu.
		return event.Monitor(ctx, &m.mu, m.onChange.Listen, initial, filter)
	}
	return event.Feed(ctx, filter, initial)
}
// GetWorker implements Manager.GetWorker
// This attempts to find a worker on a device that matches the supplied host
// controlling a device that matches the supplied target to perform the given operation.
// If none is found, a new worker will be created.
func (m *local) GetWorker(ctx context.Context, host *device.Instance, target *device.Instance, op Operation) (*Worker, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	// Resolve (and persist if new) both device instances.
	h, err := m.devices.get(ctx, host)
	if err != nil {
		return nil, err
	}
	t, err := m.devices.get(ctx, target)
	if err != nil {
		return nil, err
	}
	for _, entry := range m.entries {
		if entry.Host != h.Id {
			continue
		}
		if entry.Target != t.Id {
			continue
		}
		if !entry.Supports(op) {
			// Same host/target pair: extend the existing worker with the
			// new operation and notify listeners.
			entry.Operation = append(entry.Operation, op)
			m.onChange.Send(ctx, entry)
			return entry, nil
		}
	}
	// Not found, add a new worker
	entry := &Worker{
		Host:      h.Id,
		Target:    t.Id,
		Operation: []Operation{op},
	}
	m.entries = append(m.entries, entry)
	m.onChange.Send(ctx, entry)
	return entry, nil
}
// Supports reports whether the worker already offers the given operation.
func (w *Worker) Supports(op Operation) bool {
	for _, existing := range w.Operation {
		if existing == op {
			return true
		}
	}
	return false
}
|
package day08
import (
"errors"
"fmt"
"log"
)
// process executes the single instruction at code[pc] ("op arg") and returns
// the next program counter and accumulator. It panics on malformed input or
// an unknown op code.
func process(code []string, pc int, acc int) (int, int) {
	var (
		op  string
		arg int
	)
	n, err := fmt.Sscanf(code[pc], "%s %d", &op, &arg)
	if err != nil {
		panic(err)
	}
	if n != 2 {
		panic(fmt.Sprintf("Error parsing instruction on line %v: %q", pc, code[pc]))
	}
	switch op {
	case "acc":
		return pc + 1, acc + arg
	case "jmp":
		return pc + arg, acc
	case "nop":
		return pc + 1, acc
	}
	panic(fmt.Sprintf("Unknown op code at %v: %v", pc, op))
}
// run executes code from pc until it halts (pc leaves the program) or an
// already-visited instruction is reached, returning the final pc, the
// accumulator, and an error when an infinite loop was detected.
func run(code []string, pc int, acc int) (int, int, error) {
	visited := make([]int, len(code))
	// Fix: guard pc >= 0 — a backward jmp past instruction 0 previously
	// panicked with an index-out-of-range on visited[pc].
	for pc >= 0 && pc < len(code) && visited[pc] == 0 {
		visited[pc] = 1
		pc, acc = process(code, pc, acc)
	}
	if pc >= 0 && pc < len(code) && visited[pc] == 1 {
		return pc, acc, errors.New("Infinite Loop detected")
	}
	return pc, acc, nil
}
// part1 runs the program until the first repeated instruction and reports
// the accumulator value at that point.
func part1(input []string) int {
	log.SetFlags(0)
	log.SetPrefix("Day 8: Part 1: ")
	_, result, _ := run(input, 0, 0)
	log.Printf("Answer: %v", result)
	return result
}
// part2 brute-forces the single nop<->jmp swap that lets the program halt:
// on each pass it flips the first unmodified nop/jmp after lastModified,
// runs the patched program, and stops when run reports no infinite loop.
func part2(input []string) int {
	log.SetPrefix("Day 8: Part 2: ")
	log.SetFlags(0)
	lastModified := 0
	acc := 0
	var err error = nil
	code := make([]string, len(input))
	for {
		modified := false
		// Rebuild the program, flipping exactly one instruction per pass.
		for i, line := range input {
			if !modified && i > lastModified {
				if line[0:3] == "nop" {
					line = "jmp" + line[3:]
					log.Printf("Modified line %v to: %q", i, line)
					lastModified = i
					modified = true
				} else if line[0:3] == "jmp" {
					line = "nop" + line[3:]
					log.Printf("Modified line %v to: %q", i, line)
					lastModified = i
					modified = true
				}
			}
			code[i] = line
		}
		_, acc, err = run(code, 0, 0)
		if err == nil {
			// The patched program terminated: acc is the answer.
			break
		}
		if lastModified+1 == len(input) {
			panic("Reached end of the line, but condition not met.")
		}
	}
	log.Printf("Answer: %v", acc)
	return acc
}
|
/*
author:admin
createTime:
*/
package main
import "testing"
// TestAdd checks that Add sums two ints correctly.
func TestAdd(t *testing.T) {
	if got := Add(1, 2); got != 3 {
		t.Fatal("the result is wrong")
	}
	t.Log("the result is ok")
}
// Add returns the sum of i and i2.
func Add(i int, i2 int) int {
	sum := i
	sum += i2
	return sum
}
// BenchmarkRemoveEles measures removeElements on a small fixed input.
func BenchmarkRemoveEles(b *testing.B) {
	for i := 0; i < b.N; i++ {
		// Fix: removeElements mutates its input, so the benchmark must
		// rebuild the slice each iteration — with a single shared slice,
		// every iteration after the first measured a no-op on data that
		// had already been filtered.
		numbs := []int{3, 2, 2, 3}
		removeElements(numbs, 3)
	}
}
// removeElements returns arr with every occurrence of k removed, preserving
// the order of the remaining elements. The input's backing array is reused
// (and therefore mutated), matching the original in-place behavior.
func removeElements(arr []int, k int) []int {
	// Fix: the previous version re-spliced the slice with append for every
	// match, which is O(n^2); one in-place filter pass is O(n).
	kept := arr[:0]
	for _, v := range arr {
		if v != k {
			kept = append(kept, v)
		}
	}
	return kept
}
|
package main
import (
"context"
"fmt"
"github.com/lack-io/vine/service"
pb "github.com/lack-io/vine-example/helloworld/proto"
)
// main demonstrates the helloworld service client: a Call with an empty name
// (error tolerated), a Call with "world", then the MulPath RPC.
func main() {
	srv := service.NewService(service.Name("go.vine.helloworld"))
	// Fix: the local was named `service`, shadowing the imported `service`
	// package for the remainder of the function.
	client := pb.NewHelloworldService("go.vine.helloworld", srv.Client())
	rsp, err := client.Call(context.TODO(), &pb.HelloWorldRequest{Name: ""})
	if err != nil {
		fmt.Println(err)
	} else {
		fmt.Printf("Call: %v\n", rsp)
	}
	rsp, err = client.Call(context.TODO(), &pb.HelloWorldRequest{Name: "world"})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("Call: %v\n", rsp)
	rsp1, err := client.MulPath(context.TODO(), &pb.MulPathRequest{})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("MulPath: %v\n", rsp1)
}
|
package main
import "fmt"
import "unicode/utf8"
// main prints a two-character Chinese string as raw UTF-8 bytes, as code
// points, round-tripped through []rune, and as a rune count.
func main() {
	text := "文旭"
	fmt.Printf("% x\n", text) // UTF-8 bytes, space-separated hex
	runes := []rune(text)     // Unicode code points
	fmt.Printf("%x\n", runes)
	fmt.Println(string(runes))
	fmt.Println(utf8.RuneCountInString(text))
}
|
package models
// Comment is a user comment attached to an issue, as serialized by the API.
type Comment struct {
	// CommentID is the unique comment id ("cid" in JSON).
	CommentID int `json:"cid"`
	// Content is the comment body.
	Content string `json:"content"`
	// OnIssue is the id of the issue this comment belongs to.
	OnIssue int `json:"issue"`
}
// Comments is a list of Comment values.
type Comments []Comment
|
// @title bitsongms API Docs
// @version 0.1
// @description Swagger documentation for the BitSong Media Server service API.
// @contact.name BitSong
// @contact.email hello@bitsong.io
// @license.name CC0
// @license.url https://creativecommons.org/share-your-work/public-domain/cc0/
// @host localhost:8081
// @BasePath /api/v1
package server
|
package configure
import (
"bytes"
_ "embed"
"text/template"
"github.com/Masterminds/sprig/v3"
"github.com/evcc-io/evcc/util/templates"
)
// device holds the rendered yaml snippet and metadata for one configured
// device of any class (meter, charger, vehicle).
type device struct {
	Name string
	Title string
	Yaml string
	ChargerHasMeter bool // only used with chargers to detect if we need to ask for a charge meter
}
// loadpoint collects the wizard's answers for a single loadpoint before the
// configuration is rendered.
type loadpoint struct {
	Title string // TODO: we may be able to reuse something from core here later
	Charger string
	ChargeMeter string
	Vehicle string
	Mode string
	MinCurrent int
	MaxCurrent int
	Phases int
	ResetOnDisconnect string
}
// globalConfig aggregates everything the wizard collected and is the data
// passed to the configuration template.
type globalConfig struct {
	Meters []device
	Chargers []device
	Vehicles []device
	Loadpoints []loadpoint
	Site struct { // TODO: we may be able to reuse something from core here later
		Title string
		Grid string
		PVs []string
		Batteries []string
	}
	Hems string
	EEBUS string
	MQTT string
	SponsorToken string
	Plant string
	Telemetry bool
}
// Configure accumulates the wizard's state and renders the final yaml.
type Configure struct {
	config globalConfig
}
// AddDevice adds a device reference of a specific category to the configuration
// e.g. a PV meter to site.PVs
func (c *Configure) AddDevice(d device, category DeviceCategory) {
	switch DeviceCategories[category].class {
	case templates.Vehicle:
		c.config.Vehicles = append(c.config.Vehicles, d)
	case templates.Charger:
		c.config.Chargers = append(c.config.Chargers, d)
	case templates.Meter:
		c.config.Meters = append(c.config.Meters, d)
		// Meters additionally register with their site role.
		switch DeviceCategories[category].categoryFilter {
		case DeviceCategoryGridMeter:
			c.config.Site.Grid = d.Name
		case DeviceCategoryPVMeter:
			c.config.Site.PVs = append(c.config.Site.PVs, d.Name)
		case DeviceCategoryBatteryMeter:
			c.config.Site.Batteries = append(c.config.Site.Batteries, d.Name)
		}
	default:
		panic("invalid class for category: " + category)
	}
}
// DevicesOfClass returns all configured devices of a given DeviceClass
func (c *Configure) DevicesOfClass(class templates.Class) []device {
	if class == templates.Charger {
		return c.config.Chargers
	}
	if class == templates.Meter {
		return c.config.Meters
	}
	if class == templates.Vehicle {
		return c.config.Vehicles
	}
	panic("invalid class: " + class.String())
}
// AddLoadpoint appends a loadpoint to the configuration.
func (c *Configure) AddLoadpoint(l loadpoint) {
	c.config.Loadpoints = append(c.config.Loadpoints, l)
}
// MetersOfCategory returns the number of configured meters of a given DeviceCategory
func (c *Configure) MetersOfCategory(category DeviceCategory) int {
	switch category {
	case DeviceCategoryPVMeter:
		return len(c.config.Site.PVs)
	case DeviceCategoryBatteryMeter:
		return len(c.config.Site.Batteries)
	case DeviceCategoryGridMeter:
		// At most one grid meter; count it only when one is set.
		if c.config.Site.Grid != "" {
			return 1
		}
	}
	return 0
}
// configTmpl is the yaml configuration template embedded at build time.
//
//go:embed configure.tpl
var configTmpl string
// RenderConfiguration creates a yaml configuration by executing the embedded
// template against the collected configuration data.
func (c *Configure) RenderConfiguration() ([]byte, error) {
	tmpl, err := template.New("yaml").Funcs(sprig.TxtFuncMap()).Parse(configTmpl)
	if err != nil {
		// The template is embedded at build time, so a parse failure is a
		// programmer error: panic rather than return it.
		panic(err)
	}
	out := new(bytes.Buffer)
	err = tmpl.Execute(out, c.config)
	return bytes.TrimSpace(out.Bytes()), err
}
|
// Copyright 2021 The Perses Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datasource
import (
"fmt"
"time"
"github.com/perses/common/etcd"
"github.com/perses/perses/internal/api/interface/v1/datasource"
"github.com/perses/perses/internal/api/shared"
"github.com/perses/perses/pkg/model/api"
v1 "github.com/perses/perses/pkg/model/api/v1"
"github.com/sirupsen/logrus"
)
// service implements datasource.Service on top of a datasource.DAO.
type service struct {
	datasource.Service
	dao datasource.DAO
}
// NewService builds the Datasource service around the given DAO.
func NewService(dao datasource.DAO) datasource.Service {
	return &service{
		dao: dao,
	}
}
// Create validates that entity is a Datasource and persists it; any other
// entity type is rejected as a bad request.
func (s *service) Create(entity api.Entity) (interface{}, error) {
	datasourceObject, ok := entity.(*v1.Datasource)
	if !ok {
		return nil, fmt.Errorf("%w: wrong entity format, attempting Datasource format, received '%T'", shared.BadRequestError, entity)
	}
	return s.create(datasourceObject)
}
// create persists a new Datasource, stamping its creation time first.
// A key conflict maps to ConflictError; other DAO failures to InternalError.
func (s *service) create(entity *v1.Datasource) (*v1.Datasource, error) {
	// Update the time contained in the entity
	entity.Metadata.CreateNow()
	if err := s.dao.Create(entity); err != nil {
		if etcd.IsKeyConflict(err) {
			// Fix: log message typo ("exits" -> "exists").
			logrus.Debugf("unable to create the Datasource '%s'. It already exists", entity.Metadata.Name)
			return nil, shared.ConflictError
		}
		logrus.WithError(err).Errorf("unable to perform the creation of the Datasource '%s', something wrong with etcd", entity.Metadata.Name)
		return nil, shared.InternalError
	}
	return entity, nil
}
// Update validates that entity is a Datasource and applies the update; any
// other entity type is rejected as a bad request.
func (s *service) Update(entity api.Entity, parameters shared.Parameters) (interface{}, error) {
	datasourceObject, ok := entity.(*v1.Datasource)
	if !ok {
		return nil, fmt.Errorf("%w: wrong entity format, attempting Datasource format, received '%T'", shared.BadRequestError, entity)
	}
	return s.update(datasourceObject, parameters)
}
// update replaces an existing Datasource. It verifies that the metadata
// name/project agree with the values taken from the HTTP path, copies the
// immutable creation timestamp from the stored version, refreshes the
// update timestamp and writes the entity back.
func (s *service) update(entity *v1.Datasource, parameters shared.Parameters) (*v1.Datasource, error) {
	if entity.Metadata.Name != parameters.Name {
		logrus.Debugf("name in Datasource '%s' and coming from the http request: '%s' doesn't match", entity.Metadata.Name, parameters.Name)
		return nil, fmt.Errorf("%w: metadata.name and the name in the http path request doesn't match", shared.BadRequestError)
	}
	// An empty project in the payload is filled from the path; otherwise the
	// two must agree.
	if len(entity.Metadata.Project) == 0 {
		entity.Metadata.Project = parameters.Project
	} else if entity.Metadata.Project != parameters.Project {
		logrus.Debugf("project in datasource '%s' and coming from the http request: '%s' doesn't match", entity.Metadata.Project, parameters.Project)
		return nil, fmt.Errorf("%w: metadata.project and the project name in the http path request doesn't match", shared.BadRequestError)
	}
	// find the previous version of the Datasource
	oldEntity, err := s.Get(parameters)
	if err != nil {
		return nil, err
	}
	oldObject := oldEntity.(*v1.Datasource)
	// update the immutable field of the newEntity with the old one
	entity.Metadata.CreatedAt = oldObject.Metadata.CreatedAt
	// update the field UpdatedAt with the new time
	entity.Metadata.UpdatedAt = time.Now().UTC()
	if err := s.dao.Update(entity); err != nil {
		logrus.WithError(err).Errorf("unable to perform the update of the Datasource '%s', something wrong with etcd", entity.Metadata.Name)
		return nil, shared.InternalError
	}
	return entity, nil
}
// Delete removes the Datasource identified by the request parameters,
// mapping a missing key to shared.NotFoundError and any other store failure
// to shared.InternalError.
func (s *service) Delete(parameters shared.Parameters) error {
	err := s.dao.Delete(parameters.Project, parameters.Name)
	if err == nil {
		return nil
	}
	if etcd.IsKeyNotFound(err) {
		logrus.Debugf("unable to find the Datasource '%s'", parameters.Name)
		return shared.NotFoundError
	}
	logrus.WithError(err).Errorf("unable to delete the Datasource '%s', something wrong with etcd", parameters.Name)
	return shared.InternalError
}
// Get fetches a single Datasource by project and name, mapping a missing key
// to shared.NotFoundError and any other store failure to
// shared.InternalError.
func (s *service) Get(parameters shared.Parameters) (interface{}, error) {
	entity, err := s.dao.Get(parameters.Project, parameters.Name)
	if err == nil {
		return entity, nil
	}
	if etcd.IsKeyNotFound(err) {
		logrus.Debugf("unable to find the Datasource '%s'", parameters.Name)
		return nil, shared.NotFoundError
	}
	logrus.WithError(err).Errorf("unable to find the previous version of the Datasource '%s', something wrong with etcd", parameters.Name)
	return nil, shared.InternalError
}
// List returns every Datasource matching the etcd query; the HTTP request
// parameters are unused here.
func (s *service) List(q etcd.Query, _ shared.Parameters) (interface{}, error) {
	return s.dao.List(q)
}
|
package leetcode73
import "testing"
type matrix [][]int
// TestSetZeroes exercises setZeroes against a table of small matrices,
// covering empty, 1x1 and all 2x2 zero placements.
func TestSetZeroes(t *testing.T) {
	cases := []struct {
		input matrix
		want  matrix
	}{
		{matrix{{}}, matrix{{}}},
		{matrix{{1}}, matrix{{1}}},
		{matrix{{0}}, matrix{{0}}},
		{matrix{{0, 1}, {1, 1}}, matrix{{0, 0}, {0, 1}}},
		{matrix{{0, 0}, {1, 1}}, matrix{{0, 0}, {0, 0}}},
		{matrix{{0, 1}, {0, 1}}, matrix{{0, 0}, {0, 0}}},
		{matrix{{0, 0}, {0, 1}}, matrix{{0, 0}, {0, 0}}},
		{matrix{{0, 0}, {0, 0}}, matrix{{0, 0}, {0, 0}}},
	}
	for _, tc := range cases {
		// Work on a copy so the table entry stays intact for the message.
		got := copyMatrix(tc.input)
		setZeroes(got)
		if !isEqualMatrix(got, tc.want) {
			t.Errorf("setZeroes(%v) = %v", tc.input, got)
		}
	}
}
// isEqualMatrix reports whether two matrices have the same dimensions and
// identical elements.
func isEqualMatrix(m1 matrix, m2 matrix) bool {
	if len(m1) != len(m2) {
		return false
	}
	for i := range m1 {
		if !isEqualRow(m1[i], m2[i]) {
			return false
		}
	}
	return true
}
// isEqualRow reports whether two int slices are element-wise equal.
func isEqualRow(row1 []int, row2 []int) bool {
	if len(row1) != len(row2) {
		return false
	}
	for j, v := range row1 {
		if v != row2[j] {
			return false
		}
	}
	return true
}
|
package main
import "net"
// _TtcpNodeDataRece describes one chunk of data received by a TCP node:
// sender address, channel id, optional AES key, payload and its offset.
type _TtcpNodeDataRece struct {
	TnrRaddr string // string of net.Addr
	TnrLen int // length in bytes (printed as the middle of offset/len/offset+len)
	TnrId128 []byte // channel id
	TnrK256 []byte // AES key 256 using when receive if not nil
	TnrBuf []byte // received payload
	TnrOffset int64 // offset of this chunk — presumably within a larger stream; confirm against receiver
}
// String renders the record as
// "addr:<raddr> id:<id> k:<key> (<offset>/<len>/<offset+len>){<buf>}".
// _Spf and String5s are package-local helpers defined elsewhere (a
// Sprintf-style formatter and a []byte stringifier, presumably — confirm).
func (__Vtndr *_TtcpNodeDataRece) String() string {
	return _Spf(
		"addr:%s id:%s k:%s (%d/%d/%d){%s}",
		__Vtndr.TnrRaddr,
		String5s(&__Vtndr.TnrId128),
		String5s(&__Vtndr.TnrK256),
		__Vtndr.TnrOffset,
		__Vtndr.TnrLen,
		__Vtndr.TnrOffset+int64(__Vtndr.TnrLen),
		String5s(&__Vtndr.TnrBuf),
	)
}
// _TtcpNodeDataSend describes one chunk of data queued for sending by a TCP
// node. tnsToAddr may be nil (String renders it as "<null>").
type _TtcpNodeDataSend struct {
	tnsToAddr net.Addr // destination address; may be nil
	tnsId128 []byte // channel id
	tnsK256 []byte // AES key 256 using when send if not nil
	tnsLen int // declared payload length (printed alongside len(tnsBuf))
	tnsBuf []byte // payload to send
}
// String renders the send record as
// "addr:<addr> id:<id> k:<key> (<len>/<buflen>)<buf>"; a nil destination
// address is shown as "<null>".
func (__Vtndr *_TtcpNodeDataSend) String() string {
	addr := "<null>"
	if __Vtndr.tnsToAddr != nil {
		addr = __Vtndr.tnsToAddr.String()
	}
	return _Spf(
		"addr:%s id:%s k:%s (%d/%d)%s",
		addr,
		String5s(&__Vtndr.tnsId128),
		String5s(&__Vtndr.tnsK256),
		__Vtndr.tnsLen,
		len(__Vtndr.tnsBuf),
		String5s(&__Vtndr.tnsBuf),
	)
}
|
package raft
import (
"fmt"
"math/rand"
"os"
"reflect"
"testing"
"github.com/hashicorp/raft"
"github.com/hashicorp/raft-boltdb"
)
// TestRaft_Fuzz is a fuzz tester comparing this store to hashicorp/raft-boltdb.
func TestRaft_Fuzz(t *testing.T) {
	// assertOpen/assertClose and dbTypes are shared test helpers defined
	// elsewhere in this package.
	logdb := assertOpen(t, dbTypes["lock free chunkdb"], false, true, "fuzz")
	defer assertClose(t, logdb)
	boltdb := assertCreateBoltStore(t, "fuzz")
	defer boltdb.Close()
	// Fixed seed so failures are reproducible. Note: 'rand' deliberately
	// shadows the math/rand package for the rest of this function.
	rand := rand.New(rand.NewSource(0))
	if err := fuzzLogStore(boltdb, logdb, rand, 256); err != nil {
		t.Fatal(err)
	}
}
/// FUZZ TESTER

// fuzzLogStore compares a "test" implementation with a "spec" implementation
// by performing a sequence of maxops random operations (GetLog, StoreLog,
// StoreLogs, DeleteRange) and comparing the outputs. It returns an error
// describing the first divergence found, or nil if all operations agree.
func fuzzLogStore(spec raft.LogStore, test raft.LogStore, rand *rand.Rand, maxops int) error {
	// Keep track of the last log entry generated, so indices and terms will be strictly increasing.
	lastLog := raft.Log{Index: 1 + uint64(rand.Intn(10))}
	for i := 0; i < maxops; i++ {
		action := rand.Intn(4)
		switch action {
		case 0:
			// GetLog on a random index: both stores must agree on error-ness
			// and, when successful, on the entry contents.
			// Generate an index, weighted towards something in range.
			first, _ := spec.FirstIndex()
			last, _ := spec.LastIndex()
			idrange := int64(last) - int64(first)
			// It's a little annoying that the Int*n functions can't accept 0 as an upper bound.
			// NOTE(review): Int63n(26)-50 always yields a value in [-50, -25],
			// i.e. an index strictly below 'first' unless idrange shifts it
			// back into range — possibly meant to be Int63n(100)-50; confirm
			// the intended weighting.
			index := rand.Int63n(26) - 50 + int64(first)
			if idrange > 0 {
				index = rand.Int63n(idrange) + index
			}
			idx := uint64(index)
			specLog := new(raft.Log)
			specErr := spec.GetLog(idx, specLog)
			testLog := new(raft.Log)
			testErr := test.GetLog(idx, testLog)
			if !compareErrors(specErr, testErr) {
				return notExpected("GetLog", fmt.Sprintf("error values inconsistent for ID %v", idx), specErr, testErr)
			}
			if specErr != nil {
				continue
			}
			if !compareLogs(specLog, testLog) {
				return notExpected("GetLog", fmt.Sprintf("log entries not equal for ID %v", idx), specLog, testLog)
			}
		case 1:
			// StoreLog of a single freshly generated entry.
			lastLog = randLog(lastLog, rand)
			specErr := spec.StoreLog(&lastLog)
			testErr := test.StoreLog(&lastLog)
			if !compareErrors(specErr, testErr) {
				return notExpected("StoreLog", "error values inconsistent", specErr, testErr)
			}
		case 2:
			// StoreLogs of a random-length batch (possibly empty). logsV
			// provides stable backing storage for the pointer slice.
			logs := make([]*raft.Log, rand.Intn(100))
			logsV := make([]raft.Log, len(logs))
			for i := range logs {
				lastLog = randLog(lastLog, rand)
				logsV[i] = lastLog
				logs[i] = &logsV[i]
			}
			specErr := spec.StoreLogs(logs)
			testErr := test.StoreLogs(logs)
			if !compareErrors(specErr, testErr) {
				return notExpected("StoreLogs", "error values inconsistent", specErr, testErr)
			}
		case 3:
			// Delete randomly from either the front or back, not the middle. This matches use of
			// this method within hashicorp/raft itself.
			first, _ := test.FirstIndex()
			last, _ := test.LastIndex()
			// Same issue here with rand.Int63n as above.
			if first != last {
				if rand.Intn(2) == 0 {
					first += uint64(rand.Int63n(int64(last - first)))
				} else {
					last -= uint64(rand.Int63n(int64(last - first)))
				}
			}
			specErr := spec.DeleteRange(first, last)
			testErr := test.DeleteRange(first, last)
			if !compareErrors(specErr, testErr) {
				return notExpected("DeleteRange", "error values inconsistent", specErr, testErr)
			}
			if specErr != nil {
				continue
			}
			// If there was a rollback, we need to generate earlier indices again.
			if last >= lastLog.Index {
				idx, _ := test.LastIndex()
				lastLog.Index = idx
			}
		}
		// After every operation, check the indices are consistent.
		specFirst, specErr := spec.FirstIndex()
		testFirst, testErr := test.FirstIndex()
		if !compareErrors(specErr, testErr) {
			return badInvariant("error values of FirstIndex inconsistent", specErr, testErr)
		}
		if specErr != nil {
			continue
		}
		if specFirst < testFirst && specFirst != 0 {
			return badInvariant("indices not subset (expected >=)", specFirst, testFirst)
		}
		specLast, specErr := spec.LastIndex()
		testLast, testErr := test.LastIndex()
		if !compareErrors(specErr, testErr) {
			return badInvariant("error values of LastIndex inconsistent", specErr, testErr)
		}
		if specErr != nil {
			continue
		}
		if specLast != testLast && specLast != 0 {
			return badInvariant("last indices not equal", specLast, testLast)
		}
	}
	return nil
}
// Compare two errors by checking both are nil or non-nil.
func compareErrors(err1 error, err2 error) bool {
return !((err1 == nil && err2 != nil) || (err1 != nil && err2 == nil))
}
// compareLogs reports whether two raft log entries carry the same index,
// term, type and payload.
func compareLogs(log1 *raft.Log, log2 *raft.Log) bool {
	if log1.Index != log2.Index || log1.Term != log2.Term {
		return false
	}
	if log1.Type != log2.Type {
		return false
	}
	return reflect.DeepEqual(log1.Data, log2.Data)
}
// notExpected builds an error describing a mismatch between the spec and
// test results of the named LogStore method.
func notExpected(method, msg string, expected, actual interface{}) error {
	const layout = "[%s] %s\nexpected: %v\nactual: %v"
	return fmt.Errorf(layout, method, msg, expected, actual)
}
// badInvariant builds an error describing a violated cross-store invariant.
func badInvariant(msg string, expected, actual interface{}) error {
	const layout = "INVARIANT: %s\nexpected: %v\nactual: %v"
	return fmt.Errorf(layout, msg, expected, actual)
}
// randLog produces the next random log entry after lastLog; the (index, term)
// pair is strictly increasing between invocations.
func randLog(lastLog raft.Log, rand *rand.Rand) raft.Log {
	next := raft.Log{
		Index: lastLog.Index + 1,
		Term:  lastLog.Term,
	}
	// Bias towards entries in the same term.
	if rand.Intn(5) == 0 {
		next.Term++
	}
	next.Type = raft.LogType(rand.Uint32())
	next.Data = []byte(fmt.Sprintf("entry %v %v", next.Index, next.Term))
	return next
}
/// ASSERTIONS

// assertCreateBoltStore creates a fresh raft-boltdb store under
// ../test_db/raft/<testName>_bolt, removing any leftovers from a previous
// run, and fails the test on error.
func assertCreateBoltStore(t testing.TB, testName string) *raftboltdb.BoltStore {
	// Best-effort cleanup; an error just means there was nothing to remove.
	_ = os.RemoveAll("../test_db/raft/" + testName + "_bolt")
	db, err := raftboltdb.NewBoltStore("../test_db/raft/" + testName + "_bolt")
	if err != nil {
		t.Fatal(err)
	}
	return db
}
|
package yamltojson
import (
"fmt"
// This is a fork of gopkg.in/yaml.v2 that fixes anchors with MapSlice
"github.com/buildkite/yaml"
)
// UnmarshalAsStringMap unmarshals YAML into out — which must be a
// *interface{} — converting every map to map[string]interface{} instead of
// map[interface{}]interface{}, such that the result can be marshaled cleanly
// into JSON.
// Via https://github.com/go-yaml/yaml/issues/139#issuecomment-220072190
//
// A wrong out type is now reported as an error instead of panicking on the
// unchecked type assertion.
func UnmarshalAsStringMap(in []byte, out interface{}) error {
	ptr, ok := out.(*interface{})
	if !ok {
		return fmt.Errorf("out must be a *interface{}, got %T", out)
	}
	var res interface{}
	if err := yaml.Unmarshal(in, &res); err != nil {
		return err
	}
	*ptr = cleanupMapValue(res)
	return nil
}
// cleanupInterfaceArray recursively cleans every element of a YAML-decoded
// slice via cleanupMapValue.
func cleanupInterfaceArray(in []interface{}) []interface{} {
	out := make([]interface{}, 0, len(in))
	for _, v := range in {
		out = append(out, cleanupMapValue(v))
	}
	return out
}
// cleanupInterfaceMap converts a map keyed by interface{} into one keyed by
// the keys' default string representations, cleaning each value recursively.
func cleanupInterfaceMap(in map[interface{}]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(in))
	for k, v := range in {
		key := fmt.Sprintf("%v", k)
		out[key] = cleanupMapValue(v)
	}
	return out
}
// cleanupMapValue normalizes one YAML-decoded value: slices and maps are
// converted recursively, supported scalars pass through unchanged, and any
// other type panics so unexpected YAML shapes surface loudly.
func cleanupMapValue(v interface{}) interface{} {
	switch v := v.(type) {
	case []interface{}:
		return cleanupInterfaceArray(v)
	case map[interface{}]interface{}:
		return cleanupInterfaceMap(v)
	case nil, bool, string, int, float64:
		return v
	default:
		// NOTE(review): yaml.v2-style decoders can also emit int64/uint64 for
		// out-of-range integers, which would panic here — confirm inputs.
		panic("Unhandled map type " + fmt.Sprintf("%T", v))
	}
}
|
package sitter_test
import (
"testing"
sitter "github.com/kiteco/go-tree-sitter"
"github.com/kiteco/go-tree-sitter/javascript"
)
// Package-level sinks that the benchmarks below assign into, so the
// benchmarked calls are not optimized away as dead code.
var (
	ResultUint32 uint32
	ResultSymbol sitter.Symbol
	ResultPoint sitter.Point
	ResultString string
	ResultNode *sitter.Node
)
// BenchmarkNode measures the per-call cost of the basic sitter.Node accessors
// on the first child of a tiny parsed JavaScript tree. Each sub-benchmark
// stores its result into a package-level sink variable.
func BenchmarkNode(b *testing.B) {
	// Shared setup: parse a one-statement source once; the benchmarks only
	// time the accessor calls.
	src := []byte("let a = 1")
	p := sitter.NewParser()
	defer p.Close()
	p.SetLanguage(javascript.GetLanguage())
	tree := p.Parse(src)
	defer tree.Close()
	root := tree.RootNode()
	first := root.Child(0)
	b.Run("Symbol", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			ResultSymbol = first.Symbol()
		}
	})
	b.Run("StartByte", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			ResultUint32 = first.StartByte()
		}
	})
	b.Run("EndByte", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			ResultUint32 = first.EndByte()
		}
	})
	b.Run("StartPoint", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			ResultPoint = first.StartPoint()
		}
	})
	b.Run("EndPoint", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			ResultPoint = first.EndPoint()
		}
	})
	b.Run("ChildCount", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			ResultUint32 = first.ChildCount()
		}
	})
	b.Run("Content", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			ResultString = first.Content(src)
		}
	})
	b.Run("Type", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			ResultString = first.Type()
		}
	})
	b.Run("Parent", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			ResultNode = first.Parent()
		}
	})
	b.Run("Child", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			ResultNode = first.Child(0)
		}
	})
}
|
package comutil
// ContainsStr reports whether the string e occurs in the slice s.
func ContainsStr(s []string, e string) bool {
	for i := range s {
		if s[i] == e {
			return true
		}
	}
	return false
}
|
package main
import "fmt"
// myName is the name used in the greeting printed by main.
const myName string = "Aleks"

// main prints a one-line greeting to stdout.
func main() {
	greeting := "Hello, my name is"
	fmt.Println(greeting, myName)
}
|
// https://tour.golang.org/concurrency/8
package main
import (
"fmt"
"golang.org/x/tour/tree"
)
// Walk walks the tree t, sending every value it holds on ch in order and
// closing ch once the traversal finishes.
func Walk(t *tree.Tree, ch chan int) {
	defer close(ch)
	_walk(t, ch)
}
// _walk performs an in-order traversal of t, sending each value on ch.
func _walk(t *tree.Tree, ch chan int) {
	if t != nil {
		_walk(t.Left, ch)
		ch <- t.Value
		_walk(t.Right, ch)
	}
}
// Same determines whether the trees t1 and t2 contain the same values, by
// walking both concurrently and comparing the streams element by element.
func Same(t1, t2 *tree.Tree) bool {
	left, right := make(chan int), make(chan int)
	go Walk(t1, left)
	go Walk(t2, right)
	for v1 := range left {
		v2, open := <-right
		if !open || v1 != v2 {
			return false
		}
	}
	// left is exhausted; the trees match only if right is exhausted too.
	_, open := <-right
	return !open
}
// main compares two independently shuffled trees holding the values 1..30
// and prints whether they are equivalent.
func main() {
	fmt.Println("Is the same: ", Same(tree.New(3), tree.New(3)))
}
|
package ravendb
import (
"net/http"
)
// Compile-time check that StartIndexingOperation satisfies
// IVoidMaintenanceOperation.
var _ IVoidMaintenanceOperation = &StartIndexingOperation{}

// StartIndexingOperation is the maintenance operation that starts indexing
// on a database; the command it builds is kept in Command.
type StartIndexingOperation struct {
	Command *StartIndexingCommand
}
// NewStartIndexingOperation returns an empty StartIndexingOperation.
func NewStartIndexingOperation() *StartIndexingOperation {
	return new(StartIndexingOperation)
}
// GetCommand builds the RavenCommand for this operation, remembering it in
// o.Command before returning it.
func (o *StartIndexingOperation) GetCommand(conventions *DocumentConventions) (RavenCommand, error) {
	cmd := NewStartIndexingCommand()
	o.Command = cmd
	return cmd, nil
}
// Compile-time check that StartIndexingCommand satisfies RavenCommand.
var (
	_ RavenCommand = &StartIndexingCommand{}
)

// StartIndexingCommand is the raven command that starts indexing on a
// database; the server replies with an empty body.
type StartIndexingCommand struct {
	RavenCommandBase
}
// NewStartIndexingCommand builds the command. The response type is marked
// empty because the endpoint returns no body.
func NewStartIndexingCommand() *StartIndexingCommand {
	cmd := new(StartIndexingCommand)
	cmd.RavenCommandBase = NewRavenCommandBase()
	cmd.ResponseType = RavenCommandResponseTypeEmpty
	return cmd
}
// CreateRequest builds the POST request that starts indexing on the node's
// database.
func (c *StartIndexingCommand) CreateRequest(node *ServerNode) (*http.Request, error) {
	endpoint := node.URL + "/databases/" + node.Database + "/admin/indexes/start"
	return NewHttpPost(endpoint, nil)
}
|
// Copyright 2018 gf Author(https://gitee.com/johng/gf). All Rights Reserved.
//
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT was not distributed with this file,
// You can obtain one at https://gitee.com/johng/gf.
// 定时任务.
package gcron
import (
"errors"
"fmt"
"gitee.com/johng/gf/g/container/garray"
"gitee.com/johng/gf/g/container/gtype"
"gitee.com/johng/gf/g/os/gtime"
"gitee.com/johng/gf/third/github.com/robfig/cron"
"reflect"
"runtime"
)
// Entry describes one registered cron job.
type Entry struct {
	Spec string // cron time spec given at registration
	Cmd string // name of the registered job function
	Time *gtime.Time // registration time
	Name string // job name (empty for anonymous jobs)
	cron *cron.Cron // underlying cron manager for this entry
}
var (
	// Default cron manager shared by all unnamed jobs.
	defaultCron = cron.New()
	// Running state of defaultCron (0: not started; > 0: running).
	cronStatus = gtype.NewInt()
	// Registered job entries (concurrent-safe array).
	cronEntries = garray.New(0, 0, true)
)
// Add registers f to run on the given cron spec. An optional name may be
// supplied so the job can later be found or deleted via Search/Remove; named
// jobs get their own cron instance, while anonymous jobs share defaultCron,
// which is started lazily on first use. It returns an error when the name is
// already taken or the spec is invalid.
func Add(spec string, f func(), name ...string) error {
	if len(name) > 0 {
		if Search(name[0]) != nil {
			// errors.New(fmt.Sprintf(...)) kept as-is so the package-level
			// errors import remains in use.
			return errors.New(fmt.Sprintf(`cron job "%s" already exists`, name[0]))
		}
		c := cron.New()
		if err := c.AddFunc(spec, f); err != nil {
			return err
		}
		cronEntries.Append(Entry{
			Spec: spec,
			Cmd:  runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name(),
			Time: gtime.Now(),
			Name: name[0],
			cron: c,
		})
		go c.Run()
		return nil
	}
	if err := defaultCron.AddFunc(spec, f); err != nil {
		return err
	}
	// Start the shared cron exactly once, on the first successful add.
	if cronStatus.Add(1) == 1 {
		go defaultCron.Run()
	}
	cronEntries.Append(Entry{
		Spec: spec,
		Cmd:  runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name(),
		Time: gtime.Now(),
	})
	return nil
}
// Search returns the Entry registered under name, or nil when none exists.
func Search(name string) *Entry {
	e, _ := searchEntry(name)
	return e
}
// searchEntry looks up a named job under a read lock, returning a pointer to
// a copy of the matching Entry and its position in cronEntries, or (nil, -1)
// when no entry carries that name.
func searchEntry(name string) (*Entry, int) {
	var found *Entry
	pos := -1
	cronEntries.RLockFunc(func(array []interface{}) {
		for i, item := range array {
			entry := item.(Entry)
			if entry.Name != name {
				continue
			}
			found = &entry
			pos = i
			break
		}
	})
	return found, pos
}
// Remove stops and deregisters the job registered under name; unknown names
// are silently ignored.
func Remove(name string) {
	entry, index := searchEntry(name)
	if index < 0 {
		return
	}
	entry.cron.Stop()
	cronEntries.Remove(index)
}
// Entries returns a snapshot of every registered job entry.
func Entries() []Entry {
	n := cronEntries.Len()
	out := make([]Entry, n)
	for i := range out {
		out[i] = cronEntries.Get(i).(Entry)
	}
	return out
}
|
package data
import (
"database/sql"
"log"
"time"
)
import _ "github.com/go-sql-driver/mysql"
var Db *sql.DB
func init() {
Db, err := sql.Open("mysql", "monstar-lab:password@tcp(localhost:3306)/todo?parseTime=true")
// To avoid client-side timeout
Db.SetConnMaxLifetime(time.Second)
if err != nil {
log.Println("Problem connecting to DB", err)
return
} else {
log.Println("Successfully connected to DB")
}
return
}
|
package altrudos
import (
vinscraper "github.com/Vindexus/go-scraper"
"testing"
"github.com/monstercat/golib/expectm"
)
// TestParseSourceURL checks that ParseSourceURL classifies source URLs
// (reddit posts, reddit comments, plain URLs), extracts the expected key and
// metadata, and rejects malformed URLs with ErrSourceInvalidURL.
func TestParseSourceURL(t *testing.T) {
	// sourceTest pairs an input URL with the expected parse outcome. A nil
	// ExpectedMeta skips the metadata check for that case.
	type sourceTest struct {
		URL string
		ExpectedType string
		ExpectedKey string
		Error error
		ExpectedMeta *expectm.ExpectedM
	}
	tests := []sourceTest{
		{
			URL: "https://www.reddit.com/r/vancouver/comments/c78dd0/just_driving_the_wrong_way_on_a_highway_exit_with/",
			ExpectedType: vinscraper.SourceRedditPost,
			ExpectedKey: "c78dd0",
			Error: nil,
			ExpectedMeta: &expectm.ExpectedM{
				"subreddit": "vancouver",
				"author": "shazoocow",
			},
		},
		{
			URL: "https://np.reddit.com/r/pathofexile/comments/c6oy9e/to_everyone_that_feels_bored_by_the_game_or/esai27c/?context=3",
			ExpectedType: vinscraper.SourceRedditComment,
			ExpectedKey: "esai27c",
			Error: nil,
			ExpectedMeta: &expectm.ExpectedM{
				"subreddit": "pathofexile",
			},
		},
		{
			URL: "https://www.reddit.com/about",
			Error: nil,
			ExpectedType: vinscraper.SourceURL,
			ExpectedKey: "https://www.reddit.com/about",
		},
		{
			URL: "facebook colin",
			Error: vinscraper.ErrSourceInvalidURL,
		},
		{
			URL: "twitter.com/@whatever",
			Error: vinscraper.ErrSourceInvalidURL,
		},
		{
			URL: "http://twitter.com/@whatever",
			Error: nil,
			ExpectedType: vinscraper.SourceURL,
			ExpectedKey: "http://twitter.com/@whatever",
		},
	}
	for i, test := range tests {
		url := test.URL
		source, err := ParseSourceURL(url)
		if err != nil {
			// An unexpected error aborts the test; an expected one is
			// compared and the case is done.
			if test.Error == nil {
				t.Fatal(err)
			} else {
				if test.Error != err {
					t.Errorf("#%d: Expected err %v but got %v", i, test.Error, err)
				}
				continue
			}
		}
		if source.Type != test.ExpectedType {
			t.Errorf("[%d] Type should be %v, found %v", i, test.ExpectedType, source.Type)
		}
		if source.Key != test.ExpectedKey {
			t.Errorf("[%d] Key should be %v, found %v", i, test.ExpectedKey, source.Key)
		}
		if test.ExpectedMeta != nil {
			if err != nil {
				t.Errorf("[%d] Error getting meta: %s", i, err)
			} else if err := expectm.CheckJSON(source.Meta, test.ExpectedMeta); err != nil {
				t.Errorf("[%d] %s", i, err)
			}
		}
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.