text
stringlengths 11
4.05M
|
|---|
package main
import (
"dandandowlonad"
"os"
)
// main downloads the target given as the first CLI argument.
//
// Bug fixed: the original indexed os.Args[1] unconditionally and only then
// checked len(os.Args), so running with no arguments panicked with
// "index out of range". The argument is now read only when present.
func main() {
	var target string
	if len(os.Args) > 1 {
		target = os.Args[1]
	}
	// NOTE(review): the semantics of Run vs Run2 live in the dandandowlonad
	// package, not visible here — confirm Run() is the intended
	// no-argument path.
	dondoko := dandandowlonad.NewDanDownload(target)
	if len(os.Args) == 1 {
		os.Exit(dondoko.Run())
	}
	os.Exit(dondoko.Run2())
}
|
package main
import (
"encoding/json"
"fmt"
"log"
)
// Response is the JSON payload printed to stdout.
type Response struct {
	Code   int    `json:"code"`   // HTTP-style status code
	Result string `json:"result"` // human-readable message
}

// main marshals a fixed Response to JSON and prints it.
func main() {
	res := Response{
		Code:   200,
		Result: "Hello Charlie",
	}
	// Renamed from `json`: the original variable shadowed the
	// encoding/json package name, which is legal but error-prone.
	payload, err := json.Marshal(res)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(payload))
}
|
package main
import (
"fmt"
"encoding/xml"
)
// Gpx mirrors the subset of a GPX 1.1 document used here: the creator
// attribute, metadata timestamp, track name and track points.
type Gpx struct {
	Creator     string       `xml:"creator,attr"`
	Time        string       `xml:"metadata>time"`
	Title       string       `xml:"trk>name"`
	TrackPoints []TrackPoint `xml:"trk>trkseg>trkpt"`
}

// TrackPoint is a single <trkpt> sample, including the Garmin
// TrackPointExtension air temperature when present.
type TrackPoint struct {
	Lat         float64 `xml:"lat,attr"`
	Lon         float64 `xml:"lon,attr"`
	Elevation   float32 `xml:"ele"`
	Time        string  `xml:"time"`
	Temperature int     `xml:"extensions>TrackPointExtension>atemp"`
}

// main parses an embedded GPX sample and prints the number of track
// points and the temperature of the first one.
//
// Fixes over the original:
//   - the sample document was missing its closing </gpx> tag, which makes
//     xml.Unmarshal report an "unexpected EOF" syntax error;
//   - that error was silently discarded with `_ =`;
//   - g.TrackPoints[0] was indexed without a length check, panicking
//     whenever parsing yielded no points.
func main() {
	data := `<gpx creator="StravaGPX" version="1.1" xmlns="http://www.topografix.com/GPX/1/1"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd http://www.garmin.com/xmlschemas/GpxExtensions/v3 http://www.garmin.com/xmlschemas/GpxExtensionsv3.xsd http://www.garmin.com/xmlschemas/TrackPointExtension/v1 http://www.garmin.com/xmlschemas/TrackPointExtensionv1.xsd http://www.garmin.com/xmlschemas/GpxExtensions/v3 http://www.garmin.com/xmlschemas/GpxExtensionsv3.xsd http://www.garmin.com/xmlschemas/TrackPointExtension/v1 http://www.garmin.com/xmlschemas/TrackPointExtensionv1.xsd" xmlns:gpxtpx="http://www.garmin.com/xmlschemas/TrackPointExtension/v1" xmlns:gpxx="http://www.garmin.com/xmlschemas/GpxExtensions/v3">
<metadata>
<time>2013-02-16T10:11:25Z</time>
</metadata>
<trk>
<name>Demo Data</name>
<trkseg>
<trkpt lat="51.6395658" lon="-3.3623858">
<ele>111.6</ele>
<time>2013-02-16T10:11:25Z</time>
<extensions>
<gpxtpx:TrackPointExtension>
<gpxtpx:atemp>8</gpxtpx:atemp>
<gpxtpx:hr>136</gpxtpx:hr>
<gpxtpx:cad>0</gpxtpx:cad>
</gpxtpx:TrackPointExtension>
</extensions>
</trkpt>
</trkseg>
</trk>
</gpx>
`
	g := &Gpx{}
	if err := xml.Unmarshal([]byte(data), g); err != nil {
		fmt.Printf("parse error: %v\n", err)
		return
	}
	fmt.Printf("len: %d\n", len(g.TrackPoints))
	if len(g.TrackPoints) > 0 {
		fmt.Printf("temp: %v\n", g.TrackPoints[0].Temperature)
	}
}
|
package main
import (
"fmt"
"image/color"
"math"
)
// vector is a 3-component float64 vector, used for points, directions
// and RGB color triples alike.
type vector struct {
	x, y, z float64
}

// ray is an origin plus a direction, with a cached ray→world
// transformation matrix (filled by calculateTransformMatrix).
type ray struct {
	origin, dir vector
	transformMatrix m44
}

// m44 is a fixed-size 4x4 matrix stored row-major.
type m44 [4][4]float64

// matrix is a dynamically sized 2D matrix (rows of columns).
type matrix [][]float64
// NewNormalized builds a unit-length vector pointing along (x, y, z).
// The zero vector cannot be normalized and is returned as-is.
func NewNormalized(x, y, z float64) vector {
	length := math.Sqrt(x*x + y*y + z*z)
	if length == 0 {
		return vector{}
	}
	return vector{x / length, y / length, z / length}
}
// Add returns the component-wise sum v + u.
func (v vector) Add(u vector) vector {
	return vector{v.x + u.x, v.y + u.y, v.z + u.z}
}

// Sub returns the component-wise difference v - u.
func (v vector) Sub(u vector) vector {
	return vector{v.x - u.x, v.y - u.y, v.z - u.z}
}

// Multiply returns v scaled by the scalar a.
func (v vector) Multiply(a float64) vector {
	return vector{v.x * a, v.y * a, v.z * a}
}

// EntrywiseProduct returns the Hadamard (element-wise) product of v and a.
func (v vector) EntrywiseProduct(a vector) vector {
	return vector{v.x * a.x, v.y * a.y, v.z * a.z}
}

// DotProduct returns the scalar (inner) product of v and u.
func (v vector) DotProduct(u vector) float64 {
	return v.x*u.x + v.y*u.y + v.z*u.z
}

// CrossProduct returns the right-handed cross product v × u.
func (v vector) CrossProduct(u vector) vector {
	return vector{v.y*u.z - v.z*u.y, v.z*u.x - v.x*u.z, v.x * u.y - v.y*u.x}
}

// Normalize returns a unit-length copy of v (the zero vector stays zero).
func (v vector) Normalize() vector {
	return NewNormalized(v.x, v.y, v.z)
}

// Length returns the Euclidean norm of v.
func (v vector) Length() float64 {
	return math.Sqrt(v.x*v.x + v.y*v.y + v.z*v.z)
}

// IsZero reports whether every component is exactly zero.
func (v vector) IsZero() bool {
	return v.x == 0 && v.y == 0 && v.z == 0
}
// TransformPoint treats v as the homogeneous row vector [x y z 1],
// multiplies it by m, and performs the perspective divide by w.
// NOTE(review): w == 0 would divide by zero — confirm callers only use
// affine/projective matrices where this cannot happen.
func (v vector) TransformPoint(m m44) vector {
	var x, y, z, w float64
	x = v.x * m[0][0] + v.y * m[1][0] + v.z*m[2][0] + m[3][0]
	y = v.x * m[0][1] + v.y * m[1][1] + v.z*m[2][1] + m[3][1]
	z = v.x * m[0][2] + v.y * m[1][2] + v.z*m[2][2] + m[3][2]
	w = v.x * m[0][3] + v.y * m[1][3] + v.z*m[2][3] + m[3][3]
	return vector{x/w, y/w, z/w}
}
// TransformDir transforms v as a direction: the same row-vector multiply
// as TransformPoint but ignoring the translation row and the w divide.
func (v vector) TransformDir(m m44) vector {
	var x, y, z float64
	x = v.x * m[0][0] + v.y * m[1][0] + v.z*m[2][0]
	y = v.x * m[0][1] + v.y * m[1][1] + v.z*m[2][1]
	z = v.x * m[0][2] + v.y * m[1][2] + v.z*m[2][2]
	return vector{x, y, z}
}
// Reflect mirrors v about the surface normal n: v - 2(v·n)n.
func (v vector) Reflect(n vector) vector {
	// assuming v is normalized
	return v.Sub(n.Multiply(2.0 * v.DotProduct(n)))
}
// Refract returns the direction of v after refraction at a surface with
// normal `normale`, per Snell's law. On total internal reflection the
// zero vector is returned; callers are expected to detect that via
// IsZero (note the commented-out Reflect alternative below).
func (v vector) Refract(normale vector, refractiveIndex float64) vector {
	if refractiveIndex == 1 {
		// shortcut for non-refracting materials
		return v
	}
	// assuming v is normalized and refraction happens on material/vacuum or vacuum/material boundary
	r := 1 / refractiveIndex
	// Cosine of the incidence angle, clamped to [-1, 1] for numerical safety.
	cosTheta := -math.Max(-1.0, math.Min(1.0, v.DotProduct(normale)))
	if cosTheta < 0 {
		// refraction from material to vacuum, invert normal and refractiveIndex
		normale = normale.Multiply(-1)
		r = refractiveIndex
	}
	// k < 0 means the refracted angle would exceed 90°.
	k := 1.0 - r*r*(1-cosTheta*cosTheta)
	if k < 0 {
		// total internal reflection
		//return v.Reflect(normale)
		return vector{0, 0, 0}
	}
	return v.Multiply(r).Add(normale.Multiply(r*cosTheta - math.Sqrt(k)))
}
// Offset nudges the point v off the surface along normale by dist, so
// that secondary rays do not immediately re-intersect the surface they
// started on ("shadow acne"). The point moves against the normal when
// dir points into the surface, with it otherwise.
//
// Bug fixed: the dist parameter was accepted but ignored — the offset
// was hard-coded to 1e-3. It is now honored; callers already passing
// 1e-3 are unaffected.
func (v vector) Offset(dir, normale vector, dist float64) vector {
	if dir.DotProduct(normale) < 0 {
		return v.Sub(normale.Multiply(dist))
	}
	return v.Add(normale.Multiply(dist))
}
// toNRGBA64 converts a color vector with components nominally in [0, 1]
// to a fully opaque 16-bit NRGBA color, clamping out-of-range components.
//
// Fix: components were clamped only from above. A negative component was
// passed straight to uint16(...), and converting a negative float to an
// unsigned integer is implementation-defined in Go — negatives now clamp
// to 0.
func (v vector) toNRGBA64() color.NRGBA64 {
	const s float64 = 1<<16 - 1
	clamp := func(c float64) uint16 {
		switch {
		case c < 0:
			return 0
		case c > s:
			return uint16(s)
		default:
			return uint16(c)
		}
	}
	return color.NRGBA64{
		R: clamp(v.x * s),
		G: clamp(v.y * s),
		B: clamp(v.z * s),
		A: uint16(s),
	}
}
// Multiply returns the matrix product a×b. It panics when the inner
// dimensions disagree (columns of a must equal rows of b).
func (a matrix) Multiply(b matrix) matrix {
	rowsA, colsA := len(a), len(a[0])
	rowsB, colsB := len(b), len(b[0])
	if colsA != rowsB {
		panic(fmt.Sprintf("cannot multiply %dx%d and %dx%d", rowsA, colsA, rowsB, colsB))
	}
	out := make(matrix, rowsA)
	for i := range out {
		row := make([]float64, colsB)
		// Accumulate over k in ascending order, same as the textbook
		// triple loop, so float summation order is unchanged.
		for j := 0; j < colsB; j++ {
			for k := 0; k < colsA; k++ {
				row[j] += a[i][k] * b[k][j]
			}
		}
		out[i] = row
	}
	return out
}
// Dump prints the matrix to stdout, one tab-separated row per line.
// Debug helper only.
func (m m44) Dump() {
	for _, r := range m {
		for _, v := range r {
			fmt.Printf("%v\t", v)
		}
		fmt.Print("\n")
	}
}
// calculateTransformMatrix calculates transformation matrix
// to convert from ray basis to world basis
// in ray basis z-axis goes in -r.dir
//
// The result is row-major with the basis vectors in rows 0-2 and the ray
// origin as the translation in row 3, matching TransformPoint's
// row-vector convention. The world-up hint is +Y.
//
// NOTE(review): when r.dir is parallel to (0,1,0) the cross product
// produces a zero `right` vector and the basis degenerates; `right` and
// `up` are also only unit-length if r.dir is normalized — confirm
// callers guarantee both.
func (r *ray) calculateTransformMatrix() {
	tmp := vector{0,1,0}
	forward := r.dir.Multiply(-1).Normalize()
	right := tmp.CrossProduct(forward)
	up := forward.CrossProduct(right)
	r.transformMatrix[0][0] = right.x
	r.transformMatrix[0][1] = right.y
	r.transformMatrix[0][2] = right.z
	r.transformMatrix[0][3] = 0
	r.transformMatrix[1][0] = up.x
	r.transformMatrix[1][1] = up.y
	r.transformMatrix[1][2] = up.z
	r.transformMatrix[1][3] = 0
	r.transformMatrix[2][0] = forward.x
	r.transformMatrix[2][1] = forward.y
	r.transformMatrix[2][2] = forward.z
	r.transformMatrix[2][3] = 0
	r.transformMatrix[3][0] = r.origin.x
	r.transformMatrix[3][1] = r.origin.y
	r.transformMatrix[3][2] = r.origin.z
	r.transformMatrix[3][3] = 1
}
|
package types
// ZoneIngressTokenRequest is the request body for issuing a zone ingress
// token, identifying the target zone by name.
type ZoneIngressTokenRequest struct {
	Zone string `json:"zone"`
}
|
package goil
import "testing"
// Test_AvailableGroups verifies that an authenticated session can list at
// least one group, and logs the id→name pairs for manual inspection.
// The test is skipped when no live session is configured (skipIfNoSession).
func Test_AvailableGroups(t *testing.T) {
	skipIfNoSession(t)
	groups, err := session.GetAvailableGroups()
	if err != nil {
		t.Fatal(err)
	}
	if len(groups) == 0 {
		t.Fatal("No groups returned when there should at least be one")
	}
	t.Log("These are the available groups")
	// groups maps a numeric id to a display name.
	for id, name := range groups {
		t.Logf("%d: %s\n", id, name)
	}
}
|
package main
import (
"fmt"
"testing"
)
// tester is a small value type used to benchmark struct-vs-pointer
// construction and comparison.
type tester struct {
	id int
	values []int
}
// newTesterStruct returns a tester by value, defensively cloning the
// provided slice so the caller's backing array is never aliased.
func newTesterStruct(id int, value []int) tester {
	cloned := make([]int, len(value))
	copy(cloned, value)
	return tester{id: id, values: cloned}
}
// newTesterPointer returns a heap-allocated tester, defensively cloning
// the provided slice so the caller's backing array is never aliased.
func newTesterPointer(id int, values []int) *tester {
	cloned := make([]int, len(values))
	copy(cloned, values)
	return &tester{id: id, values: cloned}
}
// compareStructs orders testers by id, then lexicographically by values
// (element-wise, with the shorter slice ordered first on a tie).
// It returns -1, 0 or 1, receiving both operands by value.
func (t tester) compareStructs(u tester) int {
	if t.id != u.id {
		if t.id < u.id {
			return -1
		}
		return 1
	}
	shorter := len(t.values)
	if len(u.values) < shorter {
		shorter = len(u.values)
	}
	for i := 0; i < shorter; i++ {
		if t.values[i] != u.values[i] {
			if t.values[i] < u.values[i] {
				return -1
			}
			return 1
		}
	}
	if len(t.values) < len(u.values) {
		return -1
	}
	if len(u.values) < len(t.values) {
		return 1
	}
	return 0
}
// comparePointers is the pointer-receiver twin of compareStructs: it
// orders by id, then lexicographically by values (shorter slice first on
// a tie), returning -1, 0 or 1.
func (t *tester) comparePointers(u *tester) int {
	if t.id != u.id {
		if t.id < u.id {
			return -1
		}
		return 1
	}
	shorter := len(t.values)
	if len(u.values) < shorter {
		shorter = len(u.values)
	}
	for i := 0; i < shorter; i++ {
		if t.values[i] != u.values[i] {
			if t.values[i] < u.values[i] {
				return -1
			}
			return 1
		}
	}
	if len(t.values) < len(u.values) {
		return -1
	}
	if len(u.values) < len(t.values) {
		return 1
	}
	return 0
}
// TestComparers checks that a struct tester and a pointer tester built
// from identical data compare equal through every comparison path.
func TestComparers(t *testing.T) {
	var (
		id = 1
		value = []int{1, 2, 3}
		s, p = newTesterStruct(id, value), newTesterPointer(id, value)
		r int
	)
	if r = s.compareStructs(*p); r != 0 {
		t.Fatalf("expected 0\nreceived %d\n", r)
	}
	// Value receiver auto-addressed by the compiler.
	if r = s.comparePointers(p); r != 0 {
		t.Fatalf("expected 0\nreceived %d\n", r)
	}
	// Same call with the address taken explicitly.
	if r = (&s).comparePointers(p); r != 0 {
		t.Fatalf("expected 0\nreceived %d\n", r)
	}
}
// BenchmarkNewStruct measures by-value construction for source slices of
// length 0..9. Each sub-benchmark closes over i/values, which is safe
// because b0.Run executes synchronously.
func BenchmarkNewStruct(b0 *testing.B) {
	for i := 0; i < 10; i++ {
		values := make([]int, 0, i)
		for j := 0; j < i; j++ {
			values = append(values, j)
		}
		b0.Run(
			fmt.Sprintf("values = [0,%d]", i),
			func(b1 *testing.B) {
				for j := 0; j < b1.N; j++ {
					newTesterStruct(i, values)
				}
			},
		)
	}
}

// BenchmarkNewPointer is the pointer-constructor counterpart of
// BenchmarkNewStruct, over the same slice lengths.
func BenchmarkNewPointer(b0 *testing.B) {
	for i := 0; i < 10; i++ {
		values := make([]int, 0, i)
		for j := 0; j < i; j++ {
			values = append(values, j)
		}
		b0.Run(
			fmt.Sprintf("values = [0,%d]", i),
			func(b1 *testing.B) {
				for j := 0; j < b1.N; j++ {
					newTesterPointer(i, values)
				}
			},
		)
	}
}
// BenchmarkSliceStructs measures building a []tester of 0..9 elements,
// each with a fixed 10-value payload. The built slice is deliberately
// discarded — allocation cost is what is being measured.
func BenchmarkSliceStructs(b0 *testing.B) {
	var (
		maxNumTesters = 10
		numValues = 10
	)
	for i := 0; i < maxNumTesters; i++ {
		b0.Run(
			fmt.Sprintf("%d testers, %d values", i, numValues),
			func(b1 *testing.B) {
				for j := 0; j < b1.N; j++ {
					s := make([]tester, 0, i)
					for k := 0; k < i; k++ {
						s = append(s, newTesterStruct(k, make([]int, numValues)))
					}
				}
			},
		)
	}
}

// BenchmarkSlicePointers mirrors BenchmarkSliceStructs with a []*tester
// built via newTesterPointer.
func BenchmarkSlicePointers(b0 *testing.B) {
	var (
		maxNumTesters = 10
		numValues = 10
	)
	for i := 0; i < maxNumTesters; i++ {
		b0.Run(
			fmt.Sprintf("%d testers, %d values", i, numValues),
			func(b1 *testing.B) {
				for j := 0; j < b1.N; j++ {
					s := make([]*tester, 0, i)
					for k := 0; k < i; k++ {
						s = append(s, newTesterPointer(k, make([]int, numValues)))
					}
				}
			},
		)
	}
}
// BenchmarkSliceStructs1 fixes the tester count at 10 and varies the
// per-tester payload length 0..9 (the inverse axis of
// BenchmarkSliceStructs).
func BenchmarkSliceStructs1(b0 *testing.B) {
	var (
		numTesters = 10
		maxNumValues = 10
	)
	for i := 0; i < maxNumValues; i++ {
		b0.Run(
			fmt.Sprintf("%d testers, %d values", numTesters, i),
			func(b1 *testing.B) {
				for j := 0; j < b1.N; j++ {
					s := make([]tester, 0, numTesters)
					for k := 0; k < numTesters; k++ {
						s = append(s, newTesterStruct(k, make([]int, i)))
					}
				}
			},
		)
	}
}

// BenchmarkSlicePointers1 mirrors BenchmarkSliceStructs1 with []*tester.
func BenchmarkSlicePointers1(b0 *testing.B) {
	var (
		numTesters = 10
		maxNumValues = 10
	)
	for i := 0; i < maxNumValues; i++ {
		b0.Run(
			fmt.Sprintf("%d testers, %d values", numTesters, i),
			func(b1 *testing.B) {
				for j := 0; j < b1.N; j++ {
					s := make([]*tester, 0, numTesters)
					for k := 0; k < numTesters; k++ {
						s = append(s, newTesterPointer(k, make([]int, i)))
					}
				}
			},
		)
	}
}
// BenchmarkSliceStructs2 sweeps the full grid: tester counts 0..9 crossed
// with payload lengths 0..9, building []tester each iteration.
func BenchmarkSliceStructs2(b0 *testing.B) {
	var (
		maxNumTesters = 10
		maxNumValues = 10
	)
	for h := 0; h < maxNumTesters; h++ {
		for i := 0; i < maxNumValues; i++ {
			b0.Run(
				fmt.Sprintf("%d testers, %d values", h, i),
				func(b1 *testing.B) {
					for j := 0; j < b1.N; j++ {
						s := make([]tester, 0, h)
						for k := 0; k < h; k++ {
							s = append(s, newTesterStruct(k, make([]int, i)))
						}
					}
				},
			)
		}
	}
}
// BenchmarkSlicePointers2 sweeps the full grid: tester counts 0..9
// crossed with payload lengths 0..9, building []*tester each iteration.
//
// Bug fixed: this benchmark was a verbatim copy of BenchmarkSliceStructs2
// — it built []tester via newTesterStruct, so the "pointers" variant was
// silently re-measuring the struct path. It now uses []*tester and
// newTesterPointer, mirroring BenchmarkSlicePointers/BenchmarkSlicePointers1.
func BenchmarkSlicePointers2(b0 *testing.B) {
	var (
		maxNumTesters = 10
		maxNumValues  = 10
	)
	for h := 0; h < maxNumTesters; h++ {
		for i := 0; i < maxNumValues; i++ {
			b0.Run(
				fmt.Sprintf("%d testers, %d values", h, i),
				func(b1 *testing.B) {
					for j := 0; j < b1.N; j++ {
						s := make([]*tester, 0, h)
						for k := 0; k < h; k++ {
							s = append(s, newTesterPointer(k, make([]int, i)))
						}
					}
				},
			)
		}
	}
}
|
/*
Copyright IBM Corporation 2020
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sourcetypes
// DockerCompose models the subset of a docker-compose file parsed here:
// the compose format version and the services map.
type DockerCompose struct {
	Version string `yaml:"version"` // compose file format version
	DCServices map[string]DCService `yaml:"services"` // service name -> definition
}

// DCService is a single service entry; only the image reference is read.
type DCService struct {
	Image string `yaml:"image,omitempty"`
}
|
package queries
import (
"log"
"github.com/jmoiron/sqlx"
"gitlab.com/semestr-6/projekt-grupowy/backend/obsluga-formularzy/configuration"
"gitlab.com/semestr-6/projekt-grupowy/backend/obsluga-formularzy/energy_resources/models"
)
// GET_GUS_ID_SQL selects the id, Polish display name and GUS identifier
// of every row in energy_resources."GUSResources".
const GET_GUS_ID_SQL = `
SELECT
g."GUSResourceId"
,g."GUSResourceNamePl"
,g."GUSId"
FROM
energy_resources."GUSResources" g;`
// GetGUSResourcesId loads every GUS resource identifier from the database.
//
// Bug fixed: `defer db.Close()` was registered before the sqlx.Open error
// check, so a failed open deferred Close on a nil *DB. The defer now runs
// only after a successful open.
//
// NOTE(review): log.Fatal terminates the process, so the error return is
// never observed by callers on failure — kept for compatibility, but
// consider returning the error instead.
func GetGUSResourcesId() (gusResources []models.GUSResourceId, err error) {
	db, err := sqlx.Open("postgres", configuration.ConnectionString)
	if err != nil {
		log.Fatal(err)
		return
	}
	defer db.Close()
	if err = db.Select(&gusResources, GET_GUS_ID_SQL); err != nil {
		log.Fatal(err)
		return
	}
	return
}
|
package rest
import (
"code.huawei.com/cse/assets/consumer-gosdk/schemas/rest/service"
"code.huawei.com/cse/model"
"code.huawei.com/cse/route/consumer"
"github.com/go-chassis/go-chassis/core/lager"
"github.com/go-chassis/go-chassis/core/server"
"net"
"net/http"
)
// Name is the protocol identifier under which this server registers.
const Name = "http"

// init installs the HTTP protocol server factory into the go-chassis
// server plugin registry at startup.
func init() {
	server.InstallPlugin(Name, NewServer)
}
// ChassisInvoker adapts the schema service handler to the consumer
// router's invoker interface.
type ChassisInvoker struct {
}

// Invoke forwards the invocation to service.Handle. Failures are handled
// inside the service layer, so nil is always returned here.
func (c *ChassisInvoker) Invoke(p *model.InvokeOption) error {
	service.Handle(p)
	return nil
}
// HttpServer is a go-chassis ProtocolServer serving the consumer router
// over plain HTTP.
type HttpServer struct {
	Opts server.Options
}

// Register is a no-op: handlers are wired statically in Start, so there
// is nothing to register per schema.
func (h *HttpServer) Register(interface{}, ...server.RegisterOption) (string, error) {
	return "", nil
}
// Start validates the configured address, launches the HTTP listener in a
// background goroutine and returns immediately.
func (h *HttpServer) Start() error {
	// Fail fast on a malformed host:port before spawning the goroutine.
	_, _, err := net.SplitHostPort(h.Opts.Address)
	if err != nil {
		return err
	}
	handler := consumer.NewRouter(&ChassisInvoker{})
	go func() {
		s := http.Server{
			Addr: h.Opts.Address,
			Handler: handler,
		}
		// ListenAndServe blocks; any exit is pushed to the chassis
		// runtime error channel.
		if err := s.ListenAndServe(); err != nil {
			server.ErrRuntime <- err
			return
		}
	}()
	// NOTE(review): this logs before the goroutine has actually bound the
	// listener, and at Warn level although informational — confirm intended.
	lager.Logger.Warnf("Http server listening on: %s", h.Opts.Address)
	return nil
}
// Stop is a no-op. NOTE(review): the http.Server created in Start is not
// retained, so no graceful Shutdown is possible here — confirm acceptable.
func (h *HttpServer) Stop() error {
	return nil
}

// String returns the protocol name ("http").
func (h *HttpServer) String() string {
	return Name
}

// NewServer is the factory registered with the plugin system in init.
func NewServer(opts server.Options) server.ProtocolServer {
	return &HttpServer{
		Opts: opts,
	}
}
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package funcframework is a Functions Framework implementation for Go. It allows you to register
// HTTP and event functions, then start an HTTP server serving those functions.
package funcframework
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"reflect"
"runtime/debug"
"strings"
"github.com/GoogleCloudPlatform/functions-framework-go/internal/registry"
cloudevents "github.com/cloudevents/sdk-go/v2"
)
const (
	// functionStatusHeader marks error responses so the platform can tell
	// user-function failures apart from framework crashes.
	functionStatusHeader = "X-Google-Status"
	crashStatus = "crash"
	errorStatus = "error"
	panicMessageTmpl = "A panic occurred during %s. Please see logs for more details."
	fnErrorMessageStderrTmpl = "Function error: %v"
)

// errorType is the reflect.Type of the error interface, used to validate
// user-function signatures and classify their return values.
var errorType = reflect.TypeOf((*error)(nil)).Elem()
// recoverPanic recovers from a panic in a consistent manner. panicSrc should
// describe what was happening when the panic was encountered, for example
// "user function execution". w is an http.ResponseWriter to write a generic
// response body to that does not expose the details of the panic; w can be
// nil to skip this.
func recoverPanic(w http.ResponseWriter, panicSrc string) {
	if r := recover(); r != nil {
		// Log the full panic value and stack to stderr, but send only the
		// generic message to the client so internals are not exposed.
		genericMsg := fmt.Sprintf(panicMessageTmpl, panicSrc)
		fmt.Fprintf(os.Stderr, "%s\npanic message: %v\nstack trace: \n%s", genericMsg, r, debug.Stack())
		if w != nil {
			writeHTTPErrorResponse(w, http.StatusInternalServerError, crashStatus, genericMsg)
		}
	}
}
// RegisterHTTPFunction registers fn as an HTTP function.
// Maintained for backward compatibility. Please use RegisterHTTPFunctionContext instead.
// fn must have the signature func(http.ResponseWriter, *http.Request);
// anything else panics.
func RegisterHTTPFunction(path string, fn interface{}) {
	defer recoverPanic(nil, "function registration")
	fnHTTP, ok := fn.(func(http.ResponseWriter, *http.Request))
	if !ok {
		panic("expected function to have signature func(http.ResponseWriter, *http.Request)")
	}
	ctx := context.Background()
	if err := RegisterHTTPFunctionContext(ctx, path, fnHTTP); err != nil {
		// Fixed: the panic message previously blamed
		// RegisterEventFunctionContext, which is not what is called here.
		panic(fmt.Sprintf("unexpected error in RegisterHTTPFunctionContext: %v", err))
	}
}
// RegisterEventFunction registers fn as an event function.
// Maintained for backward compatibility. Please use RegisterEventFunctionContext instead.
// Signature validation happens downstream; failures surface as a panic.
func RegisterEventFunction(path string, fn interface{}) {
	ctx := context.Background()
	defer recoverPanic(nil, "function registration")
	if err := RegisterEventFunctionContext(ctx, path, fn); err != nil {
		panic(fmt.Sprintf("unexpected error in RegisterEventFunctionContext: %v", err))
	}
}
// RegisterHTTPFunctionContext registers fn as an HTTP function.
func RegisterHTTPFunctionContext(ctx context.Context, path string, fn func(http.ResponseWriter, *http.Request)) error {
	return registry.Default().RegisterHTTP(fn, registry.WithPath(path))
}

// RegisterEventFunctionContext registers fn as an event function. The function must have two arguments, a
// context.Context and a struct type depending on the event, and return an error. If fn has the
// wrong signature, RegisterEventFunction returns an error.
func RegisterEventFunctionContext(ctx context.Context, path string, fn interface{}) error {
	return registry.Default().RegisterEvent(fn, registry.WithPath(path))
}

// RegisterCloudEventFunctionContext registers fn as a CloudEvent function.
func RegisterCloudEventFunctionContext(ctx context.Context, path string, fn func(context.Context, cloudevents.Event) error) error {
	return registry.Default().RegisterCloudEvent(fn, registry.WithPath(path))
}
// Start serves an HTTP server with registered function(s) on the given
// port, blocking until the server exits.
func Start(port string) error {
	server, err := initServer()
	if err != nil {
		return err
	}
	return http.ListenAndServe(":"+port, server)
}
// initServer builds the ServeMux routing requests to registered
// functions: either the single FUNCTION_TARGET function at "/", or every
// registered function at its own path.
func initServer() (*http.ServeMux, error) {
	server := http.NewServeMux()
	// If FUNCTION_TARGET is set, only serve this target function at path "/".
	// If not set, serve all functions at the registered paths.
	if target := os.Getenv("FUNCTION_TARGET"); len(target) > 0 {
		var targetFn *registry.RegisteredFunction
		fn, ok := registry.Default().GetRegisteredFunction(target)
		if ok {
			targetFn = fn
		} else if lastFnWithoutName := registry.Default().GetLastFunctionWithoutName(); lastFnWithoutName != nil {
			// If no function was found with the target name, assume the last function that's not registered declaratively
			// should be served at '/'.
			targetFn = lastFnWithoutName
		} else {
			return nil, fmt.Errorf("no matching function found with name: %q", target)
		}
		h, err := wrapFunction(targetFn)
		if err != nil {
			return nil, fmt.Errorf("failed to serve function %q: %v", target, err)
		}
		server.Handle("/", h)
		return server, nil
	}
	fns := registry.Default().GetAllFunctions()
	for _, fn := range fns {
		h, err := wrapFunction(fn)
		if err != nil {
			return nil, fmt.Errorf("failed to serve function at path %q: %v", fn.Path, err)
		}
		server.Handle(fn.Path, h)
	}
	return server, nil
}
// wrapFunction selects the wrapper matching whichever kind of function
// (HTTP, CloudEvent, background event or typed) was registered.
func wrapFunction(fn *registry.RegisteredFunction) (http.Handler, error) {
	// When FUNCTION_TARGET is unset we serve every registered function,
	// so log which one this handler belongs to. (The original comment here
	// described the opposite condition.)
	if os.Getenv("FUNCTION_TARGET") == "" {
		fmt.Printf("Serving function: %q\n", fn.Name)
	}
	if fn.HTTPFn != nil {
		handler, err := wrapHTTPFunction(fn.HTTPFn)
		if err != nil {
			return nil, fmt.Errorf("unexpected error in wrapHTTPFunction: %v", err)
		}
		return handler, nil
	} else if fn.CloudEventFn != nil {
		handler, err := wrapCloudEventFunction(context.Background(), fn.CloudEventFn)
		if err != nil {
			return nil, fmt.Errorf("unexpected error in wrapCloudEventFunction: %v", err)
		}
		return handler, nil
	} else if fn.EventFn != nil {
		handler, err := wrapEventFunction(fn.EventFn)
		if err != nil {
			return nil, fmt.Errorf("unexpected error in wrapEventFunction: %v", err)
		}
		return handler, nil
	} else if fn.TypedFn != nil {
		handler, err := wrapTypedFunction(fn.TypedFn)
		if err != nil {
			return nil, fmt.Errorf("unexpected error in wrapTypedFunction: %v", err)
		}
		return handler, nil
	}
	return nil, fmt.Errorf("missing function entry in %v", fn)
}
// wrapHTTPFunction adapts a registered HTTP function into an http.Handler
// that flushes logs on GCF and converts panics into 500 responses.
func wrapHTTPFunction(fn func(http.ResponseWriter, *http.Request)) (http.Handler, error) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		if os.Getenv("K_SERVICE") != "" {
			// Force flush of logs after every function trigger when running on GCF.
			defer fmt.Println()
			defer fmt.Fprintln(os.Stderr)
		}
		defer recoverPanic(w, "user function execution")
		fn(w, r)
	}
	return http.HandlerFunc(handler), nil
}
// wrapEventFunction adapts a background-event function into an
// http.Handler, converting incoming CloudEvents to the legacy background
// format when necessary.
//
// Bug fixed: when the CloudEvent→background conversion failed, the 400
// error response was written but execution fell through into
// handleEventFunction, processing the bad request anyway (and triggering
// a superfluous second WriteHeader). A `return` now follows the error.
func wrapEventFunction(fn interface{}) (http.Handler, error) {
	err := validateEventFunction(fn)
	if err != nil {
		return nil, err
	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if os.Getenv("K_SERVICE") != "" {
			// Force flush of logs after every function trigger when running on GCF.
			defer fmt.Println()
			defer fmt.Fprintln(os.Stderr)
		}
		if shouldConvertCloudEventToBackgroundRequest(r) {
			if err := convertCloudEventToBackgroundRequest(r); err != nil {
				writeHTTPErrorResponse(w, http.StatusBadRequest, crashStatus, fmt.Sprintf("error converting CloudEvent to Background Event: %v", err))
				return
			}
		}
		handleEventFunction(w, r, fn)
	}), nil
}
// wrapTypedFunction adapts a strongly-typed function (func(T) (R, error)
// shape, validated below) into an http.Handler that decodes the request
// body into T and serializes the result.
//
// Bug fixed: the original reused the single reflect.Value created at
// registration time for every request, so concurrent requests unmarshaled
// into — and called the function with — the same shared argument object.
// A fresh value is now allocated per request.
func wrapTypedFunction(fn interface{}) (http.Handler, error) {
	inputType, err := validateTypedFunction(fn)
	if err != nil {
		return nil, err
	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, err := readHTTPRequestBody(r)
		if err != nil {
			writeHTTPErrorResponse(w, http.StatusBadRequest, crashStatus, fmt.Sprintf("%v", err))
			return
		}
		// inputType holds a *T; allocate a new *T for this request.
		argVal := reflect.New(inputType.Type().Elem())
		if err := json.Unmarshal(body, argVal.Interface()); err != nil {
			writeHTTPErrorResponse(w, http.StatusBadRequest, crashStatus, fmt.Sprintf("Error while converting input data. %s", err.Error()))
			return
		}
		defer recoverPanic(w, "user function execution")
		funcReturn := reflect.ValueOf(fn).Call([]reflect.Value{
			argVal.Elem(),
		})
		handleTypedReturn(w, funcReturn)
	}), nil
}
// handleTypedReturn writes a typed function's return values to the
// response: a trailing non-nil error becomes a 500, otherwise the first
// value is JSON-encoded into the body.
//
// Fixes:
//   - the payload was written with fmt.Fprintf(w, string(returnVal)),
//     using user data as a format string (any '%' corrupted the output);
//     it is now written verbatim;
//   - the json.Marshal error was silently discarded; it now yields a 500;
//   - a nil first value (e.g. a lone nil error return) made
//     reflect.TypeOf(nil).AssignableTo panic; it is now skipped.
func handleTypedReturn(w http.ResponseWriter, funcReturn []reflect.Value) {
	if len(funcReturn) == 0 {
		return
	}
	errorVal := funcReturn[len(funcReturn)-1].Interface() // last return must be of type error
	if errorVal != nil && reflect.TypeOf(errorVal).AssignableTo(errorType) {
		writeHTTPErrorResponse(w, http.StatusInternalServerError, errorStatus, fmtFunctionError(errorVal))
		return
	}
	firstVal := funcReturn[0].Interface()
	if firstVal == nil {
		return
	}
	if !reflect.TypeOf(firstVal).AssignableTo(errorType) {
		returnVal, err := json.Marshal(firstVal)
		if err != nil {
			writeHTTPErrorResponse(w, http.StatusInternalServerError, errorStatus, fmtFunctionError(err))
			return
		}
		w.Write(returnVal)
	}
}
// validateTypedFunction checks that fn is a function taking exactly one
// parameter and returning at most two values, the last being an error.
// On success it returns a reflect.Value holding a freshly allocated *T
// for the parameter type T.
//
// Improvements: a non-function (or nil) argument previously panicked in
// NumIn; it now returns an error. The "one parameters" grammar in the
// arity error message is also fixed.
func validateTypedFunction(fn interface{}) (*reflect.Value, error) {
	ft := reflect.TypeOf(fn)
	if ft == nil || ft.Kind() != reflect.Func {
		return nil, fmt.Errorf("expected a function, got %v", ft)
	}
	if ft.NumIn() != 1 {
		return nil, fmt.Errorf("expected function to have one parameter, found %d", ft.NumIn())
	}
	if ft.NumOut() > 2 {
		return nil, fmt.Errorf("expected function to have maximum two return values")
	}
	if ft.NumOut() > 0 && !ft.Out(ft.NumOut()-1).AssignableTo(errorType) {
		return nil, fmt.Errorf("expected last return type to be of error")
	}
	var inputType = reflect.New(ft.In(0))
	return &inputType, nil
}
// wrapCloudEventFunction adapts a CloudEvent function into an
// http.Handler via the CloudEvents SDK, logging function errors to
// stderr before propagating them.
//
// Bug fixed: the error was logged with fmt.Fprintf(os.Stderr, msg) —
// using the formatted error text as a format string, so any '%' in the
// error mangled the log line. fmt.Fprint is now used.
func wrapCloudEventFunction(ctx context.Context, fn func(context.Context, cloudevents.Event) error) (http.Handler, error) {
	p, err := cloudevents.NewHTTP()
	if err != nil {
		return nil, fmt.Errorf("failed to create protocol: %v", err)
	}
	// Always log errors returned by the function to stderr
	logErrFn := func(ctx context.Context, ce cloudevents.Event) error {
		err := fn(ctx, ce)
		if err != nil {
			fmt.Fprint(os.Stderr, fmtFunctionError(err))
		}
		return err
	}
	h, err := cloudevents.NewHTTPReceiveHandler(ctx, p, logErrFn)
	if err != nil {
		return nil, fmt.Errorf("failed to create handler: %v", err)
	}
	return convertBackgroundToCloudEvent(h), nil
}
// handleEventFunction dispatches an incoming request either as a legacy
// background event (metadata + data envelope) or as a bare JSON payload
// for the user function.
func handleEventFunction(w http.ResponseWriter, r *http.Request, fn interface{}) {
	body, err := readHTTPRequestBody(r)
	if err != nil {
		writeHTTPErrorResponse(w, http.StatusBadRequest, crashStatus, fmt.Sprintf("%v", err))
		return
	}
	// Background events have data and an associated metadata, so parse those and run if present.
	if metadata, data, err := getBackgroundEvent(body, r.URL.Path); err != nil {
		writeHTTPErrorResponse(w, http.StatusBadRequest, crashStatus, fmt.Sprintf("Error: %s, parsing background event: %s", err.Error(), string(body)))
		return
	} else if data != nil && metadata != nil {
		runBackgroundEvent(w, r, metadata, data, fn)
		return
	}
	// Otherwise, we assume the body is a JSON blob containing the user-specified data structure.
	runUserFunction(w, r, body, fn)
}
func readHTTPRequestBody(r *http.Request) ([]byte, error) {
if r.Body == nil {
return nil, fmt.Errorf("request body not found")
}
body, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, fmt.Errorf("could not read request body %s: %v", r.Body, err)
}
return body, nil
}
// runUserFunction invokes fn using the request's own context.
func runUserFunction(w http.ResponseWriter, r *http.Request, data []byte, fn interface{}) {
	runUserFunctionWithContext(r.Context(), w, r, data, fn)
}

// runUserFunctionWithContext unmarshals data into a fresh value of fn's
// second parameter type and calls fn(ctx, arg) via reflection. A non-nil
// error return becomes a 500; panics are converted by recoverPanic.
func runUserFunctionWithContext(ctx context.Context, w http.ResponseWriter, r *http.Request, data []byte, fn interface{}) {
	// fn's shape (func(context.Context, T) error) was validated at
	// registration time, so In(1) and a single return are safe here.
	argVal := reflect.New(reflect.TypeOf(fn).In(1))
	if err := json.Unmarshal(data, argVal.Interface()); err != nil {
		writeHTTPErrorResponse(w, http.StatusBadRequest, crashStatus, fmt.Sprintf("Error: %s, while converting event data: %s", err.Error(), string(data)))
		return
	}
	defer recoverPanic(w, "user function execution")
	userFunErr := reflect.ValueOf(fn).Call([]reflect.Value{
		reflect.ValueOf(ctx),
		argVal.Elem(),
	})
	if userFunErr[0].Interface() != nil {
		writeHTTPErrorResponse(w, http.StatusInternalServerError, errorStatus, fmtFunctionError(userFunErr[0].Interface()))
		return
	}
}
// fmtFunctionError renders a user-function error for stderr, guaranteeing
// a trailing newline so log lines group correctly.
func fmtFunctionError(err interface{}) string {
	msg := fmt.Sprintf(fnErrorMessageStderrTmpl, err)
	if strings.HasSuffix(msg, "\n") {
		return msg
	}
	return msg + "\n"
}
// writeHTTPErrorResponse logs msg to stderr, stamps the framework status
// header on the response, and sends msg with the given HTTP status code.
func writeHTTPErrorResponse(w http.ResponseWriter, statusCode int, status, msg string) {
	// Ensure logs end with a newline otherwise they are grouped incorrectly in SD.
	if !strings.HasSuffix(msg, "\n") {
		msg += "\n"
	}
	fmt.Fprint(os.Stderr, msg)
	// Flush stdout and stderr when running on GCF. This must be done before writing
	// the HTTP response in order for all logs to appear in GCF.
	if os.Getenv("K_SERVICE") != "" {
		fmt.Println()
		fmt.Fprintln(os.Stderr)
	}
	w.Header().Set(functionStatusHeader, status)
	w.WriteHeader(statusCode)
	fmt.Fprint(w, msg)
}
|
// simple RPC demo function which run a calculation app
// exec: go run client.go -op 1 -a 10 -b 20
package main
import (
"flag"
"fmt"
"log"
"net"
"github.com/zaynjarvis/fyp/rpc/protocol"
)
// Command-line flags selecting the calculator operation and its operands.
var (
	op = flag.Int64("op", 0, "operation for calculation")
	a = flag.Int64("a", 0, "first operand")
	b = flag.Int64("b", 0, "second operand")
)
// main dials the local RPC server, performs one calculator call built
// from the command-line flags, prints the response and closes the
// connection. Any failure is fatal.
func main() {
	flag.Parse()
	conn, err := net.Dial("tcp", "localhost:9700")
	if err != nil {
		panic(err)
	}
	res, err := Call(conn, protocol.CalculatorMethod, &protocol.CalcRequest{Op: protocol.Operator(*op), A: *a, B: *b})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", res)
	if err := conn.Close(); err != nil {
		panic(err)
	}
}
// Call performs a single RPC over conn: a goroutine waits for the
// response while the marshaled request is written, then whichever of the
// two outcomes arrives first is returned.
//
// Fix: errCh and resCh are now buffered (capacity 1). With unbuffered
// channels, an early return on a marshal or write failure left the reader
// goroutine blocked on its send forever — a goroutine leak that also kept
// conn referenced.
func Call(conn net.Conn, method protocol.Method, req interface{}) (interface{}, error) {
	errCh := make(chan error, 1)
	resCh := make(chan interface{}, 1)
	go func() {
		res, err := protocol.ReadResponse(conn)
		if err != nil {
			errCh <- err
			return
		}
		resCh <- res
	}()
	data, err := protocol.MethodMap[method].Marshal(req)
	if err != nil {
		return nil, err
	}
	written, err := conn.Write(data)
	if err != nil {
		return nil, err
	}
	log.Printf("%v bytes sent", written)
	select {
	case err := <-errCh:
		return nil, err
	case res := <-resCh:
		return res, nil
	}
}
|
/*
Your task is to write a program, in any language, that adds two floating point numbers together WITHOUT using any fractional or floating point maths. Integer maths is allowed.
Format
The format for the numbers are strings containing 1's and 0's which represent the binary value of a IEEE 754 32-bit float. For example the number 2.54 would be represented by the string "01000000001000101000111101011100".
Goal
Your program should input two numbers in the above mentioned format, add them together, and output the result in the same format. The shortest answer in any language wins!
Rules
Absolutely no floating point, decimal, or any kind of non-integer maths functions are allowed.
You can assume that the input is clean (i.e. contains only 1's and 0's).
You can assume that the inputs are numbers, and not Inf, -Inf, or NaN or subnormal. However, if the result is greater than the max value or smaller than the min value, you should return Inf and -Inf respectively. A subnormal (denormal) result may be flushed to 0.
You do not have to handle rounding properly. Don't worry if your results are a few bits out.
Tests
To test your programs, you can convert between decimal and floating point binary numbers using this tool.
1000 + 0.5 = 1000.5
01000100011110100000000000000000 + 00111111000000000000000000000000 = 01000100011110100010000000000000
float.MaxValue + float.MaxValue = Infinity
01111111011111111111111111111111 + 01111111011111111111111111111111 = 01111111100000000000000000000000
321.123 + -123.321 = 197.802
01000011101000001000111110111110 + 11000010111101101010010001011010 = 01000011010001011100110101010000
Good luck!
*/
package main
import (
"math"
)
// main runs the three worked examples from the challenge statement above:
// 1000 + 0.5, MaxFloat32 overflow to +Inf, and a case with rounding.
func main() {
	test(0b01000100011110100000000000000000, 0b00111111000000000000000000000000, 0b01000100011110100010000000000000)
	test(0b01111111011111111111111111111111, 0b01111111011111111111111111111111, 0b01111111100000000000000000000000)
	test(0b01000011101000001000111110111110, 0b11000010111101101010010001011010, 0b01000011010001011100110101010000)
}
// assert panics when the condition is false; a minimal stand-in for a
// testing framework in this standalone demo.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// test adds the bit patterns x and y and checks the sum against r,
// tolerating one ULP of rounding difference as the challenge permits.
func test(x, y, r uint32) {
	z := add(x, y)
	assert(z == r || z+1 == r)
}
// add returns the IEEE 754 single-precision sum of two operands given as
// raw 32-bit patterns.
//
// NOTE(review): this leans on the hardware float adder, which the puzzle
// statement above explicitly forbids — it is the tongue-in-cheek answer.
func add(x, y uint32) uint32 {
	lhs := math.Float32frombits(x)
	rhs := math.Float32frombits(y)
	return math.Float32bits(lhs + rhs)
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oam
import (
"context"
"strings"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"github.com/kubevela/workflow/pkg/cue/model/value"
"github.com/kubevela/workflow/pkg/mock"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
)
// TestParser exercises provider.ApplyComponent: first the lookup failure
// when no "value" field exists, then a successful apply whose workload
// manifest, trait outputs and "Wait" phase are all checked.
func TestParser(t *testing.T) {
	r := require.New(t)
	p := &provider{
		apply: simpleComponentApplyForTest,
	}
	act := &mock.Action{}
	v, err := value.NewValue("", nil, "")
	r.NoError(err)
	// Without a "value" field the apply must fail with a lookup error.
	err = p.ApplyComponent(nil, nil, v, act)
	r.Equal(err.Error(), "failed to lookup value: var(path=value) not exist")
	v.FillObject(map[string]interface{}{}, "value")
	err = p.ApplyComponent(nil, nil, v, act)
	r.NoError(err)
	output, err := v.LookupValue("output")
	r.NoError(err)
	outStr, err := output.String()
	r.NoError(err)
	r.Equal(outStr, `apiVersion: "v1"
kind: "Pod"
metadata: {
labels: {
app: "web"
}
name: "rss-site"
}
`)
	outputs, err := v.LookupValue("outputs")
	r.NoError(err)
	outsStr, err := outputs.String()
	r.NoError(err)
	r.Equal(outsStr, `service: {
apiVersion: "v1"
kind: "Service"
metadata: {
labels: {
"trait.oam.dev/resource": "service"
}
name: "service"
}
}
`)
	r.Equal(act.Phase, "Wait")
	// NOTE(review): the tail below flips testHealthy and builds a fresh
	// value but never calls ApplyComponent again, so the final Phase check
	// only observes a brand-new mock — this looks like vestigial code.
	testHealthy = true
	act = &mock.Action{}
	_, err = value.NewValue("", nil, "")
	r.NoError(err)
	r.Equal(act.Phase, "")
}
// TestLoadComponent verifies provider.LoadComponent: the app's components are
// rendered into the "value" field with each field as a CUE default
// (*"x" | _), and an "app" field supplied in the input value overrides the
// provider's own application as the source of components.
func TestLoadComponent(t *testing.T) {
	r := require.New(t)
	p := &provider{
		app: &v1beta1.Application{
			Spec: v1beta1.ApplicationSpec{
				Components: []common.ApplicationComponent{
					{
						Name:       "c1",
						Type:       "web",
						Properties: &runtime.RawExtension{Raw: []byte(`{"image": "busybox"}`)},
					},
				},
			},
		},
	}
	v, err := value.NewValue(``, nil, "")
	r.NoError(err)
	err = p.LoadComponent(nil, nil, v, nil)
	r.NoError(err)
	s, err := v.String()
	r.NoError(err)
	// Fields are defaults (marked *...| _) so downstream CUE may override them.
	r.Equal(s, `value: {
	c1: {
		name: *"c1" | _
		type: *"web" | _
		properties: {
			image: *"busybox" | _
		}
	}
}
`)
	// When the input value carries an "app", components come from it instead.
	overrideApp := `app: {
	apiVersion: "core.oam.dev/v1beta1"
	kind: "Application"
	metadata: {
		name: "test"
		namespace: "default"
	}
	spec: {
		components: [{
			name: "c2"
			type: "web"
			properties: {
				image: "busybox"
			}
		}]
	}
}
`
	overrideValue, err := value.NewValue(overrideApp, nil, "")
	r.NoError(err)
	err = p.LoadComponent(nil, nil, overrideValue, nil)
	r.NoError(err)
	// c2 comes from the override app, not the provider's app (which has c1).
	_, err = overrideValue.LookupValue("value", "c2")
	r.NoError(err)
}
// TestLoadComponentInOrder verifies that LoadComponentInOrder renders the
// app's components as a CUE list that preserves declaration order (unlike
// LoadComponent's struct keyed by name).
func TestLoadComponentInOrder(t *testing.T) {
	r := require.New(t)
	p := &provider{
		app: &v1beta1.Application{
			Spec: v1beta1.ApplicationSpec{
				Components: []common.ApplicationComponent{
					{
						Name:       "c1",
						Type:       "web",
						Properties: &runtime.RawExtension{Raw: []byte(`{"image": "busybox"}`)},
					},
					{
						Name:       "c2",
						Type:       "web2",
						Properties: &runtime.RawExtension{Raw: []byte(`{"image": "busybox"}`)},
					},
				},
			},
		},
	}
	v, err := value.NewValue(``, nil, "")
	r.NoError(err)
	err = p.LoadComponentInOrder(nil, nil, v, nil)
	r.NoError(err)
	s, err := v.String()
	r.NoError(err)
	// c1 must precede c2, matching the Components slice order above.
	r.Equal(s, `value: [{
	name: "c1"
	type: "web"
	properties: {
		image: "busybox"
	}
}, {
	name: "c2"
	type: "web2"
	properties: {
		image: "busybox"
	}
}]
`)
}
// testHealthy controls the health flag returned by simpleComponentApplyForTest.
var testHealthy bool

// simpleComponentApplyForTest is a stub component-apply function for the
// provider tests. It returns a fixed Pod workload plus a single Service trait;
// when comp.Name is non-empty it renames both, and a name containing "error"
// simulates an apply failure. The returned health flag mirrors testHealthy.
func simpleComponentApplyForTest(_ context.Context, comp common.ApplicationComponent, _ *value.Value, _, _ string) (*unstructured.Unstructured, []*unstructured.Unstructured, bool, error) {
	workload := new(unstructured.Unstructured)
	// The JSON is a compile-time constant, but propagate the error instead of
	// silently discarding it as the original did (go vet/errcheck hygiene).
	if err := workload.UnmarshalJSON([]byte(`{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "rss-site",
    "labels": {
      "app": "web"
    }
  }
}`)); err != nil {
		return nil, nil, false, err
	}
	if comp.Name != "" {
		workload.SetName(comp.Name)
		// A name containing "error" makes the stub fail, for error-path tests.
		if strings.Contains(comp.Name, "error") {
			return nil, nil, false, errors.Errorf("bad component")
		}
	}
	trait := new(unstructured.Unstructured)
	if err := trait.UnmarshalJSON([]byte(`{
  "apiVersion": "v1",
  "kind": "Service",
  "metadata": {
    "name": "service",
    "labels": {
      "trait.oam.dev/resource": "service"
    }
  }
}`)); err != nil {
		return nil, nil, false, err
	}
	if comp.Name != "" {
		trait.SetName(comp.Name)
	}
	traits := []*unstructured.Unstructured{trait}
	return workload, traits, testHealthy, nil
}
|
// Copyright © 2018 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
entity "github.com/7cthunder/agenda/entity"
"github.com/spf13/cobra"
)
// addMeetingUserCmd represents the addMeetingUser command.
// It validates, in order: a user is logged in; a title and at least one
// participator were supplied; every participator exists; the current user
// sponsors the meeting; no duplicate / already-added / self entries; and no
// schedule conflicts — then persists each participator into the meeting.
var addMeetingUserCmd = &cobra.Command{
	Use:   "addmu -t=[title] [participators]",
	Short: "Add meeting members to the meeting which current user created",
	Long: `Add meeting members into the meeting:
1. Make sure you have sponsored the meeting with the title
2. Make sure the participators have not repeat and have not been in the meeting
3. Make sure there aren't conflicts between participators' time and meeting's time `,
	Run: func(cmd *cobra.Command, args []string) {
		logger := entity.NewLogger("[addmu]")
		instance := entity.GetStorage()
		curU := instance.GetCurUser()
		title, _ := cmd.Flags().GetString("title")
		// Positional args (everything that is not a flag) are the participators.
		participators := cmd.Flags().Args()
		logger.Println("You are calling addmu -t="+title+" ", participators)
		// Must be logged in.
		if curU.GetName() == "" {
			logger.Println("ERROR: You have not logged in yet, please log in first!")
			return
		}
		// NOTE(review): the message says "delete" — looks copy-pasted from a
		// delete command; confirm the intended wording.
		if title == "" {
			logger.Println("ERROR: Please input the title of the meeting you want to delete")
			return
		}
		if len(participators) == 0 {
			logger.Println("ERROR: You must add someone")
			return
		}
		// Every named participator must be a registered user.
		for i := 0; i < len(participators); i++ {
			filter := func(u *entity.User) bool {
				return u.GetName() == participators[i]
			}
			if len(instance.QueryUser(filter)) == 0 {
				logger.Println("ERROR: " + participators[i] + " isn't existed")
				return
			}
		}
		// The meeting must exist and be sponsored by the current user.
		filter1 := func(m *entity.Meeting) bool {
			return curU.GetName() == m.GetSponsor() && title == m.GetTitle()
		}
		meeting := instance.QueryMeeting(filter1)
		if len(meeting) == 0 {
			logger.Println("ERROR: You don't sponsor this meeting")
			return
		}
		// Reject duplicate names within the argument list itself.
		for i := 0; i < len(participators); i++ {
			for j := i + 1; j < len(participators); j++ {
				if participators[i] == participators[j] {
					logger.Println("ERROR: The participators you add can't repeat")
					return
				}
			}
		}
		// Reject anyone who is already a member of the meeting.
		for _, p := range participators {
			if meeting[0].IsParticipator(p) {
				logger.Println("ERROR: " + p + " is in the meeting")
				return
			}
		}
		// The sponsor cannot add themselves.
		for _, p := range participators {
			if curU.GetName() == p {
				logger.Println("ERROR: You add yourself wrongly")
				return
			}
		}
		// Reject participators whose other meetings (as sponsor or member)
		// overlap this meeting's time window.
		startTime := meeting[0].GetStartTime()
		endTime := meeting[0].GetEndTime()
		for _, p := range participators {
			filter2 := func(m *entity.Meeting) bool {
				mST := m.GetStartTime()
				mET := m.GetEndTime()
				// Overlap: our start falls inside m, our end falls inside m,
				// or we fully enclose m.
				if (m.IsParticipator(p) || m.GetSponsor() == p) &&
					((startTime.IsGreaterThanEqual(mST) && startTime.IsLess(mET)) ||
						(endTime.IsGreater(mST) && endTime.IsLessThanEqual(mET)) ||
						(startTime.IsLessThanEqual(mST) && endTime.IsGreaterThanEqual(mET))) {
					return true
				}
				return false
			}
			if len(instance.QueryMeeting(filter2)) > 0 {
				logger.Println("ERROR: There are conflicts between " + p + "'s time and meeting's time ")
				return
			}
		}
		// All checks passed: persist each addition.
		for _, p := range participators {
			mSwitch := func(m *entity.Meeting) {
				m.AddParticipator(p)
			}
			instance.UpdateMeeting(filter1, mSwitch)
		}
		logger.Println("addmu successfully!")
	},
}
func init() {
rootCmd.AddCommand(addMeetingUserCmd)
addMeetingUserCmd.Flags().StringP("title", "t", "", "meeting title")
}
|
package main
import (
"context"
"flag"
"fmt"
"kafkaAPI/kafkaUtils"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/rs/zerolog/log"
"github.com/segmentio/kafka-go"
)
// Command-line configuration for the Kafka bridge; populated by flag parsing
// in main. (An empty duplicate var() group that followed this declaration has
// been removed as dead code.)
var (
	kafkaBrokerURL     string
	kafkaVerbose       bool
	kafkaTopicIn       string
	kafkaTopicOut      string
	kafkaConsumerGroup string
	kafkaClientID      string
)
// main bridges two Kafka topics: it consumes messages from kafka-topicIn as
// part of a consumer group and republishes each payload to kafka-topicOut,
// printing every forwarded message. SIGINT/SIGTERM shut the bridge down.
func main() {
	flag.StringVar(&kafkaBrokerURL, "kafka-brokers", "localhost:19092,localhost:29092,localhost:39092", "Kafka brokers in comma separated value")
	flag.BoolVar(&kafkaVerbose, "kafka-verbose", true, "Kafka verbose logging")
	flag.StringVar(&kafkaTopicIn, "kafka-topicIn", "foo", "Kafka topic. Only one topic per worker.")
	flag.StringVar(&kafkaTopicOut, "kafka-topicOut", "foo2", "Kafka topic. Only one topic per worker.")
	flag.StringVar(&kafkaConsumerGroup, "kafka-consumer-group", "consumer-group", "Kafka consumer group")
	flag.StringVar(&kafkaClientID, "kafka-client-id", "my-client-id", "Kafka client id")
	flag.Parse()

	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

	brokers := strings.Split(kafkaBrokerURL, ",")
	// Reader that consumes from the input topic.
	config := kafka.ReaderConfig{
		Brokers: brokers,
		// BUG FIX: the -kafka-consumer-group flag was declared but never used;
		// the client id was incorrectly passed as the consumer group id.
		GroupID:         kafkaConsumerGroup,
		Topic:           kafkaTopicIn,
		MinBytes:        10e3,            // 10KB
		MaxBytes:        10e6,            // 10MB
		MaxWait:         1 * time.Second, // Maximum amount of time to wait for new data to come when fetching batches of messages from kafka.
		ReadLagInterval: -1,
	}
	reader := kafka.NewReader(config)

	// Producer for the output topic.
	kafkaProducer, err := kafkaUtils.Configure(brokers, kafkaClientID, kafkaTopicOut)
	if err != nil {
		log.Error().Str("error", err.Error()).Msg("unable to configure kafkaProducer")
		return
	}
	defer kafkaProducer.Close()
	defer reader.Close()

	for {
		// BUG FIX: sigchan was created but never drained, so signals were
		// dropped. Poll it between messages so the bridge can exit cleanly.
		// (ReadMessage still blocks, so a signal is handled once the next
		// message arrives or the read fails.)
		select {
		case sig := <-sigchan:
			log.Debug().Msgf("caught signal %v, closing down", sig)
			return
		default:
		}
		m, err := reader.ReadMessage(context.Background())
		if err != nil {
			log.Error().Msgf("error while receiving message: %s", err.Error())
			continue
		}
		value := m.Value
		log.Debug().Msg("Got a Message")
		// Republish the raw payload (nil key) to the output topic.
		if err := kafkaUtils.Push(context.Background(), nil, m.Value); err != nil {
			log.Error().Msg("Kafka write to topic Out failed")
		}
		fmt.Printf("message at topic/partition/offset %v/%v/%v: %s\n", m.Topic, m.Partition, m.Offset, string(value))
	}
}
|
/*
chan : bidirectional
chan<- send unidirectional only write
<-chan recieve unidirectional only read
*/
package main
import "fmt"
// main demonstrates directional channel parameters: the same buffered channel
// is handed to processWrite as send-only and to processRead as receive-only.
func main() {
	pipe := make(chan int, 3)
	processWrite(pipe)
	processRead(pipe)
	close(pipe)
}
// processWrite takes a send-only view of the channel and pushes the value 2
// into it.
func processWrite(ch chan<- int) {
	payload := 2
	ch <- payload
}
// processRead takes a receive-only view of the channel and prints the first
// value it receives.
func processRead(ch <-chan int) {
	v := <-ch
	fmt.Println(v)
}
|
package action
import (
"errors"
"github.com/agiledragon/trans-dsl"
"github.com/agiledragon/trans-dsl/test/context"
)
// StubConnectServer is a test action simulating a connect-to-server step.
type StubConnectServer struct {
}

// Exec simulates one connection attempt, driven by the shared StubInfo:
// Y == -1 yields a "panic" error; while FailTimes is positive each call
// consumes one failure and returns "failed"; otherwise it succeeds.
func (s *StubConnectServer) Exec(transInfo *transdsl.TransInfo) error {
	info := transInfo.AppInfo.(*context.StubInfo)
	switch {
	case info.Y == -1:
		return errors.New("panic")
	case info.FailTimes > 0:
		info.FailTimes--
		return errors.New("failed")
	default:
		return nil
	}
}

// Rollback is a no-op; the stub has nothing to undo.
func (s *StubConnectServer) Rollback(transInfo *transdsl.TransInfo) {
}
|
package moxxiConf
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestHandlerLocFlag verifies that HandlerLocFlag accepts values via Set,
// renders them space-joined via String, and exposes them by index via GetOne.
// The input "three" (no leading slash) is accepted without error but is
// absent from the expected output, i.e. it is dropped silently.
func TestHandlerLocFlag(t *testing.T) {
	var testData = []string{
		"/one",
		"/two",
		"three",
		"/four",
	}
	var expected = []string{
		"/one",
		"/two",
		"/four",
	}
	testWork := new(HandlerLocFlag)
	for _, each := range testData {
		err := testWork.Set(each)
		assert.NoError(t, err, "there should not have been a problem adding an item")
	}
	assert.Equal(t, "/one /two /four", testWork.String(), "the test input and current value of the test should be equal")
	for i := 0; i < len(expected); i++ {
		assert.Equal(t, expected[i], testWork.GetOne(i), "one item from the test was incorrect")
	}
	junkTest := new(HandlerLocFlag)
	assert.Equal(t, "", junkTest.String(), "should be empty")
	// BUG FIX: the Set error was ignored here, and the final assertion reused
	// the copy-pasted message "should be empty" for a non-empty expectation.
	assert.NoError(t, junkTest.Set("/some/real/junk"), "adding a valid location should not error")
	assert.Equal(t, "/some/real/junk", junkTest.String(), "should contain the single added location")
}
// TestIsNotAlphaNum checks the isNotAlphaNum regexp: purely alphanumeric
// strings must not match, strings containing symbols must match.
func TestIsNotAlphaNum(t *testing.T) {
	cases := []struct {
		in  string
		out bool
	}{
		{in: "abcde", out: false},
		{in: "12345", out: false},
		{in: "abcd123", out: false},
		{in: "a$c&1^3", out: true},
	}
	for id, tc := range cases {
		assert.Equal(t, tc.out, isNotAlphaNum.MatchString(tc.in),
			"test #%d - results did not match expected - input %s", id, tc.in)
	}
}
|
package demo
// // #include <stdio.h>
// // #include <stdlib.h>
// /*
// void print(char *s){
// printf("print used by C: %s\n", s);
// };
// void SayHello(const char* s);
// */
// import "C"
// import "unsafe"
// // The import "C" statement enables the CGO feature; the comment block immediately preceding it is special syntax containing ordinary C code.
// func main() {
// s := "Hello"
// cs := C.CString(s)
// defer C.free(unsafe.Pointer(cs))
// C.print(cs)
// C.SayHello(C.CString("Hello World\n"))
// }
|
package lib
// Error is a domain error carrying an underlying cause, an error category t,
// and an optional human-readable message override.
type Error struct {
	cause       error
	t           int
	msgOverride string
}

// Error categories.
const (
	ResourceUnreachable = iota
	UnsupportedContentType
	TransformationFailure
	EncodingFailure
	InvalidParams
)

// codeMap maps an error category to the HTTP status code reported to clients.
var codeMap = map[int]int{
	ResourceUnreachable:    404,
	UnsupportedContentType: 400,
	TransformationFailure:  500,
	EncodingFailure:        500,
	InvalidParams:          400,
}

// msgMap maps an error category to its default user-facing message.
var msgMap = map[int]string{
	ResourceUnreachable:    "Unable to access specified url",
	UnsupportedContentType: "Content type is not supported, supported formats: jpeg, gif, png",
	TransformationFailure:  "Sorry, but something went wrong, our support engineers are already notified",
	EncodingFailure:        "Sorry, but something went wrong, our support engineers are already notified",
	InvalidParams:          "Request params are invalid, please, verify that url is a valid url, width and height are positive integers",
}

// NewError builds an Error of category t wrapping cause. An optional first
// variadic argument overrides the category's default message.
func NewError(cause error, t int, msgOverride ...string) Error {
	override := ""
	if len(msgOverride) != 0 {
		override = msgOverride[0]
	}
	return Error{cause: cause, t: t, msgOverride: override}
}

// Error implements the error interface: the override wins, then the cause's
// message, then the empty string.
func (e Error) Error() string {
	switch {
	case e.msgOverride != "":
		return e.msgOverride
	case e.cause != nil:
		return e.cause.Error()
	default:
		return ""
	}
}

// Code returns the HTTP status code for the error's category, defaulting to
// 500 for unknown categories.
func (e Error) Code() int {
	if code, ok := codeMap[e.t]; ok {
		return code
	}
	return 500
}

// GenericMsg is the fallback user-facing message for unknown categories.
const GenericMsg = "Sorry, but something went wrong, our support engineers are already notified"

// Msg returns the user-facing message: the override wins, then the category
// default, then GenericMsg.
func (e Error) Msg() string {
	if e.msgOverride != "" {
		return e.msgOverride
	}
	if m, ok := msgMap[e.t]; ok {
		return m
	}
	return GenericMsg
}
|
package gt2d
import (
"testing"
)
// TestVector2DAdd checks both the value-returning Add and the in-place AddIP.
func TestVector2DAdd(t *testing.T) {
	a := Vector2D{1, 1}
	b := Vector2D{5, 6}
	sum := a.Add(&b)
	if sum.X != 6 || sum.Y != 7 {
		t.FailNow()
	}
	a.AddIP(&b)
	if a.X != 6 || a.Y != 7 {
		t.FailNow()
	}
}
// TestVector2DSub checks both the value-returning Sub and the in-place SubIP.
func TestVector2DSub(t *testing.T) {
	a := Vector2D{1, 1}
	b := Vector2D{5, 6}
	diff := a.Sub(&b)
	if diff.X != -4 || diff.Y != -5 {
		t.FailNow()
	}
	a.SubIP(&b)
	if a.X != -4 || a.Y != -5 {
		t.FailNow()
	}
}
// TestVector2DDot checks the dot product: 1*5 + 1*6 = 11.
func TestVector2DDot(t *testing.T) {
	a := Vector2D{1, 1}
	b := Vector2D{5, 6}
	if got := a.Dot(&b); got != 11 {
		t.FailNow()
	}
}
|
package Core
// Object is an entity identified by a GUID, carrying a class name plus
// delegated property and record managers.
type Object struct {
	guid GUID
	className string
	propMgr PropertyManager
	recordMgr RecordManager
}
// NewObject constructs an Object with the given guid.
//
// BUG FIX: the original built the Object and then discarded it (the function
// had no return value), so callers could never obtain the instance. Returning
// *Object is backward compatible: existing call statements that ignore the
// result still compile in Go.
func NewObject(guid GUID) *Object {
	obj := new(Object)
	obj.guid = guid
	return obj
}
// GetPropInt returns the integer value of the named property, delegating to
// the object's PropertyManager.
func (p *Object)GetPropInt(prop string)int{
	return p.propMgr.GetPropertyInt(prop)
}
// SetPropertyValue sets the named property to value, delegating to the
// object's PropertyManager.
func (p *Object) SetPropertyValue(propName string, value interface{}) {
	p.propMgr.SetProperty(propName,value)
}
|
package utils
import (
"math/rand"
"time"
)
// charset is the base-62 digit alphabet (digits, upper, lower) used for token encoding.
const charset string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

// idSize is the maximum length of a generated token.
const idSize int = 32

// firstJan2014 is the Unix timestamp (seconds) of 1 Jan 2014 UTC; tokens
// encode time relative to this custom epoch to keep the time component short.
const firstJan2014 = 1388534400
// GenerateToken returns a token of up to idSize characters: a base-62 encoded
// timestamp (nanoseconds since 1 Jan 2014) followed by base-62 encoded random
// bits, with the random part truncated so the total does not exceed idSize.
func GenerateToken() string {
	now := time.Now().UTC().UnixNano()
	timePart := nanotimeToBaseN(now, charset)
	rand.Seed(now)
	randomPart := baseN(rand.Int63(), charset)
	// Trim the random suffix from the left so len(timePart+randomPart) <= idSize.
	if room := idSize - len(timePart); len(randomPart) > room {
		randomPart = randomPart[len(randomPart)-room:]
	}
	return timePart + randomPart
}
// baseN encodes the non-negative integer num using charset as the digit
// alphabet (base len(charset)), most-significant digit first. num == 0
// encodes to the charset's first character.
func baseN(num int64, charset string) string {
	base := int64(len(charset))
	var digits []byte
	for {
		digits = append([]byte{charset[num%base]}, digits...)
		num /= base
		if num == 0 {
			break
		}
	}
	return string(digits)
}
func nanotimeToBaseN(nanotime int64, charset string) string {
// Timestmap of 1st Jan 2014!!
// 1388534400
epochTs := int64(firstJan2014 * 1000 * 1000 * 1000)
nanotime -= epochTs
bN := baseN(nanotime, charset)
return bN
}
|
package collection_test
import (
"bufio"
"os"
"strings"
"testing"
p2 "go.jlucktay.dev/golang-workbench/interfaces/pp2a-asg2"
)
// BenchmarkSearchOAL benchmarks word search using the ordered array with linear search.
func BenchmarkSearchOAL(b *testing.B) {
	// Timer is stopped here; searchCollection restarts it around the searches.
	b.StopTimer()
	runSearchBenchmark(&p2.OrdArrayLinear{}, b)
}

// BenchmarkSearchOAB benchmarks word search using the ordered array with binary search.
func BenchmarkSearchOAB(b *testing.B) {
	b.StopTimer()
	runSearchBenchmark(&p2.OrdArrayBinary{}, b)
}

// BenchmarkSearchOLL benchmarks word search using the ordered linked list.
func BenchmarkSearchOLL(b *testing.B) {
	b.StopTimer()
	runSearchBenchmark(&p2.OrdLinkedList{}, b)
}

// BenchmarkSearchUBST benchmarks word search using the unbalanced binary search tree.
func BenchmarkSearchUBST(b *testing.B) {
	b.StopTimer()
	runSearchBenchmark(&p2.UnbalBinarySearchTree{}, b)
}
// runSearchBenchmark fills wc with the dictionary's words, then per benchmark
// iteration searches it for every word of three books. Timing is controlled
// inside searchCollection (StartTimer/StopTimer around the search loops), and
// the collection is freed once the benchmark finishes.
func runSearchBenchmark(wc p2.WordCollection, b *testing.B) {
	fillCollection(wc, mustOpen(dictionary), b)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		searchCollection(wc, mustOpen(book1), b)
		searchCollection(wc, mustOpen(book2), b)
		searchCollection(wc, mustOpen(book3), b)
	}
	wc.FreeCollection()
}
// searchCollection looks up every word of the given text file in the
// WordCollection, where a 'word' is what remains after lower-casing each line
// and splitting it with the package split() helper (driven by the 'delims'
// constant). Only the scan/search work is timed, via b.StartTimer and
// b.StopTimer; the file is closed before returning.
func searchCollection(wc p2.WordCollection, book *os.File, b *testing.B) {
	defer book.Close()
	scanner := bufio.NewScanner(book)
	scanner.Split(bufio.ScanLines)
	b.StartTimer() // timed region begins here
	for scanner.Scan() {
		line := strings.ToLower(scanner.Text())
		for _, needle := range strings.FieldsFunc(line, split) {
			wc.SearchCollection(needle)
		}
	}
	if errScan := scanner.Err(); errScan != nil {
		b.Fatal(errScan)
	}
	b.StopTimer() // timed region ends here
}
|
// Package parse implements parsing of the BUILD files via an embedded Python interpreter.
//
// The actual work here is done by an embedded PyPy instance. Various rules are built in to
// the binary itself using go-bindata to embed the .py files; these are always available to
// all programs which is rather nice, but it does mean that must be run before 'go run' etc
// will work as expected.
package parse
import (
"fmt"
"path"
"sync"
"core"
)
// Parse parses the package corresponding to a single build label. The label can be :all to add all targets in a package.
// It is not an error if the package has already been parsed.
//
// By default, after the package is parsed, any targets that are now needed for the build and ready
// to be built are queued, and any new packages are queued for parsing. When a specific label is requested
// this is straightforward, but when parsing for pseudo-targets like :all and ..., various flags affect it:
// If 'noDeps' is true, then no new packages will be added and no new targets queued.
// 'include' and 'exclude' refer to the labels of targets to be added. If 'include' is non-empty then only
// targets with at least one matching label are added. Any targets with a label in 'exclude' are not added.
// 'forSubinclude' is set when the parse is required for a subinclude target so should proceed
// even when we're not otherwise building targets.
func Parse(tid int, state *core.BuildState, label, dependor core.BuildLabel, noDeps bool, include, exclude []string, forSubinclude bool) {
	// Deeper layers signal parse failures by panicking; convert that into a
	// logged build error instead of crashing the worker goroutine.
	defer func() {
		if r := recover(); r != nil {
			state.LogBuildError(tid, label, core.ParseFailed, fmt.Errorf("%s", r), "Failed to parse package")
		}
	}()
	// First see if this package already exists; once it's in the graph it will have been parsed.
	pkg := state.Graph.Package(label.PackageName)
	if pkg != nil {
		// Does exist, all we need to do is toggle on this target
		activateTarget(state, pkg, label, dependor, noDeps, forSubinclude, include, exclude)
		return
	}
	// We use the name here to signal undeferring of a package. If we get that we need to retry the package regardless.
	if dependor.Name != "_UNDEFER_" && !firstToParse(label, dependor) {
		// Check this again to avoid a potential race
		if pkg = state.Graph.Package(label.PackageName); pkg != nil {
			activateTarget(state, pkg, label, dependor, noDeps, forSubinclude, include, exclude)
		} else if forSubinclude {
			// Need to make sure this guy happens, so re-add him to the queue.
			// It should be essentially idempotent but we need to make sure that the task with
			// forSubinclude = true is processed at some point, not just ones where it's false.
			// NOTE(review): this uses the global core.State while the rest of the
			// function uses the state parameter — confirm they are the same object.
			log.Debug("Re-adding pending parse for %s", label)
			core.State.AddPendingParse(label, dependor, true)
		} else {
			log.Debug("Skipping pending parse for %s", label)
		}
		return
	}
	// If we get here then it falls to us to parse this package
	state.LogBuildResult(tid, label, core.PackageParsing, "Parsing...")
	// parsePackage returns nil to indicate the parse was deferred pending a
	// subinclude target being built first.
	pkg = parsePackage(state, label, dependor)
	if pkg == nil {
		state.LogBuildResult(tid, label, core.PackageParsed, "Deferred")
		return
	}
	// Now add any lurking pending targets for this package.
	pendingTargetMutex.Lock()
	pending := pendingTargets[label.PackageName] // Must be present.
	pendingTargets[label.PackageName] = map[string][]core.BuildLabel{} // Empty this to free memory, but leave a sentinel
	log.Debug("Retrieved %d pending targets for %s", len(pending), label)
	pendingTargetMutex.Unlock() // Nothing will look up this package in the map again.
	for targetName, dependors := range pending {
		for _, dependor := range dependors {
			log.Debug("Undeferring pending target %s now we've got %s", dependor, targetName)
			lbl := core.BuildLabel{PackageName: label.PackageName, Name: targetName}
			activateTarget(state, pkg, lbl, dependor, noDeps, forSubinclude, include, exclude)
		}
	}
	state.LogBuildResult(tid, label, core.PackageParsed, "Parsed")
}
// activateTarget marks a target as active (ie. to be built) and adds its dependencies as pending parses.
// Panics (caught by Parse) if a specifically requested target is missing from
// the freshly parsed package.
func activateTarget(state *core.BuildState, pkg *core.Package, label, dependor core.BuildLabel, noDeps, forSubinclude bool, include, exclude []string) {
	if !label.IsAllTargets() && state.Graph.Target(label) == nil {
		// Package parsed fine but the requested target isn't in it.
		msg := fmt.Sprintf("Parsed build file %s/BUILD but it doesn't contain target %s", label.PackageName, label.Name)
		if dependor != core.OriginalTarget {
			msg += fmt.Sprintf(" (depended on by %s)", dependor)
		}
		panic(msg + suggestTargets(pkg, label, dependor))
	}
	if noDeps && !dependor.IsAllTargets() { // IsAllTargets indicates requirement for parse
		return // Some kinds of query don't need a full recursive parse.
	} else if label.IsAllTargets() {
		for _, target := range pkg.Targets {
			// Don't activate targets that were added in a post-build function; that causes a race condition
			// between the post-build functions running and other things trying to activate them too early.
			if target.ShouldInclude(include, exclude) && !target.AddedPostBuild {
				// Must always do this for coverage because we need to calculate sources of
				// non-test targets later on.
				if !state.NeedTests || target.IsTest || state.NeedCoverage {
					addDep(state, target.Label, dependor, false, dependor.IsAllTargets())
				}
			}
		}
	} else {
		for _, l := range state.Graph.DependentTargets(dependor, label) {
			// We use :all to indicate a dependency needed for parse.
			addDep(state, l, dependor, false, forSubinclude || dependor.IsAllTargets())
		}
	}
}
// pendingTargetMutex arbitrates single access to the two maps below.
var pendingTargetMutex sync.Mutex
// pendingTargets maps package name -> target name -> labels that requested its parse.
var pendingTargets = map[string]map[string][]core.BuildLabel{}
// deferredParses maps package name -> target name -> package names that're waiting for it.
var deferredParses = map[string]map[string][]string{}
// firstToParse reports whether the caller is the first to request a parse of
// label's package and should therefore go on to parse it. It returns true
// exactly once per package; later callers get false and their dependor is
// recorded in pendingTargets for activation once the parse completes.
func firstToParse(label, dependor core.BuildLabel) bool {
	pendingTargetMutex.Lock()
	defer pendingTargetMutex.Unlock()
	if pkg, present := pendingTargets[label.PackageName]; present {
		// Someone beat us to it; remember this dependor for later activation.
		pkg[label.Name] = append(pkg[label.Name], dependor)
		return false
	}
	pendingTargets[label.PackageName] = map[string][]core.BuildLabel{label.Name: {dependor}}
	return true
}
// deferParse defers the parsing of a package until the given label has been built.
// Returns true if it was deferred, or false if it's already built.
func deferParse(label core.BuildLabel, pkg *core.Package) bool {
	pendingTargetMutex.Lock()
	defer pendingTargetMutex.Unlock()
	// Nothing to defer if the subinclude target is already built.
	if target := core.State.Graph.Target(label); target != nil && target.State() >= core.Built {
		return false
	}
	log.Debug("Deferring parse of %s pending %s", pkg.Name, label)
	// Record that pkg is waiting on label so UndeferAnyParses can resume it
	// once that target has been built.
	if m, present := deferredParses[label.PackageName]; present {
		m[label.Name] = append(m[label.Name], pkg.Name)
	} else {
		deferredParses[label.PackageName] = map[string][]string{label.Name: {pkg.Name}}
	}
	log.Debug("Adding pending parse for %s", label)
	core.State.AddPendingParse(label, core.BuildLabel{PackageName: pkg.Name, Name: "all"}, true)
	return true
}
// UndeferAnyParses un-defers the parsing of a package if it depended on some subinclude target being built.
func UndeferAnyParses(state *core.BuildState, target *core.BuildTarget) {
	pendingTargetMutex.Lock()
	defer pendingTargetMutex.Unlock()
	if m, present := deferredParses[target.Label.PackageName]; present {
		if s, present := m[target.Label.Name]; present {
			for _, deferredPackageName := range s {
				log.Debug("Undeferring parse of %s", deferredPackageName)
				// "_UNDEFER_" is the magic dependor name that Parse recognises
				// as a signal to retry the package unconditionally.
				state.AddPendingParse(
					core.BuildLabel{PackageName: deferredPackageName, Name: getDependingTarget(deferredPackageName)},
					core.BuildLabel{PackageName: deferredPackageName, Name: "_UNDEFER_"},
					false,
				)
			}
			delete(m, target.Label.Name) // Don't need this any more
		}
	}
}
// getDependingTarget returns the name of any one target in packageName that
// is still awaiting parse, falling back to "all" (which should not happen in
// practice).
func getDependingTarget(packageName string) string {
	// We need a label in this package that actually needs to be built;
	// any entry in the pending-target map for it will do.
	if targets, ok := pendingTargets[packageName]; ok {
		for name := range targets {
			return name
		}
	}
	// We shouldn't really get here, of course.
	log.Errorf("No pending target entry for %s at deferral. Must assume :all.", packageName)
	return "all"
}
// parsePackage performs the initial parse of a package.
// It's assumed that the caller used firstToParse to ascertain that they only call this once per package.
// Returns nil when the parse was deferred pending a subinclude target.
func parsePackage(state *core.BuildState, label, dependor core.BuildLabel) *core.Package {
	packageName := label.PackageName
	pkg := core.NewPackage(packageName)
	if pkg.Filename = buildFileName(state, packageName); pkg.Filename == "" {
		exists := core.PathExists(packageName)
		// Handle quite a few cases to provide more obvious error messages.
		if dependor != core.OriginalTarget && exists {
			panic(fmt.Sprintf("%s depends on %s, but there's no BUILD file in %s/", dependor, label, packageName))
		} else if dependor != core.OriginalTarget {
			panic(fmt.Sprintf("%s depends on %s, but the directory %s doesn't exist", dependor, label, packageName))
		} else if exists {
			panic(fmt.Sprintf("Can't build %s; there's no BUILD file in %s/", label, packageName))
		}
		panic(fmt.Sprintf("Can't build %s; the directory %s doesn't exist", label, packageName))
	}
	if parsePackageFile(state, pkg.Filename, pkg) {
		return nil // Indicates deferral
	}
	// Register every target with the graph and its outputs with the package.
	for _, target := range pkg.Targets {
		state.Graph.AddTarget(target)
		if target.IsFilegroup {
			// At least register these guys as outputs.
			// It's difficult to handle non-file sources because we don't know if they're
			// parsed yet - recall filegroups are a special case for this since they don't
			// explicitly declare their outputs but can re-output other rules' outputs.
			for _, src := range target.AllLocalSources() {
				pkg.MustRegisterOutput(src, target)
			}
		} else {
			for _, out := range target.DeclaredOutputs() {
				pkg.MustRegisterOutput(out, target)
			}
			// Glob test outputs can't be registered up front.
			for _, out := range target.TestOutputs {
				if !core.IsGlob(out) {
					pkg.MustRegisterOutput(out, target)
				}
			}
		}
	}
	// Do this in a separate loop so we get intra-package dependencies right now.
	for _, target := range pkg.Targets {
		for _, dep := range target.DeclaredDependencies() {
			state.Graph.AddDependency(target.Label, dep)
		}
	}
	state.Graph.AddPackage(pkg) // Calling this means nobody else will add entries to pendingTargets for this package.
	return pkg
}
// buildFileName returns the path of the package file for pkgName, or "" when
// none of the configured build file names exists in that directory.
func buildFileName(state *core.BuildState, pkgName string) string {
	// Bazel defines targets in its "external" package from its WORKSPACE file;
	// in compatibility mode we fake that by treating it as the package file.
	if state.Config.Bazel.Compatibility && pkgName == "external" {
		return "WORKSPACE"
	}
	for _, name := range state.Config.Parse.BuildFileName {
		candidate := path.Join(pkgName, name)
		if core.FileExists(candidate) {
			return candidate
		}
	}
	return ""
}
// addDep adds a single target to the build queue, recursing into its declared
// dependencies. 'rescan' forces re-examination of an already-active target's
// deps; 'forceBuild' makes sure the target gets built even if we're only parsing.
func addDep(state *core.BuildState, label, dependor core.BuildLabel, rescan, forceBuild bool) {
	// Stop at any package that's not loaded yet
	if state.Graph.Package(label.PackageName) == nil {
		if forceBuild {
			log.Debug("Adding forced pending parse of %s", label)
		}
		state.AddPendingParse(label, dependor, forceBuild)
		return
	}
	target := state.Graph.Target(label)
	if target == nil {
		log.Fatalf("Target %s (referenced by %s) doesn't exist\n", label, dependor)
	}
	if forceBuild {
		log.Debug("Forcing build of %s", label)
	}
	if target.State() >= core.Active && !rescan && !forceBuild {
		return // Target is already tagged to be built and likely on the queue.
	}
	// Only do this bit if we actually need to build the target
	// (Inactive -> Semiactive marks it as needed for the graph).
	if !target.SyncUpdateState(core.Inactive, core.Semiactive) && !rescan && !forceBuild {
		return
	}
	if state.NeedBuild || forceBuild {
		if target.SyncUpdateState(core.Semiactive, core.Active) {
			state.AddActiveTarget()
			if target.IsTest && state.NeedTests {
				state.AddActiveTarget() // Tests count twice if we're gonna run them.
			}
		}
	}
	// If this target has no deps, add it to the queue now, otherwise handle its deps.
	// Only add if we need to build targets (not if we're just parsing) but we might need it to parse...
	if target.State() == core.Active && state.Graph.AllDepsBuilt(target) {
		if target.SyncUpdateState(core.Active, core.Pending) {
			state.AddPendingBuild(label, dependor.IsAllTargets())
		}
		if !rescan {
			return
		}
	}
	for _, dep := range target.DeclaredDependencies() {
		// Check the require/provide stuff; we may need to add a different target.
		if len(target.Requires) > 0 {
			if depTarget := state.Graph.Target(dep); depTarget != nil && len(depTarget.Provides) > 0 {
				// Substitute whatever the dependency provides for this target.
				for _, provided := range depTarget.ProvideFor(target) {
					addDep(state, provided, label, false, forceBuild)
				}
				continue
			}
		}
		if forceBuild {
			log.Debug("Forcing build of dep %s -> %s", label, dep)
		}
		addDep(state, dep, label, false, forceBuild)
	}
}
// RunPreBuildFunction runs a pre-build callback function registered on a
// build target via pre_build = <...>. It executes before the target is built;
// unlike the post-build callback it receives no build output, but it is still
// useful e.g. for inspecting a target's transitive labels to adjust its build
// command (which can't be done when the rule is evaluated). Targets changed
// by the callback are rescanned so new dependencies enter the build queue.
func RunPreBuildFunction(tid int, state *core.BuildState, target *core.BuildTarget) error {
	state.LogBuildResult(tid, target.Label, core.PackageParsing,
		fmt.Sprintf("Running pre-build function for %s", target.Label))
	pkg := state.Graph.Package(target.Label.PackageName)
	changed, err := pkg.EnterBuildCallback(func() error {
		return runPreBuildFunction(pkg, target)
	})
	if err != nil {
		state.LogBuildError(tid, target.Label, core.ParseFailed, err, "Failed pre-build function for %s", target.Label)
		return err
	}
	rescanDeps(state, changed)
	state.LogBuildResult(tid, target.Label, core.TargetBuilding,
		fmt.Sprintf("Finished pre-build function for %s", target.Label))
	return nil
}
// RunPostBuildFunction runs a post-build callback function registered on a
// build target via post_build = <...>. It is invoked after the target has
// been built, receiving the build's combined stdout/stderr, which the
// callback may use to generate new targets or add dependencies to existing
// unbuilt ones. Targets changed by the callback are rescanned so their new
// dependencies enter the build queue.
func RunPostBuildFunction(tid int, state *core.BuildState, target *core.BuildTarget, out string) error {
	state.LogBuildResult(tid, target.Label, core.PackageParsing,
		fmt.Sprintf("Running post-build function for %s", target.Label))
	pkg := state.Graph.Package(target.Label.PackageName)
	changed, err := pkg.EnterBuildCallback(func() error {
		log.Debug("Running post-build function for %s. Build output:\n%s", target.Label, out)
		return runPostBuildFunction(pkg, target, out)
	})
	if err != nil {
		state.LogBuildError(tid, target.Label, core.ParseFailed, err, "Failed post-build function for %s", target.Label)
		return err
	}
	rescanDeps(state, changed)
	state.LogBuildResult(tid, target.Label, core.TargetBuilding,
		fmt.Sprintf("Finished post-build function for %s", target.Label))
	return nil
}
// rescanDeps walks the targets changed by a pre/post-build callback: it
// re-registers any dependencies not yet resolved in the graph, and re-queues
// targets that are active but not yet built so the new deps take effect.
func rescanDeps(state *core.BuildState, changed map[*core.BuildTarget]struct{}) {
	for target := range changed {
		if !state.Graph.AllDependenciesResolved(target) {
			for _, dep := range target.DeclaredDependencies() {
				state.Graph.AddDependency(target.Label, dep)
			}
		}
		if s := target.State(); s > core.Inactive && s < core.Built {
			addDep(state, target.Label, core.OriginalTarget, true, false)
		}
	}
}
|
package controllers
import (
"fmt"
"github.com/kataras/iris/context"
"gocherry-api-gateway/admin/models"
"gocherry-api-gateway/components/utils"
)
// UserSaveReq is the expected JSON payload for creating an admin user.
// NOTE(review): the visible handlers below bind the request into
// models.AdminAccount directly, so the `validate` tags here are not
// enforced by them — confirm where (or whether) this struct is used.
type UserSaveReq struct {
	UserName string `json:"user_name"`
	Phone string `json:"phone" validate:"required"`
	Pwd string `json:"pwd" validate:"required"`
	Level int `json:"level" validate:"required"`
}
// GetList renders the first page (up to 100 entries) of admin accounts as JSON.
func (c *UserController) GetList(ctx context.Context) {
	var users []models.AdminAccount
	list := new(models.AdminAccount).GetUserList(1, 100, users)
	// NOTE(review): looks like a leftover debug print — consider removing.
	fmt.Println(list)
	c.RenderJson(ctx, list)
}
// Save creates an admin account from the request body and renders the result as JSON.
// NOTE(review): the body is bound into models.AdminAccount rather than
// UserSaveReq, so the latter's `validate:"required"` tags are not applied
// here — confirm this is intended.
func (c *UserController) Save(ctx context.Context) {
	var req models.AdminAccount
	c.GetRequest(ctx, &req)
	user := models.AdminAccount{}
	user.UserName = req.UserName
	user.Phone = req.Phone
	user.Pwd = utils.GetMd5(req.Pwd) // password is stored as an MD5 hash
	user.Level = req.Level
	user.State = 1 // presumably 1 == active/enabled — TODO confirm
	c.RenderJson(ctx, new(models.AdminAccount).SaveUser(user))
}
// Del deletes the admin account identified by the request body and renders the result as JSON.
func (c *UserController) Del(ctx context.Context) {
	var req models.AdminAccount
	c.GetRequest(ctx, &req)
	c.RenderJson(ctx, new(models.AdminAccount).DelUser(req))
}
|
package main
import "fmt"
// main demonstrates taking the address of an int and storing it in a *int.
func main() {
	value := 42
	fmt.Println(value)  // the int itself
	fmt.Println(&value) // its address
	ptr := &value       // ptr has type *int ("pointer to int")
	fmt.Println(ptr)    // same address as the line above
}
// 42
// 0xc0000140b0
// 0xc0000140b0
/*
The code above makes "b" a pointer to the memory address where an int is stored.
"b" is of type "pointer to int", written "*int" — the * is part of the type name.
*/
|
package models
import (
"time"
"github.com/jinzhu/gorm"
)
// Coupon Model — a discount voucher with a validity window, a fixed value
// and/or a percentage, and usage limits, linked many-to-many to categories.
type Coupon struct {
	gorm.Model
	// NOTE(review): the json tag "pid" on Code looks like a copy-paste slip
	// (expected "code") — confirm before changing, clients may rely on it.
	Code int `json:"pid" gorm:"default:0"`
	Name string `json:"name" gorm:"not null" binding:"required"`
	Desc string `json:"desc" gorm:"type:text"`
	ValidFrom time.Time `json:"valid_from" gorm:"type:datetime; not null" binding:"required"`
	ValidTo time.Time `json:"valid_to" gorm:"type:datetime; not null" binding:"required"`
	CouponValue float64 `json:"coupon_value" gorm:"type:decimal; not null" binding:"required"`
	CouponPercentage float32 `json:"coupon_percentage" gorm:"type:decimal"`
	Limit int `json:"limit" gorm:"not null" binding:"required"`
	LimitTerms int `json:"limit_terms" gorm:"not null" binding:"required"`
	Categories []*Category `gorm:"many2many:coupon_categories"`
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parse
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"math"
"github.com/klauspost/compress/zstd"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/parser/charset"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/server/internal/handshake"
util2 "github.com/pingcap/tidb/server/internal/util"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/dbterror"
"github.com/pingcap/tidb/util/hack"
"github.com/pingcap/tidb/util/logutil"
"go.uber.org/zap"
)
// errUnknownFieldType is returned when a COM_STMT_EXECUTE parameter carries a
// type code this parser does not understand.
var errUnknownFieldType = dbterror.ClassServer.NewStd(errno.ErrUnknownFieldType)
// maxFetchSize caps the number of rows a single COM_STMT_FETCH may request.
const (
	maxFetchSize = 1024
)
// ExecArgs parse execute arguments to datum slice.
//
// It decodes the binary COM_STMT_EXECUTE parameter payload (nullBitmap,
// paramTypes, paramValues) into datums and writes them back into params as
// *expression.Constant values. boundParams holds data previously received via
// COM_STMT_SEND_LONG_DATA; a non-nil entry takes precedence over the inline
// payload. enc decodes byte strings from the client charset (defaults to UTF-8).
func ExecArgs(sc *stmtctx.StatementContext, params []expression.Expression, boundParams [][]byte,
	nullBitmap, paramTypes, paramValues []byte, enc *util2.InputDecoder) (err error) {
	pos := 0
	var (
		tmp    interface{}
		v      []byte
		n      int
		isNull bool
	)
	if enc == nil {
		enc = util2.NewInputDecoder(charset.CharsetUTF8)
	}
	args := make([]types.Datum, len(params))
	for i := 0; i < len(args); i++ {
		// if params had received via ComStmtSendLongData, use them directly.
		// ref https://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
		// see clientConn#handleStmtSendLongData
		if boundParams[i] != nil {
			args[i] = types.NewBytesDatum(enc.DecodeInput(boundParams[i]))
			continue
		}
		// check nullBitMap to determine the NULL arguments.
		// ref https://dev.mysql.com/doc/internals/en/com-stmt-execute.html
		// notice: some client(e.g. mariadb) will set nullBitMap even if data had be sent via ComStmtSendLongData,
		// so this check need place after boundParam's check.
		if nullBitmap[i>>3]&(1<<(uint(i)%8)) > 0 {
			var nilDatum types.Datum
			nilDatum.SetNull()
			args[i] = nilDatum
			continue
		}
		// Each parameter occupies two bytes in paramTypes: type code, then flags.
		if (i<<1)+1 >= len(paramTypes) {
			return mysql.ErrMalformPacket
		}
		tp := paramTypes[i<<1]
		isUnsigned := (paramTypes[(i<<1)+1] & 0x80) > 0
		// Decode the value according to its wire type; fixed-width types advance
		// pos by their size, the rest are length-encoded.
		switch tp {
		case mysql.TypeNull:
			var nilDatum types.Datum
			nilDatum.SetNull()
			args[i] = nilDatum
			continue
		case mysql.TypeTiny:
			if len(paramValues) < (pos + 1) {
				err = mysql.ErrMalformPacket
				return
			}
			if isUnsigned {
				args[i] = types.NewUintDatum(uint64(paramValues[pos]))
			} else {
				args[i] = types.NewIntDatum(int64(int8(paramValues[pos])))
			}
			pos++
			continue
		case mysql.TypeShort, mysql.TypeYear:
			if len(paramValues) < (pos + 2) {
				err = mysql.ErrMalformPacket
				return
			}
			valU16 := binary.LittleEndian.Uint16(paramValues[pos : pos+2])
			if isUnsigned {
				args[i] = types.NewUintDatum(uint64(valU16))
			} else {
				args[i] = types.NewIntDatum(int64(int16(valU16)))
			}
			pos += 2
			continue
		case mysql.TypeInt24, mysql.TypeLong:
			if len(paramValues) < (pos + 4) {
				err = mysql.ErrMalformPacket
				return
			}
			valU32 := binary.LittleEndian.Uint32(paramValues[pos : pos+4])
			if isUnsigned {
				args[i] = types.NewUintDatum(uint64(valU32))
			} else {
				args[i] = types.NewIntDatum(int64(int32(valU32)))
			}
			pos += 4
			continue
		case mysql.TypeLonglong:
			if len(paramValues) < (pos + 8) {
				err = mysql.ErrMalformPacket
				return
			}
			valU64 := binary.LittleEndian.Uint64(paramValues[pos : pos+8])
			if isUnsigned {
				args[i] = types.NewUintDatum(valU64)
			} else {
				args[i] = types.NewIntDatum(int64(valU64))
			}
			pos += 8
			continue
		case mysql.TypeFloat:
			if len(paramValues) < (pos + 4) {
				err = mysql.ErrMalformPacket
				return
			}
			args[i] = types.NewFloat32Datum(math.Float32frombits(binary.LittleEndian.Uint32(paramValues[pos : pos+4])))
			pos += 4
			continue
		case mysql.TypeDouble:
			if len(paramValues) < (pos + 8) {
				err = mysql.ErrMalformPacket
				return
			}
			args[i] = types.NewFloat64Datum(math.Float64frombits(binary.LittleEndian.Uint64(paramValues[pos : pos+8])))
			pos += 8
			continue
		case mysql.TypeDate, mysql.TypeTimestamp, mysql.TypeDatetime:
			if len(paramValues) < (pos + 1) {
				err = mysql.ErrMalformPacket
				return
			}
			// See https://dev.mysql.com/doc/internals/en/binary-protocol-value.html
			// for more details.
			// A one-byte length prefix selects how much of the temporal value
			// is present (date only, date+time, +microseconds, +timezone).
			length := paramValues[pos]
			pos++
			switch length {
			case 0:
				tmp = types.ZeroDatetimeStr
			case 4:
				pos, tmp = binaryDate(pos, paramValues)
			case 7:
				pos, tmp = binaryDateTime(pos, paramValues)
			case 11:
				pos, tmp = binaryTimestamp(pos, paramValues)
			case 13:
				pos, tmp = binaryTimestampWithTZ(pos, paramValues)
			default:
				err = mysql.ErrMalformPacket
				return
			}
			args[i] = types.NewDatum(tmp) // FIXME: After check works!!!!!!
			continue
		case mysql.TypeDuration:
			if len(paramValues) < (pos + 1) {
				err = mysql.ErrMalformPacket
				return
			}
			// See https://dev.mysql.com/doc/internals/en/binary-protocol-value.html
			// for more details.
			length := paramValues[pos]
			pos++
			switch length {
			case 0:
				tmp = "0"
			case 8:
				// First payload byte is the sign flag; only 0 or 1 is legal.
				isNegative := paramValues[pos]
				if isNegative > 1 {
					err = mysql.ErrMalformPacket
					return
				}
				pos++
				pos, tmp = binaryDuration(pos, paramValues, isNegative)
			case 12:
				isNegative := paramValues[pos]
				if isNegative > 1 {
					err = mysql.ErrMalformPacket
					return
				}
				pos++
				pos, tmp = binaryDurationWithMS(pos, paramValues, isNegative)
			default:
				err = mysql.ErrMalformPacket
				return
			}
			args[i] = types.NewDatum(tmp)
			continue
		case mysql.TypeNewDecimal:
			if len(paramValues) < (pos + 1) {
				err = mysql.ErrMalformPacket
				return
			}
			v, isNull, n, err = util2.ParseLengthEncodedBytes(paramValues[pos:])
			pos += n
			if err != nil {
				return
			}
			if isNull {
				args[i] = types.NewDecimalDatum(nil)
			} else {
				var dec types.MyDecimal
				// Truncation during decimal parsing is handled per the statement
				// context (may warn instead of erroring).
				err = sc.HandleTruncate(dec.FromString(v))
				if err != nil {
					return err
				}
				args[i] = types.NewDecimalDatum(&dec)
			}
			continue
		case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob:
			if len(paramValues) < (pos + 1) {
				err = mysql.ErrMalformPacket
				return
			}
			v, isNull, n, err = util2.ParseLengthEncodedBytes(paramValues[pos:])
			pos += n
			if err != nil {
				return
			}
			if isNull {
				args[i] = types.NewBytesDatum(nil)
			} else {
				args[i] = types.NewBytesDatum(v)
			}
			continue
		case mysql.TypeUnspecified, mysql.TypeVarchar, mysql.TypeVarString, mysql.TypeString,
			mysql.TypeEnum, mysql.TypeSet, mysql.TypeGeometry, mysql.TypeBit:
			if len(paramValues) < (pos + 1) {
				err = mysql.ErrMalformPacket
				return
			}
			v, isNull, n, err = util2.ParseLengthEncodedBytes(paramValues[pos:])
			pos += n
			if err != nil {
				return
			}
			if !isNull {
				// String-ish types are charset-decoded, unlike blobs above.
				v = enc.DecodeInput(v)
				tmp = string(hack.String(v))
			} else {
				tmp = nil
			}
			args[i] = types.NewDatum(tmp)
			continue
		default:
			err = errUnknownFieldType.GenWithStack("stmt unknown field type %d", tp)
			return
		}
	}
	// Infer a field type for each decoded value and expose it to the planner
	// as a constant expression.
	for i := range params {
		ft := new(types.FieldType)
		types.InferParamTypeFromUnderlyingValue(args[i].GetValue(), ft)
		params[i] = &expression.Constant{Value: args[i], RetType: ft}
	}
	return
}
// binaryDate decodes the 4-byte binary DATE payload starting at pos
// (uint16 LE year, byte month, byte day) and returns the advanced offset
// plus the value formatted as "YYYY-MM-DD".
func binaryDate(pos int, paramValues []byte) (int, string) {
	y := binary.LittleEndian.Uint16(paramValues[pos : pos+2])
	m, d := paramValues[pos+2], paramValues[pos+3]
	return pos + 4, fmt.Sprintf("%04d-%02d-%02d", y, m, d)
}
// binaryDateTime decodes the 7-byte binary DATETIME payload at pos
// (date part followed by hour, minute, second bytes) and returns the
// advanced offset plus "YYYY-MM-DD hh:mm:ss".
func binaryDateTime(pos int, paramValues []byte) (int, string) {
	next, datePart := binaryDate(pos, paramValues)
	h, m, s := paramValues[next], paramValues[next+1], paramValues[next+2]
	return next + 3, fmt.Sprintf("%s %02d:%02d:%02d", datePart, h, m, s)
}
// binaryTimestamp decodes the 11-byte binary TIMESTAMP payload at pos
// (datetime part followed by a uint32 LE microsecond count) and returns the
// advanced offset plus "YYYY-MM-DD hh:mm:ss.uuuuuu".
func binaryTimestamp(pos int, paramValues []byte) (int, string) {
	next, dt := binaryDateTime(pos, paramValues)
	micros := binary.LittleEndian.Uint32(paramValues[next : next+4])
	return next + 4, fmt.Sprintf("%s.%06d", dt, micros)
}
// binaryTimestampWithTZ decodes the 13-byte binary TIMESTAMP payload at pos:
// a timestamp followed by a signed int16 LE timezone offset in minutes,
// rendered as "+hh:mm"/"-hh:mm".
func binaryTimestampWithTZ(pos int, paramValues []byte) (int, string) {
	next, ts := binaryTimestamp(pos, paramValues)
	offsetMin := int16(binary.LittleEndian.Uint16(paramValues[next : next+2]))
	hours, mins := offsetMin/60, offsetMin%60
	// The sign is carried by the hour component; minutes are printed unsigned.
	if mins < 0 {
		mins = -mins
	}
	return next + 2, fmt.Sprintf("%s%+02d:%02d", ts, hours, mins)
}
// binaryDuration decodes the 7-byte binary TIME payload at pos (uint32 LE
// days, then hour, minute, second bytes); isNegative==1 prefixes a minus sign.
func binaryDuration(pos int, paramValues []byte, isNegative uint8) (int, string) {
	var sign string
	if isNegative == 1 {
		sign = "-"
	}
	days := binary.LittleEndian.Uint32(paramValues[pos : pos+4])
	h, m, s := paramValues[pos+4], paramValues[pos+5], paramValues[pos+6]
	return pos + 7, fmt.Sprintf("%s%d %02d:%02d:%02d", sign, days, h, m, s)
}
// binaryDurationWithMS decodes the 11-byte binary TIME payload at pos:
// a duration followed by a uint32 LE microsecond count.
func binaryDurationWithMS(pos int, paramValues []byte, isNegative uint8) (int, string) {
	next, base := binaryDuration(pos, paramValues, isNegative)
	micros := binary.LittleEndian.Uint32(paramValues[next : next+4])
	return next + 4, fmt.Sprintf("%s.%06d", base, micros)
}
// StmtFetchCmd parses the 8-byte payload of a COM_STMT_FETCH command into the
// statement id and the number of rows to fetch (capped at maxFetchSize).
// Please refer to https://dev.mysql.com/doc/internals/en/com-stmt-fetch.html
func StmtFetchCmd(data []byte) (stmtID uint32, fetchSize uint32, err error) {
	if len(data) != 8 {
		err = mysql.ErrMalformPacket
		return
	}
	stmtID = binary.LittleEndian.Uint32(data[:4])
	if fetchSize = binary.LittleEndian.Uint32(data[4:]); fetchSize > maxFetchSize {
		// Clamp excessive client requests to the server-side cap.
		fetchSize = maxFetchSize
	}
	return
}
// HandshakeResponseHeader parses the common header of SSLRequest and Response41.
// It fills packet.Capability and packet.Collation and returns the number of
// bytes consumed. See
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
func HandshakeResponseHeader(ctx context.Context, packet *handshake.Response41, data []byte) (parsedBytes int, err error) {
	// The fixed prefix is 4B capability + 4B max packet size + 1B collation
	// + 23 reserved zero bytes.
	const headerLen = 4 + 4 + 1 + 23
	if len(data) < headerLen {
		logutil.Logger(ctx).Error("got malformed handshake response", zap.ByteString("packetData", data))
		return 0, mysql.ErrMalformPacket
	}
	packet.Capability = binary.LittleEndian.Uint32(data[:4])
	// Byte 8 is the client collation; the max-packet-size field (bytes 4-7) is
	// skipped. To use another charset the client issues SET NAMES instead.
	packet.Collation = data[8]
	// Skip the 23 reserved bytes.
	return headerLen, nil
}
// HandshakeResponseBody parse the HandshakeResponse (except the common header part).
// Starting at offset (where header parsing stopped), it fills in the user name,
// auth data, database name, auth plugin name, connection attributes and zstd
// compression level, each guarded by the corresponding capability flag parsed
// earlier into packet.Capability.
func HandshakeResponseBody(ctx context.Context, packet *handshake.Response41, data []byte, offset int) (err error) {
	defer func() {
		// Check malformat packet cause out of range is disgusting, but don't panic!
		// Any out-of-range slice access below is converted into ErrMalformPacket.
		if r := recover(); r != nil {
			logutil.Logger(ctx).Error("handshake panic", zap.ByteString("packetData", data))
			err = mysql.ErrMalformPacket
		}
	}()
	// user name: NUL-terminated string.
	packet.User = string(data[offset : offset+bytes.IndexByte(data[offset:], 0)])
	offset += len(packet.User) + 1
	if packet.Capability&mysql.ClientPluginAuthLenencClientData > 0 {
		// MySQL client sets the wrong capability, it will set this bit even server doesn't
		// support ClientPluginAuthLenencClientData.
		// https://github.com/mysql/mysql-server/blob/5.7/sql-common/client.c#L3478
		if data[offset] == 0x1 { // No auth data
			offset += 2
		} else {
			// Length-encoded auth data.
			num, null, off := util2.ParseLengthEncodedInt(data[offset:])
			offset += off
			if !null {
				packet.Auth = data[offset : offset+int(num)]
				offset += int(num)
			}
		}
	} else if packet.Capability&mysql.ClientSecureConnection > 0 {
		// auth length and auth: single length byte followed by the auth bytes.
		authLen := int(data[offset])
		offset++
		packet.Auth = data[offset : offset+authLen]
		offset += authLen
	} else {
		// Legacy: NUL-terminated auth data.
		packet.Auth = data[offset : offset+bytes.IndexByte(data[offset:], 0)]
		offset += len(packet.Auth) + 1
	}
	if packet.Capability&mysql.ClientConnectWithDB > 0 {
		if len(data[offset:]) > 0 {
			// NUL-terminated database name.
			idx := bytes.IndexByte(data[offset:], 0)
			packet.DBName = string(data[offset : offset+idx])
			offset += idx + 1
		}
	}
	if packet.Capability&mysql.ClientPluginAuth > 0 {
		// NUL-terminated auth plugin name; skipped if empty.
		idx := bytes.IndexByte(data[offset:], 0)
		s := offset
		f := offset + idx
		if s < f { // handle unexpected bad packets
			packet.AuthPlugin = string(data[s:f])
		}
		offset += idx + 1
	}
	if packet.Capability&mysql.ClientConnectAtts > 0 {
		if len(data[offset:]) == 0 {
			// Defend some ill-formated packet, connection attribute is not important and can be ignored.
			return nil
		}
		if num, null, intOff := util2.ParseLengthEncodedInt(data[offset:]); !null {
			offset += intOff // Length of variable length encoded integer itself in bytes
			row := data[offset : offset+int(num)]
			attrs, err := parseAttrs(row)
			if err != nil {
				// Attribute errors are logged but never fail the handshake.
				logutil.Logger(ctx).Warn("parse attrs failed", zap.Error(err))
				return nil
			}
			packet.Attrs = attrs
			offset += int(num) // Length of attributes
		}
	}
	if packet.Capability&mysql.ClientZstdCompressionAlgorithm > 0 {
		// Single byte: requested zstd compression level.
		packet.ZstdLevel = zstd.EncoderLevelFromZstd(int(data[offset]))
	}
	return nil
}
// parseAttrs decodes the connection-attribute blob: a sequence of
// length-encoded key/value string pairs. On error it returns the pairs
// decoded so far together with the error.
func parseAttrs(data []byte) (map[string]string, error) {
	attrs := make(map[string]string)
	for pos := 0; pos < len(data); {
		key, _, n, err := util2.ParseLengthEncodedBytes(data[pos:])
		if err != nil {
			return attrs, err
		}
		pos += n
		val, _, n, err := util2.ParseLengthEncodedBytes(data[pos:])
		if err != nil {
			return attrs, err
		}
		pos += n
		attrs[string(key)] = string(val)
	}
	return attrs, nil
}
|
package leetcode
// longestPalindrome returns the longest palindromic substring of s by
// expanding around every odd center (i, i) and every even center (i, i+1).
// Given a string s, find the longest palindromic substring in s. You may
// assume that the maximum length of s is 1000.
func longestPalindrome(s string) string {
	// grow expands outward from the given center and returns the length of
	// the palindrome found there.
	grow := func(lo, hi int) int {
		for lo >= 0 && hi < len(s) && s[lo] == s[hi] {
			lo--
			hi++
		}
		return hi - lo - 1
	}
	best, start := 0, 0
	for c := 0; c < len(s); c++ {
		if l := grow(c, c); l > best {
			best, start = l, c-l/2
		}
	}
	for c := 0; c+1 < len(s); c++ {
		if l := grow(c, c+1); l > best {
			best, start = l, c-l/2+1
		}
	}
	return s[start : start+best]
}
// maxLength expands outward from the center described by (left, right) —
// equal indices for an odd-length center, adjacent for even — and returns
// the length of the palindrome found there (0 if the center itself fails).
func maxLength(s string, left, right int) int {
	for left >= 0 && right < len(s) && s[left] == s[right] {
		left, right = left-1, right+1
	}
	// After overshooting by one on each side, the width is right-left-1.
	return right - left - 1
}
|
// Copyright © 2019 Banzai Cloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controllers
import (
"context"
"fmt"
"reflect"
"strings"
"time"
"emperror.dev/errors"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"github.com/banzaicloud/kafka-operator/api/v1beta1"
"github.com/banzaicloud/kafka-operator/pkg/errorfactory"
"github.com/banzaicloud/kafka-operator/pkg/k8sutil"
"github.com/banzaicloud/kafka-operator/pkg/scale"
ccutils "github.com/banzaicloud/kafka-operator/pkg/util/cruisecontrol"
apiErrors "k8s.io/apimachinery/pkg/api/errors"
kafkav1beta1 "github.com/banzaicloud/kafka-operator/api/v1beta1"
)
// CruiseControlTaskReconciler reconciles a kafka cluster object by tracking
// and (re)scheduling the Cruise Control tasks recorded in its status.
type CruiseControlTaskReconciler struct {
	client.Client
	// Scheme is the runtime scheme used by the controller-runtime client.
	Scheme *runtime.Scheme
	// Log is the base logger; Reconcile derives a per-request logger from it.
	Log logr.Logger
}
// +kubebuilder:rbac:groups=kafka.banzaicloud.io,resources=kafkaclusters/status,verbs=get;update;patch

// Reconcile fetches the KafkaCluster named in the request and drives its
// pending Cruise Control operations: it first checks the state of
// already-running CC tasks (broker scale and disk rebalance), then starts new
// CC tasks for brokers/volumes whose state is "...Required". CC-related
// errors are mapped to fixed requeue intervals instead of failing the request.
func (r *CruiseControlTaskReconciler) Reconcile(request ctrl.Request) (ctrl.Result, error) {
	ctx := context.Background()
	log := r.Log.WithValues("clusterName", request.Name, "clusterNamespace", request.Namespace)
	// Fetch the KafkaCluster instance
	instance := &v1beta1.KafkaCluster{}
	err := r.Get(ctx, request.NamespacedName, instance)
	if err != nil {
		if apiErrors.IsNotFound(err) {
			// Object not found, return. Created objects are automatically garbage collected.
			// For additional cleanup logic use finalizers.
			return reconciled()
		}
		// Error reading the object - requeue the request.
		return requeueWithError(r.Log, err.Error(), err)
	}
	log.V(1).Info("Reconciling")
	// Collect brokers (and broker volumes) that currently have a running CC task.
	brokersWithRunningCCTask := make(map[string]v1beta1.BrokerState)
	brokerVolumesWithRunningCCTask := make(map[string]map[string]v1beta1.VolumeState)
	for brokerId, brokerStatus := range instance.Status.BrokersState {
		if brokerStatus.GracefulActionState.CruiseControlState.IsRunningState() {
			brokersWithRunningCCTask[brokerId] = brokerStatus
		}
		volumesState := make(map[string]v1beta1.VolumeState)
		for mountPath, volumeState := range brokerStatus.GracefulActionState.VolumeStates {
			if volumeState.CruiseControlVolumeState == v1beta1.GracefulDiskRebalanceRunning {
				volumesState[mountPath] = volumeState
			}
		}
		if len(volumesState) > 0 {
			brokerVolumesWithRunningCCTask[brokerId] = volumesState
		}
	}
	// Poll the running tasks first; their outcome decides whether new work may start.
	if len(brokersWithRunningCCTask) > 0 {
		err = r.checkCCTaskState(instance, brokersWithRunningCCTask, log)
	}
	if err == nil && len(brokerVolumesWithRunningCCTask) > 0 {
		err = r.checkVolumeCCTaskState(instance, brokerVolumesWithRunningCCTask, log)
	}
	if err != nil {
		// Translate CC-related error categories into requeue intervals.
		switch errors.Cause(err).(type) {
		case errorfactory.CruiseControlNotReady, errorfactory.ResourceNotReady:
			return ctrl.Result{
				RequeueAfter: time.Duration(15) * time.Second,
			}, nil
		case errorfactory.CruiseControlTaskRunning:
			return ctrl.Result{
				RequeueAfter: time.Duration(20) * time.Second,
			}, nil
		case errorfactory.CruiseControlTaskTimeout, errorfactory.CruiseControlTaskFailure:
			return ctrl.Result{
				RequeueAfter: time.Duration(20) * time.Second,
			}, nil
		default:
			return requeueWithError(log, err.Error(), err)
		}
	}
	// Collect brokers/volumes whose state requests a new CC operation.
	var brokersWithDownscaleRequired []string
	var brokersWithUpscaleRequired []string
	brokersWithDiskRebalanceRequired := make(map[string][]string)
	for brokerId, brokerStatus := range instance.Status.BrokersState {
		if brokerStatus.GracefulActionState.CruiseControlState == v1beta1.GracefulUpscaleRequired {
			brokersWithUpscaleRequired = append(brokersWithUpscaleRequired, brokerId)
		} else if brokerStatus.GracefulActionState.CruiseControlState == v1beta1.GracefulDownscaleRequired {
			brokersWithDownscaleRequired = append(brokersWithDownscaleRequired, brokerId)
		}
		for mountPath, volumeState := range brokerStatus.GracefulActionState.VolumeStates {
			if volumeState.CruiseControlVolumeState == v1beta1.GracefulDiskRebalanceRequired {
				brokersWithDiskRebalanceRequired[brokerId] = append(brokersWithDiskRebalanceRequired[brokerId], mountPath)
			}
		}
	}
	// Only one kind of CC operation is started per reconcile pass:
	// upscale takes priority, then downscale, then disk rebalance.
	var taskId, startTime string
	if len(brokersWithUpscaleRequired) > 0 {
		err = r.handlePodAddCCTask(instance, brokersWithUpscaleRequired, log)
	} else if len(brokersWithDownscaleRequired) > 0 {
		err = r.handlePodDeleteCCTask(instance, brokersWithDownscaleRequired, log)
	} else if len(brokersWithDiskRebalanceRequired) > 0 {
		// create new cc task, set status to running
		cc := scale.NewCruiseControlScaler(instance.Namespace, instance.Spec.GetKubernetesClusterDomain(), instance.Spec.CruiseControlConfig.CruiseControlEndpoint, instance.Name)
		taskId, startTime, err = cc.RebalanceDisks(brokersWithDiskRebalanceRequired)
		if err != nil {
			log.Error(err, "executing disk rebalance cc task failed")
		} else {
			// Mark every affected volume as rebalance-running under the new task id.
			var brokerIds []string
			brokersVolumeStates := make(map[string]map[string]v1beta1.VolumeState, len(brokersWithDiskRebalanceRequired))
			for brokerId, mountPaths := range brokersWithDiskRebalanceRequired {
				brokerVolumeState := make(map[string]v1beta1.VolumeState, len(mountPaths))
				for _, mountPath := range mountPaths {
					brokerVolumeState[mountPath] = kafkav1beta1.VolumeState{
						CruiseControlTaskId:      taskId,
						TaskStarted:              startTime,
						CruiseControlVolumeState: v1beta1.GracefulDiskRebalanceRunning,
					}
				}
				if len(brokerVolumeState) > 0 {
					brokersVolumeStates[brokerId] = brokerVolumeState
					brokerIds = append(brokerIds, brokerId)
				}
			}
			if len(brokersVolumeStates) > 0 {
				err = k8sutil.UpdateBrokerStatus(r.Client, brokerIds, instance, brokersVolumeStates, log)
			}
		}
	}
	if err != nil {
		// Same error-to-requeue translation as above, for the newly started work.
		switch errors.Cause(err).(type) {
		case errorfactory.CruiseControlNotReady:
			return ctrl.Result{
				RequeueAfter: time.Duration(15) * time.Second,
			}, nil
		case errorfactory.CruiseControlTaskRunning:
			return ctrl.Result{
				RequeueAfter: time.Duration(20) * time.Second,
			}, nil
		case errorfactory.CruiseControlTaskTimeout, errorfactory.CruiseControlTaskFailure:
			return ctrl.Result{
				RequeueAfter: time.Duration(20) * time.Second,
			}, nil
		default:
			return requeueWithError(log, err.Error(), err)
		}
	}
	return reconciled()
}
// handlePodAddCCTask asks Cruise Control to upscale the given brokers and
// marks them GracefulUpscaleRunning in the cluster status.
func (r *CruiseControlTaskReconciler) handlePodAddCCTask(kafkaCluster *v1beta1.KafkaCluster, brokerIds []string, log logr.Logger) error {
	cc := scale.NewCruiseControlScaler(kafkaCluster.Namespace, kafkaCluster.Spec.GetKubernetesClusterDomain(), kafkaCluster.Spec.CruiseControlConfig.CruiseControlEndpoint, kafkaCluster.Name)
	taskID, startedAt, scaleErr := cc.UpScaleCluster(brokerIds)
	if scaleErr != nil {
		log.Info("Cannot upscale broker(s)", "brokerId(s)", brokerIds, "error", scaleErr.Error())
		return errorfactory.New(errorfactory.CruiseControlNotReady{}, scaleErr, fmt.Sprintf("broker id(s): %s", brokerIds))
	}
	runningState := v1beta1.GracefulActionState{
		CruiseControlTaskId: taskID,
		CruiseControlState:  v1beta1.GracefulUpscaleRunning,
		TaskStarted:         startedAt,
	}
	if statusErr := k8sutil.UpdateBrokerStatus(r.Client, brokerIds, kafkaCluster, runningState, log); statusErr != nil {
		return errors.WrapIfWithDetails(statusErr, "could not update status for broker", "id(s)", brokerIds)
	}
	return nil
}
// handlePodDeleteCCTask asks Cruise Control to downscale the given brokers and
// marks them GracefulDownscaleRunning in the cluster status.
func (r *CruiseControlTaskReconciler) handlePodDeleteCCTask(kafkaCluster *v1beta1.KafkaCluster, brokerIds []string, log logr.Logger) error {
	cc := scale.NewCruiseControlScaler(kafkaCluster.Namespace, kafkaCluster.Spec.GetKubernetesClusterDomain(), kafkaCluster.Spec.CruiseControlConfig.CruiseControlEndpoint, kafkaCluster.Name)
	taskID, startedAt, err := cc.DownsizeCluster(brokerIds)
	if err != nil {
		log.Info("cruise control communication error during downscaling broker(s)", "id(s)", brokerIds)
		return errorfactory.New(errorfactory.CruiseControlNotReady{}, err, fmt.Sprintf("broker(s) id(s): %s", brokerIds))
	}
	runningState := v1beta1.GracefulActionState{
		CruiseControlTaskId: taskID,
		CruiseControlState:  v1beta1.GracefulDownscaleRunning,
		TaskStarted:         startedAt,
	}
	if err = k8sutil.UpdateBrokerStatus(r.Client, brokerIds, kafkaCluster, runningState, log); err != nil {
		return errors.WrapIfWithDetails(err, "could not update status for broker(s)", "id(s)", brokerIds)
	}
	return nil
}
// checkCCTaskState polls Cruise Control for the status of the single task
// shared by the given brokers and updates each broker's GracefulActionState:
// completed tasks are marked Complete, failed/missing tasks are rescheduled by
// resetting the state to the matching "...Required" value, and tasks running
// longer than the configured duration are killed and rescheduled. It returns
// a typed errorfactory error so the caller can pick a requeue interval.
func (r *CruiseControlTaskReconciler) checkCCTaskState(kafkaCluster *v1beta1.KafkaCluster, brokersState map[string]v1beta1.BrokerState, log logr.Logger) error {
	if len(brokersState) == 0 {
		return nil
	}
	// a CC task may run for one or multiple brokers (e.g add_broker for multiple brokers)
	// check that all brokers that we check the CC task status for have the same task id
	var ccTaskId string
	for _, brokerState := range brokersState {
		if ccTaskId == "" {
			ccTaskId = brokerState.GracefulActionState.CruiseControlTaskId
		} else if ccTaskId != brokerState.GracefulActionState.CruiseControlTaskId {
			return errors.New("multiple CC task ids found")
		}
	}
	if ccTaskId == "" {
		return errors.New("no CC task id provided to be checked")
	}
	// check cc task status
	cc := scale.NewCruiseControlScaler(kafkaCluster.Namespace, kafkaCluster.Spec.GetKubernetesClusterDomain(), kafkaCluster.Spec.CruiseControlConfig.CruiseControlEndpoint, kafkaCluster.Name)
	status, err := cc.GetCCTaskState(ccTaskId)
	if err != nil {
		log.Info("Cruise control communication error checking running task", "taskId", ccTaskId)
		return errorfactory.New(errorfactory.CruiseControlNotReady{}, err, "cc communication error")
	}
	if status == v1beta1.CruiseControlTaskNotFound || status == v1beta1.CruiseControlTaskCompletedWithError {
		// CC task failed or not found in CC,
		// reschedule it by marking broker CruiseControlState= GracefulUpscaleRequired or GracefulDownscaleRequired
		var brokerIds []string
		requiredBrokerCCState := make(map[string]v1beta1.GracefulActionState, len(brokersState))
		for brokerId, brokerState := range brokersState {
			requiredCCState, err := r.getCorrectRequiredCCState(brokerState.GracefulActionState.CruiseControlState)
			if err != nil {
				return err
			}
			brokerIds = append(brokerIds, brokerId)
			requiredBrokerCCState[brokerId] = v1beta1.GracefulActionState{
				CruiseControlState:  requiredCCState,
				ErrorMessage:        "Previous cc task status invalid",
				CruiseControlTaskId: ccTaskId,
			}
		}
		err = k8sutil.UpdateBrokerStatus(r.Client, brokerIds, kafkaCluster, requiredBrokerCCState, log)
		if err != nil {
			return errors.WrapIfWithDetails(err, "could not update status for broker(s)", "id(s)", strings.Join(brokerIds, ","))
		}
		// NOTE(review): err is nil at this point, so the factory wraps a nil
		// cause — the typed error alone drives the caller's requeue handling.
		return errorfactory.New(errorfactory.CruiseControlTaskFailure{}, err, "CC task failed", fmt.Sprintf("cc task id: %s", ccTaskId))
	}
	if status == v1beta1.CruiseControlTaskCompleted {
		// cc task completed successfully
		var brokerIds []string
		completedBrokerCCState := make(map[string]v1beta1.GracefulActionState, len(brokersState))
		for brokerId, brokerState := range brokersState {
			brokerIds = append(brokerIds, brokerId)
			completedBrokerCCState[brokerId] = v1beta1.GracefulActionState{
				CruiseControlState:  brokerState.GracefulActionState.CruiseControlState.Complete(),
				TaskStarted:         brokerState.GracefulActionState.TaskStarted,
				CruiseControlTaskId: brokerState.GracefulActionState.CruiseControlTaskId,
			}
		}
		err = k8sutil.UpdateBrokerStatus(r.Client, brokerIds, kafkaCluster, completedBrokerCCState, log)
		if err != nil {
			return errors.WrapIfWithDetails(err, "could not update status for broker(s)", "id(s)", strings.Join(brokerIds, ","))
		}
		return nil
	}
	// Task is still in progress: collect brokers whose task has exceeded the
	// configured duration so the task can be killed and rescheduled.
	var brokersWithTimedOutCCTask []string
	timedOutBrokerCCState := make(map[string]v1beta1.GracefulActionState)
	for brokerId, brokerState := range brokersState {
		if brokerState.GracefulActionState.CruiseControlState.IsRunningState() {
			parsedTime, err := ccutils.ParseTimeStampToUnixTime(brokerState.GracefulActionState.TaskStarted)
			if err != nil {
				return errors.WrapIf(err, "could not parse timestamp")
			}
			// time.Since is the idiomatic form of time.Now().Sub (staticcheck S1012).
			if time.Since(parsedTime).Minutes() > kafkaCluster.Spec.CruiseControlConfig.CruiseControlTaskSpec.GetDurationMinutes() {
				brokersWithTimedOutCCTask = append(brokersWithTimedOutCCTask, brokerId)
				requiredCCState, err := r.getCorrectRequiredCCState(brokerState.GracefulActionState.CruiseControlState)
				if err != nil {
					return err
				}
				timedOutBrokerCCState[brokerId] = v1beta1.GracefulActionState{
					CruiseControlState:  requiredCCState,
					CruiseControlTaskId: brokerState.GracefulActionState.CruiseControlTaskId,
					ErrorMessage:        "Timed out waiting for the task to complete",
					TaskStarted:         brokerState.GracefulActionState.TaskStarted,
				}
			}
		}
	}
	// task timed out
	if len(brokersWithTimedOutCCTask) > 0 {
		log.Info("Killing Cruise control task", "taskId", ccTaskId)
		cc := scale.NewCruiseControlScaler(kafkaCluster.Namespace, kafkaCluster.Spec.GetKubernetesClusterDomain(), kafkaCluster.Spec.CruiseControlConfig.CruiseControlEndpoint, kafkaCluster.Name)
		err = cc.KillCCTask()
		if err != nil {
			return errorfactory.New(errorfactory.CruiseControlNotReady{}, err, "cc communication error")
		}
		err = k8sutil.UpdateBrokerStatus(r.Client, brokersWithTimedOutCCTask, kafkaCluster, timedOutBrokerCCState, log)
		if err != nil {
			return errors.WrapIfWithDetails(err, "could not update status for broker(s)", "id(s)", strings.Join(brokersWithTimedOutCCTask, ","))
		}
		return errorfactory.New(errorfactory.CruiseControlTaskTimeout{}, errors.New("cc task timed out"), fmt.Sprintf("cc task id: %s", ccTaskId))
	}
	// cc task still in progress
	log.Info("Cruise control task is still running", "taskId", ccTaskId)
	return errorfactory.New(errorfactory.CruiseControlTaskRunning{}, errors.New("cc task is still running"), fmt.Sprintf("cc task id: %s", ccTaskId))
}
// getCorrectRequiredCCState returns the correct Required CC state based on that we upscale or downscale,
// so a failed or timed-out operation can be rescheduled from scratch.
func (r *CruiseControlTaskReconciler) getCorrectRequiredCCState(ccState kafkav1beta1.CruiseControlState) (kafkav1beta1.CruiseControlState, error) {
	switch {
	case ccState.IsDownscale():
		return kafkav1beta1.GracefulDownscaleRequired, nil
	case ccState.IsUpscale():
		return kafkav1beta1.GracefulUpscaleRequired, nil
	default:
		return ccState, errors.NewWithDetails("could not determine if cruise control state is upscale or downscale", "ccState", ccState)
	}
}
//TODO merge with checkCCTaskState into one func (hi-im-aren)
// checkVolumeCCTaskState checks the disk rebalance Cruise Control task shared
// by the given broker volumes and updates the broker statuses accordingly:
//   - task not found / completed with error: volumes re-marked GracefulDiskRebalanceRequired
//   - task completed: volumes marked GracefulDiskRebalanceSucceeded
//   - task running past the configured timeout: the CC task is killed and the
//     affected volumes are re-marked GracefulDiskRebalanceRequired
//   - otherwise a CruiseControlTaskRunning error is returned so the caller retries
func (r *CruiseControlTaskReconciler) checkVolumeCCTaskState(kafkaCluster *v1beta1.KafkaCluster, brokersVolumesState map[string]map[string]v1beta1.VolumeState, log logr.Logger) error {
	if len(brokersVolumesState) == 0 {
		return nil
	}
	// All volumes are expected to reference the same disk rebalance task id.
	var ccTaskId string
	for _, brokerVolumesState := range brokersVolumesState {
		for _, volumeState := range brokerVolumesState {
			if ccTaskId == "" {
				ccTaskId = volumeState.CruiseControlTaskId
			} else if ccTaskId != volumeState.CruiseControlTaskId {
				return errors.New("multiple rebalance disk CC task ids found")
			}
		}
	}
	if ccTaskId == "" {
		return errors.New("no CC task id provided to be checked")
	}
	// check cc task status
	cc := scale.NewCruiseControlScaler(kafkaCluster.Namespace, kafkaCluster.Spec.GetKubernetesClusterDomain(), kafkaCluster.Spec.CruiseControlConfig.CruiseControlEndpoint, kafkaCluster.Name)
	status, err := cc.GetCCTaskState(ccTaskId)
	if err != nil {
		log.Info("Cruise control communication error checking running task", "taskId", ccTaskId)
		return errorfactory.New(errorfactory.CruiseControlNotReady{}, err, "cc communication error")
	}
	if status == v1beta1.CruiseControlTaskNotFound || status == v1beta1.CruiseControlTaskCompletedWithError {
		// CC task failed or not found in CC,
		// reschedule it by marking volume CruiseControlVolumeState=GracefulDiskRebalanceRequired
		var brokerIds []string
		requiredBrokerVolumesCCState := make(map[string]map[string]v1beta1.VolumeState, len(brokersVolumesState))
		for brokerId, volumesState := range brokersVolumesState {
			brokerIds = append(brokerIds, brokerId)
			requiredVolumesState := make(map[string]v1beta1.VolumeState, len(volumesState))
			// BUG FIX: a previous version shadowed volumesState with the freshly
			// created (empty) map here, so this loop never ran and no volume was
			// ever re-marked for rebalance.
			for mountPath := range volumesState {
				requiredVolumesState[mountPath] = kafkav1beta1.VolumeState{
					CruiseControlVolumeState: v1beta1.GracefulDiskRebalanceRequired,
					ErrorMessage:             "Previous disk rebalance cc task status invalid",
				}
			}
			requiredBrokerVolumesCCState[brokerId] = requiredVolumesState
		}
		err = k8sutil.UpdateBrokerStatus(r.Client, brokerIds, kafkaCluster, requiredBrokerVolumesCCState, log)
		if err != nil {
			return errors.WrapIfWithDetails(err, "could not update status for broker volume(s)", "id(s)", strings.Join(brokerIds, ","))
		}
		return errorfactory.New(errorfactory.CruiseControlTaskFailure{}, err, "CC task failed", fmt.Sprintf("cc task id: %s", ccTaskId))
	}
	if status == v1beta1.CruiseControlTaskCompleted {
		// cc task completed successfully
		var brokerIds []string
		rebalanceCompletedVolumesState := make(map[string]map[string]v1beta1.VolumeState, len(brokersVolumesState))
		for brokerId, volumesState := range brokersVolumesState {
			brokerIds = append(brokerIds, brokerId)
			volumesStateSucceeded := make(map[string]v1beta1.VolumeState, len(volumesState))
			for mountPath, volumeState := range volumesState {
				volumesStateSucceeded[mountPath] = kafkav1beta1.VolumeState{
					CruiseControlVolumeState: v1beta1.GracefulDiskRebalanceSucceeded,
					TaskStarted:              volumeState.TaskStarted,
					CruiseControlTaskId:      volumeState.CruiseControlTaskId,
				}
			}
			rebalanceCompletedVolumesState[brokerId] = volumesStateSucceeded
		}
		err = k8sutil.UpdateBrokerStatus(r.Client, brokerIds, kafkaCluster, rebalanceCompletedVolumesState, log)
		if err != nil {
			return errors.WrapIfWithDetails(err, "could not update status for broker(s)", "id(s)", strings.Join(brokerIds, ","))
		}
		return nil
	}
	// Collect volumes whose running rebalance exceeded the configured timeout.
	var brokersWithTimedOutCCTask []string
	brokersVolumesStateWithTimedOutDiskCCTask := make(map[string]map[string]v1beta1.VolumeState)
	for brokerId, volumesState := range brokersVolumesState {
		volumesStateWithTimedOutDiskCCTask := make(map[string]v1beta1.VolumeState)
		for mountPath, volumeState := range volumesState {
			if volumeState.CruiseControlVolumeState == v1beta1.GracefulDiskRebalanceRunning {
				parsedTime, err := ccutils.ParseTimeStampToUnixTime(volumeState.TaskStarted)
				if err != nil {
					return errors.WrapIf(err, "could not parse timestamp")
				}
				if time.Since(parsedTime).Minutes() > kafkaCluster.Spec.CruiseControlConfig.CruiseControlTaskSpec.GetDurationMinutes() {
					volumesStateWithTimedOutDiskCCTask[mountPath] = kafkav1beta1.VolumeState{
						CruiseControlVolumeState: v1beta1.GracefulDiskRebalanceRequired,
						CruiseControlTaskId:      volumeState.CruiseControlTaskId,
						ErrorMessage:             "Timed out waiting for the disk rebalance cc task to complete",
						TaskStarted:              volumeState.TaskStarted,
					}
				}
			}
		}
		if len(volumesStateWithTimedOutDiskCCTask) > 0 {
			brokersWithTimedOutCCTask = append(brokersWithTimedOutCCTask, brokerId)
			brokersVolumesStateWithTimedOutDiskCCTask[brokerId] = volumesStateWithTimedOutDiskCCTask
		}
	}
	// task timed out
	if len(brokersWithTimedOutCCTask) > 0 {
		log.Info("Killing Cruise control task", "taskId", ccTaskId)
		cc := scale.NewCruiseControlScaler(kafkaCluster.Namespace, kafkaCluster.Spec.GetKubernetesClusterDomain(), kafkaCluster.Spec.CruiseControlConfig.CruiseControlEndpoint, kafkaCluster.Name)
		err = cc.KillCCTask()
		if err != nil {
			return errorfactory.New(errorfactory.CruiseControlNotReady{}, err, "cc communication error")
		}
		err = k8sutil.UpdateBrokerStatus(r.Client, brokersWithTimedOutCCTask, kafkaCluster, brokersVolumesStateWithTimedOutDiskCCTask, log)
		if err != nil {
			return errors.WrapIfWithDetails(err, "could not update status for broker(s)", "id(s)", strings.Join(brokersWithTimedOutCCTask, ","))
		}
		return errorfactory.New(errorfactory.CruiseControlTaskTimeout{}, errors.New("cc task timed out"), fmt.Sprintf("cc task id: %s", ccTaskId))
	}
	// cc task still in progress
	log.Info("Cruise control task is still running", "taskId", ccTaskId)
	return errorfactory.New(errorfactory.CruiseControlTaskRunning{}, errors.New("cc task is still running"), fmt.Sprintf("cc task id: %s", ccTaskId))
}
// SetupCruiseControlWithManager registers cruise control controller to the manager.
func SetupCruiseControlWithManager(mgr ctrl.Manager) *ctrl.Builder {
	builder := ctrl.NewControllerManagedBy(mgr).For(&kafkav1beta1.KafkaCluster{}).Named("CruiseControl")
	builder.WithEventFilter(
		predicate.Funcs{
			UpdateFunc: func(e event.UpdateEvent) bool {
				object, err := meta.Accessor(e.ObjectNew)
				if err != nil {
					return false
				}
				// Non-KafkaCluster objects always pass the filter.
				if _, ok := object.(*v1beta1.KafkaCluster); !ok {
					return true
				}
				oldCluster := e.ObjectOld.(*v1beta1.KafkaCluster)
				newCluster := e.ObjectNew.(*v1beta1.KafkaCluster)
				// Reconcile only when broker state, deletion timestamp or
				// generation actually changed.
				return !reflect.DeepEqual(oldCluster.Status.BrokersState, newCluster.Status.BrokersState) ||
					oldCluster.GetDeletionTimestamp() != newCluster.GetDeletionTimestamp() ||
					oldCluster.GetGeneration() != newCluster.GetGeneration()
			},
		})
	return builder
}
|
// Copyright 2018 Diego Bernardes. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flare
import (
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestPaginationValid exercises Pagination.Valid with valid and invalid
// limit/offset combinations using GoConvey's BDD-style assertions.
func TestPaginationValid(t *testing.T) {
	Convey("Feature: Validate the Pagination", t, func() {
		Convey("Given a list of valid paginations", func() {
			// Zero values, positive limits and positive offsets are all valid.
			tests := []Pagination{
				{},
				{Limit: 1},
				{Offset: 1},
				{Limit: 30, Offset: 10},
			}
			Convey("Should not return a error", func() {
				for _, tt := range tests {
					So(tt.Valid(), ShouldBeNil)
				}
			})
		})
		Convey("Given a list of invalid paginations", func() {
			// Negative limits or offsets are expected to be rejected by Valid.
			tests := []struct {
				title string
				pagination Pagination
			}{
				{
					"Should have a invalid offset (1)",
					Pagination{Offset: -1},
				},
				{
					"Should have a invalid offset (2)",
					Pagination{Limit: 1, Offset: -1},
				},
				{
					"Should have a invalid limit (1)",
					Pagination{Limit: -1},
				},
				{
					"Should have a invalid limit (2)",
					Pagination{Offset: 1, Limit: -1},
				},
			}
			// Each case is its own Convey block so failures are labeled by title.
			for _, tt := range tests {
				Convey(tt.title, func() {
					So(tt.pagination.Valid(), ShouldNotBeNil)
				})
			}
		})
	})
}
|
package sql
import (
"github.com/gremlinsapps/avocado_server/dal/model"
"log"
)
// AutoMigrate opens a database connection and lets GORM create or extend the
// tables for every registered domain model.
func AutoMigrate() {
	conn := Connect()
	log.Print("AutoMigrating db.")
	// NOTE(review): gorm's AutoMigrate adds missing tables/columns but never
	// drops or alters existing ones — confirm that is the intended behavior.
	conn.db.AutoMigrate(
		&dalmodel.Hashtag{},
		&dalmodel.Notification{},
		&dalmodel.User{},
		&dalmodel.Clinic{},
		&dalmodel.Chat{},
		&dalmodel.Message{},
		&dalmodel.Measurement{},
		&dalmodel.MeasurementResult{},
		&dalmodel.Ingredient{},
		&dalmodel.Recipe{},
		&dalmodel.Portion{},
		&dalmodel.PortionType{},
		&dalmodel.Reply{},
		&dalmodel.Post{},
		&dalmodel.Resource{},
	)
	log.Print("Done AutoMigrating db.")
}
|
package main
import (
"backend/internal/adapters/httpapi"
"backend/internal/adapters/httpapi/gameapi"
"backend/internal/adapters/httpapi/gamesapi"
"backend/internal/adapters/httpapi/sessions"
"backend/internal/adapters/httpapi/signupapi"
"backend/internal/adapters/inmemoryrepo"
"backend/internal/domain"
"backend/internal/usecase/createplayerusecase"
"backend/internal/usecase/findgameusecase"
"backend/internal/usecase/startgameusecase"
"log"
"net/url"
)
// main wires the in-memory repositories, use cases and HTTP adapters together
// and starts the API server.
func main() {
	// Repositories
	playerRepository := inmemoryrepo.NewPlayerRepository()
	gameRepository := inmemoryrepo.NewGameRepository()
	// Domain
	playerIdGenerator := domain.NewUUIDPlayerIdGenerator()
	// Use cases
	// BUG FIX: findGameUseCase previously received a brand-new in-memory
	// repository, so games created through startGameUseCase could never be
	// found. All use cases must share the same gameRepository instance.
	findGameUseCase := findgameusecase.New(gameRepository)
	createPlayerUseCase := createplayerusecase.New(playerIdGenerator, playerRepository)
	startGameUseCase := startgameusecase.New(gameRepository)
	// Http
	baseUrl, _ := url.Parse("http://") // todo
	playersApi := signupapi.New(createPlayerUseCase, sessions.NewFakeStore()) // todo
	gamesApi := gamesapi.NewGamesApi(baseUrl, startGameUseCase)
	gameApi := gameapi.NewGameApi(findGameUseCase)
	serverFactory := httpapi.NewServerFactory(playersApi, gamesApi, gameApi)
	server := serverFactory.NewServer()
	log.Printf("Starting to listen, addr: %s", server.Addr)
	log.Fatal(server.ListenAndServe())
}
|
package main
import "fmt"
// Any is shorthand for an arbitrary value (empty interface).
type Any interface{}

// State is the contract a finite-state-machine state must implement.
type State interface {
	Begin(a Any)
	End(a Any)
	Update(a Any)
}

// Vector2 is a 2D position.
type Vector2 struct {
	x float64
	y float64
}

// Vector3 extends Vector2 with a z coordinate.
type Vector3 struct {
	Vector2
	z float64
}

// MapObject is anything placed on the map: a 3D position plus a facing direction.
type MapObject struct {
	Vector3
	dir uint16
}

// State identifiers; state_sizeof doubles as the number of states.
const (
	state_init = iota
	state_battle
	state_sizeof
)

// Monster is a map object driven by the state machine below.
type Monster struct {
	MapObject
	state int
	id int
}
// StateInit is the FSM state a monster starts in.
type StateInit struct{}

// Begin logs entry into the init state.
func (s *StateInit) Begin(a Any) {
	fmt.Println("init begin")
}

// Update logs an init-state tick.
func (s *StateInit) Update(a Any) {
	fmt.Println("init update")
}

// End logs exit from the init state.
func (s *StateInit) End(a Any) {
	fmt.Println("init end")
}
// StateBattle is the FSM state representing combat.
type StateBattle struct{}

// Begin logs entry into the battle state.
func (s *StateBattle) Begin(a Any) {
	fmt.Println("battle begin")
}

// Update logs a battle-state tick.
func (s *StateBattle) Update(a Any) {
	fmt.Println("battle update")
}

// End moves the monster to a fixed position and logs the transition.
func (s *StateBattle) End(a Any) {
	monster := a.(*Monster)
	monster.x = 100
	monster.z = 101
	fmt.Println(a)
	fmt.Println("battle end")
}
// StateMachine holds the currently active State and drives transitions.
type StateMachine struct {
	current State
}

// GetState returns the active state (nil when none has been set).
func (sm *StateMachine) GetState() State {
	return sm.current
}

// _SetState swaps the active state without firing Begin/End callbacks.
func (sm *StateMachine) _SetState(state State) {
	sm.current = state
}

// SetState ends the current state (if any), installs the new one and begins it.
func (sm *StateMachine) SetState(state State, a Any) {
	if sm.current != nil {
		sm.current.End(a)
	}
	sm._SetState(state)
	if sm.current != nil {
		sm.current.Begin(a)
	}
}

// Update forwards the tick to the active state, if any.
func (sm *StateMachine) Update(a Any) {
	if sm.current != nil {
		sm.current.Update(a)
	}
}
// StateManager pairs a StateMachine with an index-addressable state table.
type StateManager struct {
	sm StateMachine
	state int
	state_table []State
}

// Add registers state under index idx.
func (m *StateManager) Add(idx int, state State) {
	m.state_table[idx] = state
}

// Update ticks the underlying state machine.
func (m *StateManager) Update(a Any) {
	m.sm.Update(a)
}

// GetState returns the index of the active state.
func (m *StateManager) GetState() int {
	return m.state
}

// SetState switches to the state registered under idx, firing End/Begin hooks.
func (m *StateManager) SetState(idx int, a Any) {
	m.state = idx
	m.sm.SetState(m.state_table[idx], a)
}
// NewStateManager returns a manager with room for max states.
func NewStateManager(max int) *StateManager {
	return &StateManager{
		state:       0,
		sm:          StateMachine{},
		state_table: make([]State, max),
	}
}
// main builds a two-state FSM and drives a monster through init and battle.
func main() {
	monster := Monster{}
	fsm := NewStateManager(state_sizeof)
	fsm.Add(state_init, &StateInit{})
	fsm.Add(state_battle, &StateBattle{})
	fsm.SetState(state_init, &monster)
	fsm.Update(&monster)
	fsm.SetState(state_battle, &monster)
	fsm.Update(&monster)
}
|
package models
import "time"
// Db stores the connection details of a user-registered database.
// NOTE(review): Password appears to be persisted as-is — confirm whether it
// should be encrypted at rest.
type Db struct {
	Id int `gorm:"column:id"`
	Name string `gorm:"column:name"`
	DBSchema string `gorm:"column:dbschema"`
	Host string `gorm:"column:host"`
	Username string `gorm:"column:username"`
	Password string `gorm:"column:password"`
	Port string `gorm:"column:port"`
	UserId int `gorm:"column:userId"`
	CreatedAt time.Time `gorm:"column:created_at"`
	UpdatedAt time.Time `gorm:"column:updated_at"`
}

// Dbs is a collection of Db records.
type Dbs []Db
|
package main
import (
	"fmt"
	"log"
	"net/http"
)
// main registers the search handler and serves HTTP on :8080.
func main() {
	http.HandleFunc("/", foo)
	// log.Fatal surfaces listener startup errors (e.g. port already in use),
	// which were previously silently discarded.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
// visit http://localhost:8080?q=abc

// foo echoes the "q" parameter back to the client. FormValue reads the URL
// query and, for POST/PUT requests, the form body as well.
func foo(res http.ResponseWriter, req *http.Request) {
	q := req.FormValue("q")
	fmt.Fprintf(res, "Do my search: %v ?", q)
}
|
//Incompatible function return type
package main;

func main () {
}

// pain returns a bool literal from a string-returning function.
// NOTE(review): per the header comment this file appears to be an intentionally
// invalid fixture (it does not compile); left unchanged on purpose.
func pain () string {
	return true;
}
|
package main
import (
"bytes"
"io"
"log"
"os"
)
// cancels reports whether x and y are the same letter in opposite cases
// (e.g. 'a' and 'A'), i.e. a reacting polymer pair.
func cancels(x, y byte) bool {
	lo, hi := x, y
	if lo > hi {
		lo, hi = hi, lo
	}
	// An upper/lower case ASCII pair differs by exactly 32, with the smaller
	// byte being the uppercase letter ('A'..'Z').
	return hi-lo == 32 && lo >= 'A' && lo <= 'Z'
}
func polyLength(r io.ByteReader, lc, uc byte) int {
var bs []byte
var b byte
var err error
for b, err = r.ReadByte(); err == nil; b, err = r.ReadByte() {
if b == lc || b == uc || b < 65 || (b > 90 && b < 97) || b > 122 {
continue
}
bs = append(bs, b)
l := len(bs)
if l > 1 && cancels(bs[l-2], bs[l-1]) {
bs = bs[:l-2]
}
}
return len(bs)
}
// main reads the polymer from stdin, prints its fully-reacted length, then the
// best length achievable by removing one unit type entirely.
func main() {
	buf := &bytes.Buffer{}
	if _, err := buf.ReadFrom(os.Stdin); err != nil {
		log.Fatalln("Unable to read input:", err)
	}
	r := bytes.NewReader(buf.Bytes())
	best := polyLength(r, 0, 0)
	log.Println("Polymer length:", best)
	// Try removing each letter pair ('A'/'a' .. 'Z'/'z') and keep the minimum.
	for upper := byte('A'); upper <= 'Z'; upper++ {
		r.Seek(0, io.SeekStart)
		if l := polyLength(r, upper, upper+32); l < best {
			best = l
		}
	}
	log.Println("Improved polymer length:", best)
}
|
package main
import (
"context"
"flag"
"fmt"
"io"
"log"
"math/rand"
pb "github.com/serhatcetinkaya/grpc-demo-app/proto/math"
"time"
"google.golang.org/grpc"
)
// sleepTime is a random startup delay in milliseconds (1000-4999).
// NOTE(review): package-level initializers run before main, so this value is
// drawn before rand.Seed below and is therefore identical on every run.
var sleepTime = rand.Intn(4000) + 1000

// main dials the Max streaming service and exchanges numbers with it: one
// goroutine streams random values up, one records the running maximum streamed
// back, and one watches for stream-context cancellation.
func main() {
	rand.Seed(time.Now().Unix())
	var host = flag.String("h", "localhost", "Address of the server")
	var port = flag.Int("p", 50005, "Port of the server")
	flag.Parse()
	serverAddr := fmt.Sprintf("%s:%d", *host, *port)
	// dial server
	time.Sleep(time.Duration(sleepTime) * time.Millisecond)
	conn, err := grpc.Dial(serverAddr, grpc.WithInsecure())
	if err != nil {
		log.Fatalf("can not connect with server %v", err)
	}
	// create stream
	client := pb.NewMathClient(conn)
	stream, err := client.Max(context.Background())
	if err != nil {
		log.Fatalf("openn stream error %v", err)
	}
	var max int32
	ctx := stream.Context()
	done := make(chan bool)
	// first goroutine sends random increasing numbers to stream
	go func() {
		for i := 1; i <= 20000000; i++ {
			// generate random nummber and send it to stream
			rnd := int32(rand.Intn(i))
			req := pb.Request{Num: rnd}
			if err := stream.Send(&req); err != nil {
				log.Fatalf("can not send %v", err)
			}
			log.Printf("%d sent", req.Num)
			time.Sleep(time.Second)
		}
		if err := stream.CloseSend(); err != nil {
			log.Println(err)
		}
	}()
	// second goroutine receives data from stream
	// and saves result in max variable
	//
	// if stream is finished it closes done channel
	go func() {
		for {
			resp, err := stream.Recv()
			if err == io.EOF {
				close(done)
				return
			}
			if err != nil {
				log.Fatalf("can not receive %v", err)
			}
			// NOTE(review): max is written here and read in main after <-done;
			// the close(done) happens-before relationship covers the EOF path.
			max = resp.Result
			log.Printf("new max %d received", max)
		}
	}()
	// third goroutine closes done channel
	// if context is done
	// NOTE(review): if the context ends while the receive goroutine is also
	// closing done, close would be called twice and panic — confirm intended.
	go func() {
		<-ctx.Done()
		if err := ctx.Err(); err != nil {
			log.Println(err)
		}
		close(done)
	}()
	<-done
	log.Printf("finished with max=%d", max)
}
|
package http
import (
"encoding/json"
"fmt"
"laravel-go/app/http/routes"
systemLog "laravel-go/app/service/system/log"
systemPolice "laravel-go/app/service/system/police"
"laravel-go/pkg/libs"
"net/http"
"runtime"
"strconv"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
)
// New creates and starts the HTTP service listening on the given port.
func New(port string) {
	e := echo.New()
	e.Debug = false
	// Register the API routes.
	routes.SetApiRoute(e)
	// Request logging for debugging:
	// e.Use(middleware.Logger())
	// Default panic recovery:
	// e.Use(middleware.Recover())
	e.Use(middleware.RecoverWithConfig(middleware.RecoverConfig{
		DisablePrintStack: true,
	}))
	// Custom error handling: captured panics/errors respond with HTTP 500.
	e.HTTPErrorHandler = HTTPErrorHandler
	// e.Start blocks until the server stops; report the error instead of
	// silently discarding it as before (e.g. when the port is already in use).
	e.Logger.Fatal(e.Start(port))
}
// HTTPErrorHandler is the custom echo error handler: it normalizes any error
// into an *echo.HTTPError, writes a JSON error response and records a log entry.
func HTTPErrorHandler(err error, c echo.Context) {
	he, ok := err.(*echo.HTTPError)
	if ok {
		// Unwrap a nested HTTPError when present.
		if he.Internal != nil {
			if herr, ok := he.Internal.(*echo.HTTPError); ok {
				he = herr
			}
		}
	} else {
		// Non-HTTP errors become a generic 500.
		he = &echo.HTTPError{
			Code: http.StatusInternalServerError,
			Message: http.StatusText(http.StatusInternalServerError),
		}
	}
	code := he.Code
	message := he.Message
	if m, ok := he.Message.(string); ok {
		message = echo.Map{"message": m, "error": err.Error()}
	}
	if !c.Response().Committed {
		// HEAD responses must not carry a body.
		if c.Request().Method == http.MethodHead {
			err = c.NoContent(he.Code)
		} else {
			libs.NewApi().Error(c, err, code, message)
		}
		// Record the error in the log.
		if err != nil {
			createHTTPErrorLog(c, err)
		}
	}
}
// createHTTPErrorLog persists an HTTP error (request context plus stack trace)
// and forwards an alert message to the alerting service.
func createHTTPErrorLog(c echo.Context, err error) {
	// Request parameters.
	req := c.Request()
	formParams, _ := c.FormParams()
	params, _ := json.Marshal(map[string]interface{}{
		"remote_ip":    c.RealIP(),
		"host":         req.Host,
		"uri":          req.RequestURI,
		"method":       req.Method,
		"path":         req.URL.Path,
		"protocol":     req.Proto,
		"user_agent":   req.UserAgent(),
		"query_params": c.QueryParams(),
		"form_params":  formParams,
	})
	// Error stack trace (2KB covers the relevant frames).
	stack := make([]byte, 2<<10)
	length := runtime.Stack(stack, true)
	message := fmt.Sprintf("[%v] %s\n", err, stack[:length])
	// Write the log entry.
	insertId, _ := systemLog.NewLogUpdateService().Create(string(params), message)
	// Guard the type assertion: the previous unchecked insertId.(uint) would
	// panic inside the error-handling path on an unexpected type. On mismatch
	// the alert is sent without an id.
	var newInsertId string
	if id, ok := insertId.(uint); ok {
		newInsertId = strconv.Itoa(int(id))
	}
	// Send the alert (the redundant string(newInsertId) conversion of an
	// already-string value was removed).
	msg := err.Error() + "-ID:" + newInsertId
	systemPolice.NewPoliceUpdateService().Send(msg)
}
|
package main
import (
"bytes"
"database/sql"
"fmt"
"html/template"
"regexp"
"sort"
"strings"
_ "github.com/lib/pq" // postgres
"github.com/pkg/errors"
)
// Queryer database/sql compatible query interface.
// It abstracts *sql.DB / *sql.Tx so the loaders below can run against either.
type Queryer interface {
	Exec(string, ...interface{}) (sql.Result, error)
	Query(string, ...interface{}) (*sql.Rows, error)
	QueryRow(string, ...interface{}) *sql.Row
}
// OpenDB opens a Postgres connection pool for the given connection string.
// Note that sql.Open validates arguments lazily; the first query performs the
// actual connect.
func OpenDB(connStr string) (*sql.DB, error) {
	db, err := sql.Open("postgres", connStr)
	if err != nil {
		return nil, errors.Wrap(err, "failed to connect to database")
	}
	return db, nil
}
// Column postgres columns.
// Column describes one column of a Postgres table as read from the catalog.
type Column struct {
	FieldOrdinal int            // 1-based position within the table
	Name         string         // column name
	Comment      sql.NullString // column comment (text after a tab is stripped)
	DataType     string
	DDLType      string
	NotNull      bool
	IsPrimaryKey bool
	IsForeignKey bool // set while resolving foreign keys
}
// ForeignKey foreign key.
// ForeignKey links a source table/column to the target table/column it
// references; the *Table / *Column pointers are resolved by LoadForeignKeyDef.
type ForeignKey struct {
	ConstraintName        string
	SourceTableName       string
	SourceColName         string
	IsSourceColPrimaryKey bool
	SourceTable           *Table
	SourceColumn          *Column
	TargetTableName       string
	TargetColName         string
	IsTargetColPrimaryKey bool
	TargetTable           *Table
	TargetColumn          *Column
}
// IsOneToOne returns true if one to one relation
// - in case of composite pk
// * one to one
// * source table is composite pk && target table is composite pk
// * source table fks to target table are all pks
// * other cases are one to many
func (k *ForeignKey) IsOneToOne() bool {
	switch {
	case k.SourceTable.IsCompositePK() && k.TargetTable.IsCompositePK():
		// Collect every FK of the source table that points at the same target.
		var targetFks []*ForeignKey
		for _, fk := range k.SourceTable.ForeingKeys {
			if fk.TargetTableName == k.TargetTableName {
				targetFks = append(targetFks, fk)
			}
		}
		// One-to-one only when every such FK column is a PK on both sides.
		for _, tfk := range targetFks {
			if !tfk.IsSourceColPrimaryKey || !tfk.IsTargetColPrimaryKey {
				return false
			}
		}
		return true
	case !k.SourceTable.IsCompositePK() && k.SourceColumn.IsPrimaryKey && k.TargetColumn.IsPrimaryKey:
		// Simple PK-to-PK reference.
		return true
	default:
		return false
	}
}
// Table postgres table.
// Table describes a Postgres table: its columns and outgoing foreign keys.
// NOTE: the misspelled ForeingKeys field name is part of the exported API and
// is kept for compatibility.
type Table struct {
	Schema      string
	Name        string
	Comment     sql.NullString
	AutoGenPk   bool
	Columns     []*Column
	ForeingKeys []*ForeignKey
}
// IsCompositePK reports whether the table's primary key spans at least two columns.
func (t *Table) IsCompositePK() bool {
	pkCount := 0
	for _, col := range t.Columns {
		if !col.IsPrimaryKey {
			continue
		}
		pkCount++
		if pkCount >= 2 {
			return true
		}
	}
	return false
}
// stripCommentSuffix returns the portion of s before the first tab character,
// or s unchanged when it contains no tab.
func stripCommentSuffix(s string) string {
	if i := strings.Index(s, "\t"); i >= 0 {
		return s[:i]
	}
	return s
}
// FindTableByName returns the first table named name and whether it was found.
func FindTableByName(tbls []*Table, name string) (*Table, bool) {
	for _, candidate := range tbls {
		if candidate.Name == name {
			return candidate, true
		}
	}
	return nil, false
}
// FindColumnByName returns the column colName of table tableName and whether
// it was found.
func FindColumnByName(tbls []*Table, tableName, colName string) (*Column, bool) {
	for _, tbl := range tbls {
		if tbl.Name != tableName {
			continue
		}
		for _, col := range tbl.Columns {
			if col.Name == colName {
				return col, true
			}
		}
	}
	return nil, false
}
// LoadColumnDef loads the Postgres column definitions of schema.table.
func LoadColumnDef(db Queryer, schema, table string) ([]*Column, error) {
	colDefs, err := db.Query(columDefSQL, schema, table)
	if err != nil {
		return nil, errors.Wrap(err, "failed to load table def")
	}
	// Close the result set even on early error returns (previously leaked).
	defer colDefs.Close()
	var cols []*Column
	for colDefs.Next() {
		var c Column
		// Check the scan error before touching the scanned values (the comment
		// suffix was previously stripped before the error check).
		if err := colDefs.Scan(
			&c.FieldOrdinal,
			&c.Name,
			&c.Comment,
			&c.DataType,
			&c.NotNull,
			&c.IsPrimaryKey,
			&c.DDLType,
		); err != nil {
			return nil, errors.Wrap(err, "failed to scan")
		}
		c.Comment.String = stripCommentSuffix(c.Comment.String)
		cols = append(cols, &c)
	}
	// Surface iteration errors that Next() swallowed.
	if err := colDefs.Err(); err != nil {
		return nil, errors.Wrap(err, "failed to iterate column defs")
	}
	return cols, nil
}
// LoadForeignKeyDef loads the Postgres foreign-key definitions of tbl and
// resolves the referenced tables/columns against tbls. As a side effect,
// source columns are flagged IsForeignKey.
func LoadForeignKeyDef(db Queryer, schema string, tbls []*Table, tbl *Table) ([]*ForeignKey, error) {
	fkDefs, err := db.Query(fkDefSQL, schema, tbl.Name)
	if err != nil {
		return nil, errors.Wrap(err, "failed to load fk def")
	}
	// Close the result set even on early error returns (previously leaked).
	defer fkDefs.Close()
	var fks []*ForeignKey
	for fkDefs.Next() {
		fk := ForeignKey{
			SourceTableName: tbl.Name,
			SourceTable:     tbl,
		}
		err := fkDefs.Scan(
			&fk.SourceColName,
			&fk.TargetTableName,
			&fk.TargetColName,
			&fk.ConstraintName,
			&fk.IsTargetColPrimaryKey,
			&fk.IsSourceColPrimaryKey,
		)
		if err != nil {
			return nil, err
		}
		fks = append(fks, &fk)
	}
	// Surface iteration errors that Next() swallowed.
	if err := fkDefs.Err(); err != nil {
		return nil, errors.Wrap(err, "failed to iterate fk defs")
	}
	// Resolve the referenced tables/columns now that scanning is done.
	for _, fk := range fks {
		targetTbl, found := FindTableByName(tbls, fk.TargetTableName)
		if !found {
			return nil, errors.Errorf("%s not found", fk.TargetTableName)
		}
		fk.TargetTable = targetTbl
		targetCol, found := FindColumnByName(tbls, fk.TargetTableName, fk.TargetColName)
		if !found {
			return nil, errors.Errorf("%s.%s not found", fk.TargetTableName, fk.TargetColName)
		}
		fk.TargetColumn = targetCol
		sourceCol, found := FindColumnByName(tbls, fk.SourceTableName, fk.SourceColName)
		if !found {
			return nil, errors.Errorf("%s.%s not found", fk.SourceTableName, fk.SourceColName)
		}
		sourceCol.IsForeignKey = true
		fk.SourceColumn = sourceCol
	}
	return fks, nil
}
// LoadTableDef loads every table definition in schema, including columns and
// (after all tables are known) foreign keys.
func LoadTableDef(db Queryer, schema string) ([]*Table, error) {
	tbDefs, err := db.Query(tableDefSQL, schema)
	if err != nil {
		return nil, errors.Wrap(err, "failed to load table def")
	}
	// Close the result set even on early error returns (previously leaked).
	defer tbDefs.Close()
	var tbls []*Table
	for tbDefs.Next() {
		t := &Table{Schema: schema}
		if err := tbDefs.Scan(
			&t.Name,
			&t.Comment,
		); err != nil {
			return nil, errors.Wrap(err, "failed to scan")
		}
		cols, err := LoadColumnDef(db, schema, t.Name)
		if err != nil {
			return nil, errors.Wrap(err, fmt.Sprintf("failed to get columns of %s", t.Name))
		}
		t.Columns = cols
		tbls = append(tbls, t)
	}
	// Surface iteration errors that Next() swallowed.
	if err := tbDefs.Err(); err != nil {
		return nil, errors.Wrap(err, "failed to iterate table defs")
	}
	// FK resolution needs the complete table list, so it runs in a second pass.
	for _, tbl := range tbls {
		fks, err := LoadForeignKeyDef(db, schema, tbls, tbl)
		if err != nil {
			return nil, errors.Wrap(err, fmt.Sprintf("failed to get fks of %s", tbl.Name))
		}
		tbl.ForeingKeys = fks
	}
	return tbls, nil
}
// TableToUMLEntry renders every table through the entry template and returns
// the concatenated UML source.
func TableToUMLEntry(tbls []*Table) ([]byte, error) {
	tpl, err := template.New("entry").Parse(entryTmpl)
	if err != nil {
		return nil, err
	}
	// A single buffer accumulates all entries; executions append in order.
	var out bytes.Buffer
	for _, tbl := range tbls {
		if err := tpl.Execute(&out, tbl); err != nil {
			return nil, errors.Wrapf(err, "failed to execute template: %s", tbl.Name)
		}
	}
	return out.Bytes(), nil
}
// ForeignKeyToUMLRelation renders every foreign key of every table through the
// relation template and returns the concatenated UML source.
func ForeignKeyToUMLRelation(tbls []*Table) ([]byte, error) {
	tpl, err := template.New("relation").Parse(relationTmpl)
	if err != nil {
		return nil, err
	}
	// A single buffer accumulates all relations; executions append in order.
	var out bytes.Buffer
	for _, tbl := range tbls {
		for _, fk := range tbl.ForeingKeys {
			if err := tpl.Execute(&out, fk); err != nil {
				return nil, errors.Wrapf(err, "failed to execute template: %s", fk.ConstraintName)
			}
		}
	}
	return out.Bytes(), nil
}
func contains(v string, r []*regexp.Regexp) bool {
for _, e := range r {
if e != nil && e.MatchString(v) {
return true
}
}
return false
}
// FilterTables filter tables.
// It returns the tables whose names match (match == true) or do not match
// (match == false) any of the given name patterns; each kept table's
// ForeingKeys list is filtered by the same criterion.
// NOTE(review): this mutates the ForeingKeys slices of the input tables and
// sorts tblNames in place — callers should not rely on either afterwards.
func FilterTables(match bool, tbls []*Table, tblNames []string) []*Table {
	sort.Strings(tblNames)
	var tblExps []*regexp.Regexp
	for _, tn := range tblNames {
		// Allow the name to appear with optional surrounding slash/backslash.
		str := fmt.Sprintf(`([\\/])?%s([\\/])?`, tn)
		r := regexp.MustCompile(str)
		tblExps = append(tblExps, r)
	}
	var target []*Table
	for _, tbl := range tbls {
		if contains(tbl.Name, tblExps) == match {
			// Keep only the FKs whose target also satisfies the filter.
			var fks []*ForeignKey
			for _, fk := range tbl.ForeingKeys {
				if contains(fk.TargetTableName, tblExps) == match {
					fks = append(fks, fk)
				}
			}
			tbl.ForeingKeys = fks
			target = append(target, tbl)
		}
	}
	return target
}
|
package main
import (
"fmt"
)
// partition2 returns every way to split s into palindromic substrings.
func partition2(s string) [][]string {
	res := make([][]string, 0)
	backTracking([]byte(s), []string{}, &res)
	return res
}
// backTracking extends temp with each palindromic prefix of s and recurses on
// the remainder; completed partitions (s exhausted) are copied into res.
func backTracking(s []byte, temp []string, res *[][]string) {
	if len(s) == 0 {
		// Deep-copy temp: the backing array is reused by later iterations.
		snapshot := make([]string, len(temp))
		copy(snapshot, temp)
		*res = append(*res, snapshot)
		return
	}
	for cut := 1; cut <= len(s); cut++ {
		prefix := string(s[:cut])
		if isPali(prefix) {
			backTracking(s[cut:], append(temp, prefix), res)
		}
	}
}
// isPali reports whether s reads the same forwards and backwards.
func isPali(s string) bool {
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		if s[i] != s[j] {
			return false
		}
	}
	return true
}
// main demonstrates palindrome partitioning on a sample input.
func main() {
	res := make([][]string, 0)
	input := []byte("cbbbcc")
	backTracking(input, []string{}, &res)
	fmt.Println(res)
}
|
package main
import (
"context"
"demo/grpc_test/proto/chat"
"demo/grpc_test/proto/helloworld"
"google.golang.org/grpc"
"io"
"log"
"sync/atomic"
"time"
)
// address is the gRPC server endpoint the client dials.
const (
	address = "localhost:50051"
	//address = "192.168.1.201:50051"
)

// count tallies completed unary RPCs across goroutines (updated atomically);
// conn is the shared client connection established in main.
var (
	count int64
	conn *grpc.ClientConn
)
// TestGreeter calls the unary SayHello RPC 100 times, logging each reply and
// its round-trip latency.
func TestGreeter() {
	//ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	//defer cancel()
	ctx := context.Background()
	client := helloworld.NewGreeterClient(conn)
	for i := 0; i < 100; i++ {
		begin := time.Now()
		reply, er := client.SayHello(ctx, &helloworld.HelloRequest{Name: "world"})
		if er != nil {
			log.Printf("did not connect:%v", er)
			return
		}
		// Log the value returned by AddInt64: re-reading the shared counter
		// would race with the other test goroutines that also update it.
		n := atomic.AddInt64(&count, 1)
		log.Printf("[%d] [%v] Greeting: %s", n, time.Since(begin), reply.Message)
	}
}
// TestMathService exercises the unary Add and Sub RPCs once each.
func TestMathService() {
	ctx := context.Background()
	client := helloworld.NewMathServiceClient(conn)
	{
		reply, er := client.Add(ctx, &helloworld.AddRequest{A: 10, B: 30})
		if er != nil {
			log.Printf("did not connect:%v", er)
			return
		}
		// Log the value returned by AddInt64: re-reading the shared counter
		// would race with the other test goroutines that also update it.
		n := atomic.AddInt64(&count, 1)
		log.Printf("Add: [%d] [%v]", n, reply.C)
	}
	{
		reply, er := client.Sub(ctx, &helloworld.SubRequest{A: 10, B: 100})
		if er != nil {
			log.Printf("did not connect:%v", er)
			return
		}
		n := atomic.AddInt64(&count, 1)
		log.Printf("Sub: [%d] [%v]", n, reply.C)
	}
}
// TestChat opens a bidirectional stream, floods it from three sender
// goroutines and logs every response until the stream ends.
func TestChat() {
	client := chat.NewChatClient(conn)
	stream, er := client.BidStream(context.Background())
	if er != nil {
		log.Println(er)
		return
	}
	var requestId int32 = 0
	// sender keeps sending messages with a unique, atomically assigned request
	// id. Each goroutine uses its own error variable: the previous version
	// shared `er` across all goroutines (a data race) and re-read `requestId`
	// non-atomically after incrementing it (also racy) — AddInt32's return
	// value is the correct unique id.
	sender := func(mainId int32, input string) {
		for {
			id := atomic.AddInt32(&requestId, 1)
			if err := stream.SendMsg(&chat.Request{MainId: mainId, SubId: 10000, RequestId: id, Input: input}); err != nil {
				log.Println(err)
				return
			}
			//time.Sleep(time.Second * time.Duration(1))
		}
	}
	go sender(1, "message-1")
	go sender(2, "message-2")
	go sender(3, "message-3")
	// Receive loop: runs until EOF or a receive error.
	for {
		var resp *chat.Response
		resp, er = stream.Recv()
		if er == io.EOF {
			log.Println("接收到服务端的结算信号", er)
			break
		}
		if er != nil {
			log.Println("接收数据错误", er)
			break
		}
		switch resp.MainId {
		case 1:
			log.Println("resp", resp)
		case 2:
			log.Println("resp", resp)
		case 3:
			log.Println("resp", resp)
		}
	}
}
// main dials the gRPC server, runs the unary test RPCs concurrently and then
// blocks in the bidirectional chat test.
func main() {
	var er error
	conn, er = grpc.Dial(address, grpc.WithInsecure())
	if er != nil {
		log.Fatalf("did not connect:%v", er)
	}
	defer func() {
		er = conn.Close()
	}()
	go TestGreeter()
	go TestMathService()
	TestChat()
	// Block forever so the detached goroutines keep running.
	// NOTE(review): because of this select{}, the deferred conn.Close never
	// runs and main never exits — confirm this is intentional.
	select {}
}
|
package gosnowth
import (
"bytes"
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"time"
)
// histogramTestData is a sample histogram read response: each element is
// [timestamp, period_seconds, {bucket: count}].
const histogramTestData = `[
	[
		1556290800,
		300,
		{
			"+23e-004": 1,
			"+85e-004": 1
		}
	],
	[
		1556291100,
		300,
		{
			"+22e-004": 1,
			"+23e-004": 2,
			"+30e-004": 1,
			"+39e-003": 1
		}
	]
]`

// histTestData is a sample histogram write payload/response with a single
// base64-encoded histogram entry.
const histTestData = `[
	{
		"account_id": 1,
		"metric": "example1",
		"id": "ae0f7f90-2a6b-481c-9cf5-21a31837020e",
		"check_name": "test",
		"offset": 1408724400,
		"period": 60,
		"histogram": "AAA="
	}
]`
// TestHistogramValueMarshaling round-trips histogramTestData through
// HistogramValue: decode, inspect fields, re-encode and compare.
func TestHistogramValueMarshaling(t *testing.T) {
	t.Parallel()
	v := []HistogramValue{}
	if err := json.NewDecoder(bytes.NewBufferString(
		histogramTestData)).Decode(&v); err != nil {
		t.Fatal(err)
	}
	if len(v) != 2 {
		t.Fatalf("Expected length: 2, got %v", len(v))
	}
	if v[0].Timestamp() != "1556290800" {
		t.Errorf("Expected timestamp: 1556290800, got: %v", v[0].Timestamp())
	}
	if v[0].Period.Seconds() != 300.0 {
		t.Errorf("Expected seconds: 300, got: %v", v[0].Period.Seconds())
	}
	if v[0].Data["+23e-004"] != 1 {
		t.Errorf("Expected data: 1, got: %v", v[0].Data["+23e-004"])
	}
	buf := &bytes.Buffer{}
	if err := json.NewEncoder(buf).Encode(&v); err != nil {
		t.Fatal(err)
	}
	// Encoding should reproduce the fixture modulo insignificant whitespace
	// (Encode appends a trailing newline).
	exp := strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(
		histogramTestData, " ", ""), "\n", ""), "\t", "") + "\n"
	if buf.String() != exp {
		t.Errorf("Expected JSON: %v, got: %v", exp, buf.String())
	}
}
// TestReadHistogramValues spins up a mock node and verifies that
// ReadHistogramValues parses the histogram fixture correctly.
func TestReadHistogramValues(t *testing.T) {
	t.Parallel()
	ms := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter,
		r *http.Request,
	) {
		if r.RequestURI == "/state" {
			_, _ = w.Write([]byte(stateTestData))
			return
		}
		if r.RequestURI == "/stats.json" {
			_, _ = w.Write([]byte(statsTestData))
			return
		}
		u := "/histogram/1556290800/1556291400/300/" +
			"ae0f7f90-2a6b-481c-9cf5-21a31837020e/example1"
		if strings.HasPrefix(r.RequestURI, u) {
			_, _ = w.Write([]byte(histogramTestData))
			return
		}
	}))
	defer ms.Close()
	sc, err := NewClient(context.Background(),
		&Config{Servers: []string{ms.URL}})
	if err != nil {
		t.Fatal("Unable to create snowth client", err)
	}
	u, err := url.Parse(ms.URL)
	if err != nil {
		t.Fatal("Invalid test URL")
	}
	node := &SnowthNode{url: u}
	res, err := sc.ReadHistogramValues(
		"ae0f7f90-2a6b-481c-9cf5-21a31837020e", "example1",
		300*time.Second, time.Unix(1556290800, 0),
		time.Unix(1556291200, 0), node)
	if err != nil {
		t.Fatal(err)
	}
	if len(res) != 2 {
		// The message previously claimed the expected length was 1 while the
		// assertion checks for 2.
		t.Fatalf("Expected length: 2, got: %v", len(res))
	}
	if res[0].Timestamp() != "1556290800" {
		t.Errorf("Expected timestamp: 1556290800, got: %v", res[0].Timestamp())
	}
	if res[0].Period.Seconds() != 300.0 {
		t.Errorf("Expected seconds: 300, got: %v", res[0].Period.Seconds())
	}
	if res[0].Data["+23e-004"] != 1 {
		t.Errorf("Expected data: 1, got: %v", res[0].Data["+23e-004"])
	}
}
// TestWriteHistogram verifies that WriteHistogram posts the decoded fixture to
// /histogram/write with the expected metric UUID.
func TestWriteHistogram(t *testing.T) {
	t.Parallel()
	ms := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter,
		r *http.Request,
	) {
		if r.RequestURI == "/state" {
			_, _ = w.Write([]byte(stateTestData))
			return
		}
		if r.RequestURI == "/stats.json" {
			_, _ = w.Write([]byte(statsTestData))
			return
		}
		if r.RequestURI == "/histogram/write" {
			// Validate the posted payload before replying.
			rb := []HistogramData{}
			if err := json.NewDecoder(r.Body).Decode(&rb); err != nil {
				w.WriteHeader(http.StatusInternalServerError)
				t.Error("Unable to decode JSON data")
				return
			}
			if len(rb) < 1 {
				w.WriteHeader(http.StatusInternalServerError)
				t.Error("Invalid request")
				return
			}
			exp := "ae0f7f90-2a6b-481c-9cf5-21a31837020e"
			if rb[0].ID != exp {
				w.WriteHeader(http.StatusInternalServerError)
				t.Errorf("Expected UUID: %v, got: %v", exp, rb[0].ID)
				return
			}
			_, _ = w.Write([]byte(histTestData))
			return
		}
	}))
	defer ms.Close()
	sc, err := NewClient(context.Background(),
		&Config{Servers: []string{ms.URL}})
	if err != nil {
		t.Fatal("Unable to create snowth client", err)
	}
	u, err := url.Parse(ms.URL)
	if err != nil {
		t.Fatal("Invalid test URL")
	}
	v := []HistogramData{}
	if err = json.NewDecoder(bytes.NewBufferString(
		histTestData)).Decode(&v); err != nil {
		t.Fatalf("Unable to encode JSON %v", err)
	}
	node := &SnowthNode{url: u}
	if err = sc.WriteHistogram(v, node); err != nil {
		t.Fatal(err)
	}
}
|
package main
import "fmt"
// main shows that one interface{} variable can hold values of different
// concrete types.
func main() {
	for _, what := range []interface{}{100, "Hippo"} {
		PrintWhat(what)
	}
}
// PrintWhat writes any value to standard output using fmt's default format.
func PrintWhat(value interface{}) {
	fmt.Println(value)
}
|
package ds
// Word struct concern.
// Word is a vocabulary entry: an English term (required) and its translation.
type Word struct {
	ID int64
	EN string `json:"EN" validate:"required"`
	CN string `json:"CN"`
}

// Pagination info.
// Pagination carries the requested page number for list endpoints.
type Pagination struct {
	Page int
}
|
package design
import (
. "github.com/goadesign/goa/design"
. "github.com/goadesign/goa/design/apidsl"
)
// The version resource exposes the currently running server version.
var _ = Resource("version", func() {
	DefaultMedia(ALMVersion)
	BasePath("/version")
	Action("show", func() {
		// Requires a JWT carrying the "system" scope.
		Security("jwt", func() {
			Scope("system")
		})
		Routing(
			GET(""),
		)
		Description("Show current running version")
		Response(OK)
	})
})
// The login resource handles authorization and (in dev mode) token generation.
var _ = Resource("login", func() {
	BasePath("/login")
	Action("authorize", func() {
		Routing(
			GET("authorize"),
		)
		Description("Authorize with the ALM")
		Response(OK, func() {
			Media(AuthToken)
		})
		Response(Unauthorized)
	})
	// Dev-only helper; see the Description below.
	Action("generate", func() {
		Routing(
			GET("generate"),
		)
		Description("Generates a set of Tokens for different Auth levels. NOT FOR PRODUCTION. Only available if server is running in dev mode")
		Response(OK, func() {
			Media(CollectionOf(AuthToken))
		})
		Response(Unauthorized)
	})
})
|
package some
import (
	"bufio"
	"fmt"
	"io"
	"log"
	"net"
)
// StarnEcho starts the blocking TCP echo server on :8080.
// NOTE(review): the name looks like a typo for "StartEcho", but renaming the
// exported function would break callers.
func StarnEcho() {
	serve()
}
// serve listens on :8080 and handles each accepted connection in its own
// goroutine.
func serve() {
	l, err := net.Listen("tcp", ":8080")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	for {
		c, err := l.Accept()
		if err != nil {
			// On Accept failure c is nil; the previous code called c.Close()
			// here, which panics with a nil connection.
			log.Println("accept:", err)
			continue
		}
		go handle(c)
	}
}
// handle echoes every byte received on c back to the client (and to stdout)
// until the connection closes or a read error occurs.
func handle(c net.Conn) {
	defer c.Close()
	fmt.Println(c.RemoteAddr())
	buffReader := bufio.NewReader(c)
	for {
		b, err := buffReader.ReadByte()
		if err != nil {
			// A client disconnect previously hit log.Fatal, terminating the
			// whole server; just drop this connection instead.
			if err != io.EOF {
				log.Println("read:", err)
			}
			return
		}
		fmt.Print(string(b))
		fmt.Fprintln(c, string(b))
	}
}
|
package controllers
import (
middleware "scholarship/middlewares"
"scholarship/models"
"github.com/astaxie/beego"
"fmt"
)
// Operations about object.
// ScholarshipController serves the scholarship matching endpoint.
type ScholarshipController struct {
	beego.Controller
}
// @Title Create
// @Description create object
// @Param sAddr query string true "The student address"
// @Param pAddr query string true "The project address"
// @Success 200 {string} models.Object.Id
// @Failure 403 body is empty
// @router / [get]
// Match compares the student's and project's files via the middleware and
// returns a JSON ApiResult describing the outcome.
func (s *ScholarshipController) Match() {
	var result models.ApiResult
	sAddr := s.GetString("sAddr")
	pAddr := s.GetString("pAddr")
	fmt.Println("sAddr "+sAddr+" pAddr"+pAddr)
	str := middleware.Comparefiles(beego.AppConfig.String("MTUrl"), pAddr, sAddr)
	if str == "failure" {
		result.Result = "false"
		result.Data = ""
		s.Data["json"] = result
	} else if str == "success" {
		result.Result = "true"
		result.Data = ""
		// BUG FIX: this branch previously wrote the bare string "success"
		// instead of the populated result struct, making the success response
		// shape inconsistent with the other branches.
		s.Data["json"] = result
	} else {
		result.Result = "false"
		result.Data = "fail to match"
		s.Data["json"] = result
	}
	s.ServeJSON()
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
orgpolicypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/orgpolicy/orgpolicy_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/orgpolicy"
)
// PolicyServer implements the gRPC interface for Policy. It is stateless;
// a fresh client is built per request from the supplied credentials.
type PolicyServer struct{}
// ProtoToOrgpolicyPolicySpec converts a PolicySpec object from its proto
// representation. Returns nil when p is nil.
func ProtoToOrgpolicyPolicySpec(p *orgpolicypb.OrgpolicyPolicySpec) *orgpolicy.PolicySpec {
	if p == nil {
		return nil
	}
	obj := &orgpolicy.PolicySpec{
		Etag:              dcl.StringOrNil(p.GetEtag()),
		UpdateTime:        dcl.StringOrNil(p.GetUpdateTime()),
		InheritFromParent: dcl.Bool(p.GetInheritFromParent()),
		Reset:             dcl.Bool(p.GetReset()),
	}
	// Each rule is dereferenced; the helper only returns nil for nil
	// input, and proto repeated fields never contain nil entries here.
	for _, r := range p.GetRules() {
		obj.Rules = append(obj.Rules, *ProtoToOrgpolicyPolicySpecRules(r))
	}
	return obj
}
// ProtoToOrgpolicyPolicySpecRules converts a PolicySpecRules object from
// its proto representation. Returns nil when p is nil.
func ProtoToOrgpolicyPolicySpecRules(p *orgpolicypb.OrgpolicyPolicySpecRules) *orgpolicy.PolicySpecRules {
	if p == nil {
		return nil
	}
	obj := &orgpolicy.PolicySpecRules{
		Values:    ProtoToOrgpolicyPolicySpecRulesValues(p.GetValues()),
		AllowAll:  dcl.Bool(p.GetAllowAll()),
		DenyAll:   dcl.Bool(p.GetDenyAll()),
		Enforce:   dcl.Bool(p.GetEnforce()),
		Condition: ProtoToOrgpolicyPolicySpecRulesCondition(p.GetCondition()),
	}
	return obj
}
// ProtoToOrgpolicyPolicySpecRulesValues converts a PolicySpecRulesValues
// object from its proto representation. Returns nil when p is nil.
func ProtoToOrgpolicyPolicySpecRulesValues(p *orgpolicypb.OrgpolicyPolicySpecRulesValues) *orgpolicy.PolicySpecRulesValues {
	if p == nil {
		return nil
	}
	// append onto a nil slice copies the source lists; when a source list
	// is empty the field stays nil, matching the element-wise original.
	return &orgpolicy.PolicySpecRulesValues{
		AllowedValues: append([]string(nil), p.GetAllowedValues()...),
		DeniedValues:  append([]string(nil), p.GetDeniedValues()...),
	}
}
// ProtoToOrgpolicyPolicySpecRulesCondition converts a
// PolicySpecRulesCondition object from its proto representation.
// Returns nil when p is nil.
func ProtoToOrgpolicyPolicySpecRulesCondition(p *orgpolicypb.OrgpolicyPolicySpecRulesCondition) *orgpolicy.PolicySpecRulesCondition {
	if p == nil {
		return nil
	}
	obj := &orgpolicy.PolicySpecRulesCondition{
		Expression:  dcl.StringOrNil(p.GetExpression()),
		Title:       dcl.StringOrNil(p.GetTitle()),
		Description: dcl.StringOrNil(p.GetDescription()),
		Location:    dcl.StringOrNil(p.GetLocation()),
	}
	return obj
}
// ProtoToPolicy converts a Policy resource from its proto representation.
// NOTE(review): unlike the nested converters, p is not nil-checked here —
// presumably the generated proto getters tolerate a nil receiver; confirm.
func ProtoToPolicy(p *orgpolicypb.OrgpolicyPolicy) *orgpolicy.Policy {
	obj := &orgpolicy.Policy{
		Name:   dcl.StringOrNil(p.GetName()),
		Spec:   ProtoToOrgpolicyPolicySpec(p.GetSpec()),
		Parent: dcl.StringOrNil(p.GetParent()),
	}
	return obj
}
// OrgpolicyPolicySpecToProto converts a PolicySpec object to its proto
// representation. Returns nil when o is nil.
func OrgpolicyPolicySpecToProto(o *orgpolicy.PolicySpec) *orgpolicypb.OrgpolicyPolicySpec {
	if o == nil {
		return nil
	}
	p := &orgpolicypb.OrgpolicyPolicySpec{}
	p.SetEtag(dcl.ValueOrEmptyString(o.Etag))
	p.SetUpdateTime(dcl.ValueOrEmptyString(o.UpdateTime))
	p.SetInheritFromParent(dcl.ValueOrEmptyBool(o.InheritFromParent))
	p.SetReset(dcl.ValueOrEmptyBool(o.Reset))
	sRules := make([]*orgpolicypb.OrgpolicyPolicySpecRules, len(o.Rules))
	for i, r := range o.Rules {
		// &r aliases the loop variable, but the callee only reads from it
		// and returns a fresh proto, so no stale pointer escapes.
		sRules[i] = OrgpolicyPolicySpecRulesToProto(&r)
	}
	p.SetRules(sRules)
	return p
}
// OrgpolicyPolicySpecRulesToProto converts a PolicySpecRules object to
// its proto representation. Returns nil when o is nil.
func OrgpolicyPolicySpecRulesToProto(o *orgpolicy.PolicySpecRules) *orgpolicypb.OrgpolicyPolicySpecRules {
	if o == nil {
		return nil
	}
	p := &orgpolicypb.OrgpolicyPolicySpecRules{}
	p.SetValues(OrgpolicyPolicySpecRulesValuesToProto(o.Values))
	p.SetAllowAll(dcl.ValueOrEmptyBool(o.AllowAll))
	p.SetDenyAll(dcl.ValueOrEmptyBool(o.DenyAll))
	p.SetEnforce(dcl.ValueOrEmptyBool(o.Enforce))
	p.SetCondition(OrgpolicyPolicySpecRulesConditionToProto(o.Condition))
	return p
}
// OrgpolicyPolicySpecRulesValuesToProto converts a PolicySpecRulesValues
// object to its proto representation. Returns nil when o is nil.
func OrgpolicyPolicySpecRulesValuesToProto(o *orgpolicy.PolicySpecRulesValues) *orgpolicypb.OrgpolicyPolicySpecRulesValues {
	if o == nil {
		return nil
	}
	p := &orgpolicypb.OrgpolicyPolicySpecRulesValues{}
	// copy replaces the original index loops; lengths and contents are
	// identical, and a zero-length source still yields a non-nil slice.
	sAllowedValues := make([]string, len(o.AllowedValues))
	copy(sAllowedValues, o.AllowedValues)
	p.SetAllowedValues(sAllowedValues)
	sDeniedValues := make([]string, len(o.DeniedValues))
	copy(sDeniedValues, o.DeniedValues)
	p.SetDeniedValues(sDeniedValues)
	return p
}
// OrgpolicyPolicySpecRulesConditionToProto converts a
// PolicySpecRulesCondition object to its proto representation.
// Returns nil when o is nil.
func OrgpolicyPolicySpecRulesConditionToProto(o *orgpolicy.PolicySpecRulesCondition) *orgpolicypb.OrgpolicyPolicySpecRulesCondition {
	if o == nil {
		return nil
	}
	p := &orgpolicypb.OrgpolicyPolicySpecRulesCondition{}
	p.SetExpression(dcl.ValueOrEmptyString(o.Expression))
	p.SetTitle(dcl.ValueOrEmptyString(o.Title))
	p.SetDescription(dcl.ValueOrEmptyString(o.Description))
	p.SetLocation(dcl.ValueOrEmptyString(o.Location))
	return p
}
// PolicyToProto converts a Policy resource to its proto representation.
// resource is assumed non-nil (callers pass freshly applied resources).
func PolicyToProto(resource *orgpolicy.Policy) *orgpolicypb.OrgpolicyPolicy {
	p := &orgpolicypb.OrgpolicyPolicy{}
	p.SetName(dcl.ValueOrEmptyString(resource.Name))
	p.SetSpec(OrgpolicyPolicySpecToProto(resource.Spec))
	p.SetParent(dcl.ValueOrEmptyString(resource.Parent))
	return p
}
// applyPolicy handles the gRPC request by passing it to the underlying
// Policy Apply() method, converting proto -> DCL resource -> proto.
func (s *PolicyServer) applyPolicy(ctx context.Context, c *orgpolicy.Client, request *orgpolicypb.ApplyOrgpolicyPolicyRequest) (*orgpolicypb.OrgpolicyPolicy, error) {
	p := ProtoToPolicy(request.GetResource())
	res, err := c.ApplyPolicy(ctx, p)
	if err != nil {
		return nil, err
	}
	r := PolicyToProto(res)
	return r, nil
}
// ApplyOrgpolicyPolicy handles the gRPC request by passing it to the
// underlying Policy Apply() method. (Doc comment corrected to match the
// exported name.)
func (s *PolicyServer) ApplyOrgpolicyPolicy(ctx context.Context, request *orgpolicypb.ApplyOrgpolicyPolicyRequest) (*orgpolicypb.OrgpolicyPolicy, error) {
	cl, err := createConfigPolicy(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyPolicy(ctx, cl, request)
}
// DeleteOrgpolicyPolicy handles the gRPC request by passing it to the
// underlying Policy Delete() method. An empty proto is returned together
// with whatever error Delete produced.
func (s *PolicyServer) DeleteOrgpolicyPolicy(ctx context.Context, request *orgpolicypb.DeleteOrgpolicyPolicyRequest) (*emptypb.Empty, error) {
	cl, err := createConfigPolicy(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeletePolicy(ctx, ProtoToPolicy(request.GetResource()))
}
// ListOrgpolicyPolicy handles the gRPC request by passing it to the
// underlying PolicyList() method, converting each returned resource to
// its proto form.
func (s *PolicyServer) ListOrgpolicyPolicy(ctx context.Context, request *orgpolicypb.ListOrgpolicyPolicyRequest) (*orgpolicypb.ListOrgpolicyPolicyResponse, error) {
	cl, err := createConfigPolicy(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListPolicy(ctx, request.GetParent())
	if err != nil {
		return nil, err
	}
	var protos []*orgpolicypb.OrgpolicyPolicy
	for _, r := range resources.Items {
		rp := PolicyToProto(r)
		protos = append(protos, rp)
	}
	p := &orgpolicypb.ListOrgpolicyPolicyResponse{}
	p.SetItems(protos)
	return p, nil
}
// createConfigPolicy builds an orgpolicy client authenticated with the
// given service-account credentials file. The error return is always nil
// today but kept for signature symmetry with other constructors.
// (Parameter renamed from snake_case service_account_file to Go-style
// camelCase; internal rename, no caller impact.)
func createConfigPolicy(ctx context.Context, serviceAccountFile string) (*orgpolicy.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return orgpolicy.NewClient(conf), nil
}
|
package commands
import (
"archive/zip"
"encoding/json"
"fmt"
"os"
"path"
"reflect"
"regexp"
"strconv"
"strings"
"github.com/spf13/cobra"
)
// ValidateCommand - validate the given platform and zip archive.
var ValidateCommand cobra.Command

// zipPath receives the --zip/-z flag: path to the archive to validate.
var zipPath string
// Metadata - structure for the metadata.json file carried inside the
// archive. The custom `required` tag is interpreted by validateMetadata:
// fields tagged required:"true" must be non-empty.
type Metadata struct {
	Product             string `json:"product" required:"true"`
	Service             string `json:"service" required:"true"`
	Version             string `json:"version" required:"true"`
	OS                  string `json:"os" required:"true"`
	Role                string `json:"role" required:"true"`
	Owner               string `json:"owner" required:"true"`
	HealthCheckEndpoint string `json:"health_check_endpoint" required:"false"`
	LogFilePath         string `json:"log_file_path" required:"false"`
	MetricsFilePath     string `json:"metrics_file_path" required:"false"`
}
// init wires up the validate subcommand and its --zip flag.
func init() {
	ValidateCommand = cobra.Command{
		Use:          "validate",
		Short:        "Validate the given platform and zip archive",
		Long:         "Validate a zip archive adheres to the naming convention and contains the necessary files & folders for the given platform",
		SilenceUsage: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			err := validate(cmd)
			if err != nil {
				// Print a summary; the error itself is propagated to cobra.
				fmt.Println("validation failed!")
			}
			return err
		},
	}
	ValidateCommand.Flags().StringVarP(&zipPath, "zip", "z", "", "path to zip")
}
// validate checks that the archive at zipPath exists, that its name
// matches the configured convention, and that it contains every file the
// platform (derived from the archive path) requires, validating the
// embedded metadata.json along the way.
func validate(cmd *cobra.Command) error {
	// Validate the archive exists.
	if _, err := os.Stat(zipPath); err != nil {
		return err
	}
	// Validate the archive name.
	zipName := path.Base(zipPath)
	re := Config.GetString("regexArchiveName")
	matched, err := regexp.MatchString(re, zipName)
	if err != nil {
		return err
	}
	if !matched {
		return fmt.Errorf("%s does not match naming convention %s", zipName, re)
	}
	// Open the archive.
	r, err := zip.OpenReader(zipPath)
	if err != nil {
		return err
	}
	defer r.Close()
	// Determine which files the platform requires.
	platform := regexp.MustCompile(Config.GetString("regexPlatforms")).FindString(zipPath)
	platforms := Config.Get("platforms")
	// NOTE(review): this assertion panics if the config value has a
	// different dynamic type — TODO confirm the config loader guarantees it.
	p := platforms.(map[string]map[string]struct{})
	expectedFiles := p[platform]
	// tmpFiles tracks expected entries not yet seen in the archive.
	tmpFiles := make(map[string]struct{}, len(expectedFiles))
	for k, v := range expectedFiles {
		tmpFiles[k] = v
	}
	for _, f := range r.File {
		if _, ok := expectedFiles[f.Name]; !ok {
			continue
		}
		if f.Name == "metadata.json" {
			if err := validateMetadataFile(f); err != nil {
				return err
			}
		}
		delete(tmpFiles, f.Name)
	}
	if len(tmpFiles) != 0 {
		var missing string
		for k := range tmpFiles {
			missing += fmt.Sprintf("%s is missing %s\n", zipPath, k)
		}
		// BUG FIX: the original passed the message as the format string
		// (fmt.Errorf(missing)), which go vet flags and which misbehaves
		// if a file name contains '%'.
		return fmt.Errorf("%s", strings.TrimSpace(missing))
	}
	// Validation was successful.
	fmt.Println("validation successful!")
	return nil
}

// validateMetadataFile opens a metadata.json archive entry, decodes and
// validates it, and closes the entry promptly. (The original deferred
// rc.Close() inside the archive loop, holding every entry open until the
// function returned.)
func validateMetadataFile(f *zip.File) error {
	rc, err := f.Open()
	if err != nil {
		return err
	}
	defer rc.Close()
	var metadata Metadata
	if err := json.NewDecoder(rc).Decode(&metadata); err != nil {
		return err
	}
	return validateMetadata(metadata)
}
// validateMetadata walks the Metadata struct by reflection and ensures
// every field whose `required` tag parses as true holds a non-empty
// string. The error names the offending field by its json tag.
func validateMetadata(metadata Metadata) error {
	v := reflect.ValueOf(metadata)
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		required, err := strconv.ParseBool(f.Tag.Get("required"))
		if err != nil {
			return err
		}
		if required && v.Field(i).String() == "" {
			return fmt.Errorf("invalid metadata.json - invalid '%s' property", f.Tag.Get("json"))
		}
	}
	return nil
}
|
package benchmark
import (
"context"
"database/sql"
"fmt"
"testing"
"github.com/go-gorp/gorp"
"github.com/jinzhu/gorm"
"github.com/jmoiron/sqlx"
"xorm.io/xorm"
"github.com/ulule/makroud"
"github.com/ulule/makroud-benchmarks/mimic"
)
// BenchmarkMakroud_Insert measures a single-row INSERT through makroud
// against the mimic fake driver (no real database work is done).
func BenchmarkMakroud_Insert(b *testing.B) {
	exec := jetExecInsert()
	// -1 disables the driver's placeholder-count check.
	exec.NumInput = -1
	dsn := mimic.NewResult(exec)
	node, err := makroud.Connect("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}
	driver, err := makroud.New(makroud.WithNode(node))
	if err != nil {
		b.Fatal(err)
	}
	ctx := context.Background()
	b.Run("makroud", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			// A fresh row per iteration so Save always performs an insert.
			row := JetMakroud{
				PilotID:    1,
				AirportID:  1,
				Name:       "test",
				Color:      sql.NullString{},
				UUID:       "test",
				Identifier: "test",
				Cargo:      []byte("test"),
				Manifest:   []byte("test"),
			}
			err = makroud.Save(ctx, driver, &row)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkSQLX_Insert measures a named-parameter INSERT through sqlx
// against the mimic fake driver. Each iteration prepares and executes the
// statement, mirroring the original's per-iteration work.
func BenchmarkSQLX_Insert(b *testing.B) {
	exec := jetExecInsert()
	// -1 disables the driver's placeholder-count check.
	exec.NumInput = -1
	dsn := mimic.NewResult(exec)
	dbx, err := sqlx.Connect("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}
	ctx := context.Background()
	query := fmt.Sprint(
		"INSERT INTO jets SET (pilot_id, airport_id, name, color, uuid, identifier, cargo, manifest) ",
		"VALUES (:pilot_id, :airport_id, :name, :color, :uuid, :identifier, :cargo, :manifest) RETURNING id",
	)
	args := map[string]interface{}{
		"pilot_id":   1,
		"airport_id": 1,
		"name":       "test",
		"color":      sql.NullString{},
		"uuid":       "test",
		"identifier": "test",
		"cargo":      []byte("test"),
		"manifest":   []byte("test"),
	}
	b.Run("sqlx", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			var jet JetSQLX
			stmt, err := dbx.PrepareNamedContext(ctx, query)
			if err != nil {
				b.Fatal(err)
			}
			err = stmt.GetContext(ctx, &jet, args)
			// BUG FIX: close each statement immediately. The original used
			// defer inside the loop, accumulating b.N open statements until
			// the closure returned.
			stmt.Close()
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkGORM_Insert measures a single-row INSERT through gorm against
// the mimic fake driver.
func BenchmarkGORM_Insert(b *testing.B) {
	exec := jetExecInsert()
	// -1 disables the driver's placeholder-count check.
	exec.NumInput = -1
	dsn := mimic.NewResult(exec)
	gormdb, err := gorm.Open("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}
	b.Run("gorm", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			// A fresh row per iteration so Create always inserts.
			row := JetGorm{
				PilotID:    1,
				AirportID:  1,
				Name:       "test",
				Color:      sql.NullString{},
				UUID:       "test",
				Identifier: "test",
				Cargo:      []byte("test"),
				Manifest:   []byte("test"),
			}
			err := gormdb.Create(&row).Error
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkGORP_Insert measures a single-row INSERT through gorp against
// the mimic fake driver.
func BenchmarkGORP_Insert(b *testing.B) {
	exec := jetExecInsert()
	// -1 disables the driver's placeholder-count check.
	exec.NumInput = -1
	dsn := mimic.NewResult(exec)
	db, err := sql.Open("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}
	// CLEANUP: the original re-checked the stale err (and panicked) after
	// building the DbMap; err was already handled above and struct-literal
	// construction cannot fail.
	gorpdb := &gorp.DbMap{Db: db, Dialect: gorp.PostgresDialect{}}
	gorpdb.AddTable(JetGorp{}).SetKeys(true, "ID")
	b.Run("gorp", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			row := JetGorp{
				PilotID:    1,
				AirportID:  1,
				Name:       "test",
				Color:      sql.NullString{},
				UUID:       "test",
				Identifier: "test",
				Cargo:      []byte("test"),
				Manifest:   []byte("test"),
			}
			err := gorpdb.Insert(&row)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
// BenchmarkXORM_Insert measures a single-row INSERT through xorm against
// the mimic fake driver.
// NOTE(review): unlike the other benchmarks, the row sets ID explicitly —
// presumably so xorm does not treat it as auto-increment; confirm intent.
func BenchmarkXORM_Insert(b *testing.B) {
	exec := jetExecInsert()
	// -1 disables the driver's placeholder-count check.
	exec.NumInput = -1
	dsn := mimic.NewResult(exec)
	xormdb, err := xorm.NewEngine("mimic", dsn)
	if err != nil {
		b.Fatal(err)
	}
	b.Run("xorm", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			row := JetXorm{
				ID:         1,
				PilotID:    1,
				AirportID:  1,
				Name:       "test",
				Color:      sql.NullString{},
				UUID:       "test",
				Identifier: "test",
				Cargo:      []byte("test"),
				Manifest:   []byte("test"),
			}
			_, err := xormdb.Insert(&row)
			if err != nil {
				b.Fatal(err)
			}
		}
	})
}
|
package gotils
import (
"log"
"reflect"
)
/*
StructTraverseTraverser traverses structs recursively, allowing visitors
to modify instances in place — for example, hiding or updating values
selectively just before serializing to JSON. Params carries arbitrary
context made available to visitors during traversal.
**/
type StructTraverseTraverser struct {
	Params map[string]interface{}
}
/*
StructTraverseVisitor is the interface adopted by instances that want to
hide or modify their own fields when visited by StructTraverseTraverser.
Implementations must use a pointer receiver — the traverser only invokes
the callback through an addressable pointer.
**/
type StructTraverseVisitor interface {
	StructTraverseVisitor(traverseContext *StructTraverseTraverser) error
}
// CreateStructTraverseContext builds a traverser carrying the supplied
// visitor parameters.
func CreateStructTraverseContext(params map[string]interface{}) *StructTraverseTraverser {
	t := StructTraverseTraverser{Params: params}
	return &t
}
// Traverse walks objectToTraverse recursively, invoking
// StructTraverseVisitor callbacks on any addressable struct that
// implements the interface.
// NOTE(review): traverseRecursive never assigns its err, so this always
// returns nil today; visitor errors are silently dropped — confirm intent.
func (t *StructTraverseTraverser) Traverse(objectToTraverse interface{}) error {
	var err error
	original := reflect.ValueOf(objectToTraverse)
	err = t.traverseRecursive(original, false)
	CheckNotFatal(err)
	return err
}
// traverseRecursive dispatches on the reflect.Kind of original and
// recurses into pointers, interfaces, struct fields, slice elements and
// map values. isPointer records that the current struct was reached via a
// pointer, which suppresses the "visitor sent by value" fatal check.
// NOTE(review): err is declared but never assigned, so every path returns
// nil; all recursive return values and visitor errors are discarded —
// confirm whether error propagation was intended.
func (t *StructTraverseTraverser) traverseRecursive(
	original reflect.Value, isPointer bool) error {
	var err error
	switch original.Kind() {
	case reflect.Ptr:
		originalValue := original.Elem()
		// Nil pointers, unexported or unaddressable values are skipped.
		if !originalValue.IsValid() {
			return err
		}
		// check if CanInterface:
		if !originalValue.CanInterface() {
			return err
		}
		if !originalValue.CanAddr() {
			return err
		}
		// Invoke the visitor through the pointer; its error is ignored.
		if visitor, ok := (originalValue.Addr().Interface()).(StructTraverseVisitor); ok {
			visitor.StructTraverseVisitor(t)
		}
		// continue recursively:
		t.traverseRecursive(originalValue, true)
	case reflect.Interface:
		if original.IsZero() {
			return err
		}
		originalValue := original.Elem()
		if !originalValue.IsValid() {
			return err
		}
		// check if CanInterface:
		if !originalValue.CanInterface() {
			return err
		}
		if !originalValue.CanAddr() {
			return err
		}
		// NOTE(review): this New(...).Elem() result is discarded — dead code?
		reflect.New(originalValue.Type()).Elem()
		t.traverseRecursive(originalValue, false)
	case reflect.Struct:
		// Structs reached by value cannot be mutated; treat an addressable
		// value-struct visitor as a programming error.
		if !isPointer && original.CanAddr() && original.CanInterface() {
			if _, ok := (original.Addr().Interface()).(StructTraverseVisitor); ok {
				log.Fatalln("ERROR: all StructTraverseVisitor must be sent as a pointer: ", original.Type().Name())
				CheckNotFatal(err)
			}
		}
		for i := 0; i < original.NumField(); i += 1 {
			field := original.Field(i)
			t.traverseRecursive(field, false)
		}
	// TODO:: should reflect.UnsafePointer be handled?
	case reflect.Slice:
		for i := 0; i < original.Len(); i += 1 {
			t.traverseRecursive(original.Index(i), false)
		}
	case reflect.Map:
		// Map values obtained via MapIndex are not addressable, so nested
		// visitors inside map values will be skipped by the CanAddr checks.
		for _, key := range original.MapKeys() {
			originalValue := original.MapIndex(key)
			t.traverseRecursive(originalValue, false)
		}
	default:
	}
	return err
}
|
package sorts
import (
"sort"
"testing"
)
// compareList reports whether source and target have the same length and
// identical elements in the same order.
func compareList(source []int, target []int) bool {
	if len(source) != len(target) {
		return false
	}
	for i, v := range source {
		if target[i] != v {
			return false
		}
	}
	return true
}
// systemSort returns an ascending-sorted copy of raw, leaving the input
// untouched. It serves as the reference implementation for the tests.
func systemSort(raw []int) []int {
	sorted := make([]int, len(raw))
	copy(sorted, raw)
	sort.Sort(sort.IntSlice(sorted))
	return sorted
}
// Test_InsertSort checks four insertion-sort variants against the
// standard library's sort on a fixed input.
// NOTE(review): rawList is shared by all four calls — this assumes each
// variant returns a new slice and does not sort in place; confirm.
func Test_InsertSort(t *testing.T) {
	rawList := []int{4, 3, 5, 21, 7, 8, 15, 1}
	expect := systemSort(rawList)
	list := insertSort(rawList)
	listSlice := insertSortSlice(rawList)
	listFind := insertSortFind(rawList)
	listReal := insertSortReal(rawList)
	if !compareList(expect, list) {
		t.Error("Test_InsertSort insertSort Fail", list)
	}
	if !compareList(expect, listSlice) {
		t.Error("Test_InsertSort insertSortSlice Fail", listSlice)
	}
	if !compareList(expect, listFind) {
		t.Error("Test_InsertSort insertSortFind Fail", listFind)
	}
	if !compareList(expect, listReal) {
		t.Error("Test_InsertSort insertSortReal Fail", listReal)
	}
	t.Log("Test_InsertSort RUN TEST")
}
|
package main
import (
"os"
"time"
"github.com/Sirupsen/logrus"
"github.com/kardianos/service"
)
// letsService adapts this program to the kardianos/service lifecycle
// (Start/Stop callbacks); it carries no state of its own.
type letsService struct{}
// Start is invoked by the service manager; it logs the event and
// delegates to startWork, returning its error.
func (*letsService) Start(s service.Service) error {
	logrus.Info("Start service")
	return startWork()
}
// Stop is invoked by the service manager. It returns immediately and
// terminates the whole process 100ms later from a goroutine, giving the
// service framework a moment to finish its own shutdown bookkeeping.
func (*letsService) Stop(s service.Service) error {
	logrus.Info("Stop service")
	go func() {
		time.Sleep(time.Second / 10)
		os.Exit(0)
	}()
	return nil
}
|
package osbuild1
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestNewChronyStage verifies that NewChronyStage wires the given options
// into a Stage named "org.osbuild.chrony".
func TestNewChronyStage(t *testing.T) {
	expectedStage := &Stage{
		Name:    "org.osbuild.chrony",
		Options: &ChronyStageOptions{},
	}
	actualStage := NewChronyStage(&ChronyStageOptions{})
	assert.Equal(t, expectedStage, actualStage)
}
|
package config
import (
"github.com/game-explorer/animal-chess-server/internal/pkg/config"
"github.com/game-explorer/animal-chess-server/internal/pkg/log"
"github.com/game-explorer/animal-chess-server/internal/pkg/orm"
)
// App holds the application configuration, populated from
// config/config.yml by this package's init.
var App struct {
	// Debug enables business-logic debugging.
	Debug bool `yaml:"debug"`
	// OrmDebug, when enabled, logs the generated SQL statements.
	OrmDebug bool `yaml:"orm_debug"`
	// LogDebug, when enabled, uses colored log output.
	LogDebug bool `yaml:"log_debug"`
	// HttpAddr is the HTTP listen address.
	HttpAddr string `yaml:"http_addr"`
	// Mysql groups the MySQL connection strings.
	Mysql struct {
		AnimalChess string `yaml:"animal_chess"`
	} `yaml:"mysql"`
}
// init loads the YAML configuration into App and propagates the debug
// switches to the log and orm packages.
// NOTE(review): init-time file I/O makes importing this package have
// side effects; a failure mode here is not visible from this chunk.
func init() {
	config.Init(&App, config.WithFileName("config/config.yml"))
	log.SetDebug(App.LogDebug)
	orm.SetDebug(App.OrmDebug)
}
|
package efclient
import (
"bytes"
"encoding/json"
"fmt"
"log"
"math/rand"
"net/http"
"sync"
"time"
"github.com/nikolasMelui/go_ecommerce_faker_client/internal/app/helper"
"syreclabs.com/go/faker"
"syreclabs.com/go/faker/locales"
)
// CounterpartyDocument is a document record as returned by the API,
// including its generated ID and the resolved counterparty.
type CounterpartyDocument struct {
	ID           int          `json:"id"`
	Title        string       `json:"title"`
	Description  string       `json:"description"`
	URL          string       `json:"url"`
	Counterparty Counterparty `json:"counterparty"`
}
// CounterpartyDocumentData is the request payload for creating a
// document; Counterparty holds the target counterparty's numeric ID.
type CounterpartyDocumentData struct {
	Title        string `json:"title"`
	Description  string `json:"description"`
	URL          string `json:"url"`
	Counterparty int    `json:"counterparty"`
}
// CreateCounterpartyDocument creates a fake PDF file for the document,
// then POSTs the document data to /counterparty-documents and returns the
// created record.
func (c *Client) CreateCounterpartyDocument(counterpartyDocumentData *CounterpartyDocumentData) (*CounterpartyDocument, error) {
	if err := helper.CreateFakeFile(counterpartyDocumentData.Title, "pdf"); err != nil {
		return nil, err
	}
	// Typo fixed: was "seccessfully".
	log.Println("File was created successfully")
	// Marshal field values directly; the original stored pointers to each
	// field, which encodes to identical JSON but adds needless indirection.
	requestData := map[string]interface{}{
		"title":        counterpartyDocumentData.Title,
		"description":  counterpartyDocumentData.Description,
		"url":          counterpartyDocumentData.URL,
		"counterparty": counterpartyDocumentData.Counterparty,
	}
	requestBody, err := json.Marshal(requestData)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", fmt.Sprintf("%s/counterparty-documents", c.BaseURL), bytes.NewBuffer(requestBody))
	if err != nil {
		return nil, err
	}
	var res CounterpartyDocument
	if err := c.SendRequest(req, &res); err != nil {
		return nil, err
	}
	return &res, nil
}
// CreateFakeCounterpartyDocuments creates count fake documents, each tied
// to a random counterparty ID in [1, count/2], spawning one goroutine per
// document. It blocks until all goroutines finish and returns the number
// of documents created successfully.
func (c *Client) CreateFakeCounterpartyDocuments(wg *sync.WaitGroup, count int) int {
	faker.Locale = locales.Ru
	// ch holds a single running success counter; capacity count guarantees
	// sends never block even while other goroutines are between recv/send.
	ch := make(chan int, count)
	ch <- 0
	for i := 0; i < count; i++ {
		wg.Add(1)
		// Stagger goroutine starts slightly to avoid hammering the API.
		time.Sleep(time.Millisecond * 50)
		go func(wg *sync.WaitGroup) {
			defer wg.Done()
			rand.Seed(time.Now().UnixNano())
			// BUG FIX: with count < 2 the original computed a zero upper
			// bound and rand.Intn(0) panics; clamp to at least 1.
			maxCounterpartyID := count / 2
			if maxCounterpartyID < 1 {
				maxCounterpartyID = 1
			}
			counterpartyID := rand.Intn(maxCounterpartyID) + 1
			fileExtension := "pdf"
			fileName := faker.Lorem().Characters(10) + "." + fileExtension
			fileDescription := faker.Lorem().Sentence(10)
			fakeCounterpartyDocument := CounterpartyDocumentData{
				Title:        fileName,
				Description:  fileDescription,
				URL:          "https://localhost:1337/" + fileName,
				Counterparty: counterpartyID,
			}
			log.Println(fakeCounterpartyDocument)
			_, err := c.CreateCounterpartyDocument(&fakeCounterpartyDocument)
			if err != nil {
				log.Print(fmt.Errorf("%v", err))
				// log.Fatal(err)
			} else {
				counter := <-ch
				ch <- counter + 1
			}
		}(wg)
	}
	wg.Wait()
	close(ch)
	// Receiving from the closed channel yields the final counter value.
	return <-ch
}
|
package catp
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document00300102 is the XML document wrapper for the catp.003.001.02
// ISO 20022 message (ATM withdrawal completion advice).
type Document00300102 struct {
	XMLName xml.Name                          `xml:"urn:iso:std:iso:20022:tech:xsd:catp.003.001.02 Document"`
	Message *ATMWithdrawalCompletionAdviceV02 `xml:"ATMWdrwlCmpltnAdvc"`
}
// AddMessage allocates the message body, stores it on the document and
// returns it for further population.
func (d *Document00300102) AddMessage() *ATMWithdrawalCompletionAdviceV02 {
	d.Message = new(ATMWithdrawalCompletionAdviceV02)
	return d.Message
}
// The ATMWithdrawalCompletionAdvice message is sent by an ATM to an acquirer or its agent to inform of the result of a withdrawal transaction at an ATM.
// If the ATM is configured to only send negative completion, a generic completion message should be used instead of ATMCompletionAdvice.
type ATMWithdrawalCompletionAdviceV02 struct {
	// Information related to the protocol management on a segment of the path from the ATM to the acquirer.
	Header *iso20022.Header32 `xml:"Hdr"`
	// Encrypted body of the message. Mutually exclusive in practice with
	// the clear-text advice below — the schema marks both optional.
	ProtectedATMWithdrawalCompletionAdvice *iso20022.ContentInformationType10 `xml:"PrtctdATMWdrwlCmpltnAdvc,omitempty"`
	// Information related to the completion of a withdrawal transaction on the ATM.
	ATMWithdrawalCompletionAdvice *iso20022.ATMWithdrawalCompletionAdvice2 `xml:"ATMWdrwlCmpltnAdvc,omitempty"`
	// Trailer of the message containing a MAC.
	SecurityTrailer *iso20022.ContentInformationType15 `xml:"SctyTrlr,omitempty"`
}
// AddHeader allocates the message header, stores it and returns it.
func (a *ATMWithdrawalCompletionAdviceV02) AddHeader() *iso20022.Header32 {
	a.Header = new(iso20022.Header32)
	return a.Header
}
// AddProtectedATMWithdrawalCompletionAdvice allocates the encrypted body,
// stores it and returns it.
func (a *ATMWithdrawalCompletionAdviceV02) AddProtectedATMWithdrawalCompletionAdvice() *iso20022.ContentInformationType10 {
	a.ProtectedATMWithdrawalCompletionAdvice = new(iso20022.ContentInformationType10)
	return a.ProtectedATMWithdrawalCompletionAdvice
}
// AddATMWithdrawalCompletionAdvice allocates the clear-text advice body,
// stores it and returns it.
func (a *ATMWithdrawalCompletionAdviceV02) AddATMWithdrawalCompletionAdvice() *iso20022.ATMWithdrawalCompletionAdvice2 {
	a.ATMWithdrawalCompletionAdvice = new(iso20022.ATMWithdrawalCompletionAdvice2)
	return a.ATMWithdrawalCompletionAdvice
}
// AddSecurityTrailer allocates the MAC trailer, stores it and returns it.
func (a *ATMWithdrawalCompletionAdviceV02) AddSecurityTrailer() *iso20022.ContentInformationType15 {
	a.SecurityTrailer = new(iso20022.ContentInformationType15)
	return a.SecurityTrailer
}
|
package config
import (
"bytes"
"flag"
"io/ioutil"
"os"
"sub_account_service/app_server_v2/lib"
"github.com/BurntSushi/toml"
"github.com/golang/glog"
)
// init eagerly builds the configuration singleton so importing this
// package is enough to load configuration.
// NOTE(review): init-time flag registration and file I/O make import
// order significant; see also the re-entrancy note on InitToml.
func init() {
	ConfInst()
}
// Config is the application configuration type, decoded from TOML.
type Config struct {
	Operate_timeout int    // operation timeout setting
	LocalAddress    string // local listen address
	LocalPort       string // local listen port
	ApiAddress      string
	ApiPort         string
	Mysql           string
	KeyStore        string
	Phrase          string
	CheckLimit      int
	UpdateDate      int
	OrderAddress    string
	AppId           string
	DumpNum         int
	OrderNum        int
	GetBookTime     int
	ResetQuoTime    int
}
// Optional is the configuration singleton; nil until ConfInst runs.
var Optional *Config
// ConfInst lazily initializes and returns the configuration singleton.
// The first call registers and parses the -config flag and loads the TOML
// file; subsequent calls return the cached instance. Not safe for
// concurrent first use.
func ConfInst() *Config {
	if Optional == nil {
		var path string
		flag.StringVar(&path, "config", "./conf/config.toml", "config path")
		flag.Parse()
		// ParseToml populates Optional as a side effect.
		ParseToml(path)
	}
	return Optional
}
// Opts returns the configuration singleton.
//
// Deprecated: use ConfInst instead.
func Opts() *Config {
	return ConfInst()
}
// ParseToml parses the configuration file at the given path and reports
// any failure. BUG FIX: the original discarded InitToml's error and
// unconditionally returned nil, hiding unreadable or malformed config.
func ParseToml(file string) error {
	//GetFromConfig()
	return InitToml(file)
}
// InitToml reads the configuration file. If the file does not exist, it
// writes the current (default) configuration to that path instead;
// otherwise it decodes the TOML into the Optional singleton.
// NOTE(review): in the missing-file branch, Opts() re-enters ConfInst
// while Optional is still nil, re-registering the -config flag — this
// looks like it would panic on a fresh run with no config file; confirm.
func InitToml(file string) error {
	glog.Infoln(lib.Log("initing", "", "finding config ..."))
	// If the configuration file does not exist, create it from defaults.
	if _, err := os.Stat(file); os.IsNotExist(err) {
		buf := new(bytes.Buffer)
		if err := toml.NewEncoder(buf).Encode(Opts()); err != nil {
			return err
		}
		glog.Infoln("没有找到配置文件,创建新文件 ...")
		return ioutil.WriteFile(file, buf.Bytes(), 0644)
	}
	var conf Config
	_, err := toml.DecodeFile(file, &conf)
	if err != nil {
		return err
	}
	Optional = &conf
	glog.Infoln(lib.Log("initing", "", "config.Opts()"), Optional)
	return nil
}
|
package main
import (
"math/rand"
"strconv"
"testing"
"time"
"github.com/VanBur/tcp-chat/internal/message"
"github.com/VanBur/tcp-chat/internal/server"
"github.com/VanBur/tcp-chat/internal/user"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestSendReceiveMessageToSelectedUser registers two users on a local
// server and checks that a direct message from one is delivered to the
// other unchanged.
// NOTE(review): the fixed 100ms sleeps stand in for real synchronization
// and may flake on slow machines.
func TestSendReceiveMessageToSelectedUser(t *testing.T) {
	const (
		network      = "tcp"
		address      = ":8008"
		senderName   = "tim"
		receiverName = "joe"
		messageText  = "Hello!"
	)
	s, err := server.New(network, address)
	require.NoError(t, err)
	go s.Serve()
	defer s.Stop()
	send, err := user.New(network, address)
	require.NoError(t, err)
	recv, err := user.New(network, address)
	require.NoError(t, err)
	// Register both users, pausing to let the server process each.
	sendRegMsg := message.Message{CommandType: message.Connect, User: senderName}
	err = send.Send(sendRegMsg)
	require.NoError(t, err)
	time.Sleep(time.Millisecond * 100)
	recvRegMsg := message.Message{CommandType: message.Connect, User: receiverName}
	err = recv.Send(recvRegMsg)
	require.NoError(t, err)
	time.Sleep(time.Millisecond * 100)
	// Address the message to the receiver by name.
	broadcastMsg := message.Message{
		CommandType: message.Broadcast,
		User:        senderName,
		Msg:         &message.ChatMessage{To: receiverName, Text: messageText},
	}
	err = send.Send(broadcastMsg)
	require.NoError(t, err)
	time.Sleep(time.Millisecond * 100)
	msg, err := recv.Read()
	require.NoError(t, err)
	assert.Equal(t, broadcastMsg, *msg)
}
// TestSendReceiveMessageToAllUsers registers one sender and ten receivers
// and checks that a broadcast with no "To" field reaches every receiver.
// NOTE(review): receiver names come from rand.Int() without a uniqueness
// guarantee, and the 100ms sleeps stand in for real synchronization.
func TestSendReceiveMessageToAllUsers(t *testing.T) {
	const (
		network             = "tcp"
		address             = ":8008"
		senderName          = "tim"
		receiversUsersCount = 10
		messageText         = "Hello!"
	)
	s, err := server.New(network, address)
	require.NoError(t, err)
	go s.Serve()
	defer s.Stop()
	sender, err := user.New(network, address)
	require.NoError(t, err)
	sendRegMsg := message.Message{CommandType: message.Connect, User: senderName}
	err = sender.Send(sendRegMsg)
	require.NoError(t, err)
	time.Sleep(time.Millisecond * 100)
	// Register the receivers under random numeric names.
	receivers := make([]*user.User, 0, receiversUsersCount)
	for i := 0; i < receiversUsersCount; i++ {
		recv, err := user.New(network, address)
		require.NoError(t, err)
		receivers = append(receivers, recv)
		recvName := rand.Int()
		recvRegMsg := message.Message{CommandType: message.Connect, User: strconv.Itoa(recvName)}
		err = recv.Send(recvRegMsg)
		require.NoError(t, err)
		time.Sleep(time.Millisecond * 100)
	}
	// No "To" field: the message should fan out to every connected user.
	broadcastMsg := message.Message{
		CommandType: message.Broadcast,
		User:        senderName,
		Msg:         &message.ChatMessage{Text: messageText},
	}
	err = sender.Send(broadcastMsg)
	require.NoError(t, err)
	time.Sleep(time.Millisecond * 100)
	for _, currReceiver := range receivers {
		msg, err := currReceiver.Read()
		require.NoError(t, err)
		assert.Equal(t, broadcastMsg, *msg)
	}
}
// TestSendReceiveMessageToDisconnectedUser registers two users,
// disconnects the receiver, then sends it a direct message. It only
// asserts that the send itself does not error — delivery (or the lack of
// it) is not verified.
func TestSendReceiveMessageToDisconnectedUser(t *testing.T) {
	const (
		network      = "tcp"
		address      = ":8008"
		senderName   = "tim"
		receiverName = "joe"
		messageText  = "Hello!"
	)
	s, err := server.New(network, address)
	require.NoError(t, err)
	go s.Serve()
	defer s.Stop()
	send, err := user.New(network, address)
	require.NoError(t, err)
	recv, err := user.New(network, address)
	require.NoError(t, err)
	sendRegMsg := message.Message{CommandType: message.Connect, User: senderName}
	err = send.Send(sendRegMsg)
	require.NoError(t, err)
	time.Sleep(time.Millisecond * 100)
	recvRegMsg := message.Message{CommandType: message.Connect, User: receiverName}
	err = recv.Send(recvRegMsg)
	require.NoError(t, err)
	time.Sleep(time.Millisecond * 100)
	// Disconnect the receiver before messaging it.
	recvLeaveMsg := message.Message{CommandType: message.Disconnect, User: receiverName}
	err = recv.Send(recvLeaveMsg)
	require.NoError(t, err)
	time.Sleep(time.Millisecond * 100)
	broadcastMsg := message.Message{
		CommandType: message.Broadcast,
		User:        senderName,
		Msg:         &message.ChatMessage{To: receiverName, Text: messageText},
	}
	err = send.Send(broadcastMsg)
	require.NoError(t, err)
}
|
package pg
import (
// "database/sql"
"fmt"
. "grm-searcher/types"
. "grm-service/dbcentral/pg"
// "strconv"
"strings"
)
// MetaDB wraps the central metadata database connection and adds
// search/query helpers over the data_object table.
type MetaDB struct {
	MetaCentralDB
}
// GetTotalCountWhere returns count(*) of tableName rows matching the
// where clause, or 0 on any query or scan failure (errors are swallowed
// by design — callers treat a failed count as zero).
// SECURITY NOTE(review): tableName and where are interpolated directly
// into SQL; callers must never pass untrusted input here.
func (db MetaDB) GetTotalCountWhere(tableName, where string) int64 {
	var total int64 = 0
	sql := fmt.Sprintf("select count(*) from %s where %s;", tableName, where)
	rows, err := db.Conn.Query(sql)
	if err != nil {
		return total
	}
	defer rows.Close()
	// count(*) yields exactly one row; the original looped and returned on
	// the first iteration, which this 'if' expresses directly, and it also
	// ignored the Scan error, which is now handled explicitly.
	if rows.Next() {
		if err := rows.Scan(&total); err != nil {
			return 0
		}
	}
	return total
}
// GetDevice looks up the device column for the data object with the given
// uuid. Returns an empty string (and nil error) when no row matches.
// SECURITY NOTE(review): dataid is interpolated into the SQL string;
// presumably it is an internally generated uuid — confirm, or switch to a
// parameterized query.
func (db MetaDB) GetDevice(dataid string) (string, error) {
	// Not cached elsewhere, so query directly.
	sql := fmt.Sprintf(`select device from data_object where uuid = '%s'`, dataid)
	rows, err := db.Conn.Query(sql)
	if err != nil {
		return "", err
	}
	defer rows.Close()
	var device string
	if rows.Next() {
		err = rows.Scan(&device)
		if err != nil {
			return "", err
		}
	}
	return device, nil
}
// SearchByGeo returns metadata for the data objects in dataids whose
// envelope intersects the WKT geometry in r, plus the total match count.
// Paging/sorting is applied only when all four of Limit/Offset/Sort/Order
// are set. Rows that fail to scan are skipped.
// SECURITY NOTE(review): dataids, geometry and the sort/paging fields are
// interpolated directly into SQL; they must come from trusted input.
func (db MetaDB) SearchByGeo(dataids []string, r SearchInfo) ([]*MetaInfo, int64, error) {
	infos := make([]*MetaInfo, 0)
	where := fmt.Sprintf(" uuid in (%s) and ST_Intersects(envelope,ST_GeometryFromText('%s', 4326)) ",
		strings.Join(dataids, ","), r.Geometry)
	total := db.GetTotalCountWhere("data_object", where)
	sql := fmt.Sprintf("select name,data_type,sub_type,path,file_size,uuid,ST_AsGeoJson(envelope)"+
		" from data_object where %s", where)
	if len(r.Limit) > 0 && len(r.Offset) > 0 &&
		len(r.Sort) > 0 && len(r.Order) > 0 {
		sql += " order by " + r.Sort + " " + r.Order +
			" limit " + r.Limit + " offset " + r.Offset
	}
	var name, data_type, path, obj_id, envelope, sub_type string
	var file_size float64
	rows, err := db.Conn.Query(sql)
	if err != nil {
		// BUG FIX: the original discarded this error and then deferred
		// rows.Close() on a nil rows value, which panics on query failure.
		return infos, total, err
	}
	defer rows.Close()
	for rows.Next() {
		err := rows.Scan(&name, &data_type, &sub_type, &path, &file_size, &obj_id, &envelope)
		if err != nil {
			continue
		}
		infos = append(infos, &MetaInfo{
			Name:            name,
			DataType:        data_type,
			SubType:         sub_type,
			Path:            path,
			FileSize:        file_size,
			EnvelopeGeoJson: envelope,
			UUID:            obj_id,
		})
	}
	return infos, total, nil
}
// DataFilter returns metadata rows whose uuid is in _dataids and whose
// envelope intersects geo (WKT), plus the total match count. Unlike
// SearchByGeo it returns the envelope as WKT (ST_AsText) and applies no
// paging or sorting.
//
// NOTE(review): _dataids and geo are interpolated directly into the SQL
// text; callers must guarantee they are trusted input.
func (db MetaDB) DataFilter(_dataids []string, geo string) ([]*MetaInfo, int64, error) {
	infos := make([]*MetaInfo, 0)
	where := fmt.Sprintf(" uuid in (%s) and ST_Intersects(envelope,ST_GeometryFromText('%s', 4326)) ",
		strings.Join(_dataids, ","), geo)
	total := db.GetTotalCountWhere("data_object", where)
	sql := fmt.Sprintf("select name,data_type,sub_type,path,file_size,uuid,ST_AsText(envelope)"+
		" from data_object where %s", where)
	rowsM, err := db.Conn.Query(sql)
	if err != nil {
		// The original discarded this error and then deferred rowsM.Close()
		// on a nil result, which panics; surface the failure instead.
		return infos, total, err
	}
	defer rowsM.Close()
	var name, dataType, path, objID, envelope, subType string
	var fileSize float64
	for rowsM.Next() {
		if err := rowsM.Scan(&name, &dataType, &subType, &path, &fileSize, &objID, &envelope); err != nil {
			continue // skip rows that fail to scan, as before
		}
		infos = append(infos, &MetaInfo{
			Name:            name,
			DataType:        dataType,
			SubType:         subType,
			Path:            path,
			FileSize:        fileSize,
			EnvelopeGeoJson: envelope,
			UUID:            objID,
		})
	}
	return infos, total, nil
}
|
package cli
import (
"context"
"io/ioutil"
"os"
"os/signal"
"syscall"
"github.com/bonedaddy/go-defi/bclient"
"github.com/bonedaddy/go-defi/config"
"github.com/bonedaddy/go-defi/txmatch"
"github.com/urfave/cli/v2"
)
// txMatchCommand builds the "txmatch" CLI command, which scans a block range
// for transactions against a contract that call any of the given methods and
// writes the matches to an output file.
func txMatchCommand() *cli.Command {
	return &cli.Command{
		Name:    "txmatch",
		Aliases: []string{"txm"},
		Usage:   "allows filtering for transactions that match predefined conditions",
		Action: func(c *cli.Context) error {
			// Listen for interrupt/termination signals so a long-running
			// match can be cancelled cleanly via the context below.
			ch := make(chan os.Signal, 1)
			signal.Notify(ch, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM, os.Interrupt)
			ctx, cancel := context.WithCancel(c.Context)
			defer cancel()
			cfg, err := config.LoadConfig(c.String("config.path"))
			if err != nil {
				return err
			}
			client, err := cfg.EthClient(ctx)
			if err != nil {
				return err
			}
			// The contract ABI is read from disk and passed to the matcher
			// as raw text.
			abiBytes, err := ioutil.ReadFile(c.String("abi.file"))
			if err != nil {
				return err
			}
			bc, err := bclient.NewClient(ctx, client)
			if err != nil {
				return err
			}
			logger, err := cfg.ZapLogger()
			if err != nil {
				return err
			}
			matcher, err := txmatch.NewMatcher(
				logger,
				bc,
				string(abiBytes),
				c.StringSlice("methods"),
				[]string{c.String("contract.address")},
			)
			if err != nil {
				return err
			}
			// Cancel the context (and thereby the match) on the first signal.
			go func() {
				<-ch
				cancel()
			}()
			return matcher.Match(c.String("out.file"), c.Uint64("start.block"), c.Uint64("end.block"))
		},
		Flags: []cli.Flag{
			&cli.Uint64Flag{
				Name:  "start.block",
				Value: 10950651,
				Usage: "start of the block range to query",
			},
			&cli.Uint64Flag{
				Name:  "end.block",
				Value: 12089509,
				Usage: "end of the block range to query",
			},
			&cli.StringFlag{
				Name:  "contract.address",
				Value: "",
				Usage: "contract address to filter transactions against",
			},
			&cli.StringSliceFlag{
				Name:  "methods",
				Usage: "method to filter transactions against",
			},
			&cli.StringFlag{
				Name:  "abi.file",
				Usage: "file containing the abi definition",
				Value: "abifile.txt",
			},
			&cli.StringFlag{
				Name:  "out.file",
				Usage: "file to store matched addresses in",
				Value: "outfile.txt",
			},
		},
	}
}
|
/* https://www.careercup.com/question?id=5095457003929600 */
package fbinterview
import (
"fmt"
"strings"
)
// ALPH_NUM is the size of the English alphabet (number of keys).
var ALPH_NUM = 26

// The on-screen keyboard lays the letters A..Z out row-major with kw keys
// per row. The cursor starts at the top-left key and moves in unit steps
// (U/D/L/R), pressing "T" to type the key under the cursor.
// PrintSentence returns (and prints) the comma-separated move sequence needed
// to type text on a kw-wide on-screen keyboard. For each character it emits
// the vertical move (U/D with a step count), then the horizontal move (L/R),
// then "T" for the key press. text is assumed to contain only 'A'..'Z'.
func PrintSentence(text string, kw int) string {
	cx, cy := 0, 0 // current cursor position; 'A' is the top-left key
	moves := []string{}
	for _, r := range text {
		// BUG FIX: use a zero-based key index. The original used num = r-'A'+1
		// with num%kw / num/kw, which placed 'A' at column 1 (so typing "A"
		// from the start emitted a spurious move) and wrapped every kw-th
		// letter onto the wrong row.
		num := int(r - 'A')
		x := num % kw
		y := num / kw
		move := ""
		dy := cy - y
		if dy < 0 {
			move += fmt.Sprintf("D%d", -dy)
		} else if dy > 0 {
			move += fmt.Sprintf("U%d", dy)
		}
		if move != "" {
			moves = append(moves, move)
		}
		move = ""
		dx := cx - x
		if dx < 0 {
			move = fmt.Sprintf("R%d", -dx)
		} else if dx > 0 {
			move = fmt.Sprintf("L%d", dx)
		}
		if move != "" {
			moves = append(moves, move)
		}
		moves = append(moves, "T")
		cx = x
		cy = y
	}
	result := strings.Join(moves, ", ")
	fmt.Println("Result: ", result)
	return result
}
|
package main
import (
"bytes"
"io/ioutil"
"os"
"testing"
)
// MockReadKeyFile stands in for the real key-file reader in tests; Str is the
// content returned for a successful read.
type MockReadKeyFile struct{ Str string }

// ErrorMockReadKeyFile is a string-backed error used to signal a failed read;
// its message is the path that was requested.
type ErrorMockReadKeyFile string

func (e ErrorMockReadKeyFile) Error() string { return string(e) }

// ReadFile returns the mock key content when path is "ok" and an error for
// every other path.
func (k MockReadKeyFile) ReadFile(path string) ([]byte, error) {
	if path != "ok" {
		return nil, ErrorMockReadKeyFile(path)
	}
	return ioutil.ReadAll(bytes.NewBufferString(k.Str))
}
// TestParams covers argument validation errors and a fully-valid parse.
func TestParams(t *testing.T) {
	// Route key-file reads through the mock so "-k=ok" yields "foo".
	mock := MockReadKeyFile{"foo"}
	readKeyFile = mock.ReadFile
	errorCases := []struct {
		Name, Ips, Key, User, Command, Expected string
	}{
		{
			"missing user param",
			"-H=8.8.8.8",
			"-k=ok",
			"",
			"-c=hostname",
			"Argument error: user",
		},
		{
			"missing key param",
			"-H=8.8.8.8",
			"-k=err",
			"-u=user",
			"-c=hostname",
			"Argument error: key path",
		},
		{
			"missing command param",
			"-H=8.8.8.8",
			"-k=ok",
			"-u=user",
			"",
			"Argument error: command",
		},
	}
	for _, tc := range errorCases {
		t.Run(tc.Name, func(t *testing.T) {
			os.Args = []string{
				"cmd", tc.Ips, tc.Key, tc.User, tc.Command,
			}
			params := Params{}
			_, err := params.Parse()
			if err.Error() != tc.Expected {
				t.Fatal("expected:", tc.Expected, "got:", err)
			}
		})
	}
	t.Run("all arguments valid", func(t *testing.T) {
		expected := Params{
			Ips:     []string{"8.8.8.8"},
			Key:     []byte("foo"),
			User:    "user",
			Command: "hostname",
			Timeout: 30,
			showIp:  true,
		}
		os.Args = []string{
			"cmd",
			"-H=8.8.8.8",
			"-k=ok",
			"-u=user",
			"-c=hostname",
		}
		params := Params{}
		params.Parse()
		if params.Ips[0] != expected.Ips[0] {
			t.Fatal("expected:", expected.Ips, "got:", params.Ips)
		}
		if string(params.Key) != string(expected.Key) {
			t.Fatal("expected:", expected.Key, "got:", params.Key)
		}
		if params.User != expected.User {
			t.Fatal("expected:", expected.User, "got:", params.User)
		}
		if params.Command != expected.Command {
			t.Fatal("expected:", expected.Command,
				"got:", params.Command)
		}
		if params.Timeout != expected.Timeout {
			t.Fatal("expected:", expected.Timeout,
				"got:", params.Timeout)
		}
		if params.showIp != expected.showIp {
			// BUG FIX: this failure message previously printed the Timeout
			// values (copy/paste from the branch above) instead of showIp.
			t.Fatal("expected:", expected.showIp,
				"got:", params.showIp)
		}
	})
}
|
// Package load helps the MARS load redcode warriors into the core based on
// the configuration. This package must:
// 1) lex all supplied warriors
// 2) parse and simplify all labels and expressions
// 3) read config and inspect the existing core
// 4) return byte arrays and the locations at which they should be placed in the core
//
package load
|
package pkg
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestLinks exercises the Links collection helpers: ignore-list filtering and
// generic predicate filtering.
func TestLinks(t *testing.T) {
	t.Run("Remove Ignored Links", func(t *testing.T) {
		// given: external links are ignored by substring of AbsPath
		// ("github.com"), internal links by their RelPath.
		externalLinksToIgnore, internalLinksToIgnore := []string{"github.com"}, []string{"../external_links.md"}
		links := Links{
			Link{
				AbsPath: "https://twitter.com",
				TypeOf:  ExternalLink,
			},
			Link{
				AbsPath: "https://github.com",
				TypeOf:  ExternalLink,
			},
			Link{
				AbsPath: "http://dont.exist.link.com",
				TypeOf:  ExternalLink,
			},
			Link{
				AbsPath: "test-markdowns/external_links.md",
				RelPath: "../external_links.md",
				TypeOf:  InternalLink,
			},
			Link{
				AbsPath: "test-markdowns/sub_path/sub_sub_path/without_links.md",
				RelPath: "sub_sub_path/without_links.md",
				TypeOf:  InternalLink,
			},
			Link{
				AbsPath: "test-markdowns/sub_path/absolute_path.md",
				RelPath: "absolute_path.md",
				TypeOf:  InternalLink,
			},
			Link{
				AbsPath: "test-markdowns/sub_path/invalid.md",
				RelPath: "invalid.md",
				TypeOf:  InternalLink,
			},
		}
		// expected: the github.com external link and the ../external_links.md
		// internal link are dropped; everything else survives in order.
		expected := Links{
			Link{
				AbsPath: "https://twitter.com",
				TypeOf:  ExternalLink,
			},
			Link{
				AbsPath: "http://dont.exist.link.com",
				TypeOf:  ExternalLink,
			},
			Link{
				AbsPath: "test-markdowns/sub_path/sub_sub_path/without_links.md",
				RelPath: "sub_sub_path/without_links.md",
				TypeOf:  InternalLink,
			},
			Link{
				AbsPath: "test-markdowns/sub_path/absolute_path.md",
				RelPath: "absolute_path.md",
				TypeOf:  InternalLink,
			},
			Link{
				AbsPath: "test-markdowns/sub_path/invalid.md",
				RelPath: "invalid.md",
				TypeOf:  InternalLink,
			},
		}
		result := links.RemoveIgnoredLinks(externalLinksToIgnore, internalLinksToIgnore)
		assert.Equal(t, expected, result)
	})
	t.Run("Filter", func(t *testing.T) {
		// given
		links := Links{
			Link{
				AbsPath: "testPath1",
				TypeOf:  InternalLink,
			},
			Link{
				AbsPath: "testPath2",
				TypeOf:  HashInternalLink,
			},
			Link{
				AbsPath: "testPath3",
				TypeOf:  ExternalLink,
			},
		}
		expected := Links{
			Link{
				AbsPath: "testPath3",
				TypeOf:  ExternalLink,
			},
		}
		// when: keep only external links
		result := links.Filter(func(link Link) bool {
			return link.TypeOf == ExternalLink
		})
		// then
		assert.ElementsMatch(t, expected, result)
	})
}
|
package streamdal
import (
"bytes"
"testing"
. "github.com/onsi/gomega"
)
// TestAuthenticate verifies that a successful login response is decoded into
// an AuthResponse with all fields populated.
func TestAuthenticate(t *testing.T) {
	g := NewGomegaWithT(t)

	// Canned API payload served by the mocked HTTP layer.
	const apiResponse = `{
    "id": "8d8af58b-7d3d-474f-82ff-8b228245d159",
    "name": "Test User",
    "email": "test@streamdal.com",
    "onboarding_state": "",
    "onboarding_state_status": "",
    "team": {
        "id": "dce9c35e-1762-4233-97b8-e3f1830faf57",
        "name": "Testing-1"
    }
}`

	client := StreamdalWithMockResponse(200, apiResponse)

	got, err := client.Authenticate("test@streamdal.com", "password123")
	g.Expect(err).ToNot(HaveOccurred())
	g.Expect(got).To(BeAssignableToTypeOf(&AuthResponse{}))
	g.Expect(got.AccountID).To(Equal("8d8af58b-7d3d-474f-82ff-8b228245d159"))
	g.Expect(got.Name).To(Equal("Test User"))
	g.Expect(got.Email).To(Equal("test@streamdal.com"))
	g.Expect(got.Team.ID).To(Equal("dce9c35e-1762-4233-97b8-e3f1830faf57"))
	g.Expect(got.Team.Name).To(Equal("Testing-1"))
}
// TestReadUsername verifies that readUsername returns one trimmed line from
// its input reader.
func TestReadUsername(t *testing.T) {
	g := NewGomegaWithT(t)

	stdin := bytes.NewBufferString("test@streamdal.com\n")
	username, err := readUsername(stdin)
	g.Expect(err).ToNot(HaveOccurred())
	g.Expect(username).To(Equal("test@streamdal.com"))
}
// TestReadPassword verifies that readPassword returns the bytes produced by
// the injected terminal-read function.
func TestReadPassword(t *testing.T) {
	g := NewGomegaWithT(t)
	// The reader function is injected directly, so no stdin buffer is needed
	// (the original built one and never used it — dead code removed).
	testfunc := func(fd int) ([]byte, error) {
		return []byte("solarwinds123"), nil
	}
	// Renamed from "username": the value under test is the password.
	password, err := readPassword(testfunc)
	g.Expect(err).ToNot(HaveOccurred())
	g.Expect(password).To(Equal("solarwinds123"))
}
|
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
package shutdown
import (
"net/http"
)
// WrapHandler will return an http.Handler that holds a shutdown lock for the
// duration of the wrapped handler, so shutdown waits for in-flight requests.
// Once shutdown has been initiated it answers http.StatusServiceUnavailable
// without invoking the wrapped handler.
func WrapHandler(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		release := Lock()
		if release == nil {
			// Shutdown in progress: reject the request.
			w.WriteHeader(http.StatusServiceUnavailable)
			return
		}
		// Deferred so a panicking handler still releases the lock.
		defer release()
		h.ServeHTTP(w, r)
	})
}
// WrapHandlerFunc will return an http.HandlerFunc that holds a shutdown lock
// for the duration of the wrapped handler, so shutdown waits for in-flight
// requests. Once shutdown has been initiated it answers
// http.StatusServiceUnavailable without invoking the wrapped handler.
func WrapHandlerFunc(h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		release := Lock()
		if release == nil {
			// Shutdown in progress: reject the request.
			w.WriteHeader(http.StatusServiceUnavailable)
			return
		}
		// Deferred so a panicking handler still releases the lock.
		defer release()
		h(w, r)
	}
}
|
package scache
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestLruSetAndGet checks that setting the same key twice overwrites the
// value and Get returns the latest one.
func TestLruSetAndGet(t *testing.T) {
	c := newShardRU(nil, newCounter(1000), newTimer(), &Config{
		TTL: 1 * time.Second,
	}, nil)

	const key = "test"
	c.Set(key, "TEST DATA1")
	c.Set(key, "TEST DATA2")

	got, err := c.Get(key)
	require.NoError(t, err)
	require.Equal(t, "TEST DATA2", got)
}
// TestLruOldestList drives newListWithOldEntriesLRU through a table of
// insertions and checks the order in which keys are yielded by Next():
// low-cost ("old") entries first, then the high-cost epoch.
func TestLruOldestList(t *testing.T) {
	// getKeys drains the list via Next() and collects the yielded keys.
	getKeys := func(src *listWithOldEntriesLRU) (keys []interface{}) {
		keys = make([]interface{}, 0)
		for {
			if k, ok := src.Next(); ok {
				keys = append(keys, k)
			} else {
				break
			}
		}
		return
	}
	// getEpochKeys collects the keys of live items (non-nil Cost) in an
	// epoch; used only in the failure message.
	getEpochKeys := func(src *epoch) (keys []interface{}) {
		for _, item := range src.items {
			if item.Cost != nil {
				keys = append(keys, item.Key)
			}
		}
		return
	}
	type InData struct {
		Key         string
		Cost, Timer uint32
	}
	for i, testInfo := range []struct {
		Limit  int
		Res    []interface{}
		InData []InData
	}{
		{
			// one epoch. old is first
			Limit: 1,
			Res:   []interface{}{"1"},
			InData: []InData{
				{Key: "1", Cost: 1, Timer: 3},
				{Key: "2", Cost: 2, Timer: 3},
				{Key: "3", Cost: 3, Timer: 3},
			},
		},
		{
			// one epoch. old is in the middle
			Limit: 1,
			Res:   []interface{}{"5"},
			InData: []InData{
				{Key: "4", Cost: 2, Timer: 3},
				{Key: "5", Cost: 1, Timer: 3},
				{Key: "6", Cost: 3, Timer: 3},
			},
		},
		{
			// one epoch. old is last
			Limit: 1,
			Res:   []interface{}{"9"},
			InData: []InData{
				{Key: "7", Cost: 2, Timer: 3},
				{Key: "8", Cost: 3, Timer: 3},
				{Key: "9", Cost: 1, Timer: 3},
			},
		},
		{
			// two epoch. old is first
			Limit: 1,
			Res:   []interface{}{"10", "11"},
			InData: []InData{
				{Key: "10", Cost: 10, Timer: 3},
				{Key: "11", Cost: 1, Timer: 3},
				{Key: "12", Cost: 3, Timer: 3},
			},
		},
		{
			// two epoch. old is in the middle
			Limit: 1,
			Res:   []interface{}{"14", "13"},
			InData: []InData{
				{Key: "13", Cost: 1, Timer: 3},
				{Key: "14", Cost: 10, Timer: 3},
				{Key: "15", Cost: 3, Timer: 3},
			},
		},
		{
			// two epoch. old is last
			Limit: 1,
			Res:   []interface{}{"18", "16"},
			InData: []InData{
				{Key: "16", Cost: 1, Timer: 3},
				{Key: "17", Cost: 3, Timer: 3},
				{Key: "18", Cost: 10, Timer: 3},
			},
		},
		{
			// two epoch. old is in the middle
			Limit: 1,
			Res:   []interface{}{"20", "19"},
			InData: []InData{
				{Key: "19", Cost: 1, Timer: 3},
				{Key: "20", Cost: 10, Timer: 3},
				{Key: "21", Cost: 11, Timer: 3},
			},
		},
		{
			// two epoch. old is in the middle
			Limit: 3,
			Res:   []interface{}{"23", "24", "26", "22", "25"},
			InData: []InData{
				{Key: "22", Cost: 1, Timer: 3},
				{Key: "23", Cost: 10, Timer: 3},
				{Key: "24", Cost: 11, Timer: 3},
				{Key: "25", Cost: 2, Timer: 3},
				{Key: "26", Cost: 12, Timer: 3},
			},
		},
	} {
		l := newListWithOldEntriesLRU(testInfo.Limit)
		for _, item := range testInfo.InData {
			// Each entry gets its own cost pointer; Add takes ownership.
			cost := item.Cost
			pCost := &cost
			l.Add(item.Key, pCost, item.Timer)
		}
		require.Equalf(t,
			testInfo.Res, getKeys(l),
			"#%d: before max cost: %q; after max cost: %q",
			i,
			getEpochKeys(l.beforeMaxCost),
			getEpochKeys(l.afterMaxCost))
		l.Clear()
	}
}
// TestLruTTL verifies TTL expiry and that evictions are signalled on the
// clean channel exactly once for this scenario.
func TestLruTTL(t *testing.T) {
	chClean := make(chan struct{}, 10)
	cache := newShardRU(chClean, newCounter(2), newTimer(), &Config{
		TTL: 1 * time.Second,
	}, nil)
	cache.Set("test1", "test1")
	cache.Set("test2", "test2")
	for _, k := range []string{"test1", "test2"} {
		v, err := cache.Get(k)
		require.NoError(t, err)
		require.Equal(t, k, v)
	}
	// The counter only admits 2 items, so adding a third triggers a clean.
	cache.Set("test3", "test3")
	{
		v, err := cache.Get("test3")
		require.NoError(t, err)
		require.Equal(t, "test3", v)
	}
	time.Sleep(cache.ttl + time.Millisecond/2)
	for _, k := range []string{"test1", "test2"} {
		v, err := cache.Get(k)
		require.Equal(t, ErrNotFound, err)
		require.Nil(t, v)
	}
	_, ok := <-chClean
	require.True(t, ok) // remove test1 before add test3
	// The clean channel must now be empty. Receiving anything — a value or a
	// close signal — is a failure. (The original branch asserted
	// require.False(t, ok) and then unconditionally called t.Fatal, so it
	// could never pass; report the unexpected receive directly.)
	select {
	case _, ok := <-chClean:
		t.Fatalf("clean channel not empty (ok=%v)", ok)
	default:
		// ok
	}
}
// TestLruGetUnsetted checks that fetching a key that was never set yields
// ErrNotFound and a nil value.
func TestLruGetUnsetted(t *testing.T) {
	c := newShardRU(nil, newCounter(1000), newTimer(), &Config{
		TTL:          1 * time.Second,
		ItemsToPrune: 1,
	}, nil)

	got, err := c.Get("test")
	require.Equal(t, ErrNotFound, err)
	require.Nil(t, got)
}
// TestLruDel verifies that a key can no longer be fetched after Del.
func TestLruDel(t *testing.T) {
	cache := newShardRU(nil, newCounter(2), newTimer(), &Config{
		TTL:          1 * time.Second,
		ItemsToPrune: 1,
	}, nil)
	cache.Set("key", "DATA")
	cache.Del("key")
	// BUG FIX: the original queried "test", a key that was never set, so the
	// assertions passed vacuously; query the deleted key instead.
	v, err := cache.Get("key")
	require.Equal(t, ErrNotFound, err)
	require.Nil(t, v)
}
|
package main
import (
"fmt"
"sync"
)
// x is a counter shared by concurrently running goroutines.
var x = 0

// wg waits for all incrementing goroutines to finish.
var wg sync.WaitGroup

// lock serialises access to x (mutex).
var lock sync.Mutex

// add increments the shared counter 5000 times, taking the mutex around each
// increment, and signals completion on the wait group.
func add() {
	defer wg.Done()
	for i := 0; i < 5000; i++ {
		lock.Lock()
		x++
		lock.Unlock()
	}
}

func main() {
	wg.Add(2)
	go add()
	go add()
	wg.Wait()
	fmt.Println(x) // always 10000: the mutex prevents lost updates
}
|
package main
import (
"eventgo/repository"
"fmt"
"github.com/gin-gonic/gin"
"log"
"net/http"
"os"
"os/signal"
"strconv"
"syscall"
)
// Build metadata. These package-level strings appear intended to be injected
// at build time (e.g. via -ldflags "-X main.GitHash=...") — not visible from
// this file; confirm against the build scripts.
var version Version
var GitHash string
var BuildTime string
var GoVer string

// Version describes the running binary; it is serialised as JSON by the
// home-page handler.
type Version struct {
	GitCommit  string
	ApiVersion string
	GoVersion  string
	BuildDate  string
}
// main assembles the version struct, installs a SIGINT/SIGTERM handler that
// cleans up the repository before exiting, initialises the database, and then
// starts the HTTP server (handleRequests blocks until the server stops).
func main() {
	version = Version{GitCommit: GitHash, ApiVersion: "1.0.0", BuildDate: BuildTime, GoVersion: GoVer}
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		sig := <-sigs
		fmt.Println(sig)
		// Release repository resources before terminating the process.
		repository.Cleanup()
		fmt.Println("Exiting")
		os.Exit(0)
	}()
	repository.InitDB()
	handleRequests()
}
// handleRequests registers all HTTP routes on a default gin engine and runs
// the server on :8080. It only returns via log.Fatal on server failure.
func handleRequests() {
	r := gin.Default()
	//r.Use(commonMiddleware)
	r.GET("/", homePage)
	r.GET("/events", allEvents)
	r.POST("/events", createEvent)
	// Note: the literal "/events/active" route must be declared before the
	// ":id" parameter routes below would otherwise shadow it.
	r.GET("/events/active", eventByActive)
	r.GET("/events/:id", eventById)
	r.PUT("/events/:id", updateEvent)
	r.PATCH("/events/:id", updateEvent)
	log.Fatal(r.Run(":8080"))
}
// homePage returns the binary's build/version information as JSON.
func homePage(c *gin.Context) {
	fmt.Println("Endpoint Hit: homePage")
	c.JSON(http.StatusOK, &version)
}
// allEvents returns every event in the repository as JSON.
func allEvents(c *gin.Context) {
	events := repository.Event{}.FindAll()
	c.JSON(http.StatusOK, &events)
}
// eventByActive returns the currently active event as JSON.
func eventByActive(c *gin.Context) {
	var event = repository.Event{}.FindActive()
	c.JSON(http.StatusOK, &event)
}
// eventById looks up a single event by its numeric path parameter and returns
// it as JSON. A non-numeric id yields 400 Bad Request.
func eventById(c *gin.Context) {
	key := c.Param("id")
	id, err := strconv.Atoi(key)
	if err != nil {
		// Previously the Atoi error was ignored, silently looking up id 0.
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid event id"})
		return
	}
	event := repository.Event{}.FindById(uint(id))
	c.JSON(http.StatusOK, &event)
}
// createEvent binds the request body to an Event, persists it, and echoes the
// stored event back as JSON. A bind failure now yields 400 Bad Request
// (previously it silently answered 200 with a zero-value event).
func createEvent(c *gin.Context) {
	var event repository.Event
	if err := c.ShouldBind(&event); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}
	repository.Event{}.Create(&event)
	c.JSON(http.StatusOK, &event)
}
// updateEvent applies a partial (patch) update, supplied as a JSON object, to
// the event identified by the numeric path parameter. The original ignored
// all three possible errors (bind, Atoi, PatchUpdate); they are handled now.
func updateEvent(c *gin.Context) {
	var patch map[string]interface{}
	if err := c.BindJSON(&patch); err != nil {
		// BindJSON has already written a 400 status on failure.
		return
	}
	uId, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid event id"})
		return
	}
	nEvent, err := repository.Event{}.PatchUpdate(uint(uId), patch)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.JSON(http.StatusOK, &nEvent)
}
|
package string_arrage
// checkInclusion reports whether some permutation of s1 occurs as a
// contiguous substring of s2, i.e. whether s2 contains a window of length
// len(s1) with exactly the same byte multiset as s1.
func checkInclusion(s1 string, s2 string) bool {
	n := len(s1)
	if n > len(s2) {
		return false
	}
	// Byte-frequency tables: need holds s1's counts, have holds the counts
	// of the current length-n window of s2. Arrays compare with ==.
	var need, have [256]int
	for i := 0; i < n; i++ {
		need[s1[i]]++
		have[s2[i]]++
	}
	if need == have {
		return true
	}
	// Slide the window one byte at a time across the rest of s2.
	for i := n; i < len(s2); i++ {
		have[s2[i]]++
		have[s2[i-n]]--
		if need == have {
			return true
		}
	}
	return false
}
|
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"embed"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/gdamore/tcell/v2"
"gopkg.in/yaml.v3"
)
// Color is a color string: either a hex literal like "#ff00aa", the special
// value "default", or a color name resolvable by tcell.
type Color string

// ThemeConfig is the theme config. Each nested struct groups the colors used
// by one area of the UI; fields are populated from the theme YAML files.
type ThemeConfig struct {
	// Info panel colors.
	Info struct {
		Title Color
		Text  Color
	}
	// Menu (key hint) colors.
	Menu struct {
		Description Color
		Key         Color
	}
	// Logo text color.
	Logo struct {
		Text Color
	}
	// Breadcrumb bar colors.
	Crumbs struct {
		Foreground Color
		Background Color
	}
	// Border colors for the app frame and tables.
	Border struct {
		App   Color
		Table Color
	}
	// Table colors, including the selection cursor.
	Table struct {
		Title    Color
		Header   Color
		Body     Color
		CursorBg Color
		CursorFg Color
	}
	// Colors used to render resource status values.
	Status struct {
		Starting  Color
		Healthy   Color
		UnHealthy Color
		Waiting   Color
		Succeeded Color
		Failed    Color
		Unknown   Color
	}
	// YAML syntax-highlighting colors.
	Yaml struct {
		Key   Color
		Colon Color
		Value Color
	}
	// Topology graph colors.
	Topology struct {
		Line      Color
		App       Color
		Workflow  Color
		Component Color
		Policy    Color
		Trait     Color
		Kind      Color
	}
}
var (
	// ThemeConfigFS is the theme config file system (embedded theme/* files).
	//go:embed theme/*
	ThemeConfigFS embed.FS
	// ThemeMap maps a theme name to its parsed config.
	ThemeMap = make(map[string]ThemeConfig)
	// ThemeNameArray lists theme names in load order (embedded, then diy).
	ThemeNameArray []string
	// homePath is the user's home directory path.
	homePath string
	// diyThemeDirPath is the diy theme dir path like ~/.vela/theme/themes
	diyThemeDirPath string
	// themeConfigFilePath is the theme config file path like ~/.vela/theme/_config.yaml
	themeConfigFilePath string
)

const (
	// DefaultColor represents a default color.
	DefaultColor Color = "default"
	// DefaultTheme represents a default theme.
	DefaultTheme = "default"
	// embedThemePath is the directory inside the embedded FS holding themes.
	embedThemePath   = "theme"
	themeHomeDirPath = ".vela/theme"
	diyThemeDir      = "themes"
	themeConfigFile  = "_config.yaml"
)
// init loads the built-in (embedded) themes and then any user-defined ("diy")
// themes from ~/.vela/theme/themes into ThemeMap and ThemeNameArray. Themes
// that fail to read or parse are skipped silently; a diy theme with the same
// name as an embedded one overwrites the map entry.
func init() {
	homePath, _ = os.UserHomeDir()
	diyThemeDirPath = filepath.Join(homePath, themeHomeDirPath, diyThemeDir)
	themeConfigFilePath = filepath.Join(homePath, themeHomeDirPath, themeConfigFile)
	dir, err := ThemeConfigFS.ReadDir(embedThemePath)
	if err != nil {
		return
	}
	// embed theme config
	for _, item := range dir {
		// BUG FIX: embed.FS paths are always slash-separated (io/fs
		// semantics); filepath.Join produces backslashes on Windows and
		// would fail to resolve the embedded files.
		content, err := ThemeConfigFS.ReadFile(embedThemePath + "/" + item.Name())
		if err != nil {
			continue
		}
		var t ThemeConfig
		err = yaml.Unmarshal(content, &t)
		if err != nil {
			continue
		}
		// The theme name is the file name without its extension.
		themeName := strings.Split(item.Name(), ".")[0]
		ThemeMap[themeName] = t
		ThemeNameArray = append(ThemeNameArray, themeName)
	}
	// load diy theme config (on-disk paths, so filepath.Join is correct here)
	dir, err = os.ReadDir(diyThemeDirPath)
	if err != nil {
		return
	}
	for _, item := range dir {
		content, err := os.ReadFile(filepath.Clean(filepath.Join(diyThemeDirPath, item.Name())))
		if err != nil {
			continue
		}
		var t ThemeConfig
		err = yaml.Unmarshal(content, &t)
		if err != nil {
			continue
		}
		themeName := strings.Split(item.Name(), ".")[0]
		ThemeMap[themeName] = t
		ThemeNameArray = append(ThemeNameArray, themeName)
	}
}
// LoadThemeConfig loads the selected theme from the user's config file, or
// returns the built-in default theme when the file is missing, unreadable,
// unparsable, names the default theme, or names an unknown theme.
func LoadThemeConfig() *ThemeConfig {
	var selected struct {
		Name string `yaml:"name"`
	}
	// Creating the file on first use also means "use the default theme".
	if !makeThemeConfigFileIfNotExist() {
		return defaultTheme()
	}
	content, err := os.ReadFile(filepath.Clean(themeConfigFilePath))
	if err != nil {
		return defaultTheme()
	}
	if err := yaml.Unmarshal(content, &selected); err != nil {
		return defaultTheme()
	}
	if selected.Name != DefaultTheme {
		if cfg, ok := ThemeMap[selected.Name]; ok {
			return &cfg
		}
	}
	return defaultTheme()
}
// defaultTheme returns a freshly-built ThemeConfig with the hard-coded
// default color palette. The anonymous struct types must match the field
// types of ThemeConfig exactly for the composite literal to compile.
func defaultTheme() *ThemeConfig {
	return &ThemeConfig{
		Info: struct {
			Title Color
			Text  Color
		}{
			Title: "royalblue",
			Text:  "lightgray",
		},
		Menu: struct {
			Description Color
			Key         Color
		}{
			Description: "gray",
			Key:         "royalblue",
		},
		Logo: struct {
			Text Color
		}{
			Text: "royalblue",
		},
		Crumbs: struct {
			Foreground Color
			Background Color
		}{
			Foreground: "white",
			Background: "royalblue",
		},
		Border: struct {
			App   Color
			Table Color
		}{
			App:   "black",
			Table: "lightgray",
		},
		Table: struct {
			Title    Color
			Header   Color
			Body     Color
			CursorBg Color
			CursorFg Color
		}{
			Title:    "royalblue",
			Header:   "white",
			Body:     "blue",
			CursorBg: "blue",
			CursorFg: "black",
		},
		Yaml: struct {
			Key   Color
			Colon Color
			Value Color
		}{
			Key:   "#d33582",
			Colon: "lightgray",
			Value: "#839495",
		},
		Status: struct {
			Starting  Color
			Healthy   Color
			UnHealthy Color
			Waiting   Color
			Succeeded Color
			Failed    Color
			Unknown   Color
		}{
			Starting:  "blue",
			Healthy:   "green",
			UnHealthy: "red",
			Waiting:   "yellow",
			Succeeded: "orange",
			Failed:    "purple",
			Unknown:   "gray",
		},
		Topology: struct {
			Line      Color
			App       Color
			Workflow  Color
			Component Color
			Policy    Color
			Trait     Color
			Kind      Color
		}{
			Line:      "cadetblue",
			App:       "red",
			Workflow:  "orange",
			Component: "green",
			Policy:    "yellow",
			Trait:     "lightseagreen",
			Kind:      "orange",
		},
	}
}
// PersistentThemeConfig saves theme config to file.
// The write error is deliberately ignored: persisting the selection is
// best-effort and must not interrupt the UI.
func PersistentThemeConfig(themeName string) {
	makeThemeConfigFileIfNotExist()
	_ = os.WriteFile(themeConfigFilePath, []byte("name : "+themeName), 0600)
}
// makeThemeConfigFileIfNotExist makes the theme config file (with the default
// theme name as content) if it does not exist. It returns true when the file
// already existed, false when it was just created or could not be checked.
func makeThemeConfigFileIfNotExist() bool {
	velaThemeHome := filepath.Clean(filepath.Join(homePath, themeHomeDirPath))
	// BUG FIX: the original used os.Open for the existence check and never
	// closed the returned file, leaking a descriptor on every call; os.Stat
	// performs the same check without opening the file.
	if _, err := os.Stat(filepath.Clean(themeConfigFilePath)); err != nil {
		if os.IsNotExist(err) {
			// Make the directory and file on first use; failures are
			// tolerated and simply reported as "not existing yet".
			_ = os.MkdirAll(filepath.Clean(velaThemeHome), 0700)
			_ = os.WriteFile(filepath.Clean(themeConfigFilePath), []byte("name : "+DefaultTheme), 0600)
		}
		return false
	}
	return true
}
// String returns color as string. Hex literals pass through unchanged, the
// default color maps to "-" (terminal default), and anything else is resolved
// via tcell and rendered as "#rrggbb"; unresolvable colors fall back to "-".
func (c Color) String() string {
	if c.isHex() {
		return string(c)
	}
	if c == DefaultColor {
		return "-"
	}
	col := c.Color().TrueColor().Hex()
	if col < 0 {
		// Unresolvable color: use the terminal default.
		return "-"
	}
	return fmt.Sprintf("#%06x", col)
}
// isHex reports whether the color is written as a 7-character hex literal
// such as "#ff00aa".
func (c Color) isHex() bool {
	return len(c) == 7 && c[0] == '#'
}
// Color returns a view color: tcell's terminal-default color for
// DefaultColor, otherwise the tcell color resolved from the string,
// normalised to true color.
func (c Color) Color() tcell.Color {
	if c == DefaultColor {
		return tcell.ColorDefault
	}
	return tcell.GetColor(string(c)).TrueColor()
}
|
package mathx
// MaxInt returns the larger of a and b (a when they are equal).
func MaxInt(a int, b int) int {
	m := b
	if a >= b {
		m = a
	}
	return m
}
// MinInt returns the smaller of a and b (a when they are equal).
func MinInt(a int, b int) int {
	m := b
	if a <= b {
		m = a
	}
	return m
}
// MaxInt64 returns the larger of a and b (a when they are equal).
func MaxInt64(a int64, b int64) int64 {
	m := b
	if a >= b {
		m = a
	}
	return m
}
// MaxFloat64 returns the larger of a and b (a when they are equal).
// Note the original predicate `a >= b` is kept deliberately: when a is NaN
// the comparison is false and b is returned.
func MaxFloat64(a float64, b float64) float64 {
	m := b
	if a >= b {
		m = a
	}
	return m
}
|
package commands
import (
"context"
"strings"
"github.com/docker/docker/api/types/image"
"github.com/samber/lo"
dockerTypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/fatih/color"
"github.com/jesseduffield/lazydocker/pkg/utils"
"github.com/sirupsen/logrus"
)
// Image : A docker Image as shown in the UI, combining the raw docker
// summary with the split name/tag and the handles needed to operate on it.
type Image struct {
	Name          string                  // repository name (prefix-replaced per user config)
	Tag           string                  // tag portion of the first repo tag, if any
	ID            string                  // docker image ID
	Image         dockerTypes.ImageSummary // raw summary returned by the docker API
	Client        *client.Client
	OSCommand     *OSCommand
	Log           *logrus.Entry
	DockerCommand LimitedDockerCommand
}
// Remove removes the image from the docker daemon using the given options.
func (i *Image) Remove(options dockerTypes.ImageRemoveOptions) error {
	// The response (list of deleted/untagged items) is not needed here.
	_, err := i.Client.ImageRemove(context.Background(), i.ID, options)
	return err
}
// getHistoryResponseItemDisplayStrings formats one image-history layer into
// the table columns [id, tag, size, command], with colors applied.
func getHistoryResponseItemDisplayStrings(layer image.HistoryResponseItem) []string {
	tag := ""
	if len(layer.Tags) > 0 {
		tag = layer.Tags[0]
	}
	// Shorten "sha256:<digest>" to the first 10 digest characters.
	id := strings.TrimPrefix(layer.ID, "sha256:")
	if len(id) > 10 {
		id = id[0:10]
	}
	// Dim layers with no local ID ("<missing>").
	idColor := color.FgWhite
	if id == "<missing>" {
		idColor = color.FgBlue
	}
	// Strip the Dockerfile no-op shell prefix and highlight the instruction
	// keyword (e.g. CMD, LABEL) in yellow.
	dockerFileCommandPrefix := "/bin/sh -c #(nop) "
	createdBy := layer.CreatedBy
	if strings.Contains(layer.CreatedBy, dockerFileCommandPrefix) {
		createdBy = strings.Trim(strings.TrimPrefix(layer.CreatedBy, dockerFileCommandPrefix), " ")
		split := strings.Split(createdBy, " ")
		createdBy = utils.ColoredString(split[0], color.FgYellow) + " " + strings.Join(split[1:], " ")
	}
	createdBy = strings.Replace(createdBy, "\t", " ", -1)
	// Dim zero-size layers.
	size := utils.FormatBinaryBytes(int(layer.Size))
	sizeColor := color.FgWhite
	if size == "0B" {
		sizeColor = color.FgBlue
	}
	return []string{
		utils.ColoredString(id, idColor),
		utils.ColoredString(tag, color.FgGreen),
		utils.ColoredString(size, sizeColor),
		createdBy,
	}
}
// RenderHistory renders the history of the image as an aligned text table
// with an ID/TAG/SIZE/COMMAND header row.
func (i *Image) RenderHistory() (string, error) {
	history, err := i.Client.ImageHistory(context.Background(), i.ID)
	if err != nil {
		return "", err
	}
	tableBody := lo.Map(history, func(layer image.HistoryResponseItem, _ int) []string {
		return getHistoryResponseItemDisplayStrings(layer)
	})
	headers := [][]string{{"ID", "TAG", "SIZE", "COMMAND"}}
	table := append(headers, tableBody...)
	return utils.RenderTable(table)
}
// RefreshImages returns a slice of docker images, splitting each image's
// first repo tag into name and tag and applying the user-configured
// name-prefix replacements.
func (c *DockerCommand) RefreshImages() ([]*Image, error) {
	images, err := c.Client.ImageList(context.Background(), dockerTypes.ImageListOptions{})
	if err != nil {
		return nil, err
	}
	ownImages := make([]*Image, len(images))
	// Loop variable renamed from `image` to `img`: it shadowed the imported
	// docker `image` package.
	for i, img := range images {
		firstTag := ""
		tags := img.RepoTags
		if len(tags) > 0 {
			firstTag = tags[0]
		}
		nameParts := strings.Split(firstTag, ":")
		tag := ""
		name := "none"
		if len(nameParts) > 1 {
			// The tag is everything after the last ':'; the name may itself
			// contain ':' (e.g. registry ports), so re-join the leading parts.
			tag = nameParts[len(nameParts)-1]
			name = strings.Join(nameParts[:len(nameParts)-1], ":")
			for prefix, replacement := range c.Config.UserConfig.Replacements.ImageNamePrefixes {
				if strings.HasPrefix(name, prefix) {
					name = strings.Replace(name, prefix, replacement, 1)
					break
				}
			}
		}
		ownImages[i] = &Image{
			ID:            img.ID,
			Name:          name,
			Tag:           tag,
			Image:         img,
			Client:        c.Client,
			OSCommand:     c.OSCommand,
			Log:           c.Log,
			DockerCommand: c,
		}
	}
	return ownImages, nil
}
// PruneImages prunes images (removes dangling/unused ones); the prune report
// returned by the API is discarded.
func (c *DockerCommand) PruneImages() error {
	_, err := c.Client.ImagesPrune(context.Background(), filters.Args{})
	return err
}
|
//go:build test
// +build test
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package deployment
import (
"context"
"encoding/json"
"fmt"
"log"
"math/rand"
"os"
"os/exec"
"regexp"
"strconv"
"time"
"github.com/Azure/aks-engine/test/e2e/kubernetes/hpa"
"github.com/Azure/aks-engine/test/e2e/kubernetes/pod"
"github.com/Azure/aks-engine/test/e2e/kubernetes/service"
"github.com/Azure/aks-engine/test/e2e/kubernetes/util"
"github.com/pkg/errors"
)
const (
	// validateDeploymentNotExistRetries is how many times Get retries when
	// checking whether a deployment already exists.
	validateDeploymentNotExistRetries = 3
	// deploymentGetAfterCreateTimeout bounds the wait for a freshly-applied
	// deployment to become fetchable.
	deploymentGetAfterCreateTimeout = 1 * time.Minute
)
// List holds a list of deployments returned from kubectl get deploy
type List struct {
	Deployments []Deployment `json:"items"`
}

// Deployment represents a kubernetes deployment
type Deployment struct {
	Metadata Metadata `json:"metadata"`
}

// Metadata holds information like labels, name, and namespace
type Metadata struct {
	CreatedAt time.Time         `json:"creationTimestamp"`
	Labels    map[string]string `json:"labels"`
	Name      string            `json:"name"`
	Namespace string            `json:"namespace"`
	// HasHPA is not a standard kubernetes field; presumably it is filled in
	// by the test helpers elsewhere — confirm against callers.
	HasHPA bool `json:"hasHPA"`
}

// Spec holds information about the deployment strategy and number of replicas
type Spec struct {
	Replicas int      `json:"replicas"`
	Template Template `json:"template"`
}

// Template is used for fetching the deployment spec -> containers
type Template struct {
	TemplateSpec TemplateSpec `json:"spec"`
}

// TemplateSpec holds the list of containers for a deployment, the dns policy, and restart policy
type TemplateSpec struct {
	Containers    []Container `json:"containers"`
	DNSPolicy     string      `json:"dnsPolicy"`
	RestartPolicy string      `json:"restartPolicy"`
}

// Container holds information like image, pull policy, name, etc...
type Container struct {
	Image      string `json:"image"`
	PullPolicy string `json:"imagePullPolicy"`
	Name       string `json:"name"`
}
// CreateLinuxDeployAsync wraps CreateLinuxDeploy with a struct response for
// goroutine + channel usage.
// NOTE(review): ctx is accepted but not currently used — CreateLinuxDeploy
// does not take a context; kept for interface compatibility with callers.
func CreateLinuxDeployAsync(ctx context.Context, image, name, namespace, app, role string) GetResult {
	d, err := CreateLinuxDeploy(image, name, namespace, app, role)
	return GetResult{
		deployment: d,
		err:        err,
	}
}
// CreateLinuxDeployWithRetry will create a deployment for a given image with
// a name in a namespace, retrying every `sleep` until success or `timeout`.
// On timeout it returns the last attempt's deployment (possibly nil) and an
// error wrapping the most recent failure.
func CreateLinuxDeployWithRetry(image, name, namespace, app, role string, sleep, timeout time.Duration) (*Deployment, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ch := make(chan GetResult)
	var mostRecentCreateLinuxDeployWithRetryWithRetryError error
	var d *Deployment
	// Producer: keep attempting the create until the context expires,
	// pausing `sleep` between attempts.
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			default:
				ch <- CreateLinuxDeployAsync(ctx, image, name, namespace, app, role)
				time.Sleep(sleep)
			}
		}
	}()
	// Consumer: return on the first successful attempt, or report the most
	// recent error when the overall timeout fires.
	for {
		select {
		case result := <-ch:
			mostRecentCreateLinuxDeployWithRetryWithRetryError = result.err
			d = result.deployment
			if mostRecentCreateLinuxDeployWithRetryWithRetryError == nil {
				return d, nil
			}
		case <-ctx.Done():
			return d, errors.Errorf("CreateLinuxDeployWithRetry timed out: %s\n", mostRecentCreateLinuxDeployWithRetryWithRetryError)
		}
	}
}
// webDeploymentTmpl is the manifest template used by CreateLinuxDeploy.
// Sprintf arguments, in order: app, role, name (metadata), app, role
// (selector), app, role (pod labels), image, container name, and the
// kubernetes.io/os node selector value.
const webDeploymentTmpl = `---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: %s
    role: %s
  name: %s
spec:
  replicas: 1
  selector:
    matchLabels:
      app: %s
      role: %s
  template:
    metadata:
      labels:
        app: %s
        role: %s
    spec:
      containers:
      - image: %s
        name: %s
        resources:
          requests:
            cpu: 10m
            memory: 10M
      nodeSelector:
        kubernetes.io/os: %s
`
// CreateLinuxDeploy will create a deployment for a given image with a name in
// a namespace by writing a manifest to a temp file and `kubectl apply`ing it.
// app defaults to "webapp" and role to "any" when empty. It then waits for
// the deployment to become fetchable before returning it.
func CreateLinuxDeploy(image, name, namespace, app, role string) (*Deployment, error) {
	var commandTimeout time.Duration
	tmpFile, err := os.CreateTemp("", "e2e-linux-deployment-*.yaml")
	if err != nil {
		return nil, err
	}
	defer os.Remove(tmpFile.Name())
	if app == "" {
		app = "webapp"
	}
	if role == "" {
		role = "any"
	}
	manifest := fmt.Sprintf(webDeploymentTmpl,
		app, role, name, app, role, app, role, image, name, "linux")
	fmt.Fprintln(tmpFile, manifest)
	// BUG FIX: the temp file was never closed, leaking a descriptor per call
	// and (on some platforms) preventing kubectl from reading it. Close it
	// before handing the path to kubectl; the Close error also surfaces any
	// deferred write failure.
	if err := tmpFile.Close(); err != nil {
		return nil, err
	}
	cmd := exec.Command("k", "apply", "-n", namespace, "-f", tmpFile.Name())
	out, err := util.RunAndLogCommand(cmd, commandTimeout)
	if err != nil {
		log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out))
		return nil, err
	}
	d, err := GetWithRetry(name, namespace, 3*time.Second, deploymentGetAfterCreateTimeout)
	if err != nil {
		log.Printf("Error while trying to fetch Deployment %s in namespace %s:%s\n", name, namespace, err)
		return nil, err
	}
	return d, nil
}
// CreateDeployFromImageAsync wraps CreateLinuxDeploy with a struct response for goroutine + channel usage
func CreateDeployFromImageAsync(image, name, namespace, app, role string) GetResult {
d, err := CreateLinuxDeploy(image, name, namespace, app, role)
return GetResult{
deployment: d,
err: err,
}
}
// CreateDeploymentFromImageWithRetry will kubectl apply a Deployment from file with a name with retry toleration
func CreateDeploymentFromImageWithRetry(image, name, namespace, app, role string, sleep, timeout time.Duration) (*Deployment, error) {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
ch := make(chan GetResult)
var mostRecentCreateDeploymentFromImageWithRetryError error
var d *Deployment
go func() {
for {
select {
case <-ctx.Done():
return
default:
ch <- CreateDeployFromImageAsync(image, name, namespace, app, role)
time.Sleep(sleep)
}
}
}()
for {
select {
case result := <-ch:
mostRecentCreateDeploymentFromImageWithRetryError = result.err
d = result.deployment
if mostRecentCreateDeploymentFromImageWithRetryError == nil {
if d != nil {
return d, nil
}
}
case <-ctx.Done():
return d, errors.Errorf("CreateDeploymentFromImageWithRetry timed out: %s\n", mostRecentCreateDeploymentFromImageWithRetryError)
}
}
}
// CreateLinuxDeployIfNotExist first checks if a deployment already exists, and return it if so
// If not, we call CreateLinuxDeploy
func CreateLinuxDeployIfNotExist(image, name, namespace, app, role string, sleep, timeout time.Duration) (*Deployment, error) {
deployment, err := Get(name, namespace, validateDeploymentNotExistRetries)
if err != nil {
return CreateDeploymentFromImageWithRetry(image, name, namespace, app, role, sleep, timeout)
}
return deployment, nil
}
// CreateLinuxDeployDeleteIfExists will create a deployment, deleting any pre-existing deployment with the same name
func CreateLinuxDeployDeleteIfExists(pattern, image, name, namespace, app, role string, timeout time.Duration) (*Deployment, error) {
deployments, err := GetAllByPrefixWithRetry(pattern, namespace, 5*time.Second, timeout)
if err != nil {
return nil, err
}
for _, d := range deployments {
d.Delete(util.DefaultDeleteRetries)
}
return CreateLinuxDeployWithRetry(image, name, namespace, app, role, 3*time.Minute, timeout)
}
const runDeploymentTmpl = `---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
run: %s
name: %s
spec:
replicas: %d
selector:
matchLabels:
run: %s
template:
metadata:
labels:
run: %s
spec:
containers:
- image: %s
name: %s
command:
- /bin/sh
- -c
- "%s"
nodeSelector:
kubernetes.io/os: %s
`
// RunLinuxDeploy will create a deployment that runs a bash command in a pod
func RunLinuxDeploy(image, name, namespace, command string, replicas int) (*Deployment, error) {
var commandTimeout time.Duration
tmpFile, err := os.CreateTemp("", "e2e-linux-deployment-*.yaml")
if err != nil {
return nil, err
}
defer os.Remove(tmpFile.Name())
manifest := fmt.Sprintf(runDeploymentTmpl,
name, name, replicas, name, name, image, name, command, "linux")
fmt.Fprintln(tmpFile, manifest)
cmd := exec.Command("k", "apply", "-n", namespace, "-f", tmpFile.Name())
out, err := util.RunAndLogCommand(cmd, commandTimeout)
if err != nil {
log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out))
return nil, err
}
d, err := GetWithRetry(name, namespace, 3*time.Second, deploymentGetAfterCreateTimeout)
if err != nil {
log.Printf("Error while trying to fetch Deployment %s in namespace %s:%s\n", name, namespace, err)
return nil, err
}
return d, nil
}
// RunLinuxDeployDeleteIfExists will create a deployment that runs a bash command in a pod,
// deleting any pre-existing deployment with the same name
func RunLinuxDeployDeleteIfExists(pattern, image, name, namespace, command string, replicas int) (*Deployment, error) {
deployments, err := GetAllByPrefix(pattern, namespace)
if err != nil {
return nil, err
}
for _, d := range deployments {
d.Delete(util.DefaultDeleteRetries)
}
return RunLinuxDeploy(image, name, namespace, command, replicas)
}
type deployRunnerCmd func(string, string, string, string, int) (*Deployment, error)
// RunDeploymentMultipleTimes runs the same command 'desiredAttempts' times
func RunDeploymentMultipleTimes(deployRunnerCmd deployRunnerCmd, image, name, command string, replicas, desiredAttempts int, sleep, podTimeout, timeout time.Duration) (int, error) {
var successfulAttempts int
var actualAttempts int
logResults := func() {
log.Printf("Ran command on %d of %d desired attempts with %d successes\n\n", actualAttempts, desiredAttempts, successfulAttempts)
}
defer logResults()
for i := 0; i < desiredAttempts; i++ {
actualAttempts++
r := rand.New(rand.NewSource(time.Now().UnixNano()))
deploymentName := fmt.Sprintf("%s-%d", name, r.Intn(99999))
var d *Deployment
var err error
d, err = deployRunnerCmd(image, deploymentName, "default", command, replicas)
if err != nil {
return successfulAttempts, err
}
pods, err := d.WaitForReplicas(replicas, replicas, sleep, timeout)
if err != nil {
log.Printf("deployment %s did not have the expected replica count %d in time\n", deploymentName, replicas)
return successfulAttempts, err
}
var podsSucceeded int
for _, p := range pods {
running, err := pod.WaitOnSuccesses(p.Metadata.Name, p.Metadata.Namespace, 6, true, sleep, podTimeout)
if err != nil {
log.Printf("pod %s did not succeed in time\n", p.Metadata.Name)
return successfulAttempts, err
}
if running {
podsSucceeded++
}
}
err = d.Delete(util.DefaultDeleteRetries)
if err != nil {
return successfulAttempts, err
}
if podsSucceeded == replicas {
successfulAttempts++
}
}
return successfulAttempts, nil
}
// CreateWindowsDeployAsync wraps CreateWindowsDeploy with a struct response for goroutine + channel usage
func CreateWindowsDeployAsync(image, name, namespace, app, role string) GetResult {
d, err := CreateWindowsDeploy(image, name, namespace, app, role)
return GetResult{
deployment: d,
err: err,
}
}
// CreateWindowsDeployWithRetry will return all deployments in a given namespace that match a prefix, retrying if error up to a timeout
func CreateWindowsDeployWithRetry(image, name, namespace, app, role string, sleep, timeout time.Duration) (*Deployment, error) {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
ch := make(chan GetResult)
var mostRecentCreateWindowsDeployWithRetryError error
var d *Deployment
go func() {
for {
select {
case <-ctx.Done():
return
default:
ch <- CreateWindowsDeployAsync(image, name, namespace, app, role)
time.Sleep(sleep)
}
}
}()
for {
select {
case result := <-ch:
mostRecentCreateWindowsDeployWithRetryError = result.err
d = result.deployment
if mostRecentCreateWindowsDeployWithRetryError == nil {
return d, nil
}
case <-ctx.Done():
return d, errors.Errorf("GetAllByPrefixWithRetry timed out: %s\n", mostRecentCreateWindowsDeployWithRetryError)
}
}
}
// CreateDeploymentFromFile will create a Deployment from file with a name
func CreateDeploymentFromFile(filename, name, namespace string, sleep, timeout time.Duration) (*Deployment, error) {
cmd := exec.Command("k", "apply", "-f", filename)
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error trying to create Deployment %s:%s\n", name, string(out))
return nil, err
}
d, err := GetWithRetry(name, namespace, sleep, timeout)
if err != nil {
log.Printf("Error while trying to fetch Deployment %s:%s\n", name, err)
return nil, err
}
return d, nil
}
// CreateDeploymentFromFileAsync wraps CreateDeploymentFromFile with a struct response for goroutine + channel usage
func CreateDeploymentFromFileAsync(filename, name, namespace string, sleep, timeout time.Duration) GetResult {
d, err := CreateDeploymentFromFile(filename, name, namespace, sleep, timeout)
return GetResult{
deployment: d,
err: err,
}
}
// CreateDeploymentFromFileWithRetry will kubectl apply a Deployment from file with a name with retry toleration
func CreateDeploymentFromFileWithRetry(filename, name, namespace string, sleep, timeout time.Duration) (*Deployment, error) {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
ch := make(chan GetResult)
var mostRecentCreateDeploymentFromFileWithRetryError error
var d *Deployment
go func() {
for {
select {
case <-ctx.Done():
return
default:
ch <- CreateDeploymentFromFileAsync(filename, name, namespace, sleep, timeout)
time.Sleep(sleep)
}
}
}()
for {
select {
case result := <-ch:
mostRecentCreateDeploymentFromFileWithRetryError = result.err
d = result.deployment
if mostRecentCreateDeploymentFromFileWithRetryError == nil {
if d != nil {
return d, nil
}
}
case <-ctx.Done():
return d, errors.Errorf("CreateDeploymentFromFileWithRetry timed out: %s\n", mostRecentCreateDeploymentFromFileWithRetryError)
}
}
}
// CreateWindowsDeploy will create a deployment for a given image with a name in a namespace and create a service mapping a hostPort
func CreateWindowsDeploy(image, name, namespace, app, role string) (*Deployment, error) {
var commandTimeout time.Duration
var cmd *exec.Cmd
tmpFile, err := os.CreateTemp("", "e2e-windows-deployment-*.yaml")
if err != nil {
return nil, err
}
defer os.Remove(tmpFile.Name())
if app == "" {
app = "webapp"
}
if role == "" {
role = "any"
}
manifest := fmt.Sprintf(webDeploymentTmpl,
app, role, name, app, role, app, role, image, name, "windows")
fmt.Fprintln(tmpFile, manifest)
cmd = exec.Command("k", "apply", "-n", namespace, "-f", tmpFile.Name())
out, err := util.RunAndLogCommand(cmd, commandTimeout)
if err != nil {
log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out))
return nil, err
}
d, err := GetWithRetry(name, namespace, 3*time.Second, deploymentGetAfterCreateTimeout)
if err != nil {
log.Printf("Error while trying to fetch Deployment %s in namespace %s:%s\n", name, namespace, err)
return nil, err
}
return d, nil
}
const hostportDeploymentTmpl = `---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
run: %s
name: %s
spec:
replicas: 1
selector:
matchLabels:
run: %s
template:
metadata:
labels:
run: %s
spec:
containers:
- image: %s
name: %s
ports:
- containerPort: %d%s
nodeSelector:
kubernetes.io/os: %s
`
// CreateWindowsDeployWithHostport will create a deployment for a given image with a name in a namespace and create a service mapping a hostPort
func CreateWindowsDeployWithHostport(image, name, namespace string, port int, hostport int) (*Deployment, error) {
var commandTimeout time.Duration
tmpFile, err := os.CreateTemp("", "e2e-windows-deployment-*.yaml")
if err != nil {
return nil, err
}
defer os.Remove(tmpFile.Name())
var hostportStr string
if hostport != -1 {
hostportStr = fmt.Sprintf("\n hostPort: %d", hostport)
}
manifest := fmt.Sprintf(hostportDeploymentTmpl,
name, name, name, name, image, name, port, hostportStr, "windows")
fmt.Fprintln(tmpFile, manifest)
cmd := exec.Command("k", "apply", "-n", namespace, "-f", tmpFile.Name())
out, err := util.RunAndLogCommand(cmd, commandTimeout)
if err != nil {
log.Printf("Error trying to deploy %s [%s] in namespace %s:%s\n", name, image, namespace, string(out))
return nil, err
}
d, err := GetWithRetry(name, namespace, 3*time.Second, deploymentGetAfterCreateTimeout)
if err != nil {
log.Printf("Error while trying to fetch Deployment %s in namespace %s:%s\n", name, namespace, err)
return nil, err
}
return d, nil
}
// CreateWindowsDeployWithHostportIfNotExist first checks if a deployment already exists, and return it if so
// If not, we call CreateWindowsDeploy
func CreateWindowsDeployWithHostportIfNotExist(image, name, namespace string, port int, hostport int) (*Deployment, error) {
deployment, err := Get(name, namespace, validateDeploymentNotExistRetries)
if err != nil {
return CreateWindowsDeployWithHostport(image, name, namespace, port, hostport)
}
return deployment, nil
}
// CreateWindowsDeployWithHostportDeleteIfExist first checks if a deployment already exists according to a naming pattern
// If a pre-existing deployment is found matching that pattern, it is deleted
func CreateWindowsDeployWithHostportDeleteIfExist(pattern, image, name, namespace string, port int, hostport int) (*Deployment, error) {
deployments, err := GetAllByPrefix(pattern, namespace)
if err != nil {
return nil, err
}
for _, d := range deployments {
d.Delete(util.DefaultDeleteRetries)
}
return CreateWindowsDeployWithHostport(image, name, namespace, port, hostport)
}
// CreateWindowsDeployDeleteIfExist first checks if a deployment already exists according to a naming pattern
// If a pre-existing deployment is found matching that pattern, it is deleted
func CreateWindowsDeployDeleteIfExist(pattern, image, name, namespace, app, role string, timeout time.Duration) (*Deployment, error) {
deployments, err := GetAllByPrefixWithRetry(pattern, namespace, 5*time.Second, timeout)
if err != nil {
return nil, err
}
for _, d := range deployments {
d.Delete(util.DefaultDeleteRetries)
}
return CreateWindowsDeployWithRetry(image, name, namespace, app, role, 3*time.Minute, timeout)
}
// Get returns a deployment from a name and namespace
func Get(name, namespace string, retries int) (*Deployment, error) {
d := Deployment{}
var out []byte
var err error
for i := 0; i < retries; i++ {
cmd := exec.Command("k", "get", "deploy", name, "-n", namespace, "-o", "json")
out, err = cmd.CombinedOutput()
if err != nil {
util.PrintCommand(cmd)
log.Printf("Error getting deployment: %s\n", err)
} else {
jsonErr := json.Unmarshal(out, &d)
if jsonErr != nil {
log.Printf("Error unmarshalling deployment json:%s\n", jsonErr)
err = jsonErr
}
}
time.Sleep(3 * time.Second)
}
return &d, err
}
// GetAll will return all deployments in a given namespace
func GetAll(namespace string) (*List, error) {
cmd := exec.Command("k", "get", "deployments", "-n", namespace, "-o", "json")
util.PrintCommand(cmd)
out, err := cmd.CombinedOutput()
if err != nil {
log.Printf("Error getting all deployments:\n")
return nil, err
}
dl := List{}
err = json.Unmarshal(out, &dl)
if err != nil {
log.Printf("Error unmarshalling deployments json:%s\n", err)
return nil, err
}
return &dl, nil
}
// GetAllByPrefixResult is the result type for GetAllByPrefixAsync
type GetAllByPrefixResult struct {
Deployments []Deployment
Err error
}
// GetAllByPrefixAsync wraps GetAllByPrefix with a struct response for goroutine + channel usage
func GetAllByPrefixAsync(prefix, namespace string) GetAllByPrefixResult {
deployments, err := GetAllByPrefix(prefix, namespace)
return GetAllByPrefixResult{
Deployments: deployments,
Err: err,
}
}
// GetAllByPrefixWithRetry will return all deployments in a given namespace that match a prefix, retrying if error up to a timeout
func GetAllByPrefixWithRetry(prefix, namespace string, sleep, timeout time.Duration) ([]Deployment, error) {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
ch := make(chan GetAllByPrefixResult)
var mostRecentGetAllByPrefixWithRetryError error
var deployments []Deployment
go func() {
for {
select {
case <-ctx.Done():
return
default:
ch <- GetAllByPrefixAsync(prefix, namespace)
time.Sleep(sleep)
}
}
}()
for {
select {
case result := <-ch:
mostRecentGetAllByPrefixWithRetryError = result.Err
deployments = result.Deployments
if mostRecentGetAllByPrefixWithRetryError == nil {
return deployments, nil
}
case <-ctx.Done():
return deployments, errors.Errorf("GetAllByPrefixWithRetry timed out: %s\n", mostRecentGetAllByPrefixWithRetryError)
}
}
}
// GetAllByPrefix will return all pods in a given namespace that match a prefix
func GetAllByPrefix(prefix, namespace string) ([]Deployment, error) {
dl, err := GetAll(namespace)
if err != nil {
return nil, err
}
deployments := []Deployment{}
for _, d := range dl.Deployments {
matched, err := regexp.MatchString(prefix+"-.*", d.Metadata.Name)
if err != nil {
log.Printf("Error trying to match deployment name:%s\n", err)
return nil, err
}
if matched {
deployments = append(deployments, d)
}
}
return deployments, nil
}
// Describe will describe a deployment resource
func (d *Deployment) Describe() error {
var commandTimeout time.Duration
cmd := exec.Command("k", "describe", "deployment", d.Metadata.Name, "-n", d.Metadata.Namespace)
out, err := util.RunAndLogCommand(cmd, commandTimeout)
log.Printf("\n%s\n", string(out))
return err
}
// Delete will delete a deployment in a given namespace
func (d *Deployment) Delete(retries int) error {
var zeroValueDuration time.Duration
var kubectlOutput []byte
var kubectlError error
for i := 0; i < retries; i++ {
cmd := exec.Command("k", "delete", "deploy", "-n", d.Metadata.Namespace, d.Metadata.Name)
kubectlOutput, kubectlError = util.RunAndLogCommand(cmd, zeroValueDuration)
if kubectlError != nil {
log.Printf("Error while trying to delete deployment %s in namespace %s:%s\n", d.Metadata.Namespace, d.Metadata.Name, string(kubectlOutput))
continue
}
break
}
if kubectlError != nil {
return kubectlError
}
if d.Metadata.HasHPA {
for i := 0; i < retries; i++ {
cmd := exec.Command("k", "delete", "hpa", "-n", d.Metadata.Namespace, d.Metadata.Name)
kubectlOutput, kubectlError = util.RunAndLogCommand(cmd, zeroValueDuration)
if kubectlError != nil {
log.Printf("Deployment %s has associated HPA but unable to delete in namespace %s:%s\n", d.Metadata.Namespace, d.Metadata.Name, string(kubectlOutput))
continue
}
break
}
}
return kubectlError
}
// Expose will create a load balancer and expose the deployment on a given port
func (d *Deployment) Expose(svcType string, targetPort, exposedPort int) error {
var commandTimeout time.Duration
cmd := exec.Command("k", "expose", "deployment", d.Metadata.Name, "--type", svcType, "-n", d.Metadata.Namespace, "--target-port", strconv.Itoa(targetPort), "--port", strconv.Itoa(exposedPort))
out, err := util.RunAndLogCommand(cmd, commandTimeout)
if err != nil {
log.Printf("Error while trying to expose (%s) target port (%v) for deployment %s in namespace %s on port %v:%s\n", svcType, targetPort, d.Metadata.Name, d.Metadata.Namespace, exposedPort, string(out))
return err
}
return nil
}
// ExposeIfNotExist will create a load balancer and expose the deployment on a given port if the associated service doesn't already exist
func (d *Deployment) ExposeIfNotExist(svcType string, targetPort, exposedPort int) error {
_, err := service.Get(d.Metadata.Name, d.Metadata.Namespace)
if err != nil {
return d.Expose(svcType, targetPort, exposedPort)
}
return nil
}
// ExposeDeleteIfExist will create a load balancer and expose the deployment on a given port
// If a service matching the passed in pattern already exists, we'll delete it first
func (d *Deployment) ExposeDeleteIfExist(pattern, namespace, svcType string, targetPort, exposedPort int) error {
services, err := service.GetAllByPrefix(pattern, namespace)
if err != nil {
return err
}
for _, s := range services {
s.Delete(util.DefaultDeleteRetries)
}
return d.Expose(svcType, targetPort, exposedPort)
}
// ScaleDeployment scales a deployment to n instancees
func (d *Deployment) ScaleDeployment(n int) error {
var commandTimeout time.Duration
cmd := exec.Command("k", "scale", fmt.Sprintf("--replicas=%d", n), "deployment", d.Metadata.Name)
out, err := util.RunAndLogCommand(cmd, commandTimeout)
if err != nil {
log.Printf("Error while scaling deployment %s to %d pods:%s\n", d.Metadata.Name, n, string(out))
return err
}
return nil
}
// CreateDeploymentHPA applies autoscale characteristics to deployment
func (d *Deployment) CreateDeploymentHPA(cpuPercent, min, max int) error {
var commandTimeout time.Duration
cmd := exec.Command("k", "autoscale", "deployment", d.Metadata.Name, fmt.Sprintf("--cpu-percent=%d", cpuPercent),
fmt.Sprintf("--min=%d", min), fmt.Sprintf("--max=%d", max))
out, err := util.RunAndLogCommand(cmd, commandTimeout)
if err != nil {
log.Printf("Error while configuring autoscale against deployment %s:%s\n", d.Metadata.Name, string(out))
return err
}
d.Metadata.HasHPA = true
return nil
}
// CreateDeploymentHPADeleteIfExist applies autoscale characteristics to deployment, deleting any pre-existing HPA resource first
func (d *Deployment) CreateDeploymentHPADeleteIfExist(cpuPercent, min, max int) error {
h, err := hpa.Get(d.Metadata.Name, d.Metadata.Namespace, 5)
if err == nil {
err := h.Delete(util.DefaultDeleteRetries)
if err != nil {
return err
}
_, err = hpa.WaitOnDeleted(d.Metadata.Name, d.Metadata.Namespace, 5*time.Second, 1*time.Minute)
if err != nil {
return err
}
}
return d.CreateDeploymentHPA(cpuPercent, min, max)
}
// Pods will return all pods related to a deployment
func (d *Deployment) Pods() ([]pod.Pod, error) {
return pod.GetAllByPrefixWithRetry(d.Metadata.Name, d.Metadata.Namespace, 3*time.Second, 20*time.Minute)
}
// PodsRunning will return all pods in a Running state related to a deployment
func (d *Deployment) PodsRunning() ([]pod.Pod, error) {
return pod.GetAllRunningByPrefixWithRetry(d.Metadata.Name, d.Metadata.Namespace, 3*time.Second, 20*time.Minute)
}
// GetWithRetry gets a deployment, allowing for retries
func GetWithRetry(name, namespace string, sleep, timeout time.Duration) (*Deployment, error) {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
ch := make(chan GetResult)
var mostRecentGetWithRetryError error
var deployment *Deployment
go func() {
for {
select {
case <-ctx.Done():
return
default:
ch <- GetAsync(name, namespace)
time.Sleep(sleep)
}
}
}()
for {
select {
case result := <-ch:
mostRecentGetWithRetryError = result.err
deployment = result.deployment
if mostRecentGetWithRetryError == nil {
if deployment != nil {
return deployment, nil
}
}
case <-ctx.Done():
return nil, errors.Errorf("GetWithRetry timed out: %s\n", mostRecentGetWithRetryError)
}
}
}
// GetResult is a return struct for GetAsync
type GetResult struct {
deployment *Deployment
err error
}
// GetAsync wraps Get with a struct response for goroutine + channel usage
func GetAsync(name, namespace string) GetResult {
deployment, err := Get(name, namespace, 1)
return GetResult{
deployment: deployment,
err: err,
}
}
// WaitForReplicas waits for a pod replica count between min and max
func (d *Deployment) WaitForReplicas(min, max int, sleep, timeout time.Duration) ([]pod.Pod, error) {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
ch := make(chan pod.GetPodsResult)
var mostRecentWaitForReplicasError error
var pods []pod.Pod
go func() {
for {
select {
case <-ctx.Done():
return
default:
ch <- pod.GetAllRunningByPrefixAsync(d.Metadata.Name, d.Metadata.Namespace)
time.Sleep(sleep)
}
}
}()
for {
select {
case result := <-ch:
mostRecentWaitForReplicasError = result.Err
pods = result.Pods
if mostRecentWaitForReplicasError == nil {
if min == -1 {
if len(pods) <= max {
return pods, nil
}
} else if max == -1 {
if len(pods) >= min {
return pods, nil
}
} else {
if len(pods) >= min && len(pods) <= max {
return pods, nil
}
}
}
case <-ctx.Done():
err := d.Describe()
if err != nil {
log.Printf("Unable to describe deployment %s: %s", d.Metadata.Name, err)
}
return pods, errors.Errorf("WaitForReplicas timed out: %s\n", mostRecentWaitForReplicasError)
}
}
}
// WaitForReplicasWithAction waits for a pod replica count between min and max and runs an action after every check
func (d *Deployment) WaitForReplicasWithAction(min, max int, sleep, timeout time.Duration, action func() error) ([]pod.Pod, error) {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
ch := make(chan pod.GetPodsResult)
var mostRecentWaitForReplicasError error
var pods []pod.Pod
go func() {
for {
select {
case <-ctx.Done():
return
default:
ch <- pod.GetAllRunningByPrefixAsync(d.Metadata.Name, d.Metadata.Namespace)
time.Sleep(sleep)
err := action()
if err != nil {
mostRecentWaitForReplicasError = err
cancel()
}
}
}
}()
for {
select {
case result := <-ch:
mostRecentWaitForReplicasError = result.Err
pods = result.Pods
if mostRecentWaitForReplicasError == nil {
if min == -1 {
if len(pods) <= max {
return pods, nil
}
} else if max == -1 {
if len(pods) >= min {
return pods, nil
}
} else {
if len(pods) >= min && len(pods) <= max {
return pods, nil
}
}
}
case <-ctx.Done():
err := d.Describe()
if err != nil {
log.Printf("Unable to describe deployment %s: %s", d.Metadata.Name, err)
}
return pods, errors.Errorf("WaitForReplicas timed out: %s\n", mostRecentWaitForReplicasError)
}
}
}
|
package stack
import "fmt"
type Browser struct {
backStack *Stack
forwardStack *Stack
}
func NewBrowser() *Browser {
return &Browser{New(), New()}
}
func (b *Browser) Push(addr string) {
b.backStack.Push(addr)
}
func (b *Browser) Forward() {
if b.forwardStack.Len() == 0 {
return
}
v, _ := b.forwardStack.Pop()
b.backStack.Push(v)
}
func (b *Browser) Back() {
if b.backStack.Len() == 0 {
return
}
v, _ := b.backStack.Pop()
b.forwardStack.Push(v)
}
func (b *Browser) String() string {
return "\n" + fmt.Sprint(b.backStack) + "\n" + fmt.Sprint(b.forwardStack)
}
|
--- vendor/maunium.net/go/tcell/tscreen.go.orig 2022-04-12 11:45:41 UTC
+++ vendor/maunium.net/go/tcell/tscreen.go
@@ -50,13 +50,9 @@ const (
// $COLUMNS environment variables can be set to the actual window size,
// otherwise defaults taken from the terminal database are used.
func NewTerminfoScreen() (Screen, error) {
- term := os.Getenv("TERM")
- if len(term) >= 6 && term[:6] == "screen" && len(os.Getenv("TMUX")) > 0 {
- term = "tmux"
- }
- ti, e := terminfo.LookupTerminfo(term)
+ ti, e := terminfo.LookupTerminfo(os.Getenv("TERM"))
if e != nil {
- ti, e = loadDynamicTerminfo(term)
+ ti, e = loadDynamicTerminfo(os.Getenv("TERM"))
if e != nil {
return nil, e
}
|
/*
* EVE Swagger Interface
*
* An OpenAPI for EVE Online
*
* OpenAPI spec version: 0.4.1.dev1
*
* Generated by: https://github.com/swagger-api/swagger-codegen.git
*/
package swagger
// recipient object
type PostCharactersCharacterIdMailRecipient struct {
// recipient_id integer
RecipientId int32 `json:"recipient_id,omitempty"`
// recipient_type string
RecipientType string `json:"recipient_type,omitempty"`
}
|
package handlers
import (
"encoding/json"
"net/http"
"time"
"github.com/KARTHICK13691/go-currency/models"
"github.com/gorilla/mux"
"github.com/shopspring/decimal"
)
// Latest /latest route controller
func Latest(w http.ResponseWriter, r *http.Request) {
baseParam := r.URL.Query().Get("base")
symbolsParam := r.URL.Query().Get("symbols")
currencies, err := models.LatestRates(baseParam, symbolsParam)
if err != nil {
http.Error(w, http.StatusText(500), 500)
return
}
decimal.MarshalJSONWithoutQuotes = true
response, err := json.MarshalIndent(currencies, "", " ")
if err != nil {
http.Error(w, http.StatusText(500), 500)
return
}
w.Header().Set("Content-Type", "application/json")
w.Write(response)
}
// Historical : /{date} route controller
func Historical(w http.ResponseWriter, r *http.Request) {
baseParam := r.URL.Query().Get("base")
symbolsParam := r.URL.Query().Get("symbols")
vars := mux.Vars(r)
_, err := time.Parse("2006-01-02", vars["date"])
if err != nil {
http.Error(w, http.StatusText(400), 400)
return
}
currencies, err := models.HistoricalRates(baseParam, symbolsParam, vars["date"])
if err != nil {
http.Error(w, http.StatusText(500), 500)
return
}
decimal.MarshalJSONWithoutQuotes = true
response, err := json.MarshalIndent(currencies, "", " ")
if err != nil {
http.Error(w, http.StatusText(500), 500)
return
}
w.Header().Set("Content-Type", "application/json")
w.Write(response)
}
// Favicon /favicon.ico route controller
func Favicon(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "image/x-icon")
w.Header().Set("Cache-Control", "public, max-age=7776000")
http.ServeFile(w, r, "./static/favicon.ico")
}
|
/*
Descripton:
Given a typical x/y coordinate system we can plot lines. It would be interesting to know which lines intersect.
Input:
A series of lines from 1 to many to put in our 2-D space. The data will be in the form:
(label) (x1 y1) (x2 y2)
(label) will be a letter A-Z
(x1 y1) will be the coordinates of the starting point on line
(x2 y2) will be the coordinates of the ending point on line
example input:
A -2.5 .5 3.5 .5
B -2.23 99.99 -2.10 -56.23
C -1.23 99.99 -1.10 -56.23
D 100.1 1000.34 2000.23 2100.23
E 1.5 -1 1.5 1.0
F 2.0 2.0 3.0 2.0
G 2.5 .5 2.5 2.0
Max X can be 1,000,000,000.00
Max Y can be 1,000,000,000.00
Output:
The program will list which lines intersect. And which have 0 intersects.
Example Output:
Intersecting Lines:
A B
A C
A E
A G
F G
No intersections:
D
Difficulty:
This is a coder_d00d(tm) unknown difficulty challenge. It could be easy. Could be hard. But it seems cool for a Friday.
If you want to make it easier: input is only 2 lines and you return yes/no
If you want to make it harder: output is the 2 lines and the (x y) point they intersect at.
*/
package main
import "fmt"
func main() {
intersections([]line{
{'A', point{-2.5, .5}, point{3.5, .5}},
{'B', point{-2.23, 99.99}, point{-2.10, -56.23}},
{'C', point{-1.23, 99.99}, point{-1.10, -56.23}},
{'D', point{100.1, 1000.34}, point{2000.23, 2100.23}},
{'E', point{1.5, -1}, point{1.5, 1.0}},
{'F', point{2.0, 2.0}, point{3.0, 2.0}},
{'G', point{2.5, .5}, point{2.5, 2.0}},
})
}
type point struct {
x, y float64
}
type line struct {
name rune
a, b point
}
func intersections(l []line) {
var (
m = make(map[rune]bool)
p [][2]rune
q []rune
)
n := len(l)
for i := 0; i < n; i++ {
x := l[i].name
for j := i + 1; j < n; j++ {
y := l[j].name
if intersect(l[i], l[j]) {
p = append(p, [2]rune{x, y})
m[x], m[y] = true, true
}
}
if !m[x] {
q = append(q, x)
}
}
fmt.Printf("Intersecting Lines:\n")
for _, c := range p {
fmt.Printf("%c %c\n", c[0], c[1])
}
fmt.Printf("No intersections:\n")
for _, c := range q {
fmt.Printf("%c\n", c)
}
}
// https://bryceboe.com/2006/10/23/line-segment-intersection-algorithm/
func intersect(l1, l2 line) bool {
A, B := l1.a, l1.b
C, D := l2.a, l2.b
return ccw(A, C, D) != ccw(B, C, D) && ccw(A, B, C) != ccw(A, B, D)
}
func ccw(A, B, C point) bool {
return (C.y-A.y)*(B.x-A.x) >= (B.y-A.y)*(C.x-A.x)
}
|
package sp_test
import (
"encoding/json"
"fmt"
"reflect"
"regexp"
"strings"
"testing"
"time"
"github.com/chenyoufu/esql/sp"
)
// Ensure the parser can parse strings into Statement ASTs.
// Each table case pairs an input query with either the exact AST the parser
// must produce or the exact error string it must report. Successful cases
// are additionally round-tripped: the parsed statement is re-serialized via
// String(), re-parsed, and must match the expected AST again.
func TestParser_ParseStatement(t *testing.T) {
	// For use in various tests.
	now := time.Now()
	var tests = []struct {
		skip   bool                   // set to exclude a case from the run
		s      string                 // input query
		params map[string]interface{} // currently unused by the loop below
		stmt   sp.Statement           // expected AST (when err is empty)
		err    string                 // expected error string ("" = success)
	}{
		// SELECT * statement
		{
			s: `SELECT * FROM myseries`,
			stmt: &sp.SelectStatement{
				IsRawQuery: true,
				Fields: []*sp.Field{
					{Expr: &sp.Wildcard{}},
				},
				Sources: []sp.Source{&sp.Measurement{Database: "myseries"}},
			},
		},
		// SELECT group by having statement
		{
			s: `SELECT ipo_year, COUNT(*) AS ipo_count FROM symbol GROUP BY ipo_year HAVING ipo_count > 200`,
			stmt: &sp.SelectStatement{
				IsRawQuery: false,
				Fields: []*sp.Field{
					{Expr: &sp.VarRef{Val: "ipo_year", Segments: []string{"ipo_year"}}},
					{Expr: &sp.Call{Name: "count", Args: []sp.Expr{&sp.Wildcard{}}}, Alias: "ipo_count"},
				},
				Sources: []sp.Source{&sp.Measurement{Database: "symbol"}},
				Dimensions: []*sp.Dimension{
					{Expr: &sp.VarRef{Val: "ipo_year", Segments: []string{"ipo_year"}}},
				},
				Having: &sp.BinaryExpr{
					Op:  sp.GT,
					LHS: &sp.VarRef{Val: "ipo_count", Segments: []string{"ipo_count"}},
					RHS: &sp.IntegerLiteral{Val: 200},
				},
			},
		},
		{
			s: `SELECT * FROM myseries GROUP BY *`,
			stmt: &sp.SelectStatement{
				IsRawQuery: true,
				Fields: []*sp.Field{
					{Expr: &sp.Wildcard{}},
				},
				Sources:    []sp.Source{&sp.Measurement{Database: "myseries"}},
				Dimensions: []*sp.Dimension{{Expr: &sp.Wildcard{}}},
			},
		},
		{
			s: `SELECT field1, * FROM myseries GROUP BY *`,
			stmt: &sp.SelectStatement{
				IsRawQuery: true,
				Fields: []*sp.Field{
					{Expr: &sp.VarRef{Val: "field1", Segments: []string{"field1"}}},
					{Expr: &sp.Wildcard{}},
				},
				Sources:    []sp.Source{&sp.Measurement{Database: "myseries"}},
				Dimensions: []*sp.Dimension{{Expr: &sp.Wildcard{}}},
			},
		},
		{
			s: `SELECT *, field1 FROM myseries GROUP BY *`,
			stmt: &sp.SelectStatement{
				IsRawQuery: true,
				Fields: []*sp.Field{
					{Expr: &sp.Wildcard{}},
					{Expr: &sp.VarRef{Val: "field1", Segments: []string{"field1"}}},
				},
				Sources:    []sp.Source{&sp.Measurement{Database: "myseries"}},
				Dimensions: []*sp.Dimension{{Expr: &sp.Wildcard{}}},
			},
		},
		// SELECT statement
		{
			s: fmt.Sprintf(`SELECT mean(field1), sum(field2) ,count(field3) AS field_x FROM myseries WHERE host = 'hosta.influxdb.org' and time > %d GROUP BY time("10h") ORDER BY DESC LIMIT 20, 10`, now.Unix()),
			stmt: &sp.SelectStatement{
				IsRawQuery: false,
				Fields: []*sp.Field{
					{Expr: &sp.Call{Name: "mean", Args: []sp.Expr{&sp.VarRef{Val: "field1", Segments: []string{"field1"}}}}},
					{Expr: &sp.Call{Name: "sum", Args: []sp.Expr{&sp.VarRef{Val: "field2", Segments: []string{"field2"}}}}},
					{Expr: &sp.Call{Name: "count", Args: []sp.Expr{&sp.VarRef{Val: "field3", Segments: []string{"field3"}}}}, Alias: "field_x"},
				},
				Sources: []sp.Source{&sp.Measurement{Database: "myseries"}},
				Condition: &sp.BinaryExpr{
					Op: sp.AND,
					LHS: &sp.BinaryExpr{
						Op:  sp.EQ,
						LHS: &sp.VarRef{Val: "host", Segments: []string{"host"}},
						RHS: &sp.StringLiteral{Val: "hosta.influxdb.org"},
					},
					RHS: &sp.BinaryExpr{
						Op:  sp.GT,
						LHS: &sp.VarRef{Val: "time", Segments: []string{"time"}},
						RHS: &sp.IntegerLiteral{Val: now.Unix()},
					},
				},
				Dimensions: []*sp.Dimension{{Expr: &sp.Call{Name: "time", Args: []sp.Expr{&sp.StringLiteral{Val: "10h"}}}}},
				SortFields: []*sp.SortField{
					{Ascending: false},
				},
				Limit:  20,
				Offset: 10,
			},
		},
		// Dotted identifier becomes one VarRef with multiple segments.
		{
			s: `SELECT foo.bar.baz AS foo FROM myseries`,
			stmt: &sp.SelectStatement{
				IsRawQuery: true,
				Fields: []*sp.Field{
					{Expr: &sp.VarRef{Val: "foo.bar.baz", Segments: []string{"foo", "bar", "baz"}}, Alias: "foo"},
				},
				Sources: []sp.Source{&sp.Measurement{Database: "myseries"}},
			},
		},
		{
			s: `SELECT func1(arg1, 100, arg3, arg4) FROM myseries`,
			stmt: &sp.SelectStatement{
				IsRawQuery: false,
				Fields: []*sp.Field{
					{Expr: &sp.Call{
						Name: "func1",
						Args: []sp.Expr{
							&sp.VarRef{Val: "arg1", Segments: []string{"arg1"}},
							&sp.IntegerLiteral{Val: 100},
							&sp.VarRef{Val: "arg3", Segments: []string{"arg3"}},
							&sp.VarRef{Val: "arg4", Segments: []string{"arg4"}},
						}}},
				},
				Sources: []sp.Source{&sp.Measurement{Database: "myseries"}},
			},
		},
		{
			s: `SELECT func1(field1) / func2(field2) FROM myseries`,
			stmt: &sp.SelectStatement{
				IsRawQuery: false,
				Fields: []*sp.Field{
					{
						Expr: &sp.BinaryExpr{
							LHS: &sp.Call{
								Name: "func1",
								Args: []sp.Expr{
									&sp.VarRef{Val: "field1", Segments: []string{"field1"}},
								},
							},
							RHS: &sp.Call{
								Name: "func2",
								Args: []sp.Expr{
									&sp.VarRef{Val: "field2", Segments: []string{"field2"}},
								},
							},
							Op: sp.DIV,
						},
					},
				},
				Sources: []sp.Source{
					&sp.Measurement{Database: "myseries"},
				},
			},
		},
		// Nested calls in both the field list and GROUP BY.
		{
			s: fmt.Sprintf(`SELECT func1(func2(field1)) FROM myseries GROUP BY func3(field3)`),
			stmt: &sp.SelectStatement{
				IsRawQuery: false,
				Fields: []*sp.Field{
					{
						Expr: &sp.Call{
							Name: "func1",
							Args: []sp.Expr{
								&sp.Call{
									Name: "func2",
									Args: []sp.Expr{
										&sp.VarRef{Val: "field1", Segments: []string{"field1"}},
									},
								},
							},
						},
					},
				},
				Sources: []sp.Source{&sp.Measurement{Database: "myseries"}},
				Dimensions: []*sp.Dimension{
					{
						Expr: &sp.Call{
							Name: "func3",
							Args: []sp.Expr{
								&sp.VarRef{Val: "field3", Segments: []string{"field3"}},
							},
						},
					},
				},
			},
		},
		// SELECT statement (lowercase)
		{
			s: `select my_field from myseries`,
			stmt: &sp.SelectStatement{
				IsRawQuery: true,
				Fields:     []*sp.Field{{Expr: &sp.VarRef{Val: "my_field", Segments: []string{"my_field"}}}},
				Sources:    []sp.Source{&sp.Measurement{Database: "myseries"}},
			},
		},
		// SELECT statement with multiple ORDER BY fields
		{
			skip: true,
			s:    `SELECT field1 FROM myseries ORDER BY ASC, field1, field2 DESC LIMIT 10`,
			stmt: &sp.SelectStatement{
				IsRawQuery: true,
				Fields:     []*sp.Field{{Expr: &sp.VarRef{Val: "field1"}}},
				Sources:    []sp.Source{&sp.Measurement{Database: "myseries"}},
				SortFields: []*sp.SortField{
					{Ascending: true},
					{Name: "field1"},
					{Name: "field2"},
				},
				Limit: 10,
			},
		},
		// SELECT * FROM cpu WHERE host = 'serverC' AND region =~ /.*west.*/
		{
			s: `SELECT * FROM cpu WHERE host = 'serverC' AND region =~ /.*west.*/`,
			stmt: &sp.SelectStatement{
				IsRawQuery: true,
				Fields:     []*sp.Field{{Expr: &sp.Wildcard{}}},
				Sources:    []sp.Source{&sp.Measurement{Database: "cpu"}},
				Condition: &sp.BinaryExpr{
					Op: sp.AND,
					LHS: &sp.BinaryExpr{
						Op:  sp.EQ,
						LHS: &sp.VarRef{Val: "host", Segments: []string{"host"}},
						RHS: &sp.StringLiteral{Val: "serverC"},
					},
					RHS: &sp.BinaryExpr{
						Op:  sp.EQREGEX,
						LHS: &sp.VarRef{Val: "region", Segments: []string{"region"}},
						RHS: &sp.RegexLiteral{Val: regexp.MustCompile(".*west.*")},
					},
				},
			},
		},
		{
			s: `select count(distinct(field3)), sum(field4) from metrics`,
			stmt: &sp.SelectStatement{
				IsRawQuery: false,
				Fields: []*sp.Field{
					{Expr: &sp.Call{Name: "count", Args: []sp.Expr{&sp.Call{Name: "distinct", Args: []sp.Expr{&sp.VarRef{Val: "field3", Segments: []string{"field3"}}}}}}},
					{Expr: &sp.Call{Name: "sum", Args: []sp.Expr{&sp.VarRef{Val: "field4", Segments: []string{"field4"}}}}},
				},
				Sources: []sp.Source{&sp.Measurement{Database: "metrics"}},
			},
		},
		// SELECT * FROM WHERE field comparisons
		{
			s: `SELECT * FROM cpu WHERE load > 100`,
			stmt: &sp.SelectStatement{
				IsRawQuery: true,
				Fields:     []*sp.Field{{Expr: &sp.Wildcard{}}},
				Sources:    []sp.Source{&sp.Measurement{Database: "cpu"}},
				Condition: &sp.BinaryExpr{
					Op:  sp.GT,
					LHS: &sp.VarRef{Val: "load", Segments: []string{"load"}},
					RHS: &sp.IntegerLiteral{Val: 100},
				},
			},
		},
		{
			s: `SELECT * FROM cpu WHERE load >= 100`,
			stmt: &sp.SelectStatement{
				IsRawQuery: true,
				Fields:     []*sp.Field{{Expr: &sp.Wildcard{}}},
				Sources:    []sp.Source{&sp.Measurement{Database: "cpu"}},
				Condition: &sp.BinaryExpr{
					Op:  sp.GTE,
					LHS: &sp.VarRef{Val: "load", Segments: []string{"load"}},
					RHS: &sp.IntegerLiteral{Val: 100},
				},
			},
		},
		{
			s: `SELECT * FROM cpu WHERE load = 100`,
			stmt: &sp.SelectStatement{
				IsRawQuery: true,
				Fields:     []*sp.Field{{Expr: &sp.Wildcard{}}},
				Sources:    []sp.Source{&sp.Measurement{Database: "cpu"}},
				Condition: &sp.BinaryExpr{
					Op:  sp.EQ,
					LHS: &sp.VarRef{Val: "load", Segments: []string{"load"}},
					RHS: &sp.IntegerLiteral{Val: 100},
				},
			},
		},
		{
			s: `SELECT * FROM cpu WHERE load <= 100`,
			stmt: &sp.SelectStatement{
				IsRawQuery: true,
				Fields:     []*sp.Field{{Expr: &sp.Wildcard{}}},
				Sources:    []sp.Source{&sp.Measurement{Database: "cpu"}},
				Condition: &sp.BinaryExpr{
					Op:  sp.LTE,
					LHS: &sp.VarRef{Val: "load", Segments: []string{"load"}},
					RHS: &sp.IntegerLiteral{Val: 100},
				},
			},
		},
		{
			s: `SELECT * FROM cpu WHERE load < 100`,
			stmt: &sp.SelectStatement{
				IsRawQuery: true,
				Fields:     []*sp.Field{{Expr: &sp.Wildcard{}}},
				Sources:    []sp.Source{&sp.Measurement{Database: "cpu"}},
				Condition: &sp.BinaryExpr{
					Op:  sp.LT,
					LHS: &sp.VarRef{Val: "load", Segments: []string{"load"}},
					RHS: &sp.IntegerLiteral{Val: 100},
				},
			},
		},
		{
			s: `SELECT * FROM cpu WHERE load != 100`,
			stmt: &sp.SelectStatement{
				IsRawQuery: true,
				Fields:     []*sp.Field{{Expr: &sp.Wildcard{}}},
				Sources:    []sp.Source{&sp.Measurement{Database: "cpu"}},
				Condition: &sp.BinaryExpr{
					Op:  sp.NEQ,
					LHS: &sp.VarRef{Val: "load", Segments: []string{"load"}},
					RHS: &sp.IntegerLiteral{Val: 100},
				},
			},
		},
		// Errors
		{s: ``, err: `found EOF, expected SELECT at line 1, char 1`},
		{s: `SELECT`, err: `found EOF, expected identifier, string, number, bool at line 1, char 8`},
		{s: `blah blah`, err: `found blah, expected SELECT at line 1, char 1`},
		{s: `SELECT field1 X`, err: `found X, expected FROM at line 1, char 15`},
		{s: `SELECT field1 FROM "series" WHERE X`, err: `found series, expected identifier at line 1, char 19`},
		{s: `SELECT field1 FROM myseries GROUP`, err: `found EOF, expected BY at line 1, char 35`},
		{s: `SELECT field1 FROM myseries LIMIT`, err: `found EOF, expected integer at line 1, char 35`},
		{s: `SELECT field1 FROM myseries LIMIT 10.5`, err: `found 10.5, expected integer at line 1, char 35`},
		{s: `SELECT top() FROM myseries`, err: `invalid number of arguments for top, expected at least 1, got 0`},
		{s: `SELECT field1 FROM myseries ORDER`, err: `found EOF, expected BY at line 1, char 35`},
		{s: `SELECT field1 FROM myseries ORDER BY`, err: `found EOF, expected identifier, ASC, DESC at line 1, char 38`},
		{s: `SELECT field1 FROM myseries ORDER BY /`, err: `found /, expected identifier, ASC, DESC at line 1, char 38`},
		{s: `SELECT field1 FROM myseries ORDER BY 1`, err: `found 1, expected identifier, ASC, DESC at line 1, char 38`},
		{s: `SELECT field1 FROM myseries ORDER BY time ASC,`, err: `found EOF, expected identifier at line 1, char 47`},
		{s: `SELECT field1 AS`, err: `found EOF, expected identifier at line 1, char 18`},
		{s: `SELECT field1 FROM 12`, err: `found 12, expected identifier at line 1, char 20`},
		// Integer literal overflow (the literal exceeds int64).
		{s: `SELECT 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 FROM myseries`, err: `unable to parse integer at line 1, char 8`},
		{s: `SELECT 10.5h FROM myseries`, err: `found h, expected FROM at line 1, char 12`},
		{s: `SELECT value > 2 FROM cpu`, err: `invalid operator > in SELECT field, only support +-*/`},
		{s: `SELECT value = 2 FROM cpu`, err: `invalid operator = in SELECT field, only support +-*/`},
	}
	for i, tt := range tests {
		if tt.skip {
			continue
		}
		p := sp.NewParser(strings.NewReader(tt.s))
		stmt, err := p.ParseStatement()
		// Compare the error string first; the AST is only checked on success.
		if !reflect.DeepEqual(tt.err, errstring(err)) {
			t.Errorf("%d. %q: error mismatch:\n exp=%s\n got=%s\n\n", i, tt.s, tt.err, err)
		} else if tt.err == "" {
			if !reflect.DeepEqual(tt.stmt, stmt) {
				t.Logf("\n# %s\nexp=%s\ngot=%s\n", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt))
				t.Logf("\nSQL exp=%s\nSQL got=%s\n", tt.stmt.String(), stmt.String())
				t.Errorf("%d. %q\n\nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt)
			} else {
				// Round-trip: serialize the parsed statement and parse it
				// again; the result must still match the expected AST.
				stmt2, err := sp.ParseStatement(stmt.String())
				if err != nil {
					t.Errorf("%d. %q: unable to parse statement string: %s", i, stmt.String(), err)
				} else if !reflect.DeepEqual(tt.stmt, stmt2) {
					t.Logf("\n# %s\nexp=%s\ngot=%s\n", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt2))
					t.Logf("\nSQL exp=%s\nSQL got=%s\n", tt.stmt.String(), stmt2.String())
					t.Errorf("%d. %q\n\nstmt reparse mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt2)
				}
			}
		}
	}
}
// TestParseGroupBy ensures GROUP BY dimensions survive parsing and are
// rendered back by Dimensions.String() exactly as written.
func TestParseGroupBy(t *testing.T) {
	// For use in various tests.
	var tests = []struct {
		s   string // input statement
		d   string // expected rendering of the GROUP BY dimensions
		err string // expected parse error ("" means success)
	}{
		{s: `SELECT sum(x) FROM Packetbeat where uid="xxx" group by tcp.src_ip`, d: `tcp.src_ip`, err: ``},
		{s: `SELECT sum(x) FROM Packetbeat group by tcp.src_ip, tcp.dst_ip`, d: `tcp.src_ip, tcp.dst_ip`, err: ``},
	}
	for i, tt := range tests {
		p := sp.NewParser(strings.NewReader(tt.s))
		stmt, err := p.ParseStatement()
		if !reflect.DeepEqual(tt.err, errstring(err)) {
			t.Errorf("%d. %q: error mismatch:\n exp=%s\n got=%s\n\n", i, tt.s, tt.err, err)
		}
		// Fix: on a parse error stmt is nil; the type assertion below would
		// panic and abort the whole test run instead of failing this case.
		if err != nil {
			continue
		}
		d := stmt.(*sp.SelectStatement).Dimensions.String()
		if d != tt.d {
			t.Errorf("%d. %q: error mismatch:\n exp=%s\n got=%s\n\n", i, tt.s, tt.d, d)
		}
	}
}
// Ensure the parser accepts only SELECT statements; everything else must
// produce the documented "expected SELECT" error.
func TestParseStatement(t *testing.T) {
	cases := []struct {
		s   string
		err string
	}{
		// Errors
		{s: ``, err: `found EOF, expected SELECT at line 1, char 1`},
		{s: `CREATE`, err: `found CREATE, expected SELECT at line 1, char 1`},
		{s: `SELECT sum(x) FROM Packetbeat`, err: ``},
	}
	for i, c := range cases {
		_, err := sp.NewParser(strings.NewReader(c.s)).ParseStatement()
		if got := errstring(err); got != c.err {
			t.Errorf("%d. %q: error mismatch:\n exp=%s\n got=%s\n\n", i, c.s, c.err, err)
		}
	}
}
// Ensure the parser can parse expressions into an AST.
// Covers literals, operator precedence and associativity, parenthesized
// groups, regex operands, and function calls with zero or more arguments.
func TestParser_ParseExpr(t *testing.T) {
	var tests = []struct {
		s    string  // input expression
		expr sp.Expr // expected AST
		err  string  // expected error ("" = success)
	}{
		// Primitives
		{s: `100.0`, expr: &sp.NumberLiteral{Val: 100}},
		{s: `100`, expr: &sp.IntegerLiteral{Val: 100}},
		{s: `'foo bar'`, expr: &sp.StringLiteral{Val: "foo bar"}},
		{s: `true`, expr: &sp.BooleanLiteral{Val: true}},
		{s: `false`, expr: &sp.BooleanLiteral{Val: false}},
		{s: `my_ident`, expr: &sp.VarRef{Val: "my_ident", Segments: []string{"my_ident"}}},
		// Simple binary expression
		{
			s: `1 + 2`,
			expr: &sp.BinaryExpr{
				Op:  sp.ADD,
				LHS: &sp.IntegerLiteral{Val: 1},
				RHS: &sp.IntegerLiteral{Val: 2},
			},
		},
		// Binary expression with LHS precedence
		{
			s: `1 * 2 + 3`,
			expr: &sp.BinaryExpr{
				Op: sp.ADD,
				LHS: &sp.BinaryExpr{
					Op:  sp.MUL,
					LHS: &sp.IntegerLiteral{Val: 1},
					RHS: &sp.IntegerLiteral{Val: 2},
				},
				RHS: &sp.IntegerLiteral{Val: 3},
			},
		},
		// Binary expression with RHS precedence
		{
			s: `1 + 2 * 3`,
			expr: &sp.BinaryExpr{
				Op:  sp.ADD,
				LHS: &sp.IntegerLiteral{Val: 1},
				RHS: &sp.BinaryExpr{
					Op:  sp.MUL,
					LHS: &sp.IntegerLiteral{Val: 2},
					RHS: &sp.IntegerLiteral{Val: 3},
				},
			},
		},
		// Binary expression with LHS paren group.
		{
			s: `(1 + 2) * 3`,
			expr: &sp.BinaryExpr{
				Op: sp.MUL,
				LHS: &sp.ParenExpr{
					Expr: &sp.BinaryExpr{
						Op:  sp.ADD,
						LHS: &sp.IntegerLiteral{Val: 1},
						RHS: &sp.IntegerLiteral{Val: 2},
					},
				},
				RHS: &sp.IntegerLiteral{Val: 3},
			},
		},
		// Binary expression with no precedence, tests left associativity.
		{
			s: `1 * 2 * 3`,
			expr: &sp.BinaryExpr{
				Op: sp.MUL,
				LHS: &sp.BinaryExpr{
					Op:  sp.MUL,
					LHS: &sp.IntegerLiteral{Val: 1},
					RHS: &sp.IntegerLiteral{Val: 2},
				},
				RHS: &sp.IntegerLiteral{Val: 3},
			},
		},
		// Binary expression with regex.
		{
			s: `region =~ /us.*/`,
			expr: &sp.BinaryExpr{
				Op:  sp.EQREGEX,
				LHS: &sp.VarRef{Val: "region", Segments: []string{"region"}},
				RHS: &sp.RegexLiteral{Val: regexp.MustCompile(`us.*`)},
			},
		},
		// Binary expression with quoted '/' regex.
		{
			s: `url =~ /http\:\/\/www\.example\.com/`,
			expr: &sp.BinaryExpr{
				Op:  sp.EQREGEX,
				LHS: &sp.VarRef{Val: "url", Segments: []string{"url"}},
				RHS: &sp.RegexLiteral{Val: regexp.MustCompile(`http\://www\.example\.com`)},
			},
		},
		// Complex binary expression.
		{
			s: `value + 3 < 30 AND 1 + 2 OR true`,
			expr: &sp.BinaryExpr{
				Op: sp.OR,
				LHS: &sp.BinaryExpr{
					Op: sp.AND,
					LHS: &sp.BinaryExpr{
						Op: sp.LT,
						LHS: &sp.BinaryExpr{
							Op:  sp.ADD,
							LHS: &sp.VarRef{Val: "value", Segments: []string{"value"}},
							RHS: &sp.IntegerLiteral{Val: 3},
						},
						RHS: &sp.IntegerLiteral{Val: 30},
					},
					RHS: &sp.BinaryExpr{
						Op:  sp.ADD,
						LHS: &sp.IntegerLiteral{Val: 1},
						RHS: &sp.IntegerLiteral{Val: 2},
					},
				},
				RHS: &sp.BooleanLiteral{Val: true},
			},
		},
		// Function call (empty)
		{
			s: `my_func()`,
			expr: &sp.Call{
				Name: "my_func",
			},
		},
		// Function call (multi-arg)
		{
			s: `my_func(1, 2 + 3)`,
			expr: &sp.Call{
				Name: "my_func",
				Args: []sp.Expr{
					&sp.IntegerLiteral{Val: 1},
					&sp.BinaryExpr{
						Op:  sp.ADD,
						LHS: &sp.IntegerLiteral{Val: 2},
						RHS: &sp.IntegerLiteral{Val: 3},
					},
				},
			},
		},
	}
	for i, tt := range tests {
		expr, err := sp.NewParser(strings.NewReader(tt.s)).ParseExpr()
		if !reflect.DeepEqual(tt.err, errstring(err)) {
			t.Errorf("%d. %q: error mismatch:\n exp=%s\n got=%s\n\n", i, tt.s, tt.err, err)
		} else if tt.err == "" && !reflect.DeepEqual(tt.expr, expr) {
			t.Errorf("%d. %q\n\nexpr mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.expr, expr)
		}
	}
}
// TestQuote verifies that QuoteString wraps a string in single quotes and
// escapes embedded quotes and backslashes.
func TestQuote(t *testing.T) {
	cases := []struct {
		in   string
		want string
	}{
		{``, `''`},
		{`foo`, `'foo'`},
		{"foo\nbar", `'foo\nbar'`},
		{`foo bar\\`, `'foo bar\\\\'`},
		{`'foo'`, `'\'foo\''`},
	}
	for i, c := range cases {
		got := sp.QuoteString(c.in)
		if got != c.want {
			t.Errorf("%d. %s: mismatch: %s != %s", i, c.in, c.want, got)
		}
	}
}
// TestQuoteIdent verifies that QuoteIdent joins identifier segments with
// dots and double-quotes only the segments that need quoting (keywords,
// special characters, leading digits, empty strings).
func TestQuoteIdent(t *testing.T) {
	cases := []struct {
		ident []string
		want  string
	}{
		{[]string{``}, `""`},
		{[]string{`select`}, `"select"`},
		{[]string{`in-bytes`}, `"in-bytes"`},
		{[]string{`foo`, `bar`}, `"foo".bar`},
		{[]string{`foo`, ``, `bar`}, `"foo"..bar`},
		{[]string{`foo bar`, `baz`}, `"foo bar".baz`},
		{[]string{`foo.bar`, `baz`}, `"foo.bar".baz`},
		{[]string{`foo.bar`, `rp`, `baz`}, `"foo.bar"."rp".baz`},
		{[]string{`foo.bar`, `rp`, `1baz`}, `"foo.bar"."rp"."1baz"`},
	}
	for i, c := range cases {
		got := sp.QuoteIdent(c.ident...)
		if got != c.want {
			t.Errorf("%d. %s: mismatch: %s != %s", i, c.ident, c.want, got)
		}
	}
}
// MustParseSelectStatement parses s as a select statement and panics when
// parsing fails or the result is not a *sp.SelectStatement.
func MustParseSelectStatement(s string) *sp.SelectStatement {
	parser := sp.NewParser(strings.NewReader(s))
	stmt, err := parser.ParseStatement()
	if err != nil {
		panic(err)
	}
	return stmt.(*sp.SelectStatement)
}
// MustParseExpr parses s as an expression, printing the offending input and
// panicking when parsing fails.
func MustParseExpr(s string) sp.Expr {
	expr, err := sp.NewParser(strings.NewReader(s)).ParseExpr()
	if err == nil {
		return expr
	}
	fmt.Println(s)
	panic(err)
}
// mustMarshalJSON encodes a value to JSON.
func mustMarshalJSON(v interface{}) []byte {
b, err := json.MarshalIndent(v, "", " ")
if err != nil {
panic(err)
}
return b
}
// intptr returns a pointer to a fresh copy of v.
func intptr(v int) *int {
	p := new(int)
	*p = v
	return p
}
// BenchmarkParseStatement1 measures parsing (and re-serialization) of a
// simple aggregate query with a WHERE clause.
func BenchmarkParseStatement1(b *testing.B) {
	b.ReportAllocs()
	const query = `SELECT count(field) FROM series WHERE value > 10`
	for n := 0; n < b.N; n++ {
		stmt, err := sp.NewParser(strings.NewReader(query)).ParseStatement()
		if err != nil {
			b.Fatalf("unexpected error: %s", err)
		}
		if stmt == nil {
			b.Fatalf("expected statement: %s", stmt)
		}
		_ = stmt.String()
	}
	// b.SetBytes(int64(len(query)))
}
// BenchmarkParseStatement2 measures parsing of a lowercase query with a
// dotted field reference and a string literal condition.
func BenchmarkParseStatement2(b *testing.B) {
	b.ReportAllocs()
	const query = "select max(tcp.in_pkts) from packetbeat where guid = 'for a test you know'"
	for n := 0; n < b.N; n++ {
		stmt, err := sp.NewParser(strings.NewReader(query)).ParseStatement()
		switch {
		case err != nil:
			b.Fatalf("unexpected error: %s", err)
		case stmt == nil:
			b.Fatalf("expected statement: %s", stmt)
		}
	}
	b.SetBytes(int64(len(query)))
}
|
package mymap
import (
"fmt"
"strconv"
"sync"
"testing"
)
// TestInitMap demonstrates three ways of creating a map: a literal with
// entries, an empty literal, and make with a capacity hint.
func TestInitMap(t *testing.T) {
	m1 := map[int]int{1: 1, 2: 4, 3: 9}
	t.Log(m1)
	// Key 0 is absent, so m1[0] yields the zero value 0.
	t.Log(m1[0], m1[1], m1[2], m1[3])
	t.Logf("len m1=%d", len(m1))
	m2 := map[int]int{}
	m2[4] = 16
	t.Logf("len m2=%d", len(m2))
	m3 := make(map[int]int, 10)
	// Fix: the label said "m2" but the value logged is len(m3). Note the
	// capacity hint does not affect len, which is 0 here.
	t.Logf("len m3=%d", len(m3))
}
// TestAccessNotExistingKey shows that reading a missing key yields the zero
// value, and that the comma-ok form distinguishes "missing" from "stored 0".
func TestAccessNotExistingKey(t *testing.T) {
	m1 := map[int]int{}
	t.Log(m1[1])
	m1[2] = 0
	t.Log(m1[2])
	m1[3] = 0
	v, ok := m1[3]
	if !ok {
		t.Log("key s is not existing")
		return
	}
	t.Logf("key s is %d", v)
}
// TestTravelMap iterates over all entries of a map; iteration order is
// intentionally randomized by the runtime.
func TestTravelMap(t *testing.T) {
	for key, val := range map[int]int{1: 1, 2: 4, 3: 9} {
		t.Log(key, val)
	}
}
// TestMapWithFuncValue stores functions as map values and invokes them by
// key, building a tiny dispatch table.
func TestMapWithFuncValue(t *testing.T) {
	m := map[int]func(op int) int{
		1: func(op int) int { return op },
		2: func(op int) int { return op * op },
		3: func(op int) int { return op * op * op },
	}
	t.Log(m[1](1), m[2](2), m[3](3))
}
// TestMapForSet uses map[int]bool as a set: membership is a plain index
// expression because a missing key yields false.
func TestMapForSet(t *testing.T) {
	mySet := map[int]bool{}
	mySet[1] = true
	report := func(n int) {
		if mySet[n] {
			t.Logf("%d is existing", n)
		} else {
			t.Logf("%d is not existing", n)
		}
	}
	report(3)
	mySet[3] = true
	t.Log(mySet, 1)
	report(1)
}
// TestMapfuncvalue delegates to the package-level Mapfuncvalue demo
// (defined elsewhere in this package).
func TestMapfuncvalue(t *testing.T) {
	Mapfuncvalue()
}
// TestMapvaluenil delegates to the package-level Mapvaluenil demo
// (defined elsewhere in this package).
func TestMapvaluenil(t *testing.T) {
	Mapvaluenil()
}
// TestMapvaluenil1 delegates to the package-level Mapvaluenil1 demo
// (defined elsewhere in this package).
func TestMapvaluenil1(t *testing.T) {
	Mapvaluenil1()
}
// TestNonConcurrentMap demonstrates that a plain map is NOT safe for
// concurrent use: one goroutine writes m[1] while another reads it, which
// the runtime detects and aborts with the fatal error quoted below.
// NOTE(review): the trailing empty for-loop spins forever, so this "test"
// only ends when the runtime kills the process — it cannot run in a normal
// suite and exists purely as a demonstration.
func TestNonConcurrentMap(t *testing.T) {
	// fatal error: concurrent map read and map write
	m := make(map[int]int)
	go func() {
		for {
			m[1] = 1
		}
	}()
	go func() {
		for {
			_ = m[1]
		}
	}()
	// Busy-wait so both goroutines keep racing until the runtime faults.
	for {
	}
}
// TestConcurrentMap exercises sync.Map, the map variant that is safe for
// concurrent use: 1000 keys are stored and the whole map is ranged over
// after each insertion.
// NOTE(review): ranging inside the insert loop makes this quadratic in the
// number of keys and very chatty on stdout — fine for a demo, slow as a
// real test.
func TestConcurrentMap(t *testing.T) {
	var scene sync.Map
	i := 0
	for i < 1000 {
		scene.Store("creece"+strconv.Itoa(i), i)
		// scene.Store("creece", 98)
		// scene.Store("egypt", 200)
		// scene.Store("London", 201)
		// if count, ok := scene.Load("London"); ok {
		// newCount := fmt.Sprintf("%d", count)
		// counts, _ := strconv.Atoi(newCount)
		// t.Log(ok, counts-1, reflect.TypeOf(count))
		// }
		// "London" is never stored, so ParseSyncMap returns 0 and the log
		// below never fires.
		counts := ParseSyncMap(&scene, "London")
		if counts > 0 {
			t.Log(counts)
		}
		// scene.Delete("London")
		// scene.Store("London", 201)
		scene.Range(func(k, v interface{}) bool {
			fmt.Printf("iterator: %v %v\n", k, v)
			return true
		})
		i++
	}
}
func ParseSyncMap(p *sync.Map, key string) int {
if count, ok := p.Load(key); ok {
newCount := fmt.Sprintf("%d", count)
counts, _ := strconv.Atoi(newCount)
return counts
}
return 0
}
|
// Copyright 2016 Lennart Espe. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE.md file.
package lib
import (
"errors"
"io/ioutil"
"net/http"
"net/url"
"path/filepath"
"strings"
)
// Origin is an accessible data store from which a patch set and individual
// files can be fetched.
type Origin interface {
	// Scan fetches the stored HashSet and source name from the origin.
	// It may return an error if the set is not accessible.
	Scan() (HashSet, string, error)
	// Get fetches a file specified by its path from the origin.
	// It may return an error if the file is not accessible.
	Get(file string) ([]byte, error)
}
// HTTPOrigin has an HTTP back-end: files are fetched relative to Base.
type HTTPOrigin struct {
	Base string // base URL, e.g. "http://example.com/patches"
}
// Get fetches a file relative to the origin's base URL and returns its
// contents. It returns an empty slice together with the error when the URL
// is invalid or the transfer fails.
func (h HTTPOrigin) Get(file string) ([]byte, error) {
	requestURL, err := url.Parse(h.Base)
	if err != nil {
		log.Error("url parse:", err)
		return []byte{}, err
	}
	requestURL.Path = filepath.Join(requestURL.Path, file)
	resp, err := http.Get(requestURL.String())
	if err != nil {
		log.Warning("http request:", err)
		return []byte{}, err
	}
	// Fix: the response body was never closed, leaking the underlying
	// connection on every call.
	defer resp.Body.Close()
	// NOTE(review): non-2xx responses are not treated as errors, so an error
	// page would be returned as file content. Confirm whether callers rely
	// on this before tightening.
	buf, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Warning("http transmission:", err)
		return []byte{}, err
	}
	return buf, nil
}
// Scan downloads the patch file from the HTTP origin and parses it into a
// HashSet plus the source name recorded in the file.
func (h HTTPOrigin) Scan() (HashSet, string, error) {
	raw, err := h.Get(PatchFile)
	if err != nil {
		log.Warning("http scan:", err)
		return HashSet{}, "", err
	}
	set, source := Parse(string(raw))
	log.Notice("fetched", len(set), "from http origin")
	return set, source, nil
}
// FileOrigin has a filesystem back-end: files are read below Path.
type FileOrigin struct {
	Path string // root directory of the origin
}
// Scan reads the patch file from the local path and parses it into a
// HashSet plus the source name recorded in the file.
func (origin FileOrigin) Scan() (HashSet, string, error) {
	data, err := origin.Get(PatchFile)
	if err != nil {
		log.Warning("local scan:", err)
		// Fix: the error was previously swallowed (nil was returned), so
		// callers could not tell a missing patch file from an empty one.
		// Propagate it, matching HTTPOrigin.Scan.
		return HashSet{}, "", err
	}
	items, source := Parse(string(data))
	log.Notice("fetched", len(items), "from local origin")
	return items, source, nil
}
// Get reads a file located below the origin's root directory.
func (origin FileOrigin) Get(file string) ([]byte, error) {
	fullPath := filepath.Join(origin.Path, file)
	data, err := ioutil.ReadFile(fullPath)
	if err == nil {
		return data, nil
	}
	log.Warning("local file:", err)
	return []byte{}, err
}
// GetOrigin finds a fitting origin based on the path: anything starting
// with "http" is treated as an HTTP origin, absolute paths as local
// filesystem origins. It returns an error for any other pattern.
func GetOrigin(path string) (Origin, error) {
	switch {
	case strings.HasPrefix(path, "http"):
		return &HTTPOrigin{path}, nil
	case strings.HasPrefix(path, "/"):
		return &FileOrigin{path}, nil
	}
	return nil, errors.New("unknown origin: " + path)
}
|
package models
import (
"time"
)
// UserDetail is the model for the user_detail table.
type UserDetail struct {
	ID       int64     // primary key
	User     int64     // owning user id — presumably a FK to the user table; confirm against the schema
	UserName string    // display name
	Icon     int       // icon identifier
	UpdateAt time.Time // last update timestamp
}
|
package cmd
import (
"fmt"
"io"
"net"
"os"
"github.com/google/uuid"
"github.com/grandcat/zeroconf"
"github.com/spf13/cobra"
)
// recvCmd listens on a random TCP port, advertises it via zeroconf under
// the given id, accepts a single connection, and copies everything received
// to stdout.
var recvCmd = &cobra.Command{
	Use:   "recv <id>",
	Short: "`recv` output over the network",
	Args:  cobra.ExactArgs(1),
	RunE: func(cmd *cobra.Command, args []string) error {
		id := args[0]
		// Port 0 lets the OS pick a free port; it is announced below.
		ln, err := net.Listen("tcp", ":0")
		if err != nil {
			return err
		}
		defer ln.Close()
		// Fix: the error from uuid.NewRandom was silently discarded; a
		// failing random source would have registered a zero-valued
		// instance name instead of surfacing the problem.
		instance, err := uuid.NewRandom()
		if err != nil {
			return err
		}
		service := fmt.Sprintf("%s.%s", id, DefaultServiceRoot)
		port := ln.Addr().(*net.TCPAddr).Port
		server, err := zeroconf.Register(instance.String(), service, DefaultDomain, port, nil, nil)
		if err != nil {
			return err
		}
		defer server.Shutdown()
		// Only the first connection is served; the command exits once the
		// sender closes it.
		conn, err := ln.Accept()
		if err != nil {
			return err
		}
		defer conn.Close()
		_, err = io.Copy(os.Stdout, conn)
		return err
	},
}
|
package dao
import "TruckMonitor-Backend/model"
// ContractDao provides read access to contracts and their detail rows.
type ContractDao interface {
	// FindById returns the contract with the given id.
	FindById(id int) (*model.Contract, error)
	// FindDetails returns all detail rows belonging to contractId.
	FindDetails(contractId int) ([]*model.ContractDetail, error)
}
|
package main
import (
"github.com/vugu/vugu"
"github.com/powerman/tr/web/app/internal/app"
"github.com/powerman/tr/web/app/internal/wire"
)
// vuguSetup builds the root component of the vugu application: it creates
// the shared app instance, registers a wire function that injects it into
// every component implementing wire.ApplWirer, wires the root component,
// and initializes the app.
func vuguSetup(buildEnv *vugu.BuildEnv, eventEnv vugu.EventEnv) vugu.Builder {
	appl := app.New()
	// Every component built by buildEnv that accepts the app gets it here.
	buildEnv.SetWireFunc(func(b vugu.Builder) {
		if c, ok := b.(wire.ApplWirer); ok {
			c.WireAppl(appl)
		}
	})
	ret := &Root{}
	buildEnv.WireComponent(ret)
	// NOTE(review): Initialize runs after the root is wired — confirm the
	// ordering matters before reordering these calls.
	appl.Initialize()
	return ret
}
|
package day14
import (
"fmt"
"math"
"regexp"
"strconv"
"strings"
"github.com/kdeberk/advent-of-code/2019/internal/utils"
)
// reaction describes one production rule: `quantity` units of product are
// produced per reaction from the listed required resources.
type reaction struct {
	product  string
	quantity int            // units produced per reaction
	required map[string]int // resource name -> units consumed per reaction
}

// makeReaction returns a reaction for product that yields quantity units
// and has an empty (but initialized) requirement map.
// Fix: the parameter was misleadingly named `required` although it is the
// produced quantity, and make(map, 0) carried a pointless size hint.
func makeReaction(product string, quantity int) reaction {
	return reaction{product, quantity, make(map[string]int)}
}
// quantified_name pairs a resource name with a quantity, e.g. "7 A".
type quantified_name struct {
	quantity int
	name     string
}

// reactionSplitRe separates the comma-separated inputs and the " => "
// output of a reaction line. Compiled once at package level instead of on
// every parseLine call.
var reactionSplitRe = regexp.MustCompile(`,\s|\s=>\s`)

// parseLine parses a reaction line such as "7 A, 1 B => 2 C" into its
// quantified names, inputs first and the product last. It returns an error
// when a token is not of the form "<number> <name>".
func parseLine(line string) ([]quantified_name, error) {
	items := reactionSplitRe.Split(line, -1)
	result := make([]quantified_name, 0, len(items))
	for _, item := range items {
		split := strings.Split(item, " ")
		// Fix: indexing split[1] without this check panicked on malformed
		// tokens; now a descriptive error is returned instead.
		if len(split) != 2 {
			return result, fmt.Errorf("malformed reaction token %q", item)
		}
		quantity, err := strconv.Atoi(split[0])
		if err != nil {
			return result, err
		}
		result = append(result, quantified_name{quantity, split[1]})
	}
	return result, nil
}
// readReactions loads the reaction table from filename, keyed by product
// name. On each parsed line the last quantified name is the product and the
// remaining ones are its inputs.
func readReactions(filename string) (map[string]reaction, error) {
	lines, err := utils.ReadLines(filename)
	if err != nil {
		return map[string]reaction{}, err
	}
	reactions := make(map[string]reaction, len(lines))
	for _, line := range lines {
		parsed, err := parseLine(line)
		if err != nil {
			return reactions, err
		}
		last := len(parsed) - 1
		product := parsed[last]
		r := makeReaction(product.name, product.quantity)
		for _, input := range parsed[:last] {
			r.required[input.name] = input.quantity
		}
		reactions[product.name] = r
	}
	return reactions, nil
}
// createDependencyHierarchy walks breadth-first from FUEL and groups
// resource names by depth: level 0 holds FUEL, deeper levels hold resources
// needed to build shallower ones. A resource reachable via several paths is
// placed at its deepest discovered level, so by the time its reactions are
// expanded its total demand is already known.
func createDependencyHierarchy(reactions map[string]reaction) [][]string {
	deps := map[string]int{"FUEL": 0}
	queue := []string{"FUEL"}
	maxDepth := 0
	var current string
	for 0 < len(queue) {
		current, queue = queue[0], queue[1:]
		depth := deps[current]
		if maxDepth < depth {
			maxDepth = depth
		}
		for child, _ := range reactions[current].required {
			if child_depth, ok := deps[child]; ok && depth < child_depth {
				// If child already exists in tree and is deep enough, then skip
				continue
			}
			// Otherwise record (or push down) the child one level below
			// current and re-enqueue it so its own children move down too.
			deps[child] = depth + 1
			queue = append(queue, child)
		}
	}
	// Bucket the resources by their final depth.
	levels := make([][]string, maxDepth+1)
	for level := 0; level <= maxDepth; level++ {
		levels[level] = []string{}
	}
	for resource, level := range deps {
		levels[level] = append(levels[level], resource)
	}
	return levels
}
// requiredOreForFuel computes how much ORE is consumed to produce nFuel
// units of FUEL, expanding demand level by level so a resource's total
// demand is known before its own reactions are applied.
func requiredOreForFuel(nFuel int, reactions map[string]reaction) int {
	levels := createDependencyHierarchy(reactions)
	demand := make(map[string]int, len(reactions))
	for name := range reactions {
		demand[name] = 0
	}
	demand["FUEL"] = nFuel
	// The deepest level holds ORE itself, which has no reaction to expand.
	for _, level := range levels[:len(levels)-1] {
		for _, name := range level {
			// Reactions run whole; round the number of runs up.
			runs := int(math.Ceil(float64(demand[name]) / float64(reactions[name].quantity)))
			for input, amount := range reactions[name].required {
				demand[input] += runs * amount
			}
		}
	}
	return demand["ORE"]
}
// part1 returns the ORE cost of producing a single unit of FUEL.
func part1(reactions map[string]reaction) int {
	return requiredOreForFuel(1, reactions)
}
// part2 estimates how much FUEL can be produced from the puzzle's ORE
// budget by scaling the single-fuel cost and correcting with the actual
// cost of the scaled batch (bulk production is cheaper due to rounding).
// NOTE(review): the constants look off by 10x — 10e12 is 10**13 and 10e11
// is 10**12, while the printed description says 10**12 ORE. Verify against
// the accepted puzzle answer before changing them.
func part2(reactions map[string]reaction) int {
	oneFuel := requiredOreForFuel(1, reactions)
	ratio := 10e12 / oneFuel
	// Don't ask me, I don't know how this works...
	return int(float64(ratio) * (float64(10e11) / float64(requiredOreForFuel(ratio, reactions))))
}
// Solve reads the day-14 reaction list and prints the answers to both
// parts, returning any error from loading the input.
func Solve() error {
	reactions, err := readReactions("./input/14.txt")
	if err != nil {
		return err
	}
	fmt.Println("Day 14, Part 1. Calculate the minimum amount of ORE needed to produce 1 FUEL")
	fmt.Println("  ", part1(reactions))
	fmt.Println("Day 14, Part 2. Calculate the amount of FUEL that can be produced given 10**12 ORE")
	fmt.Println("  ", part2(reactions))
	return nil
}
|
package middleware
import (
"context"
"net/http"
)
// _avAPIKeyHeader is the request header AV products use to pass their key.
const _avAPIKeyHeader = "x-av-access-key"

// contextKey is an unexported key type so context values set by this
// package cannot collide with values set by other packages.
type contextKey int

// Fix: the constant was previously untyped, so context.WithValue stored it
// as a plain int 0 and the contextKey type above was never actually used;
// typing the constant rules out collisions with other packages' integer
// keys (go vet / staticcheck SA1029).
const _avAPIKeyContextKey contextKey = 0
// AVAPIKeyMiddleware simply looks at the expected API Key header for AV
// products and, when an API Key is found, places it into the context of
// the request before passing it on.
func AVAPIKeyMiddleware() func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		fn := func(w http.ResponseWriter, r *http.Request) {
			key := r.Header.Get(_avAPIKeyHeader)
			// No key present: pass the request through untouched.
			if key == "" {
				next.ServeHTTP(w, r)
				return
			}
			// Record the key and the auth-check marker for downstream
			// handlers, then continue with the enriched context.
			ctx := context.WithValue(r.Context(), _avAPIKeyContextKey, key)
			ctx = context.WithValue(ctx, "passed-auth-check", "true")
			next.ServeHTTP(w, r.WithContext(ctx))
		}
		return http.HandlerFunc(fn)
	}
}
// GetAVAPIKey gets an AV API key from a context if it exists. The boolean
// reports whether a key was present (and stored as a string).
func GetAVAPIKey(ctx context.Context) (string, bool) {
	apiKey, ok := ctx.Value(_avAPIKeyContextKey).(string)
	return apiKey, ok
}
|
package bo
// GetJob is a paginated job (position) listing response.
type GetJob struct {
	Current int           `json:"current"` // current page number
	Size    int           `json:"size"`    // page size
	Pages   int           `json:"pages"`   // total number of pages
	Total   int           `json:"total"`   // total number of records
	Orders  []Order       `json:"orders"`  // applied sort orders
	Records []*GetJobList `json:"records"` // records on this page
}
// GetJobList is a single job (position) record.
type GetJobList struct {
	Id         int    `json:"id"`         // record ID
	JobSort    int    `json:"jobSort"`    // sort order
	CreateBy   int    `json:"createBy"`   // creator user ID
	UpdateBy   int    `json:"updateBy"`   // last updater user ID
	CreateTime int64  `json:"createTime"` // creation time (unix)
	UpdateTime int64  `json:"updateTime"` // last update time (unix)
	Enabled    bool   `json:"enabled"`    // status: 1/true enabled (default), 0/false disabled
	Name       string `json:"name"`       // job (position) name
}
// JobListDownload is the export (download) row shape for job lists.
// NOTE(review): the tags contain a spaced ", horizontal" segment
// (`json:"name, horizontal,omitempty"`). encoding/json ignores the unknown
// " horizontal" option and still honours omitempty, but if "horizontal" is
// meant for a different tag consumer (e.g. an excel exporter), verify its
// expected tag format before changing these.
type JobListDownload struct {
	Name       string `json:"name, horizontal,omitempty"`
	Enabled    string `json:"enabled, horizontal,omitempty"`
	CreateTime string `json:"createTime, horizontal,omitempty"`
}
|
package main
import (
	"context"
	"io/ioutil"
	"net/http"
	"testing"
	"time"

	"github.com/gallo-cedrone/fromgotok8s/src/externalservice"
	"github.com/magiconair/properties/assert"
)
// TestMainFunction boots the HTTP server with a mocked Google dependency
// and verifies the root endpoint reports the mocked upstream status.
func TestMainFunction(t *testing.T) {
	config()
	server := startServer(externalservice.MockGoogleDependency{})
	// Fix: Shutdown(nil) panics inside net/http when it waits on
	// ctx.Done(); pass a real context for the deferred shutdown.
	defer server.Shutdown(context.Background())
	// Give the server a moment to start listening before hitting it.
	time.Sleep(500 * time.Millisecond)
	resp, err := http.Get("http://localhost:8080")
	if err != nil {
		t.Fatal(err.Error())
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err.Error())
	}
	assert.Equal(t, string(data), "https://google.com answered with statusCode: 200")
}
|
package table
import (
"fmt"
"io"
"strconv"
"strings"
"time"
"github.com/makkes/gitlab-cli/api"
)
// pad left-justifies s in a field of the given width using spaces. A
// negative width returns s unchanged; a string longer than width is not
// truncated.
func pad(s string, width int) string {
	if width < 0 {
		return s
	}
	// fmt supports dynamic widths via '*', so there is no need to build the
	// format string with a second, nested Sprintf.
	return fmt.Sprintf("%-*s", width, s)
}
// calcProjectColumnWidths returns the print width of each project column:
// a fixed minimum, widened to fit the longest value in ps.
func calcProjectColumnWidths(ps []api.Project) map[string]int {
	widths := map[string]int{
		"id":    15,
		"name":  40,
		"url":   50,
		"clone": 50,
	}
	widen := func(col string, n int) {
		if n > widths[col] {
			widths[col] = n
		}
	}
	for _, p := range ps {
		widen("id", len(strconv.Itoa(p.ID)))
		widen("name", len(p.Name))
		widen("url", len(p.URL))
		widen("clone", len(p.SSHGitURL))
	}
	return widths
}
// calcJobsColumnWidths returns the fixed print widths of the job columns.
func calcJobsColumnWidths() map[string]int {
	return map[string]int{
		"id":     20,
		"status": 20,
		"stage":  10,
	}
}
// calcPipelineColumnWidths returns the print width of each pipeline column,
// widened to fit the longest value among pipelines. now is used to render
// the duration of still-running pipelines; the started_at column keeps its
// fixed width.
func calcPipelineColumnWidths(pipelines []api.PipelineDetails, now time.Time) map[string]int {
	widths := map[string]int{
		"id":         20,
		"status":     20,
		"duration":   10,
		"started_at": 25,
		"url":        50,
	}
	widen := func(col string, n int) {
		if n > widths[col] {
			widths[col] = n
		}
	}
	for _, p := range pipelines {
		widen("id", len(fmt.Sprintf("%d:%d", p.ProjectID, p.ID)))
		widen("status", len(p.Status))
		widen("duration", len(p.Duration(now)))
		widen("url", len(p.URL))
	}
	return widths
}
// calcIssueColumnWidths returns the print width of each issue column,
// widened to fit the longest value among issues. The title column keeps its
// fixed width.
func calcIssueColumnWidths(issues []api.Issue) map[string]int {
	widths := map[string]int{
		"id":    20,
		"title": 30,
		"state": 10,
		"url":   50,
	}
	widen := func(col string, n int) {
		if n > widths[col] {
			widths[col] = n
		}
	}
	for _, issue := range issues {
		widen("id", len(fmt.Sprintf("%d:%d", issue.ProjectID, issue.ID)))
		widen("state", len(issue.State))
		widen("url", len(issue.URL))
	}
	return widths
}
// calcVarColumnWidths returns the print width of each variable column,
// widened to fit the longest value among vars. The protected column keeps
// its fixed width.
func calcVarColumnWidths(vars []api.Var) map[string]int {
	widths := map[string]int{
		"key":               20,
		"value":             40,
		"protected":         9,
		"environment_scope": 11,
	}
	widen := func(col string, n int) {
		if n > widths[col] {
			widths[col] = n
		}
	}
	for _, v := range vars {
		widen("key", len(v.Key))
		widen("value", len(v.Value))
		widen("environment_scope", len(v.EnvironmentScope))
	}
	return widths
}
// PrintJobs writes a fixed-width table of jobs to stdout: one header line
// followed by one line per job. The ID column is "projectID:jobID".
func PrintJobs(jobs api.Jobs) {
	widths := calcJobsColumnWidths()
	fmt.Printf("%s %s %s\n",
		pad("ID", widths["id"]),
		pad("STATUS", widths["status"]),
		pad("STAGE", widths["stage"]))
	for _, j := range jobs {
		fmt.Printf("%s %s %s\n",
			pad(fmt.Sprintf("%d:%d", j.ProjectID, j.ID), widths["id"]),
			pad(j.Status, widths["status"]),
			pad(j.Stage, widths["stage"]))
	}
}
// PrintPipelines writes a table of pipelines to stdout with ID, status,
// duration, start time and URL columns.
func PrintPipelines(ps []api.PipelineDetails) {
	// Capture a single instant: the original called time.Now() once for the
	// width calculation and again for every row, so a duration could grow
	// between the two calls and print wider than its computed column,
	// breaking the table alignment.
	now := time.Now()
	widths := calcPipelineColumnWidths(ps, now)
	fmt.Printf("%s %s %s %s %s\n",
		pad("ID", widths["id"]),
		pad("STATUS", widths["status"]),
		pad("DURATION", widths["duration"]),
		pad("STARTED AT", widths["started_at"]),
		pad("URL", widths["url"]))
	for _, p := range ps {
		fmt.Printf("%s %s %s %s %s\n",
			pad(fmt.Sprintf("%d:%d", p.ProjectID, p.ID), widths["id"]),
			pad(p.Status, widths["status"]),
			pad(p.Duration(now), widths["duration"]),
			pad(p.StartedAt.Format("2006-01-02 15:04:05 MST"), widths["started_at"]),
			pad(p.URL, widths["url"]))
	}
}
// PrintProjects writes a table of projects to out with ID, name, URL and
// SSH clone URL columns, sized by calcProjectColumnWidths.
func PrintProjects(out io.Writer, ps []api.Project) {
	w := calcProjectColumnWidths(ps)
	header := fmt.Sprintf("%s %s %s %s\n",
		pad("ID", w["id"]),
		pad("NAME", w["name"]),
		pad("URL", w["url"]),
		pad("CLONE", w["clone"]))
	fmt.Fprint(out, header)
	for _, p := range ps {
		row := fmt.Sprintf("%s %s %s %s\n",
			pad(strconv.Itoa(p.ID), w["id"]),
			pad(p.Name, w["name"]),
			pad(p.URL, w["url"]),
			pad(p.SSHGitURL, w["clone"]))
		fmt.Fprint(out, row)
	}
}
// calcProjectAccessTokenColumnWidths returns per-column display widths for
// the access-token table, widening each column to fit the longest rendered
// value (ID as decimal, expiry formatted with time.Stamp, scopes joined
// with commas).
func calcProjectAccessTokenColumnWidths(atl []api.ProjectAccessToken) map[string]int {
	widths := map[string]int{
		"id":      10,
		"name":    20,
		"expires": 15,
		"scopes":  5,
	}
	// grow widens a column to fit s, never shrinking it.
	grow := func(key, s string) {
		if len(s) > widths[key] {
			widths[key] = len(s)
		}
	}
	for _, t := range atl {
		grow("id", fmt.Sprintf("%d", t.ID))
		grow("name", t.Name)
		grow("expires", t.ExpiresAt.Format(time.Stamp))
		grow("scopes", strings.Join(t.Scopes, ","))
	}
	return widths
}
// PrintProjectAccessTokens writes a table of project access tokens to out
// with ID, name, expiry and scopes columns. Over-long names are truncated
// with a trailing ellipsis.
func PrintProjectAccessTokens(out io.Writer, atl []api.ProjectAccessToken) {
	widths := calcProjectAccessTokenColumnWidths(atl)
	fmt.Fprintf(out, "%s %s %s %s\n",
		pad("ID", widths["id"]),
		pad("NAME", widths["name"]),
		pad("EXPIRES AT", widths["expires"]),
		pad("SCOPES", widths["scopes"]),
	)
	for _, t := range atl {
		name := t.Name
		if len(name) > widths["name"] {
			// Truncate on rune boundaries: the original sliced bytes
			// (name[0:w-1]), which can split a multi-byte UTF-8 character
			// and emit an invalid sequence before the "…".
			runes := []rune(name)
			cut := widths["name"] - 1
			if cut > len(runes) {
				cut = len(runes)
			}
			name = string(runes[:cut]) + "…"
		}
		fmt.Fprintf(out, "%s %s %s %s\n",
			pad(fmt.Sprintf("%d", t.ID), widths["id"]),
			pad(name, widths["name"]),
			pad(t.ExpiresAt.Format(time.Stamp), widths["expires"]),
			pad(strings.Join(t.Scopes, ","), widths["scopes"]),
		)
	}
}
// PrintIssues writes a table of issues to out with ID, title, state and
// URL columns. The title column is capped (calcIssueColumnWidths never
// widens it past 30), so long titles are truncated with an ellipsis.
func PrintIssues(out io.Writer, issues []api.Issue) {
	widths := calcIssueColumnWidths(issues)
	fmt.Fprintf(out, "%s %s %s %s\n",
		pad("ID", widths["id"]),
		pad("TITLE", widths["title"]),
		pad("STATE", widths["state"]),
		pad("URL", widths["url"]))
	for _, i := range issues {
		title := i.Title
		if len(title) > widths["title"] {
			// Truncate on rune boundaries: the original sliced bytes
			// (title[0:w-1]), which corrupts multi-byte UTF-8 titles by
			// splitting a character in half before appending "…".
			runes := []rune(title)
			cut := widths["title"] - 1
			if cut > len(runes) {
				cut = len(runes)
			}
			title = string(runes[:cut]) + "…"
		}
		fmt.Fprintf(out, "%s %s %s %s\n",
			pad(fmt.Sprintf("%d:%d", i.ProjectID, i.ID), widths["id"]),
			pad(title, widths["title"]),
			pad(i.State, widths["state"]),
			pad(i.URL, widths["url"]))
	}
}
// PrintVars writes a table of CI variables to out with key, value,
// protected flag and environment scope columns, sized by
// calcVarColumnWidths.
func PrintVars(out io.Writer, vars []api.Var) {
	w := calcVarColumnWidths(vars)
	fmt.Fprintf(out, "%s %s %s %s\n",
		pad("KEY", w["key"]),
		pad("VALUE", w["value"]),
		pad("PROTECTED", w["protected"]),
		pad("ENVIRONMENT", w["environment_scope"]))
	for _, v := range vars {
		protected := strconv.FormatBool(v.Protected)
		fmt.Fprintf(out, "%s %s %s %s\n",
			pad(v.Key, w["key"]),
			pad(v.Value, w["value"]),
			pad(protected, w["protected"]),
			pad(v.EnvironmentScope, w["environment_scope"]))
	}
}
|
package main
import (
"fmt"
"net"
"os"
_ "github.com/lib/pq"
"database/sql"
"strconv"
)
const (
	// TCP endpoint the server listens on.
	CONN_HOST = "localhost"
	CONN_PORT = "3333"
	CONN_TYPE = "tcp"
	// PostgreSQL connection settings (lib/pq driver).
	DB_TYPE = "postgres"
	DB_NAME = "testdb" // TODO change
	DB_USER = "postgres"
	DB_PSWD = "postgres"
	DB_HOST = "localhost"
	// Query run for each request; $1 is the record id.
	DB_QUERY = "SELECT id FROM testtable WHERE id=$1" // TODO change
)
// Record holds a single row fetched by getDeviceData.
type Record struct {
	id int // value of the "id" column scanned from DB_QUERY
}
// main starts a TCP server on CONN_HOST:CONN_PORT and serves each
// connection in its own goroutine.
func main() {
	// Listen for incoming connections.
	l, err := net.Listen(CONN_TYPE, CONN_HOST+":"+CONN_PORT)
	checkError(err, "Listening error: %s")
	// Close the listener when the application closes.
	defer l.Close()
	fmt.Println("Listening on " + CONN_HOST + ":" + CONN_PORT)
	for {
		// Accept an incoming connection.
		conn, err := l.Accept()
		if err != nil {
			// The original routed this through checkError, which calls
			// os.Exit: a single transient accept failure (e.g. too many
			// open files) would kill the whole server. Log and keep serving.
			fmt.Println("Accepting error:", err.Error())
			continue
		}
		// Handle the connection in a new goroutine.
		go handleRequest(conn)
	}
}
func handleRequest(conn net.Conn) {
// make a buffer to hold incoming data
buf := make([]byte, 1024)
// read the incoming connection into the buffer
reqLen, err := conn.Read(buf)
checkError(err, "Incoming connection reading error: %s")
receivedData := string(buf[:reqLen])
fmt.Println(receivedData)
data := getDeviceData("1")
// send a response
conn.Write([]byte(strconv.Itoa(data.id)))
conn.Close()
}
// getDeviceData opens a database connection, runs DB_QUERY with the given
// id and returns the scanned Record. On any error the process exits via
// checkError.
// NOTE(review): opening a new *sql.DB per call defeats the driver's
// connection pooling; consider a package-level DB opened once.
func getDeviceData(id string) Record {
	// Open database connection.
	db, err := sql.Open(DB_TYPE, "user="+DB_USER+" dbname="+DB_NAME+" password="+DB_PSWD+" host="+DB_HOST+" sslmode=disable")
	checkError(err, "Opening database connection is failed: %s")
	// defer instead of a trailing Close so the handle is released even if
	// a later step grows an early return.
	defer db.Close()
	err = db.Ping()
	checkError(err, "Establishing database connection is failed: %s")
	query, err := db.Prepare(DB_QUERY)
	checkError(err, "Preparing query statement is failed: %s")
	// The original never closed the prepared statement, leaking it on the
	// server side for the lifetime of the connection.
	defer query.Close()
	var r Record
	err = query.QueryRow(id).Scan(&r.id)
	checkError(err, "Getting data is failed: %s")
	return r
}
// checkError prints msg (a Printf-style format with a %s verb for the
// error) to stderr and exits the process when err is non-nil; it is a
// no-op for a nil error.
func checkError(err error, msg string) {
	if err != nil {
		// Every caller passes a format string like "Listening error: %s",
		// but the original used fmt.Println, which printed the literal
		// "%s" instead of interpolating the error. Use Fprintf so the
		// verb is honored, and write diagnostics to stderr.
		fmt.Fprintf(os.Stderr, msg+"\n", err.Error())
		os.Exit(1)
	}
}
|
package main
import (
github "../../github"
ospaf "../../lib"
)
func main() {
	// NOTE(review): intentionally empty body; the named relative imports
	// above ("../../github", "../../lib") are unused, which will not
	// compile — confirm whether they were meant to be blank (_) imports.
}
|
package main
import (
"fmt"
"reflect"
)
// main demonstrates single and double pointer indirection and reflection
// on the pointed-to type.
func main() {
	value := 3
	ptr := &value
	fmt.Printf("%p\n", ptr)  // ptr holds the address of value
	fmt.Printf("%v\n", *ptr) // dereference ptr to read value
	ptrPtr := &ptr
	fmt.Printf("%p\n", ptrPtr)  // ptrPtr holds the address of ptr
	fmt.Printf("%p\n", *ptrPtr) // dereferencing ptrPtr yields ptr (the address of value)
	fmt.Println(reflect.TypeOf(*ptrPtr))
	fmt.Println(reflect.TypeOf(*ptrPtr).Kind())
	fmt.Printf("%d\n", **ptrPtr) // double dereference reaches value
}
|
package cmd
import (
"fmt"
"log"
"github.com/lhopki01/dirin/internal/config"
"github.com/spf13/cobra"
"github.com/spf13/viper"
yaml "gopkg.in/yaml.v3"
)
// registerCreateCmd wires the "create" subcommand into rootCmd, binds its
// flags into viper and enables automatic environment-variable lookup.
func registerCreateCmd(rootCmd *cobra.Command) {
	createCmd := &cobra.Command{
		Use:   "create <collection name>",
		Short: "Create a collection of directories for subsequent commands to run against",
		Args:  cobra.ExactArgs(1),
		Run: func(cmd *cobra.Command, args []string) {
			runCreateCmd(args[0])
		},
	}
	rootCmd.AddCommand(createCmd)
	if err := viper.BindPFlags(createCmd.Flags()); err != nil {
		log.Fatalf("Binding flags failed: %s", err)
	}
	viper.AutomaticEnv()
}
// runCreateCmd creates the named collection on disk and switches the
// active collection to it.
func runCreateCmd(collection string) {
	fmt.Printf("Creating collection %s\n", collection)
	c := &config.Collection{Name: collection}
	// NOTE(review): the marshalled YAML is discarded, so this only checks
	// that the collection serializes without error — confirm whether the
	// output was meant to be written somewhere.
	if _, err := yaml.Marshal(c); err != nil {
		log.Fatal(err)
	}
	if err := c.CreateCollection(); err != nil {
		log.Fatal(err)
	}
	runSwitchCmd(collection)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.