text stringlengths 11 4.05M |
|---|
/**
 * Length of the longest non-decreasing contiguous run in a sequence.
 * (The original header comment was mojibake-garbled Chinese; reconstructed —
 * confirm intent against callers.)
 */
package backTrack
// SubqueryMax computes the length of the longest non-decreasing contiguous
// run within a prefix of arr.
type SubqueryMax struct {
	arr []int // input sequence
	cnt int   // NOTE(review): never read by getMax; kept for compatibility
}

// newSubqueryMax builds a SubqueryMax over arr.
func newSubqueryMax(arr []int, cnt int) *SubqueryMax {
	return &SubqueryMax{
		arr: arr,
		cnt: cnt,
	}
}

// getMax returns the length of the longest non-decreasing contiguous run
// inside arr[0..i] (inclusive).
//
// BUG FIX: the original recursion extended the running length whenever
// arr[j] >= arr[j-1] but never reset it on a descent, so for [1,2,1,2,3] it
// reported 4 instead of 3. The run length must restart at 1 after each
// descent, and the answer is the maximum over all runs.
func (sm *SubqueryMax) getMax(i int) int {
	best, run := 1, 1
	for j := 1; j <= i; j++ {
		if sm.arr[j] >= sm.arr[j-1] {
			run++
		} else {
			run = 1
		}
		if run > best {
			best = run
		}
	}
	return best
}
|
package routes
import (
"encoding/json"
"net/http"
"time"
"github.com/gorilla/mux"
"github.com/jinzhu/gorm"
"github.com/winded/tyomaa/backend/db"
"github.com/winded/tyomaa/backend/middleware"
"github.com/winded/tyomaa/backend/util"
"github.com/winded/tyomaa/backend/util/context"
"github.com/winded/tyomaa/shared/api"
)
// ClockRoutes wires up the clock endpoints (current entry lookup, start and
// stop) on the given router. Every route requires an authorized user.
func ClockRoutes(router *mux.Router) {
	router.Use(middleware.Authorization)

	// GET "": report the caller's currently running (open-ended) time entry.
	getHandler := func(w http.ResponseWriter, r *http.Request) {
		defer util.HandleApiError(w)
		ctx := context.Get(r)

		var entry db.TimeEntry
		err := db.Instance.First(&entry, `user_id = ? AND "end" IS NULL`, ctx.User.ID).Error
		switch {
		case gorm.IsRecordNotFoundError(err):
			panic(api.ClockActiveEntryNotFoundErr)
		case err != nil:
			panic(err)
		}

		var response api.ClockGetResponse
		if entry.ID != 0 {
			response.Entry = entry.ToApiFormat()
		}
		json.NewEncoder(w).Encode(response)
	}

	// POST "/start": begin a new open-ended entry for the requested project.
	startHandler := func(w http.ResponseWriter, r *http.Request) {
		defer util.HandleApiError(w)
		ctx := context.Get(r)

		var request api.ClockStartPostRequest
		if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
			panic(api.Error(http.StatusBadRequest, "Malformed body"))
		}
		if !util.ValidateNameIdentifier(request.Project) {
			panic(api.Error(http.StatusBadRequest, "Project: "+util.NameIdentifierError))
		}

		entry := db.TimeEntry{
			UserID:  ctx.User.ID,
			Project: request.Project,
			Start:   time.Now(),
			End:     nil,
		}
		if err := db.Instance.Save(&entry).Error; err != nil {
			panic(err)
		}

		json.NewEncoder(w).Encode(api.ClockStartPostResponse{
			Entry: entry.ToApiFormat(),
		})
	}

	// POST "/stop": close the caller's currently running entry.
	stopHandler := func(w http.ResponseWriter, r *http.Request) {
		defer util.HandleApiError(w)
		ctx := context.Get(r)

		var entry db.TimeEntry
		err := db.Instance.Where(`user_id = ? AND "end" IS NULL`, ctx.User.ID).First(&entry).Error
		switch {
		case gorm.IsRecordNotFoundError(err):
			panic(api.ClockActiveEntryNotFoundErr)
		case err != nil:
			panic(err)
		}

		now := time.Now()
		entry.End = &now
		if err := db.Instance.Save(&entry).Error; err != nil {
			panic(err)
		}

		json.NewEncoder(w).Encode(api.ClockStopPostResponse{
			Entry: entry.ToApiFormat(),
		})
	}

	router.Path("").Methods(http.MethodGet).HandlerFunc(getHandler)
	router.Path("/start").Methods(http.MethodPost).HandlerFunc(startHandler)
	router.Path("/stop").Methods(http.MethodPost).HandlerFunc(stopHandler)
}
|
package main
import (
"io/ioutil"
"os"
"strings"
"text/template"
"github.com/aymerick/douceur/css"
"github.com/aymerick/douceur/parser"
"gopkg.in/alecthomas/kingpin.v3-unstable"
"github.com/alecthomas/chroma/v2"
)
const (
	// outputTemplate is the Go source file emitted for the generated style;
	// the Lower, TokenType and TranslateDecls template funcs are bound in main.
	outputTemplate = `package styles

import (
	"github.com/alecthomas/chroma/v2"
)

// {{.Name}} style.
var {{.Name}} = Register(chroma.MustNewStyle("{{.Name|Lower}}", chroma.StyleEntries{
{{- range .Rules}}
{{- if .Prelude|TokenType}}
	chroma.{{.Prelude|TokenType}}: "{{.Declarations|TranslateDecls}}",
{{- end}}
{{- end}}
}))
`
)
var (
	// typeByClass maps a CSS class selector (e.g. ".hll") to the chroma token
	// type it styles; init() adds one entry per chroma.StandardTypes.
	typeByClass = map[string]chroma.TokenType{
		".hll": chroma.Background,
	}

	// cssNamedColours maps CSS named colours to their hex values so that
	// translateDecls can emit hex codes in the generated style.
	cssNamedColours = map[string]string{
		"black": "#000000", "silver": "#c0c0c0", "gray": "#808080", "white": "#ffffff",
		"maroon": "#800000", "red": "#ff0000", "purple": "#800080", "fuchsia": "#ff00ff",
		"green": "#008000", "lime": "#00ff00", "olive": "#808000", "yellow": "#ffff00",
		"navy": "#000080", "blue": "#0000ff", "teal": "#008080", "aqua": "#00ffff",
		"orange": "#ffa500", "aliceblue": "#f0f8ff", "antiquewhite": "#faebd7", "aquamarine": "#7fffd4",
		"azure": "#f0ffff", "beige": "#f5f5dc", "bisque": "#ffe4c4", "blanchedalmond": "#ffebcd",
		"blueviolet": "#8a2be2", "brown": "#a52a2a", "burlywood": "#deb887", "cadetblue": "#5f9ea0",
		"chartreuse": "#7fff00", "chocolate": "#d2691e", "coral": "#ff7f50", "cornflowerblue": "#6495ed",
		"cornsilk": "#fff8dc", "crimson": "#dc143c", "cyan": "#00ffff", "darkblue": "#00008b",
		"darkcyan": "#008b8b", "darkgoldenrod": "#b8860b", "darkgray": "#a9a9a9", "darkgreen": "#006400",
		"darkgrey": "#a9a9a9", "darkkhaki": "#bdb76b", "darkmagenta": "#8b008b", "darkolivegreen": "#556b2f",
		"darkorange": "#ff8c00", "darkorchid": "#9932cc", "darkred": "#8b0000", "darksalmon": "#e9967a",
		"darkseagreen": "#8fbc8f", "darkslateblue": "#483d8b", "darkslategray": "#2f4f4f", "darkslategrey": "#2f4f4f",
		"darkturquoise": "#00ced1", "darkviolet": "#9400d3", "deeppink": "#ff1493", "deepskyblue": "#00bfff",
		"dimgray": "#696969", "dimgrey": "#696969", "dodgerblue": "#1e90ff", "firebrick": "#b22222",
		"floralwhite": "#fffaf0", "forestgreen": "#228b22", "gainsboro": "#dcdcdc", "ghostwhite": "#f8f8ff",
		"gold": "#ffd700", "goldenrod": "#daa520", "greenyellow": "#adff2f", "grey": "#808080",
		"honeydew": "#f0fff0", "hotpink": "#ff69b4", "indianred": "#cd5c5c", "indigo": "#4b0082",
		"ivory": "#fffff0", "khaki": "#f0e68c", "lavender": "#e6e6fa", "lavenderblush": "#fff0f5",
		"lawngreen": "#7cfc00", "lemonchiffon": "#fffacd", "lightblue": "#add8e6", "lightcoral": "#f08080",
		"lightcyan": "#e0ffff", "lightgoldenrodyellow": "#fafad2", "lightgray": "#d3d3d3", "lightgreen": "#90ee90",
		"lightgrey": "#d3d3d3", "lightpink": "#ffb6c1", "lightsalmon": "#ffa07a", "lightseagreen": "#20b2aa",
		"lightskyblue": "#87cefa", "lightslategray": "#778899", "lightslategrey": "#778899", "lightsteelblue": "#b0c4de",
		"lightyellow": "#ffffe0", "limegreen": "#32cd32", "linen": "#faf0e6", "magenta": "#ff00ff",
		"mediumaquamarine": "#66cdaa", "mediumblue": "#0000cd", "mediumorchid": "#ba55d3", "mediumpurple": "#9370db",
		"mediumseagreen": "#3cb371", "mediumslateblue": "#7b68ee", "mediumspringgreen": "#00fa9a", "mediumturquoise": "#48d1cc",
		"mediumvioletred": "#c71585", "midnightblue": "#191970", "mintcream": "#f5fffa", "mistyrose": "#ffe4e1",
		"moccasin": "#ffe4b5", "navajowhite": "#ffdead", "oldlace": "#fdf5e6", "olivedrab": "#6b8e23",
		"orangered": "#ff4500", "orchid": "#da70d6", "palegoldenrod": "#eee8aa", "palegreen": "#98fb98",
		"paleturquoise": "#afeeee", "palevioletred": "#db7093", "papayawhip": "#ffefd5", "peachpuff": "#ffdab9",
		"peru": "#cd853f", "pink": "#ffc0cb", "plum": "#dda0dd", "powderblue": "#b0e0e6",
		"rosybrown": "#bc8f8f", "royalblue": "#4169e1", "saddlebrown": "#8b4513", "salmon": "#fa8072",
		"sandybrown": "#f4a460", "seagreen": "#2e8b57", "seashell": "#fff5ee", "sienna": "#a0522d",
		"skyblue": "#87ceeb", "slateblue": "#6a5acd", "slategray": "#708090", "slategrey": "#708090",
		"snow": "#fffafa", "springgreen": "#00ff7f", "steelblue": "#4682b4", "tan": "#d2b48c",
		"thistle": "#d8bfd8", "tomato": "#ff6347", "turquoise": "#40e0d0", "violet": "#ee82ee",
		"wheat": "#f5deb3", "whitesmoke": "#f5f5f5", "yellowgreen": "#9acd32", "rebeccapurple": "#663399",
	}

	// Command-line arguments: output style name and the .css file to import.
	nameArg = kingpin.Arg("name", "Name of output style.").Required().String()
	fileArg = kingpin.Arg("stylesheets", ".css file to import").Required().ExistingFile()
)
// init extends typeByClass with a ".<short-name>" selector for every standard
// chroma token type, so CSS class selectors map directly onto token types.
func init() {
	for tokenType, shortName := range chroma.StandardTypes {
		typeByClass["."+shortName] = tokenType
	}
}
// resolveColour maps a CSS named colour to its hex form; any other value
// (e.g. "#rrggbb") passes through unchanged.
func resolveColour(value string) string {
	if hex, ok := cssNamedColours[value]; ok {
		return hex
	}
	return value
}

// translateDecls converts a rule's CSS declarations into a chroma style
// entry string such as "bold italic #ff0000 bg:#000000".
func translateDecls(decls []*css.Declaration) string {
	out := []string{}
	for _, decl := range decls {
		switch decl.Property {
		case "color":
			out = append(out, resolveColour(decl.Value))
		case "background-color":
			// FIX: named colours are now translated here too, matching the
			// "color" case above (previously e.g. "bg:white" was emitted
			// with the name untranslated).
			out = append(out, "bg:"+resolveColour(decl.Value))
		case "font-style":
			if strings.Contains(decl.Value, "italic") {
				out = append(out, "italic")
			}
		case "font-weight":
			if strings.Contains(decl.Value, "bold") {
				out = append(out, "bold")
			}
		case "text-decoration":
			if strings.Contains(decl.Value, "underline") {
				out = append(out, "underline")
			}
		}
	}
	return strings.Join(out, " ")
}
// main reads the CSS file named on the command line, parses it, and renders a
// chroma style Go source file to stdout using outputTemplate.
func main() {
	kingpin.Parse()
	source, err := ioutil.ReadFile(*fileArg)
	kingpin.FatalIfError(err, "")
	// FIX: named "stylesheet" rather than "css" so the parse result no longer
	// shadows the imported douceur/css package within this function.
	stylesheet, err := parser.Parse(string(source))
	kingpin.FatalIfError(err, "")
	context := map[string]interface{}{
		"Name":  *nameArg,
		"Rules": stylesheet.Rules,
	}
	tmpl := template.Must(template.New("style").Funcs(template.FuncMap{
		"Lower":          strings.ToLower,
		"TranslateDecls": translateDecls,
		"TokenType":      func(s string) chroma.TokenType { return typeByClass[s] },
	}).Parse(outputTemplate))
	err = tmpl.Execute(os.Stdout, context)
	kingpin.FatalIfError(err, "")
}
|
package main
import (
"io/ioutil"
"log"
"os"
"testing"
"time"
)
// TestExecute verifies that a single succeeding command reports ok.
func TestExecute(t *testing.T) {
	defer quiet()()

	loop := &Loop{Commands: Commands{{Exec: "true"}}}
	ok, err := loop.Execute()
	if !ok {
		t.Error("not ok")
	}
	if err != nil {
		t.Fatal(err)
	}
}
// TestExecuteFail verifies that one failing command makes Execute report
// not-ok without returning an error.
func TestExecuteFail(t *testing.T) {
	defer quiet()()

	loop := &Loop{Commands: Commands{
		{Exec: "false"},
		{Exec: "true"},
	}}
	ok, err := loop.Execute()
	if ok {
		t.Error("ok")
	}
	if err != nil {
		t.Fatal(err)
	}
}
// TestExecuteInvalid verifies that a non-existent executable yields an error
// and a not-ok result.
func TestExecuteInvalid(t *testing.T) {
	defer quiet()()

	loop := &Loop{Commands: Commands{{Exec: "abcdefghijklmnopqrstuwxyz"}}}
	ok, err := loop.Execute()
	if ok {
		t.Error("ok")
	}
	if err == nil {
		t.Error("no error")
	}
}
// TestStartStopTerminated starts a command that exits immediately and then
// stops the loop.
func TestStartStopTerminated(t *testing.T) {
	defer quiet()()

	loop := &Loop{Command: &Command{Exec: "true"}}
	if err := loop.Start(); err != nil {
		t.Fatal(err)
	}
	if _, err := loop.Stop(); err != nil {
		t.Fatal(err)
	}
}
// TestStartStopDaemon starts a long-running command and stops it while it is
// still alive.
func TestStartStopDaemon(t *testing.T) {
	defer quiet()()

	loop := &Loop{Command: &Command{
		Exec: "sleep",
		Args: []string{"1m"},
	}}
	if err := loop.Start(); err != nil {
		t.Fatal(err)
	}
	if _, err := loop.Stop(); err != nil {
		t.Fatal(err)
	}
}
// TestStartStopInvalid expects Start to fail for a missing executable while a
// subsequent Stop still succeeds.
func TestStartStopInvalid(t *testing.T) {
	defer quiet()()

	loop := &Loop{Command: &Command{Exec: "abcdefghijklmnopqrstuwxyz"}}
	if err := loop.Start(); err == nil {
		t.Fatal("error expected")
	}
	if _, err := loop.Stop(); err != nil {
		t.Fatal(err)
	}
}
// TestWatch verifies that Watch returns once a file matching the include
// pattern changes.
func TestWatch(t *testing.T) {
	defer quiet()()

	loop := &Loop{
		Include: Patterns{"*"},
	}

	// FIX: stop the writer goroutine and remove its scratch file when the
	// test ends — the original leaked both past the test.
	done := make(chan struct{})
	defer close(done)
	defer os.Remove("test")

	wait := make(chan error)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
			}
			ioutil.WriteFile("test", []byte{}, 0644)
			time.Sleep(1 * time.Millisecond)
		}
	}()
	go func() {
		err := loop.Watch()
		wait <- err
		close(wait)
	}()

	var err error
	select {
	case err = <-wait:
	case <-time.After(1 * time.Second):
		t.Fatal("timeout")
	}
	if err != nil {
		t.Fatal(err)
	}
}
// TestEnv verifies that per-command environment variables reach the spawned
// process.
func TestEnv(t *testing.T) {
	defer quiet()()

	loop := &Loop{Commands: Commands{{
		Env:  map[string]string{"TEST": "x"},
		Exec: "bash",
		Args: []string{"-uc", "test x=$TEST"},
	}}}
	ok, err := loop.Execute()
	if !ok {
		t.Fatal("not ok")
	}
	if err != nil {
		t.Fatal(err)
	}
}
func quiet() func() {
null, _ := os.Open(os.DevNull)
sout := os.Stdout
serr := os.Stderr
os.Stdout = null
os.Stderr = null
log.SetOutput(null)
return func() {
defer null.Close()
os.Stdout = sout
os.Stderr = serr
log.SetOutput(os.Stderr)
}
}
|
/*
* Copyright 2020 The Magma Authors.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package service_registry
const (
	// ServiceName is the registered name of this service.
	ServiceName = "service_registry"

	// ServiceRegistryModeEnvVar selects which registry backend to use; its
	// value should be one of the *ServiceRegistry constants below.
	ServiceRegistryModeEnvVar = "SERVICE_REGISTRY_MODE"
	// TODO: Move service registry modes to orc8r/lib
	DockerServiceRegistry = "docker"
	K8sServiceRegistry    = "k8s"
	YamlServiceRegistry   = "yaml"

	// TODO: Move standardized ports to service creation
	HttpServerPort  = 8080
	GrpcServicePort = 9180
)
|
package libldbrest
import (
"net/http"
"sync/atomic"
)
// SwappableHandler is an atomic.Value that deals specifically with
// http.Handlers, and which can act as an http.Handler itself by grabbing and
// running the currently held Handler.
//
// NOTE(review): Load (and therefore ServeHTTP) panics on the type assertion
// if called before the first Store — confirm all callers Store an initial
// handler before serving traffic.
type SwappableHandler struct {
	holder atomic.Value
}

// Store atomically replaces the currently held handler.
func (sh *SwappableHandler) Store(handler http.Handler) {
	sh.holder.Store(&handlerWrapper{handler})
}

// Load returns the handler most recently passed to Store.
func (sh *SwappableHandler) Load() http.Handler {
	return sh.holder.Load().(http.Handler)
}

// ServeHTTP implements http.Handler by delegating to the currently held
// handler.
func (sh *SwappableHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	sh.Load().ServeHTTP(w, r)
}

// For a given atomic.Value, only instances of the same type can be Store()d,
// and this specifically applies to the concrete type (they can't just be the
// same interface type). So this is a thin http.Handler wrapper that serves as
// the single concrete type for SwappableHandler's atomic.Value.
type handlerWrapper struct {
	http.Handler
}
|
package h_test
import (
"errors"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/josephspurrier/h"
"github.com/stretchr/testify/assert"
)
// TestFResponseOK verifies that a handler returning 200 with no error leaves
// the body empty.
func TestFResponseOK(t *testing.T) {
	mux := http.NewServeMux()
	mux.Handle("/", h.F(func(w http.ResponseWriter, r *http.Request) (int, error) {
		return http.StatusOK, nil
	}))
	r := httptest.NewRequest("GET", "/", nil)
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	assert.Equal(t, http.StatusOK, w.Code)
	// FIX: testify's signature is Equal(t, expected, actual); the arguments
	// were reversed, producing misleading failure output.
	assert.Equal(t, "", w.Body.String())
}
// TestFResponseInternalServerError verifies that a handler error is written
// to the response along with its status code.
func TestFResponseInternalServerError(t *testing.T) {
	router := http.NewServeMux()
	router.Handle("/", h.F(func(w http.ResponseWriter, r *http.Request) (int, error) {
		return http.StatusInternalServerError, errors.New("error happened")
	}))
	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, httptest.NewRequest("GET", "/", nil))
	assert.Equal(t, http.StatusInternalServerError, rec.Code)
	assert.Contains(t, rec.Body.String(), "error happened")
}
// TestServeHTTPResponseOK verifies that ServeHTTP writes nothing for a plain
// 200 with no error.
func TestServeHTTPResponseOK(t *testing.T) {
	r := httptest.NewRequest("GET", "/", nil)
	w := httptest.NewRecorder()
	h.ServeHTTP(w, r, http.StatusOK, nil)
	assert.Equal(t, http.StatusOK, w.Code)
	// FIX: expected value first (testify convention).
	assert.Equal(t, "", w.Body.String())
}
// TestServeHTTPResponseIgnoreLessThan200 checks that ServeHTTP writes nothing
// for StatusCreated: the recorder keeps its default 200 code.
//
// NOTE(review): the name says "less than 200" but 201 is passed — presumably
// the intent is "non-error statuses are not written"; confirm against h's
// implementation and consider renaming.
func TestServeHTTPResponseIgnoreLessThan200(t *testing.T) {
	r := httptest.NewRequest("GET", "/", nil)
	w := httptest.NewRecorder()
	h.ServeHTTP(w, r, http.StatusCreated, nil)
	assert.Equal(t, http.StatusOK, w.Code)
	// FIX: expected value first (testify convention).
	assert.Equal(t, "", w.Body.String())
}
// TestServeHTTPResponseErrorExist verifies that an error's message is written
// with the error status code.
func TestServeHTTPResponseErrorExist(t *testing.T) {
	rec := httptest.NewRecorder()
	req := httptest.NewRequest("GET", "/", nil)
	h.ServeHTTP(rec, req, http.StatusInternalServerError, errors.New("error happened"))
	assert.Equal(t, http.StatusInternalServerError, rec.Code)
	assert.Contains(t, rec.Body.String(), "error happened")
}
// TestServeHTTPResponseErrorEmpty verifies that an error status with a nil
// error still writes the status code plus a bare newline body.
func TestServeHTTPResponseErrorEmpty(t *testing.T) {
	r := httptest.NewRequest("GET", "/", nil)
	w := httptest.NewRecorder()
	h.ServeHTTP(w, r, http.StatusInternalServerError, nil)
	assert.Equal(t, http.StatusInternalServerError, w.Code)
	// FIX: expected value first (testify convention).
	assert.Equal(t, "\n", w.Body.String())
}
// TestChangeServeHTTP verifies that the package-level h.ServeHTTP hook can be
// replaced by callers.
func TestChangeServeHTTP(t *testing.T) {
	// FIX: restore the package-level hook afterwards so this test does not
	// leak its replacement into every other test in the package.
	orig := h.ServeHTTP
	defer func() { h.ServeHTTP = orig }()

	mux := http.NewServeMux()
	h.ServeHTTP = func(w http.ResponseWriter, r *http.Request, status int, err error) {
		if status == 200 {
			fmt.Fprint(w, "changed")
		}
	}
	mux.Handle("/", h.F(func(w http.ResponseWriter, r *http.Request) (int, error) {
		return http.StatusOK, nil
	}))
	r := httptest.NewRequest("GET", "/", nil)
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, r)
	assert.Equal(t, http.StatusOK, w.Code)
	// FIX: expected value first (testify convention).
	assert.Equal(t, "changed", w.Body.String())
}
|
package gateway
import (
"errors"
"net/http"
"github.com/aws/aws-lambda-go/events"
)
// APIRouter routes API Gateway proxy requests by path and HTTP method.
type APIRouter struct {
	tree map[string]ResourceMap
}

var errHandleNotFound = errors.New("handler not found")

// HandlerAPIFunc handles a single API Gateway proxy request.
type HandlerAPIFunc func(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error)

// Resource binds a handler to one path+method combination.
type Resource struct {
	handler HandlerAPIFunc
}

// ResourceMap maps an HTTP method to its Resource for one path.
type ResourceMap map[string]Resource

// NewAPIRouter returns an empty, ready-to-use router.
func NewAPIRouter() APIRouter {
	return APIRouter{tree: map[string]ResourceMap{}}
}

// add registers handler for method on path, creating the path's ResourceMap
// on first use. All public verb helpers delegate here (the original four
// methods each duplicated this logic verbatim).
func (r APIRouter) add(method, path string, handler HandlerAPIFunc) {
	if _, ok := r.tree[path]; !ok {
		r.tree[path] = ResourceMap{}
	}
	r.tree[path][method] = Resource{handler: handler}
}

// Get registers handler for GET requests on path.
func (r APIRouter) Get(path string, handler HandlerAPIFunc) {
	r.add(http.MethodGet, path, handler)
}

// Post registers handler for POST requests on path.
func (r APIRouter) Post(path string, handler HandlerAPIFunc) {
	r.add(http.MethodPost, path, handler)
}

// Put registers handler for PUT requests on path.
func (r APIRouter) Put(path string, handler HandlerAPIFunc) {
	r.add(http.MethodPut, path, handler)
}

// Delete registers handler for DELETE requests on path.
func (r APIRouter) Delete(path string, handler HandlerAPIFunc) {
	r.add(http.MethodDelete, path, handler)
}

// GetResource returns the Resource registered for path+method, or
// errHandleNotFound when nothing matches.
func (r APIRouter) GetResource(path, method string) (*Resource, error) {
	if resourceMap, ok := r.tree[path]; ok {
		if resource, ok := resourceMap[method]; ok {
			return &resource, nil
		}
	}
	return nil, errHandleNotFound
}
|
// Copyright (c) KwanJunWen
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
package estemplate
import "fmt"
// TokenFilterMultiplexer token filter that will emit multiple tokens at the same position, each
// version of the token having been run through a different filter. Identical output tokens at the
// same position will be removed.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/analysis-multiplexer-tokenfilter.html
// for details.
// TokenFilterMultiplexer token filter that will emit multiple tokens at the same position, each
// version of the token having been run through a different filter. Identical output tokens at the
// same position will be removed.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/analysis-multiplexer-tokenfilter.html
// for details.
type TokenFilterMultiplexer struct {
	TokenFilter
	name string

	// fields specific to multiplexer token filter
	filters          []string
	preserveOriginal *bool
}

// NewTokenFilterMultiplexer initializes a new TokenFilterMultiplexer.
func NewTokenFilterMultiplexer(name string) *TokenFilterMultiplexer {
	return &TokenFilterMultiplexer{name: name, filters: make([]string, 0)}
}

// Name returns field key for the Token Filter.
func (f *TokenFilterMultiplexer) Name() string {
	return f.name
}

// Filters sets a list of token filters to apply to incoming tokens. These can be any token filters
// defined elsewhere in the index mappings. Filters can be chained using comma-delimited string,
// so for example "lowercase, porter_stem" would apply `lowercase` filter and then the `porter_stem`
// filter to a single token.
func (f *TokenFilterMultiplexer) Filters(filters ...string) *TokenFilterMultiplexer {
	f.filters = append(f.filters, filters...)
	return f
}

// PreserveOriginal sets whether to emit the original token in addition to the filtered tokens.
// Defaults to true.
func (f *TokenFilterMultiplexer) PreserveOriginal(preserveOriginal bool) *TokenFilterMultiplexer {
	f.preserveOriginal = &preserveOriginal
	return f
}

// Validate validates TokenFilterMultiplexer.
func (f *TokenFilterMultiplexer) Validate(includeName bool) error {
	if includeName && f.name == "" {
		return fmt.Errorf("missing required fields or invalid values: %v", []string{"Name"})
	}
	return nil
}

// Source returns the serializable JSON for the source builder.
func (f *TokenFilterMultiplexer) Source(includeName bool) (interface{}, error) {
	// {
	// 	"test": {
	// 		"type": "multiplexer",
	// 		"filters": ["lowercase", "lowercase, porter_stem"],
	// 		"preserve_original": true
	// 	}
	// }
	options := map[string]interface{}{"type": "multiplexer"}
	if len(f.filters) > 0 {
		options["filters"] = f.filters
	}
	if f.preserveOriginal != nil {
		options["preserve_original"] = f.preserveOriginal
	}
	if includeName {
		return map[string]interface{}{f.name: options}, nil
	}
	return options, nil
}
|
package hrtime
import (
"time"
)
// nanoOverhead is the estimated cost of a single Now() call, measured by
// calculateNanosOverhead.
var nanoOverhead time.Duration

// Overhead returns approximate overhead for a call to Now() or Since()
func Overhead() time.Duration { return nanoOverhead }

// Since returns time.Duration since start
func Since(start time.Duration) time.Duration { return Now() - start }
// calculateNanosOverhead estimates the per-call cost of Now() by timing
// calibrationCalls consecutive calls. The divisor is calibrationCalls+1
// because the final stop measurement itself adds one more Now() call to the
// timed region.
func calculateNanosOverhead() {
	start := Now()
	for i := 0; i < calibrationCalls; i++ {
		Now()
	}
	stop := Now()
	nanoOverhead = (stop - start) / (calibrationCalls + 1)
}
|
package main
import (
	"log"
	"net/http"
	_ "net/http/pprof"
	"os"

	optimizer "github.com/rmanzoku/go-next-image-optimizer"
)
var (
	// version and revision are build identifiers (presumably stamped via
	// -ldflags at build time — confirm in the build scripts).
	version  = "none"
	revision = "none"
	// port is the HTTP listen port; overridden by $PORT in init().
	port = "9900"
	// imageSrc is the upstream image source; required, read from $IMAGE_SRC.
	imageSrc = ""
)
// init loads configuration from the environment: $PORT optionally overrides
// the listen port, while $IMAGE_SRC is mandatory.
func init() {
	if v := os.Getenv("PORT"); v != "" {
		port = v
	}
	src := os.Getenv("IMAGE_SRC")
	if src == "" {
		panic("IMAGE_SRC environment value is not found")
	}
	imageSrc = src
}
// main wires the optimizer handler to "/" and serves until the listener
// fails; pprof endpoints are exposed via the net/http/pprof blank import.
func main() {
	o := optimizer.NewOptimizer(imageSrc)
	http.HandleFunc("/", o.Handler)
	// FIX: ListenAndServe always returns a non-nil error on exit; previously
	// it was silently discarded and the process ended with status 0.
	log.Fatal(http.ListenAndServe(":"+port, nil))
}
|
package store
/*!
MIT License
Copyright (c) 2016 json-iterator
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// Copied from
// https://github.com/json-iterator/go/blob/master/extra/privat_fields.go
// and made possible to re-use in a particular api config
import (
"strings"
"unicode"
jsoniter "github.com/json-iterator/go"
)
// privateFieldsExtension is a jsoniter extension that makes unexported struct
// fields participate in (de)serialization.
type privateFieldsExtension struct {
	jsoniter.DummyExtension
}
// UpdateStructDescriptor rewrites the field bindings of a struct so that
// unexported (lowercase-initial) fields are bound for encoding and decoding:
// untagged private fields bind under their Go name, tagged ones defer to
// calcFieldNames for tag handling. Exported fields are left untouched.
func (extension *privateFieldsExtension) UpdateStructDescriptor(structDescriptor *jsoniter.StructDescriptor) {
	for _, binding := range structDescriptor.Fields {
		// NOTE(review): checks only the first byte; a multi-byte initial
		// rune would be misclassified — acceptable for typical Go names.
		isPrivate := unicode.IsLower(rune(binding.Field.Name()[0]))
		if isPrivate {
			tag, hastag := binding.Field.Tag().Lookup("json")
			if !hastag {
				// No tag: bind the field under its Go name as-is.
				binding.FromNames = []string{binding.Field.Name()}
				binding.ToNames = []string{binding.Field.Name()}
				continue
			}
			tagParts := strings.Split(tag, ",")
			names := calcFieldNames(binding.Field.Name(), tagParts[0], tag)
			binding.FromNames = names
			binding.ToNames = names
		}
	}
}
// calcFieldNames resolves the JSON name(s) for a struct field from its Go
// name and its `json` tag: `-` hides the field, a non-empty tag name renames
// it, and otherwise the Go name is used.
//
// BUG FIX: the version copied from jsoniter's internals ended by clearing the
// result for unexported field names. This extension exists precisely to
// (de)serialize private fields — UpdateStructDescriptor only calls this for
// private fields — so that step hid every *tagged* private field while
// untagged private fields were kept. The clearing is removed.
func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string {
	// ignore?
	if wholeTag == "-" {
		return []string{}
	}
	// rename?
	if tagProvidedFieldName == "" {
		return []string{originalFieldName}
	}
	return []string{tagProvidedFieldName}
}
|
package rcap
import (
"net/http"
"testing"
)
// TestDefaultRules verifies the default rule set allows http, https and raw
// IP hosts. Converted to table-driven form; subtest names are unchanged.
func TestDefaultRules(t *testing.T) {
	rules := defaultHTTPRules()

	cases := []struct {
		name string
		url  string
	}{
		{"http allowed", "http://example.com"},
		{"https allowed", "https://example.com"},
		{"IP allowed", "http://10.11.12.13"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			req, _ := http.NewRequest(http.MethodGet, tc.url, nil)
			if err := rules.requestIsAllowed(req); err != nil {
				t.Error("error occurred, should not have")
			}
		})
	}
}
// TestAllowedDomains verifies allowlist matching, including leading/trailing
// wildcards, ports and IP patterns. Converted to table-driven form; subtest
// names and URLs are unchanged.
func TestAllowedDomains(t *testing.T) {
	rules := defaultHTTPRules()
	rules.AllowedDomains = []string{"example.com", "another.com", "*.hello.com", "tomorrow.*", "10.*.12.13", "example.com:8080"}

	cases := []struct {
		name    string
		url     string
		allowed bool
	}{
		{"example.com:8080 allowed", "http://example.com:8080", true},
		{"example.com:8081 disallowed", "http://example.com:8081", false},
		{"example.com allowed", "http://example.com", true},
		{"another.com allowed", "http://another.com", true},
		{"wildcard allowed", "http://goodbye.hello.com", true},
		{"double wildcard allowed", "http://goodmorning.goodbye.hello.com", true},
		{"end wildcard allowed", "http://tomorrow.eu", true},
		{"double end wildcard disallowed", "http://tomorrow.co.uk", false},
		{"athird.com disallowed", "http://athird.com", false},
		{"wildcard IP allowed", "http://10.11.12.13", true},
		{"IP disallowed", "http://11.12.13.14", false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			req, _ := http.NewRequest(http.MethodGet, tc.url, nil)
			err := rules.requestIsAllowed(req)
			if tc.allowed && err != nil {
				t.Error("error occurred, should not have")
			}
			if !tc.allowed && err == nil {
				t.Error("error did not occur, should have")
			}
		})
	}
}
// TestBlockedDomains verifies blocklist matching, including wildcards.
// Converted to table-driven form; subtest names and URLs are unchanged.
func TestBlockedDomains(t *testing.T) {
	rules := defaultHTTPRules()
	rules.BlockedDomains = []string{"example.com", "another.com", "*.hello.com", "tomorrow.*"}

	cases := []struct {
		name    string
		url     string
		allowed bool
	}{
		{"example.com disallowed", "http://example.com", false},
		{"another.com disallowed", "http://another.com", false},
		{"wildcard disallowed", "http://goodbye.hello.com", false},
		{"double wildcard disallowed", "http://goodnight.goodbye.hello.com", false},
		{"end wildcard disallowed", "http://tomorrow.eu", false},
		{"double end wildcard allowed", "http://tomorrow.co.uk", true},
		{"athird.com allowed", "http://athird.com", true},
		{"IP allowed", "http://10.11.12.13", true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			req, _ := http.NewRequest(http.MethodGet, tc.url, nil)
			err := rules.requestIsAllowed(req)
			if tc.allowed && err != nil {
				t.Error("error occurred, should not have")
			}
			if !tc.allowed && err == nil {
				t.Error("error did not occur, should have")
			}
		})
	}
}
// TestDisallowedIPs verifies that AllowIPs=false blocks raw IP hosts but not
// domains. Converted to table-driven form; subtest names are unchanged.
func TestDisallowedIPs(t *testing.T) {
	rules := defaultHTTPRules()
	rules.AllowIPs = false

	cases := []struct {
		name    string
		url     string
		allowed bool
	}{
		{"IP disallowed", "http://10.11.12.13", false},
		{"domain allowed", "http://friday.com", true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			req, _ := http.NewRequest(http.MethodGet, tc.url, nil)
			err := rules.requestIsAllowed(req)
			if tc.allowed && err != nil {
				t.Error("error occurred, should not have")
			}
			if !tc.allowed && err == nil {
				t.Error("error did not occur, should have")
			}
		})
	}
}
// TestDisallowHTTP verifies that AllowHTTP=false blocks plain http while
// https still passes. Converted to table-driven form; subtest names are
// unchanged.
func TestDisallowHTTP(t *testing.T) {
	rules := defaultHTTPRules()
	rules.AllowHTTP = false

	cases := []struct {
		name    string
		url     string
		allowed bool
	}{
		{"HTTP disallowed", "http://example.com", false},
		{"HTTPS allowed", "https://friday.com", true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			req, _ := http.NewRequest(http.MethodGet, tc.url, nil)
			err := rules.requestIsAllowed(req)
			if tc.allowed && err != nil {
				t.Error("error occurred, should not have")
			}
			if !tc.allowed && err == nil {
				t.Error("error did not occur, should have")
			}
		})
	}
}
|
package simulator
import (
"encoding/json"
"log"
"sync"
"time"
"github.com/dbogatov/dac-lib/dac"
)
// transferable is anything whose simulated network transfer can be recorded:
// it reports its wire size in bytes and a short name for logging.
type transferable interface {
	size() int
	name() string
}

// CertificateSize ...
const CertificateSize = 734 // TODO http://fm4dd.com/openssl/certexamples.shtm

// globalBandwidthLock models the shared backbone: every transfer holds it for
// its "global" portion (see recordBandwidth).
var globalBandwidthLock = &sync.Mutex{}

// bandwidthLoggingLock serializes event logging and networkEventID updates.
var bandwidthLoggingLock = &sync.Mutex{}

// getLocksLock guards lazy creation of entries in connectionsMap.
var getLocksLock = &sync.Mutex{}

// networkEventID is the monotonically increasing ID assigned to logged events.
var networkEventID uint64 = 1

// connectionsMap lazily holds one mutex per endpoint name, modelling each
// endpoint's link capacity.
var connectionsMap = make(map[string]*sync.Mutex)
// recordBandwidth simulates the transfer of object between the named peers
// and logs the resulting NetworkEvent as JSON.
//
// Each endpoint has its own mutex (lazily created in connectionsMap) and all
// transfers additionally contend on globalBandwidthLock, so a transfer waits
// in parallel on the sender's link, the receiver's link, and the shared
// backbone; the elapsed time between start and end therefore reflects
// queueing behind other concurrent transfers.
func recordBandwidth(from, to string, object transferable) {
	// getLocks returns (creating on first use) the mutex modelling one
	// endpoint's link.
	getLocks := func(key string) *sync.Mutex {
		getLocksLock.Lock()
		defer getLocksLock.Unlock()
		lock, exists := connectionsMap[key]
		if !exists {
			lock = &sync.Mutex{}
			connectionsMap[key] = lock
		}
		return lock
	}
	// getWaitTime converts object size and a bandwidth figure into a sleep
	// duration (size/bandwidth seconds, expressed in milliseconds).
	getWaitTime := func(bandwidth int) time.Duration {
		return time.Duration(1000*(float64(object.size())/float64(bandwidth))) * time.Millisecond
	}
	fromLock := getLocks(from)
	toLock := getLocks(to)
	var wg sync.WaitGroup
	wg.Add(3)
	// spinWait holds lock for waitTime, serializing transfers that share a
	// link; wg tracks the three legs of this transfer.
	spinWait := func(lock *sync.Mutex, waitTime time.Duration) {
		defer wg.Done()
		lock.Lock()
		defer lock.Unlock()
		time.Sleep(waitTime)
	}
	waitTimeGlobal := getWaitTime(sysParams.BandwidthGlobal)
	waitTimeLocal := getWaitTime(sysParams.BandwidthLocal)
	start := time.Now()
	go spinWait(fromLock, waitTimeLocal)
	go spinWait(toLock, waitTimeLocal)
	go spinWait(globalBandwidthLock, waitTimeGlobal)
	wg.Wait()
	end := time.Now()
	// Serialize JSON logging and the networkEventID counter increment.
	bandwidthLoggingLock.Lock()
	event, err := json.Marshal(NetworkEvent{
		From:            from,
		To:              to,
		Object:          object.name(),
		Size:            object.size(),
		Start:           start.Format(time.RFC3339Nano),
		End:             end.Format(time.RFC3339Nano),
		LocalBandwidth:  sysParams.BandwidthLocal,
		GlobalBandwidth: sysParams.BandwidthGlobal,
		ID:              networkEventID,
	})
	if err != nil {
		panic(err)
	}
	log.Printf("%s,\n", string(event))
	logger.Debugf("%s sent %d bytes of %s to %s\n", from, object.size(), object.name(), to)
	networkEventID++
	bandwidthLoggingLock.Unlock()
}
// NetworkEvent is the JSON record emitted for every simulated transfer.
type NetworkEvent struct {
	From            string // sender endpoint name
	To              string // receiver endpoint name
	Object          string // transferable's name()
	Size            int    // bytes transferred
	Start           string // RFC3339Nano timestamp when the transfer began
	End             string // RFC3339Nano timestamp when the transfer finished
	GlobalBandwidth int
	LocalBandwidth  int
	ID              uint64 // monotonically increasing event ID
}
/// Credentials

// Credentials wraps dac.Credentials so it can be passed through
// recordBandwidth as a transferable.
type Credentials struct {
	*dac.Credentials
}

// size reports the serialized byte length of the credentials.
func (creds Credentials) size() int {
	return len(creds.ToBytes())
}

// name identifies this transferable in bandwidth logs.
func (creds Credentials) name() string {
	return "credentials"
}
/// CredRequest

// CredRequest wraps dac.CredRequest so it can be passed through
// recordBandwidth as a transferable.
type CredRequest struct {
	*dac.CredRequest
}

// size reports the serialized byte length of the request.
func (credReq CredRequest) size() int {
	return len(credReq.ToBytes())
}

// name identifies this transferable in bandwidth logs.
func (credReq CredRequest) name() string {
	return "cred-request"
}
/// Nonce

// Nonce carries raw nonce bytes between simulated parties.
type Nonce struct {
	bytes []byte
}

// size reports how many bytes the nonce occupies on the wire.
func (n Nonce) size() int {
	return len(n.bytes)
}

// name identifies this transferable in bandwidth logs.
func (n Nonce) name() string {
	return "nonce"
}
|
/*
* @lc app=leetcode id=687 lang=golang
*
* [687] Longest Univalue Path
*
* https://leetcode.com/problems/longest-univalue-path/description/
*
* algorithms
* Easy (35.88%)
* Likes: 1758
* Dislikes: 477
* Total Accepted: 94.1K
* Total Submissions: 259.8K
* Testcase Example: '[5,4,5,1,1,5]'
*
* Given a binary tree, find the length of the longest path where each node in
* the path has the same value. This path may or may not pass through the
* root.
*
* The length of path between two nodes is represented by the number of edges
* between them.
*
*
*
* Example 1:
*
* Input:
*
*
 *              5
 *             / \
 *            4   5
 *           / \   \
 *          1   1   5
 *
 *
 * Output: 2
*
*
*
* Example 2:
*
* Input:
*
*
 *              1
 *             / \
 *            4   5
 *           / \   \
 *          4   4   5
 *
 *
 * Output: 2
*
*
*
* Note: The given binary tree has not more than 10000 nodes. The height of the
* tree is not more than 1000.
*
*/
// @lc code=start
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// longestUnivaluePath returns the number of edges on the longest path in which
// every node has the same value. The path need not pass through the root.
func longestUnivaluePath(root *TreeNode) int {
	return longestUnivaluePath1(root)
}
// Tricky cases to keep in mind: [1,2,2,2,2], [1,null,1,1,1,1,1,1]

// longestUnivaluePath1 walks the tree once with arrowLength, which records the
// best same-value edge count seen so far in the shared accumulator.
func longestUnivaluePath1(root *TreeNode) int {
	best := 0
	if root != nil {
		arrowLength(root, &best)
	}
	return best
}
// arrowLength returns the length (in edges) of the longest single-direction
// same-value path starting at root, while updating *maxVal with the longest
// through-path (left arrow + right arrow) found anywhere in the subtree.
func arrowLength(root *TreeNode, maxVal *int) int {
	if root == nil {
		return 0
	}
	leftLen := arrowLength(root.Left, maxVal)
	rightLen := arrowLength(root.Right, maxVal)

	// An arrow only extends into a child whose value matches this node.
	var extendLeft, extendRight int
	if l := root.Left; l != nil && l.Val == root.Val {
		extendLeft = leftLen + 1
	}
	if r := root.Right; r != nil && r.Val == root.Val {
		extendRight = rightLen + 1
	}

	// The best path through this node joins both arrows.
	if total := extendLeft + extendRight; total > *maxVal {
		*maxVal = total
	}

	// Only one arrow can continue upward to the parent.
	if extendLeft > extendRight {
		return extendLeft
	}
	return extendRight
}
// max returns the larger of the two ints.
func max(val1, val2 int) int {
	if val2 > val1 {
		return val2
	}
	return val1
}
// @lc code=end |
package calendar
// OtherEvent is a generic calendar event identified only by its display name.
type OtherEvent struct {
	Name string // display name; returned by String
	day int
	season string
}

// Compile-time check that *OtherEvent satisfies the Event interface.
var _ Event = (*OtherEvent)(nil)

// NewOtherEvent builds an OtherEvent for the given day of the given season.
func NewOtherEvent(name string, day int, season string) *OtherEvent {
	return &OtherEvent{Name: name, day: day, season: season}
}

// Day returns the day of the season on which the event occurs.
func (e *OtherEvent) Day() int {
	return e.day
}

// Season returns the season in which the event occurs.
func (e *OtherEvent) Season() string {
	return e.season
}

// String returns the event's display name.
func (e *OtherEvent) String() string {
	return e.Name
}
|
package objects
import (
"fmt"
"github.com/jecolasurdo/marsrover/pkg/spatial"
)
// ErrRoverExpelledFromEnvironment occurs if the rover's underlaying environment
// no longer recognizes the rover as existing.
func ErrRoverExpelledFromEnvironment(rover *Rover) error {
	// Identify the rover by its ID in the message.
	const format = "rover '%v' is no longer recognised by its environment"
	return fmt.Errorf(format, rover.ID())
}
// ErrRoverIncompatibleObjectDetected is returned if a rover detects an
// incompatible object at some position within its environment.
func ErrRoverIncompatibleObjectDetected(position spatial.Point) error {
	// Fixed: the message previously misspelled "detected" as "dectected".
	return fmt.Errorf("an incompatible object was detected at position '%v'", position)
}
|
// http://jan.newmarch.name/go/socket/chapter-socket.html
// http://www.dotcoo.com/golang-net-dome
package main
import (
"bytes"
// "bufio"
"fmt"
"net"
)
// main connects to the local Unix domain socket at /tmp/socket, sends a
// greeting, and prints the server's entire reply (read until EOF).
func main() {
	conn, err := net.Dial("unix", "/tmp/socket")
	if err != nil {
		panic(err)
	}
	// Close the connection when main exits so the socket is not leaked.
	defer conn.Close()

	if _, err := fmt.Fprintf(conn, "hello server\n"); err != nil {
		panic(err)
	}

	// Read everything the server writes until it closes its end.
	var data bytes.Buffer
	if _, err := data.ReadFrom(conn); err != nil {
		panic(err)
	}
	fmt.Println("From server: ", data.String())
}
|
package stack
import "testing"
// TestFourPronged exercises the four-operation expression evaluator.
// Expressions use 'x' for multiplication.
func TestFourPronged(t *testing.T) {
	var s string
	var rs int
	// The assignments below overwrite each other: only the LAST uncommented
	// assignment to s is actually evaluated. The earlier ones are kept as
	// ready-made cases to toggle by commenting/uncommenting.
	// pass
	s = "2+3x5-7"
	// s = "2+3x5"
	// s = "9+3x8/4"
	// s = "3x8/4-10"
	s = "3x8/4-10+16x8/4"
	s = "3x8/4-10+16x3/4"
	// fail
	// NOTE(review): this case mixes '*' with 'x' for multiplication and is
	// labeled "fail" — presumably a known-unsupported input; confirm.
	s = "3x8/4-10+16x3/4*10-19"
	fp := NewFourPronged(s, len(s))
	rs = fp.Operate()
	t.Logf("%s=%d", s, rs)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package conference
import (
"context"
"fmt"
"regexp"
"strconv"
"strings"
"time"
"chromiumos/tast/common/action"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/cuj"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/mouse"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/chrome/webutil"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
)
// ZoomConference implements the Conference interface.
type ZoomConference struct {
	cr *chrome.Chrome // Chrome instance under test
	br *browser.Browser // active browser (ash or lacros); set via SetBrowser
	tconn *chrome.TestConn // test API connection
	kb *input.KeyboardEventWriter // keyboard, used for typing in chat
	ui *uiauto.Context // UI automation context
	uiHandler cuj.UIActionHandler // clamshell/tablet-specific UI action handler
	zoomConn *chrome.Conn // DevTools connection to the Zoom tab; set in Join
	displayAllParticipantsTime time.Duration // reported by DisplayAllParticipantsTime
	tabletMode bool // whether the DUT is in tablet mode
	roomType RoomType // expected room size tier (see ZoomRoomParticipants)
	networkLostCount int // reported by LostNetworkCount
	account string // Zoom account name used to sign in
	outDir string // directory for screenshots and other output
}
// Zoom has two versions of ui that need to be captured.
const (
	// Accessibility-name patterns for the camera toggle button in both UI versions.
	startVideoRegexCapture = "(Start Video|start sending my video|start my video)"
	stopVideoRegexCapture = "(Stop Video|stop sending my video|stop my video)"
	// Accessibility-name patterns for the microphone toggle button.
	muteRegexCapture = "(Mute|mute).*"
	unmuteRegexCapture = "(Unmute|unmute).*"
	// Combined patterns that match the toggle in either state.
	audioRegexCapture = "(" + muteRegexCapture + "|" + unmuteRegexCapture + ")"
	cameraRegexCapture = "(" + startVideoRegexCapture + "|" + stopVideoRegexCapture + ")"
)

// zoomWebArea is the root web area of an in-progress Zoom meeting page.
var zoomWebArea = nodewith.NameContaining("Zoom Meeting").Role(role.RootWebArea)
// Join joins a new conference room.
//
// It opens the Zoom web client, signs in when needed, grants camera/microphone
// permissions, joins the given room, verifies the participant count, and
// finally joins audio and starts video.
// NOTE(review): the toBlur parameter is not used by this implementation.
func (conf *ZoomConference) Join(ctx context.Context, room string, toBlur bool) error {
	ui := conf.ui

	openZoomAndSignIn := func(ctx context.Context) (err error) {
		// Set newWindow to true to launch zoom in the first Chrome tab.
		conf.zoomConn, err = conf.uiHandler.NewChromeTab(ctx, conf.br, cuj.ZoomURL, true)
		if err != nil {
			return errors.Wrap(err, "failed to open the zoom website")
		}
		if err := webutil.WaitForQuiescence(ctx, conf.zoomConn, mediumUITimeout); err != nil {
			// Occasionally, there is a timeout when loading the Zoom website on Lacros, but the page actually
			// has display elements. So print the error message instead of return error.
			testing.ContextLogf(ctx, "Failed to wait for %q to be loaded and achieve quiescence: %q", room, err)
		}

		zoomMainWebArea := nodewith.NameContaining("Zoom").Role(role.RootWebArea)
		zoomMainPage := nodewith.NameRegex(regexp.MustCompile("(?i)sign in|MY ACCOUNT")).Role(role.Link).Ancestor(zoomMainWebArea)
		if err := ui.WithTimeout(mediumUITimeout).WaitUntilExists(zoomMainPage)(ctx); err != nil {
			return errors.Wrap(err, "failed to load the zoom website")
		}

		// Maximize the zoom window to show all the browser UI elements for precise clicking.
		if !conf.tabletMode {
			// Find the zoom browser window.
			window, err := ash.FindWindow(ctx, conf.tconn, func(w *ash.Window) bool {
				return (w.WindowType == ash.WindowTypeBrowser || w.WindowType == ash.WindowTypeLacros) && strings.Contains(w.Title, "Zoom")
			})
			if err != nil {
				return errors.Wrap(err, "failed to find the zoom window")
			}
			if err := ash.SetWindowStateAndWait(ctx, conf.tconn, window.ID, ash.WindowStateMaximized); err != nil {
				// Just log the error and try to continue.
				testing.ContextLog(ctx, "Try to continue the test even though maximizing the zoom window failed: ", err)
			}
		}

		if err := ui.Exists(nodewith.Name("MY ACCOUNT").Role(role.Link))(ctx); err != nil {
			testing.ContextLog(ctx, "Start to sign in")
			if err := conf.zoomConn.Navigate(ctx, cuj.ZoomSignInURL); err != nil {
				return err
			}
			account := nodewith.Name(conf.account).First()
			profilePicture := nodewith.Name("Profile picture").First()
			// If the DUT has only one account, it would login to profile page directly.
			// Otherwise, it would show list of accounts.
			if err := uiauto.Combine("sign in",
				uiauto.IfSuccessThen(ui.WithTimeout(shortUITimeout).WaitUntilExists(account),
					ui.LeftClickUntil(account, ui.Gone(account))),
				ui.WaitUntilExists(profilePicture),
			)(ctx); err != nil {
				return err
			}
		} else {
			testing.ContextLog(ctx, "It has been signed in")
		}
		if err := conf.zoomConn.Navigate(ctx, room); err != nil {
			return err
		}
		return nil
	}

	// allowPerm allows camera and microphone if browser asks for the permissions.
	allowPerm := func(ctx context.Context) error {
		unableButton := nodewith.NameContaining("Unable to play media.").Role(role.Video)
		// If there is an unable button, it will display a alert dialog to allow permission.
		if err := ui.WithTimeout(shortUITimeout).WaitUntilGone(unableButton)(ctx); err != nil {
			avPerm := nodewith.NameRegex(regexp.MustCompile(".*Use your (microphone|camera).*")).ClassName("RootView").Role(role.AlertDialog).First()
			allowButton := nodewith.Name("Allow").Role(role.Button).Ancestor(avPerm)
			if err := ui.WaitUntilExists(avPerm)(ctx); err == nil {
				if err := uiauto.NamedCombine("allow microphone and camera permissions",
					// Immediately clicking the allow button sometimes doesn't work. Sleep 2 seconds.
					uiauto.Sleep(2*time.Second),
					ui.LeftClick(allowButton),
					ui.WaitUntilGone(avPerm),
				)(ctx); err != nil {
					return err
				}
			} else {
				testing.ContextLog(ctx, "No action is required to allow microphone and camera")
			}
		}
		return allowPagePermissions(conf.tconn)(ctx)
	}

	// checkParticipantsNum verifies the number of participants in the conference;
	// different tiers of testing expect a different room size.
	checkParticipantsNum := func(ctx context.Context) error {
		expectedParticipants := ZoomRoomParticipants[conf.roomType]
		participants, err := conf.GetParticipants(ctx)
		if err != nil {
			return errors.Wrap(err, "failed to get the number of meeting participants")
		}
		if int(participants) != expectedParticipants {
			// Use Errorf here: err is nil at this point, so the previous
			// errors.Wrapf(err, ...) returned nil and silently hid the mismatch.
			return errors.Errorf("meeting participant number is %d but %d is expected", participants, expectedParticipants)
		}
		testing.ContextLog(ctx, "Current participants: ", participants)
		return nil
	}

	joinAudio := func(ctx context.Context) error {
		audioButton := nodewith.NameRegex(regexp.MustCompile(audioRegexCapture)).Role(role.Button).Focusable()
		// Not every room will automatically join audio.
		// If there is no automatic join audio, do join audio action.
		if err := ui.WaitUntilExists(audioButton)(ctx); err == nil {
			testing.ContextLog(ctx, "It has automatically joined audio")
			return nil
		}
		joinAudioButton := nodewith.Name("Join Audio by Computer").Role(role.Button)
		testing.ContextLog(ctx, "Join Audio by Computer")
		return ui.WithTimeout(mediumUITimeout).LeftClickUntil(joinAudioButton, ui.WithTimeout(shortUITimeout).WaitUntilGone(joinAudioButton))(ctx)
	}

	startVideo := func(ctx context.Context) error {
		cameraButton := nodewith.NameRegex(regexp.MustCompile(cameraRegexCapture)).Role(role.Button)
		startVideoButton := nodewith.NameRegex(regexp.MustCompile(startVideoRegexCapture)).Role(role.Button)
		stopVideoButton := nodewith.NameRegex(regexp.MustCompile(stopVideoRegexCapture)).Role(role.Button)
		// Start video requires camera permission.
		// Allow permission doesn't succeed every time. So add retry here.
		return ui.Retry(retryTimes, uiauto.NamedCombine("start video",
			conf.showInterface,
			uiauto.NamedAction("to detect camera button within 15 seconds", ui.WaitUntilExists(cameraButton)),
			// Some DUTs start playing video for the first time.
			// If there is a stop video button, do nothing.
			uiauto.IfSuccessThen(ui.Exists(startVideoButton),
				ui.LeftClickUntil(startVideoButton, ui.WithTimeout(shortUITimeout).WaitUntilGone(startVideoButton))),
			ui.WaitUntilExists(stopVideoButton),
		))(ctx)
	}

	joinButton := nodewith.Name("Join").Role(role.Button)
	video := nodewith.Role(role.Video)
	joinFromYourBrowser := nodewith.Name("Join from Your Browser").Role(role.StaticText)
	// There are two types of cookie accept dialogs: "ACCEPT COOKIES" and "ACCEPT ALL COOKIES".
	acceptCookiesButton := nodewith.NameRegex(regexp.MustCompile("ACCEPT.*COOKIES")).Role(role.Button)
	// In Zoom website, the join button may be hidden in tablet mode.
	// Make it visible before clicking.
	// Since ui.MakeVisible() is not always successful, add a retry here.
	clickJoinButton := ui.Retry(retryTimes, uiauto.Combine("click join button",
		ui.WaitForLocation(joinButton),
		ui.MakeVisible(joinButton),
		ui.LeftClickUntil(joinButton, ui.WithTimeout(shortUITimeout).WaitUntilGone(joinButton)),
	))
	waitForZoomPageToLoad := func(ctx context.Context) error {
		// Use 1 minute timeout value because it may take longer to wait for page loading,
		// especially for some low end DUTs.
		if err := ui.WithTimeout(longUITimeout).WaitUntilExists(zoomWebArea)(ctx); err != nil {
			noPermissionText := nodewith.Name("No permission. (200)").Role(role.StaticText)
			if ui.Exists(noPermissionText)(ctx) == nil {
				return errors.Wrap(err, `the "No Permission" problem is displayed, zoom account may require re-registration`)
			}
			return err
		}
		return nil
	}
	return uiauto.NamedCombine("join conference",
		openZoomAndSignIn,
		ui.WaitUntilExists(joinFromYourBrowser),
		uiauto.IfSuccessThen(ui.WithTimeout(shortUITimeout).WaitUntilExists(acceptCookiesButton),
			ui.LeftClickUntil(acceptCookiesButton, ui.WithTimeout(shortUITimeout).WaitUntilGone(acceptCookiesButton))),
		ui.LeftClick(joinFromYourBrowser),
		ui.WithTimeout(longUITimeout).WaitUntilExists(joinButton),
		ui.WaitUntilExists(video),
		allowPerm,
		clickJoinButton,
		waitForZoomPageToLoad,
		// Sometimes participants number caught at the beginning is wrong, it will be correct after a while.
		// Add retry to get the correct participants number.
		ui.WithInterval(time.Second).Retry(10, checkParticipantsNum),
		ui.Retry(retryTimes, joinAudio),
		startVideo,
	)(ctx)
}
// GetParticipants returns the number of meeting participants.
//
// The count is parsed out of the accessibility name of the "participants list
// pane" button, which embeds it in square brackets (e.g. "... [3] ...").
func (conf *ZoomConference) GetParticipants(ctx context.Context) (int, error) {
	ui := conf.ui
	participant := nodewith.NameContaining("open the participants list pane").Role(role.Button)
	// NOTE(review): "particpants" looks misspelled, but this string must match
	// Zoom's actual UI label — confirm against the live UI before changing it.
	noParticipant := nodewith.NameContaining("[0] particpants").Role(role.Button)
	// Wait until the button exists and no longer reports zero participants.
	if err := uiauto.NamedCombine("wait participants",
		ui.WaitUntilExists(participant),
		ui.WithTimeout(mediumUITimeout).WaitUntilGone(noParticipant),
	)(ctx); err != nil {
		return 0, errors.Wrap(err, "failed to wait participant info")
	}
	node, err := ui.Info(ctx, participant)
	if err != nil {
		return 0, errors.Wrap(err, "failed to get participant info")
	}
	testing.ContextLog(ctx, "Get participant info: ", node.Name)
	// Extract the text between the first '[' and the following ']'.
	info := strings.Split(node.Name, "[")
	info = strings.Split(info[1], "]")
	participants, err := strconv.ParseInt(info[0], 10, 64)
	if err != nil {
		return 0, errors.Wrap(err, "cannot parse number of participants")
	}
	return int(participants), nil
}
// SetLayoutMax sets the conference UI layout to max tiled grid.
func (conf *ZoomConference) SetLayoutMax(ctx context.Context) error {
	// Switch to "Gallery View", then hold it for 5 seconds of viewing before
	// the next layout change.
	setLayout := conf.changeLayout("Gallery View")
	pause := uiauto.Sleep(viewingTime)
	return uiauto.Combine("set layout to max", setLayout, pause)(ctx)
}
// SetLayoutMin sets the conference UI layout to minimal tiled grid.
func (conf *ZoomConference) SetLayoutMin(ctx context.Context) error {
	// Switch to "Speaker View", then hold it for 5 seconds of viewing before
	// the next layout change.
	setLayout := conf.changeLayout("Speaker View")
	pause := uiauto.Sleep(viewingTime)
	return uiauto.Combine("set layout to minimal", setLayout, pause)(ctx)
}
// changeLayout changes the conference UI layout.
// mode is the Zoom view menu item to select, e.g. "Gallery View" or "Speaker View".
func (conf *ZoomConference) changeLayout(mode string) action.Action {
	return func(ctx context.Context) error {
		ui := conf.ui
		viewButton := nodewith.Name("View").Role(role.Button)
		viewMenu := nodewith.Role(role.Menu).HasClass("dropdown-menu")
		speakerNode := nodewith.Name("Speaker View").Role(role.MenuItem)
		// Sometimes the zoom's menu disappears too fast. Add retry to check whether the device supports
		// speaker and gallery view.
		if err := uiauto.Combine("check view button",
			conf.showInterface,
			ui.LeftClickUntil(viewButton, ui.WithTimeout(shortUITimeout).WaitUntilExists(viewMenu)),
			ui.WithTimeout(shortUITimeout).WaitUntilExists(speakerNode),
		)(ctx); err != nil {
			// Some DUTs don't support 'Speaker View' and 'Gallery View'.
			// Treat the missing menu entry as "not supported" and succeed
			// without changing the layout.
			testing.ContextLog(ctx, "Speaker and Gallery View is not supported on this device, ignore changing the layout")
			return nil
		}
		modeNode := nodewith.Name(mode).Role(role.MenuItem)
		actionName := "Change layout to " + mode
		// The view menu may already be open from the check above; only reopen
		// it when the target menu item is not currently shown.
		return ui.Retry(retryTimes, uiauto.NamedCombine(actionName,
			conf.showInterface,
			uiauto.IfSuccessThen(ui.Gone(modeNode), ui.LeftClick(viewButton)),
			ui.LeftClick(modeNode),
		))(ctx)
	}
}
// VideoAudioControl controls the video and audio during conference.
// It toggles the camera twice and the microphone twice, so each ends in its
// original state.
func (conf *ZoomConference) VideoAudioControl(ctx context.Context) error {
	ui := conf.ui
	// toggleVideo flips the camera state once, whichever state it is in.
	toggleVideo := func(ctx context.Context) error {
		cameraButton := nodewith.NameRegex(regexp.MustCompile(cameraRegexCapture)).Role(role.Button).Focusable()
		info, err := ui.Info(ctx, cameraButton)
		if err != nil {
			return errors.Wrap(err, "failed to wait for the meet camera switch button to show")
		}
		// The button's current name reveals the toggle direction; this check
		// is only used for logging.
		startVideoButton := nodewith.NameRegex(regexp.MustCompile(startVideoRegexCapture)).Role(role.Button).Focusable()
		if err := ui.Exists(startVideoButton)(ctx); err == nil {
			testing.ContextLog(ctx, "Turn camera from off to on")
		} else {
			testing.ContextLog(ctx, "Turn camera from on to off")
		}
		// Click the button by its exact captured name and wait for that name
		// to go away, which signals the state actually changed.
		nowCameraButton := nodewith.Name(info.Name).Role(role.Button).Focusable()
		if err := ui.WithTimeout(mediumUITimeout).DoDefaultUntil(nowCameraButton, ui.WaitUntilGone(nowCameraButton))(ctx); err != nil {
			return errors.Wrap(err, "failed to switch camera")
		}
		return nil
	}
	// toggleAudio flips the microphone state once, mirroring toggleVideo.
	toggleAudio := func(ctx context.Context) error {
		audioButton := nodewith.NameRegex(regexp.MustCompile(audioRegexCapture)).Role(role.Button).Focusable()
		info, err := ui.Info(ctx, audioButton)
		if err != nil {
			return errors.Wrap(err, "failed to wait for the meet microphone switch button to show")
		}
		unmuteButton := nodewith.NameRegex(regexp.MustCompile(unmuteRegexCapture)).Role(role.Button).Focusable()
		if err := ui.Exists(unmuteButton)(ctx); err == nil {
			testing.ContextLog(ctx, "Turn microphone from mute to unmute")
		} else {
			testing.ContextLog(ctx, "Turn microphone from unmute to mute")
		}
		nowAudioButton := nodewith.Name(info.Name).Role(role.Button).Focusable()
		if err := ui.WithTimeout(mediumUITimeout).DoDefaultUntil(nowAudioButton, ui.WaitUntilGone(nowAudioButton))(ctx); err != nil {
			return errors.Wrap(err, "failed to switch microphone")
		}
		return nil
	}
	return uiauto.Combine("toggle video and audio",
		// Remain in the state for 5 seconds after each action.
		toggleVideo, uiauto.Sleep(viewingTime),
		toggleVideo, uiauto.Sleep(viewingTime),
		toggleAudio, uiauto.Sleep(viewingTime),
		toggleAudio, uiauto.Sleep(viewingTime),
	)(ctx)
}
// SwitchTabs switches the chrome tabs.
// It opens a Wikipedia tab in the same window, dwells on it briefly, then
// switches back to the Zoom tab.
func (conf *ZoomConference) SwitchTabs(ctx context.Context) error {
	testing.ContextLog(ctx, "Open wiki page")
	// Set newWindow to false to make the tab in the same Chrome window.
	wikiConn, err := conf.uiHandler.NewChromeTab(ctx, conf.br, cuj.WikipediaURL, false)
	if err != nil {
		return errors.Wrap(err, "failed to open the wiki url")
	}
	defer wikiConn.Close()
	if err := webutil.WaitForQuiescence(ctx, wikiConn, longUITimeout); err != nil {
		return errors.Wrap(err, "failed to wait for wiki page to finish loading")
	}
	return uiauto.Combine("switch tab",
		uiauto.NamedAction("stay wiki page for 3 seconds", uiauto.Sleep(3*time.Second)),
		uiauto.NamedAction("switch to zoom tab", conf.uiHandler.SwitchToChromeTabByName("Zoom")),
	)(ctx)
}
// TypingInChat opens the chat pane, types a message, waits for it to appear,
// and then closes the pane again.
func (conf *ZoomConference) TypingInChat(ctx context.Context) error {
	const message = "Hello! How are you?"
	// Close all notifications to prevent them from covering the chat text field.
	if err := ash.CloseNotifications(ctx, conf.tconn); err != nil {
		// Fixed error message: previously read "close otifications".
		return errors.Wrap(err, "failed to close notifications")
	}
	chatButton := nodewith.Name("open the chat pane").Role(role.Button)
	chatTextField := nodewith.Name("Type message here ...").Role(role.TextField)
	messageText := nodewith.Name(message).Role(role.StaticText).First()
	manageChatPanel := nodewith.Name("Manage Chat Panel").Role(role.PopUpButton)
	manageChatPanelMenu := nodewith.Name("Manage Chat Panel").Role(role.Menu)
	closeButton := nodewith.Name("Close").Role(role.MenuItem).Ancestor(manageChatPanelMenu)
	// typeMessage focuses the text field, selects any existing content
	// (Ctrl+A) so typing replaces it, sends the message with Enter, and waits
	// for the message to show up in the chat history.
	typeMessage := uiauto.NamedCombine("type message : "+message,
		conf.ui.LeftClickUntil(chatTextField, conf.ui.WithTimeout(shortUITimeout).WaitUntilExists(chatTextField.Focused())),
		conf.kb.AccelAction("Ctrl+A"),
		conf.kb.TypeAction(message),
		conf.kb.AccelAction("enter"),
		conf.ui.WaitUntilExists(messageText))
	return uiauto.NamedCombine("open chat window and type",
		conf.ui.DoDefault(chatButton),
		conf.ui.WaitUntilExists(chatTextField),
		conf.ui.Retry(retryTimes, typeMessage),
		uiauto.Sleep(viewingTime), // After typing, wait 5 seconds for viewing.
		conf.ui.LeftClick(manageChatPanel),
		conf.ui.LeftClick(closeButton),
	)(ctx)
}
// BackgroundChange changes the background to patterned background and reset to none.
//
// Zoom doesn't have background blur option for web version so changing background is used to fulfill
// the requirement.
func (conf *ZoomConference) BackgroundChange(ctx context.Context) error {
	const (
		noneBackground   = "None"
		staticBackground = "San Francisco.jpg"
	)
	ui := conf.ui
	// changeBackground opens the settings "Background" panel, selects the given
	// option, takes a screenshot, and round-trips full screen for viewing.
	changeBackground := func(backgroundOption string) error {
		settingsButton := nodewith.Name("Settings").Role(role.Button).Ancestor(zoomWebArea)
		settingsWindow := nodewith.Name("settings dialog window").Role(role.Application).Ancestor(zoomWebArea)
		backgroundTab := nodewith.Name("Background").Role(role.Tab).Ancestor(settingsWindow)
		backgroundItem := nodewith.NameContaining(backgroundOption).Role(role.ListBoxOption).Ancestor(settingsWindow)
		closeButton := nodewith.Role(role.Button).HasClass("settings-dialog__close").Ancestor(settingsWindow)
		openBackgroundPanel := func(ctx context.Context) error {
			var actions []action.Action
			if err := conf.showInterface(ctx); err != nil {
				return err
			}
			if err := ui.Exists(settingsButton)(ctx); err == nil {
				actions = append(actions,
					uiauto.NamedAction("click settings button",
						ui.WithTimeout(longUITimeout).DoDefaultUntil(settingsButton, ui.WaitUntilExists(backgroundTab)),
					))
			} else {
				// If the screen width is not enough, the settings button will be moved to more options.
				moreOptions := nodewith.Name("More meeting control").Ancestor(zoomWebArea)
				moreSettingsButton := nodewith.Name("Settings").Role(role.MenuItem).Ancestor(zoomWebArea)
				actions = append(actions,
					uiauto.NamedAction("click more option", ui.LeftClick(moreOptions)),
					uiauto.NamedAction("click settings menu item", ui.LeftClick(moreSettingsButton)),
				)
			}
			actions = append(actions, ui.LeftClick(backgroundTab))
			if err := uiauto.Combine("open background panel", actions...)(ctx); err != nil {
				// Fixed wrap message: previously read "failed to background panel".
				return errors.Wrap(err, "failed to open the background panel")
			}
			return nil
		}
		return uiauto.NamedCombine("change background to "+backgroundOption,
			ui.Retry(retryTimes, openBackgroundPanel), // Open "Background" panel.
			// Some low end DUTs need more time to load the background settings.
			ui.WithTimeout(longUITimeout).DoDefaultUntil(backgroundItem,
				ui.WithTimeout(shortUITimeout).WaitUntilExists(backgroundItem.Focused())),
			// After applying the new background, give it 3 seconds to load the new background before closing the settings.
			uiauto.Sleep(shortUITimeout),
			ui.LeftClick(closeButton), // Close "Background" panel.
			takeScreenshot(conf.cr, conf.outDir, fmt.Sprintf("change-background-to-background-%q", backgroundOption)),
			// Double click to enter full screen.
			doFullScreenAction(conf.tconn, ui.DoubleClick(zoomWebArea), "Zoom", true),
			// After applying new background, give it 5 seconds for viewing before applying next one.
			uiauto.Sleep(viewingTime),
			// Double click to exit full screen.
			doFullScreenAction(conf.tconn, ui.DoubleClick(zoomWebArea), "Zoom", false),
		)(ctx)
	}
	if err := conf.uiHandler.SwitchToChromeTabByName("Zoom")(ctx); err != nil {
		return CheckSignedOutError(ctx, conf.tconn, errors.Wrap(err, "failed to switch to zoom page"))
	}
	if err := changeBackground(staticBackground); err != nil {
		return errors.Wrap(err, "failed to change background to static background")
	}
	if err := changeBackground(noneBackground); err != nil {
		return errors.Wrap(err, "failed to change background to none")
	}
	return nil
}
// Presenting creates Google Slides and Google Docs, shares screen and presents
// the specified application to the conference.
func (conf *ZoomConference) Presenting(ctx context.Context, application googleApplication) (err error) {
	tconn := conf.tconn
	ui := uiauto.New(tconn)

	// Map the requested application to the Chrome tab name used to pick the
	// tab in Zoom's share dialog.
	var appTabName string
	switch application {
	case googleSlides:
		appTabName = slideTabName
	case googleDocs:
		appTabName = docTabName
	}

	// shareScreen shares screen by "Chrome Tab" and selects the tab which is going to present.
	shareScreen := func(ctx context.Context) error {
		shareScreenButton := nodewith.Name("Share Screen").Role(role.StaticText)
		presenMode := nodewith.Name("Chrome Tab").Role(role.Tab).ClassName("Tab")
		presentTab := nodewith.ClassName("AXVirtualView").Role(role.Cell).Name(appTabName)
		shareButton := nodewith.Name("Share").Role(role.Button)
		stopSharing := nodewith.Name("Stop sharing").Role(role.Button).First()
		return uiauto.NamedCombine("share Screen",
			conf.uiHandler.SwitchToChromeTabByName("Zoom"),
			conf.showInterface,
			ui.LeftClickUntil(shareScreenButton, ui.WithTimeout(shortUITimeout).WaitUntilExists(presenMode)),
			ui.LeftClick(presenMode),
			ui.LeftClick(presentTab),
			ui.LeftClick(shareButton),
			ui.WithTimeout(mediumUITimeout).WaitUntilExists(stopSharing),
		)(ctx)
	}
	// stopPresenting clicks "Stop sharing" until the button disappears.
	stopPresenting := func(ctx context.Context) error {
		stopSharing := nodewith.Name("Stop sharing").Role(role.Button).First()
		return ui.LeftClickUntil(stopSharing, ui.WithTimeout(shortUITimeout).WaitUntilGone(stopSharing))(ctx)
	}

	// Present on internal display by default.
	presentOnExtendedDisplay := false
	if err := presentApps(ctx, tconn, conf.uiHandler, conf.cr, conf.br, shareScreen, stopPresenting,
		application, conf.outDir, presentOnExtendedDisplay); err != nil {
		return errors.Wrapf(err, "failed to present %s", string(application))
	}
	return nil
}
// End closes all windows in the end.
func (conf *ZoomConference) End(ctx context.Context) error {
	// Close every open window (not just the Zoom tab) so the DUT is left in a
	// clean state for the next test.
	return cuj.CloseAllWindows(ctx, conf.tconn)
}
// CloseConference closes the conference.
// It tears down the Zoom meeting tab first, then releases the DevTools
// connection to that tab.
func (conf *ZoomConference) CloseConference(ctx context.Context) error {
	err := conf.zoomConn.CloseTarget(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to close target")
	}
	err = conf.zoomConn.Close()
	if err != nil {
		return errors.Wrap(err, "failed to close connection")
	}
	return nil
}
// Compile-time check that *ZoomConference satisfies the Conference interface.
var _ Conference = (*ZoomConference)(nil)
// showInterface moves mouse or taps in web area in order to make the menu interface reappear.
func (conf *ZoomConference) showInterface(ctx context.Context) error {
	ui := conf.ui
	// The "Meeting information" button is only present while the meeting
	// toolbar is shown, so it serves as the visibility probe.
	information := nodewith.Name("Meeting information").Role(role.Button).Ancestor(zoomWebArea)
	return testing.Poll(ctx, func(ctx context.Context) error {
		// Nothing to do if the interface is already visible.
		if err := ui.Exists(information)(ctx); err == nil {
			return nil
		}
		if conf.tabletMode {
			testing.ContextLog(ctx, "Tap web area to show interface")
			if err := conf.uiHandler.Click(zoomWebArea)(ctx); err != nil {
				return errors.Wrap(err, "failed to click the web area")
			}
		} else {
			testing.ContextLog(ctx, "Mouse move to show interface")
			webAreaInfo, err := ui.Info(ctx, zoomWebArea)
			if err != nil {
				return err
			}
			// Wiggle the cursor: top-left corner first, then the center of the
			// web area, to trigger the auto-hiding toolbar.
			if err := mouse.Move(conf.tconn, webAreaInfo.Location.TopLeft(), 200*time.Millisecond)(ctx); err != nil {
				return errors.Wrap(err, "failed to move mouse to top left corner of the web area")
			}
			if err := ui.MouseMoveTo(zoomWebArea, 200*time.Millisecond)(ctx); err != nil {
				return errors.Wrap(err, "failed to move mouse to the center of the web area")
			}
		}
		if err := ui.WaitUntilExists(information)(ctx); err != nil {
			return err
		}
		return nil
	}, &testing.PollOptions{Timeout: mediumUITimeout})
}
// SetBrowser sets browser to chrome or lacros.
// Join uses this browser to open new tabs, so it must be set before Join.
func (conf *ZoomConference) SetBrowser(br *browser.Browser) {
	conf.br = br
}
// LostNetworkCount returns the count of lost network connections.
func (conf *ZoomConference) LostNetworkCount() int {
	// NOTE(review): networkLostCount is never incremented in this file; it may
	// be updated elsewhere or always zero — verify.
	return conf.networkLostCount
}
// DisplayAllParticipantsTime returns the loading time for displaying all participants.
func (conf *ZoomConference) DisplayAllParticipantsTime() time.Duration {
	// NOTE(review): displayAllParticipantsTime is never assigned in this file —
	// confirm it is populated elsewhere.
	return conf.displayAllParticipantsTime
}
// NewZoomConference creates Zoom conference room instance which implements Conference interface.
// The browser (br) is not set here; callers provide it later via SetBrowser.
func NewZoomConference(cr *chrome.Chrome, tconn *chrome.TestConn, kb *input.KeyboardEventWriter,
	uiHandler cuj.UIActionHandler, tabletMode bool, roomType RoomType, account, outDir string) *ZoomConference {
	conf := &ZoomConference{
		cr:         cr,
		tconn:      tconn,
		kb:         kb,
		ui:         uiauto.New(tconn),
		uiHandler:  uiHandler,
		tabletMode: tabletMode,
		roomType:   roomType,
		account:    account,
		outDir:     outDir,
	}
	return conf
}
|
package main //linter warns here???
import (
"bufio"
"fmt"
"log"
"os"
"strconv"
"strings"
)
// main solves Advent of Code 2021 day 2: it reads submarine commands from
// input.txt and prints the part 1 and part 2 answers (horizontal * depth).
func main() {
	file, err := os.Open("input.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	// depth follows the part 1 rules; newDepth/aim follow the part 2 rules.
	horizontal, depth, newDepth, aim := 0, 0, 0, 0
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		row := scanner.Text()
		parts := strings.Split(row, " ")
		if len(parts) < 2 {
			// Skip malformed lines instead of panicking on parts[1].
			continue
		}
		value, err := strconv.Atoi(parts[1])
		if err != nil {
			// Previously this error was silently discarded, leaving value == 0.
			log.Fatalf("invalid value %q: %v", parts[1], err)
		}
		switch parts[0] {
		case "forward":
			horizontal += value
			newDepth += aim * value
		case "up":
			depth -= value
			aim -= value
		case "down":
			depth += value
			aim += value
		}
	}
	// Surface read errors; the Scan loop alone does not report them.
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Horizontal position: %d\n", depth*horizontal)
	fmt.Printf("Horizontal position part 2: %d\n", newDepth*horizontal)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package taskmanager contains local Tast tests that exercise task manager.
package taskmanager
|
package buildkit
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/loft-sh/devspace/pkg/devspace/pipeline/env"
"mvdan.cc/sh/v3/expand"
devspacecontext "github.com/loft-sh/devspace/pkg/devspace/context"
command2 "github.com/loft-sh/utils/pkg/command"
cliconfig "github.com/docker/cli/cli/config"
"github.com/docker/docker/api/types"
"github.com/loft-sh/devspace/pkg/devspace/build/builder/helper"
"github.com/loft-sh/devspace/pkg/devspace/config/versions/latest"
dockerpkg "github.com/loft-sh/devspace/pkg/devspace/docker"
"github.com/loft-sh/devspace/pkg/devspace/kubectl"
logpkg "github.com/loft-sh/devspace/pkg/util/log"
"github.com/pkg/errors"
"k8s.io/client-go/tools/clientcmd"
)
// EngineName is the name of the building engine
const EngineName = "buildkit"

// Builder holds the necessary information to build and push docker images
type Builder struct {
	helper *helper.BuildHelper // shared build helper (tags, context stream, rebuild checks)
	skipPush bool // when true, built images are not pushed
	skipPushOnLocalKubernetes bool // skip pushing when targeting a local cluster
}
// NewBuilder creates a new docker Builder instance
func NewBuilder(ctx devspacecontext.Context, imageConf *latest.Image, imageTags []string, skipPush, skipPushOnLocalKubernetes bool) (*Builder, error) {
	// Make sure the in-cluster build namespace exists before anything else.
	if bk := imageConf.BuildKit; bk != nil && bk.InCluster != nil && bk.InCluster.Namespace != "" {
		if err := kubectl.EnsureNamespace(ctx.Context(), ctx.KubeClient(), bk.InCluster.Namespace, ctx.Log()); err != nil {
			return nil, err
		}
	}

	builder := &Builder{
		helper:                    helper.NewBuildHelper(ctx, EngineName, imageConf, imageTags),
		skipPush:                  skipPush,
		skipPushOnLocalKubernetes: skipPushOnLocalKubernetes,
	}
	return builder, nil
}
// Build implements the interface
func (b *Builder) Build(ctx devspacecontext.Context) error {
	// Delegate orchestration to the shared helper; presumably it calls back
	// into this builder's BuildImage — confirm in helper.BuildHelper.
	return b.helper.Build(ctx, b)
}
// ShouldRebuild determines if an image has to be rebuilt
func (b *Builder) ShouldRebuild(ctx devspacecontext.Context, forceRebuild bool) (bool, error) {
	// Check if image is present in local registry
	imageCache, _ := ctx.Config().LocalCache().GetImageCache(b.helper.ImageConf.Name)
	imageName := imageCache.ResolveImage() + ":" + imageCache.Tag
	rebuild, err := b.helper.ShouldRebuild(ctx, forceRebuild)

	// Check if image is present in local docker daemon
	// NOTE(review): b.helper.ImageConf.BuildKit is dereferenced below without a
	// nil check (NewBuilder only checks it conditionally) — confirm callers
	// always supply a BuildKit config.
	if !rebuild && err == nil && b.helper.ImageConf.BuildKit.InCluster == nil {
		if b.skipPushOnLocalKubernetes && ctx.KubeClient() != nil && kubectl.IsLocalKubernetes(ctx.KubeClient()) {
			dockerClient, err := dockerpkg.NewClientWithMinikube(ctx.Context(), ctx.KubeClient(), b.helper.ImageConf.BuildKit.PreferMinikube == nil || *b.helper.ImageConf.BuildKit.PreferMinikube, ctx.Log())
			if err != nil {
				return false, err
			}

			// Best-effort probe: an error from the availability check is
			// swallowed here and we fall through to the helper's verdict.
			found, err := b.helper.IsImageAvailableLocally(ctx, dockerClient)
			if !found && err == nil {
				ctx.Log().Infof("Rebuild image %s because it was not found in local docker daemon", imageName)
				return true, nil
			}
		}
	}

	return rebuild, err
}
// BuildImage builds a docker image with the buildx CLI.
// contextPath is the absolute path to the build context;
// dockerfilePath is the absolute path to the Dockerfile WITHIN the contextPath.
func (b *Builder) BuildImage(ctx devspacecontext.Context, contextPath, dockerfilePath string, entrypoint []string, cmd []string) error {
	buildKitConfig := b.helper.ImageConf.BuildKit

	// create the builder (returns "" for plain local builds)
	builder, err := ensureBuilder(ctx.Context(), ctx.WorkingDir(), ctx.Environ(), ctx.KubeClient(), buildKitConfig, ctx.Log())
	if err != nil {
		return err
	}

	// create the context stream
	body, writer, _, buildOptions, err := b.helper.CreateContextStream(contextPath, dockerfilePath, entrypoint, cmd, ctx.Log())
	if err != nil {
		// FIX: the original deferred writer.Close() BEFORE this error check,
		// which panics with a nil pointer dereference if CreateContextStream
		// fails and returns a nil writer.
		if writer != nil {
			writer.Close()
		}
		return err
	}
	defer writer.Close()

	// We skip pushing when it is a local kubernetes cluster
	usingLocalKubernetes := ctx.KubeClient() != nil && kubectl.IsLocalKubernetes(ctx.KubeClient())
	if b.skipPushOnLocalKubernetes && usingLocalKubernetes {
		b.skipPush = true
	}

	// Should we use the minikube docker daemon?
	useMinikubeDocker := false
	if ctx.KubeClient() != nil && kubectl.IsMinikubeKubernetes(ctx.KubeClient()) && (buildKitConfig.PreferMinikube == nil || *buildKitConfig.PreferMinikube) {
		useMinikubeDocker = true
	}

	// Should we skip the push step entirely?
	skipPush := b.skipPush || b.helper.ImageConf.SkipPush
	return buildWithCLI(ctx.Context(), ctx.WorkingDir(), ctx.Environ(), body, writer, ctx.KubeClient(), builder, buildKitConfig, *buildOptions, useMinikubeDocker, skipPush, ctx.Log())
}
// buildWithCLI assembles and executes the `docker buildx build` command for
// the given build options, streaming the tar context on stdin. When skipPush
// is set and the kube context is a kind cluster, the built tags are loaded
// into the kind cluster afterwards.
// NOTE(review): the parameter `context` (an io.Reader) shadows the stdlib
// context package name inside this function; the ctx parameter was captured
// before, so this compiles, but it is easy to misread.
func buildWithCLI(ctx context.Context, dir string, environ expand.Environ, context io.Reader, writer io.Writer, kubeClient kubectl.Client, builder string, imageConf *latest.BuildKitConfig, options types.ImageBuildOptions, useMinikubeDocker, skipPush bool, log logpkg.Logger) error {
	// Allow the configured command to override the default buildx invocation.
	command := []string{"docker", "buildx"}
	if len(imageConf.Command) > 0 {
		command = imageConf.Command
	}
	args := []string{"build"}
	if options.BuildArgs != nil {
		for k, v := range options.BuildArgs {
			if v == nil {
				continue
			}
			args = append(args, "--build-arg", k+"="+*v)
		}
	}
	if options.NetworkMode != "" {
		args = append(args, "--network", options.NetworkMode)
	}
	for _, tag := range options.Tags {
		args = append(args, "--tag", tag)
	}
	if !skipPush {
		if len(options.Tags) > 0 {
			args = append(args, "--push")
		}
	} else if builder != "" {
		// With a remote builder and no push, load the image back into the
		// local daemon unless explicitly disabled.
		if imageConf.InCluster == nil || !imageConf.InCluster.NoLoad {
			args = append(args, "--load")
		}
	}
	if options.Dockerfile != "" {
		args = append(args, "--file", options.Dockerfile)
	}
	if options.Target != "" {
		args = append(args, "--target", options.Target)
	}
	if builder != "" {
		tempFile, err := tempKubeContextFromClient(kubeClient)
		if err != nil {
			return err
		}
		defer os.Remove(tempFile)
		args = append(args, "--builder", builder)
		// TODO: find a better solution than this
		// we wait here a little bit, otherwise it might be possible that we get issues during
		// parallel image building, as it seems that docker buildx has problems if the
		// same builder is used at the same time for multiple builds and the BuildKit deployment
		// is created in parallel.
		time.Sleep(time.Millisecond * time.Duration(rand.Intn(3000)+500))
	}
	args = append(args, imageConf.Args...)
	// "-" tells buildx to read the build context from stdin.
	args = append(args, "-")
	log.Infof("Execute BuildKit command with: %s %s", strings.Join(command, " "), strings.Join(args, " "))
	completeArgs := []string{}
	completeArgs = append(completeArgs, command[1:]...)
	completeArgs = append(completeArgs, args...)
	var (
		minikubeEnv map[string]string
		err         error
	)
	if useMinikubeDocker {
		minikubeEnv, err = dockerpkg.GetMinikubeEnvironment(ctx, kubeClient.CurrentContext())
		if err != nil {
			return fmt.Errorf("error retrieving minikube environment with 'minikube docker-env --shell none'. Try setting the option preferMinikube to false: %v", err)
		}
	}
	err = command2.Command(ctx, dir, env.NewVariableEnvProvider(environ, minikubeEnv), writer, writer, context, command[0], completeArgs...)
	if err != nil {
		return err
	}
	if skipPush && kubeClient != nil && kubectl.GetKindContext(kubeClient.CurrentContext()) != "" {
		// Load image if it is a kind-context; failures are logged but not fatal.
		for _, tag := range options.Tags {
			command := []string{"kind", "load", "docker-image", "--name", kubectl.GetKindContext(kubeClient.CurrentContext()), tag}
			completeArgs := []string{}
			completeArgs = append(completeArgs, command[1:]...)
			err = command2.Command(ctx, dir, env.NewVariableEnvProvider(environ, minikubeEnv), writer, writer, nil, command[0], completeArgs...)
			if err != nil {
				log.Info(errors.Errorf("error during image load to kind cluster: %v", err))
			}
			log.Info("Image loaded to kind cluster")
		}
	}
	return nil
}
// NodeGroup mirrors docker buildx's on-disk builder instance description
// (only the fields this package inspects); it is used to decide whether an
// existing builder matches the desired configuration.
type NodeGroup struct {
	Name    string
	Driver  string
	Nodes   []Node
	Dynamic bool
}
// Node mirrors a single node entry inside a buildx NodeGroup config file.
type Node struct {
	Name      string
	Endpoint  string
	Platforms []interface{}
	Flags     []string
	ConfigFile string
	// DriverOpts holds kubernetes driver options such as "namespace",
	// "rootless", "image" and "nodeselector".
	DriverOpts map[string]string
}
// ensureBuilder makes sure a buildx builder instance exists for in-cluster
// BuildKit builds and returns its name. It returns "" when no in-cluster
// config is set (plain local build). An existing builder is reused when its
// driver options match the desired configuration; otherwise it is removed
// and recreated.
func ensureBuilder(ctx context.Context, workingDir string, environ expand.Environ, kubeClient kubectl.Client, imageConf *latest.BuildKitConfig, log logpkg.Logger) (string, error) {
	if imageConf.InCluster == nil {
		return "", nil
	} else if kubeClient == nil {
		// FIX: corrected typo "wth" -> "with" in the error message.
		return "", fmt.Errorf("cannot build in cluster with build kit without a correct kubernetes context")
	}
	namespace := kubeClient.Namespace()
	if imageConf.InCluster.Namespace != "" {
		namespace = imageConf.InCluster.Namespace
	}
	name := "devspace-" + namespace
	if imageConf.InCluster.Name != "" {
		name = imageConf.InCluster.Name
	}
	// check if we should skip creation entirely
	if imageConf.InCluster.NoCreate {
		return name, nil
	}
	command := []string{"docker", "buildx"}
	if len(imageConf.Command) > 0 {
		command = imageConf.Command
	}
	args := []string{"create", "--driver", "kubernetes", "--driver-opt", "namespace=" + namespace, "--name", name}
	if imageConf.InCluster.Rootless {
		args = append(args, "--driver-opt", "rootless=true")
	}
	if imageConf.InCluster.Image != "" {
		args = append(args, "--driver-opt", "image="+imageConf.InCluster.Image)
	}
	if imageConf.InCluster.NodeSelector != "" {
		args = append(args, "--driver-opt", "nodeselector="+imageConf.InCluster.NodeSelector)
	}
	if len(imageConf.InCluster.CreateArgs) > 0 {
		args = append(args, imageConf.InCluster.CreateArgs...)
	}
	completeArgs := []string{}
	completeArgs = append(completeArgs, command[1:]...)
	completeArgs = append(completeArgs, args...)
	// check if builder already exists on disk
	builderPath := filepath.Join(getConfigStorePath(), "instances", name)
	_, err := os.Stat(builderPath)
	if err == nil {
		if imageConf.InCluster.NoRecreate {
			return name, nil
		}
		// update the builder if necessary; read failures fall back to reuse
		b, err := os.ReadFile(builderPath)
		if err != nil {
			log.Warnf("Error reading builder %s: %v", builderPath, err)
			return name, nil
		}
		// parse builder config
		ng := &NodeGroup{}
		err = json.Unmarshal(b, ng)
		if err != nil {
			log.Warnf("Error decoding builder %s: %v", builderPath, err)
			return name, nil
		}
		// check for: correct driver name, driver opts
		if strings.ToLower(ng.Driver) == "kubernetes" && len(ng.Nodes) == 1 {
			node := ng.Nodes[0]
			// check driver options; an absent "rootless" opt means false
			namespaceCorrect := node.DriverOpts["namespace"] == namespace
			if node.DriverOpts["rootless"] == "" {
				node.DriverOpts["rootless"] = "false"
			}
			rootlessCorrect := strconv.FormatBool(imageConf.InCluster.Rootless) == node.DriverOpts["rootless"]
			imageCorrect := imageConf.InCluster.Image == node.DriverOpts["image"]
			nodeSelectorCorrect := imageConf.InCluster.NodeSelector == node.DriverOpts["nodeselector"]
			// if builder is up to date, exit here
			if namespaceCorrect && rootlessCorrect && imageCorrect && nodeSelectorCorrect {
				return name, nil
			}
		}
		// recreate the builder
		log.Infof("Recreate BuildKit builder because builder options differ")
		// create a temporary kube context so buildx talks to the right cluster
		tempFile, err := tempKubeContextFromClient(kubeClient)
		if err != nil {
			return "", err
		}
		defer os.Remove(tempFile)
		// prepare the rm command
		rmArgs := []string{}
		rmArgs = append(rmArgs, command[1:]...)
		rmArgs = append(rmArgs, "rm", name)
		// execute the command; deletion failure is non-fatal (create may still succeed)
		out, err := command2.CombinedOutput(ctx, workingDir, env.NewVariableEnvProvider(environ, map[string]string{
			"KUBECONFIG": tempFile,
		}), command[0], rmArgs...)
		if err != nil {
			log.Warnf("error deleting BuildKit builder: %s => %v", string(out), err)
		}
	}
	// create the builder
	log.Infof("Create BuildKit builder with: %s %s", strings.Join(command, " "), strings.Join(args, " "))
	// KUBECONFIG is emptied on purpose: docker would otherwise save the used
	// kube config, which we don't want because we will override it with our
	// own temp kube config during building.
	out, err := command2.CombinedOutput(ctx, workingDir, env.NewVariableEnvProvider(environ, map[string]string{
		"KUBECONFIG": "",
	}), command[0], completeArgs...)
	if err != nil {
		// a racing parallel build may have created it already; that's fine
		if !strings.Contains(string(out), "existing instance") {
			return "", fmt.Errorf("error creating BuildKit builder: %s => %v", string(out), err)
		}
	}
	return name, nil
}
// getConfigStorePath resolves the buildx configuration store directory:
// $BUILDX_CONFIG when set, otherwise the "buildx" subdirectory next to the
// Docker config file (i.e. `${DOCKER_CONFIG}/buildx`).
func getConfigStorePath() string {
	if fromEnv := os.Getenv("BUILDX_CONFIG"); fromEnv != "" {
		return fromEnv
	}
	// LoadDefaultConfigFile writes warnings to the supplied writer; discard them.
	var discard bytes.Buffer
	dockerConfig := cliconfig.LoadDefaultConfigFile(&discard)
	return filepath.Join(filepath.Dir(dockerConfig.Filename), "buildx")
}
// tempKubeContextFromClient writes the client's kube config (with the current
// context pinned) to a temporary file and returns its path. The caller is
// responsible for removing the file.
func tempKubeContextFromClient(kubeClient kubectl.Client) (string, error) {
	rawConfig, err := kubeClient.ClientConfig().RawConfig()
	if err != nil {
		return "", errors.Wrap(err, "get raw kube config")
	}
	if !kubeClient.IsInCluster() {
		rawConfig.CurrentContext = kubeClient.CurrentContext()
	}
	// FIX: renamed the local from `bytes` (which shadowed the stdlib bytes
	// package) to kubeConfigBytes.
	kubeConfigBytes, err := clientcmd.Write(rawConfig)
	if err != nil {
		return "", err
	}
	tempFile, err := os.CreateTemp("", "")
	if err != nil {
		return "", err
	}
	// FIX: the original never closed the temp file, leaking a file descriptor
	// per call. Close it in all cases and clean up on failure.
	_, writeErr := tempFile.Write(kubeConfigBytes)
	closeErr := tempFile.Close()
	if writeErr != nil {
		os.Remove(tempFile.Name())
		return "", errors.Wrap(writeErr, "error writing to file")
	}
	if closeErr != nil {
		os.Remove(tempFile.Name())
		return "", errors.Wrap(closeErr, "error closing temp kube config")
	}
	return tempFile.Name(), nil
}
|
package models

// Response will be used to structure our http response.
type Response struct {
	// Message is a human-readable status message.
	Message string `json:"message"`
	// Error carries an error description; empty on success.
	Error string `json:"error"`
	// Data is the endpoint-specific payload.
	Data interface{} `json:"data"`
}
|
package utils
import (
"encoding/json"
"errors"
"time"
"github.com/golang/protobuf/proto"
lpb "github.com/xuperchain/xupercore/bcs/ledger/xledger/xldgpb"
bftPb "github.com/xuperchain/xupercore/kernel/consensus/base/driver/chained-bft/pb"
bftStorage "github.com/xuperchain/xupercore/kernel/consensus/base/driver/chained-bft/storage"
"github.com/xuperchain/xupercore/kernel/contract"
)
var (
	// Sentinel errors returned by the consensus contract helpers.
	// NOTE(review): "EmptyValidors" is a misspelling of "EmptyValidators",
	// but it is exported, so renaming would break callers.
	EmptyValidors    = errors.New("Current validators is empty.")
	NotValidContract = errors.New("Cannot get valid res with contract.")
	EmptyJustify     = errors.New("Justify is empty.")
	InvalidJustify   = errors.New("Justify structure is invalid.")
	// MaxMapSize caps the produce-map size before stale keys are evicted.
	MaxMapSize = 1000
	// HTTP-style status codes used in contract responses.
	StatusOK         = 200
	StatusBadRequest = 400
	StatusErr        = 500
)
// NewContractOKResponse builds a successful contract response wrapping the
// given JSON payload.
// FIX: the parameter was named `json`, shadowing the encoding/json package
// inside this function; renamed to `body` (callers are unaffected — Go call
// sites do not use parameter names).
func NewContractOKResponse(body []byte) *contract.Response {
	return &contract.Response{
		Status:  StatusOK,
		Message: "success",
		Body:    body,
	}
}
// NewContractErrResponse builds a failed contract response carrying the given
// status code and message (no body).
func NewContractErrResponse(status int, msg string) *contract.Response {
	resp := &contract.Response{}
	resp.Status = status
	resp.Message = msg
	return resp
}
// AddressEqual reports whether two validator address lists are identical
// (same length, same elements in the same order).
func AddressEqual(a []string, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	// FIX: `for i, _ := range` simplified to the idiomatic `for i := range`
	// (flagged by gofmt -s).
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
// CleanProduceMap evicts stale keys from the produce map once it grows past
// MaxMapSize. Keys are period indices (current-millis / period); everything
// older than MaxMapSize periods behind "now" is deleted.
func CleanProduceMap(isProduce map[int64]bool, period int64) {
	// Nothing to do until the map outgrows the cap.
	if len(isProduce) <= MaxMapSize {
		return
	}
	t := time.Now().UnixNano() / int64(time.Millisecond)
	key := t / period
	// FIX: `for k, _ := range` simplified to the idiomatic `for k := range`
	// (flagged by gofmt -s). Deleting during range is safe in Go.
	for k := range isProduce {
		if k <= key-int64(MaxMapSize) {
			delete(isProduce, k)
		}
	}
}
///////////////////// lpb compatibility logic /////////////////////

// ConsensusStorage is the historical on-chain consensus storage field layout.
type ConsensusStorage struct {
	Justify     *lpb.QuorumCert `json:"justify,omitempty"`
	CurTerm     int64           `json:"curTerm,omitempty"`
	CurBlockNum int64           `json:"curBlockNum,omitempty"`
	// TargetBits is a trick implementation:
	// it is reused at the bcs layer to record the current TipHeight when
	// ChainedBFT rolls back; int32 is reused in place of int64 here, which in
	// theory could cause errors.
	TargetBits int32 `json:"targetBits,omitempty"`
}
// ParseOldQCStorage ๅฐๆJustify็ปๆ็่ๅ
ฑ่ฏ็ปๆ่งฃๆๅบๆฅ
func ParseOldQCStorage(storage []byte) (*ConsensusStorage, error) {
old := &ConsensusStorage{}
if err := json.Unmarshal(storage, &old); err != nil {
return nil, err
}
return old, nil
}
// OldQCToNew ไธบ่็QC pb็ปๆ่ฝฌๅไธบๆฐ็QC็ปๆ
func OldQCToNew(storage []byte) (bftStorage.QuorumCertInterface, error) {
oldS, err := ParseOldQCStorage(storage)
if err != nil {
return nil, err
}
oldQC := oldS.Justify
if oldQC == nil {
return nil, InvalidJustify
}
justifyBytes := oldQC.ProposalMsg
justifyQC := &lpb.QuorumCert{}
err = proto.Unmarshal(justifyBytes, justifyQC)
if err != nil {
return nil, err
}
newQC := bftStorage.NewQuorumCert(
&bftStorage.VoteInfo{
ProposalId: oldQC.ProposalId,
ProposalView: oldQC.ViewNumber,
ParentId: justifyQC.ProposalId,
ParentView: justifyQC.ViewNumber,
}, nil, OldSignToNew(storage))
return newQC, nil
}
// NewToOldQC ไธบๆฐ็QC pb็ปๆ่ฝฌๅไธบ่pb็ปๆ
func NewToOldQC(new *bftStorage.QuorumCert) (*lpb.QuorumCert, error) {
oldParentQC := &lpb.QuorumCert{
ProposalId: new.VoteInfo.ParentId,
ViewNumber: new.VoteInfo.ParentView,
}
b, err := proto.Marshal(oldParentQC)
if err != nil {
return nil, err
}
oldQC := &lpb.QuorumCert{
ProposalId: new.VoteInfo.ProposalId,
ViewNumber: new.VoteInfo.ProposalView,
ProposalMsg: b,
}
sign := NewSignToOld(new.GetSignsInfo())
ss := &lpb.QCSignInfos{
QCSignInfos: sign,
}
oldQC.SignInfos = ss
return oldQC, nil
}
// OldSignToNew ่็็ญพๅ็ปๆ่ฝฌๅไธบๆฐ็็ญพๅ็ปๆ
func OldSignToNew(storage []byte) []*bftPb.QuorumCertSign {
oldS, err := ParseOldQCStorage(storage)
if err != nil {
return nil
}
oldQC := oldS.Justify
if oldQC == nil || oldQC.GetSignInfos() == nil {
return nil
}
old := oldQC.GetSignInfos().QCSignInfos
var newS []*bftPb.QuorumCertSign
for _, s := range old {
newS = append(newS, &bftPb.QuorumCertSign{
Address: s.Address,
PublicKey: s.PublicKey,
Sign: s.Sign,
})
}
return newS
}
// NewSignToOld ๆฐ็็ญพๅ็ปๆ่ฝฌๅไธบ่็็ญพๅ็ปๆ
func NewSignToOld(new []*bftPb.QuorumCertSign) []*lpb.SignInfo {
var oldS []*lpb.SignInfo
for _, s := range new {
oldS = append(oldS, &lpb.SignInfo{
Address: s.Address,
PublicKey: s.PublicKey,
Sign: s.Sign,
})
}
return oldS
}
|
package registry
import (
"context"
"fmt"
"net/http"
"time"
)
// httpRequestTimeout bounds each registry HEAD request.
const httpRequestTimeout = time.Second * 10

// userAgent identifies this tool to the registries.
const userAgent = "dependency locker"

// Registry base URLs for package-availability probes.
const npmRegistryUrl = "https://registry.npmjs.org"
const pypiRegistryUrl = "https://pypi.python.org/simple"
// cache memoizes availability results per probe URL.
// FIX: initialized directly instead of via init() (avoid init() for simple
// initialization).
// NOTE(review): not safe for concurrent use — guard with a sync.Mutex if the
// Is*AvailableForRegistration functions are ever called from multiple
// goroutines.
var cache = map[string]bool{}
// IsPypiPackageAvailableForRegistration reports whether packageName is NOT
// yet registered on PyPI (i.e. available to claim). Results are memoized.
func IsPypiPackageAvailableForRegistration(packageName string) (bool, error) {
	url := fmt.Sprintf("%v/%v", pypiRegistryUrl, packageName)
	result, found := cache[url]
	if found {
		return result, nil
	}
	result, err := isPackageAvailableForRegistration(url)
	// FIX: the original cached only when err != nil, memoizing bogus results
	// from failed requests and never caching successful lookups. Cache on
	// success instead.
	if err == nil {
		cache[url] = result
	}
	return result, err
}
// IsNpmPackageAvailableForRegistration reports whether packageName is NOT
// yet registered on the npm registry (i.e. available to claim). Results are
// memoized.
func IsNpmPackageAvailableForRegistration(packageName string) (bool, error) {
	url := fmt.Sprintf("%v/%v", npmRegistryUrl, packageName)
	result, found := cache[url]
	if found {
		return result, nil
	}
	result, err := isPackageAvailableForRegistration(url)
	// FIX: cache on success, not on error (the original condition was
	// inverted — see IsPypiPackageAvailableForRegistration).
	if err == nil {
		cache[url] = result
	}
	return result, err
}
// isPackageAvailableForRegistration issues a HEAD request against url and
// reports true when the package is NOT registered (non-2xx/3xx status).
func isPackageAvailableForRegistration(url string) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), httpRequestTimeout)
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil)
	if err != nil {
		return false, err
	}
	req.Header.Set("User-Agent", userAgent)
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return false, err
	}
	// FIX: the response body was never closed, leaking the connection.
	defer resp.Body.Close()
	// 2xx/3xx means the name resolves on the registry, i.e. already taken.
	isRegistered := resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusBadRequest
	return !isRegistered, nil
}
|
package main
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
"net"
"os"
)
func main() {
// ๆๅกๅจIPๅฐๅๅ็ซฏๅฃ๏ผๅๅปบ้ไฟกๅฅๆฅๅญ
conn, err := net.Dial("tcp", "192.168.3.12:8989")
if err != nil {
fmt.Println("dial failed, err", err)
return
}
defer conn.Close()
// ่ทๅ็จๆท้ฎ็่พๅ
ฅ๏ผstdin๏ผ๏ผๅฐ่พๅ
ฅๆฐๆฎๅ้็ปๆๅกๅจ
go func(){
for {
str := make([]byte, 4096)
n, err := os.Stdin.Read(str)
if err != nil {
fmt.Println("os.Stdin.Read err:", err)
continue
}
//ๅ็ปๆๅกๅจ๏ผ่ฏปๅคๅฐ๏ผๅๅคๅฐ
//conn.Write(str[:n])
//็ผ็ ๅค็
data, err := Encode(string(str[:n]))
if err != nil {
fmt.Println("encode msg failed, err:", err)
return
}
conn.Write(data)
}
// ๆต่ฏ็ฒๅ
ๅๅๅ
//for i := 0; i < 20; i++ {
// msg := `Hello, Hello. How are you?`
// data, err := Encode(msg)
// if err != nil {
// fmt.Println("encode msg failed, err:", err)
// return
// }
// conn.Write(data)
//}
}()
// ๅๆพๆๅกๅจๅๅ็ๅคงไบๆฐๆฎ
//buf := make([]byte, 4096)
//n, err := conn.Read(buf)
reader := bufio.NewReader(conn)
for {
msg, err := Decode(reader)
if err == io.EOF { //io.EOFๅจ็ฝ็ป็ผ็จไธญ่กจ็คบๅฏน็ซฏๆ้พๆฅๅ
ณ้ญไบ
fmt.Println("ๆฃๆตๅฐๆๅกๅจๅทฒ็ปๅ
ณ้ญ")
return
}
//fmt.Println("err != nil:", err != nil)
if err != nil {
fmt.Println("decode msg failed, err:", err)
return
}
//fmt.Println("serverๅๆฅ็ๆฐๆฎ๏ผ", msg)
fmt.Println(msg)
}
for {
;
}
}
// Encode ๅฐๆถๆฏ็ผ็
func Encode(message string) ([]byte, error) {
// ่ฏปๅๆถๆฏ็้ฟๅบฆ๏ผ่ฝฌๆขๆint32็ฑปๅ๏ผๅ 4ไธชๅญ่๏ผ
var length = int32(len(message))
var pkg = new(bytes.Buffer)
// ๅๅ
ฅๆถๆฏๅคด
err := binary.Write(pkg, binary.LittleEndian, length)
if err != nil {
return nil, err
}
// ๅๅ
ฅๆถๆฏๅฎไฝ
err = binary.Write(pkg, binary.LittleEndian, []byte(message))
if err != nil {
return nil, err
}
return pkg.Bytes(), nil
}
// Decode ่งฃ็ ๆถๆฏ
func Decode(reader *bufio.Reader) (string, error) {
// ่ฏปๅๆถๆฏ็้ฟๅบฆ
lengthByte, _ := reader.Peek(4) // ่ฏปๅๅ4ไธชๅญ่็ๆฐๆฎ
lengthBuff := bytes.NewBuffer(lengthByte)
var length int32
err := binary.Read(lengthBuff, binary.LittleEndian, &length)
if err != nil {
return "", err
}
// Buffered่ฟๅ็ผๅฒไธญ็ฐๆ็ๅฏ่ฏปๅ็ๅญ่ๆฐใ
if int32(reader.Buffered()) < length+4 {
return "", err
}
// ่ฏปๅ็ๆญฃ็ๆถๆฏๆฐๆฎ
pack := make([]byte, int(4+length))
_, err = reader.Read(pack)
if err != nil {
return "", err
}
return string(pack[4:]), nil
}
|
/*
Links
* http://wegicel.github.com/1.html
* http://wegicel.github.com/3.html
* http://wegicel.github.com/4.html
* http://wegicel.github.com/5.html
* http://wegicel.github.com/6.html
* http://wegicel.github.com/7.html
* http://wegicel.github.com/9.html
* http://wegicel.github.com/10.html
* http://wegicel.github.com/11.html
* http://wegicel.github.com/12.html
*/
package abc
|
// https://programmers.co.kr/learn/courses/30/lessons/43165
package main
// p43165 counts how many ways the numbers can be added or subtracted (each
// used exactly once, in order) to reach target.
func p43165(numbers []int, target int) int {
	var count int
	s(0, 0, target, numbers, &count)
	return count
}

// s explores both sign choices for numbers[depth], accumulating the running
// sum in now and incrementing *result whenever a full assignment hits target.
func s(depth, now, target int, numbers []int, result *int) {
	if depth < len(numbers) {
		s(depth+1, now+numbers[depth], target, numbers, result)
		s(depth+1, now-numbers[depth], target, numbers, result)
		return
	}
	if now == target {
		*result++
	}
}
// sol solves the same target-number problem recursively by consuming the
// slice head-first: each call tries both signs for the first element.
func sol(numbers []int, target int) int {
	switch {
	case len(numbers) == 0 && target == 0:
		return 1
	case len(numbers) == 0:
		return 0
	}
	head, rest := numbers[0], numbers[1:]
	return sol(rest, target-head) + sol(rest, target+head)
}
|
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT License was not distributed with this
// file, you can obtain one at https://opensource.org/licenses/MIT.
//
// Copyright (c) DUSK NETWORK. All rights reserved.
package kadcast
import (
"net"
"github.com/dusk-network/dusk-blockchain/pkg/p2p/kadcast/encoding"
"github.com/dusk-network/dusk-blockchain/pkg/p2p/peer"
"github.com/dusk-network/dusk-blockchain/pkg/p2p/wire/protocol"
"github.com/dusk-network/dusk-blockchain/pkg/util/nativeutils/eventbus"
"github.com/dusk-network/dusk-blockchain/pkg/util/nativeutils/rcudp"
)
const (
	// redundancyFactor is the raptor-code redundancy multiplier.
	// NOTE(review): not referenced in this chunk — presumably consumed by the
	// rc-udp writer elsewhere; confirm before removing.
	redundancyFactor = uint8(2)
)
// RaptorCodeReader is an rc-udp based listener that reads Broadcast messages
// from the Kadcast network and delegates their processing to the messageRouter.
type RaptorCodeReader struct {
	// base handles message routing shared with the plain reader.
	base *baseReader
	// rcUDPReader receives and reassembles raptor-coded UDP packets.
	rcUDPReader *rcudp.UDPReader
}
// NewRaptorCodeReader makes an instance of RaptorCodeReader listening on the
// peer's address shifted by 10000 ports. It panics on an unresolvable address
// or a reader setup failure (startup-time configuration errors).
func NewRaptorCodeReader(lpeerInfo encoding.PeerInfo, publisher eventbus.Publisher,
	gossip *protocol.Gossip, processor *peer.MessageProcessor) *RaptorCodeReader {
	// TODO: handle this by configs
	// lpeerInfo is a copy, so mutating the port here does not affect the caller.
	lpeerInfo.Port += 10000
	addr := lpeerInfo.Address()
	lAddr, err := net.ResolveUDPAddr("udp4", addr)
	if err != nil {
		log.Panicf("invalid kadcast peer address %s", addr)
	}
	r := new(RaptorCodeReader)
	r.base = newBaseReader(lpeerInfo, publisher, gossip, processor)
	// Reassembled broadcasts are handed to the shared base handler.
	r.rcUDPReader, err = rcudp.NewUDPReader(lAddr, rcudp.MessageCollector(r.base.handleBroadcast))
	if err != nil {
		panic(err)
	}
	log.WithField("l_addr", lAddr.String()).Infoln("Starting Reader")
	return r
}
// Close shuts the reader down. Currently a no-op: the underlying rc-udp
// reader does not expose a Close yet (see TODO).
func (r *RaptorCodeReader) Close() error {
	if r.rcUDPReader != nil {
		// TODO: r.rcUDPReader.Close()
	}
	return nil
}
// Serve starts accepting and processing UDP packets; it blocks until the
// underlying rc-udp reader stops.
func (r *RaptorCodeReader) Serve() {
	r.rcUDPReader.Serve()
}
|
package loader
import (
"bytes"
"os"
"github.com/loft-sh/devspace/pkg/devspace/config/versions/latest"
yaml "gopkg.in/yaml.v3"
)
// Save writes the data of a config to its yaml file.
func Save(path string, config *latest.Config) error {
	var buffer bytes.Buffer
	yamlEncoder := yaml.NewEncoder(&buffer)
	yamlEncoder.SetIndent(2)
	if err := yamlEncoder.Encode(config); err != nil {
		return err
	}
	// FIX: the encoder was never closed; yaml.v3's Encoder.Close flushes and
	// finishes the stream, and surfaces any deferred encoding error.
	if err := yamlEncoder.Close(); err != nil {
		return err
	}
	// NOTE(review): os.ModePerm (0777) is unusually permissive for a config
	// file — consider 0644; left unchanged to preserve behavior.
	err := os.WriteFile(path, buffer.Bytes(), os.ModePerm)
	if err != nil {
		return err
	}
	return nil
}
|
package solutions
// partition rearranges the list so that all nodes with values < x come before
// all nodes with values >= x, preserving the relative order within each group
// (LeetCode 86).
func partition(head *ListNode, x int) *ListNode {
	if head == nil {
		return head
	}
	// Dummy heads for the two partitions; tails track the append points.
	smallHead, largeHead := &ListNode{0, nil}, &ListNode{0, nil}
	smallTail, largeTail := smallHead, largeHead
	for node := head; node != nil; node = node.Next {
		if node.Val < x {
			smallTail.Next = node
			smallTail = node
		} else {
			largeTail.Next = node
			largeTail = node
		}
	}
	// Terminate the large list, then splice it after the small list.
	largeTail.Next = nil
	smallTail.Next = largeHead.Next
	return smallHead.Next
}
|
package lib
import (
"io"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go"
"github.com/uber/jaeger-client-go/config"
"github.com/uber/jaeger-client-go/log"
"github.com/uber/jaeger-lib/metrics"
)
// CreateTracer builds a Jaeger tracer for the given service name using a
// zero-value configuration (sampling/reporting come from environment
// defaults). Returns the tracer, a closer to flush it, and any setup error.
func CreateTracer(serviceName string) (opentracing.Tracer, io.Closer, error) {
	var cfg config.Configuration
	// Route Jaeger's internal logging to stdout; disable internal metrics.
	jLogger := log.StdLogger
	jMetricsFactory := metrics.NullFactory
	cfg.ServiceName = serviceName
	return cfg.NewTracer(
		config.Logger(jLogger),
		config.Metrics(jMetricsFactory),
	)
}
// InitGlobalTracer installs a development-oriented global tracer: constant
// sampling (every span) with span logging enabled.
func InitGlobalTracer(serviceName string) (io.Closer, error) {
	return initGlobalTracer(config.Configuration{
		Sampler: &config.SamplerConfig{
			Type:  jaeger.SamplerTypeConst,
			Param: 1,
		},
		Reporter: &config.ReporterConfig{
			LogSpans: true,
		},
	}, serviceName)
}
// InitGlobalTracerProduction installs a global tracer with the library's
// default (environment-driven) sampling and reporting configuration.
func InitGlobalTracerProduction(serviceName string) (io.Closer, error) {
	return initGlobalTracer(config.Configuration{}, serviceName)
}
// initGlobalTracer registers cfg as the opentracing global tracer for
// serviceName and returns a closer that flushes pending spans.
func initGlobalTracer(cfg config.Configuration, serviceName string) (io.Closer, error) {
	// Route Jaeger's internal logging to stdout; disable internal metrics.
	jLogger := log.StdLogger
	jMetricsFactory := metrics.NullFactory
	return cfg.InitGlobalTracer(
		serviceName,
		config.Logger(jLogger),
		config.Metrics(jMetricsFactory),
	)
}
|
package util
import (
"testing"
)
// TestAverage checks Average over a simple three-element input.
func TestAverage(t *testing.T) {
	input := []float64{1, 2, 3}
	got := Average(input)
	if got != 2 {
		t.Error("Expected 2, got ", got)
	}
}
// TestCallURL runs CallURL concurrently and expects it to signal success on
// the done channel.
// NOTE(review): if CallURL never sends, this test deadlocks rather than
// failing — consider a select with a timeout.
func TestCallURL(t *testing.T) {
	done := make(chan bool)
	go CallURL(done)
	v := <-done
	if !v {
		t.Error("Expected true, got ", v)
	}
}
|
package tgo
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"regexp"
"time"
)
// ConnectionsResponse holds the response from `GET /network/connections`
type ConnectionsResponse struct {
Incoming bool `json:"incoming"`
PeerID string `json:"peer_id"`
IDPoint struct {
Address string `json:"addr"`
Port int64 `json:"port"`
} `json:"id_point"`
RemoteSocketPort int64 `json:"remote_socket_port"`
Versions []struct {
Name string `json:"name"`
Major int64 `json:"magor"`
Minor int64 `json:"miner"`
} `json:"versions"`
Private bool `json:"private"`
LocalMetadata struct {
DisableMempool bool `json:"disable_mempool"`
PrivateNode bool `json:"private_node"`
} `json:"local_metadata"`
RemoteMetadata struct {
DisableMempool bool `json:"disable_mempool"`
PrivateNode bool `json:"private_node"`
} `json:"remote_metadata"`
}
// GetConnections calls GET /network/connections and returns the decoded list
// of current peer connections.
func (rpc *RPC) GetConnections() ([]ConnectionsResponse, error) {
	resp, err := rpc.Client.Get(fmt.Sprintf("%s/network/connections", rpc.URL))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	respBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	cp := []ConnectionsResponse{}
	err = json.Unmarshal(respBytes, &cp)
	if err != nil {
		return nil, err
	}
	return cp, nil
}
// GetPeerID calls GET /network/connections/<peer_id> and returns the decoded
// connection details for that single peer.
func (rpc *RPC) GetPeerID(peerID string) (ConnectionsResponse, error) {
	resp, err := rpc.Client.Get(fmt.Sprintf("%s/network/connections/%s", rpc.URL, peerID))
	if err != nil {
		return ConnectionsResponse{}, err
	}
	defer resp.Body.Close()
	respBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return ConnectionsResponse{}, err
	}
	cp := ConnectionsResponse{}
	err = json.Unmarshal(respBytes, &cp)
	if err != nil {
		return ConnectionsResponse{}, err
	}
	return cp, nil
}
// RemovePeers removes multiple peers at once by calling
// DELETE /network/connections/<peer_id> for each entry. The map value is the
// per-peer "wait" flag. It returns the peers removed so far and stops at the
// first failure.
func (rpc *RPC) RemovePeers(peers map[string]bool) ([]string, error) {
	removed := make([]string, 0, len(peers))
	for peerID, wait := range peers {
		if err := rpc.RemovePeer(peerID, wait); err != nil {
			return removed, err
		}
		removed = append(removed, peerID)
	}
	return removed, nil
}
// RemovePeer calls DELETE /network/connections/<peer_id>. When wait is true
// the node delays its reply until the connection is actually torn down.
func (rpc *RPC) RemovePeer(peerID string, wait bool) error {
	url := fmt.Sprintf("%s/network/connections/%s", rpc.URL, peerID)
	if wait {
		url = fmt.Sprintf("%s?wait", url)
	}
	req, err := http.NewRequest("DELETE", url, nil)
	if err != nil {
		return err
	}
	resp, err := rpc.Client.Do(req)
	// FIX: the original never checked Do's error and deferred
	// resp.Body.Close() unconditionally, dereferencing a nil resp on any
	// transport error.
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.Status != "200 OK" {
		return fmt.Errorf("expected status '200 OK' got %s", resp.Status)
	}
	return nil
}
// ClearGreylist calls GET /network/greylist/clear, asking the node to flush
// its peer greylist. Any non-200 response is reported as an error.
func (rpc *RPC) ClearGreylist() error {
	resp, err := rpc.Client.Get(fmt.Sprintf("%s/network/greylist/clear", rpc.URL))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.Status != "200 OK" {
		return fmt.Errorf("expected status '200 OK' got %s", resp.Status)
	}
	return nil
}
// GetNetworkLog calls GET /network/log and prints each streamed value.
// NOTE: Currently semi-bugged, closed after the first response.
func (rpc *RPC) GetNetworkLog(waitTime time.Duration) error {
	url := fmt.Sprintf("%s/network/log", rpc.URL)
	resp, err := rpc.Client.Get(url)
	if err != nil {
		return err
	}
	// The endpoint streams indefinitely; force-close the body after waitTime
	// so the decode loop below terminates.
	go func() {
		time.Sleep(waitTime)
		resp.Body.Close()
	}()
	//defer resp.Body.Close()
	decoder := json.NewDecoder(resp.Body)
	// The stream must open with a JSON object.
	token, err := decoder.Token()
	if err != nil {
		return err
	}
	if delim, ok := token.(json.Delim); !ok || delim != '{' {
		return errors.New("expected object")
	}
	// Consume key/value pairs until the body is closed, printing each value.
	for decoder.More() {
		// consume and discard the key token
		_, err := decoder.Token()
		if err != nil {
			return err
		}
		var v interface{}
		err = decoder.Decode(&v)
		if err != nil {
			return err
		}
		fmt.Printf("%+v\n", v)
	}
	return nil
}
// NetworkPeers models one peer entry of GET /network/peers.
// NOTE(review): PublicKeyHash and the Timestamp fields carry no json tags, so
// they only decode if the wire names match case-insensitively — verify
// against the node's actual response shape.
type NetworkPeers struct {
	PublicKeyHash string
	Score         int64 `json:"score"`
	Trusted       bool  `json:"trusted"`
	ConnMetadata  struct {
		DisableMempool bool `json:"disable_mempool"`
		PrivateNode    bool `json:"private_node"`
	} `json:"conn_metadata"`
	State       string `json:"state"`
	ReachableAt struct {
		Addr string `json:"addr"`
		Port int64  `json:"port"`
	} `json:"reachable_at"`
	Stat struct {
		TotalSent      int64 `json:"total_sent"`
		TotalRecv      int64 `json:"total_recv"`
		CurrentInflow  int64 `json:"current_inflow"`
		CurrentOutflow int64 `json:"current_outflow"`
	} `json:"stat"`
	LastFailedConnection struct {
		Addr      string `json:"addr"`
		Port      int64  `json:"port"`
		Timestamp int64
	} `json:"last_failed_connection,omitempty"`
	LastRejectedConnection struct {
		Addr      string `json:"addr"`
		Port      int64  `json:"port"`
		Timestamp int64
	} `json:"last_rejected_connection,omitempty"`
	LastEstablishedConnection struct {
		Addr      string `json:"addr"`
		Port      int64  `json:"port"`
		Timestamp int64
	} `json:"last_established_connection,omitempty"`
	LastDisconnection struct {
		Addr      string `json:"addr"`
		Port      int64  `json:"port"`
		Timestamp int64
	} `json:"last_disconnection,omitempty"`
	// NOTE(review): Port is a string here but int64 in the fields above —
	// presumably mirroring inconsistent node output; confirm.
	LastSeen struct {
		Addr      string `json:"addr"`
		Port      string `json:"port"`
		Timestamp int64
	} `json:"last_seen,omitempty"`
	LastMiss struct {
		Addr      string `json:"addr"`
		Port      string `json:"port"`
		Timestamp int64
	} `json:"last_miss,omitempty"`
}
// GetNetworkPeers calls GET /network/peers.
// TODO: implement filter
// NOTE(review): the decoded peers are discarded — this currently only
// validates that the response parses; presumably it should return the list.
func (rpc *RPC) GetNetworkPeers() error {
	url := fmt.Sprintf("%s/network/peers", rpc.URL)
	resp, err := rpc.Client.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	respBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	// Round-trip through interface{} before decoding into the typed slice.
	var raw interface{}
	err = json.Unmarshal(respBytes, &raw)
	if err != nil {
		return err
	}
	// The node returns an array of [peer_id, info] pairs, hence the nested
	// slice shape.
	peers := [][]NetworkPeers{}
	b, err := json.Marshal(raw)
	if err != nil {
		return err
	}
	err = json.Unmarshal(b, &peers)
	if err != nil {
		return err
	}
	return nil
}
// NetworkPeer models the response of GET /network/peers/<peer_id>.
// Several numeric fields use the ",string" option (or plain string types)
// because GetNetworkPeer quotes bare numbers in the raw JSON before decoding.
type NetworkPeer struct {
	Score        int64 `json:"score,string"`
	Trusted      bool  `json:"trusted"`
	ConnMetadata struct {
		DisableMempool bool `json:"disable_mempool"`
		PrivateNode    bool `json:"private_node"`
	} `json:"conn_metadata"`
	State       string `json:"state"`
	ReachableAt struct {
		Addr string `json:"addr"`
		Port int64  `json:"port"`
	} `json:"reachable_at"`
	Stat struct {
		TotalSent      string `json:"total_sent"`
		TotalRecv      string `json:"total_recv"`
		CurrentInflow  int64  `json:"current_inflow,string"`
		CurrentOutflow int64  `json:"current_outflow,string"`
	} `json:"stat"`
	LastFailedConnection struct {
		Addr      string `json:"addr"`
		Port      string `json:"port,omitempty"`
		Timestamp int64
	} `json:"last_failed_connection,omitempty"`
	// NOTE(review): this is a slice while the sibling fields are structs —
	// presumably mirroring node output; confirm.
	LastRejectedConnection []struct {
		Addr string `json:"addr"`
		Port string `json:"port,omitempty"`
		//Timestamp int64
	} `json:"last_rejected_connection,omitempty"`
	LastEstablishedConnection struct {
		Addr      string `json:"addr"`
		Port      int64  `json:"port,omitempty"`
		Timestamp int64
	} `json:"last_established_connection,omitempty"`
	LastDisconnection struct {
		Addr      string `json:"addr"`
		Port      int64  `json:"port,omitempty"`
		Timestamp int64
	} `json:"last_disconnection,omitempty"`
	LastSeen struct {
		Addr      string `json:"addr"`
		Port      int64  `json:"port,omitempty"`
		Timestamp int64
	} `json:"last_seen,omitempty"`
	LastMiss struct {
		Addr      string `json:"addr"`
		Port      int64  `json:"port,omitempty"`
		Timestamp int64
	} `json:"last_miss,omitempty"`
}
// GetNetworkPeer calls GET /network/peers/<peer_id>, decodes the peer and
// prints it.
func (rpc *RPC) GetNetworkPeer(peerID string) error {
	url := fmt.Sprintf("%s/network/peers/%s", rpc.URL, peerID)
	resp, err := rpc.Client.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	respBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	// Quote every bare numeric value so fields declared as strings (or with
	// ",string" tags) in NetworkPeer can be unmarshalled.
	// NOTE(review): compiling the regexp on every call is wasteful — consider
	// hoisting to a package-level regexp.MustCompile.
	re := regexp.MustCompile(`(":\s*)([\d\.]+)(\s*[,}])`)
	respBytes = re.ReplaceAll(respBytes, []byte(`$1"$2"$3`))
	peer := NetworkPeer{}
	err = json.Unmarshal(respBytes, &peer)
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", peer)
	return nil
}
|
package main
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"strings"
"text/template"
"github.com/gorilla/mux"
_ "github.com/lib/pq"
"github.com/delba/requestbin/model"
"github.com/jinzhu/gorm"
)
var db gorm.DB // shared ORM handle, opened in init()
func handle(err error) {
if err != nil {
panic(err)
}
}
// init opens the database connection (DATABASE_URL or a local default),
// ensures the tables exist and enables query logging.
func init() {
	url := os.Getenv("DATABASE_URL")
	if url == "" {
		url = "dbname=requestbin sslmode=disable"
	}
	conn, err := gorm.Open("postgres", url)
	handle(err)
	db = conn
	// db.DropTable(&model.Bin{})
	// db.DropTable(&model.Request{})
	db.CreateTable(&model.Bin{})
	db.CreateTable(&model.Request{})
	db.LogMode(true)
}
// main wires the routes and serves HTTP on $PORT (default 8080).
func main() {
	port := os.Getenv("PORT")
	if port == "" {
		port = "8080"
	}
	r := mux.NewRouter()
	r.HandleFunc("/", BinsIndex).Methods("GET")
	r.HandleFunc("/favicon.ico", ServeFileHandler).Methods("GET")
	r.HandleFunc("/bins", BinsCreate).Methods("POST")
	r.HandleFunc("/{token}", BinsShow).Methods("GET")
	r.HandleFunc("/{token}", RequestsCreate).Methods("POST")
	http.Handle("/", r)
	// FIX: ListenAndServe only returns on failure; the original silently
	// discarded that error. Surface it via the app's fail-fast handler.
	handle(http.ListenAndServe(":"+port, nil))
}
// ServeFileHandler handles GET /favicon.ico.
// NOTE(review): currently a stub — it only logs and never writes a response
// body or a file.
func ServeFileHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Println("Serve static file")
}
// BinsIndex handles GET / — renders the index template with the bin tokens
// stored in the visitor's cookie.
func BinsIndex(w http.ResponseWriter, r *http.Request) {
	fmt.Println("bins#index")
	tokens := getTokens(r)
	files := []string{
		path.Join("templates", "layouts", "application.html"),
		path.Join("templates", "bins", "index.html"),
	}
	t, err := template.ParseFiles(files...)
	handle(err)
	err = t.Execute(w, tokens)
	handle(err)
}
// BinsCreate inserts a new bin record, remembers its token in the
// visitor's cookie, and redirects to the bin's page.
func BinsCreate(w http.ResponseWriter, r *http.Request) {
	fmt.Println("bins#create")
	var bin model.Bin
	// Presumably the Bin model fills in its Token via a gorm callback —
	// confirm in the model package. The Create error is not checked.
	db.Create(&bin)
	addToken(bin.Token, w, r)
	http.Redirect(w, r, "/"+bin.Token, 302)
}
// BinsShow renders a single bin identified by the {token} route variable,
// together with all requests recorded against it.
func BinsShow(w http.ResponseWriter, r *http.Request) {
	fmt.Println("bins#show")
	token := mux.Vars(r)["token"]
	// NOTE(review): findBin re-reads the token from the route vars, so
	// this lookup is only used for logging.
	fmt.Println(token)
	bin := findBin(r)
	var requests []model.Request
	// Load the bin's associated requests via the gorm relation.
	db.Model(&bin).Related(&requests)
	files := []string{
		path.Join("templates", "layouts", "application.html"),
		path.Join("templates", "bins", "show.html"),
	}
	t, err := template.ParseFiles(files...)
	handle(err)
	// Data bundles the bin and its requests for the template.
	type Data struct {
		Bin model.Bin
		Requests []model.Request
	}
	data := Data{Bin: bin, Requests: requests}
	err = t.Execute(w, data)
	handle(err)
}
// RequestsCreate records the incoming request body against the bin
// identified by the {token} route variable, then redirects back to the
// bin's page.
func RequestsCreate(w http.ResponseWriter, r *http.Request) {
	fmt.Println("requests#create")
	bin := findBin(r)
	fmt.Println(bin)
	body, err := ioutil.ReadAll(r.Body)
	handle(err)
	defer r.Body.Close()
	fmt.Println("Bin id is", bin.ID)
	// The Create error is not checked; a failed insert is silently lost.
	request := model.Request{Body: body, BinID: bin.ID}
	db.Create(&request)
	http.Redirect(w, r, "/"+bin.Token, 302)
}
// findBin looks up the bin whose token matches the {token} route variable.
// The gorm error (e.g. record not found) is ignored, so callers receive a
// zero-valued model.Bin when no bin matches.
func findBin(r *http.Request) model.Bin {
	token := mux.Vars(r)["token"]
	var bin model.Bin
	db.Where(&model.Bin{Token: token}).First(&bin)
	return bin
}
func getTokens(r *http.Request) []string {
var tokens []string
cookie, err := r.Cookie("tokens")
if err != nil {
return tokens
}
tokens = strings.Split(cookie.Value, ",")
return tokens
}
// addToken appends token to the visitor's "tokens" cookie, rewriting the
// cookie with the comma-joined list.
func addToken(token string, w http.ResponseWriter, r *http.Request) {
	all := append(getTokens(r), token)
	http.SetCookie(w, &http.Cookie{
		Name:  "tokens",
		Value: strings.Join(all, ","),
	})
}
|
package skiplist
import (
"fmt"
"math/rand"
"time"
)
// R is the package-level pseudo-random source used by NeedNode to
// randomize tower heights; it is seeded once at startup.
var R = rand.New(rand.NewSource(time.Now().UnixNano()))
// Value is the element type stored in the skip list. Implementations
// provide a strict ordering via Less and an integer key via Index.
type Value interface {
	Less(Value) bool
	Index() int
}
// NodeValue is a sample payload carrying an integer key and opaque data.
// NOTE(review): it declares an Index field rather than an Index() method,
// so it does not satisfy the Value interface as written — confirm intent.
type NodeValue struct {
	Index int
	Data interface{}
}
// Node is one cell of the skip-list lattice, linked to its horizontal
// neighbours (Left/Right) and to the same value's nodes on adjacent
// levels (Top/Bottom).
type Node struct {
	Value Value
	level int // 1 is the bottom, fully-populated level
	Left *Node
	Right *Node
	Top *Node
	Bottom *Node
}
// Skiplist is a fixed-height skip list; Head is the top-level node of the
// smallest element and spans all levels.
type Skiplist struct {
	Head *Node
	totalLevel int
}
// NewSkipList returns an empty skip list with a fixed height of five
// levels.
func NewSkipList() *Skiplist {
	list := Skiplist{totalLevel: 5}
	return &list
}
// Search reports whether target is present in the list.
// NOTE(review): equality is tested with the interface comparison
// now.Value == target, which only matches the identical interface value,
// not Less-based equivalence. A head whose value already equals target is
// never detected because the loop condition fails immediately. And if the
// walk reaches a level-1 node with no right neighbour whose value is still
// less than target, neither branch advances `now` and the loop spins
// forever. Verify against Add/Erase before relying on this.
func (s *Skiplist) Search(target Value) bool {
	var now = s.Head
	if now == nil {
		return false
	}
	// Walk right while the current value is less than the target, dropping
	// down a level when there is no right neighbour.
	for now.Value.Less(target) {
		if now.Right != nil {
			now = now.Right
		} else {
			if now.level > 1 {
				now = now.Bottom
			}
		}
		if now.Value == target {
			return true
		}
	}
	return false
}
// Add inserts num into the skip list. Three cases are handled: building
// the first full-height column when the list is empty, replacing the head
// when num is smaller than the current minimum, and a regular insert
// after a search phase. Tower height for inserted values is randomized by
// NeedNode (one coin flip per extra level, up to totalLevel).
func (s *Skiplist) Add(num Value) {
	var now = s.Head
	// Case 1: empty list — the head must exist on every level, so build a
	// full-height column for num.
	if s.Head == nil {
		s.Head = &Node{
			Value: num,
			level: s.totalLevel,
		}
		now = s.Head
		for i := s.totalLevel - 1; i > 0; i-- {
			node := &Node{
				Value: num,
				level: i,
			}
			node.Top = now
			now.Bottom = node
			now = node
		}
		return
	}
	// Case 2: num becomes the new minimum — overwrite the head column with
	// num and re-insert the old head value as a regular element.
	if num.Less(s.Head.Value) {
		var oldHeadValue = s.Head.Value
		now.Value = num
		// Replace head.Value with the new value on every level.
		for now.Bottom != nil {
			now = now.Bottom
			now.Value = num
		}
		var oldRight = now.Right
		// Insert the old head value as a level-1 node right of the head.
		var node = &Node{
			Value: oldHeadValue,
			level: 1,
		}
		if oldRight != nil {
			node.Right = oldRight
			oldRight.Left = node
		}
		node.Left = now
		now.Right = node
		// Randomly extend the re-inserted value's tower upwards.
		// NOTE(review): unlike the regular-insert loop at the end of Add,
		// `node` is never advanced to topNode here, so for i >= 3
		// topNode.Bottom still points at the level-1 node; the vertical
		// links look wrong — confirm against the loop below.
		for i := 2; i <= s.totalLevel; i++ {
			if !NeedNode() {
				break
			}
			var topNode = &Node{
				level: i,
				Value: oldHeadValue,
			}
			topNode.Bottom = node
			node.Top = topNode
			// Climb up one level and splice topNode in after the head.
			if now.Top != nil {
				now = now.Top
				if now.Right != nil {
					var oldRight = now.Right
					topNode.Left = now
					topNode.Right = oldRight
					now.Right = topNode
				} else {
					topNode.Left = now
					now.Right = topNode
				}
			}
		}
		return
	}
	// Case 3: regular insert. Find the level-1 predecessor of num by
	// walking right while the next value is smaller, and dropping down a
	// level when the next value would overshoot.
	for {
		if now.Right != nil {
			// now = now.Right
			if num.Less(now.Right.Value) {
				if now.Right.level == 1 {
					break
				} else {
					now = now.Bottom
					continue
				}
			}
			now = now.Right
		} else {
			if now.Bottom != nil {
				now = now.Bottom
			} else {
				break
			}
		}
	}
	// Splice the new level-1 node in immediately after `now`.
	var oldRight = now.Right
	var node = &Node{
		level: 1,
		Value: num,
	}
	node.Left = now
	now.Right = node
	if oldRight != nil {
		node.Right = oldRight
		oldRight.Left = node
	}
	// Randomly extend the tower: one coin flip per extra level.
	for i := 2; i <= s.totalLevel; i++ {
		if !NeedNode() {
			break
		}
		var topNode = &Node{
			level: i,
			Value: num,
		}
		topNode.Bottom = node
		node.Top = topNode
		// Backtrack left/up to find the predecessor on the next level.
		for {
			if now.level == s.totalLevel {
				break
			}
			if now.Top != nil {
				now = now.Top
				break
			} else {
				now = now.Left
			}
		}
		if now.Right != nil {
			var oldRight = now.Right
			topNode.Left = now
			topNode.Right = oldRight
			now.Right = topNode
		} else {
			topNode.Left = now
			now.Right = topNode
		}
		node = topNode
	}
}
// Erase removes num from the list, returning true when a matching value
// was found and unlinked. Matching uses "neither less than the other".
// NOTE(review): the search walk moves right before comparing, so it can
// step past the target on an upper level and report false for a value
// that exists lower down — verify with adversarial inputs.
func (s *Skiplist) Erase(num Value) bool {
	var now = s.Head
	if now == nil {
		return false
	}
	// Walk right/down until a node comparing equal to num is found, or the
	// bottom-right corner is reached (not found).
	for {
		if now.Right == nil && now.Bottom == nil {
			return false
		}
		if now.Right != nil {
			now = now.Right
		} else {
			if now.Bottom != nil {
				now = now.Bottom
			}
		}
		if !now.Value.Less(num) && !num.Less(now.Value) {
			break
		}
	}
	// Deleting the head value: the head column must stay full height, so
	// promote the second element's value into the head column instead.
	// NOTE(review): this branch uses interface identity (num ==
	// s.Head.Value) rather than the Less-based equality used above, so a
	// distinct-but-equal value will not take it — confirm intent.
	if num == s.Head.Value {
		// Descend to the bottom of the head column.
		for now.Bottom != nil {
			now = now.Bottom
		}
		// The head is the only element: drop the whole list.
		if now.Right == nil {
			s.Head = nil
			return true
		}
		// Copy the second element's value into every level of the head
		// column, then fall through to unlink the second element's tower.
		var secondValue = now.Right.Value
		now.Value = secondValue
		var headNow = now
		for headNow.Top != nil {
			headNow = headNow.Top
			headNow.Value = secondValue
		}
		now = now.Right
	}
	// Unlink the found node on its level and on every level below it.
	for {
		if now == nil {
			return true
		}
		var left = now.Left
		var right = now.Right
		if right != nil {
			left.Right = right
			right.Left = left
		} else {
			left.Right = nil
		}
		now = now.Bottom
	}
}
// List returns every stored value in ascending order by walking the
// bottom level left to right. It returns nil for an empty list.
func (s *Skiplist) List() []Value {
	if s.Head == nil {
		return nil
	}
	// Descend the head column to level 1, which links every element.
	cur := s.Head
	for cur.Bottom != nil {
		cur = cur.Bottom
	}
	out := make([]Value, 0)
	for ; cur != nil; cur = cur.Right {
		out = append(out, cur.Value)
	}
	return out
}
// Graph dumps the list level by level (top level first) to stdout for
// debugging; each printed line is the sequence of values on one level.
func (s *Skiplist) Graph() {
	if s.Head == nil {
		return
	}
	fmt.Println()
	row := s.Head
	for lvl := s.totalLevel; lvl > 0; lvl-- {
		values := make([]Value, 0)
		for n := row; n != nil; n = n.Right {
			values = append(values, n.Value)
		}
		fmt.Println(values)
		row = row.Bottom
	}
}
// NeedNode flips a fair coin; Add uses it to decide whether a value's
// tower grows by another level.
func NeedNode() bool {
	// R.Int is non-negative, so testing the low bit equals the %2 check.
	return R.Int()&1 == 0
}
|
package models
import(
"encoding/json"
)
/**
 * Type29Enum is an int-backed enum serialized to and from the strings
 * "kRegular" and "kRPO" (see MarshalJSON/UnmarshalJSON).
 */
type Type29Enum int
/**
 * Enum members. Values start at 1 so the zero value is distinguishable
 * from a real member.
 */
const (
	Type29_KREGULAR Type29Enum = 1 + iota
	Type29_KRPO
)
/**
 * MarshalJSON encodes the enum as its JSON string representation.
 */
func (r Type29Enum) MarshalJSON() ([]byte, error) {
	return json.Marshal(Type29EnumToValue(r))
}
/**
 * UnmarshalJSON decodes a JSON string into the enum; unknown strings map
 * to Type29_KREGULAR (the converter's default). A malformed payload now
 * returns the json error instead of being silently ignored, which
 * previously left *r set from an empty string.
 */
func (r *Type29Enum) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	*r = Type29EnumFromValue(s)
	return nil
}
/**
 * Type29EnumToValue converts Type29Enum to its string representation;
 * any unrecognized value falls back to "kRegular".
 */
func Type29EnumToValue(type29Enum Type29Enum) string {
	if type29Enum == Type29_KRPO {
		return "kRPO"
	}
	return "kRegular"
}
/**
 * Type29EnumArrayToValue converts a slice of Type29Enum values into the
 * corresponding slice of string representations.
 */
func Type29EnumArrayToValue(type29Enum []Type29Enum) []string {
	convArray := make([]string, len(type29Enum))
	for i, v := range type29Enum {
		convArray[i] = Type29EnumToValue(v)
	}
	return convArray
}
/**
 * Type29EnumFromValue parses a string into its enum member; anything
 * unrecognized maps to Type29_KREGULAR.
 */
func Type29EnumFromValue(value string) Type29Enum {
	if value == "kRPO" {
		return Type29_KRPO
	}
	return Type29_KREGULAR
}
|
package animo
import (
"context"
"errors"
"github.com/go-kit/kit/endpoint"
)
// ResolveProfilesAliasesRequest asks to translate profile aliases to ids.
type ResolveProfilesAliasesRequest struct {
	ProfilesAliases []string `json:"profilesAliases"`
}
// ResolveProfilesAliasesResponse carries the resolved ids; Err holds the
// service error message instead of a transport error.
type ResolveProfilesAliasesResponse struct {
	ProfilesIds []string `json:"profilesIds"`
	Err string `json:"err,omitempty"`
}
// InternalGetProfilesRequest loads profiles by id (internal use).
type InternalGetProfilesRequest struct {
	ProfilesIds []string `json:"profilesIds"`
}
// InternalGetProfilesResponse returns the loaded profiles or an error
// message in Err.
type InternalGetProfilesResponse struct {
	Profiles []*Profile `json:"profiles"`
	Err string `json:"err,omitempty"`
}
// GetProfilesRequest loads profiles by alias (aliases are resolved first).
type GetProfilesRequest struct {
	ProfilesAliases []string `json:"profilesAliases"`
}
// GetProfilesResponse returns the loaded profiles or an error message.
type GetProfilesResponse struct {
	Profiles []*Profile `json:"profiles"`
	Err string `json:"err,omitempty"`
}
// SearchProfilesRequest searches profiles by a free-form filter string.
type SearchProfilesRequest struct {
	Filter string `json:"filter"`
}
// SearchProfilesResponse returns the matching profiles or an error.
type SearchProfilesResponse struct {
	Profiles []*Profile `json:"profiles"`
	Err string `json:"err,omitempty"`
}
// UpdateProfilesRequest updates the profiles named by aliases; the two
// slices are matched by position.
type UpdateProfilesRequest struct {
	ProfilesAliases []string `json:"profilesAliases"`
	Profiles []*Profile `json:"profiles"`
}
// UpdateProfilesResponse returns the updated profiles or an error.
type UpdateProfilesResponse struct {
	Profiles []*Profile `json:"profiles"`
	Err string `json:"err,omitempty"`
}
// Endpoints bundles all go-kit endpoints of the service.
type Endpoints struct {
	ResolveAliasesEndpoint endpoint.Endpoint
	InternalGetProfilesEndpoint endpoint.Endpoint
	GetProfilesEndpoint endpoint.Endpoint
	SearchProfilesEndpoint endpoint.Endpoint
	UpdateProfilesEndpoint endpoint.Endpoint
}
// MakeResolveProfilesAliasesEndpoint builds the endpoint mapping profile
// aliases to profile ids. Service errors are reported via the response's
// Err field rather than as a transport error.
func MakeResolveProfilesAliasesEndpoint(svc AnimoService) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		req := request.(ResolveProfilesAliasesRequest)
		ids, err := svc.ResolveProfilesAliases(ctx, req.ProfilesAliases)
		if err != nil {
			return ResolveProfilesAliasesResponse{ProfilesIds: []string{}, Err: err.Error()}, nil
		}
		return ResolveProfilesAliasesResponse{ProfilesIds: ids}, nil
	}
}
// MakeInternalGetProfilesEndpoint builds the internal endpoint that loads
// profiles by id. Service errors surface in the response's Err field.
func MakeInternalGetProfilesEndpoint(svc AnimoService) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		req := request.(InternalGetProfilesRequest)
		profiles, err := svc.GetProfiles(ctx, req.ProfilesIds)
		if err != nil {
			return InternalGetProfilesResponse{Profiles: []*Profile{}, Err: err.Error()}, nil
		}
		return InternalGetProfilesResponse{Profiles: profiles}, nil
	}
}
// MakeGetProfilesEndpoint builds the endpoint that resolves aliases and
// then loads the corresponding profiles. Service errors surface in the
// response's Err field.
func MakeGetProfilesEndpoint(svc AnimoService) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		req := request.(GetProfilesRequest)
		profilesIds, err := svc.ResolveProfilesAliases(ctx, req.ProfilesAliases)
		// Previously this error was shadowed by the GetProfiles call, so a
		// failed resolution silently proceeded with bogus ids.
		if err != nil {
			return GetProfilesResponse{[]*Profile{}, err.Error()}, nil
		}
		profiles, err := svc.GetProfiles(ctx, profilesIds)
		if err != nil {
			return GetProfilesResponse{[]*Profile{}, err.Error()}, nil
		}
		return GetProfilesResponse{profiles, ""}, nil
	}
}
// MakeSearchProfilesEndpoint builds the endpoint that searches profiles
// with a free-form filter. Service errors surface in the response's Err
// field.
func MakeSearchProfilesEndpoint(svc AnimoService) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		req := request.(SearchProfilesRequest)
		profiles, err := svc.SearchProfiles(ctx, req.Filter)
		if err != nil {
			return SearchProfilesResponse{Profiles: []*Profile{}, Err: err.Error()}, nil
		}
		return SearchProfilesResponse{Profiles: profiles}, nil
	}
}
// MakeUpdateProfilesEndpoint builds the endpoint that updates profiles.
// Only the single alias "me" may be updated, and the alias and profile
// slices must be the same length. Service errors surface in the
// response's Err field.
func MakeUpdateProfilesEndpoint(svc AnimoService) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		req := request.(UpdateProfilesRequest)
		if len(req.ProfilesAliases) != 1 || req.ProfilesAliases[0] != "me" {
			return UpdateProfilesResponse{[]*Profile{}, "access is allowed only to self resource"}, nil
		}
		if len(req.ProfilesAliases) != len(req.Profiles) {
			return nil, errors.New("aliases and profiles have different lengths")
		}
		profilesIds, err := svc.ResolveProfilesAliases(ctx, req.ProfilesAliases)
		// Previously this error was shadowed by the UpdateProfiles call,
		// so a failed resolution silently attempted the update anyway.
		if err != nil {
			return UpdateProfilesResponse{[]*Profile{}, err.Error()}, nil
		}
		profiles, err := svc.UpdateProfiles(ctx, profilesIds, req.Profiles)
		if err != nil {
			return UpdateProfilesResponse{[]*Profile{}, err.Error()}, nil
		}
		return UpdateProfilesResponse{profiles, ""}, nil
	}
}
|
package zoidberg
// Discoverer produces a snapshot of the current cluster state.
type Discoverer interface {
	Discover() (Discovery, error)
}
// Discovery is one snapshot: the known load balancers and deployed apps.
type Discovery struct {
	Balancers []Balancer `json:"balancers"`
	Apps Apps `json:"apps"`
}
|
package unimatrix
// NewArtifactsOperation returns an Operation scoped to the "artifacts"
// resource of the given realm — a thin wrapper over NewRealmOperation
// (defined elsewhere in this package).
func NewArtifactsOperation(realm string) *Operation {
	return NewRealmOperation(realm, "artifacts")
}
|
/*
Copyright 2015 All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package main provides a transparent authentication proxy suited for use with Keycloak as OIDC identity provider
*/
package main
import (
"time"
)
// Config is the configuration for the proxy
type Config struct {
// ConfigFile is the binding interface
ConfigFile string `json:"config" yaml:"config" usage:"path the a configuration file" env:"CONFIG_FILE"`
// Listen defines the binding interface for main listener, e.g. {address}:{port}. This is required and there is no default value.
Listen string `json:"listen" yaml:"listen" usage:"Defines the binding interface for main listener, e.g. {address}:{port}. This is required and there is no default value" env:"LISTEN"`
// ListenHTTP is the interface to bind the http only service on
ListenHTTP string `json:"listen-http" yaml:"listen-http" usage:"interface we should be listening to for HTTP traffic" env:"LISTEN_HTTP"`
// ListenAdmin defines the interface to bind admin-only endpoint (live-status, debug, prometheus...). If not defined, this defaults to the main listener defined by Listen.
ListenAdmin string `json:"listen-admin" yaml:"listen-admin" usage:"defines the interface to bind admin-only endpoint (live-status, debug, prometheus...). If not defined, this defaults to the main listener defined by Listen" env:"LISTEN_ADMIN"`
// ListenAdminScheme defines the scheme admin endpoints are served with. If not defined, same as main listener.
ListenAdminScheme string `json:"listen-admin-scheme" yaml:"listen-admin-scheme" usage:"scheme to serve admin-only endpoint (http or https)." env:"LISTEN_ADMIN_SCHEME"`
// DiscoveryURL is the url for the keycloak server
DiscoveryURL string `json:"discovery-url" yaml:"discovery-url" usage:"discovery url to retrieve the openid configuration" env:"DISCOVERY_URL"`
// ClientID is the client id
ClientID string `json:"client-id" yaml:"client-id" usage:"client id used to authenticate to the oauth service" env:"CLIENT_ID"`
// ClientSecret is the secret for AS
ClientSecret string `json:"client-secret" yaml:"client-secret" usage:"client secret used to authenticate to the oauth service" env:"CLIENT_SECRET"`
// RedirectionURL the redirection url
RedirectionURL string `json:"redirection-url" yaml:"redirection-url" usage:"redirection url for the oauth callback url, defaults to host header is absent" env:"REDIRECTION_URL"`
// RevocationEndpoint is the token revocation endpoint to revoke refresh tokens
RevocationEndpoint string `json:"revocation-url" yaml:"revocation-url" usage:"url for the revocation endpoint to revoke refresh token" env:"REVOCATION_URL"`
// SkipOpenIDProviderTLSVerify skips the tls verification for openid provider communication
SkipOpenIDProviderTLSVerify bool `json:"skip-openid-provider-tls-verify" yaml:"skip-openid-provider-tls-verify" usage:"skip the verification of any TLS communication with the openid provider"`
// OpenIDProviderProxy proxy for openid provider communication
OpenIDProviderProxy string `json:"openid-provider-proxy" yaml:"openid-provider-proxy" usage:"proxy for communication with the openid provider"`
// OpenIDProviderTimeout is the timeout used to pulling the openid configuration from the provider
OpenIDProviderTimeout time.Duration `json:"openid-provider-timeout" yaml:"openid-provider-timeout" usage:"timeout for openid configuration on .well-known/openid-configuration"`
// OpenIDProviderCA is the certificate authority issuing the TLS certificate for the OpenID provider
OpenIDProviderCA string `json:"openid-provider-ca" yaml:"openid-provider-ca" usage:"certificate authority for openid configuration endpoints"`
// BaseURI is prepended to all the generated URIs
BaseURI string `json:"base-uri" yaml:"base-uri" usage:"common prefix for all URIs" env:"BASE_URI"`
// OAuthURI is the uri for the oauth endpoints for the proxy
OAuthURI string `json:"oauth-uri" yaml:"oauth-uri" usage:"the uri for proxy oauth endpoints" env:"OAUTH_URI"`
// Scopes is a list of scope we should request
Scopes []string `json:"scopes" yaml:"scopes" usage:"list of scopes requested when authenticating the user"`
// RequiredScopes is a list of scope we require for a token to be valid
RequiredScopes []string `json:"required-scopes" yaml:"required-scopes" usage:"list of scopes required when authenticating the user"`
// Upstream is the upstream endpoint i.e whom were proxying to
Upstream string `json:"upstream-url" yaml:"upstream-url" usage:"url for the upstream endpoint you wish to proxy" env:"UPSTREAM_URL"`
// UpstreamCA is the path to a CA certificate in PEM format to validate the upstream certificate
UpstreamCA string `json:"upstream-ca" yaml:"upstream-ca" usage:"the path to a file container a CA certificate to validate the upstream tls endpoint" env:"UPSTREAM_CA"`
// Resources is a list of protected resources
Resources []*Resource `json:"resources" yaml:"resources" usage:"list of resources 'uri=/admin*|methods=GET,PUT|roles=role1,role2'"`
// Headers permits adding customs headers across the board
Headers map[string]string `json:"headers" yaml:"headers" usage:"custom headers to the upstream request, key=value"`
// PreserveHost preserves the host header of the proxied request in the upstream request. Disabled by default.
PreserveHost bool `json:"preserve-host" yaml:"preserve-host" usage:"preserve the host header of the proxied request in the upstream request. Disabled by default" env:"PRESERVE_HOST"`
// RequestIDHeader is the header name for request ids
RequestIDHeader string `json:"request-id-header" yaml:"request-id-header" usage:"the http header name for request id" env:"REQUEST_ID_HEADER"`
// ResponseHeader is a map of response headers to add to the response
ResponseHeaders map[string]string `json:"response-headers" yaml:"response-headers" usage:"custom headers to be added to the http response key=value"`
// EnableSelfSignedTLS indicates we should create a self-signed certificate for the service
EnabledSelfSignedTLS bool `json:"enable-self-signed-tls" yaml:"enable-self-signed-tls" usage:"create self signed certificates for the proxy" env:"ENABLE_SELF_SIGNED_TLS"`
// SelfSignedTLSHostnames is the list of hostnames to place on the certificate
SelfSignedTLSHostnames []string `json:"self-signed-tls-hostnames" yaml:"self-signed-tls-hostnames" usage:"a list of hostnames to place on the self-signed certificate"`
// SelfSignedTLSExpiration is the expiration time of the tls certificate before rotation occurs
SelfSignedTLSExpiration time.Duration `json:"self-signed-tls-expiration" yaml:"self-signed-tls-expiration" usage:"the expiration of the certificate before rotation"`
// EnableRequestID indicates the proxy should add request id if none if found
EnableRequestID bool `json:"enable-request-id" yaml:"enable-request-id" usage:"indicates we should add a request id if none found" env:"ENABLE_REQUEST_ID"`
// EnableLogoutRedirect indicates we should redirect to the identity provider for logging out
EnableLogoutRedirect bool `json:"enable-logout-redirect" yaml:"enable-logout-redirect" usage:"indicates we should redirect to the identity provider for logging out"`
// EnableDefaultDeny indicates we should deny by default all requests
EnableDefaultDeny bool `json:"enable-default-deny" yaml:"enable-default-deny" usage:"enables a default denial on all requests, you have to explicitly say what is permitted (recommended)" env:"ENABLE_DEFAULT_DENY"`
// EnableDefaultNotFound: makes explicit resources routing mandatory (i.e. responds with 404 NotFound, even if authenticated)
EnableDefaultNotFound bool `json:"enable-default-notfound" yaml:"enable-default-notfound" usage:"makes explicit resources routing mandatory (i.e. responds with 404 NotFound, even if authenticated)" env:"ENABLE_DEFAULT_NOTFOUND"`
// EnableEncryptedToken indicates the access token should be encoded
EnableEncryptedToken bool `json:"enable-encrypted-token" yaml:"enable-encrypted-token" usage:"enable encryption for the access tokens"`
// ForceEncryptedCookie indicates that the access token in the cookie should be encoded, regardless what EnableEncryptedToken says. This way, gatekeeper may receive tokens in header in the clear, whereas tokens in cookies remain encrypted
ForceEncryptedCookie bool `json:"force-encrypted-cookie" yaml:"force-encrypted-cookie" usage:"force encryption for the access tokens in cookies"`
// EnableLogging indicates if we should log all the requests
EnableLogging bool `json:"enable-logging" yaml:"enable-logging" usage:"enable http logging of the requests"`
// EnableJSONLogging is the logging format
EnableJSONLogging bool `json:"enable-json-logging" yaml:"enable-json-logging" usage:"switch on json logging rather than text"`
// EnableForwarding enables the forwarding proxy
EnableForwarding bool `json:"enable-forwarding" yaml:"enable-forwarding" usage:"enables the forwarding proxy mode, signing outbound request"`
// EnableSecurityFilter enables the security handler
EnableSecurityFilter bool `json:"enable-security-filter" yaml:"enable-security-filter" usage:"enables the security filter handler" env:"ENABLE_SECURITY_FILTER"`
// EnableRefreshTokens indicate's you wish to ignore using refresh tokens and re-auth on expiration of access token
EnableRefreshTokens bool `json:"enable-refresh-tokens" yaml:"enable-refresh-tokens" usage:"enables the handling of the refresh tokens" env:"ENABLE_REFRESH_TOKEN"`
// EnableSessionCookies indicates the cookies, both token and refresh should not be persisted
EnableSessionCookies bool `json:"enable-session-cookies" yaml:"enable-session-cookies" usage:"access and refresh tokens are session only i.e. removed browser close" env:"ENABLE_SESSION_COOKIES"`
// EnableCSRF will generate a new session object (e.g.a cookie, or in a supported backend storage) to store a CSRF token.
// To enable CSRF on upstream endpoints, an additional EnableCSRF is needed in the Resource config section.
EnableCSRF bool `json:"enable-csrf" yaml:"enable-csrf" usage:"when enabled, this automatically adds a CSRF token to all responses. Matching token expected for next request is stored in the session (e.g. cookie or storage)" env:"ENABLE_CSRF"`
// CSRFCookieName sets the name of the CSRF (encrypted) cookie, when session storage is a cookie (defaults to kc-csrf).
// Note that in this case EncryptionKey is required to encrypt the cookie.
CSRFCookieName string `json:"csrf-cookie-name" yaml:"csrf-cookie-name" usage:"the name of CSRF cookie. Defaults to: kc-csrf" env:"CSRF_COOKIE_NAME"`
// CSRFHeader sets the header used in requests and response for the CSRF challenge (defaults to X-CSRF-Token)
CSRFHeader string `json:"csrf-header" yaml:"csrf-header" usage:"the header added to responses by gatekeeper and to be added by requests to check against replayed credentials (CSRF). Defaults to: X-CSRF-Token" env:"CSRF_HEADER"`
// EnableLoginHandler indicates we want the login handler enabled
EnableLoginHandler bool `json:"enable-login-handler" yaml:"enable-login-handler" usage:"enables the handling of the refresh tokens" env:"ENABLE_LOGIN_HANDLER"`
// EnableTokenHeader adds the JWT token to the upstream authentication headers as X-Auth-Token header
EnableTokenHeader bool `json:"enable-token-header" yaml:"enable-token-header" usage:"enables the token authentication header X-Auth-Token to upstream" env:"ENABLE_TOKEN_HEADER"`
// EnableClaimsHeaders adds decoded claims as headers X-Auth-{claim} to the upstream endpoint
EnableClaimsHeaders bool `json:"enable-claims-headers" yaml:"enable-claims-headers" usage:"adds decoded claims as headers X-Auth-{claim} to the upstream endpoint. Defaults to true" env:"ENABLE_CLAIMS_HEADERS"`
// EnableAuthorizationHeader indicates we should pass the authorization header to the upstream endpoint
EnableAuthorizationHeader bool `json:"enable-authorization-header" yaml:"enable-authorization-header" usage:"adds the authorization header to the proxy request" env:"ENABLE_AUTHORIZATION_HEADER"`
// EnableAuthorizationCookies indicates we should pass the authorization cookies to the upstream endpoint. Defaults to false.
EnableAuthorizationCookies bool `json:"enable-authorization-cookies" yaml:"enable-authorization-cookies" usage:"adds the authorization cookies to the uptream proxy request. Defaults to false" env:"ENABLE_AUTHORIZATION_COOKIES"`
// EnableHTTPSRedirect indicate we should redirect http -> https
EnableHTTPSRedirect bool `json:"enable-https-redirection" yaml:"enable-https-redirection" usage:"enable the http to https redirection on the http service"`
// EnableProfiling indicates if profiles is switched on
EnableProfiling bool `json:"enable-profiling" yaml:"enable-profiling" usage:"switching on the golang profiling via pprof on /debug/pprof, /debug/pprof/heap etc" env:"ENABLE_PROFILING"`
// EnableMetrics indicates if the metrics is enabled (default: true)
EnableMetrics bool `json:"enable-metrics" yaml:"enable-metrics" usage:"enable the prometheus metrics collector on /oauth/metrics (enabled by default)" env:"ENABLE_METRICS"`
// TracingExporter defines the exporter for traces. Default is jaeger.
TracingExporter string `json:"tracing-exporter" yaml:"tracing-exporter" usage:"select tracing exporter (jaeger|datadog). Default is jaeger"`
// EnableTracing indicates if a tracing exporter is enabled
EnableTracing bool `json:"enable-tracing" yaml:"enable-tracing" usage:"enable the opencensus trace collector on /oauth/zpages" env:"ENABLE_TRACING"`
// TracingAgentEndpoint register the jaeger agent collecting trace spans
TracingAgentEndpoint string `json:"tracing-agent-endpoint" yaml:"tracing-agent-endpoint" usage:"register the opencensus trace collector agent" env:"TRACING_AGENT_ENDPOINT"`
// EnableBrowserXSSFilter indicates you want the filter on
EnableBrowserXSSFilter bool `json:"filter-browser-xss" yaml:"filter-browser-xss" usage:"enable the adds the X-XSS-Protection header with mode=block"`
// EnableContentNoSniff indicates you want the filter on
EnableContentNoSniff bool `json:"filter-content-nosniff" yaml:"filter-content-nosniff" usage:"adds the X-Content-Type-Options header with the value nosniff"`
// EnableFrameDeny indicates the filter is on
EnableFrameDeny bool `json:"filter-frame-deny" yaml:"filter-frame-deny" usage:"enable to the frame deny header"`
// ContentSecurityPolicy allows the Content-Security-Policy header value to be set with a custom value
ContentSecurityPolicy string `json:"content-security-policy" yaml:"content-security-policy" usage:"specify the content security policy"`
// EnableSTS adds the X-Transport-Strict-Transport-Security with some sensible default seconds and subdomains allowed (no STS preload)
EnableSTS bool `json:"filter-sts" yaml:"filter-sts" usage:"adds the X-Transport-Strict-Transport-Security header, without the preload option"`
// EnableSTSPreload adds the X-Transport-Strict-Transport-Security with some sensible default seconds and subdomains allowed (with STS preload)
EnableSTSPreload bool `json:"filter-sts-preload" yaml:"filter-sts-preload" usage:"adds the X-Transport-Strict-Transport-Security header (with STS preload)"`
// LocalhostMetrics indicates that metrics can only be consumed from localhost
LocalhostMetrics bool `json:"localhost-metrics" yaml:"localhost-metrics" usage:"enforces the metrics page can only been requested from 127.0.0.1"`
// AccessTokenDuration is default duration applied to the access token cookie
AccessTokenDuration time.Duration `json:"access-token-duration" yaml:"access-token-duration" usage:"fallback cookie duration for the access token when using refresh tokens"`
// CookieDomain is a list of domains the cookie is available to
CookieDomain string `json:"cookie-domain" yaml:"cookie-domain" usage:"domain the access cookie is available to, defaults host header" env:"COOKIE_DOMAIN"`
// CookieAccessName is the name of the access cookie holding the access token
CookieAccessName string `json:"cookie-access-name" yaml:"cookie-access-name" usage:"name of the cookie use to hold the access token"`
// CookieRefreshName is the name of the refresh cookie
CookieRefreshName string `json:"cookie-refresh-name" yaml:"cookie-refresh-name" usage:"name of the cookie used to hold the encrypted refresh token"`
// SameSiteCookie enforces cookies to be send only to same site requests. Defaults to Lax.
SameSiteCookie string `json:"same-site-cookie" yaml:"same-site-cookie" usage:"enforces cookies to be send only to same site requests according to the policy (can be Strict|Lax|None). Defaults to Lax" env:"SAME_SITE_COOKIE"`
// SecureCookie enforces the cookie as secure. Defaults to true.
SecureCookie bool `json:"secure-cookie" yaml:"secure-cookie" usage:"enforces the cookie to be secure. Defaults to true." env:"SECURE_COOKIE"`
// HTTPOnlyCookie enforces the cookie as http only. Defaults to true.
HTTPOnlyCookie bool `json:"http-only-cookie" yaml:"http-only-cookie" usage:"enforces the cookie is in http only mode. Defaults to true" env:"HTTP_ONLY_COOKIE"`
// MatchClaims is a series of checks, the claims in the token must match those here
MatchClaims map[string]string `json:"match-claims" yaml:"match-claims" usage:"keypair values for matching access token claims e.g. aud=myapp, iss=http://example.*"`
// AddClaims is a series of claims that should be added to the auth headers
AddClaims []string `json:"add-claims" yaml:"add-claims" usage:"extra claims from the token and inject into headers, e.g given_name -> X-Auth-Given-Name"`
// TLSCertificate is the location for a tls certificate
TLSCertificate string `json:"tls-cert" yaml:"tls-cert" usage:"path to ths TLS certificate" env:"TLS_CERTIFICATE"`
// TLSPrivateKey is the location of a tls private key
TLSPrivateKey string `json:"tls-private-key" yaml:"tls-private-key" usage:"path to the private key for TLS" env:"TLS_PRIVATE_KEY"`
// TLSCaCertificate is the CA certificate which the client cert must be signed
TLSCaCertificate string `json:"tls-ca-certificate" yaml:"tls-ca-certificate" usage:"path to the ca certificate used for signing requests" env:"TLS_CA_CERTIFICATE"`
// TLSCaPrivateKey is the CA private key used for signing
TLSCaPrivateKey string `json:"tls-ca-key" yaml:"tls-ca-key" usage:"path the ca private key, used by the forward signing proxy" env:"TLS_CA_PRIVATE_KEY"`
// TLSClientCertificate is path to a client certificate to use for outbound connections
TLSClientCertificate string `json:"tls-client-certificate" yaml:"tls-client-certificate" usage:"path to the client certificate for outbound connections in reverse and forwarding proxy modes" env:"TLS_CLIENT_CERTIFICATE"`
// TLSClientCertificates is an array of paths to client certificates to use for outbound connections
TLSClientCertificates []string `json:"tls-client-certificates" yaml:"tls-client-certificates" usage:"paths to client certificates for outbound connections in reverse and forwarding proxy modes" env:"TLS_CLIENT_CERTIFICATES"`
// TLSUseModernSettings sets all TLS options for proxy listener to modern settings (TLS 1.2, advanced cipher suites, ...)
TLSUseModernSettings bool `json:"tls-use-modern-settings" yaml:"tls-use-modern-settings" usage:"sets all TLS options for proxy listener to modern settings (TLS 1.2, advanced cipher suites, ...)" env:"TLS_USE_MODERN_SETTINGS"`
// TLSMinVersion is the minimum TLS protocol version accepted by proxy listener. TLS 1.0 is the default.
TLSMinVersion string `json:"tls-min-version" yaml:"tls-min-version" usage:"the minimum TLS protocol version accepted by proxy listener. Accepted values are: SSL3.0, TLS1.0, TLS1.1, TLS1.2. TLS1.0 is the default" env:"TLS_MIN_VERSION"`
// TLSCipherSuites is the list of cipher suites accepted by server during TLS negotiation. Defaults to golang TLS supported suites.
TLSCipherSuites []string `json:"tls-cipher-suites" yaml:"tls-cipher-suites" usage:"the list of cipher suites accepted by server during TLS negotiation. Defaults to golang TLS supported suites" env:"TLS_CIPHER_SUITES"`
// TLSPreferServerCipherSuites indicates the TLS negotiation prefers server cipher suites
TLSPreferServerCipherSuites bool `json:"tls-prefer-server-cipher-suites" yaml:"tls-prefer-server-cipher-suites" usage:"indicates the TLS negotiation prefers server cipher suites" env:"TLS_PREFER_SERVER_CIPHER_SUITES"`
// TLSCurvePreferences indicate the server preferred cipher curves
TLSCurvePreferences []string `json:"tls-curve-preferences" yaml:"tls-curve-preferences" usage:"the server preferred cipher curves" env:"TLS_CURVE_PREFERENCES"`
// SkipUpstreamTLSVerify skips the verification of any upstream tls
SkipUpstreamTLSVerify bool `json:"skip-upstream-tls-verify" yaml:"skip-upstream-tls-verify" usage:"skip the verification of any upstream TLS" env:"SKIP_UPSTREAM_TLS_VERIFY"`
// TLSAdminCertificate is the location for a tls certificate for admin https endpoint. Defaults to TLSCertificate.
TLSAdminCertificate string `json:"tls-admin-cert" yaml:"tls-admin-cert" usage:"path to ths TLS certificate" env:"TLS_ADMIN_CERTIFICATE"`
// TLSAdminPrivateKey is the location of a tls private key for admin https endpoint. Defaults to TLSPrivateKey
TLSAdminPrivateKey string `json:"tls-admin-private-key" yaml:"tls-admin-private-key" usage:"path to the private key for TLS" env:"TLS_ADMIN_PRIVATE_KEY"`
// TLSCaCertificate is the CA certificate which the client cert must be signed
TLSAdminCaCertificate string `json:"tls-admin-ca-certificate" yaml:"tls-admin-ca-certificate" usage:"path to the ca certificate used for signing requests" env:"TLS_ADMIN_CA_CERTIFICATE"`
// TLSAdminClientCertificate is path to a client certificate to use for admin endpoint
TLSAdminClientCertificate string `json:"tls-admin-client-certificate" yaml:"tls-admin-client-certificate" usage:"path to the client certificate for admin endpoint" env:"TLS_ADMIN_CLIENT_CERTIFICATE"`
// TLSAdminClientCertificates is an array of paths to client certificates to use for admin endpoint
TLSAdminClientCertificates []string `json:"tls-admin-client-certificates" yaml:"tls-admin-client-certificates" usage:"paths to client certificates for admin endpoint" env:"TLS_ADMIN_CLIENT_CERTIFICATES"`
// CorsOrigins is a list of origins permitted
CorsOrigins []string `json:"cors-origins" yaml:"cors-origins" usage:"origins to add to the CORE origins control (Access-Control-Allow-Origin)"`
// CorsMethods is a set of access control methods
CorsMethods []string `json:"cors-methods" yaml:"cors-methods" usage:"methods permitted in the access control (Access-Control-Allow-Methods)"`
// CorsHeaders is a set of cors headers
CorsHeaders []string `json:"cors-headers" yaml:"cors-headers" usage:"set of headers to add to the CORS access control (Access-Control-Allow-Headers)"`
// CorsExposedHeaders are the exposed header fields
CorsExposedHeaders []string `json:"cors-exposed-headers" yaml:"cors-exposed-headers" usage:"expose cors headers access control (Access-Control-Expose-Headers)"`
// CorsCredentials set the credentials flag
CorsCredentials bool `json:"cors-credentials" yaml:"cors-credentials" usage:"credentials access control header (Access-Control-Allow-Credentials)"`
// CorsMaxAge is the age for CORS
CorsMaxAge time.Duration `json:"cors-max-age" yaml:"cors-max-age" usage:"max age applied to cors headers (Access-Control-Max-Age)"`
// CorsDisableUpstream disables CORS headers prepared by the gatekeeper from the relayed upstream response (deprecated)
CorsDisableUpstream bool `json:"cors-disable-upstream" yaml:"cors-disable-upstream" usage:"Deprecated: do not extend CORS support to upstream responses: only gatekeeper endpoints are CORS-enabled"`
// Hostnames is a list of hostname's the service should response to
Hostnames []string `json:"hostnames" yaml:"hostnames" usage:"list of hostnames the service will respond to"`
// Store is a url for a store resource, used to hold the refresh tokens
StoreURL string `json:"store-url" yaml:"store-url" usage:"url for the storage subsystem, e.g redis://127.0.0.1:6379, file:///etc/tokens.file"`
// EncryptionKey is the encryption key used to encrypt the refresh token
EncryptionKey string `json:"encryption-key" yaml:"encryption-key" usage:"encryption key used to encryption the session state" env:"ENCRYPTION_KEY"`
// InvalidAuthRedirectsWith303 will make requests with invalid auth headers redirect using HTTP 303 instead of HTTP 307. See github.com/keycloak/keycloak-gatekeeper/issues/292 for context.
InvalidAuthRedirectsWith303 bool `json:"invalid-auth-redirects-with-303" yaml:"invalid-auth-redirects-with-303" usage:"use HTTP 303 redirects instead of 307 for invalid auth tokens"`
// NoRedirects informs we should hand back a 401 not a redirect
NoRedirects bool `json:"no-redirects" yaml:"no-redirects" usage:"do not have back redirects when no authentication is present, 401 them"`
// SkipTokenVerification tells the service to skip verifying the access token - for testing purposes
SkipTokenVerification bool `json:"skip-token-verification" yaml:"skip-token-verification" usage:"TESTING ONLY; bypass token verification, only expiration and roles enforced"`
// UpstreamKeepalives specifies whether we use keepalives on the upstream
UpstreamKeepalives bool `json:"upstream-keepalives" yaml:"upstream-keepalives" usage:"enables or disables the keepalive connections for upstream endpoint"`
// UpstreamTimeout is the maximum amount of time a dial will wait for a connect to complete. Defaults to 10s
UpstreamTimeout time.Duration `json:"upstream-timeout" yaml:"upstream-timeout" usage:"maximum amount of time a dial will wait for a connect to complete. Defaults to 10s" env:"UPSTREAM_TIMEOUT"`
// UpstreamKeepaliveTimeout is the upstream keepalive timeout. Defaults to 10s
UpstreamKeepaliveTimeout time.Duration `json:"upstream-keepalive-timeout" yaml:"upstream-keepalive-timeout" usage:"specifies the keep-alive period for an active network connection. Defaults to 10s" env:"UPSTREAM_KEEPALIVE_TIMEOUT"`
// UpstreamTLSHandshakeTimeout is the timeout for upstream to tls handshake
UpstreamTLSHandshakeTimeout time.Duration `json:"upstream-tls-handshake-timeout" yaml:"upstream-tls-handshake-timeout" usage:"the timeout placed on the tls handshake for upstream"`
// UpstreamResponseHeaderTimeout is the timeout for upstream header response
UpstreamResponseHeaderTimeout time.Duration `json:"upstream-response-header-timeout" yaml:"upstream-response-header-timeout" usage:"the timeout placed on the response header for upstream"`
// UpstreamExpectContinueTimeout is the timeout expect continue for upstream
UpstreamExpectContinueTimeout time.Duration `json:"upstream-expect-continue-timeout" yaml:"upstream-expect-continue-timeout" usage:"the timeout placed on the expect continue for upstream"`
// Verbose switches on debug logging
Verbose bool `json:"verbose" yaml:"verbose" usage:"switch on debug / verbose logging"`
// EnableProxyProtocol controls the proxy protocol
EnableProxyProtocol bool `json:"enabled-proxy-protocol" yaml:"enabled-proxy-protocol" usage:"enable proxy protocol"`
// MaxIdleConns is the max idle connections to keep alive, ready for reuse
MaxIdleConns int `json:"max-idle-connections" yaml:"max-idle-connections" usage:"max idle upstream / keycloak connections to keep alive, ready for reuse"`
// MaxIdleConnsPerHost limits the number of idle connections maintained per host
MaxIdleConnsPerHost int `json:"max-idle-connections-per-host" yaml:"max-idle-connections-per-host" usage:"limits the number of idle connections maintained per host"`
// ServerReadTimeout is the read timeout on the http server
ServerReadTimeout time.Duration `json:"server-read-timeout" yaml:"server-read-timeout" usage:"the server read timeout on the http server"`
// ServerWriteTimeout is the write timeout on the http server. Defaults to 11s (should be larger than UpstreamTimeout)
ServerWriteTimeout time.Duration `json:"server-write-timeout" yaml:"server-write-timeout" usage:"the server write timeout on the http server"`
// ServerIdleTimeout is the idle timeout on the http server
ServerIdleTimeout time.Duration `json:"server-idle-timeout" yaml:"server-idle-timeout" usage:"the server idle timeout on the http server" env:"SERVER_IDLE_TIMEOUT"`
// UseLetsEncrypt controls if we should use letsencrypt to retrieve certificates
UseLetsEncrypt bool `json:"use-letsencrypt" yaml:"use-letsencrypt" usage:"use letsencrypt for certificates"`
// LetsEncryptCacheDir is the path to store letsencrypt certificates
LetsEncryptCacheDir string `json:"letsencrypt-cache-dir" yaml:"letsencrypt-cache-dir" usage:"path where cached letsencrypt certificates are stored"`
// SignInPage is the relative url for the sign in page
SignInPage string `json:"sign-in-page" yaml:"sign-in-page" usage:"path to custom template displayed for signin"`
// ForbiddenPage is a access forbidden page
ForbiddenPage string `json:"forbidden-page" yaml:"forbidden-page" usage:"path to custom template used for access forbidden"`
// Tags is passed to the templates
Tags map[string]string `json:"tags" yaml:"tags" usage:"keypairs passed to the templates at render,e.g title=Page"`
// ForwardingUsername is the username to login to the oauth service
ForwardingUsername string `json:"forwarding-username" yaml:"forwarding-username" usage:"username to use when logging into the openid provider" env:"FORWARDING_USERNAME"`
// ForwardingPassword is the password to use for the above
ForwardingPassword string `json:"forwarding-password" yaml:"forwarding-password" usage:"password to use when logging into the openid provider" env:"FORWARDING_PASSWORD"`
// ForwardingDomains is a collection of domains to signs
ForwardingDomains []string `json:"forwarding-domains" yaml:"forwarding-domains" usage:"list of domains which should be signed; everything else is relayed unsigned"`
// DisableAllLogging indicates no logging at all
DisableAllLogging bool `json:"disable-all-logging" yaml:"disable-all-logging" usage:"disables all logging to stdout and stderr"`
}
// RequestScope is a request-level context scope passed between middleware.
type RequestScope struct {
	// AccessDenied indicates the request should not be proxied on.
	AccessDenied bool
	// Identity is the user identity attached to the request, if any.
	Identity *userContext
}
// tokenResponse models the JSON body returned by the OAuth2/OIDC token endpoint.
type tokenResponse struct {
	// TokenType is the type of the issued token (e.g. "Bearer").
	TokenType string `json:"token_type"`
	// AccessToken is the issued access token.
	AccessToken string `json:"access_token"`
	// IDToken is the OpenID Connect identity token.
	IDToken string `json:"id_token"`
	// RefreshToken is the optional refresh token.
	RefreshToken string `json:"refresh_token,omitempty"`
	// ExpiresIn is the access token lifetime in seconds.
	ExpiresIn int `json:"expires_in"`
	// Scope is the optional space-separated list of granted scopes.
	Scope string `json:"scope,omitempty"`
}
|
/*
* Copyright (c) 2018 WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package cell
import (
"crypto/rsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"reflect"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/runtime"
corev1informers "k8s.io/client-go/informers/core/v1"
networkv1informers "k8s.io/client-go/informers/networking/v1"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
networkv1listers "k8s.io/client-go/listers/networking/v1"
"k8s.io/client-go/tools/cache"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/cellery-io/mesh-controller/pkg/apis/mesh"
"github.com/cellery-io/mesh-controller/pkg/apis/mesh/v1alpha1"
meshclientset "github.com/cellery-io/mesh-controller/pkg/client/clientset/versioned"
meshinformers "github.com/cellery-io/mesh-controller/pkg/client/informers/externalversions/mesh/v1alpha1"
istioinformers "github.com/cellery-io/mesh-controller/pkg/client/informers/externalversions/networking/v1alpha3"
listers "github.com/cellery-io/mesh-controller/pkg/client/listers/mesh/v1alpha1"
istiov1alpha1listers "github.com/cellery-io/mesh-controller/pkg/client/listers/networking/v1alpha3"
"github.com/cellery-io/mesh-controller/pkg/controller"
"github.com/cellery-io/mesh-controller/pkg/controller/cell/config"
"github.com/cellery-io/mesh-controller/pkg/controller/cell/resources"
controller_commons "github.com/cellery-io/mesh-controller/pkg/controller/commons"
)
// cellHandler reconciles Cell custom resources. It holds the clients and
// informer-backed listers needed to create and update the per-cell
// NetworkPolicy, Secret, Gateway, TokenService, Services and VirtualService.
type cellHandler struct {
	kubeClient kubernetes.Interface
	meshClient meshclientset.Interface
	// networkPilicyLister caches NetworkPolicy objects.
	// NOTE(review): "Pilicy" is a typo for "Policy"; the name is kept here
	// because other functions in this file reference the field as-is.
	networkPilicyLister networkv1listers.NetworkPolicyLister
	secretLister        corev1listers.SecretLister
	cellLister          listers.CellLister
	gatewayLister       listers.GatewayLister
	tokenServiceLister  listers.TokenServiceLister
	serviceLister       listers.ServiceLister
	envoyFilterLister   istiov1alpha1listers.EnvoyFilterLister
	// cellerySecret caches the system-wide Cellery key/cert material;
	// it is refreshed by updateSecret when the system secret changes.
	cellerySecret    config.Secret
	virtualSvcLister istiov1alpha1listers.VirtualServiceLister
	logger           *zap.SugaredLogger
}
// NewController wires up the cell controller: it builds a cellHandler from
// the supplied clients and informer listers, registers event handlers that
// enqueue Cells for reconciliation, and watches the system secret so the
// cached Cellery key material stays current.
func NewController(
	kubeClient kubernetes.Interface,
	meshClient meshclientset.Interface,
	cellInformer meshinformers.CellInformer,
	gatewayInformer meshinformers.GatewayInformer,
	tokenServiceInformer meshinformers.TokenServiceInformer,
	serviceInformer meshinformers.ServiceInformer,
	networkPolicyInformer networkv1informers.NetworkPolicyInformer,
	secretInformer corev1informers.SecretInformer,
	envoyFilterInformer istioinformers.EnvoyFilterInformer,
	systemSecretInformer corev1informers.SecretInformer,
	virtualSvcInformer istioinformers.VirtualServiceInformer,
	logger *zap.SugaredLogger,
) *controller.Controller {
	h := &cellHandler{
		kubeClient:          kubeClient,
		meshClient:          meshClient,
		cellLister:          cellInformer.Lister(),
		serviceLister:       serviceInformer.Lister(),
		gatewayLister:       gatewayInformer.Lister(),
		tokenServiceLister:  tokenServiceInformer.Lister(),
		networkPilicyLister: networkPolicyInformer.Lister(),
		secretLister:        secretInformer.Lister(),
		envoyFilterLister:   envoyFilterInformer.Lister(),
		virtualSvcLister:    virtualSvcInformer.Lister(),
		logger:              logger.Named("cell-controller"),
	}
	c := controller.New(h, h.logger, "Cell")
	h.logger.Info("Setting up event handlers")
	// Enqueue a Cell on add/update/delete so Handle runs for every change.
	cellInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: c.Enqueue,
		UpdateFunc: func(old, new interface{}) {
			h.logger.Debugw("Informer update", "old", old, "new", new)
			c.Enqueue(new)
		},
		DeleteFunc: c.Enqueue,
	})
	// Refresh the cached Cellery system secret whenever it is created or updated.
	systemSecretInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: h.updateSecret,
		UpdateFunc: func(old, new interface{}) {
			h.updateSecret(new)
		},
	})
	return c
}
// Handle is the work-queue callback. It resolves the Cell for the given
// namespace/name key, reconciles a deep copy of it, and then persists any
// resulting status change. Returning a non-nil error requeues the key.
func (h *cellHandler) Handle(key string) error {
	h.logger.Infof("Handle called with %s", key)
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		// A malformed key can never succeed on retry; log and drop it.
		h.logger.Errorf("invalid resource key: %s", key)
		return nil
	}
	cellOriginal, err := h.cellLister.Cells(namespace).Get(name)
	if err != nil {
		if errors.IsNotFound(err) {
			// The Cell was deleted after being enqueued; nothing to do.
			runtime.HandleError(fmt.Errorf("cell '%s' in work queue no longer exists", key))
			return nil
		}
		return err
	}
	h.logger.Debugw("lister instance", key, cellOriginal)
	// Work on a copy so the shared informer cache is never mutated.
	cell := cellOriginal.DeepCopy()
	if err = h.handle(cell); err != nil {
		return err
	}
	if _, err = h.updateStatus(cell); err != nil {
		return err
	}
	return nil
}
// handle runs the per-resource reconciliation steps for the cell in a
// fixed order, stopping at the first failure, and finally recomputes the
// cell's aggregate status.
func (h *cellHandler) handle(cell *v1alpha1.Cell) error {
	steps := []func(*v1alpha1.Cell) error{
		h.handleNetworkPolicy,
		h.handleSecret,
		h.handleGateway,
		h.handleTokenService,
		h.handleServices,
		h.handleVirtualService,
	}
	for _, step := range steps {
		if err := step(cell); err != nil {
			return err
		}
	}
	h.updateCellStatus(cell)
	return nil
}
// handleNetworkPolicy ensures the cell's NetworkPolicy exists, creating it
// when the lister reports it missing. Any other lister/create error is
// returned so the key is retried.
func (h *cellHandler) handleNetworkPolicy(cell *v1alpha1.Cell) error {
	networkPolicy, err := h.networkPilicyLister.NetworkPolicies(cell.Namespace).Get(resources.NetworkPolicyName(cell))
	if errors.IsNotFound(err) {
		networkPolicy, err = h.kubeClient.NetworkingV1().NetworkPolicies(cell.Namespace).Create(resources.CreateNetworkPolicy(cell))
		if err != nil {
			h.logger.Errorf("Failed to create NetworkPolicy %v", err)
			return err
		}
		h.logger.Debugw("NetworkPolicy created", resources.NetworkPolicyName(cell), networkPolicy)
	} else if err != nil {
		return err
	}
	return nil
}
// handleSecret ensures the cell's key-pair Secret exists. When missing, it
// is built from the cached Cellery system secret material and created; any
// other error is returned for retry.
func (h *cellHandler) handleSecret(cell *v1alpha1.Cell) error {
	secret, err := h.secretLister.Secrets(cell.Namespace).Get(resources.SecretName(cell))
	if errors.IsNotFound(err) {
		desiredSecret, err := resources.CreateKeyPairSecret(cell, h.cellerySecret)
		if err != nil {
			h.logger.Errorf("Cannot build the Cell Secret %v", err)
			return err
		}
		secret, err = h.kubeClient.CoreV1().Secrets(cell.Namespace).Create(desiredSecret)
		if err != nil {
			h.logger.Errorf("Failed to create cell Secret %v", err)
			return err
		}
		h.logger.Debugw("Secret created", resources.SecretName(cell), secret)
	} else if err != nil {
		return err
	}
	return nil
}
// handleGateway ensures the cell's Gateway exists — creating it with a
// last-applied-configuration annotation when missing — and mirrors the
// gateway's hostname and status onto the cell status.
func (h *cellHandler) handleGateway(cell *v1alpha1.Cell) error {
	gateway, err := h.gatewayLister.Gateways(cell.Namespace).Get(resources.GatewayName(cell))
	if errors.IsNotFound(err) {
		gateway = resources.CreateGateway(cell)
		// Serialize the minimal applied spec so it can be recorded in the
		// kubectl last-applied-configuration annotation.
		lastAppliedConfig, err := json.Marshal(buildLastAppliedConfig(gateway))
		if err != nil {
			h.logger.Errorf("Failed to create Gateway last applied config %v", err)
			return err
		}
		annotate(gateway, corev1.LastAppliedConfigAnnotation, string(lastAppliedConfig))
		gateway, err = h.meshClient.MeshV1alpha1().Gateways(cell.Namespace).Create(gateway)
		if err != nil {
			h.logger.Errorf("Failed to create Gateway %v", err)
			return err
		}
		h.logger.Debugw("Gateway created", resources.GatewayName(cell), gateway)
	} else if err != nil {
		return err
	}
	cell.Status.GatewayHostname = gateway.Status.HostName
	cell.Status.GatewayStatus = gateway.Status.Status
	return nil
}
// buildLastAppliedConfig returns a minimal copy of the Gateway — type meta,
// name/namespace and spec only — suitable for serializing into the
// last-applied-configuration annotation (status and other metadata are
// deliberately omitted).
func buildLastAppliedConfig(gw *v1alpha1.Gateway) *v1alpha1.Gateway {
	return &v1alpha1.Gateway{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Gateway",
			APIVersion: v1alpha1.SchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      gw.Name,
			Namespace: gw.Namespace,
		},
		Spec: gw.Spec,
	}
}
// annotate sets the annotation name=value on the Gateway, preserving all
// other existing annotations. A fresh map is built so a shared/nil
// annotation map is never mutated in place.
//
// Fix: the previous implementation wrote the new key first and then copied
// the existing annotations over it, so a pre-existing annotation with the
// same key silently won over the supplied value. Existing annotations are
// now copied first and the new key written last, ensuring value wins.
func annotate(gw *v1alpha1.Gateway, name string, value string) {
	annotations := make(map[string]string, len(gw.ObjectMeta.Annotations)+1)
	for k, v := range gw.ObjectMeta.Annotations {
		annotations[k] = v
	}
	annotations[name] = value
	gw.Annotations = annotations
}
// handleTokenService ensures the cell's TokenService exists, creating it
// when the lister reports it missing; other errors are returned for retry.
func (h *cellHandler) handleTokenService(cell *v1alpha1.Cell) error {
	tokenService, err := h.tokenServiceLister.TokenServices(cell.Namespace).Get(resources.TokenServiceName(cell))
	if errors.IsNotFound(err) {
		tokenService, err = h.meshClient.MeshV1alpha1().TokenServices(cell.Namespace).Create(resources.CreateTokenService(cell))
		if err != nil {
			h.logger.Errorf("Failed to create TokenService %v", err)
			return err
		}
		h.logger.Debugw("TokenService created", resources.TokenServiceName(cell), tokenService)
	} else if err != nil {
		return err
	}
	return nil
}
// handleServices reconciles one mesh Service per service template in the
// cell spec: missing Services are created, existing ones are updated when
// their spec has drifted, and Status.ServiceCount is recomputed as the
// number of templates whose Service is considered available.
func (h *cellHandler) handleServices(cell *v1alpha1.Cell) error {
	servicesSpecs := cell.Spec.ServiceTemplates
	cell.Status.ServiceCount = 0
	for _, serviceSpec := range servicesSpecs {
		service, err := h.serviceLister.Services(cell.Namespace).Get(resources.ServiceName(cell, serviceSpec))
		if errors.IsNotFound(err) {
			service, err = h.meshClient.MeshV1alpha1().Services(cell.Namespace).Create(resources.CreateService(cell, serviceSpec))
			if err != nil {
				h.logger.Errorf("Failed to create Service: %s : %v", serviceSpec.Name, err)
				return err
			}
			h.logger.Debugw("Service created", resources.ServiceName(cell, serviceSpec), service)
		} else if err != nil {
			return err
		}
		if service != nil {
			// service exists. if the new obj is not equal to old one, perform an update.
			newService := resources.CreateService(cell, serviceSpec)
			// set the previous service's `ResourceVersion` to the newService
			// Else the issue `metadata.resourceVersion: Invalid value: 0x0: must be specified for an update` will occur.
			newService.ResourceVersion = service.ResourceVersion
			if !isEqual(service, newService) {
				service, err = h.meshClient.MeshV1alpha1().Services(cell.Namespace).Update(newService)
				if err != nil {
					h.logger.Errorf("Failed to update Service: %s : %v", service.Name, err)
					return err
				}
				h.logger.Debugw("Service updated", resources.ServiceName(cell, serviceSpec), service)
			}
		}
		// A service counts toward ServiceCount when it has available replicas,
		// is zero-scaled, or is a job-type service.
		// NOTE(review): service is dereferenced here without a nil check; if the
		// lister can ever return (nil, nil) this would panic — confirm.
		if service.Status.AvailableReplicas > 0 || service.Spec.IsZeroScaled() || service.Spec.Type == v1alpha1.ServiceTypeJob {
			cell.Status.ServiceCount++
		}
	}
	return nil
}
// handleVirtualService ensures the cell's Istio VirtualService exists. A
// nil VS from the builder is legitimate (nothing to route) and ends the
// reconcile quietly; otherwise the VS is annotated with its last-applied
// config and created. Other lister errors are returned for retry.
func (h *cellHandler) handleVirtualService(cell *v1alpha1.Cell) error {
	cellVs, err := h.virtualSvcLister.VirtualServices(cell.Namespace).Get(resources.CellVirtualServiceName(cell))
	if errors.IsNotFound(err) {
		cellVs, err = resources.CreateCellVirtualService(cell, h.cellLister)
		if err != nil {
			h.logger.Errorf("Failed to create Cell VS object %v for instance %s", err, cell.Name)
			return err
		}
		if cellVs == nil {
			h.logger.Debugf("No VirtualService created for cell instance %s", cell.Name)
			return nil
		}
		lastAppliedConfig, err := json.Marshal(controller_commons.BuildVirtualServiceLastAppliedConfig(cellVs))
		if err != nil {
			h.logger.Errorf("Failed to create Cell VS %v for instance %s", err, cell.Name)
			return err
		}
		controller_commons.Annotate(cellVs, corev1.LastAppliedConfigAnnotation, string(lastAppliedConfig))
		cellVs, err = h.meshClient.NetworkingV1alpha3().VirtualServices(cell.Namespace).Create(cellVs)
		if err != nil {
			h.logger.Errorf("Failed to create Cell VirtualService %v for instance %s", err, cell.Name)
			return err
		}
		h.logger.Debugw("Cell VirtualService created", resources.CellVirtualServiceName(cell), cellVs)
	} else if err != nil {
		return err
	}
	return nil
}
// isEqual reports whether two Services are equivalent for reconciliation
// purposes. Only the spec is compared; metadata and status are ignored.
func isEqual(oldService *v1alpha1.Service, newService *v1alpha1.Service) bool {
	// we only consider equality of the spec
	return reflect.DeepEqual(oldService.Spec, newService.Spec)
}
// updateStatus writes the reconciled status back to the API server, but
// only when it differs from the latest cached copy — avoiding no-op
// updates. The status is applied onto the freshly fetched object so other
// fields are not clobbered.
func (h *cellHandler) updateStatus(cell *v1alpha1.Cell) (*v1alpha1.Cell, error) {
	latestCell, err := h.cellLister.Cells(cell.Namespace).Get(cell.Name)
	if err != nil {
		return nil, err
	}
	if !reflect.DeepEqual(latestCell.Status, cell.Status) {
		latestCell.Status = cell.Status
		return h.meshClient.MeshV1alpha1().Cells(cell.Namespace).Update(latestCell)
	}
	return cell, nil
}
// updateCellStatus recomputes the cell's aggregate status: the cell is
// "Ready" when its gateway reports Ready and every service template has a
// counted (available) service; otherwise it is "NotReady". The single
// CellReady condition is set to match.
func (h *cellHandler) updateCellStatus(cell *v1alpha1.Cell) {
	ready := cell.Status.GatewayStatus == "Ready" &&
		int(cell.Status.ServiceCount) == len(cell.Spec.ServiceTemplates)
	status, condition := "Ready", corev1.ConditionTrue
	if !ready {
		status, condition = "NotReady", corev1.ConditionFalse
	}
	cell.Status.Status = status
	cell.Status.Conditions = []v1alpha1.CellCondition{
		{
			Type:   v1alpha1.CellReady,
			Status: condition,
		},
	}
}
// updateSecret is the informer callback for the Cellery system secret. It
// parses the RSA private key, certificate and CA bundle out of the secret
// data and caches them in h.cellerySecret for later key-pair generation.
// Parse failures are logged and leave the corresponding field unset.
//
// Fixes over the previous version:
//   - pem.Decode can return a nil block; dereferencing block.Bytes then
//     panicked — now handled explicitly.
//   - after a key parse/assert failure the code fell through and still
//     assigned the (invalid) key — now each failure short-circuits.
//   - a certificate parse error wrongly cleared s.PrivateKey instead of
//     leaving s.Certificate unset.
func (h *cellHandler) updateSecret(obj interface{}) {
	secret, ok := obj.(*corev1.Secret)
	if !ok {
		return
	}
	// Only the designated system secret is of interest.
	if secret.Name != mesh.SystemSecretName {
		return
	}
	s := config.Secret{}
	if keyBytes, ok := secret.Data["tls.key"]; ok {
		if block, _ := pem.Decode(keyBytes); block == nil {
			h.logger.Errorf("error while parsing cellery-secret tls.key: no PEM block found")
		} else if parsedKey, err := x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
			h.logger.Errorf("error while parsing cellery-secret tls.key: %v", err)
		} else if key, ok := parsedKey.(*rsa.PrivateKey); !ok {
			h.logger.Errorf("error while parsing cellery-secret tls.key: non rsa private key")
		} else {
			s.PrivateKey = key
		}
	} else {
		h.logger.Errorf("Missing tls.key in the cellery secret.")
	}
	if certBytes, ok := secret.Data["tls.crt"]; ok {
		if block, _ := pem.Decode(certBytes); block == nil {
			h.logger.Errorf("error while parsing cellery-secret tls.crt: no PEM block found")
		} else if cert, err := x509.ParseCertificate(block.Bytes); err != nil {
			h.logger.Errorf("error while parsing cellery-secret tls.crt: %v", err)
		} else {
			s.Certificate = cert
		}
	} else {
		h.logger.Errorf("Missing tls.cert in the cellery secret.")
	}
	if certBundle, ok := secret.Data["cert-bundle.pem"]; ok {
		s.CertBundle = certBundle
	} else {
		h.logger.Errorf("Missing cert-bundle.pem in the cellery secret.")
	}
	h.cellerySecret = s
}
|
/*
* Wager service APIs
*
* APIs for a wager system
*
* API version: 1.0.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package controllers
import (
"github.com/ryannguyen89/wager-service/business"
"github.com/ryannguyen89/wager-service/models"
"github.com/ryannguyen89/wager-service/models/errors"
"github.com/ryannguyen89/wager-service/utils"
"log"
"net/http"
"github.com/gin-gonic/gin"
)
// ListWagersGet handles GET /wagers: it binds the paging parameters from
// the request, validates them, fetches the wager list from the business
// layer and writes it out as JSON. All failures are returned as a JSON
// error body with the corresponding HTTP status.
func ListWagersGet(c *gin.Context) {
	var (
		req models.ListWagerRequest
	)
	if err := c.ShouldBind(&req); err != nil {
		log.Printf("Bind request error: %v", err)
		c.JSON(http.StatusBadRequest, models.Error{Error: err.Error()})
		return
	}
	log.Printf("Request: %s", utils.JsonSerialize(req))
	// Validate request
	code, msg := validateListWagerRequest(&req)
	if code != http.StatusOK {
		log.Printf("Validate request error: %s", msg)
		c.JSON(code, models.Error{Error: msg})
		return
	}
	// The business layer returns the HTTP status alongside the payload, so
	// only transport mapping happens here.
	lWagers, code, msg := business.ListWagers(c.Request.Context(), &req)
	if code != http.StatusOK {
		c.JSON(code, models.Error{Error: msg})
		return
	}
	c.JSON(http.StatusOK, lWagers)
}
// validateListWagerRequest checks the paging parameters of a list-wagers
// request. It returns http.StatusOK with an empty message when the request
// is valid, otherwise http.StatusBadRequest and the matching error message.
func validateListWagerRequest(r *models.ListWagerRequest) (code int, message string) {
	switch {
	case r.Page <= 0:
		return http.StatusBadRequest, errors.InvalidPage
	case r.Limit <= 0:
		return http.StatusBadRequest, errors.InvalidLimit
	default:
		return http.StatusOK, ""
	}
}
|
/*
* aggro
*
* Functions for outputting prefix lists in various formats
*
* Copyright (c) 2017 Noora Halme - please see LICENSE.md
*
*/
package main
import (
"fmt"
)
// outputIptables prints the prefix list as Linux iptables commands:
// the per-list ingress/egress chains are flushed, deleted and recreated,
// then one DROP rule is appended per prefix (source match on ingress,
// destination match on egress).
//
// Idiom fix: dropped the non-gofmt trailing semicolons and replaced the
// index loop with a range loop; output is byte-identical.
func outputIptables(name string, prefixList *[]string) {
	fmt.Printf("iptables -F %s-ingress\n", name)
	fmt.Printf("iptables -F %s-egress\n", name)
	fmt.Printf("iptables -X %s-ingress\n", name)
	fmt.Printf("iptables -X %s-egress\n", name)
	fmt.Printf("iptables -N %s-ingress\n", name)
	fmt.Printf("iptables -N %s-egress\n", name)
	for _, prefix := range *prefixList {
		fmt.Printf("iptables -A %s-ingress -s %s -j DROP\n", name, prefix)
		fmt.Printf("iptables -A %s-egress -d %s -j DROP\n", name, prefix)
	}
}
// outputNptables prints the prefix list as Linux nftables commands: a
// dedicated table with input/output filter chains is created, then one
// drop rule per prefix (saddr on input, daddr on output).
//
// Idiom fix: dropped the non-gofmt trailing semicolons and replaced the
// index loop with a range loop; output is byte-identical.
func outputNptables(name string, prefixList *[]string) {
	fmt.Printf("nft add table ip %s\n", name)
	fmt.Printf("nft add chain ip %s input { type filter hook input priority 0 ; }\n", name)
	fmt.Printf("nft add chain ip %s output { type filter hook output priority 0 ; }\n", name)
	for _, prefix := range *prefixList {
		fmt.Printf("nft add rule ip %s input saddr %s drop\n", name, prefix)
		fmt.Printf("nft add rule ip %s output daddr %s drop\n", name, prefix)
	}
}
// outputPf prints the prefix list in BSD/macOS pf format: a named table
// holding all prefixes, plus inbound and outbound quick-drop rules that
// reference it.
//
// Idiom fix: dropped the non-gofmt trailing semicolons and replaced the
// index loop with a range loop; output is byte-identical.
func outputPf(name string, prefixList *[]string) {
	fmt.Printf("table <%s> {", name)
	for _, prefix := range *prefixList {
		fmt.Printf(" %s", prefix)
	}
	fmt.Printf(" }\n")
	fmt.Printf("block drop in quick from { <%s> } to any\n", name)
	fmt.Printf("block drop out quick from any to { <%s> }\n", name)
}
// outputJunos prints the prefix list as JunOS set-commands: a
// policy-options prefix-list followed by a firewall filter that rejects
// matches and passes everything else through.
//
// Idiom fix: dropped the non-gofmt trailing semicolons and replaced the
// index loop with a range loop; output is byte-identical.
func outputJunos(name string, prefixList *[]string) {
	fmt.Printf("set policy-options prefix-list %s [ ", name)
	for _, prefix := range *prefixList {
		fmt.Printf("%s ", prefix)
	}
	fmt.Printf("];\n")
	fmt.Printf("set firewall family inet filter %s term prefix-match from prefix-list %s then reject;\n", name, name)
	fmt.Printf("set firewall family inet filter %s term pass-through then accept;\n", name)
}
// outputPlain prints the prefix list as plain CIDR strings, one per line.
//
// Idiom fix: dropped the non-gofmt trailing semicolon and replaced the
// index loop with a range loop over strings; output is byte-identical.
func outputPlain(prefixList *[]string) {
	for _, prefix := range *prefixList {
		fmt.Println(prefix)
	}
}
|
package model
// Signature stores the signature image. (Original comment was
// mojibake-encoded Russian: "Хранит изображение росписи".)
type Signature struct {
	// Code is the encoded image ("Закодированное изображение").
	// NOTE(review): int64 looks narrow for encoded image data — confirm
	// whether this is actually an external reference/ID rather than the
	// image bytes themselves.
	Code int64 `bson:"signature" json:"signature"`
}
|
// Copyright 2018 Drone.IO Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package web
import "time"
// Options defines website handler options.
type Options struct {
	sync time.Duration // interval used to decide whether a user account needs synchronization
	path string        // local filesystem directory the website is served from
	docs string        // address of the documentation website shown in the UI
}

// Option configures the website handler.
type Option func(*Options)

// WithSync configures the website handler with the duration value
// used to determine if the user account requires synchronization.
func WithSync(d time.Duration) Option {
	return func(opts *Options) {
		opts.sync = d
	}
}

// WithDir configures the website handler with the directory value
// used to serve the website from the local filesystem.
func WithDir(s string) Option {
	return func(opts *Options) {
		opts.path = s
	}
}

// WithDocs configures the website handler with the documentation
// website address, which should be included in the user interface.
func WithDocs(s string) Option {
	return func(opts *Options) {
		opts.docs = s
	}
}
|
package registry
import (
pb "github.com/raviupreti85/naiveDB/registry/proto"
"github.com/raviupreti85/naiveDB/ring"
"golang.org/x/net/context"
)
// RegistryServer exposes the node Registry over gRPC.
type RegistryServer struct {
	// registry is the underlying node registry all RPCs delegate to.
	registry *Registry
}
// NewRegistryServer returns a RegistryServer backed by a fresh, empty registry.
func NewRegistryServer() *RegistryServer {
	return &RegistryServer{registry: NewEmptyRegistry()}
}
// Register adds the given node to the registry and returns the token the
// node must present on subsequent Unregister/GetRing calls.
func (r *RegistryServer) Register(ctx context.Context, node *pb.Node) (*pb.RegisterResponse, error) {
	token, err := r.registry.Register(ring.NewNode(node.Id, node.Host, int(node.Port)))
	if err != nil {
		return nil, err
	}
	return &pb.RegisterResponse{Token: token}, nil
}
// Unregister removes the node identified by the request from the registry,
// provided the supplied token is valid.
func (r *RegistryServer) Unregister(ctx context.Context, request *pb.UnregisterRequest) (*pb.Empty, error) {
	if err := r.registry.Unregister(request.Id, request.Token); err != nil {
		return nil, err
	}
	return &pb.Empty{}, nil
}
// GetRing returns the current ring topology as a gRPC message, provided
// the caller's id/token pair is valid.
func (r *RegistryServer) GetRing(ctx context.Context, request *pb.RingRequest) (*pb.Ring, error) {
	current, err := r.registry.Ring(request.Id, request.Token)
	if err != nil {
		return nil, err
	}
	return toGrpcResponse(current), nil
}
// toGrpcResponse converts the internal ring (leader plus followers) into
// its protobuf representation.
func toGrpcResponse(ring *ring.Ring) *pb.Ring {
	return &pb.Ring{Leader: toProtoNode(ring.Leader()), Followers: toProtoFollowers(ring)}
}
// toProtoNode converts a single internal ring node to its protobuf message.
func toProtoNode(node *ring.Node) *pb.Node {
	return &pb.Node{Id: node.GetId(), Host: node.GetHost(), Port: int32(node.GetPort())}
}
// toProtoFollowers converts every follower of the ring into its protobuf
// message and returns them as a slice.
func toProtoFollowers(ring *ring.Ring) []*pb.Node {
	members := ring.Followers()
	followers := make([]*pb.Node, 0, len(members))
	for _, member := range members {
		followers = append(followers, toProtoNode(member))
	}
	return followers
}
// GetInfo returns a human-readable description of the current registry ring.
func (r *RegistryServer) GetInfo() string {
	return r.registry.RingInformation()
}
|
package B
import "github.com/gofiber/fiber/v2"
// two is the fiber handler for route 2 of package B; it replies with a
// static greeting string.
// NOTE(review): the string literal below contains a mojibake character
// ("๐", likely a corrupted emoji) — it is kept byte-identical here;
// confirm the intended character with the author.
func two(ctx *fiber.Ctx) error {
	return ctx.SendString("B, 2 ๐!")
}
|
package for_c_lang
/*
#include <stdio.h>
static void SayHello(const char *name){
puts(name);
}
*/
import "C"
// BasicApp calls the C helper SayHello (declared in the cgo preamble)
// to print a greeting via puts.
// NOTE(review): C.CString allocates with malloc and is never freed here,
// so each call leaks the string — consider freeing it with
// C.free(unsafe.Pointer(...)) if this is called more than trivially.
func BasicApp(){
	C.SayHello(C.CString("Hello,World!\n"))
}
package main
import (
"github.com/1and1/oneandone-cloudserver-sdk-go"
"github.com/codegangsta/cli"
)
var privateNetOps []cli.Command
// init builds the CLI command tree for the 1&1 private network operations
// and registers it in privateNetOps.
//
// Fix: the pnNameFlag and pnDescFlag variables were swapped — the variable
// named pnNameFlag held the "desc, d" flag and pnDescFlag held "name, n".
// The variable names now match the flags they hold; every reference has
// been updated in step, so the flag definitions, their order in each
// command, and all runtime behavior are unchanged.
func init() {
	pnIdFlag := cli.StringFlag{
		Name:  "id, i",
		Usage: "ID of the private network.",
	}
	pnNameFlag := cli.StringFlag{
		Name:  "name, n",
		Usage: "Name of the private network.",
	}
	pnDescFlag := cli.StringFlag{
		Name:  "desc, d",
		Usage: "Description of the private network.",
	}
	pnAddressFlag := cli.StringFlag{
		Name:  "netip",
		Usage: "Private network IP address.",
	}
	pnMaskFlag := cli.StringFlag{
		Name:  "netmask",
		Usage: "Subnet mask.",
	}
	pnServerIdFlag := cli.StringFlag{
		Name:  "serverid",
		Usage: "ID of the server.",
	}
	dcIdFlag := cli.StringFlag{
		Name:  "datacenterid",
		Usage: "Data center ID of the private network.",
	}
	// Flag order preserved from the original: datacenterid, name, desc, netip, netmask.
	pnCreateFlags := []cli.Flag{dcIdFlag, pnNameFlag, pnDescFlag, pnAddressFlag, pnMaskFlag}
	privateNetOps = []cli.Command{
		{
			Name:        "privatenet",
			Description: "1&1 private network operations",
			Usage:       "Private network operations.",
			Subcommands: []cli.Command{
				{
					Name:  "assign",
					Usage: "Assigns servers to private network.",
					Flags: []cli.Flag{
						pnIdFlag,
						cli.StringSliceFlag{
							Name:  "serverid",
							Usage: "List of server IDs.",
						},
					},
					Action: assignPrivateNetServers,
				},
				{
					Name:   "create",
					Usage:  "Creates new private network.",
					Flags:  pnCreateFlags,
					Action: createPrivateNet,
				},
				{
					Name:   "info",
					Usage:  "Shows information about private network.",
					Flags:  []cli.Flag{pnIdFlag},
					Action: showPrivateNet,
				},
				{
					Name:   "list",
					Usage:  "Lists available private networks.",
					Flags:  queryFlags,
					Action: listPrivateNets,
				},
				{
					Name:   "server",
					Usage:  "Shows information about server attached to private network.",
					Flags:  []cli.Flag{pnIdFlag, pnServerIdFlag},
					Action: showPrivateNetServer,
				},
				{
					Name:   "servers",
					Usage:  "Lists servers attached to private network.",
					Flags:  []cli.Flag{pnIdFlag},
					Action: listPrivateNetServers,
				},
				{
					Name:   "rm",
					Usage:  "Removes private network.",
					Flags:  []cli.Flag{pnIdFlag},
					Action: deletePrivateNet,
				},
				{
					Name:   "unassign",
					Usage:  "Unassigns server from private network.",
					Flags:  []cli.Flag{pnIdFlag, pnServerIdFlag},
					Action: removePrivateNetServer,
				},
				{
					Name:   "update",
					Usage:  "Updates private network.",
					Flags:  []cli.Flag{pnIdFlag, pnNameFlag, pnDescFlag, pnAddressFlag, pnMaskFlag},
					Action: updatePrivateNet,
				},
			},
		},
	}
}
// listPrivateNets prints a table of the account's private networks,
// honoring any query flags (paging/filtering) supplied on the command line.
func listPrivateNets(ctx *cli.Context) {
	nets, err := api.ListPrivateNetworks(getQueryParams(ctx))
	exitOnError(err)
	rows := make([][]string, len(nets))
	for i := range nets {
		n := nets[i]
		rows[i] = []string{
			n.Id,
			n.Name,
			n.NetworkAddress,
			n.SubnetMask,
			n.State,
			getDatacenter(n.Datacenter),
		}
	}
	header := []string{"ID", "Name", "Network Address", "Subnet Mask", "State", "Data Center"}
	output(ctx, nets, "", false, &header, &rows)
}
// showPrivateNet displays detailed information about one private network,
// identified by the required --id flag.
func showPrivateNet(ctx *cli.Context) {
	id := getRequiredOption(ctx, "id")
	pn, err := api.GetPrivateNetwork(id)
	exitOnError(err)
	output(ctx, pn, "", true, nil, nil)
}
// createPrivateNet creates a new private network from the command-line
// flags; only the name is mandatory, the rest default to empty strings.
func createPrivateNet(ctx *cli.Context) {
	request := oneandone.PrivateNetworkRequest{
		Name:           getRequiredOption(ctx, "name"),
		DatacenterId:   ctx.String("datacenterid"),
		Description:    ctx.String("desc"),
		NetworkAddress: ctx.String("netip"),
		SubnetMask:     ctx.String("netmask"),
	}
	_, pn, err := api.CreatePrivateNetwork(&request)
	exitOnError(err)
	output(ctx, pn, okWaitMessage, false, nil, nil)
}
// updatePrivateNet updates the private network identified by the required
// --id flag with whichever optional fields were supplied.
func updatePrivateNet(ctx *cli.Context) {
	id := getRequiredOption(ctx, "id")
	request := oneandone.PrivateNetworkRequest{
		Name:           ctx.String("name"),
		Description:    ctx.String("desc"),
		NetworkAddress: ctx.String("netip"),
		SubnetMask:     ctx.String("netmask"),
	}
	pn, err := api.UpdatePrivateNetwork(id, &request)
	exitOnError(err)
	output(ctx, pn, okWaitMessage, false, nil, nil)
}
// deletePrivateNet removes the private network identified by --id.
func deletePrivateNet(ctx *cli.Context) {
	id := getRequiredOption(ctx, "id")
	pn, err := api.DeletePrivateNetwork(id)
	exitOnError(err)
	output(ctx, pn, okWaitMessage, false, nil, nil)
}
// listPrivateNetServers prints an ID/Name table of the servers attached to
// the private network identified by --id.
func listPrivateNetServers(ctx *cli.Context) {
	id := getRequiredOption(ctx, "id")
	attached, err := api.ListPrivateNetworkServers(id)
	exitOnError(err)
	rows := make([][]string, len(attached))
	for i := range attached {
		rows[i] = []string{attached[i].Id, attached[i].Name}
	}
	header := []string{"ID", "Name"}
	output(ctx, attached, "", false, &header, &rows)
}
// assignPrivateNetServers attaches one or more servers (--serverid, may be
// repeated) to the private network identified by --id.
func assignPrivateNetServers(ctx *cli.Context) {
	id := getRequiredOption(ctx, "id")
	servers := getStringSliceOption(ctx, "serverid", true)
	pn, err := api.AttachPrivateNetworkServers(id, servers)
	exitOnError(err)
	output(ctx, pn, okWaitMessage, false, nil, nil)
}
// showPrivateNetServer displays one server attached to a private network;
// both --id and --serverid are required.
func showPrivateNetServer(ctx *cli.Context) {
	id := getRequiredOption(ctx, "id")
	sid := getRequiredOption(ctx, "serverid")
	srv, err := api.GetPrivateNetworkServer(id, sid)
	exitOnError(err)
	output(ctx, srv, "", true, nil, nil)
}
// removePrivateNetServer detaches a single server (--serverid) from the
// private network identified by --id.
func removePrivateNetServer(ctx *cli.Context) {
	id := getRequiredOption(ctx, "id")
	sid := getRequiredOption(ctx, "serverid")
	pn, err := api.DetachPrivateNetworkServer(id, sid)
	exitOnError(err)
	output(ctx, pn, okWaitMessage, false, nil, nil)
}
|
package block
import (
"time"
)
// blocks is the in-memory chain, seeded with the hard-coded genesis block.
var (
blocks = []*BlockChain{genesisBlock}
)
// genesisBlock is the fixed first block: its hash and nonce are
// pre-computed constants and its previous-hash is empty by definition.
var genesisBlock = &BlockChain{
Index: 0,
PrevBlockChainHash: []byte(""),
Timestamp: 1465154705,
Data: []byte(""),
Hash: []byte("0000001ecea26a0894fbd46de4a2217a18e1c7ab965ca6b8b2b57cb62cbceeec"),
Nonce: 15542467,
}
// BlockChain is a single block of the chain (despite the name): its
// position, payload, the previous block's hash, its own hash, and the
// proof-of-work nonce that produced that hash.
type BlockChain struct {
Index int
Timestamp int64
Data []byte
PrevBlockChainHash []byte
Hash []byte
Nonce int
}
// NewBlockChain builds the next block on top of the current chain tip,
// stamping it with the current Unix time, then mines its hash/nonce via
// proof-of-work before returning it. The block is NOT appended to the
// package-level chain here.
func NewBlockChain(data []byte) *BlockChain {
	prev := GetLastBlock()
	// Field-named literal (the original used an unformatted positional
	// literal, which silently breaks if the struct field order changes).
	b := &BlockChain{
		Index:              prev.Index + 1,
		Timestamp:          time.Now().Unix(),
		Data:               data,
		PrevBlockChainHash: prev.Hash,
		Hash:               []byte{},
		Nonce:              0,
	}
	pow := NewProofOfWork(b)
	nonce, hash := pow.CalculateHashForBlock()
	b.Hash = hash
	b.Nonce = nonce
	return b
}
// GetLastBlock returns the most recently added block. The chain is seeded
// with the genesis block, so the slice is non-empty unless callers mutate
// blocks directly.
func GetLastBlock() *BlockChain {
return blocks[len(blocks) - 1]
}
|
package ffmpeg
//#include <libavutil/pixfmt.h>
import "C"
// ColorPrimaries mirrors libavutil's AVColorPrimaries enum values.
type ColorPrimaries int
const (
// ColorPrimaries_Unspecified matches AVCOL_PRI_UNSPECIFIED (value 2 in pixfmt.h).
ColorPrimaries_Unspecified = 2
)
// ctype converts the Go value into the C enum for use in cgo calls.
func (cp ColorPrimaries) ctype() C.enum_AVColorPrimaries {
return (C.enum_AVColorPrimaries)(cp)
}
|
// +build !android
package main
import (
"flag"
"fmt"
"log"
"runtime"
"strconv"
"strings"
"github.com/tideland/goas/v2/loop"
glfw "github.com/go-gl/glfw3"
"github.com/remogatto/mandala"
)
// main bootstraps the desktop (non-Android) build: it parses the flags,
// opens a GLFW window with an OpenGL ES 2.0 context, starts the rendering
// and event loops, and services window events until the window closes.
func main() {
	// GLFW event handling must run on the main OS thread.
	runtime.LockOSThread()
	verbose := flag.Bool("verbose", false, "produce verbose output")
	debug := flag.Bool("debug", false, "produce debug output")
	size := flag.String("size", "320x480", "set the size of the window")
	flag.Parse()
	if *verbose {
		mandala.Verbose = true
	}
	if *debug {
		mandala.Debug = true
	}
	dims := strings.Split(strings.ToLower(*size), "x")
	// Reject malformed -size values such as "320" or "320x480x2"; the
	// original indexed dims[1] unconditionally and panicked with an
	// index-out-of-range error instead of a usable message.
	if len(dims) != 2 {
		log.Fatalf("invalid -size %q: expected WIDTHxHEIGHT", *size)
	}
	width, err := strconv.Atoi(dims[0])
	if err != nil {
		panic(err)
	}
	height, err := strconv.Atoi(dims[1])
	if err != nil {
		panic(err)
	}
	if !glfw.Init() {
		panic("Can't init glfw!")
	}
	defer glfw.Terminate()
	// Enable OpenGL ES 2.0.
	glfw.WindowHint(glfw.ClientApi, glfw.OpenglEsApi)
	glfw.WindowHint(glfw.ContextVersionMajor, 2)
	window, err := glfw.CreateWindow(width, height, "{{.AppName}}", nil, nil)
	if err != nil {
		panic(err)
	}
	mandala.Init(window)
	// Create a rendering loop control struct containing a set of
	// channels that control rendering.
	renderLoopControl := newRenderLoopControl()
	// Start the rendering loop.
	loop.GoRecoverable(
		renderLoopFunc(renderLoopControl),
		func(rs loop.Recoverings) (loop.Recoverings, error) {
			for _, r := range rs {
				log.Printf("%s\n%s", r.Reason, mandala.Stacktrace())
			}
			return rs, fmt.Errorf("Unrecoverable loop\n")
		},
	)
	// Start the event loop.
	loop.GoRecoverable(
		eventLoopFunc(renderLoopControl),
		func(rs loop.Recoverings) (loop.Recoverings, error) {
			for _, r := range rs {
				log.Printf("%s\n%s", r.Reason, mandala.Stacktrace())
			}
			return rs, fmt.Errorf("Unrecoverable loop\n")
		},
	)
	// Block on the main thread, dispatching window events.
	for !window.ShouldClose() {
		glfw.WaitEvents()
	}
}
|
package docs
import "io/ioutil"
// w is a shared sink that discards everything written to it.
// NOTE(review): io/ioutil is deprecated since Go 1.16; io.Discard is the
// modern replacement, but switching requires editing the import block.
var w = ioutil.Discard
// House demonstrates aggregation vs. composition of parts in a class diagram.
type House struct {
Frontdoor Door // aggregation
Windows []*Window // composition
}
// Rooms reports the number of rooms; this stub always returns 0.
func (me *House) Rooms() int { return 0 }
// Door is a house part described by the material it is made of.
type Door struct {
	Material string
}

// Materials returns the door's material list; this stub always yields an
// empty (non-nil) slice.
func (d *Door) Materials() []string {
	return []string{}
}
// Window is a house part identified by its model name.
type Window struct {
	Model string
}

// Materials returns the window's material list; this stub always yields an
// empty (non-nil) slice.
func (w *Window) Materials() []string {
	return []string{}
}
// Part is satisfied by any house component that can report the materials
// it is made of (Door and Window both qualify).
type Part interface {
Materials() []string
}
|
package main
import "fmt"
// main demonstrates interface comparability: comparing is safe while at
// least one operand is the nil interface.
func main() {
var x interface{}
var y interface{} = []int{3, 5}
_ = x == x
_ = x == y
fmt.Printf("%T", x)
//_ = y == y // would panic: both operands have the same non-comparable dynamic type ([]int).
}
|
// Package store defines an interface for storing metrics to a store,
// such as redis, and it also implments the RedisStore, which is the
// required implmementation for the demo. Note we abstract the store
// into an interface so we can run unit tests without having to actually
// store to redis, among other reasons.
package store
import (
"time"
"github.com/gdotgordon/locator-demo/locator/locking"
"github.com/gdotgordon/locator-demo/locator/types"
"github.com/go-redis/redis"
)
// Store is the data store abstraction.
// Store is the data store abstraction.
type Store interface {
	// StoreLatency records one request-latency sample.
	StoreLatency(d time.Duration) error
	// AddSuccess increments the success counter.
	AddSuccess() error
	// AddError increments the error counter.
	AddError() error
	// Clear resets all stored metrics.
	Clear() error
	// AcquireLock obtains the store-wide lock.
	AcquireLock() (*locking.Lock, error)
	// Unlock releases a lock returned by AcquireLock.
	Unlock(lock *locking.Lock) error
}
// RedisStore implments the Store interface for the Redis client.
type RedisStore struct {
cli *redis.Client
}
// NewRedisStore does
func NewRedisStore(cli *redis.Client) Store {
return &RedisStore{cli: cli}
}
// AcquireLock does
func (rs *RedisStore) AcquireLock() (*locking.Lock, error) {
lck := locking.New(rs.cli, 1*time.Minute, 10)
return lck, lck.Lock()
}
func (rs *RedisStore) Unlock(lock *locking.Lock) error {
return lock.Unlock()
}
func (rs *RedisStore) Clear() error {
return rs.cli.FlushDB().Err()
}
func (rs *RedisStore) StoreLatency(d time.Duration) error {
return rs.cli.LPush(types.LatencyKey, int64(d)).Err()
}
func (rs *RedisStore) AddSuccess() error {
return rs.cli.Incr(types.SuccessKey).Err()
}
func (rs *RedisStore) AddError() error {
return rs.cli.Incr(types.ErrorKey).Err()
}
|
package game
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestValidateStep table-tests validateStep. The cases assert that a step
// directly reversing the previous one (Left/Right, Up/Down) is invalid,
// while repeating a step or turning 90 degrees is valid.
func TestValidateStep(t *testing.T) {
	testCases := []struct {
		desc     string
		prevStep Event
		nextStep Event
		expected bool
	}{
		{
			desc:     "Left != Right",
			prevStep: Left,
			nextStep: Right,
			expected: false,
		},
		{
			desc:     "Left == Left",
			prevStep: Left,
			nextStep: Left,
			expected: true,
		},
		{
			desc:     "Left == Up",
			prevStep: Left,
			nextStep: Up,
			expected: true,
		},
		{
			desc:     "Left == Down",
			prevStep: Left,
			nextStep: Down,
			expected: true,
		},
		{
			desc:     "Up != Down",
			prevStep: Up,
			nextStep: Down,
			expected: false,
		},
		{
			desc:     "Up == Left",
			prevStep: Up,
			nextStep: Left,
			expected: true,
		},
	}
	for _, tC := range testCases {
		tC := tC // per-iteration copy: required with t.Parallel() before Go 1.22
		t.Run(tC.desc, func(t *testing.T) {
			t.Parallel()
			require.Equal(t, tC.expected, validateStep(tC.prevStep, tC.nextStep), tC.desc)
		})
	}
}
|
/*
Copyright 2018 Bitnine Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ag
import "testing"
// testElement is a minimal array-element type for the tests below; its
// readElements always succeeds with an empty result.
type testElement struct{}
func (_ testElement) readElements(b []byte) ([]interface{}, error) {
	return []interface{}{}, nil
}
// TestArrayScanError feeds Array destinations it should reject (non-pointer
// values, pointers to unsupported element types) and expects Scan to fail
// for every one of them.
func TestArrayScanError(t *testing.T) {
	tests := []interface{}{
		nil,
		0,
		(*byte)(nil),
		&testElement{},
		&[]byte{},
		&[]testElement{},
	}
	for _, i := range tests {
		err := Array(i).Scan(nil)
		if err == nil {
			t.Errorf("error expected for %T", i)
		}
	}
}
// testElementEx embeds testElement and additionally provides a no-op Scan
// method — presumably what Array requires of a usable element type (see
// Array's implementation).
type testElementEx struct {
	testElement
}
func (_ testElementEx) Scan(src interface{}) error {
	return nil
}
// TestArrayScanNullSlice verifies that scanning SQL NULL into a slice
// destination succeeds and leaves the slice nil.
func TestArrayScanNullSlice(t *testing.T) {
	var es []testElementEx
	err := Array(&es).Scan(nil)
	if err != nil {
		// Report the actual error; the original printed es ("got %v,
		// want NULL"), which is useless when err is non-nil.
		t.Fatalf("got error %v, want nil", err)
	}
	if es != nil {
		t.Fatalf("got %v, want nil", es)
	}
}
// TestArrayScanNullArray verifies that scanning SQL NULL into a fixed-size
// array destination fails with a NullArrayError (arrays cannot be nil).
func TestArrayScanNullArray(t *testing.T) {
	es := [1]testElementEx{}
	err := Array(&es).Scan(nil)
	if err == nil {
		t.Fatalf("got %v, want error", es)
	}
	if _, ok := err.(NullArrayError); !ok {
		t.Errorf("NullArrayError expected")
	}
}
// TestArrayScanType verifies that an unsupported source type (int) is
// rejected by Scan.
func TestArrayScanType(t *testing.T) {
	src := 0
	var es []testElementEx
	err := Array(&es).Scan(src)
	if err == nil {
		t.Errorf("error expected for %T", src)
	}
}
// TestArrayScanZero verifies that a nil []byte source (zero-length input,
// distinct from SQL NULL) is rejected by Scan.
func TestArrayScanZero(t *testing.T) {
	var src interface{} = []byte(nil)
	var es []testElementEx
	err := Array(&es).Scan(src)
	if err == nil {
		t.Errorf("error expected for %T", src)
	}
}
// TestArrayScanArrayLen verifies that input whose element count does not
// match the fixed array length is rejected by Scan.
func TestArrayScanArrayLen(t *testing.T) {
	es := [1]testElementEx{}
	err := Array(&es).Scan([]byte("dummy"))
	if err == nil {
		t.Errorf("error expected for %T", es)
	}
}
// TestArrayValue verifies that Value() on an Array destination is not
// supported and reports an error.
func TestArrayValue(t *testing.T) {
	var es []testElementEx
	_, err := Array(&es).Value()
	if err == nil {
		t.Errorf("error expected for Value() on Array")
	}
}
|
package utils
// FindInSlice finds an integer element in a slice passed by
func FindInSlice(s []int, element int) bool {
for _, n := range s {
if element == n {
return true
}
}
return false
}
|
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fhir
import "encoding/json"
// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND
// Bundle is documented here http://hl7.org/fhir/StructureDefinition/Bundle
// Bundle is a container for a collection of resources (FHIR R4).
type Bundle struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Meta *Meta `bson:"meta,omitempty" json:"meta,omitempty"`
	ImplicitRules *string `bson:"implicitRules,omitempty" json:"implicitRules,omitempty"`
	Language *string `bson:"language,omitempty" json:"language,omitempty"`
	Identifier *Identifier `bson:"identifier,omitempty" json:"identifier,omitempty"`
	Type BundleType `bson:"type" json:"type"`
	Timestamp *string `bson:"timestamp,omitempty" json:"timestamp,omitempty"`
	Total *int `bson:"total,omitempty" json:"total,omitempty"`
	Link []BundleLink `bson:"link,omitempty" json:"link,omitempty"`
	Entry []BundleEntry `bson:"entry,omitempty" json:"entry,omitempty"`
	Signature *Signature `bson:"signature,omitempty" json:"signature,omitempty"`
}
// BundleLink relates the bundle to a URL via a named relation ("self", "next", ...).
type BundleLink struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Relation string `bson:"relation" json:"relation"`
	Url string `bson:"url" json:"url"`
}
// BundleEntry is one entry in the bundle; Resource is kept as raw JSON so
// callers can defer decoding to the concrete resource type.
type BundleEntry struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Link []BundleLink `bson:"link,omitempty" json:"link,omitempty"`
	FullUrl *string `bson:"fullUrl,omitempty" json:"fullUrl,omitempty"`
	Resource json.RawMessage `bson:"resource,omitempty" json:"resource,omitempty"`
	Search *BundleEntrySearch `bson:"search,omitempty" json:"search,omitempty"`
	Request *BundleEntryRequest `bson:"request,omitempty" json:"request,omitempty"`
	Response *BundleEntryResponse `bson:"response,omitempty" json:"response,omitempty"`
}
// BundleEntrySearch carries search-result metadata for an entry.
// Score uses json.Number to avoid float64 precision surprises.
type BundleEntrySearch struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Mode *SearchEntryMode `bson:"mode,omitempty" json:"mode,omitempty"`
	Score *json.Number `bson:"score,omitempty" json:"score,omitempty"`
}
// BundleEntryRequest describes the HTTP request for a transaction/batch entry,
// including conditional headers (If-None-Match etc.).
type BundleEntryRequest struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Method HTTPVerb `bson:"method" json:"method"`
	Url string `bson:"url" json:"url"`
	IfNoneMatch *string `bson:"ifNoneMatch,omitempty" json:"ifNoneMatch,omitempty"`
	IfModifiedSince *string `bson:"ifModifiedSince,omitempty" json:"ifModifiedSince,omitempty"`
	IfMatch *string `bson:"ifMatch,omitempty" json:"ifMatch,omitempty"`
	IfNoneExist *string `bson:"ifNoneExist,omitempty" json:"ifNoneExist,omitempty"`
}
// BundleEntryResponse describes the server's response for an entry;
// Outcome is raw JSON for the same deferred-decoding reason as Resource.
type BundleEntryResponse struct {
	Id *string `bson:"id,omitempty" json:"id,omitempty"`
	Extension []Extension `bson:"extension,omitempty" json:"extension,omitempty"`
	ModifierExtension []Extension `bson:"modifierExtension,omitempty" json:"modifierExtension,omitempty"`
	Status string `bson:"status" json:"status"`
	Location *string `bson:"location,omitempty" json:"location,omitempty"`
	Etag *string `bson:"etag,omitempty" json:"etag,omitempty"`
	LastModified *string `bson:"lastModified,omitempty" json:"lastModified,omitempty"`
	Outcome json.RawMessage `bson:"outcome,omitempty" json:"outcome,omitempty"`
}
// OtherBundle is an alias without Bundle's MarshalJSON method, used to
// avoid infinite recursion when marshalling (see MarshalJSON below).
type OtherBundle Bundle
// MarshalJSON marshals the given Bundle as JSON into a byte slice
func (r Bundle) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
OtherBundle
ResourceType string `json:"resourceType"`
}{
OtherBundle: OtherBundle(r),
ResourceType: "Bundle",
})
}
// UnmarshalBundle unmarshals a Bundle.
func UnmarshalBundle(b []byte) (Bundle, error) {
var bundle Bundle
if err := json.Unmarshal(b, &bundle); err != nil {
return bundle, err
}
return bundle, nil
}
|
package main
import (
"log"
"os"
"strconv"
"time"
)
// logfile is the open access-log file; set once by initLogger and written
// to by WriteLog.
var logfile *os.File
// logDir is the log directory name, relative to dirPrefix.
const logDir string = "logs/"
// initLogger creates the log directory if needed and opens a fresh
// timestamped access-log file for appending. It panics on failure, since
// the process cannot log without it.
func initLogger() {
	ld := dirPrefix + logDir
	// MkdirAll is a no-op when the directory exists and, unlike the old
	// unchecked Stat+Mkdir pair, reports real creation failures.
	if err := os.MkdirAll(ld, 0750); err != nil {
		panic(err)
	}
	// Named ts to avoid shadowing the time package (the original local
	// was called "time").
	ts := strconv.Itoa(int(time.Now().Unix()))
	var err error
	logfile, err = os.OpenFile(ld+"access_"+ts+".log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		panic(err)
	}
}
// logLevel is the severity attached to each log line.
type logLevel int
// Severity values, in roughly increasing order of importance (success is a
// separate positive-outcome level rather than a severity).
const (
	info logLevel = 0
	warn logLevel = 1
	logError logLevel = 2
	critical logLevel = 3
	success logLevel = 4
)
// loglevelToPrefix maps a log level to its bracketed line prefix; any
// unknown level falls back to "[low]".
func loglevelToPrefix(level logLevel) string {
	switch level {
	case info:
		return "[Info]"
	case warn:
		return "[Warning]"
	case logError:
		return "[error]"
	case critical:
		return "[*!critical!*]"
	case success:
		return "[success]"
	default:
		return "[low]"
	}
}
func genLogTime() string {
return "[" + time.Now().Format(time.Stamp) + "]"
}
// WriteLog appends a timestamped, level-prefixed message to the logfile
// (CRLF line endings). It panics via log.Panic if the write fails, and
// assumes initLogger has already opened logfile.
func WriteLog(level logLevel, message string) {
	logMessage := genLogTime() + " " + loglevelToPrefix(level) + " " + message + "\r\n"
	if _, err := logfile.Write([]byte(logMessage)); err != nil {
		log.Panic(err)
	}
}
|
package schedule
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/coredns/coredns/core/dnsserver"
"github.com/coredns/coredns/plugin"
"github.com/mholt/caddy"
)
// init registers the "schedule" plugin with caddy for the DNS server type,
// pointing at setup for directive parsing.
func init() {
	caddy.RegisterPlugin("schedule", caddy.Plugin{
		ServerType: "dns",
		Action: setup,
	})
}
// setup parses the "schedule" directive. Each occurrence takes exactly
// three arguments — a zone, a start time and an end time (both "HH:MM") —
// and contributes one ScheduleEntry; the resulting Schedule is then
// installed into the CoreDNS plugin chain.
func setup(c *caddy.Controller) error {
	sch := Schedule{
		ScheduleEntries: []ScheduleEntry{},
	}
	for c.Next() {
		// Get the zone
		if !c.NextArg() {
			return plugin.Error("schedule", fmt.Errorf("expected zone"))
		}
		z := c.Val()
		// Get the start time
		if !c.NextArg() {
			return plugin.Error("schedule", fmt.Errorf("expected start time"))
		}
		startTimeStr := c.Val()
		// Get the end time
		if !c.NextArg() {
			return plugin.Error("schedule", fmt.Errorf("expected end time"))
		}
		endTimeStr := c.Val()
		// Expect no additional configuration
		if c.NextArg() {
			return plugin.Error("schedule", c.ArgErr())
		}
		// Parse the start-time
		startTimeHr, startTimeMnt, err := ParseHourMinutePair(startTimeStr)
		if err != nil {
			return plugin.Error("schedule", fmt.Errorf("failed to parse time: %s", err))
		}
		// Parse the end-time
		endTimeHr, endTimeMnt, err := ParseHourMinutePair(endTimeStr)
		if err != nil {
			return plugin.Error("schedule", fmt.Errorf("failed to parse time: %s", err))
		}
		// Append a new schedule entry
		sch.ScheduleEntries = append(sch.ScheduleEntries, ScheduleEntry{
			Zone: z,
			StartTime: DayTime{Hour: startTimeHr, Minute: startTimeMnt},
			EndTime: DayTime{Hour: endTimeHr, Minute: endTimeMnt},
		})
	}
	// Add the Plugin to CoreDNS
	dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler {
		sch.Next = next
		return sch
	})
	return nil
}
// ParseHourMinutePair parses a string with the format HH:MM into
// an hour and minute integers.
func ParseHourMinutePair(s string) (int, int, error) {
ps := strings.Split(s, ":")
if len(ps) != 2 {
return 0, 0, fmt.Errorf("invalid time. Should be of the format HH:MM")
}
hr, err := strconv.Atoi(ps[0])
if err != nil {
return 0, 0, fmt.Errorf("failed to parse hour: %s", err)
}
if hr < 0 || hr > 23 {
return 0, 0, fmt.Errorf("Hour is out of range. 0 <= hr <= 23")
}
mnt, err := strconv.Atoi(ps[1])
if err != nil {
return 0, 0, fmt.Errorf("failed to parse minute: %s", err)
}
if mnt < 0 || mnt > 60 {
return 0, 0, fmt.Errorf("Minute is out of range. 0 <= hr <= 60")
}
return hr, mnt, nil
}
// TimeOfDay returns hr:mnt in the given day of t
func TimeOfDay(t time.Time, hr int, mnt int) (time.Time, error) {
if hr < 0 || hr > 23 {
return time.Time{}, fmt.Errorf("Hour is out of range. 0 <= hr <= 23")
}
if mnt < 0 || mnt > 60 {
return time.Time{}, fmt.Errorf("Minute is out of range. 0 <= hr <= 60")
}
y, m, d := t.Date()
loc := t.Location()
return time.Date(y, m, d, hr, mnt, 0, 0, loc), nil
}
// ParseTime parses an HH:MM string and returns
// that time within the day of the given time.
func ParseTime(t time.Time, s string) (time.Time, error) {
hr, mnt, err := ParseHourMinutePair(s)
if err != nil {
return time.Time{}, err
}
tod, err := TimeOfDay(t, hr, mnt)
if err != nil {
return time.Time{}, err
}
return tod, nil
}
|
package fuse
import (
"os"
"strings"
"time"
"github.com/minio/minio-go/v7"
//minio "github.com/minio/minio-go"
)
// NewFileInfo adapts a minio object description to the os.FileInfo
// interface so the FUSE layer can treat bucket objects like files.
func NewFileInfo(objectInfo minio.ObjectInfo) os.FileInfo {
	return &fileInfo{objectInfo: objectInfo}
}
// fileInfo implements os.FileInfo on top of a minio.ObjectInfo.
type fileInfo struct {
	objectInfo minio.ObjectInfo
}
// Name returns the object key (the full key, not just the base name).
func (f *fileInfo) Name() string {
	return f.objectInfo.Key
}
// Size returns the object size in bytes.
func (f *fileInfo) Size() int64 {
	return f.objectInfo.Size
}
// Mode always reports 0; object stores carry no POSIX permission bits.
func (f *fileInfo) Mode() os.FileMode {
	return 0
}
// ModTime returns the object's last-modified timestamp.
func (f *fileInfo) ModTime() time.Time {
	return f.objectInfo.LastModified
}
// IsDir treats keys ending in "/" as directories (the S3 folder convention).
func (f *fileInfo) IsDir() bool {
	return strings.HasSuffix(f.Name(), "/")
}
// Sys exposes the underlying minio.ObjectInfo for callers that need it.
func (f *fileInfo) Sys() interface{} {
	return f.objectInfo
}
|
// +build stats
package parser_test
import (
"encoding/json"
"fmt"
"github.com/bytesparadise/libasciidoc/pkg/parser"
"github.com/bytesparadise/libasciidoc/pkg/types"
. "github.com/bytesparadise/libasciidoc/testsupport"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Ginkgo spec: parses a multiline paragraph containing an attribute
// substitution inside an inline link, then dumps parser statistics
// (only compiled under the "stats" build tag).
var _ = Describe("paragraphs", func() {
	It("multiline paragraph with rich content", func() {
		source := `:fds-version: 6.7.4
PyroSim is a graphical user interface for the https://github.com/firemodels/fds/releases/tag/FDS{fds-version}/[Fire Dynamics Simulator (FDS) version {fds-version}].
FDS is closely integrated into PyroSim.
FDS models can predict smoke, temperature, carbon monoxide, and other substances during fires.
The results of these simulations are used to ensure the safety of buildings before construction, evaluate safety options of existing buildings, reconstruct fires for post-accident investigation, and assist in firefighter training.
`
		expected := &types.Document{
			Elements: []interface{}{
				&types.AttributeDeclaration{
					Name: "fds-version",
					Value: string("6.7.4"),
				},
				&types.Paragraph{
					Elements: []interface{}{
						&types.StringElement{
							Content: "PyroSim is a graphical user interface for the ",
						},
						// https://github.com/firemodels/fds/releases/tag/FDS{fds-version}/[Fire Dynamics Simulator (FDS) version {fds-version}]
						&types.InlineLink{
							Attributes: types.Attributes{
								types.AttrInlineLinkText: "Fire Dynamics Simulator (FDS) version 6.7.4",
							},
							Location: &types.Location{
								Scheme: "https://",
								Path: "github.com/firemodels/fds/releases/tag/FDS6.7.4/",
							},
						},
						&types.StringElement{
							Content: ".\nFDS is closely integrated into PyroSim.\nFDS models can predict smoke, temperature, carbon monoxide, and other substances during fires.\nThe results of these simulations are used to ensure the safety of buildings before construction, evaluate safety options of existing buildings, reconstruct fires for post-accident investigation, and assist in firefighter training.",
						},
					},
				},
			},
		}
		// Collect and print expression/choice statistics alongside the
		// correctness assertion.
		stats := parser.Stats{}
		Expect(ParseDocument(source, parser.Debug(true), parser.Statistics(&stats, "no match"))).To(MatchDocument(expected))
		fmt.Printf("ExprCnt: %d\n", stats.ExprCnt)
		result, _ := json.MarshalIndent(stats.ChoiceAltCnt, " ", " ")
		fmt.Printf("ChoiceAltCnt: \n%s\n", result)
	})
})
|
// GENERATED FILE -- DO NOT EDIT
//
package k8smeta
import (
"istio.io/istio/galley/pkg/config/meta/schema/collection"
)
// Collection names declared by this (generated) package; CollectionNames
// below returns them all.
var (
	// K8SAppsV1Deployments is the name of collection k8s/apps/v1/deployments
	K8SAppsV1Deployments = collection.NewName("k8s/apps/v1/deployments")
	// K8SCoreV1Endpoints is the name of collection k8s/core/v1/endpoints
	K8SCoreV1Endpoints = collection.NewName("k8s/core/v1/endpoints")
	// K8SCoreV1Namespaces is the name of collection k8s/core/v1/namespaces
	K8SCoreV1Namespaces = collection.NewName("k8s/core/v1/namespaces")
	// K8SCoreV1Nodes is the name of collection k8s/core/v1/nodes
	K8SCoreV1Nodes = collection.NewName("k8s/core/v1/nodes")
	// K8SCoreV1Pods is the name of collection k8s/core/v1/pods
	K8SCoreV1Pods = collection.NewName("k8s/core/v1/pods")
	// K8SCoreV1Services is the name of collection k8s/core/v1/services
	K8SCoreV1Services = collection.NewName("k8s/core/v1/services")
	// K8SExtensionsV1Beta1Ingresses is the name of collection k8s/extensions/v1beta1/ingresses
	K8SExtensionsV1Beta1Ingresses = collection.NewName("k8s/extensions/v1beta1/ingresses")
)
// CollectionNames returns the collection names declared in this package.
func CollectionNames() []collection.Name {
return []collection.Name{
K8SAppsV1Deployments,
K8SCoreV1Endpoints,
K8SCoreV1Namespaces,
K8SCoreV1Nodes,
K8SCoreV1Pods,
K8SCoreV1Services,
K8SExtensionsV1Beta1Ingresses,
}
}
|
package notify
import (
"bytes"
"fmt"
"html/template"
"time"
"github.com/fanaticscripter/EggContractor/api"
"github.com/fanaticscripter/EggContractor/util"
)
// _contractMessageTextTmpl renders a contract announcement as HTML-ish
// markup. It branches on whether the contract has exactly two reward
// tiers (elite/standard) or a single flat reward list, and relies on the
// helper funcs registered in init (days, finalgoal, hourlyrate, ...).
const _contractMessageTextTmpl = `
{{- define "rewards" -}}
{{- range .}}
- <b>{{.Goal | numfmtwhole}}</b>: {{.Type}} {{.Name}} x{{.Count}};
{{- end}}
{{- end -}}
<b>{{.Name}}</b> ({{.Id}})
Egg: <b>{{.EggType.Display}}</b>
Max coop size: <b>{{.MaxCoopSize}}</b>
Time to complete: <b>{{.Duration | days}}d</b>
Token interval: <b>{{.TokenIntervalMinutes}}m</b>
Expires: <b>{{.ExpiryTime | fmtdate}}</b>
{{if eq (.RewardTiers | len) 2 -}}
{{$eliterewards := (index .RewardTiers 0).Rewards -}}
{{$standardrewards := (index .RewardTiers 1).Rewards -}}
Elite tier:{{template "rewards" $eliterewards}}
Required rate: <b>{{hourlyrate (finalgoal $eliterewards) .Duration | numfmt}}/hr</b>
Standard tier:{{template "rewards" $standardrewards}}
Required rate: <b>{{hourlyrate (finalgoal $standardrewards) .Duration | numfmt}}/hr</b>
{{else -}}
{{template "rewards" .Rewards}}
Required rate: <b>{{hourlyrate (finalgoal .Rewards) .Duration | numfmt}}/hr</b>
{{end -}}
`
// _contractMessageTmpl is the parsed message template, built once in init.
var _contractMessageTmpl *template.Template
// ContractNotification is a "new contract" notification: a title, the
// pre-rendered message body, and the creation timestamp.
type ContractNotification struct {
	title string
	message string
	timestamp time.Time
}
// init parses the contract message template once, registering the helper
// functions the template body uses (duration/number formatting plus
// final-goal and hourly-rate math). template.Must panics on a malformed
// template, which is acceptable at package-init time.
func init() {
	_contractMessageTmpl = template.Must(template.New("").Funcs(template.FuncMap{
		"days": func(d time.Duration) int { return int(d.Hours() / 24) },
		"finalgoal": func(r []*api.Reward) float64 { return r[len(r)-1].Goal },
		"hourlyrate": func(goal float64, d time.Duration) float64 { return goal / d.Hours() },
		"fmtdate": util.FormatDate,
		"numfmt": util.Numfmt,
		"numfmtwhole": util.NumfmtWhole,
	}).Parse(_contractMessageTextTmpl))
}
// NewContractNotification renders the contract message template for c and
// wraps it, together with a title and the current time, into a
// ContractNotification. A template execution failure is returned as-is.
func NewContractNotification(c *api.ContractProperties) (*ContractNotification, error) {
	var rendered bytes.Buffer
	if err := _contractMessageTmpl.Execute(&rendered, c); err != nil {
		return nil, err
	}
	return &ContractNotification{
		title:     fmt.Sprintf("EggContractor: new contract \"%s\"", c.Name),
		message:   rendered.String(),
		timestamp: time.Now(),
	}, nil
}
// Title returns the notification title.
func (n ContractNotification) Title() string {
	return n.title
}
// Message returns the pre-rendered message body.
func (n ContractNotification) Message() string {
	return n.message
}
// URL returns an associated link; contract notifications have none.
func (n ContractNotification) URL() string {
	return ""
}
// Timestamp returns when the notification was created.
func (n ContractNotification) Timestamp() time.Time {
	return n.timestamp
}
// NotifierParams returns notifier-specific extras: pushover gets the
// "magic" sound, every other notifier gets nil.
func (n ContractNotification) NotifierParams(notifierId string) map[string]interface{} {
	switch {
	case notifierId == "pushover":
		return map[string]interface{}{
			"sound": PushoverSoundMagic,
		}
	default:
		return nil
	}
}
|
package cmd
import (
"strconv"
"github.com/spf13/cobra"
"github.com/fanaticscripter/EggContractor/util"
)
// _unitsCommand prints util.Units as a table of (symbol, order-of-magnitude)
// pairs, three pairs per row.
var _unitsCommand = &cobra.Command{
	Use: "units",
	Short: "Print a table of units (order of magnitudes)",
	Args: cobra.NoArgs,
	Run: func(cmd *cobra.Command, args []string) {
		table := [][]string{
			{"Symb", "OoM", "Symb", "OoM", "Symb", "OoM"},
			{"----", "---", "----", "---", "----", "---"},
		}
		// Accumulate six cells (three symbol/OoM pairs) per table row.
		// Note: a final partial row (fewer than six cells) is dropped.
		row := make([]string, 0, 6)
		for _, u := range util.Units {
			row = append(row, u.Symbol, strconv.Itoa(u.OoM))
			if len(row) == 6 {
				table = append(table, row)
				row = make([]string, 0, 6)
			}
		}
		util.PrintTable(table)
	},
}
// init attaches the units command to the root command.
func init() {
	_rootCmd.AddCommand(_unitsCommand)
}
|
package product_attribute_category
import (
"inventory-service/modules/product_attribute_category/dao"
"inventory-service/modules/product_attribute_category/service"
)
// Init wires up the module, initializing the DAO layer first and the
// service layer second (the order as written here).
func Init() {
	dao.Init()
	service.Init()
}
|
package zen
import (
"context"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"reflect"
"regexp"
"runtime"
"strconv"
"time"
log "github.com/sirupsen/logrus"
)
// Struct-tag keys read by parseValidateForm: the form field name, the
// validation regexp, and the message reported on validation failure.
const (
	inputTagName = "form"
	validTagName = "valid"
	validMsgName = "msg"
)
// Headers: common HTTP header names used throughout the framework.
const (
	HeaderAcceptEncoding = "Accept-Encoding"
	HeaderAllow = "Allow"
	HeaderAuthorization = "Authorization"
	HeaderContentDisposition = "Content-Disposition"
	HeaderContentEncoding = "Content-Encoding"
	HeaderContentLength = "Content-Length"
	HeaderContentType = "Content-Type"
	HeaderCookie = "Cookie"
	HeaderSetCookie = "Set-Cookie"
	HeaderIfModifiedSince = "If-Modified-Since"
	HeaderLastModified = "Last-Modified"
	HeaderLocation = "Location"
	HeaderUpgrade = "Upgrade"
	HeaderVary = "Vary"
	HeaderWWWAuthenticate = "WWW-Authenticate"
	HeaderXForwardedProto = "X-Forwarded-Proto"
	HeaderXHTTPMethodOverride = "X-HTTP-Method-Override"
	HeaderXForwardedFor = "X-Forwarded-For"
	HeaderXRealIP = "X-Real-IP"
	HeaderServer = "Server"
	HeaderOrigin = "Origin"
	HeaderAccessControlRequestMethod = "Access-Control-Request-Method"
	HeaderAccessControlRequestHeaders = "Access-Control-Request-Headers"
	HeaderAccessControlAllowOrigin = "Access-Control-Allow-Origin"
	HeaderAccessControlAllowMethods = "Access-Control-Allow-Methods"
	HeaderAccessControlAllowHeaders = "Access-Control-Allow-Headers"
	HeaderAccessControlAllowCredentials = "Access-Control-Allow-Credentials"
	HeaderAccessControlExposeHeaders = "Access-Control-Expose-Headers"
	HeaderAccessControlMaxAge = "Access-Control-Max-Age"
	// Security
	HeaderStrictTransportSecurity = "Strict-Transport-Security"
	HeaderXContentTypeOptions = "X-Content-Type-Options"
	HeaderXXSSProtection = "X-XSS-Protection"
	HeaderXFrameOptions = "X-Frame-Options"
	HeaderContentSecurityPolicy = "Content-Security-Policy"
	HeaderXCSRFToken = "X-CSRF-Token"
)
type (
	// Context wraps the request, the response writer, the extracted URL
	// parameters, and an embedded context.Context for cancellation and
	// request-scoped values.
	Context struct {
		Req *http.Request
		Rw http.ResponseWriter
		// params holds the URL path parameters for this request.
		params Params
		// parsed records whether the request form has been parsed yet.
		parsed bool
		context.Context
	}
)
// getContext builds a fresh Context for one request/response pair, backed
// by context.TODO() and pre-seeded with an empty fields value.
// NOTE(review): SetValue and fieldKey/fields are defined elsewhere in this
// package — presumably a log-field store; confirm against their definitions.
func getContext(rw http.ResponseWriter, req *http.Request) Context {
	c := Context{}
	c.Req = req
	c.Rw = rw
	c.Context = context.TODO()
	c.SetValue(fieldKey{}, fields{})
	return c
}
// parseInput will parse request's form and
func (ctx *Context) parseInput() error {
ctx.parsed = true
return ctx.Req.ParseForm()
}
// Dup make a duplicate Context with context.Context
func (ctx *Context) Dup(c context.Context) Context {
ret := Context{}
ret.Req = ctx.Req
ret.Rw = ctx.Rw
ret.Context = c
ret.parsed = ctx.parsed
ret.params = ctx.params
return ret
}
// WithDeadline ...
func (ctx *Context) WithDeadline(dead time.Time) (Context, context.CancelFunc) {
c, cancel := context.WithDeadline(ctx, dead)
return ctx.Dup(c), cancel
}
// WithCancel ...
func (ctx *Context) WithCancel() (Context, context.CancelFunc) {
c, cancel := context.WithCancel(ctx)
return ctx.Dup(c), cancel
}
// Do job with context
func (ctx *Context) Do(job func() error) error {
errChan := make(chan error)
done := make(chan struct{})
go func() {
if err := job(); err != nil {
errChan <- err
return
}
close(done)
}()
select {
case <-ctx.Done():
return ctx.Err()
case err := <-errChan:
return err
case <-done:
return nil
}
}
// Form return request form value with given key
func (ctx *Context) Form(key string) string {
if !ctx.parsed {
ctx.parseInput()
}
return ctx.Req.FormValue(key)
}
// Param return url param with given key
func (ctx *Context) Param(key string) string {
return ctx.params.ByName(key)
}
// ParseValidateForm will parse request's form and map into a interface{} value
func (ctx *Context) ParseValidateForm(input interface{}) error {
if !ctx.parsed {
ctx.parseInput()
}
return ctx.parseValidateForm(input)
}
// BindJSON will parse request's json body and map into a interface{} value
func (ctx *Context) BindJSON(input interface{}) error {
if err := json.NewDecoder(ctx.Req.Body).Decode(input); err != nil {
return err
}
return nil
}
// BindXML will parse request's xml body and map into a interface{} value
func (ctx *Context) BindXML(input interface{}) error {
if err := xml.NewDecoder(ctx.Req.Body).Decode(input); err != nil {
return err
}
return nil
}
// parseValidateForm walks the struct that input points to: for each field
// it reads the form value named by the "form" tag, scans it into the field,
// and checks it against the "valid" regexp (failing with the "msg" text).
// It stops at the first scan or validation error. input must be a pointer
// to a struct; anything else makes reflect panic here.
func (ctx *Context) parseValidateForm(input interface{}) error {
	inputValue := reflect.ValueOf(input).Elem()
	inputType := inputValue.Type()
	for i := 0; i < inputValue.NumField(); i++ {
		tag := inputType.Field(i).Tag
		formName := tag.Get(inputTagName)
		validate := tag.Get(validTagName)
		validateMsg := tag.Get(validMsgName)
		field := inputValue.Field(i)
		formValue := ctx.Req.Form.Get(formName)
		// scan form string value into field
		if err := scan(field, formValue); err != nil {
			return err
		}
		// validate form with regex
		if err := valid(formValue, validate, validateMsg); err != nil {
			return err
		}
	}
	return nil
}
// scan converts s according to v's kind and stores the result in v.
// Non-settable values and kinds with no conversion rule are silently
// left untouched and report no error (lenient by design).
func scan(v reflect.Value, s string) error {
 if !v.CanSet() {
  return nil
 }
 switch v.Kind() {
 case reflect.String:
  v.SetString(s)
 case reflect.Bool:
  parsed, err := strconv.ParseBool(s)
  if err != nil {
   return err
  }
  v.SetBool(parsed)
 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
  parsed, err := strconv.ParseInt(s, 10, 64)
  if err != nil {
   return err
  }
  v.SetInt(parsed)
 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
  parsed, err := strconv.ParseUint(s, 10, 64)
  if err != nil {
   return err
  }
  v.SetUint(parsed)
 case reflect.Float32, reflect.Float64:
  parsed, err := strconv.ParseFloat(s, 64)
  if err != nil {
   return err
  }
  v.SetFloat(parsed)
 }
 return nil
}
// valid checks s against the regular expression in validate; on mismatch
// it returns an error carrying msg. An empty pattern disables validation;
// a malformed pattern is returned as the compile error.
func valid(s string, validate, msg string) error {
 if validate == "" {
  return nil
 }
 matched, err := regexp.MatchString(validate, s)
 if err != nil {
  return err
 }
 if matched {
  return nil
 }
 return errors.New(msg)
}
// JSON encodes i as JSON onto the response writer with Content-Type
// application/json; charset=UTF-8. The HTTP status stays at the
// writer's default (200) unless WriteStatus was called beforehand.
func (ctx *Context) JSON(i interface{}) (err error) {
 // add the content-type header (WriteHeader here adds a header, not a status)
 ctx.WriteHeader(HeaderContentType, MIMEApplicationJSONCharsetUTF8)
 // Encode json data to rw
 return json.NewEncoder(ctx.Rw).Encode(i)
}
// XML encodes i as XML onto the response writer with Content-Type
// application/xml; charset=UTF-8. The HTTP status stays at the
// writer's default (200) unless WriteStatus was called beforehand.
func (ctx *Context) XML(i interface{}) (err error) {
 // add the content-type header (WriteHeader here adds a header, not a status)
 ctx.WriteHeader(HeaderContentType, MIMEApplicationXMLCharsetUTF8)
 // Encode xml data to rw
 return xml.NewEncoder(ctx.Rw).Encode(i)
}
// WriteStatus sets the response's HTTP status code. Must be called
// before any body bytes are written.
func (ctx *Context) WriteStatus(code int) {
 ctx.Rw.WriteHeader(code)
}
// WriteHeader adds a response header. Note it uses Header().Add, so a
// repeated key accumulates values rather than being replaced.
func (ctx *Context) WriteHeader(k, v string) {
 ctx.Rw.Header().Add(k, v)
}
// WriteString writes a raw string to the response body. The write error
// is deliberately discarded.
func (ctx *Context) WriteString(s string) {
 io.WriteString(ctx.Rw, s)
}
// WriteFile serves the file at filepath using http.ServeFile (which
// handles Range requests, content type and 404s).
func (ctx *Context) WriteFile(filepath string) {
 http.ServeFile(ctx.Rw, ctx.Req, filepath)
}
// WriteData writes data to the body with the given Content-Type header.
// The write error is deliberately discarded.
func (ctx *Context) WriteData(contentType string, data []byte) {
 ctx.WriteHeader(HeaderContentType, contentType)
 ctx.Rw.Write(data)
}
// SetValue stores a key/value pair on the embedded context by replacing
// it with a context.WithValue wrapper.
func (ctx *Context) SetValue(key, val interface{}) {
 ctx.Context = context.WithValue(ctx.Context, key, val)
}
// GetValue returns the context value stored for key, or nil when absent.
func (ctx *Context) GetValue(key interface{}) interface{} {
 return ctx.Value(key)
}
// SetField records a structured log field on the context. Existing fields
// are merged into a new fields map containing key/val; the comma-ok
// assertion tolerates a context with no fields yet.
// NOTE(review): n.Merge(f) merges old fields into the new map — whether an
// existing key wins over the new value depends on Merge's semantics,
// which are defined elsewhere; confirm before relying on overwrite order.
func (ctx *Context) SetField(key string, val interface{}) {
 f, _ := ctx.Value(fieldKey{}).(fields)
 n := fields{key: val}
 n.Merge(f)
 ctx.SetValue(fieldKey{}, n)
}
// LogError logs at error level with the context's fields attached;
// caller depth 2 points the "caller" field at this method's caller.
func (ctx *Context) LogError(args ...interface{}) {
 log.WithFields(log.Fields(ctx.stackField(2))).Error(args...)
}
// LogErrorf logs a formatted message at error level with the context's
// fields attached; caller depth 2 resolves to this method's caller.
func (ctx *Context) LogErrorf(format string, args ...interface{}) {
 log.WithFields(log.Fields(ctx.stackField(2))).Errorf(format, args...)
}
// LogInfo logs at info level with the context's fields attached;
// caller depth 2 resolves to this method's caller.
func (ctx *Context) LogInfo(args ...interface{}) {
 log.WithFields(log.Fields(ctx.stackField(2))).Info(args...)
}
// LogInfof logs a formatted message at info level with the context's
// fields attached; caller depth 2 resolves to this method's caller.
func (ctx *Context) LogInfof(format string, args ...interface{}) {
 log.WithFields(log.Fields(ctx.stackField(2))).Infof(format, args...)
}
// stackField builds the log fields for one log call: the context's own
// fields plus a "caller" field of the form file:line, resolved depth
// frames above this function (2 = the Log* method's caller).
func (ctx *Context) stackField(depth int) fields {
 _, caller, line, _ := runtime.Caller(depth)
 stack := fields{
  "caller": fmt.Sprintf("%s:%d", caller, line),
 }
 stack.Merge(ctx.fields())
 return stack
}
// fields returns the log fields stored on the context, or the zero
// fields value when none were ever set. The comma-ok assertion keeps
// this safe on a fresh context (the previous bare assertion panicked
// when SetField had never been called), matching SetField's own
// tolerant lookup.
func (ctx *Context) fields() fields {
 f, _ := ctx.Value(fieldKey{}).(fields)
 return f
}
|
package controllers
import (
"christopher/helpers"
"christopher/models"
"encoding/json"
"github.com/gin-gonic/gin"
"log"
"strconv"
)
type MerchantGallerySigle map[string]interface{}
// MerchantGallery is the JSON representation of one gallery photo.
// The previous tags (`json:"id, Number"`) contained a space, making
// " Number" an unrecognized option that encoding/json silently ignored
// (and go vet flags); the cleaned tags encode identically.
type MerchantGallery struct {
 Id int64 `json:"id"`
 Photo_url string `json:"photo_url"`
 Merchant_uid string `json:"merchant_uid"`
 Create_at int64 `json:"create_at"`
 Update_at int64 `json:"update_at"`
}
// MerchantGalleryForm carries the bound request form fields for the
// gallery create/update handlers. The timestamp fields are accepted but
// overwritten server-side by the handlers.
type MerchantGalleryForm struct {
 Id int64 `form:"id"`
 Photo_url string `form:"photo_url"`
 Merchant_uid string `form:"merchant_uid"`
 Create_at int64 `form:"create_at"`
 Update_at int64 `form:"update_at"`
}
// Handlers for managing a merchant's photo gallery (create / update / delete / list).
// create
// NewMerchantGaller handles a request that adds a photo to a merchant's
// gallery for the service named in the URL. The response is always HTTP
// 200; the effective status travels in the JSON body.
// NOTE(review): the error from c.Bind is ignored — a malformed body
// proceeds with zero-valued form fields; confirm this is intended.
func NewMerchantGaller(c *gin.Context) {
 SERVICE_NAME := c.Params.ByName("service_name")
 var form MerchantGalleryForm
 c.Bind(&form)
 // Timestamps are set server-side (Unix milliseconds), ignoring any
 // client-supplied values.
 merchantGallery := &models.MerchantGallery{
  Photo_url: form.Photo_url,
  Merchant_uid: form.Merchant_uid,
  Create_at: helpers.Unix_milisec_time_now(),
  Update_at: helpers.Unix_milisec_time_now(),
 }
 msg, err := merchantGallery.Save(SERVICE_NAME)
 // The model signals failure with the sentinel string "err".
 if msg == "err" {
  c.JSON(200, gin.H{
   "status": 500,
   "error": err,
  })
 } else {
  c.JSON(200, gin.H{
   "status": 200,
   "message": "Created!",
  })
 }
}
// UpdateMerchantGaller handles a request that updates the gallery photo
// identified by the "id" URL parameter. Response is always HTTP 200 with
// the effective status in the JSON body.
// NOTE(review): Create_at is reset to "now" on every update, which
// destroys the original creation time unless models.Update ignores it —
// verify against the model layer. The c.Bind error is also ignored.
func UpdateMerchantGaller(c *gin.Context) {
 SERVICE_NAME := c.Params.ByName("service_name")
 photo_id := c.Params.ByName("id")
 var form MerchantGalleryForm
 c.Bind(&form)
 merchantGallery := &models.MerchantGallery{
  Photo_url: form.Photo_url,
  Merchant_uid: form.Merchant_uid,
  Create_at: helpers.Unix_milisec_time_now(),
  Update_at: helpers.Unix_milisec_time_now(),
 }
 msg, err := merchantGallery.Update(SERVICE_NAME, photo_id)
 // The model signals failure with the sentinel string "err".
 if msg == "err" {
  c.JSON(200, gin.H{
   "status": 500,
   "error": err,
  })
 } else {
  c.JSON(200, gin.H{
   "status": 200,
   "message": "Updated!",
  })
 }
}
// DeleteMerchantGaller handles a request that deletes the gallery photo
// whose numeric id arrives in the "uid" URL parameter. Response is
// always HTTP 200 with the effective status in the JSON body.
// NOTE(review): the strconv.ParseInt error is discarded, so a
// non-numeric id silently becomes 0 and the delete targets id 0 —
// confirm whether that should instead return a client error.
func DeleteMerchantGaller(c *gin.Context) {
 SERVICE_NAME := c.Params.ByName("service_name")
 id_photo := c.Params.ByName("uid")
 id_int, _ := strconv.ParseInt(id_photo, 0, 64)
 merchantGallery := &models.MerchantGallery{
  Id: id_int,
 }
 err := merchantGallery.Delete(SERVICE_NAME)
 if err != nil {
  c.JSON(200, gin.H{
   "status": 500,
   "message": "Somting wrong!",
  })
 } else {
  c.JSON(200, gin.H{
   "status": 200,
   "message": "Deleted",
  })
 }
}
// GetMerchantsGallery returns the photo gallery of the merchant whose
// uid arrives in the URL. The model layer stores the gallery as a JSON
// string, so it is decoded here before being re-emitted to the client.
// Response is always HTTP 200 with the effective status in the body.
func GetMerchantsGallery(c *gin.Context) {
 SERVICE_NAME := c.Params.ByName("service_name")
 merchant_uid := c.Params.ByName("uid")
 merchant := &models.MerchantMeta{
  Merchant_uid: merchant_uid,
 }
 data, msg, err := merchant.MerchantShowGallery(SERVICE_NAME)
 // The model signals failure with the sentinel string "err".
 if msg == "err" {
  c.JSON(200, gin.H{
   "status": 500,
   "message": err,
  })
 } else {
  log.Println(data)
  mercahnts := []byte(data)
  merchant_slice := make([]MerchantGallerySigle, 0)
  err_unmarshal := json.Unmarshal(mercahnts, &merchant_slice)
  if err_unmarshal != nil {
   c.JSON(200, gin.H{
    "status": 500,
    "message": "json error",
   })
   // Bug fix: without this return the handler fell through and wrote
   // a second, contradictory "Success!" response to the same request.
   return
  }
  c.JSON(200, gin.H{
   "status": 200,
   "message": "Success!",
   "data": merchant_slice,
  })
 }
}
|
//go:build local
// +build local

package main
import (
gwv "../../gwv"
"path/filepath"
"simonwaldherr.de/go/golibs/gopath"
)
// main starts a demo gwv web server on :8080 with TLS on :4443
// (key/cert resolved relative to this package's directory), serving a
// favicon, a permanent redirect, and two reverse proxies.
func main() {
 dir := gopath.Dir()
 HTTPD := gwv.NewWebServer(8080, 60)
 HTTPD.ConfigSSL(4443, filepath.Join(dir, "..", "ssl.key"), filepath.Join(dir, "..", "ssl.cert"), true)
 HTTPD.URLhandler(
  gwv.Favicon(filepath.Join(".", "static", "favicon.ico")),
  gwv.Redirect("^/go/$", "/golang/", 301),
  gwv.Proxy("^/proxy/", "http://selfcss.org/"),
  gwv.Proxy("^/golang/", "https://golang.org/"),
 )
 HTTPD.Start()
 // Block until all server goroutines finish.
 HTTPD.WG.Wait()
}
|
package bootstrap
import (
"encoding/json"
"fmt"
"io/ioutil"
"ktmall/config"
"ktmall/routes"
"strings"
"github.com/labstack/echo/v4"
)
// RunServer builds the Echo application and serves it on the configured
// port, exiting fatally if the listener fails.
func RunServer() {
 e := SetupServer()
 e.Logger.Fatal(e.Start(config.String("APP.PORT")))
}
// SetupServer creates and wires the Echo instance: debug mode from the
// environment, route registration, a route dump for inspection, the
// template renderer, and the global application handle.
func SetupServer() *echo.Echo {
 e := echo.New()
 e.Debug = config.IsDev()
 e.HideBanner = true
 routes.Register(e)
 // Dump the route table to the temp dir for debugging/inspection.
 PrintRoutes(e, config.String("APP.TEMP_DIR")+"/routes.json")
 SetupServerRender(e)
 fmt.Printf("\n\napp runmode is %s\n\n", config.AppRunMode())
 config.SetupApp(&config.Application{Engine: e})
 return e
}
// ่พๅบ่ทฏ็ฑ้
็ฝฎ
func PrintRoutes(e *echo.Echo, filename string) {
routes := make([]*echo.Route, 0)
for _, item := range e.Routes() {
if strings.HasPrefix(item.Name, "github.com") {
continue
}
routes = append(routes, item)
}
routesStr, _ := json.MarshalIndent(struct {
Count int `json:"count"`
Routes []*echo.Route `json:"routes"`
}{
Count: len(routes),
Routes: routes,
}, "", " ")
ioutil.WriteFile(filename, routesStr, 0644)
}
|
// Copyright 2014 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package batcheval
import (
"context"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/spanset"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
)
// init registers DeleteRange as the evaluator for roachpb.DeleteRange
// requests, with declareKeysDeleteRange declaring its latch/lock spans.
func init() {
 RegisterReadWriteCommand(roachpb.DeleteRange, declareKeysDeleteRange, DeleteRange)
}
// declareKeysDeleteRange declares the spans a DeleteRange needs:
// inline (non-MVCC) deletes only need latches, while regular MVCC
// deletes also declare isolated (lock) spans.
func declareKeysDeleteRange(
 rs ImmutableRangeState,
 header roachpb.Header,
 req roachpb.Request,
 latchSpans, lockSpans *spanset.SpanSet,
) {
 args := req.(*roachpb.DeleteRangeRequest)
 if args.Inline {
  DefaultDeclareKeys(rs, header, req, latchSpans, lockSpans)
 } else {
  DefaultDeclareIsolatedKeys(rs, header, req, latchSpans, lockSpans)
 }
}
// DeleteRange deletes the range of key/value pairs specified by
// start and end keys. Inline deletes use an empty timestamp; MVCC
// deletes use the batch header's timestamp. The reply reports the
// number of deleted keys, the deleted keys themselves when requested,
// and a resume span if the key limit was hit.
func DeleteRange(
 ctx context.Context, readWriter storage.ReadWriter, cArgs CommandArgs, resp roachpb.Response,
) (result.Result, error) {
 args := cArgs.Args.(*roachpb.DeleteRangeRequest)
 h := cArgs.Header
 reply := resp.(*roachpb.DeleteRangeResponse)
 var timestamp hlc.Timestamp
 if !args.Inline {
  timestamp = h.Timestamp
 }
 // NB: Even if args.ReturnKeys is false, we want to know which intents were
 // written if we're evaluating the DeleteRange for a transaction so that we
 // can update the Result's AcquiredLocks field.
 returnKeys := args.ReturnKeys || h.Txn != nil
 deleted, resumeSpan, num, err := storage.MVCCDeleteRange(
  ctx, readWriter, cArgs.Stats, args.Key, args.EndKey, h.MaxSpanRequestKeys, timestamp, h.Txn, returnKeys,
 )
 // Only surface the deleted keys to the client when explicitly requested.
 if err == nil && args.ReturnKeys {
  reply.Keys = deleted
 }
 reply.NumKeys = num
 if resumeSpan != nil {
  reply.ResumeSpan = resumeSpan
  reply.ResumeReason = roachpb.RESUME_KEY_LIMIT
 }
 // NB: even if MVCC returns an error, it may still have written an intent
 // into the batch. This allows callers to consume errors like WriteTooOld
 // without re-evaluating the batch. This behavior isn't particularly
 // desirable, but while it remains, we need to assume that an intent could
 // have been written even when an error is returned. This is harmless if the
 // error is not consumed by the caller because the result will be discarded.
 return result.FromAcquiredLocks(h.Txn, deleted...), err
}
|
/**
In Go, a method is a function with a receiver. The receiver may be a
value or a pointer of a named type (typically a struct):

 func (receiverName ReceiverType) methodName() [returnType] {
  // method body
 }
**/
package main
import "fmt"
// Circle is a circle described by its radius.
type Circle struct {
 radius float64
}
// main builds a circle of radius 10 and prints its area.
func main() {
 c1 := Circle{radius: 10.00}
 fmt.Println("Area of Clircle(c1)=", c1.getArea())
}
// getArea is a method on Circle returning its area, using 3.14 as an
// approximation of pi (which is why the demo prints exactly 314).
func (c Circle) getArea() float64 {
 // c.radius is the Circle receiver's field
 return 3.14 * c.radius * c.radius
}
// Output: Area of Clircle(c1)= 314
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package pb_test
import (
fmt "fmt"
"reflect"
"sort"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"storj.io/common/pb"
)
// TestCompatibility asserts that each protobuf message and its *Signing
// counterpart declare the same fields (name, wire type, index).
func TestCompatibility(t *testing.T) {
 // when these fail, the X and XSigning definitions are out of sync
 // remember to update the conversions in auth/signing
 check(t, pb.OrderLimit{}, pb.OrderLimitSigning{})
 check(t, pb.Order{}, pb.OrderSigning{})
 check(t, pb.PieceHash{}, pb.PieceHashSigning{})
}
// check compares the protobuf field sets of two struct values and fails
// the test when they differ.
func check(t *testing.T, a, b interface{}) {
 afields := fields(a)
 bfields := fields(b)
 assert.Equal(t, afields, bfields, fmt.Sprintf("%T and %T definitions don't match", a, b))
}
// Field is the comparable summary of one protobuf struct field:
// Go field name, wire type, and field index from the protobuf tag.
type Field struct {
 Name string
 Type string
 Index string
}
// fields extracts the protobuf-tagged fields of struct value v, sorted
// by name for order-independent comparison. Fields without a protobuf
// tag are skipped. Panics when v is not a struct.
// NOTE(review): tags[1] assumes every protobuf tag has at least two
// comma-separated parts; generated code always does, but a hand-written
// tag without a comma would panic here.
func fields(v interface{}) []Field {
 t := reflect.ValueOf(v).Type()
 if t.Kind() != reflect.Struct {
  panic(t.Kind())
 }
 var fields []Field
 for i := 0; i < t.NumField(); i++ {
  f := t.Field(i)
  tag := f.Tag.Get("protobuf")
  if tag == "" {
   continue
  }
  tags := strings.Split(tag, ",")
  fields = append(fields, Field{
   Name: f.Name,
   Type: tags[0],
   Index: tags[1],
  })
 }
 sort.Slice(fields, func(i, k int) bool {
  return fields[i].Name < fields[k].Name
 })
 return fields
}
|
// https://programmers.co.kr/learn/courses/30/lessons/12969
package main
import (
"fmt"
"strings"
)
// p12969 reads a width and a height from stdin and prints a
// height-line rectangle of '*' characters (programmers.co.kr 12969).
func p12969() {
 var width, height int
 fmt.Scan(&width, &height)
 row := strings.Repeat("*", width)
 for line := 0; line < height; line++ {
  fmt.Println(row)
 }
}
|
package logif
import (
"log"
"os"
"testing"
)
// TestExample exercises the logif API end to end: package-level helpers,
// the DefaultLogger methods, and the verbosity (V/IsV) and debugging
// (D/IsD) gates. It asserts nothing — it is a smoke test whose output is
// inspected manually on stdout.
func TestExample(t *testing.T) {
 log.SetOutput(os.Stdout)
 // Package-level convenience functions.
 Debugf("hello %s", "debug")
 Infof("hello %s", "info")
 Warningf("hello %s", "warning")
 Errorf("hello %s", "error")
 // Same calls through the DefaultLogger instance.
 DefaultLogger.Debugf("hello %s", "dl debug")
 DefaultLogger.Infof("hello %s", "dl info")
 DefaultLogger.Warningf("hello %s", "dl warning")
 DefaultLogger.Errorf("hello %s", "dl error")
 // Verbosity gates: with verbosity 1, V(1) passes and V(2) is dropped.
 DefaultLogger.SetLevel(LevelDebug)
 DefaultLogger.SetVerbosity(1)
 DefaultLogger.V(1).Infof("level 1")
 DefaultLogger.V(2).Infof("level 2 not shown")
 if IsV(1) {
  log.Printf("level 1")
 }
 if IsV(2) {
  log.Printf("level 2 not shown")
 }
 // Debug gates mirror the verbosity gates.
 DefaultLogger.SetDebugging(1)
 DefaultLogger.D(1).Debugf("debug level 1")
 DefaultLogger.D(2).Debugf("debug level 2 not shown")
 if IsD(1) {
  log.Printf("level 1")
 }
 if IsD(2) {
  log.Printf("level 2 not shown")
 }
 if DefaultLogger.IsV(2) {
  log.Printf("this line shouldn't print")
  DefaultLogger.V(2).Infof("level 2 not shown")
 }
 // Raising verbosity to 3 opens the V(2) gate.
 DefaultLogger.SetVerbosity(3)
 if DefaultLogger.IsV(2) {
  log.Printf("this line should print")
  DefaultLogger.V(2).Infof("level 2 shown when verbosity set to 3")
 }
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package image
import (
"bytes"
"fmt"
"github.com/google/gapid/core/data/endian"
"github.com/google/gapid/core/math/sint"
"github.com/google/gapid/core/os/device"
)
// rgbaF32 is a single RGBA texel with 32-bit float channels.
type rgbaF32 struct {
 r, g, b, a float32
}
// rgbaAvg returns the channel-wise average of a and b.
func rgbaAvg(a, b rgbaF32) rgbaF32 {
 return rgbaF32{(a.r + b.r) * 0.5, (a.g + b.g) * 0.5, (a.b + b.b) * 0.5, (a.a + b.a) * 0.5}
}
// rgbaLerp linearly interpolates between a (f=0) and b (f=1), channel-wise.
func rgbaLerp(a, b rgbaF32, f float32) rgbaF32 {
 return rgbaF32{a.r + (b.r-a.r)*f, a.g + (b.g-a.g)*f, a.b + (b.b-a.b)*f, a.a + (b.a-a.a)*f}
}
// resizeRGBA_F32 returns a RGBA_F32 image resized from srcW x srcH to dstW x dstH.
// The algorithm uses pixel-pair averaging to down-sample (if required) the
// image to no greater than twice the width or height than the target
// dimensions, then uses a bilinear interpolator to calculate the final image
// at the requested size.
// data holds little-endian float32 RGBA texels; the depth (D) axis is
// processed the same way as width and height, so this also handles 3D images.
func resizeRGBA_F32(data []byte, srcW, srcH, srcD, dstW, dstH, dstD int) ([]byte, error) {
 if err := checkSize(data, RGBA_F32.format(), srcW, srcH, srcD); err != nil {
  return nil, err
 }
 if srcW <= 0 || srcH <= 0 || srcD <= 0 {
  return nil, fmt.Errorf("Invalid source size for Resize: %dx%dx%d", srcW, srcH, srcD)
 }
 if dstW <= 0 || dstH <= 0 || dstD <= 0 {
  return nil, fmt.Errorf("Invalid target size for Resize: %dx%dx%d", dstW, dstH, dstD)
 }
 r := endian.Reader(bytes.NewReader(data), device.LittleEndian)
 // Ping-pong buffers sized for the larger of source and destination:
 // each pass below reads src and writes dst, then the two are swapped.
 bufTexels := sint.Max(srcW*srcH*srcD, dstW*dstH*dstD)
 bufA, bufB := make([]rgbaF32, bufTexels), make([]rgbaF32, bufTexels)
 for i := range bufA {
  bufA[i] = rgbaF32{r.Float32(), r.Float32(), r.Float32(), r.Float32()}
 }
 dst, src := bufB, bufA
 // samples maps destination index val (scaled by scale) to the two
 // neighboring source indices and the interpolation factor between them.
 samples := func(val, max int, scale float64) (int, int, float32) {
  f := float64(val) * scale
  i := int(f)
  return i, sint.Min(i+1, max-1), float32(f - float64(i))
 }
 for dstD*2 <= srcD { // Depth 2x downsample
  i, newD := 0, srcD/2
  for z := 0; z < newD; z++ {
   srcA, srcB := src[srcW*srcH*z*2:], src[srcW*srcH*(z*2+1):]
   for y := 0; y < srcH; y++ {
    srcA, srcB := srcA[srcW*y:], srcB[srcW*y:]
    for x := 0; x < srcW; x++ {
     dst[i] = rgbaAvg(srcA[x], srcB[x])
     i++
    }
   }
  }
  dst, src, srcD = src, dst, newD
 }
 if srcD != dstD { // Depth bi-linear downsample
  i, s := 0, float64(sint.Max(srcD-1, 0))/float64(sint.Max(dstD-1, 1))
  for z := 0; z < dstD; z++ {
   iA, iB, f := samples(z, srcD, s)
   srcA, srcB := src[srcW*srcH*iA:], src[srcW*srcH*iB:]
   for y := 0; y < srcH; y++ {
    srcA, srcB := srcA[srcW*y:], srcB[srcW*y:]
    for x := 0; x < srcW; x++ {
     dst[i] = rgbaLerp(srcA[x], srcB[x], f)
     i++
    }
   }
  }
  dst, src, srcD = src, dst, dstD
 }
 for dstH*2 <= srcH { // Vertical 2x downsample
  i, newH := 0, srcH/2
  for z := 0; z < srcD; z++ {
   // Shadowing src with the current z-slice keeps the inner loops 2D.
   src := src[srcW*srcH*z:]
   for y := 0; y < newH; y++ {
    srcA, srcB := src[srcW*y*2:], src[srcW*(y*2+1):]
    for x := 0; x < srcW; x++ {
     dst[i] = rgbaAvg(srcA[x], srcB[x])
     i++
    }
   }
  }
  dst, src, srcH = src, dst, newH
 }
 if srcH != dstH { // Vertical bi-linear downsample
  i, s := 0, float64(sint.Max(srcH-1, 0))/float64(sint.Max(dstH-1, 1))
  for z := 0; z < srcD; z++ {
   src := src[srcW*srcH*z:]
   for y := 0; y < dstH; y++ {
    iA, iB, f := samples(y, srcH, s)
    srcA, srcB := src[srcW*iA:], src[srcW*iB:]
    for x := 0; x < srcW; x++ {
     dst[i] = rgbaLerp(srcA[x], srcB[x], f)
     i++
    }
   }
  }
  dst, src, srcH = src, dst, dstH
 }
 for dstW*2 <= srcW { // Horizontal 2x downsample
  i, newW := 0, srcW/2
  for z := 0; z < srcD; z++ {
   src := src[srcW*srcH*z:]
   for y := 0; y < srcH; y++ {
    src := src[srcW*y:]
    for x := 0; x < srcW/2; x++ {
     dst[i] = rgbaAvg(src[x*2], src[x*2+1])
     i++
    }
   }
  }
  dst, src, srcW = src, dst, newW
 }
 if srcW != dstW { // Horizontal bi-linear downsample
  i, s := 0, float64(sint.Max(srcW-1, 0))/float64(sint.Max(dstW-1, 1))
  for z := 0; z < srcD; z++ {
   src := src[srcW*srcH*z:]
   for y := 0; y < srcH; y++ {
    src := src[srcW*y:]
    for x := 0; x < dstW; x++ {
     iA, iB, f := samples(x, srcW, s)
     dst[i] = rgbaLerp(src[iA], src[iB], f)
     i++
    }
   }
  }
  dst, src, srcW = src, dst, dstW
 }
 // Serialize the final buffer (src, after the last swap) back to
 // little-endian float32 bytes: 4 channels x 4 bytes per texel.
 out := make([]byte, dstW*dstH*dstD*4*4)
 w := endian.Writer(bytes.NewBuffer(out[:0]), device.LittleEndian)
 for i, c := 0, dstW*dstH*dstD; i < c; i++ {
  w.Float32(src[i].r)
  w.Float32(src[i].g)
  w.Float32(src[i].b)
  w.Float32(src[i].a)
 }
 return out, nil
}
|
package fsjobqueue
import (
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
)
// uuidList parses the given strings into UUIDs, failing the test on any
// malformed input. Test helper.
func uuidList(t *testing.T, strs ...string) []uuid.UUID {
 var err error
 ids := make([]uuid.UUID, len(strs))
 for i, s := range strs {
  ids[i], err = uuid.Parse(s)
  require.NoError(t, err)
 }
 return ids
}
// TestUniqueUUIDList covers uniqueUUIDList with an empty slice, an
// already-unique slice, adjacent duplicates, and non-adjacent duplicates.
// ElementsMatch is used because result ordering is not guaranteed.
func TestUniqueUUIDList(t *testing.T) {
 l := uniqueUUIDList([]uuid.UUID{})
 require.Empty(t, l)
 s := uuidList(t, "8ad6bbcd-55f9-4cd8-be45-d0370ff079d2", "a0ad7428-b813-4efb-a156-da2b524f4868", "36e5817c-f29d-4043-8d7d-95ffaa77ff88")
 l = uniqueUUIDList(s)
 require.ElementsMatch(t, s, l)
 s = uuidList(t, "8ad6bbcd-55f9-4cd8-be45-d0370ff079d2", "8ad6bbcd-55f9-4cd8-be45-d0370ff079d2")
 l = uniqueUUIDList(s)
 require.ElementsMatch(t, uuidList(t, "8ad6bbcd-55f9-4cd8-be45-d0370ff079d2"), l)
 s = uuidList(t, "8ad6bbcd-55f9-4cd8-be45-d0370ff079d2", "a0ad7428-b813-4efb-a156-da2b524f4868", "8ad6bbcd-55f9-4cd8-be45-d0370ff079d2")
 l = uniqueUUIDList(s)
 require.ElementsMatch(t, uuidList(t, "8ad6bbcd-55f9-4cd8-be45-d0370ff079d2", "a0ad7428-b813-4efb-a156-da2b524f4868"), l)
}
|
package logger
import (
"errors"
"fmt"
"io"
"os"
"github.com/dmitryt/otus-golang-hw/hw12_13_14_15_calendar/internal/config"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
var ErrFileLog = errors.New("cannot setup file log")
// getLogLevel maps a configuration string to a zerolog level. Any
// unrecognized value (including the empty string) falls back to info.
func getLogLevel(str string) zerolog.Level {
 levels := map[string]zerolog.Level{
  "error": zerolog.ErrorLevel,
  "warn":  zerolog.WarnLevel,
  "info":  zerolog.InfoLevel,
  "debug": zerolog.DebugLevel,
 }
 if lvl, ok := levels[str]; ok {
  return lvl
 }
 return zerolog.InfoLevel
}
// Init configures the global zerolog logger from c: the level comes
// from c.Level, and output goes to stderr, duplicated into c.FilePath
// when one is configured. Returns ErrFileLog (wrapped) if the log file
// cannot be opened.
func Init(c *config.LogConfig) (err error) {
 var logInput io.Writer = os.Stderr
 logLevel := getLogLevel(c.Level)
 if c.FilePath != "" {
  f, err := os.OpenFile(c.FilePath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o666)
  if err != nil {
   return fmt.Errorf("%s: %w", ErrFileLog, err)
  }
  logInput = zerolog.MultiLevelWriter(f, os.Stderr)
  // defer f.Close()
  // hm... If I close the file, I cannot write there
  // probably, need to close the file, when service is shut down
  // NOTE(review): f intentionally stays open for the process lifetime;
  // a graceful-shutdown hook should own closing it.
 }
 log.Logger = zerolog.New(logInput).With().Timestamp().Logger().Level(logLevel)
 return
}
|
package main
import "fmt"
// ExampleTransformLine demonstrates transformLine on a df-style grid
// node line, extracting coordinates and the size/used/avail/use% fields.
func ExampleTransformLine() {
 fmt.Println(transformLine("/dev/grid/node-x1-y10 90T 71T 19T 78%"))
 // Output:
 // [1 10 90 71 19 78%]
}
|
package server
import (
"log"
"net/http"
)
// closeHandler is an http.Handler that signals server shutdown through
// its quit channel when hit.
type closeHandler struct {
 quit chan<- bool
 logger *log.Logger
}
// ServeHTTP handles a shutdown request: it logs the event and sends on
// the quit channel so the server's owner can stop. The receiver is
// renamed to h — the old name shadowed the closeHandler type itself.
// Note the send blocks until someone receives from quit.
func (h *closeHandler) ServeHTTP(respWriter http.ResponseWriter, req *http.Request) {
 h.logger.Println("Closing server.")
 h.quit <- true
}
|
package BLC
import (
"github.com/boltdb/bolt"
"log"
"fmt"
"time"
"math/big"
)
// BlockChainIterator walks the chain backwards, from the block at
// currentHash toward the genesis block, reading from the bolt database.
type BlockChainIterator struct {
 currentHash []byte
 db *bolt.DB
}
// Iterator returns an iterator positioned at the chain tip (newest
// block); successive Next calls walk back toward the genesis block.
// (Also fixes the missing gofmt space between receiver and method name.)
func (blockChain *BlockChain) Iterator() *BlockChainIterator {
 return &BlockChainIterator{blockChain.tip, blockChain.db}
}
// Next reads the block at the iterator's current hash from the block
// bucket and advances the iterator to that block's predecessor.
// Panics (via log.Panic) on database errors.
func (iter *BlockChainIterator) Next() *Block {
 var block *Block
 err := iter.db.View(func(tx *bolt.Tx) error {
  b := tx.Bucket([]byte(blockBucket))
  currentBlockData := b.Get(iter.currentHash)
  block = DeserializeBlock(currentBlockData) // deserialize the stored bytes into a Block
  iter.currentHash = block.PrevBlockHash // step the iterator to the previous block's hash
  return nil
 })
 if err != nil {
  log.Panic(err)
 }
 return block
}
// VisitBlockChain prints every block from the tip back to the genesis
// block. The walk stops when the previous-block hash is numerically
// zero, which marks the genesis block.
func (bc *BlockChain) VisitBlockChain() {
 iter := bc.Iterator()
 for {
  block := iter.Next()
  fmt.Printf("ๅบๅ้ซๅบฆ๏ผ%d\n", block.Height)
  fmt.Printf("ไธไธไธชๅบๅ็hash๏ผ%x\n", block.PrevBlockHash)
  fmt.Printf("ๅบๅๆฐๆฎ๏ผ%s\n", block.Data)
  fmt.Printf("ๅบๅๆถ้ดๆณ๏ผ%s\n", time.Unix(block.Timestamp, 0).Format("2006-01-02 15:04:05 AM") ) // Go reference-time layout
  fmt.Printf("ๅบๅhash๏ผ%x\n", block.Hash)
  fmt.Printf("Nonce: %d\n\n", block.Nonce)
  // Genesis block: its PrevBlockHash is all zero bytes.
  var hashInt big.Int
  hashInt.SetBytes(block.PrevBlockHash)
  if big.NewInt(0).Cmp(&hashInt) == 0 {
   break
  }
 }
}
package main
// a is a minimal getter interface.
type a interface {
 get() string
}
// cannot define another get method as method
// overloading is not allowed in go.
// b embeds a, so implementers must provide both get and put.
type b interface {
 a
 put(string)
}
type impl struct {}
// put implements b.put; a no-op in this demo.
func (i impl) put(s string) {
}
// get implements a.get; returns the empty string in this demo.
func (i impl) get() string {
 return ""
}
func main() {
var x impl
var y b
y = x
y.get()
y.put("")
} |
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"github.com/xr-hui/nox/pkg/template"
)
var (
 // Used for flags
 // templateFlag names the template selected with -t/--template.
 templateFlag string
 // generatorCmd is the `generate` subcommand: it builds a template from
 // the flag and positional args, then runs code generation. Errors are
 // printed rather than returned, matching cobra's Run (not RunE) shape.
 generatorCmd = &cobra.Command{
  Use: "generate",
  Short: "Auto generate codes follow template settings",
  Run: func(cmd *cobra.Command, args []string) {
   tmpl, err := template.NewTemplate(templateFlag, args)
   if err != nil {
    fmt.Printf("New template failed, %s.\n", err)
    return
   }
   err = tmpl.Generate()
   if err != nil {
    fmt.Printf("Generate failed, %s.\n", err)
   }
  },
 }
)
// init wires the --template flag and attaches the generate subcommand
// to the root command.
func init() {
 generatorCmd.Flags().StringVarP(&templateFlag, "template", "t", "", "Specific which template should be generated.")
 rootCmd.AddCommand(generatorCmd)
}
|
package main
import "strings"
// Leetcode 395. (medium)
// longestSubstring returns the length of the longest substring of s in
// which every distinct character appears at least k times.
// Divide and conquer: a character occurring fewer than k times can never
// belong to a valid substring, so it splits the problem into pieces.
func longestSubstring(s string, k int) (res int) {
 if len(s) == 0 {
  return 0
 }
 var freq [26]int
 for _, r := range s {
  freq[r-'a']++
 }
 // Find the first character that occurs, but fewer than k times.
 sep := byte(0)
 for idx := 0; idx < 26; idx++ {
  if freq[idx] > 0 && freq[idx] < k {
   sep = byte('a' + idx)
   break
  }
 }
 if sep == 0 {
  // Every character present already occurs at least k times.
  return len(s)
 }
 for _, part := range strings.Split(s, string(sep)) {
  if sub := longestSubstring(part, k); sub > res {
   res = sub
  }
 }
 return res
}
|
package main
import (
"fmt"
"math"
"os"
)
type vector struct{ x, y, z float64 }
// add returns the component-wise sum v + w.
func (v vector) add(w vector) vector {
 return vector{v.x + w.x, v.y + w.y, v.z + w.z}
}
// sub returns the component-wise difference v - w.
func (v vector) sub(w vector) vector {
 return vector{v.x - w.x, v.y - w.y, v.z - w.z}
}
// scale returns v multiplied by the scalar m.
func (v vector) scale(m float64) vector {
 return vector{v.x * m, v.y * m, v.z * m}
}
// mod returns the Euclidean magnitude (modulus) of v.
func (v vector) mod() float64 {
 return math.Sqrt(v.x*v.x + v.y*v.y + v.z*v.z)
}
// Simulation state, populated by initiateSystem from the input file and
// mutated in place by each simulate() step.
var (
 bodies, timeSteps int
 masses []float64
 gc float64
 positions, velocities, accelerations []vector
)
// initiateSystem loads the simulation configuration: first line holds
// the gravitational constant, body count and step count; then, per body,
// a mass line, a position line and a velocity line.
// NOTE(review): Fscanf errors are ignored, so a malformed file silently
// yields zero-valued state rather than an error.
func initiateSystem(fileName string) error {
 file, err := os.Open(fileName)
 if err != nil {
  return err
 }
 defer file.Close()
 fmt.Fscanf(file, "%f%d%d", &gc, &bodies, &timeSteps)
 masses = make([]float64, bodies)
 positions = make([]vector, bodies)
 velocities = make([]vector, bodies)
 accelerations = make([]vector, bodies)
 for i := 0; i < bodies; i++ {
  fmt.Fscanf(file, "%f", &masses[i])
  fmt.Fscanf(file, "%f%f%f", &positions[i].x, &positions[i].y, &positions[i].z)
  fmt.Fscanf(file, "%f%f%f", &velocities[i].x, &velocities[i].y, &velocities[i].z)
 }
 return nil
}
// resolveCollisions swaps the velocities of any two bodies occupying
// exactly the same position (a perfectly elastic collision of equal
// exchange). Exact float equality is intentional here.
func resolveCollisions() {
 for i := 0; i < bodies-1; i++ {
  for j := i + 1; j < bodies; j++ {
   if positions[i] == positions[j] {
    velocities[i], velocities[j] = velocities[j], velocities[i]
   }
  }
 }
}
// computeAccelerations recomputes each body's acceleration as the sum of
// Newtonian gravitational pulls from every other body.
// NOTE(review): two bodies at identical positions give mod() == 0 and a
// division by zero (Inf/NaN); resolveCollisions only swaps velocities
// after the fact, so coincident bodies are not fully guarded against.
func computeAccelerations() {
 for i := 0; i < bodies; i++ {
  accelerations[i] = vector{0, 0, 0}
  for j := 0; j < bodies; j++ {
   if i != j {
    // gc*m_j / |r|^3 times the displacement vector gives gc*m_j/|r|^2
    // directed from body i toward body j.
    temp := gc * masses[j] / math.Pow(positions[i].sub(positions[j]).mod(), 3)
    accelerations[i] = accelerations[i].add(positions[j].sub(positions[i]).scale(temp))
   }
  }
 }
}
// computeVelocities integrates acceleration into velocity over one unit
// time step (v += a * dt with dt = 1).
func computeVelocities() {
 for i := 0; i < bodies; i++ {
  velocities[i] = velocities[i].add(accelerations[i])
 }
}
// computePositions advances each position by v + a/2 over one unit time
// step (the x += v*dt + a*dt^2/2 kinematic update with dt = 1).
func computePositions() {
 for i := 0; i < bodies; i++ {
  positions[i] = positions[i].add(velocities[i].add(accelerations[i].scale(0.5)))
 }
}
// simulate advances the system by one time step: accelerations first,
// then positions (using the old velocities), then velocities, then
// collision handling.
func simulate() {
 computeAccelerations()
 computePositions()
 computeVelocities()
 resolveCollisions()
}
// printResults prints each body's position and velocity in fixed-width
// columns matching the header emitted by main.
func printResults() {
 f := "Body %d : % 8.6f % 8.6f % 8.6f | % 8.6f % 8.6f % 8.6f\n"
 for i := 0; i < bodies; i++ {
  fmt.Printf(
   f, i+1,
   positions[i].x, positions[i].y, positions[i].z,
   velocities[i].x, velocities[i].y, velocities[i].z,
  )
 }
}
// main loads the system configuration named on the command line and runs
// the simulation for the configured number of steps, printing the state
// after each cycle.
func main() {
 if len(os.Args) != 2 {
  fmt.Printf("Usage : %s <file name containing system configuration data>\n", os.Args[0])
 } else {
  err := initiateSystem(os.Args[1])
  if err != nil {
   fmt.Println(err)
   return
  }
  fmt.Print("Body : x y z |")
  fmt.Println(" vx vy vz")
  for i := 0; i < timeSteps; i++ {
   fmt.Printf("\nCycle %d\n", i+1)
   simulate()
   printResults()
  }
 }
}
|
package main
import (
"bufio"
"compress/gzip"
"container/list"
"flag"
"fmt"
"math"
"os"
"strconv"
"strings"
)
const LIMIT1 = 10 // number of probe images the parser holds in memory
const STEP1 = 100 // stride between probe images in the input stream
const LIMIT2 = 1000000 // maximum records scanned in the comparison pass
const MAX_SCORE = 150000.0 // L1 distances above this are discarded early
// main dispatches between the tool's three modes — HTTP server, offline
// parser, and Elasticsearch importer — based on the -a flag, passing the
// remaining flags to the chosen mode.
func main() {
 action := flag.String("a", "", "action")
 limit := flag.Int("n", 1000, "number of imported images")
 path := flag.String("f", "/Volumes/ondra_zaloha/profi-neuralnet-20M.data.gz", "path to gziped file with data")
 esPort := flag.Int("e", 9200, "Elasticsearch port")
 k := flag.Int("k", 200, "how many most important vector components we will use")
 l := flag.Int("l", 100, "how many results we will retrieve from Elasticsearch")
 p := flag.Int("p", 8585, "server port")
 flag.Parse()
 if *action == "" {
  fmt.Println("No action specified.")
  return
 }
 var e error
 switch *action {
 case "server":
  e = server(*esPort, *p, *l, *k)
 case "parser":
  parser()
 case "import":
  e = importer(*limit, *path, *esPort, *k)
 default:
  fmt.Println("Don't know action", *action)
 }
 if e != nil {
  fmt.Println(e)
 }
}
// importer streams gzipped image-vector records from path and indexes up
// to limit of them into Elasticsearch. Each record is a header line
// (carrying the image id) followed by a vector line; at most maxVectors
// positive components are stored as searchable tags.
// Unlike the original, open/decompress failures are returned immediately
// and both the file and the gzip reader are closed.
func importer(limit int, path string, esPort int, maxVectors int) (e error) {
 es := NewElasticSearch(esPort)
 es.deleteIndex()
 file, e := os.Open(path)
 if e != nil {
  return
 }
 defer file.Close()
 r, e := gzip.NewReader(file)
 if e != nil {
  return
 }
 defer r.Close()
 br := bufio.NewReader(r)
 var header, strVectors, id string
 i := 0
 // The loop also terminates on the first read error (normally io.EOF).
 for e == nil && i < limit {
  fmt.Print("\r", i)
  i += 1
  header, e = br.ReadString('\n')
  id = ParseNameStr(header)
  strVectors, e = br.ReadString('\n')
  strVectors = strings.TrimSuffix(strVectors, "\n")
  fields := ParseVectorsToTags(strVectors, maxVectors)
  es.saveData(id, fields, strVectors)
 }
 fmt.Println()
 return
}
// parser runs the offline nearest-neighbour pass: it loads every
// STEP1-th record from the first LIMIT1*STEP1 records as probes, then
// re-reads the stream and compares up to LIMIT2 records against each
// probe, finally printing each probe's best matches.
// NOTE(review): errors from os.Open/gzip.NewReader are not checked
// before use, and a failed open only stops the loops via e != nil;
// the hard-coded path makes this a one-off research tool.
func parser() (e error) {
 limit1 := LIMIT1
 limit2 := LIMIT2
 loaded := make([]*Vectors, limit1)
 //fmt.Println("start", time.Now())
 path := "/Volumes/ondra_zaloha/profi-neuralnet-20M.data.gz"
 //path = "/Users/ondrejodchazel/projects/diplomka/data/tiny.gz"
 file, e := os.Open(path)
 r, e := gzip.NewReader(file)
 br := bufio.NewReader(r)
 var header, strVectors string
 i := 0
 // Pass 1: sample every STEP1-th record as a probe vector.
 for i < limit1*STEP1 && e == nil {
  fmt.Print("\rfirst ", i)
  header, e = br.ReadString('\n')
  strVectors, e = br.ReadString('\n')
  var element *Vectors
  element, e = NewVector(header, strVectors)
  if i%STEP1 == 0 {
   loaded[i/STEP1] = element
  }
  i += 1
 }
 // Pass 2: rewind (reopen) and score every record against each probe.
 file, e = os.Open(path)
 r, e = gzip.NewReader(file)
 br = bufio.NewReader(r)
 i = 0
 for i < limit2 && e == nil {
  fmt.Print("\rsecond ", i)
  header, e = br.ReadString('\n')
  strVectors, e = br.ReadString('\n')
  var element *Vectors
  element, e = NewVector(header, strVectors)
  for j := 0; j < limit1; j++ {
   vec := loaded[j]
   if vec != nil {
    vec.compare(element)
   }
  }
  i += 1
 }
 fmt.Print("\r \r")
 for _, v := range loaded {
  v.print()
 }
 return
}
// VectorDistance returns the L1 (Manhattan) distance between v1 and v2.
// Accumulation stops as soon as the running total exceeds max_score,
// since callers discard any pair that far apart anyway; the partial sum
// is returned in that case.
func VectorDistance(v1, v2 [4096]float64, max_score float64) (ret float64) {
 for i := range v1 {
  ret += math.Abs(v1[i] - v2[i])
  if ret > max_score {
   break
  }
 }
 return
}
// ParseNameStr returns the image id embedded at byte offsets 49-58 of a
// fixed-width header line. Lines shorter than 59 bytes panic on the
// slice bounds — the data format guarantees the width.
func ParseNameStr(line string) string {
 return line[49:59]
}
// ParseName parses the numeric image id embedded at byte offsets 49-58
// of a fixed-width header line as a base-10 integer. Lines shorter than
// 59 bytes panic on the slice bounds, as in the data format these lines
// are fixed-width.
func ParseName(line string) (i int64, e error) {
 return strconv.ParseInt(line[49:59], 10, 0)
}
// ParseVectorsToTags parses a line of 4096 space-separated floats and
// returns the indexes of the components whose value is positive, capped
// at limit entries. The indexes serve as searchable tags.
// Bug fix: the original stored the running count j as the Pair key, so
// the result was always 0,1,2,... regardless of which components were
// positive; the component index i is what the tag must carry.
func ParseVectorsToTags(line string, limit int) []int64 {
 results := make(PairList, 4096)
 line = strings.TrimSuffix(line, "\n")
 strVectors := strings.Split(line, " ")
 j := 0
 var i int64
 for i = 0; i < 4096; i++ {
  fl, e := strconv.ParseFloat(strVectors[i], 32)
  if e != nil {
   fmt.Println(e)
  }
  if fl > 0.0 {
   results[j] = Pair{i, fl}
   j++
  }
 }
 results = results[0:j]
 //sort.Sort(sort.Reverse(results))
 if len(results) > limit {
  results = results[0:limit]
 }
 ret := make([]int64, len(results))
 for i := 0; i < len(ret); i++ {
  ret[i] = results[i].Key.(int64)
 }
 return ret
}
// Pair associates an arbitrary key with a float score, used to rank
// vector components by magnitude.
type Pair struct {
 Key interface{}
 Value float64
}
// PairList implements sort.Interface over Pairs, ordered by ascending Value.
type PairList []Pair
func (p PairList) Len() int { return len(p) }
func (p PairList) Less(i, j int) bool { return p[i].Value < p[j].Value }
func (p PairList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// ParseVectors parses a line of exactly 4096 space-separated floats into
// a fixed-size array. It stops at the first malformed component and
// returns that error — previously each iteration overwrote e, so every
// error but the last was silently lost. bitSize is now 64 explicitly;
// the original passed 0, which strconv treats as 64 but documents as
// invalid. Fewer than 4096 tokens still panics on the slice index, as
// the fixed-width data format guarantees the count.
func ParseVectors(line string) (ret [4096]float64, e error) {
 line = strings.TrimSuffix(line, "\n")
 strVectors := strings.Split(line, " ")
 for i := 0; i < 4096; i++ {
  if ret[i], e = strconv.ParseFloat(strVectors[i], 64); e != nil {
   return
  }
 }
 return
}
// Vectors is one image record: its numeric id, its 4096-component
// feature vector, and a list of the best (lowest-distance) matches
// found so far, kept sorted by ascending score.
type Vectors struct {
 id int64
 vectors [4096]float64
 list *list.List
}
// print writes the record id followed by its ranked matches (id and
// score), best first.
func (v *Vectors) print() {
 fmt.Println(v.id)
 for e := v.list.Front(); e != nil; e = e.Next() {
  res := *(e.Value.(*Result))
  fmt.Println(" ", res.id, res.score)
 }
}
// compare scores comp against v and, if it is close enough, inserts it
// into v's top-matches list, which is kept sorted by ascending score and
// capped at maxLen entries. Self-comparisons and scores beyond MAX_SCORE
// are ignored.
func (v *Vectors) compare(comp *Vectors) {
 maxLen := 10
 if v.id == comp.id {
  return
 }
 score := VectorDistance(v.vectors, comp.vectors, MAX_SCORE)
 if score > MAX_SCORE {
  return
 }
 res := NewResult(comp.id, score)
 // When the list is full, evict the worst entry first; if the new score
 // then still ranks last, append it directly and skip the ordered walk.
 if v.list.Len() == maxLen {
  lastScore := v.list.Back().Value.(*Result).score
  if lastScore <= score {
   return
  } else {
   v.list.Remove(v.list.Back())
   newLastScore := v.list.Back().Value.(*Result).score
   if newLastScore <= score {
    v.list.PushBack(res)
    return
   }
  }
 }
 // Ordered insertion: walk forward to the first entry with a larger
 // score and insert before it.
 el := v.list.Front()
 if el == nil {
  v.list.PushFront(res)
  return
 }
 for el != nil {
  currentScore := el.Value.(*Result).score
  if currentScore > score {
   v.list.InsertBefore(res, el)
   el = nil
  } else {
   el = el.Next()
  }
 }
}
// NewVector builds a Vectors record from a header line and its vector
// line. Unlike the original — where the header parse error was silently
// overwritten by the vector parse result — each error is now returned
// immediately (with a partially built record, as before).
func NewVector(header, vector string) (ret *Vectors, e error) {
 ret = new(Vectors)
 if ret.id, e = ParseName(header); e != nil {
  return
 }
 if ret.vectors, e = ParseVectors(vector); e != nil {
  return
 }
 ret.list = list.New()
 return
}
// Result pairs a matched image id with its distance score.
type Result struct {
 id int64
 score float64
}
// NewResult constructs a Result pairing an image id with its distance score.
func NewResult(id int64, score float64) (ret *Result) {
 return &Result{id: id, score: score}
}
|
package retrievable
import (
"google.golang.org/appengine/datastore"
)
// IntID is a shortcut type that can be embeded in another struct to fulfil
// the KeyRetrievable interface easily in the most common case.
type IntID int64
// StoreKey records the datastore key's integer id on the receiver.
func (i *IntID) StoreKey(key *datastore.Key) {
 *i = IntID(key.IntID())
}
// StringID is a shortcut type that can be embeded in another struct to fulfil
// the KeyRetrievable interface easily in the most common case.
type StringID string
// StoreKey records the datastore key's string id on the receiver.
func (s *StringID) StoreKey(key *datastore.Key) {
 *s = StringID(key.StringID())
}
|
package main
// TreeNode is a standard binary tree node.
type TreeNode struct {
 Val int
 Left *TreeNode
 Right *TreeNode
}
func addOneRow(root *TreeNode, v int, d int) *TreeNode {
if root == nil {
return root
}
if d == 1 {
root = &TreeNode{Val: v, Left: root}
} else if d == 2 {
L, R := root.Left, root.Right
root.Left = &TreeNode{Val: v, Left: L}
root.Right = &TreeNode{Val: v, Right: R}
} else {
root.Left = addOneRow(root.Left, v, d-1)
root.Right = addOneRow(root.Right, v, d-1)
}
return root
}
func main() {
}
|
package main
import (
"fmt"
"go-design-pattern/creational-pattern/singleton"
)
// CallSingletonWithLock spawns ten goroutines that each request the
// mutex-guarded singleton, then blocks on an Enter keypress so the process
// does not exit before they have run.
func CallSingletonWithLock() {
	for i := 10; i > 0; i-- {
		go singleton.GetDBInstanceWithLock()
	}
	fmt.Scanln()
}

// CallSingletonWithOnce spawns ten goroutines that each request the
// sync.Once-guarded singleton, then blocks on an Enter keypress.
func CallSingletonWithOnce() {
	for i := 10; i > 0; i-- {
		go singleton.GetDBInstanceWithOnce()
	}
	fmt.Scanln()
}
|
// Copyright ยฉ 2020 Weald Technology Trading
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fetcher
import types "github.com/wealdtech/go-eth2-wallet-types/v2"
// Service is the interface for a wallet and account fetching service.
type Service interface {
	// FetchWallet resolves the wallet identified by path.
	FetchWallet(path string) (types.Wallet, error)
	// FetchAccount resolves an account (and its containing wallet) from path.
	FetchAccount(path string) (types.Wallet, types.Account, error)
	// FetchAccountByKey resolves an account (and its containing wallet) by
	// the account's public key.
	FetchAccountByKey(pubKey []byte) (types.Wallet, types.Account, error)
}
|
package main
import (
"fmt"
"os"
"os/exec"
"runtime"
//"time"
//"bufio"
//"log"
//"os"
)
// clear maps a GOOS name to a function that clears that platform's
// terminal; it is populated once at startup by init.
var clear map[string]func()

// init registers the terminal-clear commands for the supported platforms.
func init() {
	runOut := func(name string, arg ...string) func() {
		return func() {
			cmd := exec.Command(name, arg...)
			cmd.Stdout = os.Stdout
			cmd.Run()
		}
	}
	clear = map[string]func(){
		"linux":   runOut("clear"),            // tested on linux
		"windows": runOut("cmd", "/c", "cls"), // tested on windows
	}
}
func CallClear() {
value, ok := clear[runtime.GOOS]
if ok {
value()
} else {
panic("Your platform is unsupported!")
}
}
// calon is one candidate on the ballot.
type calon struct{
	Namacalon string // candidate name
	Partai string // party name
	Nomor int // ballot number voters enter to pick this candidate
	Threshold int // running vote tally (seeded with initial counts in main)
}
// pemilih is one registered voter.
type pemilih struct{
	Nama string // voter name
	Gender string // gender, free text
	Umur int // age
	Pilihan int // the candidate Nomor this voter chose
	TPS int // polling-station number
}
// N is the fixed capacity of the candidate and voter arrays.
const N int = 100
// PT is the minimum vote tally a candidate must exceed to win ("parliamentary threshold").
const PT int = 3
// arcalon and arpemilih are fixed-size arrays used in place of slices throughout.
type arcalon [N] calon
type arpemilih [N] pemilih
// con is a throwaway sink used by the "press any key to continue" prompts.
var con int
// main seeds three candidates and eighteen voters, then loops showing the
// ASCII-art splash screen and top-level menu until option 3 (exit) is
// chosen. Option 1 enters the voter flow, option 2 the official flow.
func main (){
	var calonpemimpin arcalon
	var pemilihnya arpemilih
	var manusia, npemilih int
	var esc bool=false
	// Seed data: three candidates with initial tallies ...
	calonpemimpin[0]=calon{Namacalon:"Prabowo",Partai:"Gerindra",Nomor:1,Threshold:6}
	calonpemimpin[1]=calon{Namacalon:"Jokowi",Partai:"PDIP",Nomor:2,Threshold: 5}
	calonpemimpin[2]=calon{Namacalon:"Bahrul",Partai:"Bem Tel U",Nomor:3,Threshold:4}
	ncalon:=3
	// ... and eighteen voters spread over three polling stations.
	pemilihnya[0]=pemilih{Nama:"Indira",Gender:"Wanita",Umur:19,Pilihan:6,TPS:111}
	pemilihnya[1]=pemilih{Nama:"Atikah",Gender:"Wanita",Umur:19,Pilihan:2,TPS:111}
	pemilihnya[2]=pemilih{Nama:"Naufal",Gender:"Pria",Umur:19,Pilihan:2,TPS:111}
	pemilihnya[3]=pemilih{Nama:"Mandar",Gender:"Pria",Umur:19,Pilihan:1,TPS:222}
	pemilihnya[4]=pemilih{Nama:"Nuriz",Gender:"Wanita",Umur:19,Pilihan:1,TPS:222}
	pemilihnya[5]=pemilih{Nama:"Dewa",Gender:"Pria",Umur:19,Pilihan:1,TPS:222}
	pemilihnya[6]=pemilih{Nama:"Reikiko",Gender:"Pria",Umur:19,Pilihan:7,TPS:333}
	pemilihnya[7]=pemilih{Nama:"Nanda",Gender:"Wanita",Umur:19,Pilihan:3,TPS:333}
	pemilihnya[8]=pemilih{Nama:"Blamma",Gender:"Pria",Umur:19,Pilihan:0,TPS:333}
	pemilihnya[9]=pemilih{Nama:"Hawa",Gender:"Wanita",Umur:19,Pilihan:2,TPS:111}
	pemilihnya[10]=pemilih{Nama:"Sitti",Gender:"Wanita",Umur:19,Pilihan:2,TPS:111}
	pemilihnya[11]=pemilih{Nama:"Yais",Gender:"Pria",Umur:19,Pilihan:2,TPS:111}
	pemilihnya[12]=pemilih{Nama:"Maulana",Gender:"Pria",Umur:19,Pilihan:1,TPS:222}
	pemilihnya[13]=pemilih{Nama:"Annisa",Gender:"Wanita",Umur:19,Pilihan:1,TPS:222}
	pemilihnya[14]=pemilih{Nama:"Dika",Gender:"Pria",Umur:19,Pilihan:1,TPS:222}
	pemilihnya[15]=pemilih{Nama:"Raditya",Gender:"Pria",Umur:19,Pilihan:3,TPS:333}
	pemilihnya[16]=pemilih{Nama:"Nadin",Gender:"Wanita",Umur:19,Pilihan:3,TPS:333}
	pemilihnya[17]=pemilih{Nama:"Ceb",Gender:"Pria",Umur:19,Pilihan:3,TPS:333}
	npemilih=18
	for !esc{
		CallClear()
		// Splash banner (large block-character art), then wait for Enter.
		fmt.Println("\n\n\n\n\n\n\n")
		fmt.Println("           โโโโโโโโโโโโโโโโโโโ โโโโโโ โโโโ   โโโโ     โโโโโโ โโโโโโโโโ โโโโโโโ โโโโโโ โโโโโโโโโ โโโโโโ โโโโ   โโโ     โโโโโโโ โโโโโโโ โโโ       ")
		fmt.Println("           โโโโโโโโโโโโโโโโโโโ โโโโโโโโโโโโโ โโโโโโ    โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ  โโโ     โโโโโโโโโโโ โโโโโโโโโโโ       ")
		fmt.Println("           โโโโโโโโโโโโโโ   โโโ  โโโโโโโโโโโโโโโโโโโโโโโโโโโ  โโโ    โโโ   โโโโโโโโโโโ  โโโ   โโโโโโโโโโโโโโ โโโ      โโโโโโ  โโโโ  โโโ โโโโโโ       ")
		fmt.Println("           โโโโโโโโโโโโโโ   โโโ  โโโโโโโโโโโโโโโโโโโโโโโโโโโ  โโโ    โโโ   โโโโโโโโโโโ  โโโ   โโโโโโโโโโโโโโโโโโโโโ  โโโ  โโโ โโโโโโ       ")
		fmt.Println("           โโโโโโโโโโโโโโโโโโโโโโโโโโโ โโโโโโ โโโ  โโโโโโ    โโโ    โโโ   โโโโโโโโโโโ  โโโ   โโโ  โโโ โโโโโโ โโโโโโโโโโโโโโโ โโโโโโโโโโโ       ")
		fmt.Println("           โโโโโโโโโโโโโโโโโโโโโโโโโโโ โโโโโโ โโโโโโ    โโโ    โโโ   โโโโโโโ   โโโ   โโโ  โโโ โโโ  โโโโโโ  โโโโโ   โโโโโโโ  โโโโโโโ โโโ       ")
		fmt.Println("\n")
		fmt.Println("           โโโโโโ โโโโโโโ โโโ     โโโโโโ โโโ  โโโโโโ โโโโโโโโโโโ     โโโโโโโ โโโโโโโโโโโโ    โโโโโโโโโโ โโโโโโ    โโโ    โโโโโโ โโโโ   โโโ       ")
		fmt.Println("           โโโโโโโโโโโโโโโโโโโ    โโโโโโ โโโโโโโโโโโโโโโโโโโโโโโ    โโโโโโโโโโโโโโโโโโโโโ    โโโโโโโโโโโ โโโโโโ    โโโ   โโโโโโโโโโโโโโโโ  โโโ       ")
		fmt.Println("           โโโโโโโโโโโโโโโโโโโ    โโโโโโโโโโ โโโโโโโโโโโโโโโโโโโ    โโโโโโโโโโโโโโ  โโโโโโโโโโโโโโโโโ โโโโโโโโโโโโโโโโโโโโโโโโโ โโโ       ")
		fmt.Println("           โโโโโโโโโโโโโโโ    โโโ โโโโโโโโโโ โโโโโโโโโโโโโโโโโโโ    โโโโโโโ   โโโโโโ  โโโโโโโโโโโโโโโโโ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโ       ")
		fmt.Println("           โโโ     โโโโโโ  โโโโโโโโโโโโโโ โโโโโโ โโโโโโโโโโโโโโ    โโโ    โโโโโโโโโโโ     โโโ  โโโโโโโโโโโโโโโโโโโโ โโโโโโ  โโโโโโ  โโโโโโ       ")
		fmt.Println("           โโโ     โโโโโโ   โโโโโโโโโโโโโ  โโโโโโ  โโโโโโโโโโโโโโ    โโโ     โโโโโโโ โโโ     โโโโโโโโโโโโโโโโโโโโ  โโโโโโ   โโโโโโ   โโโโโ       ")
		fmt.Println("\n")
		fmt.Println("                                 โโโ   โโโโโโโโ โโโโโโโ  โโโโโโโ โโโโ   โโโ    โโโโโโโโ โโโโโโโ โโโโโโโโโโโโโโ โโโโโโ โโโโโโโโโโโโโโโโโโโโ       ")
		fmt.Println("                                 โโโ   โโโโโโโโ โโโโโโโโ โโโโโโโโ โโโโโ  โโโ    โโโโโโโโโโโโโโโโ โโโโโโโโโโโโโโ โโโโโโโโโโโโโโโโโโโโโโโโโโโโ       ")
		fmt.Println("                                 โโโ   โโโโโโโโโโโโโโโโโ  โโโโโโโโโโโโโโ โโโ    โโโโโโ  โโโ   โโโโโโโโโโโโโโโโโโ โโโโโโโโ   โโโ   โโโโโโโโโ       ")
		fmt.Println("                                 โโโ   โโโโโโโโโโโโโโโโโ  โโโโโโโโโโโโโโ โโโ    โโโโโโ  โโโ   โโโโโโโโโโโโโโโโโ โโโโโโโโ   โโโ   โโโโโโโโโ       ")
		fmt.Println("                                 โโโโโโโโโโโโ  โโโ โโโโโโโโโโโโโโโ โโโ โโโ    โโโ       โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ   โโโ   โโโ   โโโโโโ       ")
		fmt.Println("                                 โโโโโโโ โโโ  โโโ  โโโโโโโ  โโโ  โโโ    โโโ        โโโโโโโโโโโโโโโโ โโโโโโโ โโโโโโโโโโโโโโโโโโโโโโ   โโโ   โโโ   โโโโโโ       ")
		fmt.Println("\n")
		fmt.Print("                                                                                      ")
		fmt.Scanln(&con)
		CallClear()
		// Top-level role menu.
		fmt.Println("\n\n\n\n\n\n\n\n\n\n\n\n\n\n")
		fmt.Println ("                                 Siapa anda? Apa urusan anda?")
		fmt.Println("\n")
		fmt.Println ("                                 1. Saya Pemilih, Saya ingin Memilih!")
		fmt.Println("\n")
		fmt.Println ("                                 2. Saya Petugas, Apa urusan anda menanyakan itu?")
		fmt.Println("\n")
		fmt.Println ("                                 3. Keluar")
		fmt.Println("\n")
		fmt.Print("                                 Masukan Pilihan : ")
		fmt.Scanln (&manusia)
		switch manusia {
		case 1:
			CallClear()
			pemilihh(&calonpemimpin,&pemilihnya,&ncalon,&npemilih)
		case 2:
			CallClear()
			petugas(&calonpemimpin,&pemilihnya,&ncalon,&npemilih)
		case 3:
			esc=true
		}}
}
// pemilihh runs the voter flow: it registers a new voter record at index
// *npemilih, then loops the voter menu (view candidates, cast a vote,
// search) until the voter exits, finally committing the new record by
// incrementing *npemilih.
//
// Fix over the original: the vote-tally loop iterated over the number of
// voters (j < n) instead of the number of candidates, so zero-valued
// candidate slots (Nomor == 0) could be matched and incremented when a
// voter entered 0. It now iterates over *ncalon.
//
// NOTE(review): choosing menu 2 repeatedly lets the same voter increment a
// tally more than once — confirm whether that is intended.
func pemilihh(ahh *arcalon, nawn *arpemilih, ncalon *int, npemilih *int){
	var menupemilih int
	var keluar bool = false
	var n int = *npemilih // index where this voter's record is stored
	fmt.Println("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n")
	fmt.Print("                 Nama Pemilih        : ")
	fmt.Scan(&nawn[n].Nama)
	fmt.Println("\n")
	fmt.Print("                 Jenis Kelamin        : ")
	fmt.Scan(&nawn[n].Gender)
	fmt.Println("\n")
	fmt.Print("                 Umur                : ")
	fmt.Scan(&nawn[n].Umur)
	for !keluar {
		CallClear()
		fmt.Println("\n\n\n\n\n\n\n\n\n\n")
		fmt.Println("                 ******************************")
		fmt.Println("                 ******************************")
		fmt.Println("                 ***------------------------***")
		fmt.Println("                 *** M E N U P E M I L I H ***")
		fmt.Println("                 ***------------------------***")
		fmt.Println("                 ***                        ***")
		fmt.Println("                 ***(1) LIHAT CALON         ***")
		fmt.Println("                 ***(2) MULAI MEMILIH       ***")
		fmt.Println("                 ***(3) CARI DATA           ***")
		fmt.Println("                 ***(4) KELUAR              ***")
		fmt.Println("                 ***                        ***")
		fmt.Println("                 ******************************")
		fmt.Println("                 ******************************")
		fmt.Print("                 Pilihan : ")
		fmt.Scanln(&menupemilih)
		if menupemilih == 1{
			lihatData(ahh,ncalon)
		} else if menupemilih==4{
			keluar=true
		} else if menupemilih==2{
			fmt.Print("                 Masukkan TPS     : ")
			fmt.Scanln(&nawn[n].TPS)
			fmt.Print("                 Masukkan Pilihan : ")
			fmt.Scanln(&nawn[n].Pilihan)
			// Credit the chosen candidate: scan the candidate list (was
			// erroneously bounded by the voter count).
			for j := 0; j < *ncalon; j++ {
				if nawn[n].Pilihan==ahh[j].Nomor{
					ahh[j].Threshold++
				}
			}
		} else if menupemilih==3{
			find(ahh,ncalon)
		}
	}
	// Commit the newly registered voter.
	*npemilih++
}
//}
// petugas runs the official (admin) menu loop: CRUD over the candidate
// array plus reporting, dispatching on the number typed at the prompt.
// Option 9 exits the loop.
func petugas(sahh *arcalon, nawn *arpemilih, n*int, npemilih *int){
	var menupetugas int
	var esc bool = false
	for !esc {
		CallClear()
		fmt.Println("\n\n\n\n\n\n\n\n\n\n")
		fmt.Println("                 ***--------------------------***")
		fmt.Println("                 ***----------M E N U---------***")
		fmt.Println("                 ***   (1) MASUKAN CALON      ***")
		fmt.Println("                 ***   (2) HAPUS CALON        ***")
		fmt.Println("                 ***   (3) UBAH CALON         ***")
		fmt.Println("                 ***   (4) LIHAT CALON        ***")
		fmt.Println("                 ***   (5) CARI CALON         ***")
		fmt.Println("                 ***   (6) URUTKAN CALON      ***")
		fmt.Println("                 ***   (7) HASIL TPS          ***")
		fmt.Println("                 ***   (8) CEK GOLPUT         ***")
		fmt.Println("                 ***   (9) KELUAR             ***")
		fmt.Println("                 ***--------------------------***")
		fmt.Println("                 ***--------------------------***")
		fmt.Print("                 Pilihan : ")
		fmt.Scanln(&menupetugas)
		// Dispatch to the selected maintenance routine; n is the candidate
		// count, npemilih the voter count.
		switch menupetugas {
		case 1:insert(sahh,n)
		case 2:delete(sahh,n)
		case 3:edit(sahh,n)
		case 4:lihatData(sahh,n)
		case 5:find(sahh,n)
		case 6:urut(sahh, n)
		case 7:hasil(sahh,n)
		case 8:golput(sahh,nawn,n,npemilih)
		case 9:esc=true
		}
	}
}
// edit looks up a candidate by ballot number and lets the operator replace
// that candidate's name, party, or number.
func edit(editcalon *arcalon, ncalon *int){
	var pilihedit, editnomor int
	fmt.Print("                Masukkan Nomor Calon yang mau di edit : ")
	fmt.Scanln(&editnomor)
	for i:=0;i<*ncalon;i++{
		if editnomor==editcalon[i].Nomor{
			fmt.Println("                Pilih Data yang maw di edit : ")
			fmt.Println("                1. Nama Calon")
			fmt.Println("                2. Partai Calon")
			fmt.Println("                3. Nomor Calon")
			fmt.Print("                Pilihan : ")
			fmt.Scanln(&pilihedit)
			if pilihedit == 1{
				fmt.Print("                Masukkan Nama Calon yang baru : ")
				fmt.Scanln(&editcalon[i].Namacalon)
			} else if pilihedit == 2{
				fmt.Print("                Masukkan Partai Calon yang baru : ")
				fmt.Scanln(&editcalon[i].Partai)
			} else if pilihedit == 3{
				fmt.Print("                Masukkan Nomor Calon yang baru : ")
				fmt.Scanln(&editcalon[i].Nomor)
			}
		}
	}
}
// insert appends a new candidate at index *ncalon and bumps the count.
// NOTE(review): no bounds check against N — confirm overflow is impossible.
func insert(masukcalon *arcalon, ncalon *int){
	fmt.Print("                Nama Calon Baru   : ")
	fmt.Scanln(&masukcalon[*ncalon].Namacalon)
	fmt.Print("                Partai Calon Baru : ")
	fmt.Scanln(&masukcalon[*ncalon].Partai)
	fmt.Print("                Nomor Calon Baru  : ")
	fmt.Scanln(&masukcalon[*ncalon].Nomor)
	*ncalon++
	fmt.Println("                \tOK")
	fmt.Print("                Press any key to continue  ")
	fmt.Scanln(&con)
}
// lihatData prints the number/name/party table for all current candidates.
func lihatData(lihatcalon *arcalon, ncalon *int){
	fmt.Println("                Nomor\t\tNama Calon\t\tPartai\n")
	fmt.Print("                ----------------------------------------------------------\n")
	for i:=0;i<*ncalon;i++{
		fmt.Println("\t\t\t\t\t\t\t\t",lihatcalon[i].Nomor,"\t\t",lihatcalon[i].Namacalon,"\t\t",lihatcalon[i].Partai,"\n")
	}
	fmt.Print("                Press any key to continue  ")
	fmt.Scanln(&con)
}
// delete removes the candidate whose ballot number matches the operator's
// input, shifting the remaining entries left to keep the array compact.
//
// Fixes two defects in the original version: only the matched slot was
// overwritten with its successor (entries further right were never
// shifted), and the candidate count was decremented even when no match
// was found.
func delete(hpscalon *arcalon, ncalon *int){
	var hapus int
	fmt.Print("                Masukkan Nomor yang di hapus : ")
	fmt.Scanln(&hapus)
	// Locate the first candidate with the requested ballot number.
	idx := -1
	for j := 0; j < *ncalon; j++ {
		if hapus == hpscalon[j].Nomor {
			idx = j
			break
		}
	}
	if idx >= 0 {
		// Shift the tail left over the removed slot, then shrink the count.
		for j := idx; j < *ncalon-1; j++ {
			hpscalon[j] = hpscalon[j+1]
		}
		*ncalon--
	}
	fmt.Println("\n\t\t\t\t\t\t\t\tData sudah dihapus! Silahkan cek menu Lihat Calon")
	fmt.Print("\n")
	fmt.Print("                Press any key to continue  ")
	fmt.Scanln(&con)
}
// find searches the candidate array by exact name, party, or ballot number
// (case-sensitive, stops at the first match) and prints the match or a
// not-found message.
func find(cari *arcalon, ncalon *int){
	var pilihcari,i int
	var found bool
	fmt.Println("                Pilih data yang mau dicari : ")
	fmt.Println("                1. Nama Calon")
	fmt.Println("                2. Partai Calon")
	fmt.Println("                3. Nomor Calon")
	fmt.Println("                !!!CASE SENSITIVE!!!")
	fmt.Print("                Pilihan : ")
	fmt.Scanln(&pilihcari)
	if pilihcari==1{
		var carinama string
		fmt.Print("                Cari Nama : ")
		fmt.Scanln(&carinama)
		// Linear scan, terminating at the first exact name match.
		i=0
		found=false
		for i<*ncalon && !found{
			if carinama==cari[i].Namacalon{
				fmt.Print("                \t\tNomor\t\tNama Calon\tPartai\n")
				fmt.Print("                \t\t--------------------------------------------------\n")
				fmt.Print("                \t\t",cari[i].Nomor,"\t\t",cari[i].Namacalon,"\t\t",cari[i].Partai,"\n")
				found=true
			}
			i++
		}
		if found==false {
			fmt.Println("                DATA TIDAK ADA")
		}
		fmt.Print("                Press any key to continue")
		fmt.Scanln(&con)
	} else if pilihcari==2{
		var caripartai string
		fmt.Print("                Cari Partai : ")
		fmt.Scanln(&caripartai)
		i=0
		found=false
		for i<*ncalon && !found{
			if caripartai==cari[i].Partai{
				fmt.Print("                Nomor\t\tNama Calon\tPartai\n")
				fmt.Print("                --------------------------------------------------\n")
				fmt.Print("                ",cari[i].Nomor,"\t\t",cari[i].Namacalon,"\t\t",cari[i].Partai,"\n")
				found=true
			}
			i++
		}
		if found==false {
			fmt.Println("                DATA TIDAK ADA")
		}
		fmt.Print("                Press any key to continue")
		fmt.Scanln(&con)
	} else if pilihcari==3{
		var carinomor int
		fmt.Print("                Cari Nomor : ")
		fmt.Scanln(&carinomor)
		i=0
		found=false
		for i<*ncalon && !found{
			if carinomor==cari[i].Nomor{
				fmt.Print("                Nomor\t\tNama Calon\tPartai\n")
				fmt.Print("                -----------------------------------------------\n")
				fmt.Print("                ",cari[i].Nomor,"\t\t",cari[i].Namacalon,"\t\t",cari[i].Partai,"\n")
				found=true
			}
			i++
		}
		if found==false {
			fmt.Println("                DATA TIDAK ADA")
		}
		fmt.Print("                Press any key to continue")
		fmt.Scanln(&con)
	}
}
// hasil prints the election winner: the candidate with the highest tally,
// considered only when at least one candidate's tally exceeds PT.
// NOTE(review): when no candidate exceeds PT, max stays the zero value and
// an empty name with tally 0 is printed — confirm that is acceptable.
func hasil(hasilcalon *arcalon, ncalon *int){
	var max calon
	for i:=0;i<*ncalon;i++{
		if hasilcalon[i].Threshold>PT{
			max=hasilcalon[i]
			// Inner scan promotes any candidate with a larger tally.
			for j:=0;j<*ncalon;j++{
				if hasilcalon[j].Threshold>max.Threshold{
					max=hasilcalon[j]
				}
			}
		}
	}
	fmt.Println("                Pemenang Pemilu adalah ",max.Namacalon," dengan perolehan suara",max.Threshold,"\n")
	fmt.Print("                Press any key to continue  ")
	fmt.Scanln(&con)
}
// golput estimates the number of invalid ("golput") votes at one polling
// station: voters whose Pilihan matches no candidate ballot number.
// NOTE(review): the inner loop runs c<=k (one slot past the copied voters)
// and the result is corrected with the hard-coded expression
// golput-((k*b)-3), which only balances when exactly 3 candidates are
// registered — this needs to be verified / generalized.
func golput(ahh *arcalon, nawn *arpemilih, ncalon *int, npemilih *int){
	var tps111 arpemilih
	var tps222 arpemilih
	var tps333 arpemilih
	var nopilihan[N] int
	var j int
	// Collect the valid ballot numbers.
	for j=0;j<*ncalon;j++{
		nopilihan[j]=ahh[j].Nomor
	}
	var tpsberapa int
	var golput int = 0
	var k int = 0
	fmt.Println("                Mau liat golput di TPS berapa?")
	fmt.Println("                1. TPS 111")
	fmt.Println("                2. TPS 222")
	fmt.Println("                3. TPS 333")
	fmt.Print("                Pilihannya : ")
	fmt.Scanln(&tpsberapa)
	if tpsberapa==1 {
		// Copy the voters registered at TPS 111; k counts them.
		for i:=0;i<*npemilih;i++{
			if nawn[i].TPS==111{
				tps111[k]=nawn[i]
				k++
			}
		}
		var b,c int
		// Count every (ballot number, voter) pair that does NOT match.
		for b=0;b<j;b++{
			for c=0;c<=k;c++{
				if nopilihan[b]==tps111[c].Pilihan{
				}else if nopilihan[b]!=tps111[c].Pilihan {
					golput++
				}
			}
		}
		fmt.Println("\t\t\t\t\t\t\t\t",b,k,golput)
		golput=golput-((k*b)-3)
		fmt.Println("                Jumlah Golput di TPS 111 adalah ",golput," dari ",k," orang")
	} else if tpsberapa==2{
		for i:=0;i<*npemilih;i++{
			if nawn[i].TPS==222{
				tps222[k]=nawn[i]
				k++
			}
		}
		var b,c int
		for b=0;b<j;b++{
			for c=0;c<=k;c++{
				if nopilihan[b]==tps222[c].Pilihan{
				}else if nopilihan[b]!=tps222[c].Pilihan {
					golput++
				}
			}
		}
		fmt.Println("\t\t\t\t\t\t\t\t",b,k,golput)
		golput=golput-((k*b)-3)
		fmt.Println("                Jumlah Golput di TPS 222 adalah ",golput," dari ",k," orang")
	} else if tpsberapa==3 {
		for i:=0;i<*npemilih;i++{
			if nawn[i].TPS==333{
				tps333[k]=nawn[i]
				k++
			}
		}
		var b,c int
		for b=0;b<j;b++{
			for c=0;c<=k;c++{
				if nopilihan[b]==tps333[c].Pilihan{
				}else if nopilihan[b]!=tps333[c].Pilihan {
					golput++
				}
			}
		}
		fmt.Println("\t\t\t\t\t\t\t\t",b,k,golput)
		golput=golput-((k*b)-3)
		fmt.Println("                Jumlah Golput di TPS 333 adalah ",golput," dari ",k," orang")
	}
	fmt.Print("                Press any key to continue  ")
	fmt.Scanln(&con)
}
// urut ("sort") prints the candidates ordered by vote tally (Threshold),
// highest first.
//
// Fix over the original: despite being wired to the "URUTKAN CALON" menu
// entry, the old code only mutated a local copy named max and printed the
// array in its stored order — it never sorted anything. This version
// selection-sorts a copy of the first *ncalon entries so the display is
// ordered while the caller's array (and thus the other menus) is left
// untouched.
func urut(hasilcalon *arcalon, ncalon *int){
	sorted := *hasilcalon // arcalon is an array type, so this is a copy
	n := *ncalon
	// Selection sort, descending by Threshold.
	for i := 0; i < n-1; i++ {
		maxIdx := i
		for j := i + 1; j < n; j++ {
			if sorted[j].Threshold > sorted[maxIdx].Threshold {
				maxIdx = j
			}
		}
		sorted[i], sorted[maxIdx] = sorted[maxIdx], sorted[i]
	}
	fmt.Println("                Nomor\t\tNama Calon\t\tPartai\t\tPerolehan\n")
	fmt.Print("                ------------------------------------------------------------------\n")
	for i := 0; i < n; i++ {
		fmt.Println("\t\t\t\t\t\t\t\t",sorted[i].Nomor,"\t\t",sorted[i].Namacalon,"\t\t",sorted[i].Partai,"\t\t",sorted[i].Threshold,"\n")
	}
	fmt.Print("                Press any key to continue  ")
	fmt.Scanln(&con)
}
/*func selection_sort(arr *array, n int){
var min_index int
for i := 0; i < n; i++ {
min_index = i
for j := i + 1; j < n; j++ {
//urutan besar ke kecil atau kecil ke besar (perhatikan tanda)
if arr[j] < arr [min_index] {
min_index = j
}
}
temp := arr[min_index]
arr[min_index] = arr[i]
arr[i] = temp
}
}
func UrutkanData(arr *array, n int){
var index array
var m int
fmt.Scanln(&n)
i := 0
for (i < n){
j := 0
fmt.Scan(&m)
for ( j < m){
fmt.Scan(&index[j])
j++
}
selection_sort(&index, m)
for k := 0; k < m; k++ {
fmt.Print(index[k], " ")
}
}
}
*/
|
package main
// TODO: interactive echo works, but the server panics when a client disconnects (read error).
import (
// "bufio"
"fmt"
"net"
"strings"
)
// handleErr is a fail-fast helper: when err is non-nil it prints the error
// and aborts via panic; a nil err is a no-op.
func handleErr(err error) {
	if err == nil {
		return
	}
	fmt.Println(err)
	panic(err.Error())
}
func main() {
ls, err := net.Listen("tcp", "0.0.0.0:20000")
handleErr(err)
for {
conn, err := ls.Accept()
handleErr(err)
go func(conn net.Conn) {
defer conn.Close()
for {
//ๅค็ๅปบ็ซ็tcp่ฟๆฅ
// fmt.Println("localAddr:", conn.LocalAddr().String())
fmt.Println("remoteAddr:", conn.RemoteAddr().String())
var buf = [1024]byte{}
n, err := conn.Read(buf[:])
handleErr(err)
fmt.Println("Read:", string(buf[:n]), "num:", n)
conn.Write([]byte("ๆถๅฐ!"))
if strings.ToUpper(string(buf[:n])) == "END"{
fmt.Println("END")
return
}
}
}(conn)
// conn.Close() ไธ็จๅ
ณ้ญ,ไธ้ข็ไผ ๅไธๆฏๅคๅถ
}
}
|
package db
import (
"net/url"
"time"
"github.com/go-redis/redis"
"github.com/sirupsen/logrus"
)
// Redis is a thin convenience wrapper around a go-redis client; errors from
// the underlying client are logged rather than returned (except Get).
type Redis struct {
	engine *redis.Client // nil until Open succeeds
}
// Open parses _url (e.g. "redis://:password@host:port") and connects a
// client against DB 0.
//
// Fix over the original: when url.Parse fails it returns a nil URL, and the
// old code logged the error but carried on, panicking on redisUrl.User.
// Open now returns after logging, leaving r.engine nil.
func (r *Redis) Open(_url string) {
	redisUrl, err := url.Parse(_url)
	if err != nil {
		logrus.Error(err)
		return
	}
	redisPassword, _ := redisUrl.User.Password() // empty string when no password is set
	redisOptions := redis.Options{
		Addr:     redisUrl.Host,
		Password: redisPassword,
		DB:       0,
	}
	r.engine = redis.NewClient(&redisOptions)
}
// Close shuts down the underlying client, logging any error.
func (r *Redis) Close() {
	if err := r.engine.Close(); err != nil {
		logrus.Error(err)
	}
}
// Get returns the string value stored at key, or the client's error
// (including the not-found sentinel) unmodified.
func (r *Redis) Get(key string) (string, error) {
	return r.engine.Get(key).Result()
}
// Set stores value at key with no expiration; errors are ignored.
func (r *Redis) Set(key, value string) {
	r.engine.Set(key, value, 0)
}
// SetWithExpiration stores value at key with the given TTL; errors are ignored.
func (r *Redis) SetWithExpiration(key, value string, expiration time.Duration) {
	r.engine.Set(key, value, expiration)
}
// Del removes key; errors are ignored.
func (r *Redis) Del(key string) {
	r.engine.Del(key)
}
// SCard returns the cardinality of the set at key (0 on error, logged).
func (r *Redis) SCard(key string) int64 {
	val, err := r.engine.SCard(key).Result()
	if err != nil {
		logrus.Error(err)
	}
	return val
}
// SIsMember reports whether member is in the set at key (false on error, logged).
func (r *Redis) SIsMember(key, member string) bool {
	val, err := r.engine.SIsMember(key, member).Result()
	if err != nil {
		logrus.Error(err)
	}
	return val
}
// SAdd adds members to the set at key, returning how many were newly added.
// NOTE(review): the []string is passed as a single variadic argument and
// relies on go-redis flattening a lone slice argument — confirm for the
// pinned client version.
func (r *Redis) SAdd(key string, members ...string) int64 {
	val, err := r.engine.SAdd(key, members).Result()
	if err != nil {
		logrus.Error(err)
	}
	return val
}
// SRem removes members from the set at key, returning how many were removed.
// NOTE(review): same slice-flattening reliance as SAdd.
func (r *Redis) SRem(key string, members ...string) int64 {
	val, err := r.engine.SRem(key, members).Result()
	if err != nil {
		logrus.Error(err)
	}
	return val
}
|
package config
import (
"github.com/golang/glog"
"gopkg.in/yaml.v2"
"io/ioutil"
"path/filepath"
"time"
)
// Config is the application's YAML-backed configuration, decoded by GetConfig.
type Config struct {
	// Agent holds the monitoring agent's own settings.
	Agent struct {
		ListenAddr string `yaml:"listen_addr"` // address the agent listens on
		MonitorInterval time.Duration `yaml:"monitor_interval"` // how often monitors run
		CleanupTimer time.Duration `yaml:"cleanup_timer"`
		ConsulAddr string `yaml:"consul_addr"`
		ConsulQueryInterval time.Duration `yaml:"consul_query_interval"`
	}
	// Bgp describes the BGP peering used to announce VIPs.
	Bgp struct {
		LocalAS int `yaml:"local_as"`
		PeerAS int `yaml:"peer_as"`
		PeerIP string `yaml:"peer_ip"`
		Communities []string
		Origin string
	}
	// Apps lists the monitored applications (YAML keys default to the
	// lowercased field names).
	Apps []struct {
		Name string
		Vip string
		Monitors []string
		Nats []string
	}
}
// GetConfig reads and decodes the YAML configuration at file, terminating
// the process (glog.Exitf) on any failure.
//
// Fix over the original: the error from filepath.Abs was silently
// discarded; it is now treated as fatal like the other failure modes.
func GetConfig(file string) *Config {
	absPath, err := filepath.Abs(file)
	if err != nil {
		glog.Exitf("FATAL: Unable to resolve config path: %v", err)
	}
	data, err := ioutil.ReadFile(absPath)
	if err != nil {
		glog.Exitf("FATAL: Unable to read config file: %v", err)
	}
	config := &Config{}
	if err := yaml.Unmarshal(data, config); err != nil {
		glog.Exitf("FATAL: Unable to decode yaml: %v", err)
	}
	return config
}
|
package main
import (
"crd/pkg/apis/crd.com/v1alpha1"
cpclientset "crd/pkg/client/clientset/versioned"
"github.com/tamalsaha/go-oneliners"
crdapi "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
"log"
"os"
"os/signal"
"path/filepath"
"time"
)
// main is a demo driver: it registers the CustomPod CRD, waits for Ctrl+C,
// creates one CustomPod instance, waits for another Ctrl+C, then deletes
// both (via the deferred cleanups) on exit.
//
// Fix over the original: the errors returned by both NewForConfig calls
// were ignored, which would dereference a nil client on failure; they are
// now checked and fatal.
func main() {
	// Load kubeconfig from $HOME/.kube/config.
	kubeconfigPath := filepath.Join(os.Getenv("HOME"), ".kube/config")
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
	if err != nil {
		log.Fatalf("Could not get Kubernetes config: %s", err)
	}

	log.Println("Custom Resource 'CustomPod' Creating. . . . .")
	crdClient, err := crdclientset.NewForConfig(config)
	if err != nil {
		log.Fatalf("Could not create CRD client: %s", err)
	}
	mycrd := &crdapi.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: "custompods.crd.com",
		},
		Spec: crdapi.CustomResourceDefinitionSpec{
			Group: "crd.com",
			Names: crdapi.CustomResourceDefinitionNames{
				Plural:   "custompods",
				Singular: "custompod",
				ShortNames: []string{
					"cp",
				},
				Kind: "CustomPod",
			},
			Scope: "Namespaced",
			Versions: []crdapi.CustomResourceDefinitionVersion{
				{
					Name:    "v1alpha1",
					Served:  true,
					Storage: true,
				},
			},
		},
	}
	_, err = crdClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(mycrd)
	if err != nil {
		panic(err)
	}
	// Give the API server a moment to establish the new resource type.
	time.Sleep(5 * time.Second)
	log.Println("Custom Resource 'CustomPod' Created!")
	defer func() {
		log.Println("Deleting CustomPod. . . . .")
		if err = crdClient.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(
			"custompods.crd.com",
			nil,
		); err != nil {
			panic(err)
		}
		log.Println("CustomPod Deleted")
	}()

	log.Println("Press Ctrl+C to Create an instance of CustomPod. . . .")
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt)
	<-ch

	log.Println("Creating CustomPod 'cpod'. . . . .")
	cpClient, err := cpclientset.NewForConfig(config)
	if err != nil {
		log.Fatalf("Could not create CustomPod client: %s", err)
	}
	customPod := &v1alpha1.CustomPod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "CustomPod",
			APIVersion: "crd.com/v1alpha1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: "cpod",
			Labels: map[string]string{
				"app": "cpod"},
		},
		Spec: v1alpha1.CustomPodSpec{
			Containers: []v1alpha1.Container{
				{
					Name:  "api-latest",
					Image: "fahimabrar/api:latest",
				},
				{
					Name:  "api-alpine",
					Image: "fahimabrar/api:alpine",
				},
			},
		},
	}
	cp, err := cpClient.CrdV1alpha1().CustomPods("default").Create(customPod)
	if err != nil {
		panic(err)
	}
	log.Println("'cpod' Created!\n")
	oneliners.PrettyJson(cp)
	defer func() {
		log.Println("Deleting 'cpod'. . . . .")
		if err := cpClient.CrdV1alpha1().CustomPods("default").Delete(
			"cpod",
			nil,
		); err != nil {
			panic(err)
		}
		log.Println("'cpod' Deleted")
	}()

	log.Println("Press Ctrl+C to Delete the Custom Resource 'CustomPod' and the instance of CustomPod 'cpod'. . . .")
	ch = make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt)
	<-ch
}
|
package inetd
import (
"io"
"time"
)
// Echo (RFC 862) copies everything read from rw back to rw until the read
// side fails (EOF or any other error).
//
// Fix over the original: the read error was discarded, so after EOF the
// loop spun forever writing zero-length buffers. Per the io.Reader
// contract, the n>0 bytes delivered alongside an error are still echoed
// before returning.
func Echo(rw io.ReadWriter) {
	buf := make([]byte, 1024)
	for {
		n, err := rw.Read(buf)
		if n > 0 {
			rw.Write(buf[:n])
		}
		if err != nil {
			return
		}
	}
}
// Discard (RFC 863) reads and throws away everything from rw until the
// read side fails (EOF or any other error).
//
// Fix over the original: the read error was ignored, so the loop never
// terminated once the peer closed the connection.
func Discard(rw io.ReadWriter) {
	buf := make([]byte, 1024)
	for {
		if _, err := rw.Read(buf); err != nil {
			return
		}
	}
}
// Daytime (RFC 867) writes the current local date and time to rw as a
// single human-readable line.
func Daytime(rw io.ReadWriter) {
	stamp := time.Now().Format("Monday, January 02, 2006 15:04:05-MST\n")
	rw.Write([]byte(stamp))
}
|
package list
// Stack is a LIFO container backed by a slice.
type Stack struct {
	l []interface{}
}

// NewStack returns an empty stack whose backing slice is pre-allocated
// with the given capacity.
func NewStack(cap int) *Stack {
	return &Stack{l: make([]interface{}, 0, cap)}
}

// Destory releases the backing storage, leaving an empty stack.
// (Misspelled name kept for backward compatibility with existing callers.)
func (s *Stack) Destory() {
	s.l = nil
}

// Len reports the number of elements currently on the stack.
func (s *Stack) Len() int {
	return len(s.l)
}

// Top returns the most recently pushed element without removing it, or nil
// when the stack is empty.
func (s *Stack) Top() interface{} {
	if n := len(s.l); n > 0 {
		return s.l[n-1]
	}
	return nil
}

// Push places v on top of the stack.
func (s *Stack) Push(v interface{}) {
	s.l = append(s.l, v)
}

// Pop removes and returns the top element, or nil when the stack is empty.
func (s *Stack) Pop() interface{} {
	n := len(s.l)
	if n == 0 {
		return nil
	}
	v := s.l[n-1]
	s.l = s.l[:n-1]
	return v
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.