text
stringlengths 11
4.05M
|
|---|
package kate
import (
"context"
"net/http"
"github.com/k81/kate/log/ctxzap"
"go.uber.org/zap"
)
// Router defines the standard http outer
// Router defines the standard http router. It wraps http.ServeMux and
// carries a base context (with logger attached) plus a request-body
// size limit that are applied to every context-aware handler.
type Router struct {
*http.ServeMux
// maxBodyBytes limits request body size for handlers registered via
// Handle/HandleFunc and the verb helpers; 0 presumably means no limit —
// confirm against StdHandler.
maxBodyBytes int64
// ctx is the base context handed to every wrapped handler; it carries
// the logger installed by NewRouter.
ctx context.Context
}
// NewRouter create a http router
// NewRouter creates a Router backed by a fresh http.ServeMux whose base
// context carries the given logger.
func NewRouter(ctx context.Context, logger *zap.Logger) *Router {
	router := &Router{ServeMux: http.NewServeMux()}
	router.ctx = ctxzap.ToContext(ctx, logger)
	return router
}
// SetMaxBodyBytes set the body size limit
// SetMaxBodyBytes sets the request-body size limit applied to handlers
// registered after this call (the limit is captured at registration time).
func (r *Router) SetMaxBodyBytes(n int64) {
r.maxBodyBytes = n
}
// StdHandle register a standard http handler for the specified path
// StdHandle registers a plain net/http handler for the specified path,
// bypassing the context/body-limit wrapping applied by Handle.
func (r *Router) StdHandle(pattern string, h http.Handler) {
r.ServeMux.Handle(pattern, h)
}
// Handle register a http handler for the specified path
// Handle registers a context-aware handler for the specified path,
// wrapping it so requests are served with the router's context and the
// current body-size limit.
func (r *Router) Handle(pattern string, h ContextHandler) {
	wrapped := StdHandler(r.ctx, h, r.maxBodyBytes)
	r.ServeMux.Handle(pattern, wrapped)
}
// HandleFunc register a http handler for the specified path
// HandleFunc registers a plain function as a context-aware handler for
// the specified path.
func (r *Router) HandleFunc(pattern string, h func(context.Context, ResponseWriter, *Request)) {
	var handler ContextHandler = ContextHandlerFunc(h)
	r.Handle(pattern, handler)
}
// HEAD registers a handler for HEAD requests on the given pattern.
func (r *Router) HEAD(pattern string, h ContextHandler) {
	r.Handle(pattern, HEAD(h))
}

// OPTIONS registers a handler for OPTIONS requests on the given pattern.
func (r *Router) OPTIONS(pattern string, h ContextHandler) {
	r.Handle(pattern, OPTIONS(h))
}

// GET registers a handler for GET requests on the given pattern.
func (r *Router) GET(pattern string, h ContextHandler) {
	r.Handle(pattern, GET(h))
}

// POST registers a handler for POST requests on the given pattern.
func (r *Router) POST(pattern string, h ContextHandler) {
	r.Handle(pattern, POST(h))
}

// PUT registers a handler for PUT requests on the given pattern.
func (r *Router) PUT(pattern string, h ContextHandler) {
	r.Handle(pattern, PUT(h))
}

// DELETE registers a handler for DELETE requests on the given pattern.
func (r *Router) DELETE(pattern string, h ContextHandler) {
	r.Handle(pattern, DELETE(h))
}

// PATCH registers a handler for PATCH requests on the given pattern.
//
// All verb helpers delegate to Handle, which centralizes the
// StdHandler(ctx, …, maxBodyBytes) wrapping the originals each
// re-spelled; the resulting registration is identical.
func (r *Router) PATCH(pattern string, h ContextHandler) {
	r.Handle(pattern, PATCH(h))
}
|
package auth
import (
"context"
"github.com/golang/protobuf/ptypes/empty"
"github.com/caos/zitadel/pkg/grpc/auth"
)
// SearchMyUserGrant looks up the user grants of the calling user via
// the repository, translating between gRPC and model representations.
func (s *Server) SearchMyUserGrant(ctx context.Context, in *auth.UserGrantSearchRequest) (*auth.UserGrantSearchResponse, error) {
	res, err := s.repo.SearchMyUserGrants(ctx, userGrantSearchRequestsToModel(in))
	if err != nil {
		return nil, err
	}
	return userGrantSearchResponseFromModel(res), nil
}
// SearchMyProjectOrgs looks up the organisations of the calling user's
// project, translating between gRPC and model representations.
func (s *Server) SearchMyProjectOrgs(ctx context.Context, in *auth.MyProjectOrgSearchRequest) (*auth.MyProjectOrgSearchResponse, error) {
	res, err := s.repo.SearchMyProjectOrgs(ctx, myProjectOrgSearchRequestRequestsToModel(in))
	if err != nil {
		return nil, err
	}
	return projectOrgSearchResponseFromModel(res), nil
}
// GetMyZitadelPermissions returns the ZITADEL permissions of the
// authenticated user.
func (s *Server) GetMyZitadelPermissions(ctx context.Context, _ *empty.Empty) (*auth.MyPermissions, error) {
	permissions, err := s.repo.SearchMyZitadelPermissions(ctx)
	if err != nil {
		return nil, err
	}
	return &auth.MyPermissions{Permissions: permissions}, nil
}
// GetMyProjectPermissions returns the project permissions of the
// authenticated user.
func (s *Server) GetMyProjectPermissions(ctx context.Context, _ *empty.Empty) (*auth.MyPermissions, error) {
	permissions, err := s.repo.SearchMyProjectPermissions(ctx)
	if err != nil {
		return nil, err
	}
	return &auth.MyPermissions{Permissions: permissions}, nil
}
|
package function
import (
"bytes"
"encoding/json"
"errors"
"github.com/hecatoncheir/Storage"
"log"
"os"
"text/template"
)
// Storage abstracts the backing graph database: Query executes a raw
// query string and returns the raw JSON response bytes.
type Storage interface {
Query(string) ([]byte, error)
}
// Executor runs read queries against a Storage backend.
type Executor struct {
Store Storage
}
// ExecutorLogger receives diagnostic output from Executor methods.
var ExecutorLogger = log.New(os.Stdout, "Executor: ", log.Lshortfile)
var (
// ErrCategoriesByNameNotFound means than the categories does not exist in database
ErrCategoriesByNameNotFound = errors.New("categories by name not found")
// ErrCategoriesByNameCanNotBeFound means that the category can't be found in database
// (query construction, execution, or decoding failed).
ErrCategoriesByNameCanNotBeFound = errors.New("categories by name can not be found")
)
// ReadCategoriesByName is a method for get all nodes by categories name
// readCategoriesByNameQuery is the graph query used by
// ReadCategoriesByName. The text is constant, so it is parsed once at
// package init instead of being re-parsed on every call as before
// (template.Must panics only if the constant text is invalid).
var readCategoriesByNameQuery = template.Must(template.New("ReadCategoriesByName").Parse(`{
categories(func: eq(categoryName@{{.Language}}, "{{.CategoryName}}"))
@filter(eq(categoryIsActive, true)) {
uid
categoryName: categoryName@{{.Language}}
categoryIsActive
belongs_to_company @filter(eq(companyIsActive, true)) {
uid
companyName: companyName@{{.Language}}
companyIsActive
has_category @filter(eq(categoryIsActive, true)) {
uid
categoryName: categoryName@{{.Language}}
categoryIsActive
belong_to_company @filter(eq(companyIsActive, true)) {
uid
companyName: companyName@{{.Language}}
companyIsActive
}
}
}
has_product @filter(eq(productIsActive, true)) {
uid
productName: productName@{{.Language}}
productIri
previewImageLink
productIsActive
belongs_to_category @filter(eq(categoryIsActive, true)) {
uid
categoryName: categoryName@{{.Language}}
categoryIsActive
}
belongs_to_company @filter(eq(companyIsActive, true)) {
uid
companyName: companyName@{{.Language}}
companyIsActive
}
}
}
}`))

// ReadCategoriesByName returns all active categories matching
// categoryName in the given language, together with their related
// companies and products.
//
// It returns ErrCategoriesByNameNotFound when the query succeeds but
// matches nothing, and ErrCategoriesByNameCanNotBeFound on any
// execution or decoding failure (the underlying error is logged).
func (executor *Executor) ReadCategoriesByName(categoryName, language string) ([]storage.Category, error) {
	variables := struct {
		CategoryName string
		Language     string
	}{
		CategoryName: categoryName,
		Language:     language}

	queryBuf := bytes.Buffer{}
	if err := readCategoriesByNameQuery.Execute(&queryBuf, variables); err != nil {
		ExecutorLogger.Println(err)
		return nil, ErrCategoriesByNameCanNotBeFound
	}

	response, err := executor.Store.Query(queryBuf.String())
	if err != nil {
		ExecutorLogger.Println(err)
		return nil, ErrCategoriesByNameCanNotBeFound
	}

	// The json tag mirrors the root key of the query above.
	type categoriesInStorage struct {
		AllCategoriesFoundedByName []storage.Category `json:"categories"`
	}
	var foundCategories categoriesInStorage
	if err := json.Unmarshal(response, &foundCategories); err != nil {
		ExecutorLogger.Println(err)
		return nil, ErrCategoriesByNameCanNotBeFound
	}

	if len(foundCategories.AllCategoriesFoundedByName) == 0 {
		return nil, ErrCategoriesByNameNotFound
	}
	return foundCategories.AllCategoriesFoundedByName, nil
}
|
package main
import (
	"io"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path/filepath"

	"github.com/gin-gonic/gin"
)
// index_get renders index.html with the output of `ls` in the working
// directory.
func index_get(ctx *gin.Context) {
	cmd := exec.Command("ls")
	result, err := cmd.Output()
	if err != nil {
		// The original silently discarded this error; log it so a missing
		// binary or permission problem is visible.
		log.Println(err.Error())
	}
	ctx.HTML(200, "index.html", string(result))
}
// index_post stores an uploaded form file ("file") in the working
// directory under its base name.
func index_post(ctx *gin.Context) {
	file, err := ctx.FormFile("file")
	if err != nil {
		log.Println(err.Error())
		return
	}
	src, err := file.Open()
	if err != nil {
		log.Println(err.Error())
		return
	}
	defer src.Close()
	// file.Filename is attacker-controlled. filepath.Base strips any
	// directory components, so a crafted name like "../../etc/cron.d/x"
	// cannot escape the working directory (path traversal).
	filename := filepath.Base(file.Filename)
	dst, err := os.Create(filename)
	if err != nil {
		log.Println(err.Error())
		return
	}
	defer dst.Close()
	if _, err := io.Copy(dst, src); err != nil {
		log.Println(err.Error())
	}
}
// read_file returns the contents of the file named by the untrusted
// "file" query parameter, restricted to the working directory.
func read_file(ctx *gin.Context) {
	filename, _ := ctx.GetQuery("file")
	log.Println(filename)
	// The query value is untrusted: keep only the base name so a request
	// like ?file=../../etc/passwd cannot traverse outside the serving
	// directory (the original passed it to ReadFile unchecked).
	content, err := ioutil.ReadFile(filepath.Base(filename))
	if err != nil {
		log.Println(err.Error())
	}
	ctx.String(200, "%s", content)
}
// main wires the routes and serves on :2333.
func main() {
	engine := gin.Default()
	engine.LoadHTMLGlob("templates/*")
	engine.GET("/", index_get)
	engine.GET("/readfile", read_file)
	engine.POST("/", index_post)
	engine.Run(":2333")
}
|
// Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hook
import (
"context"
"net/http"
"github.com/go-logr/logr"
"github.com/google/go-github/v27/github"
"github.com/pkg/errors"
ghutils "github.com/gardener/test-infra/pkg/tm-bot/github"
"github.com/gardener/test-infra/pkg/tm-bot/plugins"
"github.com/gardener/test-infra/pkg/tm-bot/plugins/echo"
"github.com/gardener/test-infra/pkg/tm-bot/plugins/resume"
"github.com/gardener/test-infra/pkg/tm-bot/plugins/skip"
commontest "github.com/gardener/test-infra/pkg/tm-bot/plugins/test/common"
"github.com/gardener/test-infra/pkg/tm-bot/plugins/test/gardener"
"github.com/gardener/test-infra/pkg/tm-bot/plugins/test/single"
"github.com/gardener/test-infra/pkg/tm-bot/plugins/xkcd"
testsmanager "github.com/gardener/test-infra/pkg/tm-bot/tests"
)
// Handler receives GitHub webhook deliveries, validates them against
// the shared secret and forwards comment events to the plugin framework.
type Handler struct {
log logr.Logger
ghMgr ghutils.Manager
// webhookSecretToken is the shared secret used to verify webhook
// payload signatures.
webhookSecretToken []byte
}
// New sets up plugin persistence, registers all built-in plugins,
// resumes any previously running plugin state and returns a webhook
// Handler. Registration order is preserved as written; do not reorder
// without checking the plugin framework's expectations.
func New(log logr.Logger, ghMgr ghutils.Manager, webhookSecretToken string, runs *testsmanager.Runs) (*Handler, error) {
persistence, err := plugins.NewKubernetesPersistence(runs.GetClient(), "state", "tm-bot")
if err != nil {
return nil, errors.Wrap(err, "unable to setup plugin persistence")
}
plugins.Setup(log.WithName("plugins"), persistence)
// register plugins.Plugin()
plugins.Register(echo.New())
// xkcd is the only built-in whose constructor can fail, hence the
// separate error check.
xkcdPlugin, err := xkcd.New()
if err != nil {
return nil, errors.Wrap(err, "unable to initialize xkcd plugin")
}
plugins.Register(xkcdPlugin)
plugins.Register(commontest.New(log, runs))
plugins.Register(gardener.New(log, runs))
plugins.Register(single.New(log, runs))
plugins.Register(skip.New(log))
plugins.Register(resume.New(log, runs.GetClient()))
// Pick up plugin state persisted by a previous bot instance.
if err := plugins.ResumePlugins(ghMgr); err != nil {
return nil, errors.Wrap(err, "unable to resume running plugins")
}
return &Handler{
log: log,
ghMgr: ghMgr,
webhookSecretToken: []byte(webhookSecretToken),
}, nil
}
// HandleWebhook is the HTTP entrypoint for GitHub webhook deliveries.
// It verifies the payload signature, parses the event and reacts to
// newly created comments on pull requests; all other events are ignored.
func (h *Handler) HandleWebhook(w http.ResponseWriter, r *http.Request) {
payload, err := github.ValidatePayload(r, h.webhookSecretToken)
if err != nil {
h.log.Error(err, "payload validation failed")
// NOTE(review): a signature mismatch is a client-side problem; 400/401
// would describe it better than 500 — confirm GitHub's retry behavior
// before changing the status class.
http.Error(w, "validation failed", http.StatusInternalServerError)
return
}
event, err := github.ParseWebHook(github.WebHookType(r), payload)
if err != nil {
h.log.Error(err, "unable to parse webhook")
http.Error(w, "unable to parse webhook", http.StatusInternalServerError)
return
}
switch event := event.(type) {
case *github.IssueCommentEvent:
// Only freshly created comments on pull requests are dispatched;
// edits, deletions and plain-issue comments are dropped silently.
if event.GetIssue().IsPullRequest() && ghutils.EventActionType(event.GetAction()) == ghutils.EventActionTypeCreated {
h.handleGenericEvent(w, &ghutils.GenericRequestEvent{
InstallationID: event.GetInstallation().GetID(),
ID: event.GetIssue().GetID(),
Number: event.GetIssue().GetNumber(),
Repository: event.GetRepo(),
Body: event.GetComment().GetBody(),
Author: event.GetComment().GetUser(),
})
}
default:
// NOTE(review): http.Error with StatusNoContent writes a body, but 204
// responses must not carry one — confirm the intended status.
http.Error(w, "event not handled", http.StatusNoContent)
return
}
// Empty write acknowledges the delivery with a 200.
if _, err := w.Write([]byte{}); err != nil {
h.log.Error(err, "unable to send response to github")
}
}
// handleGenericEvent builds a GitHub client for the event, attaches the
// head commit sha and dispatches the request to the plugin framework in
// a background goroutine.
func (h *Handler) handleGenericEvent(w http.ResponseWriter, event *ghutils.GenericRequestEvent) {
	h.log.V(5).Info("handle generic event", "user", event.GetAuthorName(), "id", event.ID, "number", event.Number)
	// Ignore messages from bots. Guard the pointer chain first: Author or
	// its Type can be nil on some payloads, and the original unguarded
	// *event.Author.Type dereference would panic the handler.
	if event.Author == nil || event.Author.Type == nil || ghutils.UserType(*event.Author.Type) != ghutils.UserTypeUser {
		return
	}
	ghClient, err := h.ghMgr.GetClient(event)
	if err != nil {
		h.log.Error(err, "unable to build client", "user", event.GetAuthorName())
		http.Error(w, "internal error", http.StatusUnauthorized)
		return
	}
	// add head commit sha to the event
	head, err := ghClient.GetHead(context.TODO(), event)
	if err != nil {
		h.log.Error(err, "unable to get head of event", "number", event.Number)
		return
	}
	event.Head = head
	// Plugins may run long; detach so the webhook responds promptly.
	go func() {
		if err := plugins.HandleRequest(ghClient, event); err != nil {
			h.log.Error(err, "")
		}
	}()
}
|
package core
import "fmt"
// Hello prints the project greeting to standard output.
func Hello() {
	const greeting = "Hello n64emu!"
	fmt.Println(greeting)
}
|
// Copyright (c) 2017-2018 Zededa, Inc.
// SPDX-License-Identifier: Apache-2.0
// Manage Xen guest domains based on the subscribed collection of DomainConfig
// and publish the result in a collection of DomainStatus structs.
// We run a separate go routine for each domU to be able to boot and halt
// them concurrently and also pick up their state periodically.
package domainmgr
import (
"flag"
"github.com/lf-edge/eve/pkg/pillar/hypervisor"
"github.com/lf-edge/eve/pkg/pillar/types"
"io/ioutil"
"os"
"path"
"reflect"
"testing"
)
// TestDefaultXenHypervisor verifies that "xen" resolves as the default
// hypervisor.
func TestDefaultXenHypervisor(t *testing.T) {
	hypervisorPtr := flag.String("h", "xen", "")
	flag.CommandLine.Parse([]string{""})
	hyper, err := hypervisor.GetHypervisor(*hypervisorPtr)
	if err != nil {
		// Fail before touching hyper: on error GetHypervisor may return a
		// nil interface, and the original hyper.Name() call inside the
		// error message would panic the test instead of failing it.
		t.Fatalf("Expected xen default hypervisor, got error %v", err)
	}
	if hyper.Name() != "xen" {
		t.Errorf("Expected xen default hypervisor, got %s", hyper.Name())
	}
}
// TestFetchEnvVariablesFromCloudInit table-tests decoding of
// base64-encoded cloud-init user data into an environment-variable map,
// including one malformed case that must produce an error.
func TestFetchEnvVariablesFromCloudInit(t *testing.T) {
type fetchEnvVar struct {
config types.DomainConfig
expectOutput map[string]string
}
// testStrings are base 64 encoded strings which will contain
// environment variables which user will pass in custom config
// template in the manifest.
// testString1 contains FOO=BAR environment variables which will
// be set inside container.
testString1 := "Rk9PPUJBUg=="
// testString2 contains SQL_ROOT_PASSWORD=$omeR&NdomPa$$word environment variables which will
// be set inside container.
testString2 := "U1FMX1JPT1RfUEFTU1dPUkQ9JG9tZVImTmRvbVBhJCR3b3Jk"
// testString3 contains PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
// environment variables which will be set inside container.
testString3 := "UEFUSD0vdXNyL2xvY2FsL3NiaW46L3Vzci9sb2NhbC9iaW46L3Vzci9zYmluOi91c3IvYmluOi9zYmluOi9iaW4="
// testString4 contains FOO=1 2 (with space in between)
// environment variables which will be set inside container.
testString4 := "Rk9PPTEgMg=="
// testString5 contains
// FOO1=BAR1
// FOO2= [Without value]
// FOO3 [Only key without delimiter]
// FOO4=BAR4
// environment variables which will be set inside container.
testString5 := "Rk9PMT1CQVIxCkZPTzI9CkZPTzMKRk9PND1CQVI0"
testFetchEnvVar := map[string]fetchEnvVar{
"Test env var 1": {
config: types.DomainConfig{
CloudInitUserData: &testString1,
},
expectOutput: map[string]string{
"FOO": "BAR",
},
},
"Test env var 2": {
config: types.DomainConfig{
CloudInitUserData: &testString2,
},
expectOutput: map[string]string{
"SQL_ROOT_PASSWORD": "$omeR&NdomPa$$word",
},
},
"Test env var 3": {
config: types.DomainConfig{
CloudInitUserData: &testString3,
},
expectOutput: map[string]string{
"PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
},
},
"Test env var 4": {
config: types.DomainConfig{
CloudInitUserData: &testString4,
},
expectOutput: map[string]string{
"FOO": "1 2",
},
},
// The negative case deliberately has no expectOutput: the malformed
// FOO3 line (no '=' delimiter) must make decoding fail.
"Negative test env var 5": {
config: types.DomainConfig{
CloudInitUserData: &testString5,
},
},
}
for testname, test := range testFetchEnvVar {
t.Logf("Running test case %s", testname)
envMap, err := fetchEnvVariablesFromCloudInit(nil, test.config)
switch testname {
case "Negative test env var 5":
if err == nil {
t.Errorf("Fetching env variable from cloud init passed, expecting it to be failed.")
}
default:
if err != nil {
t.Errorf("Fetching env variable from cloud init failed: %v", err)
}
if !reflect.DeepEqual(envMap, test.expectOutput) {
t.Errorf("Env map ( %v ) != Expected value ( %v )", envMap, test.expectOutput)
}
}
}
}
// TestCreateMountPointExecEnvFiles feeds a realistic OCI image config
// (content below) through createMountPointExecEnvFiles and checks the
// three files it should produce: cmdline, mountPoints and environment.
func TestCreateMountPointExecEnvFiles(t *testing.T) {
content := `{
"created": "2020-02-05T00:52:57.387773144Z",
"author": "adarsh@zededa.com",
"architecture": "amd64",
"os": "linux",
"config": {
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": [
"/bin/sh"
],
"Volumes": {
"/myvol": {}
}
},
"rootfs": {
"type": "layers",
"diff_ids": [
"sha256:a79a1aaf8143bbbe6061bc5326a1dcc490d9b9c1ea6b9c27d14c182e15c535ee",
"sha256:a235ff03ae531a929c240688c52e802c4f3714b2446d1f34b1d20bfd59ce1965"
]
},
"history": [
{
"created": "2019-01-30T22:20:20.383667418Z",
"created_by": "/bin/sh -c #(nop) ADD file:eaf29f2198d25cc0e88b84af6478f422db6a8ffb6919bf746117252cfcd88a47 in / "
},
{
"created": "2019-01-30T22:20:20.590559734Z",
"created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]",
"empty_layer": true
},
{
"created": "2020-02-05T00:52:55.559839255Z",
"created_by": "/bin/sh -c #(nop) MAINTAINER adarsh@zededa.com",
"author": "adarsh@zededa.com",
"empty_layer": true
},
{
"created": "2020-02-05T00:52:57.115531308Z",
"created_by": "/bin/sh -c mkdir /myvol",
"author": "adarsh@zededa.com"
},
{
"created": "2020-02-05T00:52:57.387773144Z",
"created_by": "/bin/sh -c #(nop) VOLUME [/myvol]",
"author": "adarsh@zededa.com",
"empty_layer": true
}
]
}`
//create a temp dir to hold resulting files
// NOTE(review): the TempDir and Create errors below are ignored; a
// failure there would surface later as confusing downstream errors.
dir, _ := ioutil.TempDir("/tmp", "podfiles")
rootDir := path.Join(dir, "runx")
podPath := path.Join(dir, "pod")
err := os.MkdirAll(rootDir, 0777)
if err != nil {
t.Errorf("failed to create temporary dir")
} else {
// Only clean up when the dir was actually created.
defer os.RemoveAll(dir)
}
// now create a fake pod file
file, _ := os.Create(podPath)
_, err = file.WriteString(content)
if err != nil {
t.Errorf("failed to write to a pod file")
}
execpath := []string{"/bin/sh"}
// the proper format for this
execpathStr := "\"/bin/sh\""
workdir := "/data"
mountpoints := map[string]struct{}{
"/myvol": {},
}
env := []string{"PATH=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\""}
err = createMountPointExecEnvFiles(rootDir, mountpoints, execpath, workdir, env, 2)
if err != nil {
t.Errorf("createMountPointExecEnvFiles failed %v", err)
}
// cmdline must contain the quoted exec path.
cmdlineFile := path.Join(rootDir, "cmdline")
cmdline, err := ioutil.ReadFile(cmdlineFile)
if err != nil {
t.Errorf("createMountPointExecEnvFiles failed to create cmdline file %s %v", cmdlineFile, err)
}
if string(cmdline) != execpathStr {
t.Errorf("mismatched cmdline file content, actual '%s' expected '%s'", string(cmdline), execpathStr)
}
// mountPoints must list each mount on its own line.
mountFile := path.Join(rootDir, "mountPoints")
mountExpected := "/myvol" + "\n"
mounts, err := ioutil.ReadFile(mountFile)
if err != nil {
t.Errorf("createMountPointExecEnvFiles failed to create mountPoints file %s %v", mountFile, err)
}
if string(mounts) != mountExpected {
t.Errorf("mismatched mountpoints file content, actual '%s' expected '%s'", string(mounts), mountExpected)
}
envFile := path.Join(rootDir, "environment")
envActual, err := ioutil.ReadFile(envFile)
// start with WORKDIR
envExpect := "export WORKDIR=\"/data\"\nexport PATH=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"\n"
if err != nil {
t.Errorf("createMountPointExecEnvFiles failed to create environment file %s %v", envFile, err)
}
if string(envActual) != envExpect {
t.Errorf("mismatched env file content, actual '%s' expected '%s'", string(envActual), envExpect)
}
}
|
package balancer
// Smooth weighted round-robin balancing
// Ref: https://github.com/phusion/nginx/commit/27e94984486058d73157038f7950a0a36ecc6e35
// swrr implements smooth weighted round-robin balancing over a set of
// weighted choices.
type swrr struct {
items []*Choice
// count caches len(items) after cleanWeight filtering (see Update).
count int
}
// NewSmoothWeightedRoundRobin builds a smooth weighted round-robin
// balancer over the given choices.
func NewSmoothWeightedRoundRobin(choices ...*Choice) *swrr {
	b := &swrr{}
	b.Update(choices)
	return b
}
// Select returns the next balanced item, or nil when the balancer holds
// no choices. The variadic string arguments are ignored.
func (b *swrr) Select(_ ...string) interface{} {
	if b.count == 0 {
		return nil
	}
	if b.count == 1 {
		// Single choice: no bookkeeping needed.
		return b.items[0].Item
	}
	return b.chooseNext().Item
}
// chooseNext advances the smooth weighted round-robin state and returns
// the winning choice (nginx's smooth WRR: every CurrentWeight grows by
// its Weight each round, the largest wins and is penalized by the total,
// spreading selections proportionally and without bursts).
func (b *swrr) chooseNext() (choice *Choice) {
total := 0
for i := range b.items {
c := b.items[i]
// A nil entry aborts the scan; callers receive nil ("nothing selectable").
if c == nil {
return nil
}
total += c.Weight
c.CurrentWeight += c.Weight
if choice == nil || c.CurrentWeight > choice.CurrentWeight {
choice = c
}
}
if choice == nil {
return nil
}
// Penalize the winner so others catch up on later rounds.
choice.CurrentWeight -= total
return choice
}
// Name reports the balancer's algorithm name.
func (b *swrr) Name() string {
	const algorithm = "SmoothWeightedRoundRobin"
	return algorithm
}
// Update replaces the choice set (after cleanWeight filtering) and
// reports whether any usable choice remains.
func (b *swrr) Update(choices []*Choice) bool {
b.items, b.count = cleanWeight(choices)
return b.count > 0
}
|
package main
import (
// 系统包
"database/sql"
"fmt"
// 自定义包
"github.com/qiulc/gotest/conn/myredis"
"github.com/qiulc/gotest/conn/mysql"
// 外包
"github.com/go-redis/redis"
)
// Package-level handles for the MySQL and Redis connections opened in main.
var db *sql.DB
var rdb *redis.Client
// main connects to MySQL and Redis in turn, logging progress; it exits
// early if either connection fails.
func main() {
fmt.Println("你好啊 ")
var err error
db, err = mysql.InitDB()
if err != nil {
fmt.Println("连接Mysql数据库失败===>", err)
return
}
fmt.Println("Mysql已连接...")
rdb, err = myredis.InitClient()
if err != nil {
fmt.Println("连接Redis数据库失败===>", err)
return
}
fmt.Println("Redis已连接...")
}
|
//go:generate go get github.com/UnnoTed/fileb0x
//go:generate fileb0x b0x.yml
//go:generate go get -v github.com/jteeuwen/go-bindata/...
//go:generate go get -v github.com/elazarl/go-bindata-assetfs/...
//go:generate go-bindata -nomemcopy -prefix builtin_models/ -pkg caffe -o builtin_models_static.go -ignore=.DS_Store -ignore=README.md builtin_models/...
package caffe
|
package gopaxos
import (
"github.com/buptmiao/gopaxos/paxospb"
)
// systemVariableStore persists paxos SystemVariables per group on top
// of a LogStorage backend.
type systemVariableStore struct {
logStorage LogStorage
}
// newSystemVariableStore wraps the given LogStorage in a
// systemVariableStore.
func newSystemVariableStore(ls LogStorage) *systemVariableStore {
	return &systemVariableStore{logStorage: ls}
}
// write marshals sysVar and persists it for the given group via the
// underlying LogStorage; failures are logged and returned.
func (s *systemVariableStore) write(wo WriteOptions, groupIdx int, sysVar *paxospb.SystemVariables) error {
value, err := sysVar.Marshal()
if err != nil {
lPLGErr(groupIdx, "Variables.Marshal fail")
return err
}
err = s.logStorage.SetSystemVariables(wo, groupIdx, value)
if err != nil {
lPLGErr(groupIdx, "DB.Put fail, groupidx %d bufferlen %d ret %v",
groupIdx, len(value), err)
return err
}
return nil
}
// read loads and unmarshals the persisted system variables for the
// group. A missing entry is logged at a lower severity but still
// returned to the caller as ErrNotFoundFromStorage.
func (s *systemVariableStore) read(groupIdx int) (*paxospb.SystemVariables, error) {
value, err := s.logStorage.GetSystemVariables(groupIdx)
if err != nil && err != ErrNotFoundFromStorage {
lPLGErr(groupIdx, "DB.Get fail, groupidx %d err: %v", groupIdx, err)
return nil, err
}
// "Not found" is informational for the log, but the sentinel error is
// still propagated so callers can distinguish it.
if err == ErrNotFoundFromStorage {
lPLGImp(groupIdx, "DB.Get not found, groupidx %d", groupIdx)
return nil, err
}
sysVar := &paxospb.SystemVariables{}
err = sysVar.Unmarshal(value)
if err != nil {
lPLGErr(groupIdx, "Variables.Unmarshal fail, bufferlen %d", len(value))
return nil, err
}
return sysVar, nil
}
|
package models
import (
"time"
)
// Request represents a single recorded service request.
// (The previous comment — "album represents data about a record album" —
// was copy-pasted from unrelated example code.)
type Request struct {
ID int `json:"id"`
ServiceId int `json:"service_id"`
// ResponseTime is the measured response time; unit not evident from
// this file — confirm against the writer of this record.
ResponseTime float32 `json:"response_time"`
CreatedAt time.Time `json:"created_at"`
}
// RequestList is a JSON wrapper around a slice of Request records.
type RequestList struct {
Requests []Request `json:"requests"`
}
|
package main
import (
"fmt"
"sync"
)
// main fans ten integers through an unbuffered channel from concurrent
// producer goroutines and prints each value as it arrives (order is
// nondeterministic).
func main() {
	results := make(chan int)
	var producers sync.WaitGroup
	const total = 10
	for i := 0; i < total; i++ {
		producers.Add(1)
		go func(v int) {
			defer producers.Done()
			results <- v
		}(i)
	}
	// Close the channel once every producer has sent, ending the range
	// loop below.
	go func() {
		producers.Wait()
		close(results)
	}()
	for v := range results {
		fmt.Println(v)
	}
}
|
package main
// https://www.domoticz.com/forum/viewtopic.php?t=1785 // virtual device?
// https://www.domoticz.com/forum/viewtopic.php?t=10940 // sonos
// https://github.com/jishi/node-sonos-http-api // sonos api
// https://www.domoticz.com/forum/viewtopic.php?t=11577 // update virtual device
// https://github.com/dhleong/ps4-waker/issues/14 // ps4 waker -> netflix
import (
"crypto/tls"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strings"
)
// sonosConfig holds the flag/env-derived settings for the proxy:
// upstream sonos API URL, credentials and the local listen address.
type sonosConfig struct {
urlStr *string
loginStr *string
passwordStr *string
listenerStr *string
}
// config is populated once in init() from flags/environment.
var config sonosConfig
// init reads configuration from flags (falling back to environment
// variables as flag defaults) and aborts the process when any required
// value is missing. The listener address defaults to 127.0.0.1:5025.
func init() {
config.listenerStr = flag.String("listen", os.Getenv("LISTENER"), "listner address:port")
config.urlStr = flag.String("url", os.Getenv("SONOS_URL"), "sonos http://address:port")
config.loginStr = flag.String("login", os.Getenv("SONOS_LOGIN"), "sonos login")
config.passwordStr = flag.String("password", os.Getenv("SONOS_PASSWORD"), "sonos password")
flag.Parse()
if *config.urlStr == "" {
flag.Usage()
log.Fatal("url required!")
}
if *config.loginStr == "" {
flag.Usage()
log.Fatal("login required!")
}
if *config.passwordStr == "" {
flag.Usage()
log.Fatal("password required!")
}
if *config.listenerStr == "" {
l := "127.0.0.1:5025"
config.listenerStr = &l
}
}
// handler proxies an incoming request to the sonos HTTP API: it first
// re-groups the zones, applies any "volume"/"shuffle" query parameters,
// then forwards the remaining path upstream and relays the response.
func handler(w http.ResponseWriter, r *http.Request) {
	groupZones()
	fmt.Printf("query: %s\n", r.URL.Query())
	// Volume
	if volume, ok := r.URL.Query()["volume"]; ok {
		room := "Living Room bar"
		path := strings.Split(r.URL.Path, "/")
		fmt.Printf("len path: %d", len(path))
		if len(path) > 2 {
			room = path[1]
		}
		fmt.Printf("Setting volume for %s to %s\n", room, volume)
		setVolume(room, volume[0])
	}
	//Shuffle
	if shuffle, ok := r.URL.Query()["shuffle"]; ok {
		room := "Living Room bar"
		path := strings.Split(r.URL.Path, "/")
		fmt.Printf("len path: %d", len(path))
		if len(path) > 2 {
			room = path[1]
		}
		fmt.Printf("Setting shuffle for %s to %s\n", room, shuffle)
		setShuffle(room, shuffle[0])
	}
	r.URL.RawQuery = ""
	result, err := getApi(r.URL.String())
	if err != nil {
		fmt.Printf("Error: %s", err)
	}
	// Relay the upstream bytes verbatim. The original used
	// fmt.Fprintf(w, string(result)), which treats the payload as a
	// format string: any '%' in the API response would be mangled or
	// reported as a bad verb.
	if _, err := w.Write(result); err != nil {
		log.Printf("writing response: %s", err)
	}
}
// main installs the catch-all proxy handler and serves on the
// configured listener address until the server fails.
func main() {
	http.HandleFunc("/", handler)
	if err := http.ListenAndServe(*config.listenerStr, nil); err != nil {
		log.Fatal(err)
	}
}
// groupZones inspects the current zone topology and, when the
// "Living Room bar" zone is missing expected members, asks the sonos
// API to re-join them. A /zones fetch failure is fatal to the process.
func groupZones() {
zones, err := getZones()
if err != nil {
log.Fatalf("Failed to get zone: %s\n", err)
}
//fmt.Printf("Zone data: %+v\n", zones)
for _, zone := range *zones {
fmt.Printf("Got zone: %s\n", zone.Coordinator.RoomName)
if zone.Coordinator.RoomName == "Living Room bar" {
// 3 members == fully grouped; anything less triggers re-joining.
if len(zone.Members) == 3 {
fmt.Printf("Living room is grouped!\n")
} else {
fmt.Printf("Expected 3 members, but got: %d\n", len(zone.Members))
// Track which expected members are already present.
expectedMembers := map[string]bool{
"Living Room2": false,
//"Kitchen": false,
}
for _, member := range zone.Members {
if _, ok := expectedMembers[member.RoomName]; ok {
fmt.Printf("member: %s is part of %s\n", member.RoomName, zone.Coordinator.RoomName)
expectedMembers[member.RoomName] = true
}
}
// Join every expected member that was not found.
for name, member := range expectedMembers {
if member == false {
fmt.Printf("Need to join %s to Bar\n", name)
err := setGroup(name, "Living Room bar")
if err != nil {
fmt.Printf("Failed to join %s to %s: %s", name, "Living Room bar", err)
}
}
}
fmt.Printf("Living room is grouped!\n")
}
}
}
}
// getState fetches /state from the sonos API and decodes the JSON body.
func getState() (*State, error) {
	body, err := getApi("/state")
	if err != nil {
		return nil, err
	}
	state := &State{}
	if err := json.Unmarshal(body, state); err != nil {
		return nil, err
	}
	return state, nil
}
// getZones fetches /zones from the sonos API and decodes the JSON body.
func getZones() (*Zones, error) {
	body, err := getApi("/zones")
	if err != nil {
		return nil, err
	}
	zones := &Zones{}
	if err := json.Unmarshal(body, zones); err != nil {
		return nil, err
	}
	return zones, nil
}
/*
func PostPathData(path string) error {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client := &http.Client{Transport: tr}
url := fmt.Sprintf("%s%s", *domotics.urlStr, path)
fmt.Printf("GET on: [%s]\n", url)
req, err := http.NewRequest("GET", url, nil)
req.SetBasicAuth(*domotics.loginStr, *domotics.passwordStr)
resp, err := client.Do(req)
if err != nil {
return err
}
bodyText, err := ioutil.ReadAll(resp.Body)
fmt.Printf("Output: %s", bodyText)
return nil
}
*/
// setVolume asks the sonos API to set the volume for room; the decoded
// Result is checked for well-formedness but otherwise discarded.
func setVolume(room, volume string) error {
	endpoint := fmt.Sprintf("/%s/volume/%s", UrlEncoded(room), UrlEncoded(volume))
	body, err := getApi(endpoint)
	if err != nil {
		return err
	}
	var res Result
	if err := json.Unmarshal(body, &res); err != nil {
		return err
	}
	return nil
}
// setShuffle asks the sonos API to set shuffle mode for room; the
// decoded Result is checked for well-formedness but otherwise discarded.
func setShuffle(room, state string) error {
	endpoint := fmt.Sprintf("/%s/shuffle/%s", UrlEncoded(room), UrlEncoded(state))
	body, err := getApi(endpoint)
	if err != nil {
		return err
	}
	var res Result
	if err := json.Unmarshal(body, &res); err != nil {
		return err
	}
	return nil
}
// setGroup asks the sonos API to join room into group; the decoded
// Result is checked for well-formedness but otherwise discarded.
func setGroup(room, group string) error {
	endpoint := fmt.Sprintf("/%s/join/%s", UrlEncoded(room), UrlEncoded(group))
	body, err := getApi(endpoint)
	if err != nil {
		return err
	}
	var res Result
	if err := json.Unmarshal(body, &res); err != nil {
		return err
	}
	return nil
}
// UrlEncoded canonicalizes str through net/url (Parse then String),
// which escapes characters that are invalid in a URL path; a parse
// failure yields the empty string.
func UrlEncoded(str string) string {
	parsed, err := url.Parse(str)
	if err != nil {
		return ""
	}
	return parsed.String()
}
// getApi performs an authenticated GET against the configured sonos API
// and returns the raw response body.
//
// TLS verification is deliberately disabled (kept from the original):
// the target is presumably a LAN device with a self-signed certificate —
// confirm before exposing this beyond a trusted network.
func getApi(path string) ([]byte, error) {
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	client := &http.Client{Transport: tr}
	url := fmt.Sprintf("%s%s", *config.urlStr, path)
	fmt.Printf("GET on: [%s]\n", url)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		// The original ignored this error and would have panicked calling
		// SetBasicAuth on a nil request.
		return []byte{}, err
	}
	req.SetBasicAuth(*config.loginStr, *config.passwordStr)
	resp, err := client.Do(req)
	if err != nil {
		return []byte{}, err
	}
	// Close the body so the connection can be reused (the original
	// leaked it on every call).
	defer resp.Body.Close()
	bodyText, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return []byte{}, err
	}
	//fmt.Printf("Output: %s", bodyText)
	return bodyText, nil
}
|
package golang
// FindSmallestNum returns the index of the smallest element of arr
// (the first such index when there are ties). The name is historical —
// callers rely on receiving the index, not the value, so the signature
// is kept as-is. arr must be non-empty.
func FindSmallestNum(arr []int) int {
	smallestIndex := 0
	for i := 1; i < len(arr); i++ {
		if arr[i] < arr[smallestIndex] {
			smallestIndex = i
		}
	}
	return smallestIndex
}

// SelectSort returns the elements of arr in ascending order using
// selection sort. Unlike the original implementation — whose
// append-based deletion shuffled elements inside the caller's backing
// array — it works on a private copy and leaves arr unchanged.
func SelectSort(arr []int) []int {
	rest := append([]int(nil), arr...) // copy: never mutate the input
	sorted := make([]int, 0, len(rest))
	for len(rest) > 0 {
		i := FindSmallestNum(rest)
		sorted = append(sorted, rest[i])
		rest = append(rest[:i], rest[i+1:]...)
	}
	return sorted
}
|
// Copyright 2014 Wandoujia Inc. All Rights Reserved.
// Licensed under the MIT (MIT-LICENSE.txt) license.
package cmd
import (
"bufio"
"log"
"sync"
"time"
)
import (
"github.com/wandoulabs/codis/ext/redis-port/utils"
)
// Dump streams an RDB snapshot from the redis master at 'from' into the
// file 'output'. It first waits (polling every second, logging "-" on
// timeout and "+" on a zero-size answer) for the sync handshake to
// report the payload size, then pipes the payload to disk while a
// progress goroutine logs read/write counters until all bytes arrived.
func Dump(ncpu int, from, output string) {
log.Printf("[ncpu=%d] dump from '%s' to '%s'\n", ncpu, from, output)
fout := openWriteFile(output)
defer fout.Close()
master, wait := openSyncConn(from)
defer master.Close()
// Block until the sync connection announces a non-zero payload size.
var nsize int64
for nsize == 0 {
select {
case nsize = <-wait:
if nsize == 0 {
log.Println("+")
}
case <-time.After(time.Second):
log.Println("-")
}
}
var nread, nwrite AtomicInt64
var wg sync.WaitGroup
wg.Add(1)
// Progress reporter: logs once per second and exits when the full
// payload has been read.
go func() {
defer wg.Done()
for {
r, w := nread.Get(), nwrite.Get()
p := 100 * r / nsize
log.Printf("total = %d - %3d%%, read=%-14d write=%-14d\n", nsize, p, r, w)
if nsize == r {
log.Printf("done\n")
return
}
time.Sleep(time.Second)
}
}()
// Large read buffer (32 MiB) for the network side, smaller (64 KiB)
// write buffer for the file.
reader := bufio.NewReaderSize(master, 1024*1024*32)
writer := bufio.NewWriterSize(fout, 1024*64)
PipeReaderWriter(&wg, reader, writer, &nread, &nwrite, nsize)
wg.Wait()
if err := writer.Flush(); err != nil {
utils.Panic("writer flush error = '%s'", err)
}
}
|
package renderer
import (
"bytes"
"image"
"image/color"
"image/draw"
"github.com/driusan/de/demodel"
"github.com/driusan/de/renderer"
"golang.org/x/image/font"
"golang.org/x/image/math/fixed"
)
// NoSyntaxRenderer is the default renderer. It performs no syntax
// highlighting, delegating size calculation and image mapping to the
// embedded default implementations.
type NoSyntaxRenderer struct {
renderer.DefaultSizeCalcer
renderer.DefaultImageMapper
}
// InvalidateCache discards both the cached size calculation and the
// cached image map so they are rebuilt on next use.
func (r *NoSyntaxRenderer) InvalidateCache() {
	r.DefaultImageMapper.InvalidateCache()
	r.DefaultSizeCalcer.InvalidateCache()
}
// CanRender reports whether this renderer can handle the buffer; the
// plain renderer accepts any buffer.
func (r NoSyntaxRenderer) CanRender(*demodel.CharBuffer) bool {
return true
}
// RenderInto draws the buffer's runes into dst with the monospace face,
// painting the selection (buf.Dot) background and honoring the viewport
// offset. Rendering stops as soon as a glyph row falls entirely below
// the viewport.
func (r NoSyntaxRenderer) RenderInto(dst draw.Image, buf *demodel.CharBuffer, viewport image.Rectangle) error {
bounds := dst.Bounds()
writer := font.Drawer{
Dst: dst,
Src: &image.Uniform{color.Black},
Face: renderer.MonoFontFace,
Dot: fixed.P(bounds.Min.X, bounds.Min.Y+renderer.MonoFontAscent.Floor()),
}
runes := bytes.Runes(buf.Buffer)
for i, r := range runes {
// Compute the cell this rune occupies: tabs span 8 glyph widths and
// a newline's cell extends to the right edge of the viewport (so the
// selection highlight covers the rest of the line).
runeRectangle := image.Rectangle{}
runeRectangle.Min.X = writer.Dot.X.Ceil()
runeRectangle.Min.Y = writer.Dot.Y.Ceil() - renderer.MonoFontAscent.Floor() + 1
switch r {
case '\t':
runeRectangle.Max.X = runeRectangle.Min.X + 8*renderer.MonoFontGlyphWidth.Ceil()
case '\n':
runeRectangle.Max.X = viewport.Max.X
default:
runeRectangle.Max.X = runeRectangle.Min.X + renderer.MonoFontGlyphWidth.Ceil()
}
runeRectangle.Max.Y = runeRectangle.Min.Y + renderer.MonoFontHeight.Ceil() + 1
// Everything below the viewport is off-screen; stop early.
if runeRectangle.Min.Y > viewport.Max.Y {
return nil
}
if runeRectangle.Intersect(viewport) != image.ZR {
// Paint the selection background for runes inside buf.Dot.
if uint(i) >= buf.Dot.Start && uint(i) <= buf.Dot.End {
draw.Draw(
dst,
image.Rectangle{
runeRectangle.Min.Sub(viewport.Min),
runeRectangle.Max.Sub(viewport.Min),
},
&image.Uniform{renderer.TextHighlight},
image.ZP,
draw.Over,
)
}
}
// Whitespace advances the pen without drawing a glyph.
switch r {
case '\t':
writer.Dot.X += renderer.MonoFontGlyphWidth * 8
continue
case '\n':
writer.Dot.Y += renderer.MonoFontHeight
writer.Dot.X = fixed.I(bounds.Min.X)
continue
}
// hack to draw into the dst using the viewport coordinate system
writer.Dot.X -= fixed.I(viewport.Min.X)
writer.Dot.Y -= fixed.I(viewport.Min.Y)
writer.DrawString(string(r))
writer.Dot.X += fixed.I(viewport.Min.X)
writer.Dot.Y += fixed.I(viewport.Min.Y)
}
return nil
}
|
package model
// Todo is a single to-do item owned by the user identified by UID.
type Todo struct {
ID string `json:"id"`
Text string `json:"text"`
UID string `json:"uid"`
}
|
package utils_test
import (
. "github.com/gin-tonic/pkg/api/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Ginkgo specs for ReadConfig: a valid config file must parse and carry
// server host/port; a malformed, missing or empty path must error with
// a nil config.
var _ = Describe("Utils unit tests", func() {
Describe("Read config file", func() {
Context("With specified config file path", func() {
It("should exist and can be marshalled", func() {
// Given
existingPath := "../../config/service-config.yml"
// When
config, err := ReadConfig(existingPath)
// Then
Expect(err).To(BeNil())
Expect(config.Server).To(Not(BeNil()))
Expect(config.Server.Port).To(Not(BeNil()))
Expect(config.Server.Host).To(Not(BeNil()))
})
It("should exist and can be not marshalled", func() {
// Given: a file that exists but does not unmarshal cleanly.
existingPath := "../../config/service-config-wrong.yml"
// When
config, err := ReadConfig(existingPath)
// Then
Expect(err).To(Not(BeNil()))
Expect(config).To(BeNil())
})
It("should not exist", func() {
// Given
existingPath := "../../config/service-config-non-exist.yml"
// When
config, err := ReadConfig(existingPath)
// Then
Expect(err).To(Not(BeNil()))
Expect(config).To(BeNil())
})
})
Context("With non specified config file", func() {
It("should not exist", func() {
// When: empty path must be rejected like a missing file.
config, err := ReadConfig("")
// Then
Expect(err).To(Not(BeNil()))
Expect(config).To(BeNil())
})
})
})
})
|
package miner
import (
"btcnetwork/common"
"encoding/hex"
)
// MiningState describes the miner's run mode.
type MiningState uint32

// Run-mode constants. Semantics are inferred from the names
// (stopped / mine one block / mine continuously) — confirm in the mining loop.
const (
	StateStop     = MiningState(0)
	StateOneBlock = MiningState(1)
	StateAuto     = MiningState(2)
)
// Config carries the parameters used when assembling and mining a block.
type Config struct {
	Version uint32   // block version
	Target  [32]byte // proof-of-work target (decoded from hex)
	Bits    uint32   // compact target encoding
	// Block capacity upper limit (translated original note) —
	// NOTE(review): no corresponding field exists; confirm it was removed
	// on purpose.
	// Block reward:
	Reward          uint64
	MinerPubKeyHash [20]byte // 20-byte public-key hash taken from the miner address
	MineEmptyBlock  bool     // whether to mine blocks with no transactions
	MineTimeval     int
	MinerBanner     string
	FixedTxsInBlock int
	state           MiningState // current run mode; managed internally
}
var (
	minerConfig *Config   // package-wide miner configuration; set elsewhere
	minerStop   chan bool // presumably signals the mining loop to stop — not initialized here
)
// InitConfig builds the miner configuration from the node configuration.
// (Translated original note: the configuration should be recomputed by
// replaying block headers starting from block 0.)
func InitConfig(cfg *common.Config) *Config {
	// Decode the miner address to recover its public-key hash: byte 0 is
	// the version prefix, bytes 1..20 are the 20-byte hash.
	addr, err := common.Base58Decode(cfg.MinerAddr)
	if err != nil {
		panic(err)
	}
	// Guard the addr[1:21] slice below; a short decode would otherwise
	// crash with an opaque index-out-of-range panic.
	if len(addr) < 21 {
		panic("miner: decoded miner address is too short: " + cfg.MinerAddr)
	}
	minerCfg := Block1Config()
	copy(minerCfg.MinerPubKeyHash[:], addr[1:21])
	minerCfg.MineEmptyBlock = cfg.MineEmptyBlock
	minerCfg.MineTimeval = cfg.MineTimeval
	minerCfg.MinerBanner = cfg.MinerBanner
	minerCfg.FixedTxsInBlock = cfg.FixedTxsInBlock
	// Adjust height-dependent parameters, then start in the stopped state.
	minerCfg = EvolveConfig(minerCfg)
	minerCfg.state = StateStop
	return minerCfg
}
// EvolveConfig adjusts the configuration for the current chain height.
// (Translated original note: as the block height grows, parameters such as
// the difficulty target and the block reward may change.)
func EvolveConfig(cfg *Config) *Config {
	// TODO: implement later; parameter changes are not considered yet, so
	// this is currently the identity. It will need to adjust Bits etc.
	// based on the current block height.
	return cfg
}
// Block1Config returns the mining configuration for block 1, filled in from
// the hard-coded genesis block parameters.
// (Translated original note: mining starts at block 1; block 0 is not mined,
// it is baked into the code.)
func Block1Config() *Config {
	cfg := Config{}
	cfg.Version = common.MinerVersion
	// GenesisTarget is a compile-time constant; the original code discarded
	// this error, which would silently leave Target all-zero. Fail fast
	// instead.
	buf, err := hex.DecodeString(common.GenesisTarget)
	if err != nil {
		panic("miner: invalid common.GenesisTarget: " + err.Error())
	}
	copy(cfg.Target[:], buf)
	cfg.Bits = common.GenesisBlockBits
	cfg.Reward = common.GenesisBlockReward
	return &cfg
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cli
import (
"bufio"
"encoding/base64"
gohex "encoding/hex"
"fmt"
"io"
"os"
"strings"
"github.com/cockroachdb/cockroach/pkg/sql/protoreflect"
"github.com/cockroachdb/errors"
"github.com/mattn/go-isatty"
"github.com/spf13/cobra"
)
// runDebugDecodeProto reads whitespace-separated, hex- or base64-encoded
// protobuf payloads from stdin and writes their JSON decodings to stdout.
// A usage hint is printed when stdin is an interactive terminal.
func runDebugDecodeProto(_ *cobra.Command, _ []string) error {
	if isatty.IsTerminal(os.Stdin.Fd()) {
		fmt.Fprintln(stderr,
			`# Reading proto-encoded pieces of data from stdin.
# Press Ctrl+C or Ctrl+D to terminate.`,
		)
	}
	return streamMap(os.Stdout, os.Stdin,
		func(s string) (bool, string, error) {
			// Proto name and emit-defaults flag are bound to command-line
			// flags registered elsewhere in this package.
			return tryDecodeValue(s, debugDecodeProtoName, debugDecodeProtoEmitDefaults)
		})
}
// streamMap applies `fn` to all the scanned fields in `in`, and reports
// the result of `fn` on `out`.
// Errors returned by `fn` are emitted on `out` with a "warning" prefix.
func streamMap(out io.Writer, in io.Reader, fn func(string) (bool, string, error)) error {
sc := bufio.NewScanner(in)
sc.Buffer(nil, 128<<20 /* 128 MiB */)
for sc.Scan() {
for _, field := range strings.Fields(sc.Text()) {
ok, value, err := fn(field)
if err != nil {
fmt.Fprintf(out, "warning: %v", err)
continue
}
if !ok {
fmt.Fprintf(out, "%s\t", field)
// Skip since it doesn't appear that this field is an encoded proto.
continue
}
fmt.Fprintf(out, "%s\t", value)
}
fmt.Fprintln(out, "")
}
return sc.Err()
}
// tryDecodeValue tries to decode the given string with the given proto name
// and reports ok=false if the data was not valid proto-encoded.
// The input is interpreted as hex first, then as standard base64.
func tryDecodeValue(s, protoName string, emitDefaults bool) (ok bool, val string, err error) {
	bytes, err := gohex.DecodeString(s)
	if err != nil {
		// Not hex; fall back to base64 before giving up.
		b, err := base64.StdEncoding.DecodeString(s)
		if err != nil {
			return false, "", nil //nolint:returnerrcheck
		}
		bytes = b
	}
	msg, err := protoreflect.DecodeMessage(protoName, bytes)
	if err != nil {
		return false, "", nil //nolint:returnerrcheck
	}
	j, err := protoreflect.MessageToJSON(msg, emitDefaults)
	if err != nil {
		// Unexpected error: the data was valid protobuf, but does not
		// reflect back to JSON. We report the protobuf struct in the
		// error message nonetheless.
		return false, "", errors.Wrapf(err, "while JSON-encoding %#v", msg)
	}
	return true, j.String(), nil
}
|
package ion
import (
"bytes"
"errors"
"fmt"
"io"
"math/big"
"reflect"
"strconv"
"strings"
)
var (
	// ErrNoInput is returned when there is no input to decode.
	ErrNoInput = errors.New("ion: no input to decode")
)
// Unmarshal unmarshals Ion data to the given object.
func Unmarshal(data []byte, v interface{}) error {
return NewDecoder(NewReader(bytes.NewReader(data))).DecodeTo(v)
}
// UnmarshalStr unmarshals Ion data from a string to the given object.
func UnmarshalStr(data string, v interface{}) error {
	raw := []byte(data)
	return Unmarshal(raw, v)
}
// UnmarshalFrom unmarshals Ion data from a reader to the given object.
func UnmarshalFrom(r Reader, v interface{}) error {
	return (&Decoder{r: r}).DecodeTo(v)
}
// A Decoder decodes go values from an Ion reader.
type Decoder struct {
	r Reader // source of Ion values; all reads go through this
}
// NewDecoder creates a new decoder that reads from r.
func NewDecoder(r Reader) *Decoder {
	d := Decoder{r: r}
	return &d
}
// NewTextDecoder creates a new text decoder. Well, a decoder that uses a reader with
// no shared symbol tables; it'll work to read binary too if the binary doesn't reference
// any shared symbol tables.
func NewTextDecoder(in io.Reader) *Decoder {
	return NewDecoder(NewReader(in))
}
// Decode decodes a value from the underlying Ion reader without any expectations
// about what it's going to get. Structs become map[string]interface{}s, Lists and
// Sexps become []interface{}s.
func (d *Decoder) Decode() (interface{}, error) {
	if d.r.Next() {
		return d.decode()
	}
	// No value: either the reader failed or the input is exhausted.
	if err := d.r.Err(); err != nil {
		return nil, err
	}
	return nil, ErrNoInput
}
// Helper form of Decode for when you've already called Next.
// Each Ion type maps to its natural Go equivalent; a null of any type
// decodes to nil.
func (d *Decoder) decode() (interface{}, error) {
	if d.r.IsNull() {
		return nil, nil
	}
	switch d.r.Type() {
	case BoolType:
		return d.r.BoolValue()
	case IntType:
		// Representation depends on magnitude; see decodeInt.
		return d.decodeInt()
	case FloatType:
		return d.r.FloatValue()
	case DecimalType:
		return d.r.DecimalValue()
	case TimestampType:
		return d.r.TimeValue()
	case StringType, SymbolType:
		return d.r.StringValue()
	case BlobType, ClobType:
		return d.r.ByteValue()
	case StructType:
		return d.decodeMap()
	case ListType, SexpType:
		return d.decodeSlice()
	default:
		// Unreachable for a well-formed reader; reaching here is a bug.
		panic("wat?")
	}
}
// decodeInt reads the current integer using the smallest representation the
// reader says it fits in (IntValue, Int64Value, or BigIntValue).
func (d *Decoder) decodeInt() (interface{}, error) {
	size, err := d.r.IntSize()
	if err != nil {
		return nil, err
	}
	switch size {
	case NullInt:
		return nil, nil
	case Int32:
		return d.r.IntValue()
	case Int64:
		return d.r.Int64Value()
	default:
		// Anything wider than 64 bits falls back to a big.Int.
		return d.r.BigIntValue()
	}
}
// decodeMap decodes an Ion struct to a go map.
func (d *Decoder) decodeMap() (map[string]interface{}, error) {
	if err := d.r.StepIn(); err != nil {
		return nil, err
	}
	result := map[string]interface{}{}
	for d.r.Next() {
		name := d.r.FieldName()
		value, err := d.decode()
		if err != nil {
			return nil, err
		}
		// NOTE(review): FieldName returns a *string; this dereference
		// assumes struct fields always carry a name — confirm the reader
		// guarantees that.
		result[*name] = value
	}
	if err := d.r.StepOut(); err != nil {
		return nil, err
	}
	return result, nil
}
// decodeSlice decodes an Ion list or sexp to a go slice.
func (d *Decoder) decodeSlice() ([]interface{}, error) {
	if err := d.r.StepIn(); err != nil {
		return nil, err
	}
	// Non-nil literal, so an empty Ion list decodes to [] rather than nil.
	result := []interface{}{}
	for d.r.Next() {
		value, err := d.decode()
		if err != nil {
			return nil, err
		}
		result = append(result, value)
	}
	if err := d.r.StepOut(); err != nil {
		return nil, err
	}
	return result, nil
}
// DecodeTo decodes an Ion value from the underlying Ion reader into the
// value provided, which must be a non-nil pointer.
func (d *Decoder) DecodeTo(v interface{}) error {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr {
		return errors.New("ion: v must be a pointer")
	}
	if rv.IsNil() {
		return errors.New("ion: v must not be nil")
	}
	// Advance to the next value; distinguish "reader error" from "no input".
	if !d.r.Next() {
		if d.r.Err() != nil {
			return d.r.Err()
		}
		return ErrNoInput
	}
	return d.decodeTo(rv)
}
// decodeTo decodes the current reader value into v, dispatching on the Ion
// type. Intermediate pointers in v are allocated as needed; an Ion null
// sets v to its zero value.
func (d *Decoder) decodeTo(v reflect.Value) error {
	if !v.IsValid() {
		// Don't actually have anywhere to put this value; skip it.
		return nil
	}
	isNull := d.r.IsNull()
	// For a null, indirect stops at the pointer itself so it can be
	// zeroed (set to nil) below.
	v = indirect(v, isNull)
	if isNull {
		v.Set(reflect.Zero(v.Type()))
		return nil
	}
	switch d.r.Type() {
	case BoolType:
		return d.decodeBoolTo(v)
	case IntType:
		return d.decodeIntTo(v)
	case FloatType:
		return d.decodeFloatTo(v)
	case DecimalType:
		return d.decodeDecimalTo(v)
	case TimestampType:
		return d.decodeTimestampTo(v)
	case StringType, SymbolType:
		return d.decodeStringTo(v)
	case BlobType, ClobType:
		return d.decodeLobTo(v)
	case StructType:
		return d.decodeStructTo(v)
	case ListType, SexpType:
		return d.decodeSliceTo(v)
	default:
		// Unreachable for a well-formed reader; reaching here is a bug.
		panic("wat?")
	}
}
// decodeBoolTo stores the current bool into v, which must be a Go bool or
// an empty interface.
func (d *Decoder) decodeBoolTo(v reflect.Value) error {
	val, err := d.r.BoolValue()
	if err != nil {
		return err
	}
	switch v.Kind() {
	case reflect.Bool:
		// Too easy.
		v.SetBool(val)
		return nil
	case reflect.Interface:
		// Only the empty interface can hold an arbitrary bool.
		if v.NumMethod() == 0 {
			v.Set(reflect.ValueOf(val))
			return nil
		}
	}
	return fmt.Errorf("ion: cannot decode bool to %v", v.Type().String())
}
// bigIntType is cached for the big.Int special case in decodeIntTo.
var bigIntType = reflect.TypeOf(big.Int{})

// decodeIntTo stores the current integer into v: any signed or unsigned
// integer kind, a big.Int, or an empty interface. Overflow is reported as
// an error rather than silently truncated.
func (d *Decoder) decodeIntTo(v reflect.Value) error {
	switch v.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		val, err := d.r.Int64Value()
		if err != nil {
			return err
		}
		if v.OverflowInt(val) {
			return fmt.Errorf("ion: value %v won't fit in type %v", val, v.Type().String())
		}
		v.SetInt(val)
		return nil
	case reflect.Uint8, reflect.Uint16, reflect.Uint32:
		// These always fit in an int64, so a sign + overflow check suffices.
		val, err := d.r.Int64Value()
		if err != nil {
			return err
		}
		if val < 0 || v.OverflowUint(uint64(val)) {
			return fmt.Errorf("ion: value %v won't fit in type %v", val, v.Type().String())
		}
		v.SetUint(uint64(val))
		return nil
	case reflect.Uint, reflect.Uint64, reflect.Uintptr:
		// May exceed int64 range, so go through a big.Int.
		val, err := d.r.BigIntValue()
		if err != nil {
			return err
		}
		if !val.IsUint64() {
			return fmt.Errorf("ion: value %v won't fit in type %v", val, v.Type().String())
		}
		uiv := val.Uint64()
		if v.OverflowUint(uiv) {
			return fmt.Errorf("ion: value %v won't fit in type %v", val, v.Type().String())
		}
		v.SetUint(uiv)
		return nil
	case reflect.Struct:
		if v.Type() == bigIntType {
			val, err := d.r.BigIntValue()
			if err != nil {
				return err
			}
			v.Set(reflect.ValueOf(*val))
			return nil
		}
	case reflect.Interface:
		if v.NumMethod() == 0 {
			val, err := d.decodeInt()
			if err != nil {
				return err
			}
			v.Set(reflect.ValueOf(val))
			return nil
		}
	}
	return fmt.Errorf("ion: cannot decode int to %v", v.Type().String())
}
// decodeFloatTo stores the current float into v: a Go float kind, a Decimal,
// or an empty interface.
func (d *Decoder) decodeFloatTo(v reflect.Value) error {
	val, err := d.r.FloatValue()
	if err != nil {
		return err
	}
	switch v.Kind() {
	case reflect.Float32, reflect.Float64:
		if v.OverflowFloat(val) {
			return fmt.Errorf("ion: value %v won't fit in type %v", val, v.Type().String())
		}
		v.SetFloat(val)
		return nil
	case reflect.Struct:
		if v.Type() == decimalType {
			// Round-trip through text, swapping Go's 'e' exponent marker
			// for Ion's 'd' before parsing as a Decimal.
			flt := strconv.FormatFloat(val, 'g', -1, 64)
			dec, err := ParseDecimal(strings.Replace(flt, "e", "d", 1))
			if err != nil {
				return err
			}
			v.Set(reflect.ValueOf(*dec))
			return nil
		}
	case reflect.Interface:
		if v.NumMethod() == 0 {
			v.Set(reflect.ValueOf(val))
			return nil
		}
	}
	return fmt.Errorf("ion: cannot decode float to %v", v.Type().String())
}
// decodeDecimalTo stores the current decimal into v: a Decimal struct or an
// empty interface. Note the interface case stores the pointer as-is while
// the struct case dereferences it.
func (d *Decoder) decodeDecimalTo(v reflect.Value) error {
	val, err := d.r.DecimalValue()
	if err != nil {
		return err
	}
	switch v.Kind() {
	case reflect.Struct:
		if v.Type() == decimalType {
			v.Set(reflect.ValueOf(*val))
			return nil
		}
	case reflect.Interface:
		if v.NumMethod() == 0 {
			v.Set(reflect.ValueOf(val))
			return nil
		}
	}
	return fmt.Errorf("ion: cannot decode decimal to %v", v.Type().String())
}
// decodeTimestampTo stores the current timestamp into v: the time struct
// type used by this package, or an empty interface.
func (d *Decoder) decodeTimestampTo(v reflect.Value) error {
	val, err := d.r.TimeValue()
	if err != nil {
		return err
	}
	switch v.Kind() {
	case reflect.Struct:
		if v.Type() == timeType {
			v.Set(reflect.ValueOf(val))
			return nil
		}
	case reflect.Interface:
		if v.NumMethod() == 0 {
			v.Set(reflect.ValueOf(val))
			return nil
		}
	}
	return fmt.Errorf("ion: cannot decode timestamp to %v", v.Type().String())
}
// decodeStringTo stores the current string or symbol into v, which must be
// a Go string or an empty interface.
func (d *Decoder) decodeStringTo(v reflect.Value) error {
	val, err := d.r.StringValue()
	if err != nil {
		return err
	}
	if k := v.Kind(); k == reflect.String {
		v.SetString(val)
		return nil
	} else if k == reflect.Interface && v.NumMethod() == 0 {
		v.Set(reflect.ValueOf(val))
		return nil
	}
	return fmt.Errorf("ion: cannot decode string to %v", v.Type().String())
}
// decodeLobTo stores the current blob/clob into v: a byte slice, a byte
// array, or an empty interface.
func (d *Decoder) decodeLobTo(v reflect.Value) error {
	val, err := d.r.ByteValue()
	if err != nil {
		return err
	}
	switch v.Kind() {
	case reflect.Slice:
		if v.Type().Elem().Kind() == reflect.Uint8 {
			v.SetBytes(val)
			return nil
		}
	case reflect.Array:
		if v.Type().Elem().Kind() == reflect.Uint8 {
			// Copy what fits, then zero the remainder of the array.
			i := reflect.Copy(v, reflect.ValueOf(val))
			for ; i < v.Len(); i++ {
				v.Index(i).SetUint(0)
			}
			return nil
		}
	case reflect.Interface:
		if v.NumMethod() == 0 {
			v.Set(reflect.ValueOf(val))
			return nil
		}
	}
	return fmt.Errorf("ion: cannot decode lob to %v", v.Type().String())
}
// decodeStructTo stores the current Ion struct into v: a Go struct, a map,
// or an empty interface (which receives a map[string]interface{}).
func (d *Decoder) decodeStructTo(v reflect.Value) error {
	switch v.Kind() {
	case reflect.Struct:
		return d.decodeStructToStruct(v)
	case reflect.Map:
		return d.decodeStructToMap(v)
	case reflect.Interface:
		if v.NumMethod() == 0 {
			m, err := d.decodeMap()
			if err != nil {
				return err
			}
			v.Set(reflect.ValueOf(m))
			return nil
		}
	}
	return fmt.Errorf("ion: cannot decode struct to %v", v.Type().String())
}
// decodeStructToStruct decodes an Ion struct into a Go struct, matching Ion
// field names to struct fields (exact match preferred over case-insensitive).
// Ion fields with no matching struct field are skipped.
func (d *Decoder) decodeStructToStruct(v reflect.Value) error {
	fields := fieldsFor(v.Type())
	if err := d.r.StepIn(); err != nil {
		return err
	}
	for d.r.Next() {
		// NOTE(review): FieldName returns a *string; the dereference below
		// assumes struct fields are always named — confirm.
		name := d.r.FieldName()
		field := findField(fields, *name)
		if field != nil {
			subv, err := findSubvalue(v, field)
			if err != nil {
				return err
			}
			if err := d.decodeTo(subv); err != nil {
				return err
			}
		}
	}
	return d.r.StepOut()
}
// findField returns the field whose name matches name exactly, or failing
// that the first field that matches case-insensitively, or nil if neither
// exists.
func findField(fields []field, name string) *field {
	var foldMatch *field
	for i := range fields {
		candidate := &fields[i]
		switch {
		case candidate.name == name:
			// An exact match always wins; stop looking.
			return candidate
		case foldMatch == nil && strings.EqualFold(candidate.name, name):
			foldMatch = candidate
		}
	}
	return foldMatch
}
// findSubvalue walks f.path down from v (through embedded structs),
// allocating intermediate nil pointers along the way, and returns the
// field to decode into.
func findSubvalue(v reflect.Value, f *field) (reflect.Value, error) {
	for _, i := range f.path {
		if v.Kind() == reflect.Ptr {
			if v.IsNil() {
				// Unexported embedded pointers cannot be allocated here.
				if !v.CanSet() {
					return reflect.Value{}, fmt.Errorf("ion: cannot set embedded pointer to unexported struct: %v", v.Type().Elem())
				}
				v.Set(reflect.New(v.Type().Elem()))
			}
			v = v.Elem()
		}
		v = v.Field(i)
	}
	return v, nil
}
// decodeStructToMap decodes an Ion struct into a Go map. Only string keys
// are supported; other key kinds are rejected up front.
func (d *Decoder) decodeStructToMap(v reflect.Value) error {
	t := v.Type()
	switch t.Key().Kind() {
	case reflect.String:
	default:
		return fmt.Errorf("ion: cannot decode struct to %v", t.String())
	}
	if v.IsNil() {
		v.Set(reflect.MakeMap(t))
	}
	// One scratch value is reused for every entry.
	subv := reflect.New(t.Elem()).Elem()
	if err := d.r.StepIn(); err != nil {
		return err
	}
	for d.r.Next() {
		// NOTE(review): FieldName returns a *string; the dereference below
		// assumes struct fields are always named — confirm.
		name := d.r.FieldName()
		if err := d.decodeTo(subv); err != nil {
			return err
		}
		var kv reflect.Value
		// Non-string key kinds were rejected above, so the default branch
		// here is unreachable.
		switch t.Key().Kind() {
		case reflect.String:
			kv = reflect.ValueOf(*name)
		default:
			panic("wat?")
		}
		if kv.IsValid() {
			v.SetMapIndex(kv, subv)
		}
	}
	return d.r.StepOut()
}
// decodeSliceTo decodes an Ion list/sexp into a Go slice, array, or empty
// interface. Slices are grown as needed; for arrays, extra Ion elements
// beyond the array length are skipped and an unfilled tail is zeroed.
func (d *Decoder) decodeSliceTo(v reflect.Value) error {
	k := v.Kind()
	// If all we know is we need an interface{}, decode an []interface{} with
	// types based on the Ion value stream.
	if k == reflect.Interface && v.NumMethod() == 0 {
		s, err := d.decodeSlice()
		if err != nil {
			return err
		}
		v.Set(reflect.ValueOf(s))
		return nil
	}
	// Only other valid targets are arrays and slices.
	if k != reflect.Array && k != reflect.Slice {
		return fmt.Errorf("ion: cannot unmarshal slice to %v", v.Type().String())
	}
	if err := d.r.StepIn(); err != nil {
		return err
	}
	i := 0
	// Decode values into the array or slice.
	for d.r.Next() {
		if v.Kind() == reflect.Slice {
			// If it's a slice, we can grow it as needed.
			if i >= v.Cap() {
				// Grow by 1.5x, with a floor of 4 elements.
				newcap := v.Cap() + v.Cap()/2
				if newcap < 4 {
					newcap = 4
				}
				newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
				reflect.Copy(newv, v)
				v.Set(newv)
			}
			if i >= v.Len() {
				v.SetLen(i + 1)
			}
		}
		if i < v.Len() {
			if err := d.decodeTo(v.Index(i)); err != nil {
				return err
			}
		}
		i++
	}
	if err := d.r.StepOut(); err != nil {
		return err
	}
	if i < v.Len() {
		if v.Kind() == reflect.Array {
			// Zero out any additional values.
			z := reflect.Zero(v.Type().Elem())
			for ; i < v.Len(); i++ {
				v.Index(i).Set(z)
			}
		} else {
			v.SetLen(i)
		}
	}
	return nil
}
// Dig in through any pointers to find the actual underlying value that we want
// to set. If wantPtr is false, the algorithm terminates at a non-ptr value (e.g.,
// if passed an *int, it returns the int it points to, allocating such an int if the
// pointer is currently nil). If wantPtr is true, it terminates on a pointer to that
// value (allowing said pointer to be set to nil, generally).
func indirect(v reflect.Value, wantPtr bool) reflect.Value {
	for {
		// Unwrap a non-nil interface holding a usable pointer.
		if v.Kind() == reflect.Interface && !v.IsNil() {
			e := v.Elem()
			if e.Kind() == reflect.Ptr && !e.IsNil() && (!wantPtr || e.Elem().Kind() == reflect.Ptr) {
				v = e
				continue
			}
		}
		if v.Kind() != reflect.Ptr {
			break
		}
		// When a settable pointer-to-non-pointer is wanted, stop here so
		// the caller can nil it out.
		if v.Elem().Kind() != reflect.Ptr && wantPtr && v.CanSet() {
			break
		}
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		v = v.Elem()
	}
	return v
}
|
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fifo
import "gvisor.dev/gvisor/pkg/tcpip/stack"
// packetBufferCircularList is a slice-backed circular list. All operations are
// O(1) unless otherwise noted. It only allocates once, during the call to
// init().
//
// Users should call init() before using packetBufferCircularList.
//
// +stateify savable
type packetBufferCircularList struct {
	pbs  []stack.PacketBufferPtr // fixed-capacity backing storage
	head int                     // index of the first (oldest) element
	size int                     // number of elements currently stored
}
// init initializes the list with the given size.
//
// The list starts empty; head and size keep their zero values.
func (pl *packetBufferCircularList) init(size int) {
	pl.pbs = make([]stack.PacketBufferPtr, size)
}
// length returns the number of elements in the list.
//
//go:nosplit
func (pl *packetBufferCircularList) length() int {
	return pl.size
}
// hasSpace returns whether there is space left in the list.
//
//go:nosplit
func (pl *packetBufferCircularList) hasSpace() bool {
	// Capacity is fixed at init time to len(pl.pbs).
	return pl.size < len(pl.pbs)
}
// isEmpty returns whether the list is empty.
//
//go:nosplit
func (pl *packetBufferCircularList) isEmpty() bool {
	return pl.size == 0
}
// pushBack inserts the PacketBuffer at the end of the list.
//
// Users must check beforehand that there is space via a call to hasSpace().
// Failing to do so may clobber existing entries.
//
//go:nosplit
func (pl *packetBufferCircularList) pushBack(pb stack.PacketBufferPtr) {
	// The slot after the last element, wrapping around the backing slice.
	next := (pl.head + pl.size) % len(pl.pbs)
	pl.pbs[next] = pb
	pl.size++
}
// removeFront returns the first element of the list or nil.
//
//go:nosplit
func (pl *packetBufferCircularList) removeFront() stack.PacketBufferPtr {
	if pl.isEmpty() {
		return nil
	}
	ret := pl.pbs[pl.head]
	// Drop our reference so the list does not retain the removed buffer.
	pl.pbs[pl.head] = nil
	pl.head = (pl.head + 1) % len(pl.pbs)
	pl.size--
	return ret
}
// decRef decreases the reference count on each stack.PacketBuffer stored in
// the list.
//
// NOTE: runs in O(n) time.
//
//go:nosplit
func (pl *packetBufferCircularList) decRef() {
	// Walk only the occupied slots, starting at head and wrapping around.
	for i := 0; i < pl.size; i++ {
		pl.pbs[(pl.head+i)%len(pl.pbs)].DecRef()
	}
}
|
package main
import (
"fmt"
)
/*
Задача 4. Три числа: еще попытка
Напишите программу, которая запрашивает у пользователя три числа и выводит количество чисел, которые больше, либо равны 5.
*/
// main reads three integers from the user and reports how many of them are
// greater than or equal to 5 (per the task statement above).
func main() {
	const (
		cntNumber     = 3 // how many numbers to read
		controlNumber = 5 // threshold, inclusive
	)
	arr := make([]int, cntNumber)
	fmt.Println("Программа Три числа")
	for i := 0; i < cntNumber; i++ {
		fmt.Printf("Введите %v число:\n", i+1)
		fmt.Scan(&arr[i])
	}
	// The original code compared with ">", but the task asks for numbers
	// greater than OR EQUAL to 5; it also summed a never-assigned
	// examScore into an unused total and mutated the loop index for
	// display — all removed here.
	cnt := 0
	for i, n := range arr {
		if n >= controlNumber {
			fmt.Printf("Число %v, которое ввели %v, больше либо равно 5:\n", n, i+1)
			cnt++
		}
	}
	// Pick the grammatically-correct Russian plural form for the count.
	// With only three inputs cnt can never exceed 4; the branch is kept
	// for symmetry with the original.
	if cnt == 1 {
		fmt.Printf("Вы ввели %v число больше либо равное 5:\n", cnt)
	} else if cnt > 1 && cnt < 5 {
		fmt.Printf("Вы ввели %v числа больше либо равных 5:\n", cnt)
	} else if cnt > 4 {
		fmt.Printf("Вы ввели %v чисел больше либо равных 5:\n", cnt)
	} else {
		fmt.Printf("Среди введённых чисел %v нет ни одного больше либо равного 5:\n", arr)
	}
}
|
package wechat
import "time"
type Config struct {
AppID string `json:"appid"`
SecretKey string `json:"secret_key"`
Timeout int `json:"timeout"`
}
func (cfg *Config) TimeoutDuration() time.Duration {
timeout := cfg.Timeout
if timeout == 0 {
timeout = 30
}
return time.Duration(timeout) * time.Second
}
|
package http
import (
"log"
"strings"
)
// Request is a parsed HTTP request: method, version, URI, headers, content
// length, and the raw body.
type Request struct {
	// Parsed HTTP method.
	method http_method
	// Parsed HTTP version.
	version http_version
	// Raw method token as received.
	method_raw string
	// Raw version token as received.
	version_raw string
	uri         string
	port        int
	headers     *http_headers
	content_length int
	// Raw packet body.
	body string
}
// newRequest allocates a Request with its parse-time defaults: unknown
// version, no headers, and content_length of -1 ("not yet parsed").
func newRequest() *Request {
	var req Request
	// The original code first set content_length to 0 and immediately
	// overwrote it with -1; the dead store is removed.
	req.content_length = -1
	req.version = HTTP_VERSION_UNKNOWN
	req.headers = newHeaders()
	return &req
}
// parse parses the raw request bytes in con.recv_buf into req: request line
// (method, URI, version), then headers, then body. Malformed input sets an
// HTTP error status code (400/501) on the connection.
func (req *Request) parse(con *Connection) {
	buf := con.recv_buf
	req.method_raw, buf = match_until(buf, " ")
	log.Println("@application http: header parse method_raw:", req.method_raw)
	if req.method_raw == "" {
		con.status_code = 400
		return
	}
	// Resolve the HTTP method.
	req.method = get_method(req.method_raw)
	log.Println("@application http: header parse method:", req.method)
	if req.method == HTTP_METHOD_NOT_SUPPORTED {
		con.set_status_code(501)
	} else if req.method == HTTP_METHOD_UNKNOWN {
		con.status_code = 400
	}
	// Extract the URI.
	req.uri, buf = match_until(buf, " ")
	log.Println("@application http: header parse uri:", req.uri)
	if req.uri == "" {
		con.status_code = 400
	}
	/*
	 * Check whether the requested resource exists on the server
	 * (currently disabled).
	 */
	// if (resolve_uri(con.real_path, serv.conf.doc_root, req.uri) == -1) {
	//	try_set_status(con, 404);
	// }
	// HTTP/0.9 has no version token or headers; stop immediately.
	if req.version == HTTP_VERSION_09 {
		con.set_status_code(200)
		req.version_raw = ""
		return
	}
	// Extract the HTTP version.
	req.version_raw, buf = match_until(buf, "\r\n")
	log.Println("@application http: header parse version_raw:", req.version_raw)
	if req.version_raw == "" {
		con.status_code = 400
		return
	}
	// Only HTTP/1.0 and HTTP/1.1 are supported.
	if strings.EqualFold(req.version_raw, "HTTP/1.0") {
		req.version = HTTP_VERSION_10
	} else if strings.EqualFold(req.version_raw, "HTTP/1.1") {
		req.version = HTTP_VERSION_11
	} else {
		con.set_status_code(400)
	}
	log.Println("@application http: header parse version:", req.version)
	log.Println("@application http: header parse status_code:", con.status_code)
	//if con.status_code > 0 {
	//	return
	//}
	// Parse the request header lines ("Key: Value\r\n") until one side of
	// a pair is missing.
	p := buf
	key, value, tmp := "", "", ""
	for p != "" {
		if key, tmp = match_until(p, ": "); key != "" {
			p = tmp
		}
		if value, tmp = match_until(p, "\r\n"); value != "" {
			p = tmp
		}
		if key == "" || value == "" {
			break
		}
		req.headers.http_headers_add(key, value)
	}
	// Whatever remains is the body.
	req.body = p
}
// GetMethod returns the raw method token (e.g. "GET") as received.
func (req *Request) GetMethod() string {
	return req.method_raw
}
// GetHeader returns the value of header h, or "" when the header is absent.
func (req *Request) GetHeader(h string) string {
	// Single comma-ok lookup instead of the original existence check
	// followed by a second fetch of the same key.
	if v, exist := req.headers.ptr[h]; exist {
		return v
	}
	return ""
}
// GetBody returns the raw request body (the GET/POST payload).
func (req *Request) GetBody() string {
	return req.body
}
|
package main
import "github.com/vferko/golang-brno/db"
// Todo models one row of the todo table (id, description, is_done).
type Todo struct {
	Id          int    // row identifier
	Description string // task text
	IsDone      bool   // completion flag
}
// GetAll returns every todo stored in the database.
// Panics on any database error, matching the rest of this file.
func GetAll() []Todo {
	conn := db.GetDB()
	// The original query ("SELECT * FROM") was missing the table name and
	// failed on every call; SaveTodo below writes to the "todo" table.
	rows, err := conn.Query("SELECT * FROM todo")
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	var todoList []Todo
	for rows.Next() {
		todo := Todo{} // same as `var todo Todo`
		err := rows.Scan(&todo.Id, &todo.Description, &todo.IsDone)
		if err != nil {
			panic(err)
		}
		todoList = append(todoList, todo)
	}
	return todoList
}
// GetTodo returns a fixed sample todo item.
func GetTodo() Todo {
	sample := Todo{Id: 1, Description: "Golang workshop", IsDone: false}
	return sample
}
// SaveTodo inserts the given todo into the todo table.
// Panics on any database error, matching the rest of this file.
func SaveTodo(todo Todo) {
	conn := db.GetDB()
	// Bind the placeholder values: the original call passed no arguments
	// for $1/$2, so the INSERT always failed.
	_, err := conn.Exec("INSERT INTO todo(description, is_done) VALUES($1,$2)", todo.Description, todo.IsDone)
	if err != nil {
		panic(err)
	}
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package dlp
import (
"context"
"net/http"
"net/http/httptest"
"net/url"
"time"
"chromiumos/tast/common/fixture"
"chromiumos/tast/common/policy/fakedms"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/bundles/cros/dlp/clipboard"
"chromiumos/tast/local/bundles/cros/dlp/dragdrop"
"chromiumos/tast/local/bundles/cros/dlp/policy"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
"chromiumos/tast/local/chrome/display"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/webutil"
"chromiumos/tast/local/input"
"chromiumos/tast/local/policyutil"
"chromiumos/tast/testing"
)
// init registers the test with the tast framework, in both an Ash and a
// Lacros browser variant sharing the same test body.
func init() {
	testing.AddTest(&testing.Test{
		Func:         DataLeakPreventionRulesListDragdrop,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Test behavior of DataLeakPreventionRulesList policy with clipboard blocked restriction by drag and drop",
		Contacts: []string{
			"ayaelattar@google.com",
			"chromeos-dlp@google.com",
		},
		SoftwareDeps: []string{"chrome"},
		Attr:         []string{"group:mainline", "informational"},
		Data:         []string{"text_1.html", "text_2.html", "editable_text_box.html"},
		Params: []testing.Param{{
			Fixture: fixture.ChromePolicyLoggedIn,
			Val:     browser.TypeAsh,
		}, {
			Name:              "lacros",
			ExtraSoftwareDeps: []string{"lacros"},
			Fixture:           fixture.LacrosPolicyLoggedIn,
			Val:               browser.TypeLacros,
		}}})
}
// DataLeakPreventionRulesListDragdrop verifies that the
// DataLeakPreventionRulesList policy blocks (or allows) dragging selected
// text from a source page into an editable page, and that the expected
// clipboard-restriction notification bubble is shown.
func DataLeakPreventionRulesListDragdrop(ctx context.Context, s *testing.State) {
	// Reserve time for various cleanup.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	fakeDMS := s.FixtValue().(fakedms.HasFakeDMS).FakeDMS()
	// Three servers: an allowed drag source, a blocked drag source, and a
	// drop destination; the DLP policy references the latter two.
	allowedServer := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer allowedServer.Close()
	blockedServer := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer blockedServer.Close()
	dstServer := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer dstServer.Close()
	if err := policyutil.ServeAndVerify(ctx, fakeDMS, cr, policy.PopulateDLPPolicyForClipboard(blockedServer.URL, dstServer.URL)); err != nil {
		s.Fatal("Failed to serve and verify the DLP policy: ", err)
	}
	// Connect to Test API.
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to connect to test API: ", err)
	}
	// Sets the display zoom factor to minimum, to ensure that the work area
	// length is at least twice the minimum length of a browser window, so that
	// browser windows can be snapped in split view.
	info, err := display.GetPrimaryInfo(ctx, tconn)
	if err != nil {
		s.Fatal("Failed to get the primary display info: ", err)
	}
	zoomInitial := info.DisplayZoomFactor
	zoomMin := info.AvailableDisplayZoomFactors[0]
	if err := display.SetDisplayProperties(ctx, tconn, info.ID, display.DisplayProperties{DisplayZoomFactor: &zoomMin}); err != nil {
		s.Fatalf("Failed to set display zoom factor to minimum %f: %v", zoomMin, err)
	}
	defer display.SetDisplayProperties(cleanupCtx, tconn, info.ID, display.DisplayProperties{DisplayZoomFactor: &zoomInitial})
	keyboard, err := input.VirtualKeyboard(ctx)
	if err != nil {
		s.Fatal("Failed to get keyboard: ", err)
	}
	defer keyboard.Close()
	defer ash.SetOverviewModeAndWait(cleanupCtx, tconn, false)
	// One subtest per source: a blocked source (drop must be prevented and
	// a notification shown) and an allowed source (drop must succeed).
	for _, param := range []struct {
		name        string
		wantAllowed bool
		srcURL      string
		content     string
	}{
		{
			name:        "dropBlocked",
			wantAllowed: false,
			srcURL:      blockedServer.URL + "/text_1.html",
			content:     "Sample text about random things.",
		},
		{
			name:        "dropAllowed",
			wantAllowed: true,
			srcURL:      allowedServer.URL + "/text_2.html",
			content:     "Here is a random piece of text for testing things.",
		},
	} {
		s.Run(ctx, param.name, func(ctx context.Context, s *testing.State) {
			if err := cr.ResetState(ctx); err != nil {
				s.Fatal("Failed to reset the Chrome: ", err)
			}
			br, closeBr, err := browserfixt.SetUp(ctx, cr, s.Param().(browser.Type))
			if err != nil {
				s.Fatal("Failed to open the destination browser: ", err)
			}
			defer closeBr(cleanupCtx)
			dstURL := dstServer.URL + "/editable_text_box.html"
			dstConn, err := br.NewConn(ctx, dstURL)
			if err != nil {
				s.Fatalf("Failed to open page %q: %v", dstURL, err)
			}
			defer dstConn.Close()
			if err := webutil.WaitForQuiescence(ctx, dstConn, 10*time.Second); err != nil {
				s.Fatalf("Failed to wait for %q to achieve quiescence: %v", dstURL, err)
			}
			srcConn, err := br.NewConn(ctx, param.srcURL, browser.WithNewWindow())
			if err != nil {
				s.Fatalf("Failed to open page %q: %v", param.srcURL, err)
			}
			defer srcConn.Close()
			defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "ui_tree_"+param.name)
			if err := webutil.WaitForQuiescence(ctx, srcConn, 10*time.Second); err != nil {
				s.Fatalf("Failed to wait for %q to achieve quiescence: %v", param.srcURL, err)
			}
			if err := ash.SetOverviewModeAndWait(ctx, tconn, true); err != nil {
				s.Fatal("Failed to enter into the overview mode: ", err)
			}
			// Snap the param.srcURL window to the right.
			w1, err := ash.FindFirstWindowInOverview(ctx, tconn)
			if err != nil {
				s.Fatalf("Failed to find the %s window in the overview mode: %s", param.srcURL, err)
			}
			if err := ash.SetWindowStateAndWait(ctx, tconn, w1.ID, ash.WindowStateRightSnapped); err != nil {
				s.Fatalf("Failed to snap the %s window to the right: %s", param.srcURL, err)
			}
			// Snap the destination window to the left.
			w2, err := ash.FindFirstWindowInOverview(ctx, tconn)
			if err != nil {
				s.Fatalf("Failed to find the %s window in the overview mode: %s", dstURL, err)
			}
			if err := ash.SetWindowStateAndWait(ctx, tconn, w2.ID, ash.WindowStateLeftSnapped); err != nil {
				s.Fatalf("Failed to snap the %s window to the left: %s", dstURL, err)
			}
			// Activate the drag source (param.srcURL) window.
			if err := w1.ActivateWindow(ctx, tconn); err != nil {
				s.Fatalf("Failed to activate the %s window: %s", param.srcURL, err)
			}
			if err = keyboard.Accel(ctx, "Ctrl+A"); err != nil {
				s.Fatal("Failed to press Ctrl+A to select all content: ", err)
			}
			s.Log("Draging and dropping content")
			if err := dragdrop.DragDrop(ctx, tconn, param.content); err != nil {
				s.Error("Failed to drag drop content: ", err)
			}
			s.Log("Checking notification")
			ui := uiauto.New(tconn)
			// Verify notification bubble.
			parsedSrcURL, err := url.Parse(blockedServer.URL)
			if err != nil {
				s.Fatalf("Failed to parse blocked server URL %s: %s", blockedServer.URL, err)
			}
			// notifError is nil iff the restriction bubble was found.
			notifError := clipboard.CheckClipboardBubble(ctx, ui, parsedSrcURL.Hostname())
			if !param.wantAllowed && notifError != nil {
				s.Error("Expected notification but found an error: ", notifError)
			}
			if param.wantAllowed && notifError == nil {
				s.Error("Didn't expect notification but one was found: ")
			}
			// Check dropped content.
			dropError := dragdrop.CheckDraggedContent(ctx, ui, param.content)
			if param.wantAllowed && dropError != nil {
				s.Error("Checked pasted content but found an error: ", dropError)
			}
			if !param.wantAllowed && dropError == nil {
				s.Error("Content was pasted but should have been blocked")
			}
		})
	}
}
|
package solutions
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// ListNode is a node of a singly-linked list of digits.
type ListNode struct {
	Val  int
	Next *ListNode
}

// addTwoNumbers adds two non-negative integers stored digit-by-digit
// (least-significant digit first) in linked lists and returns the sum in
// the same representation. Like the original, it always emits at least one
// digit, so two empty inputs yield a single zero node.
func addTwoNumbers(l1 *ListNode, l2 *ListNode) *ListNode {
	sentinel := &ListNode{}
	cur := sentinel
	carry := 0
	for {
		digit := carry
		if l1 != nil {
			digit += l1.Val
			l1 = l1.Next
		}
		if l2 != nil {
			digit += l2.Val
			l2 = l2.Next
		}
		// digit is at most 9+9+1, so one division extracts the carry.
		carry = digit / 10
		cur.Next = &ListNode{Val: digit % 10}
		cur = cur.Next
		if l1 == nil && l2 == nil && carry == 0 {
			break
		}
	}
	return sentinel.Next
}
|
// Copyright (c) 2018-present, MultiVAC Foundation.
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
package wire
import (
"encoding/gob"
"io"
"github.com/multivactech/MultiVAC/model/chaincfg/multivacaddress"
"github.com/multivactech/MultiVAC/model/shard"
)
// MsgFetchDeposit is a message to request deposit info from storage node.
type MsgFetchDeposit struct {
	// ShardIndex identifies the shard the deposit info is requested from.
	ShardIndex shard.Index
	// Address is the address whose deposit info is requested.
	Address multivacaddress.Address
}
// BtcDecode decodes the message from r using gob encoding.
// pver and enc are unused by this gob-based implementation.
func (msg *MsgFetchDeposit) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error {
	return gob.NewDecoder(r).Decode(msg)
}
// BtcEncode writes the receiver to w using gob encoding.
// pver and enc are unused by this gob-based implementation.
func (msg *MsgFetchDeposit) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error {
	return gob.NewEncoder(w).Encode(*msg)
}
// Command returns the protocol command string for the message.
// The value is the package-level CmdFetchDeposit constant.
func (msg *MsgFetchDeposit) Command() string {
	return CmdFetchDeposit
}
// MaxPayloadLength returns the maximum length the payload can be for the
// receiver.
func (msg *MsgFetchDeposit) MaxPayloadLength(pver uint32) uint32 {
	// 10 KiB. In theory this message is very small.
	const maxFetchDepositPayload = 10240
	return maxFetchDepositPayload
}
// GetShardIndex returns the shard index this request targets.
func (msg *MsgFetchDeposit) GetShardIndex() shard.Index {
	return msg.ShardIndex
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package login
import (
"context"
"time"
"unicode/utf8"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/lockscreen"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
)
// clearTimeout is the duration of inactivity after which the password input
// field should be cleared; the test later waits this long plus a small
// margin of error.
const clearTimeout = 30 * time.Second
// init registers ClearPasswordAfterInactivity with the tast test framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: ClearPasswordAfterInactivity,
		// Description fixed: previously contained a duplicated word ("that that").
		Desc: "Check that the password input field on the signin screen is cleared after inactivity",
		Contacts: []string{
			"mbid@google.com",
			"cros-lurs@google.com",
			"chromeos-sw-engprod@google.com",
		},
		SoftwareDeps: []string{"chrome"},
		Attr:         []string{"group:mainline", "informational"},
		VarDeps:      []string{"ui.signinProfileTestExtensionManifestKey"},
		// Two Chrome sessions (user creation + signin screen) plus the clear
		// timeout, with an extra minute of slack.
		Timeout:      2*chrome.LoginTimeout + clearTimeout + time.Minute,
		LacrosStatus: testing.LacrosVariantUnneeded,
	})
}
// ClearPasswordAfterInactivity checks that a partially typed password on the
// signin screen is cleared automatically after clearTimeout of inactivity.
func ClearPasswordAfterInactivity(ctx context.Context, s *testing.State) {
	// Keep the unshortened context for cleanup actions; shorten the working
	// context so cleanup still has time to run.
	cleanUpCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 20*time.Second)
	defer cancel()
	// Setup: Create user and save creds.
	creds := func(ctx context.Context) chrome.Creds {
		cr, err := chrome.New(ctx)
		if err != nil {
			s.Fatal("Chrome login failed: ", err)
		}
		defer cr.Close(ctx)
		return cr.Creds()
	}(ctx)
	// chrome.NoLogin() and chrome.KeepState() are needed to show the login screen with a user pod
	// (instead of the OOBE login screen).
	cr, err := chrome.New(ctx,
		chrome.ExtraArgs("--skip-force-online-signin-for-testing"),
		chrome.NoLogin(),
		chrome.KeepState(),
		chrome.LoadSigninProfileExtension(s.RequiredVar("ui.signinProfileTestExtensionManifestKey")),
	)
	if err != nil {
		s.Fatal("Failed to start Chrome: ", err)
	}
	defer cr.Close(cleanUpCtx)
	tconn, err := cr.SigninProfileTestAPIConn(ctx)
	if err != nil {
		s.Fatal("Creating login test API connection failed: ", err)
	}
	defer faillog.DumpUITreeOnError(cleanUpCtx, s.OutDir(), s.HasError, tconn)
	// Wait for the login screen to be ready for password entry.
	readyForPassword := func(st lockscreen.State) bool { return st.ReadyForPassword }
	if _, err := lockscreen.WaitState(ctx, tconn, readyForPassword, 30*time.Second); err != nil {
		s.Fatal("Failed to wait for login screen: ", err)
	}
	kb, err := input.Keyboard(ctx)
	if err != nil {
		s.Fatal("Failed to get keyboard: ", err)
	}
	defer kb.Close()
	// Type only part of the password so nothing gets submitted.
	const partialPassword = "abcd"
	if err := lockscreen.TypePassword(ctx, tconn, creds.User, partialPassword, kb); err != nil {
		s.Fatal("Failed to type password: ", err)
	}
	passwordValue, err := readPasswordValue(ctx, tconn, creds.User)
	if err != nil {
		s.Fatal("Failed to read entered password field: ", err)
	}
	// We have to use RuneCount instead of len because password is concealed and all characters are
	// replaced by the bullet character, which is more than one byte in utf8. We can use `len` for
	// `partialPassword` because it's ASCII.
	if utf8.RuneCountInString(passwordValue) != len(partialPassword) {
		s.Fatal("Failed to verify value of password field: ", passwordValue)
	}
	// Wait until password is cleared. We allow some margin of error.
	const clearTimeoutErrorMargin = 3 * time.Second
	if err := testing.Sleep(ctx, clearTimeout+clearTimeoutErrorMargin); err != nil {
		s.Fatal("Failed to sleep: ", err)
	}
	passwordValue, err = readPasswordValue(ctx, tconn, creds.User)
	if err != nil {
		s.Fatal("Failed to read entered password field: ", err)
	}
	// An empty value means the field was cleared as expected.
	if passwordValue != "" {
		s.Fatal("Password field not cleared after inactivity: ", passwordValue)
	}
}
// readPasswordValue finds the password input field on the lockscreen for the
// given user and returns the field's current value.
func readPasswordValue(ctx context.Context, tconn *chrome.TestConn, user string) (string, error) {
	// Locate the (non-PIN) password field for the user pod.
	info, err := lockscreen.UserPassword(ctx, tconn, user, false /* UsePIN */)
	if err != nil {
		return "", errors.Wrap(err, "failed to find password field")
	}
	return info.Value, nil
}
|
package auth
import (
"../models"
"../repos/session"
"../repos/user"
)
// Auth is implemented by components that provide basic authentication
// operations on top of a user repository and a session repository.
type Auth interface {
	// Init supplies the user and session repositories the implementation uses.
	Init(ur user.Repo, sr session.Repo) error
	// Login checks the login/password pair and returns a session on success.
	Login(login string, pass string) (*models.Session, error)
	// Logout invalidates the session with the given id.
	Logout(id string) error
	// CheckSession returns the session with the given id, if it exists.
	CheckSession(id string) (*models.Session, error)
	// NewUser registers the given user and returns a session for it.
	NewUser(*models.User) (*models.Session, error)
	// DeleteUser removes the user identified by login.
	DeleteUser(login string) error
}
|
package main
import (
"github.com/cloudfoundry/buildpacks-ci/tasks/cnb/helpers"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
)
// UpdateOrders sets the version of every group entry whose ID matches dep.ID
// to dep.Version, across all orders, and returns the (mutated) slice.
func UpdateOrders(orders []helpers.Order, dep helpers.Dependency) []helpers.Order {
	for oi := range orders {
		for gi := range orders[oi].Group {
			if orders[oi].Group[gi].ID == dep.ID {
				orders[oi].Group[gi].Version = dep.Version
			}
		}
	}
	return orders
}
// UpdateDependenciesWith decodes the dependency list from buildpackTOML's
// metadata, merges in dep/newDeps (keeping versionsToKeep versions per line),
// writes the updated list back into the metadata, and returns both the
// original and the updated dependency lists.
//
// NOTE(review): relies on the package-level `flags.versionLine` (declared
// elsewhere in this package) — confirm it is initialized before this runs.
func UpdateDependenciesWith(buildpackTOML helpers.BuildpackTOML, dep helpers.Dependency, newDeps Dependencies, versionsToKeep int) (Dependencies, Dependencies, error) {
	var deps Dependencies
	err := mapstructure.Decode(buildpackTOML.Metadata[helpers.DependenciesKey], &deps)
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to decode dependencies")
	}
	updatedDeps, err := deps.Update(dep, newDeps, flags.versionLine, versionsToKeep)
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to add new dependencies to the dependencies list")
	}
	// Write the merged list back so the caller's TOML reflects the update.
	buildpackTOML.Metadata[helpers.DependenciesKey] = updatedDeps
	return deps, updatedDeps, nil
}
|
package main
import (
"math"
"math/rand"
)
//给定圆的半径和圆心的 x、y 坐标,写一个在圆中产生均匀随机点的函数 randPoint 。
//
//说明:
//
//输入值和输出值都将是浮点数。
//圆的半径和圆心的 x、y 坐标将作为参数传递给类的构造函数。
//圆周上的点也认为是在圆中。
//randPoint 返回一个包含随机点的x坐标和y坐标的大小为2的数组。
//示例 1:
//
//输入:
//["Solution","randPoint","randPoint","randPoint"]
//[[1,0,0],[],[],[]]
//输出: [null,[-0.72939,-0.65505],[-0.78502,-0.28626],[-0.83119,-0.19803]]
//示例 2:
//
//输入:
//["Solution","randPoint","randPoint","randPoint"]
//[[10,5,-7.5],[],[],[]]
//输出: [null,[11.52438,-8.33273],[2.46992,-16.21705],[11.13430,-12.42337]]
//输入语法说明:
//
//输入是两个列表:调用成员函数名和调用的参数。Solution 的构造函数有三个参数,圆的半径、圆心的 x 坐标、圆心的 y 坐标。randPoint 没有参数。输入参数是一个列表,即使参数为空,也会输入一个 [] 空列表。
// Solution produces uniformly distributed random points inside a circle.
type Solution struct {
	cx, cy float64 // circle center
	r      float64 // circle radius
}

// Constructor creates a Solution for the circle with the given radius and
// center coordinates.
func Constructor(radius float64, x_center float64, y_center float64) Solution {
	return Solution{r: radius, cx: x_center, cy: y_center}
}

// RandPoint returns a uniformly random point {x, y} inside (or on) the circle.
// It uses rejection sampling: draw from the bounding square until the sample
// lands within the circle. The loop replaces the original unbounded recursion
// (receiver also renamed from non-idiomatic `this`); each iteration accepts
// with probability pi/4, so it terminates quickly in expectation.
func (s *Solution) RandPoint() []float64 {
	for {
		x := getRand(-s.r, s.r)
		y := getRand(-s.r, s.r)
		if math.Sqrt(x*x+y*y) <= s.r {
			return []float64{s.cx + x, s.cy + y}
		}
	}
}

// getRand returns a uniformly distributed float64 in [min, max).
func getRand(min, max float64) float64 {
	return min + rand.Float64()*(max-min)
}
/**
* Your Solution object will be instantiated and called as such:
* obj := Constructor(radius, x_center, y_center);
* param_1 := obj.RandPoint();
*/
|
// Copyright 2021 Comcast Cable Communications Management, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package event_test
import (
"context"
"encoding/json"
"errors"
"github.com/xmidt-org/ears/internal/pkg/ack"
"github.com/xmidt-org/ears/pkg/event"
"io/ioutil"
"reflect"
"testing"
"time"
)
// TestEventBasic exercises the event constructor and the context/payload
// getters and setters.
func TestEventBasic(t *testing.T) {
	ctx := context.Background()
	payload := map[string]interface{}{
		"field1": "abcd",
		"field2": 1234,
	}
	e, err := event.New(ctx, payload)
	if err != nil {
		t.Errorf("Fail to create new event %s\n", err.Error())
	}
	if e.Context() == nil {
		t.Errorf("Fail to get context")
	}
	if !reflect.DeepEqual(e.Payload(), payload) {
		t.Errorf("Fail to match payload +%v +%v\n", e.Payload(), payload)
	}
	// Swap in a new context and payload and verify the setters took effect.
	ctx2, cancel := context.WithCancel(ctx)
	defer cancel()
	payload2 := map[string]interface{}{
		"field3": "efgh",
		"field4": 5678,
	}
	e.SetContext(ctx2)
	e.SetPayload(payload2)
	if e.Context() != ctx2 {
		t.Errorf("Fail to get context2")
	}
	if !reflect.DeepEqual(e.Payload(), payload2) {
		t.Errorf("Fail to match payload +%v +%v\n", e.Payload(), payload2)
	}
}
// TestEventGetPath exercises GetPathValue/SetPathValue with a variety of path
// expressions: plain keys, array indexing, key=value array selection, nested
// arrays, nested maps, and escaped dots inside keys.
func TestEventGetPath(t *testing.T) {
	ctx := context.Background()
	payload := map[string]interface{}{
		"field1": "abcd",
		"field2": 1234,
		"field3": []interface{}{"a", "b", "c"},
		"field4": []interface{}{map[string]interface{}{"a": "aa", "b": "bb", "c": "cc"}},
		"field5": []interface{}{[]interface{}{"a", "b", "c"}},
		"field10": map[string]interface{}{"a": map[string]interface{}{"key": "value", "foo": "bar"}, "b": "bb", "c": "cc"},
		"field11": map[string]interface{}{"a": map[string]interface{}{"key.with.dots": "value", "foo": "bar"}, "b": "bb", "c": "cc"},
	}
	e, err := event.New(ctx, payload)
	if err != nil {
		t.Errorf("Fail to create new event %s\n", err.Error())
	}
	if e.Context() == nil {
		t.Errorf("Fail to get context")
	}
	if !reflect.DeepEqual(e.Payload(), payload) {
		t.Errorf("Fail to match payload +%v +%v\n", e.Payload(), payload)
	}
	// Plain top-level key: check value, returned key, and that the parent is
	// the payload map itself.
	path := ".field1"
	v, p, k := e.GetPathValue(path)
	if v.(string) != "abcd" {
		t.Errorf("bad path value %s\n", path)
	}
	if k != "field1" {
		t.Errorf("bad path key %s\n", path)
	}
	if !reflect.DeepEqual(p, e.Payload()) {
		t.Errorf("bad path parent %s\n", path)
	}
	// Array element by index.
	path = ".field3[1]"
	v, p, k = e.GetPathValue(path)
	if v.(string) != "b" {
		t.Errorf("bad path value %s\n", path)
	}
	if k != "field3" {
		t.Errorf("bad path key %s\n", path)
	}
	if !reflect.DeepEqual(p, e.Payload()) {
		t.Errorf("bad path parent %s\n", path)
	}
	// Select the array element whose "a" equals "aa", then read its "b";
	// the parent should be that selected element.
	path = ".field4[a=aa].b"
	v, p, k = e.GetPathValue(path)
	if v.(string) != "bb" {
		t.Errorf("bad path value %s\n", path)
	}
	if k != "b" {
		t.Errorf("bad path key %s\n", path)
	}
	expected := e.Payload().(map[string]interface{})["field4"].([]interface{})[0]
	if !reflect.DeepEqual(p, expected) {
		t.Errorf("bad path parent %s\n", path)
	}
	// Nested array indexing.
	path = ".field5[0].[0]"
	v, _, _ = e.GetPathValue(path)
	if v.(string) != "a" {
		t.Errorf("bad path value %s\n", path)
	}
	// Create a new top-level key via SetPathValue (the third argument
	// presumably toggles creation of missing path segments — confirm in the
	// event package docs).
	e.SetPathValue(".field6", "foo", true)
	path = ".field6"
	v, _, _ = e.GetPathValue(path)
	if v.(string) != "foo" {
		t.Errorf("bad path value %s\n", path)
	}
	// Overwrite an existing array element.
	e.SetPathValue(".field3[1]", "baz", false)
	path = ".field3[1]"
	v, _, _ = e.GetPathValue(path)
	if v.(string) != "baz" {
		t.Errorf("bad path value %s\n", path)
	}
	// Set an element in a previously non-existent array.
	e.SetPathValue(".field7[0]", "x", true)
	path = ".field7[0]"
	v, _, _ = e.GetPathValue(path)
	if v.(string) != "x" {
		t.Errorf("bad path value %s\n", path)
	}
	// Traverse nested maps.
	path = ".field10.a.key"
	v, _, _ = e.GetPathValue(path)
	if v.(string) != "value" {
		t.Errorf("bad path value %s\n", path)
	}
	// Literal dots inside a key are escaped with backslashes.
	path = `.field11.a.key\.with\.dots`
	v, _, _ = e.GetPathValue(path)
	if v.(string) != "value" {
		t.Errorf("bad path value %s\n", path)
	}
}
// BenchmarkCloneEvent measures creating, cloning, deep-copying, and acking an
// event whose payload is loaded from the event.json fixture file.
func BenchmarkCloneEvent(b *testing.B) {
	ctx := context.Background()
	// Requires an event.json file in the working directory.
	buf, err := ioutil.ReadFile("event.json")
	if err != nil {
		b.Fatal(err)
	}
	b.Log("event size", len(buf), "test size", b.N)
	var payload map[string]interface{}
	err = json.Unmarshal(buf, &payload)
	if err != nil {
		b.Fatal(err)
	}
	for i := 0; i < b.N; i++ {
		e1, err := event.New(ctx, payload)
		if err != nil {
			b.Errorf("failed to create new event %s\n", err.Error())
		}
		e2, err := e1.Clone(ctx)
		if err != nil {
			b.Errorf("failed to clone new event %s\n", err.Error())
		}
		e2.DeepCopy()
		// Ack both events so each iteration fully completes its lifecycle.
		e2.Ack()
		e1.Ack()
	}
}
// TestCloneEvent verifies that Clone followed by DeepCopy yields a payload
// equal to, but fully independent from, the original event's payload.
func TestCloneEvent(t *testing.T) {
	ctx := context.Background()
	payload := map[string]interface{}{
		"field1": "abcd",
		"field2": 1234,
		"field3": map[string]interface{}{
			"field4": 1.02,
		},
	}
	e1, err := event.New(ctx, payload)
	if err != nil {
		t.Errorf("Fail to create new event %s\n", err.Error())
	}
	e2, err := e1.Clone(ctx)
	if err != nil {
		t.Errorf("Fail to clone new event %s\n", err.Error())
	}
	e2.DeepCopy()
	payload2, ok := e2.Payload().(map[string]interface{})
	if !ok {
		t.Error("Fail to cast payload to expected type")
	}
	if !reflect.DeepEqual(payload, payload2) {
		t.Errorf("payloads do not match +%v\n", payload2)
	}
	//validate that deep copy worked and that updating a payload does not
	//affect the other payload
	payload2["field1"] = "efgh"
	if payload["field1"] != "abcd" {
		t.Errorf("unexpected field1 value in payload +%v\n", payload["field1"])
	}
	payload["field2"] = 5678
	if payload2["field2"] != 1234 {
		t.Errorf("unexpected field2 value in payload2 +%v\n", payload2["field2"])
	}
	// Also check independence of the nested map, not just top-level keys.
	p1field3, ok := payload["field3"].(map[string]interface{})
	if !ok {
		t.Error("Fail to cast payload field3 to expected type")
	}
	p2field3, ok := payload2["field3"].(map[string]interface{})
	if !ok {
		t.Error("Fail to cast payload2 field3 to expected type")
	}
	p1field3["field4"] = 5.67
	if p2field3["field4"] != 1.02 {
		t.Errorf("unexpected field4 value in payload2 +%v\n", p2field3["field4"])
	}
}
// TestEventAck builds a tree of cloned events, acks all of them, and expects
// the completion callback to fire; it then checks that acked events reject
// further mutation and cloning with AlreadyAckedError.
func TestEventAck(t *testing.T) {
	ctx := context.Background()
	payload := map[string]interface{}{
		"field1": "abcd",
		"field2": 1234,
		"field3": map[string]interface{}{
			"field4": 1.02,
		},
	}
	// Signals that one of the two WithAck callbacks has run.
	done := make(chan bool)
	e1, err := event.New(ctx, payload, event.WithAck(
		func(evt event.Event) {
			if !reflect.DeepEqual(payload, evt.Payload()) {
				t.Errorf("Event payload does not match")
			}
			done <- true
		},
		func(evt event.Event, err error) {
			t.Errorf("Fail to receive all acknowledgements %s\n", err.Error())
			done <- true
		}))
	if err != nil {
		t.Errorf("Fail to create new event %s\n", err.Error())
	}
	// Clone a small tree: e2/e3 from e1, e4 from e2, e5/e6 from e4, e7 from e3.
	e2, err := e1.Clone(ctx)
	if err != nil {
		t.Errorf("Fail to clone event %s\n", err.Error())
	}
	e3, err := e1.Clone(ctx)
	if err != nil {
		t.Errorf("Fail to clone event %s\n", err.Error())
	}
	e4, err := e2.Clone(ctx)
	if err != nil {
		t.Errorf("Fail to clone event %s\n", err.Error())
	}
	e5, err := e4.Clone(ctx)
	if err != nil {
		t.Errorf("Fail to clone event %s\n", err.Error())
	}
	e6, err := e4.Clone(ctx)
	if err != nil {
		t.Errorf("Fail to clone event %s\n", err.Error())
	}
	e7, err := e3.Clone(ctx)
	if err != nil {
		t.Errorf("Fail to clone event %s\n", err.Error())
	}
	// Ack every event in the tree; only then should the success callback run.
	e1.Ack()
	e2.Ack()
	e3.Ack()
	e4.Ack()
	e5.Ack()
	e6.Ack()
	e7.Ack()
	<-done
	//make sure we cannot do anything to events that are already acked
	var ackedErr *ack.AlreadyAckedError
	err = e1.SetPayload("blah")
	if err == nil || !errors.As(err, &ackedErr) {
		t.Errorf("Expect AlreadyAckedError but get +%v\n", err)
	}
	err = e5.SetContext(ctx)
	if err == nil || !errors.As(err, &ackedErr) {
		t.Errorf("Expect AlreadyAckedError but get +%v\n", err)
	}
	_, err = e7.Clone(ctx)
	if err == nil || !errors.As(err, &ackedErr) {
		t.Errorf("Expect AlreadyAckedError but get +%v\n", err)
	}
}
// TestEventNack verifies that nacking any event in a clone group triggers the
// error callback with a NackError, even though a sibling clone was acked.
func TestEventNack(t *testing.T) {
	ctx := context.Background()
	payload := map[string]interface{}{
		"field1": "abcd",
		"field2": 1234,
		"field3": map[string]interface{}{
			"field4": 1.02,
		},
	}
	done := make(chan bool)
	e1, err := event.New(ctx, payload, event.WithAck(
		func(evt event.Event) {
			// Success callback must not fire when a clone was nacked.
			t.Errorf("Expect error function to be called")
			done <- true
		},
		func(evt event.Event, err error) {
			if !reflect.DeepEqual(payload, evt.Payload()) {
				t.Errorf("Event payload does not match")
			}
			var nackErr *ack.NackError
			if !errors.As(err, &nackErr) {
				t.Errorf("Expect nackError but get +%v\n", err)
			}
			done <- true
		}))
	if err != nil {
		t.Errorf("Fail to create a event %s\n", err.Error())
	}
	e2, err := e1.Clone(ctx)
	if err != nil {
		t.Errorf("Fail to clone event %s\n", err.Error())
	}
	e3, err := e1.Clone(ctx)
	if err != nil {
		t.Errorf("Fail to clone event %s\n", err.Error())
	}
	// One nack is enough to fail the whole group; e1 is never acked.
	e2.Nack(errors.New("error"))
	e3.Ack()
	<-done
}
// TestEventTimeout verifies that when the event's context expires before all
// clones are acked, the error callback receives a TimeoutError.
func TestEventTimeout(t *testing.T) {
	ctx := context.Background()
	payload := map[string]interface{}{
		"field1": "abcd",
		"field2": 1234,
		"field3": map[string]interface{}{
			"field4": 1.02,
		},
	}
	// Short deadline so the unacked clone times out quickly.
	ctx, cancel := context.WithTimeout(ctx, time.Millisecond*500)
	defer cancel()
	done := make(chan bool)
	e1, err := event.New(ctx, payload, event.WithAck(
		func(evt event.Event) {
			t.Errorf("Expect error function to be called")
			done <- true
		},
		func(evt event.Event, err error) {
			if !reflect.DeepEqual(payload, evt.Payload()) {
				t.Errorf("Event payload does not match")
			}
			var toErr *ack.TimeoutError
			if !errors.As(err, &toErr) {
				t.Errorf("Expect toErr but get +%v\n", err)
			}
			done <- true
		}))
	if err != nil {
		t.Errorf("Fail to create a event %s\n", err.Error())
	}
	// Clone but never ack, so the deadline fires the error callback.
	_, err = e1.Clone(ctx)
	if err != nil {
		t.Errorf("Fail to clone event %s\n", err.Error())
	}
	<-done
}
|
package service
import (
"bytes"
"encoding/json"
"fmt"
login "github.com/carprks/login/service"
permissions "github.com/carprks/permissions/service"
"io/ioutil"
"net/http"
"os"
"time"
)
// LoginObject pairs an authenticated identifier with the permissions
// retrieved for it; it is serialized as the JSON response of LoginHandler.
type LoginObject struct {
	// Identifier of the logged-in user, as returned by the login service.
	Identifier string `json:"identifier"`
	// Permissions retrieved for the identifier from the permissions service.
	Permissions []permissions.Permission `json:"permissions"`
}
// LoginHandler unmarshals a login request from body, performs the login, and
// returns the resulting LoginObject serialized as JSON.
func LoginHandler(body string) (string, error) {
	req := login.LoginRequest{}
	if err := json.Unmarshal([]byte(body), &req); err != nil {
		fmt.Println(fmt.Sprintf("can't unmarshall login: %v, %v", err, body))
		return "", fmt.Errorf("can't unmarshall login: %w", err)
	}
	obj, err := Login(req)
	if err != nil {
		fmt.Println(fmt.Sprintf("can't get login: %v, %v", err, req))
		return "", fmt.Errorf("can't get login: %w", err)
	}
	encoded, err := json.Marshal(obj)
	if err != nil {
		fmt.Println(fmt.Sprintf("can't marshall login: %v, %v", err, obj))
		return "", fmt.Errorf("can't marshall login: %w", err)
	}
	return string(encoded), nil
}
// Login resolves the user for the request via the login service, then fetches
// that user's permissions, combining both into a LoginObject.
func Login(l login.LoginRequest) (LoginObject, error) {
	user, err := LoginUser(l)
	if err != nil {
		fmt.Println(fmt.Sprintf("can't get login for user: %v, %v", err, l))
		return LoginObject{}, fmt.Errorf("can't get login for user: %w", err)
	}
	perms, err := LoginPermissions(user)
	if err != nil {
		fmt.Println(fmt.Sprintf("can't get permissions for user: %v, %v", err, user))
		return LoginObject{}, fmt.Errorf("can't get permissions for user: %w", err)
	}
	return LoginObject{
		Identifier:  user.Identifier,
		Permissions: perms,
	}, nil
}
// LoginUser posts the login request to the login service (SERVICE_LOGIN) and
// decodes the response into a login.Login. A non-200 status or a non-empty
// Error field in the response body is returned as an error.
func LoginUser(l login.LoginRequest) (login.Login, error) {
	lr := login.Login{}
	j, err := json.Marshal(&l)
	if err != nil {
		// Message fixed: this branch fails on marshalling the request
		// (it previously logged "an't unmarshall login").
		fmt.Println(fmt.Sprintf("can't marshal login: %v, %v", err, l))
		return lr, fmt.Errorf("can't marshal login: %w", err)
	}
	req, err := http.NewRequest("POST", fmt.Sprintf("%s/login", os.Getenv("SERVICE_LOGIN")), bytes.NewBuffer(j))
	if err != nil {
		fmt.Println(fmt.Sprintf("req err: %v", err))
		return lr, fmt.Errorf("login user req err: %w", err)
	}
	req.Header.Set("X-Authorization", os.Getenv("AUTH_LOGIN"))
	req.Header.Set("Content-Type", "application/json")
	// NOTE(review): building a fresh client/transport per call defeats
	// connection reuse; consider sharing a package-level http.Client.
	client := &http.Client{
		Timeout: time.Second * 10,
		Transport: &http.Transport{
			MaxIdleConns:        100,
			MaxIdleConnsPerHost: 100,
			IdleConnTimeout:     2 * time.Minute,
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println(fmt.Sprintf("login client err: %v", err))
		return lr, fmt.Errorf("login client err: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode == 200 {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			fmt.Println(fmt.Sprintf("login resp err: %v", err))
			return lr, fmt.Errorf("login resp err: %w", err)
		}
		err = json.Unmarshal(body, &lr)
		if err != nil {
			fmt.Println(fmt.Sprintf("can't unmarshal login service: %v, %v", err, string(body)))
			return lr, fmt.Errorf("can't unmarshal login service: %w", err)
		}
		// The service reports application-level failures in the Error field.
		if lr.Error != "" {
			return lr, fmt.Errorf("login response err: %v", lr.Error)
		}
		return lr, nil
	}
	return lr, fmt.Errorf("login came back with a different statuscode: %v", resp.StatusCode)
}
// LoginPermissions posts the user's identifier to the permissions service
// (SERVICE_PERMISSIONS /retrieve) and returns the permissions decoded from a
// 200 response.
func LoginPermissions(l login.Login) ([]permissions.Permission, error) {
	p := permissions.Permissions{
		Identifier: l.Identifier,
	}
	j, err := json.Marshal(&p)
	if err != nil {
		return []permissions.Permission{}, err
	}
	req, err := http.NewRequest("POST", fmt.Sprintf("%s/retrieve", os.Getenv("SERVICE_PERMISSIONS")), bytes.NewBuffer(j))
	if err != nil {
		fmt.Println(fmt.Sprintf("req err: %v", err))
		return p.Permissions, fmt.Errorf("permissions req err: %w", err)
	}
	req.Header.Set("X-Authorization", os.Getenv("AUTH_PERMISSIONS"))
	req.Header.Set("Content-Type", "application/json")
	// NOTE(review): building a fresh client/transport per call defeats
	// connection reuse; consider sharing a package-level http.Client.
	client := &http.Client{
		Timeout: time.Second * 10,
		Transport: &http.Transport{
			MaxIdleConns:        100,
			MaxIdleConnsPerHost: 100,
			IdleConnTimeout:     2 * time.Minute,
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println(fmt.Sprintf("permissions client err: %v", err))
		return p.Permissions, fmt.Errorf("permissions client err: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode == 200 {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			fmt.Println(fmt.Sprintf("permissions resp err: %v", err))
			return p.Permissions, fmt.Errorf("permissions resp err: %w", err)
		}
		err = json.Unmarshal(body, &p)
		if err != nil {
			fmt.Println(fmt.Sprintf("can't unmarshall permissions body: %v, %v", err, string(body)))
			return p.Permissions, fmt.Errorf("can't unmarshall permissions body: %w", err)
		}
		// A non-empty Status field signals a service-side failure.
		if p.Status != "" {
			return p.Permissions, fmt.Errorf("permissions status err: %v", p.Status)
		}
		return p.Permissions, nil
	}
	return p.Permissions, fmt.Errorf("permissions came back with different statuscode: %v", resp.StatusCode)
}
|
package remote
import (
"fmt"
"strings"
)
// TagImpl wraps a raw ORM field tag of the form "name [key] [auto]",
// where the optional "key"/"auto" markers occupy token positions 1 and 2.
type TagImpl struct {
	Value string `json:"value"`
}

// GetName returns the first space-separated token of the tag (the field name).
func (s *TagImpl) GetName() (ret string) {
	items := strings.Split(s.Value, " ")
	ret = items[0]
	return
}

// hasOption reports whether opt appears at token position 1 or 2 of the tag.
// Only those two positions are inspected, matching the original behavior
// (markers beyond position 2 are ignored).
func (s *TagImpl) hasOption(opt string) bool {
	items := strings.Split(s.Value, " ")
	for i := 1; i <= 2 && i < len(items); i++ {
		if items[i] == opt {
			return true
		}
	}
	return false
}

// IsPrimaryKey reports whether the tag carries the "key" marker.
func (s *TagImpl) IsPrimaryKey() (ret bool) {
	return s.hasOption("key")
}

// IsAutoIncrement reports whether the tag carries the "auto" marker.
func (s *TagImpl) IsAutoIncrement() (ret bool) {
	return s.hasOption("auto")
}

// copy returns a new TagImpl with the same raw value.
func (s *TagImpl) copy() (ret *TagImpl) {
	ret = &TagImpl{Value: s.Value}
	return
}

// dump renders the parsed tag for debugging.
func (s *TagImpl) dump() (ret string) {
	return fmt.Sprintf("name=%s key=%v auto=%v", s.GetName(), s.IsPrimaryKey(), s.IsAutoIncrement())
}

// newTag validates the raw tag string and wraps it in a TagImpl.
// An empty tag is rejected.
func newTag(tag string) (ret *TagImpl, err error) {
	// The original split on the empty separator merely to detect an empty
	// tag (strings.Split("", "") yields a zero-length slice); test the
	// string directly instead.
	if tag == "" {
		err = fmt.Errorf("illegal tag value, val:%s", tag)
		return
	}
	ret = &TagImpl{Value: tag}
	return
}

// compareTag reports whether two tags have identical raw values.
func compareTag(l, r *TagImpl) bool {
	return l.Value == r.Value
}
|
/*++
Copyright (C) 2018 Autodesk Inc. (Original Author)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--*/
//////////////////////////////////////////////////////////////////////////////////////////////////////
// buildbindingcpp.go
// functions to generate C++-bindings of a library's API in form of automatically implemented C++-
// wrapper classes.
//////////////////////////////////////////////////////////////////////////////////////////////////////
package main
import (
"fmt"
"log"
"path"
"strings"
"path/filepath"
)
// BuildBindingCPP builds C++-bindings of a library's API in form of automatically
// implemented C++-wrapper classes. It writes <baseName>.hpp/.cpp into
// outputFolder and, when outputFolderExample is non-empty, an example program
// and CMakeLists.txt into outputFolderExample.
// (Cleanup: removed non-idiomatic semicolons, parenthesized conditions, and
// spaces before call parentheses; behavior is unchanged.)
func BuildBindingCPP(component ComponentDefinition, outputFolder string, outputFolderExample string, indentString string) error {
	namespace := component.NameSpace
	libraryname := component.LibraryName
	baseName := component.BaseName
	// NOTE(review): forceRecreation is hard-coded to true, so the FileExists
	// checks below never suppress recreation; kept to preserve behavior.
	forceRecreation := true

	CppHeaderName := path.Join(outputFolder, baseName+".hpp")
	log.Printf("Creating \"%s\"", CppHeaderName)
	hppfile, err := CreateLanguageFile(CppHeaderName, indentString)
	if err != nil {
		return err
	}

	CppImplName := path.Join(outputFolder, baseName+".cpp")
	log.Printf("Creating \"%s\"", CppImplName)
	cppfile, err := CreateLanguageFile(CppImplName, indentString)
	if err != nil {
		return err
	}

	WriteLicenseHeader(hppfile.Writer, component,
		fmt.Sprintf("This is an autogenerated C++ Header file in order to allow an easy use\n of %s", libraryname),
		true)
	WriteLicenseHeader(cppfile.Writer, component,
		fmt.Sprintf("This is an autogenerated C++ Wrapper Implementation file in order to allow \nan easy use of %s", libraryname),
		true)

	err = buildCPPHeaderAndImplementation(component, hppfile, cppfile, namespace, baseName)
	if err != nil {
		return err
	}

	if len(outputFolderExample) > 0 {
		// Example application.
		CPPExample := path.Join(outputFolderExample, namespace+"_example"+".cpp")
		if forceRecreation || !FileExists(CPPExample) {
			log.Printf("Creating \"%s\"", CPPExample)
			cppexamplefile, err := CreateLanguageFile(CPPExample, " ")
			if err != nil {
				return err
			}
			cppexamplefile.WriteCLicenseHeader(component,
				fmt.Sprintf("This is an autogenerated C++ application that demonstrates the\n usage of the C++ bindings of %s", libraryname),
				true)
			buildCppExample(component, cppexamplefile, outputFolder)
		} else {
			log.Printf("Omitting recreation of C++Dynamic example file \"%s\"", CPPExample)
		}

		// CMake project for the example.
		CPPCMake := path.Join(outputFolderExample, "CMakeLists.txt")
		if forceRecreation || !FileExists(CPPCMake) {
			log.Printf("Creating \"%s\"", CPPCMake)
			cppcmake, err := CreateLanguageFile(CPPCMake, " ")
			if err != nil {
				return err
			}
			cppcmake.WriteCMakeLicenseHeader(component,
				fmt.Sprintf("This is an autogenerated CMake Project that demonstrates the\n usage of the C++ bindings of %s", libraryname),
				true)
			buildCppExampleCMake(component, cppcmake, outputFolder)
		} else {
			log.Printf("Omitting recreation of C++Dynamic example file \"%s\"", CPPCMake)
		}
	}
	return nil
}
func buildCPPHeaderAndImplementation(component ComponentDefinition, w LanguageWriter, cppimplw LanguageWriter, NameSpace string, BaseName string) error {
// Header start code
w.Writeln("")
w.Writeln("#ifndef __%s_CPPHEADER", strings.ToUpper(NameSpace))
w.Writeln("#define __%s_CPPHEADER", strings.ToUpper(NameSpace))
w.Writeln("")
w.Writeln("#include \"%s.h\"", BaseName)
w.Writeln("#include <string>")
w.Writeln("#include <memory>")
w.Writeln("#include <vector>")
w.Writeln("#include <exception>")
w.Writeln("")
w.Writeln("namespace %s {", NameSpace)
w.Writeln("")
w.Writeln("/*************************************************************************************************************************")
w.Writeln(" Forward Declaration of all classes ")
w.Writeln("**************************************************************************************************************************/")
w.Writeln("")
cppClassPrefix := "C" + NameSpace
w.Writeln("class %sBaseClass;", cppClassPrefix)
for i := 0; i < len(component.Classes); i++ {
class := component.Classes[i]
w.Writeln("class %s%s;", cppClassPrefix, class.ClassName)
}
w.Writeln("")
w.Writeln("/*************************************************************************************************************************")
w.Writeln(" Declaration of shared pointer types ")
w.Writeln("**************************************************************************************************************************/")
w.Writeln("")
w.Writeln("typedef std::shared_ptr<%sBaseClass> P%sBaseClass;", cppClassPrefix, NameSpace)
for i := 0; i < len(component.Classes); i++ {
class := component.Classes[i]
w.Writeln("typedef std::shared_ptr<%s%s> P%s%s;", cppClassPrefix, class.ClassName, NameSpace, class.ClassName)
}
w.Writeln(" ")
w.Writeln("/*************************************************************************************************************************")
w.Writeln(" Class E%sException ", NameSpace)
w.Writeln("**************************************************************************************************************************/")
w.Writeln("class E%sException : public std::exception {", NameSpace)
w.Writeln(" protected:")
w.Writeln(" /**")
w.Writeln(" * Error code for the Exception.")
w.Writeln(" */")
w.Writeln(" %sResult m_errorCode;", NameSpace)
w.Writeln(" /**")
w.Writeln(" * Error message for the Exception.")
w.Writeln(" */")
w.Writeln(" std::string m_errorMessage;")
w.Writeln("")
w.Writeln(" public:")
w.Writeln(" /**")
w.Writeln(" * Exception Constructor.")
w.Writeln(" */")
w.Writeln(" E%sException (%sResult errorCode);", NameSpace, NameSpace)
w.Writeln("")
w.Writeln(" /**")
w.Writeln(" * Returns error code")
w.Writeln(" */")
w.Writeln(" %sResult getErrorCode ();", NameSpace)
w.Writeln("")
w.Writeln(" /**")
w.Writeln(" * Returns error message")
w.Writeln(" */")
w.Writeln(" const char* what () const noexcept;")
w.Writeln("")
w.Writeln("};")
w.Writeln("")
err := writeCPPInputVector(w, NameSpace)
if err != nil {
return err
}
w.Writeln("")
w.Writeln("")
w.Writeln("/*************************************************************************************************************************")
w.Writeln(" Class %sBaseClass ", cppClassPrefix)
w.Writeln("**************************************************************************************************************************/")
w.Writeln("class %sBaseClass {", cppClassPrefix)
w.Writeln("protected:")
w.Writeln(" /* Handle to Instance in library*/")
w.Writeln(" %sHandle m_pHandle;", NameSpace)
w.Writeln("")
w.Writeln(" /* Checks for an Error code and raises Exceptions */")
w.Writeln(" void CheckError(%sResult nResult);", NameSpace)
w.Writeln("public:")
w.Writeln("")
w.Writeln(" /**")
w.Writeln(" * %sBaseClass::%sBaseClass - Constructor for Base class.", cppClassPrefix, cppClassPrefix)
w.Writeln(" */")
w.Writeln(" %sBaseClass(%sHandle pHandle);", cppClassPrefix, NameSpace)
w.Writeln("")
w.Writeln(" /**")
w.Writeln(" * %sBaseClass::~%sBaseClass - Destructor for Base class.", cppClassPrefix, cppClassPrefix)
w.Writeln(" */")
w.Writeln(" virtual ~%sBaseClass();", cppClassPrefix)
w.Writeln("")
w.Writeln(" /**")
w.Writeln(" * %sBaseClass::GetHandle - Returns handle to instance.", cppClassPrefix)
w.Writeln(" */")
w.Writeln(" %sHandle GetHandle();", NameSpace)
w.Writeln("};")
// Implementation start code
cppimplw.Writeln("#include \"%s.hpp\"", BaseName)
cppimplw.Writeln("")
cppimplw.Writeln("#include <vector>")
cppimplw.Writeln("")
cppimplw.Writeln("namespace %s {", NameSpace)
cppimplw.Writeln("")
cppimplw.Writeln("/*************************************************************************************************************************")
cppimplw.Writeln(" Class E%sException ", NameSpace)
cppimplw.Writeln("**************************************************************************************************************************/")
cppimplw.Writeln(" E%sException::E%sException(%sResult errorCode)", NameSpace, NameSpace, NameSpace)
cppimplw.Writeln(" : m_errorMessage(\"%s Error \" + std::to_string (errorCode))", NameSpace)
cppimplw.Writeln(" {")
cppimplw.Writeln(" m_errorCode = errorCode;")
cppimplw.Writeln(" }")
cppimplw.Writeln("")
cppimplw.Writeln(" %sResult E%sException::getErrorCode ()", NameSpace, NameSpace)
cppimplw.Writeln(" {")
cppimplw.Writeln(" return m_errorCode;")
cppimplw.Writeln(" }")
cppimplw.Writeln("")
cppimplw.Writeln(" const char* E%sException::what () const noexcept", NameSpace)
cppimplw.Writeln(" {")
cppimplw.Writeln(" return m_errorMessage.c_str();")
cppimplw.Writeln(" }")
cppimplw.Writeln("")
cppimplw.Writeln("/*************************************************************************************************************************")
cppimplw.Writeln(" Class %sBaseClass ", cppClassPrefix)
cppimplw.Writeln("**************************************************************************************************************************/")
cppimplw.Writeln("")
cppimplw.Writeln("%sBaseClass::%sBaseClass(%sHandle pHandle)", cppClassPrefix, cppClassPrefix, NameSpace)
cppimplw.Writeln("{")
cppimplw.Writeln(" m_pHandle = pHandle;")
cppimplw.Writeln("}")
cppimplw.Writeln("")
cppimplw.Writeln("%sBaseClass::~%sBaseClass()", cppClassPrefix, cppClassPrefix)
cppimplw.Writeln("{")
cppimplw.Writeln(" %sWrapper::%s(this);", cppClassPrefix, component.Global.ReleaseMethod)
cppimplw.Writeln("}")
cppimplw.Writeln("")
cppimplw.Writeln("void %sBaseClass::CheckError(%sResult nResult)", cppClassPrefix, NameSpace)
cppimplw.Writeln("{")
cppimplw.Writeln(" %sWrapper::CheckError(m_pHandle, nResult);", cppClassPrefix)
cppimplw.Writeln("}")
cppimplw.Writeln("")
cppimplw.Writeln("%sHandle %sBaseClass::GetHandle()", NameSpace, cppClassPrefix)
cppimplw.Writeln("{")
cppimplw.Writeln(" return m_pHandle;")
cppimplw.Writeln("}")
cppimplw.Writeln("")
for i := 0; i < len(component.Classes); i++ {
class := component.Classes[i]
cppClassName := cppClassPrefix + class.ClassName
parentClassName := class.ParentClass
if parentClassName == "" {
parentClassName = "BaseClass"
}
cppParentClassName := cppClassPrefix + parentClassName
w.Writeln(" ")
w.Writeln("/*************************************************************************************************************************")
w.Writeln(" Class %s ", cppClassName)
w.Writeln("**************************************************************************************************************************/")
w.Writeln("class %s : public %s {", cppClassName, cppParentClassName)
w.Writeln("public:")
w.Writeln(" ")
w.Writeln(" /**")
w.Writeln(" * %s::%s - Constructor for %s class.", cppClassName, cppClassName, class.ClassName)
w.Writeln(" */")
w.Writeln(" %s (%sHandle pHandle);", cppClassName, NameSpace)
cppimplw.Writeln(" ")
cppimplw.Writeln("/*************************************************************************************************************************")
cppimplw.Writeln(" Class %s ", cppClassName)
cppimplw.Writeln("**************************************************************************************************************************/")
cppimplw.Writeln("/**")
cppimplw.Writeln("* %s::%s - Constructor for %s class.", cppClassName, cppClassName, class.ClassName)
cppimplw.Writeln("*/")
cppimplw.Writeln("%s::%s (%sHandle pHandle)", cppClassName, cppClassName, NameSpace)
cppimplw.Writeln(" : %s (pHandle)", cppParentClassName)
cppimplw.Writeln("{ }")
for j := 0; j < len(class.Methods); j++ {
method := class.Methods[j]
err := writeCPPMethod(method, w, cppimplw, NameSpace, class.ClassName, false)
if err != nil {
return err
}
}
w.Writeln("};")
}
// Global functions
w.Writeln(" ")
w.Writeln("/*************************************************************************************************************************")
w.Writeln(" Class %sWrapper ", cppClassPrefix)
w.Writeln("**************************************************************************************************************************/")
w.Writeln("class %sWrapper {", cppClassPrefix)
w.Writeln("public:")
w.Writeln(" static void CheckError(%sHandle handle, %sResult nResult);", NameSpace, NameSpace)
global := component.Global;
for j := 0; j < len(global.Methods); j++ {
method := global.Methods[j]
err := writeCPPMethod(method, w, cppimplw, NameSpace, "Wrapper", true)
if err != nil {
return err
}
}
w.Writeln("};")
w.Writeln("")
w.Writeln("};")
w.Writeln("")
w.Writeln("#endif // __%s_CPPHEADER", strings.ToUpper(NameSpace))
w.Writeln("")
cppimplw.Writeln("")
cppimplw.Writeln("void %sWrapper::CheckError(%sHandle handle, %sResult nResult)", cppClassPrefix, NameSpace, NameSpace)
cppimplw.Writeln("{")
cppimplw.Writeln(" if (nResult != 0) ")
cppimplw.Writeln(" throw E%sException (nResult);", NameSpace)
cppimplw.Writeln("}")
cppimplw.Writeln("")
cppimplw.Writeln("")
cppimplw.Writeln("}; // end namespace %s", NameSpace)
cppimplw.Writeln("")
return nil
}
// writeCPPInputVector writes the header declaration of the templated
// C<NameSpace>InputVector class: a non-owning, read-only view over either a
// std::vector<T> or a raw pointer+size pair, used to hand input arrays to the
// C API without copying.
//
// Cleanup: dropped the redundant parentheses around the return type and the
// stray C-style trailing semicolons so the block is gofmt-clean; the emitted
// C++ text is unchanged.
func writeCPPInputVector(w LanguageWriter, NameSpace string) error {
	w.Writeln("/*************************************************************************************************************************")
	w.Writeln(" Class C%sInputVector", NameSpace)
	w.Writeln("**************************************************************************************************************************/")
	w.Writeln("template <typename T>")
	w.Writeln("class C%sInputVector {", NameSpace)
	w.Writeln("private:")
	w.Writeln("  ")
	w.Writeln("  const T* m_data;")
	w.Writeln("  size_t m_size;")
	w.Writeln("  ")
	w.Writeln("public:")
	w.Writeln("  ")
	w.Writeln("  C%sInputVector( const std::vector<T>& vec)", NameSpace)
	w.Writeln("    : m_data( vec.data() ), m_size( vec.size() )")
	w.Writeln("  {")
	w.Writeln("  }")
	w.Writeln("  ")
	w.Writeln("  C%sInputVector( const T* in_data, size_t in_size)", NameSpace)
	w.Writeln("    : m_data( in_data ), m_size(in_size )")
	w.Writeln("  {")
	w.Writeln("  }")
	w.Writeln("  ")
	w.Writeln("  const T* data() const")
	w.Writeln("  {")
	w.Writeln("    return m_data;")
	w.Writeln("  }")
	w.Writeln("  ")
	w.Writeln("  size_t size() const")
	w.Writeln("  {")
	w.Writeln("    return m_size;")
	w.Writeln("  }")
	w.Writeln("  ")
	w.Writeln("};")
	return nil
}
// getBindingCppParamType maps an IDL parameter type to the C++ type used in
// the generated binding.
//
// isInput selects the input-side representation where it differs from the
// output side: input arrays become the non-owning C<NameSpace>InputVector
// view, input handles become raw class pointers, while outputs use
// std::vector and the P<NameSpace><Class> shared-pointer typedef.
// An unknown type terminates the generator via log.Fatal, matching the
// original behavior.
//
// Cleanup (behavior-preserving): removed C-style semicolons and redundant
// parentheses; replaced `fmt.Sprintf` calls with a constant format and no
// arguments by plain string literals (staticcheck S1039); merged the numeric
// cases, whose generated name always equals "<NameSpace>_<type>".
func getBindingCppParamType(param ComponentDefinitionParam, NameSpace string, isInput bool) string {
	cppClassPrefix := "C" + NameSpace
	switch param.ParamType {
	case "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64", "double":
		// All sized numeric types map 1:1 onto the namespaced C typedef.
		return fmt.Sprintf("%s_%s", NameSpace, param.ParamType)
	case "string":
		return "std::string"
	case "bool":
		return "bool"
	case "single":
		return "float"
	case "basicarray":
		cppBasicType := ""
		switch param.ParamClass {
		case "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64", "single", "double":
			// Element types also map 1:1 onto the namespaced C typedef.
			cppBasicType = fmt.Sprintf("%s_%s", NameSpace, param.ParamClass)
		case "bool":
			cppBasicType = "bool"
		default:
			log.Fatal("Invalid parameter type: ", param.ParamClass)
		}
		if isInput {
			return fmt.Sprintf("C%sInputVector<%s>", NameSpace, cppBasicType)
		}
		return fmt.Sprintf("std::vector<%s>", cppBasicType)
	case "structarray":
		if isInput {
			return fmt.Sprintf("C%sInputVector<s%s%s>", NameSpace, NameSpace, param.ParamClass)
		}
		return fmt.Sprintf("std::vector<s%s%s>", NameSpace, param.ParamClass)
	case "enum":
		return fmt.Sprintf("e%s%s", NameSpace, param.ParamClass)
	case "struct":
		return fmt.Sprintf("s%s%s", NameSpace, param.ParamClass)
	case "handle":
		if isInput {
			// Inputs take a raw pointer to the wrapper class.
			return fmt.Sprintf("%s%s *", cppClassPrefix, param.ParamClass)
		}
		// Outputs use the shared-pointer typedef.
		return fmt.Sprintf("P%s%s", NameSpace, param.ParamClass)
	case "functiontype":
		return fmt.Sprintf("%s%s", NameSpace, param.ParamClass)
	}
	log.Fatal("Invalid parameter type: ", param.ParamType)
	return ""
}
// getBindingCppVariableName returns the Hungarian-style C++ variable name for
// a parameter: a type-derived prefix (n/s/b/f/d/e/p) glued to the parameter
// name; arrays get a "Buffer" suffix and structs use the name verbatim.
// An unknown type terminates the generator via log.Fatal, matching the
// original behavior.
//
// Cleanup (behavior-preserving): dropped C-style semicolons and redundant
// parentheses; replaced a needless `fmt.Sprintf("p%s", ...)` with plain
// concatenation and merged it with the identical "handle" case.
func getBindingCppVariableName(param ComponentDefinitionParam) string {
	switch param.ParamType {
	case "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64":
		return "n" + param.ParamName
	case "string":
		return "s" + param.ParamName
	case "bool":
		return "b" + param.ParamName
	case "single":
		return "f" + param.ParamName
	case "basicarray", "structarray":
		return param.ParamName + "Buffer"
	case "double":
		return "d" + param.ParamName
	case "enum":
		return "e" + param.ParamName
	case "struct":
		return param.ParamName
	case "handle", "functiontype":
		return "p" + param.ParamName
	}
	log.Fatal("Invalid parameter type: ", param.ParamType)
	return ""
}
// writeCPPMethod emits the C++ binding for a single method: the declaration
// into the header writer w and the definition into the implementation writer
// cppimplw.
//
// For global (wrapper) methods isGlobal is true: the member is emitted as
// static, the C function name omits the class, and CheckError is called with
// a nullptr handle. Instance methods prepend m_pHandle to the C call.
//
// Methods with string or array out/return parameters are generated as two C
// calls: an "init" call that queries the required buffer size, then the real
// call with a correctly sized buffer. An error is returned for parameter
// types or passing modes that cannot be bound.
func writeCPPMethod(method ComponentDefinitionMethod, w LanguageWriter, cppimplw LanguageWriter, NameSpace string, ClassName string, isGlobal bool) error {
	CMethodName := ""
	requiresInitCall := false; // set when a size-query pre-call is needed (string/array outputs)
	initCallParameters := "" // usually used to check sizes of buffers
	callParameters := ""
	staticPrefix := ""
	checkErrorCode := ""
	if isGlobal {
		// Global methods map to <namespace>_<method><suffix> C functions and
		// check errors against a nullptr instance handle.
		CMethodName = fmt.Sprintf("%s_%s%s", strings.ToLower(NameSpace), strings.ToLower(method.MethodName), method.DLLSuffix)
		staticPrefix = "static "
		checkErrorCode = "CheckError (nullptr,"
	} else {
		// Instance methods map to <namespace>_<class>_<method><suffix> and
		// pass the wrapped instance handle as the first C argument.
		CMethodName = fmt.Sprintf("%s_%s_%s%s", strings.ToLower(NameSpace), strings.ToLower(ClassName), strings.ToLower(method.MethodName), method.DLLSuffix)
		callParameters = "m_pHandle"
		initCallParameters = "m_pHandle"
		checkErrorCode = "CheckError ("
	}
	parameters := ""                  // C++ parameter list of the generated method
	returntype := "void"              // C++ return type; replaced by a "return" param
	definitionCodeLines := []string{} // local variable declarations at the top of the body
	functionCodeLines := []string{}   // statements between the init call and the real call
	returnCodeLines := []string{}     // statements after the call that build the return value
	commentcodeLines := []string{}    // doxygen @param/@return lines
	postCallCodeLines := []string{}   // statements after the call that fill out-parameters
	cppClassPrefix := "C" + NameSpace
	cppClassName := cppClassPrefix + ClassName
	// Translate each IDL parameter into: a C++ signature fragment, the C call
	// argument(s), and any pre/post-call glue code.
	for k := 0; k < len(method.Params); k++ {
		param := method.Params[k]
		variableName := getBindingCppVariableName(param)
		callParameter := "";
		initCallParameter := "";
		switch param.ParamPass {
		case "in":
			if parameters != "" {
				parameters = parameters + ", "
			}
			cppParamType := getBindingCppParamType(param, NameSpace, true)
			commentcodeLines = append(commentcodeLines, fmt.Sprintf("* @param[in] %s - %s", variableName, param.ParamDescription))
			switch param.ParamType {
			case "string":
				// Strings cross the C boundary as null-terminated char*.
				callParameter = variableName + ".c_str()"
				initCallParameter = callParameter;
				parameters = parameters + fmt.Sprintf("const %s & %s", cppParamType, variableName);
			case "struct":
				// Structs are passed by address.
				callParameter = "&" + variableName
				initCallParameter = callParameter;
				parameters = parameters + fmt.Sprintf("const %s & %s", cppParamType, variableName);
			case "structarray", "basicarray":
				// Arrays are passed as a (count, pointer) pair.
				callParameter = fmt.Sprintf("(%s_uint64)%s.size(), %s.data()", NameSpace, variableName, variableName);
				initCallParameter = callParameter;
				parameters = parameters + fmt.Sprintf("const %s & %s", cppParamType, variableName);
			case "handle":
				// Wrapper-class pointers are unwrapped to their raw C handle;
				// a null pointer maps to a null handle.
				functionCodeLines = append(functionCodeLines, fmt.Sprintf("%sHandle h%s = nullptr;", NameSpace, param.ParamName))
				functionCodeLines = append(functionCodeLines, fmt.Sprintf("if (%s != nullptr) {", variableName))
				functionCodeLines = append(functionCodeLines, fmt.Sprintf(" h%s = %s->GetHandle ();", param.ParamName, variableName))
				functionCodeLines = append(functionCodeLines, fmt.Sprintf("};"))
				callParameter = "h" + param.ParamName;
				initCallParameter = callParameter;
				parameters = parameters + fmt.Sprintf("%s %s", cppParamType, variableName)
			default:
				// Scalars pass by value.
				callParameter = variableName;
				initCallParameter = callParameter;
				parameters = parameters + fmt.Sprintf("const %s %s", cppParamType, variableName)
			}
		case "out":
			// Out-parameters appear as non-const references in the C++ API.
			cppParamType := getBindingCppParamType(param, NameSpace, false)
			commentcodeLines = append(commentcodeLines, fmt.Sprintf("* @param[out] %s - %s", variableName, param.ParamDescription))
			if parameters != "" {
				parameters = parameters + ", "
			}
			parameters = parameters + fmt.Sprintf("%s & %s", cppParamType, variableName)
			switch param.ParamType {
			case "string":
				// Two-call protocol: first query the needed byte count, then
				// fetch into a buffer sized bytesNeeded+2 (room for the
				// terminator) and convert to std::string afterwards.
				requiresInitCall = true;
				definitionCodeLines = append(definitionCodeLines, fmt.Sprintf("%s_uint32 bytesNeeded%s = 0;", NameSpace, param.ParamName))
				definitionCodeLines = append(definitionCodeLines, fmt.Sprintf("%s_uint32 bytesWritten%s = 0;", NameSpace, param.ParamName))
				initCallParameter = fmt.Sprintf("0, &bytesNeeded%s, nullptr", param.ParamName);
				functionCodeLines = append(functionCodeLines, fmt.Sprintf("std::vector<char> buffer%s;", param.ParamName))
				functionCodeLines = append(functionCodeLines, fmt.Sprintf("buffer%s.resize(bytesNeeded%s + 2);", param.ParamName, param.ParamName))
				callParameter = fmt.Sprintf("bytesNeeded%s + 2, &bytesWritten%s, &buffer%s[0]", param.ParamName, param.ParamName, param.ParamName)
				postCallCodeLines = append(postCallCodeLines, fmt.Sprintf("buffer%s[bytesNeeded%s + 1] = 0;", param.ParamName, param.ParamName))
				postCallCodeLines = append(postCallCodeLines, fmt.Sprintf("s%s = std::string(&buffer%s[0]);", param.ParamName, param.ParamName))
			case "handle":
				// Receive a raw handle, then wrap it into a shared_ptr.
				definitionCodeLines = append(definitionCodeLines, fmt.Sprintf("%sHandle h%s = nullptr;", NameSpace, param.ParamName))
				callParameter = fmt.Sprintf("&h%s", param.ParamName)
				initCallParameter = callParameter;
				postCallCodeLines = append(postCallCodeLines, fmt.Sprintf("p%s = std::make_shared<%s%s> (h%s);", param.ParamName, cppClassPrefix, param.ParamClass, param.ParamName))
			case "structarray", "basicarray":
				// Two-call protocol: query the element count, resize the
				// caller's vector, then fetch into it.
				requiresInitCall = true;
				definitionCodeLines = append(definitionCodeLines, fmt.Sprintf("%s_uint64 elementsNeeded%s = 0;", NameSpace, param.ParamName))
				definitionCodeLines = append(definitionCodeLines, fmt.Sprintf("%s_uint64 elementsWritten%s = 0;", NameSpace, param.ParamName))
				initCallParameter = fmt.Sprintf("0, &elementsNeeded%s, nullptr", param.ParamName);
				functionCodeLines = append(functionCodeLines, fmt.Sprintf("%s.resize(elementsNeeded%s);", variableName, param.ParamName))
				callParameter = fmt.Sprintf("elementsNeeded%s, &elementsWritten%s, %s.data()", param.ParamName, param.ParamName, variableName)
			default:
				// Scalars are written through their address.
				callParameter = "&" + variableName
				initCallParameter = callParameter
			}
		case "return":
			// At most one "return" parameter determines the C++ return type.
			commentcodeLines = append(commentcodeLines, fmt.Sprintf("* @return %s", param.ParamDescription))
			returntype = getBindingCppParamType(param, NameSpace, false)
			switch param.ParamType {
			case "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64", "bool", "single", "double":
				callParameter = fmt.Sprintf("&result%s", param.ParamName)
				initCallParameter = callParameter;
				definitionCodeLines = append(definitionCodeLines, fmt.Sprintf("%s result%s = 0;", returntype, param.ParamName))
				returnCodeLines = append(returnCodeLines, fmt.Sprintf("return result%s;", param.ParamName))
			case "string":
				// Same two-call size-query protocol as the "out" string case,
				// but the buffer is returned as the function result.
				requiresInitCall = true;
				definitionCodeLines = append(definitionCodeLines, fmt.Sprintf("%s_uint32 bytesNeeded%s = 0;", NameSpace, param.ParamName))
				definitionCodeLines = append(definitionCodeLines, fmt.Sprintf("%s_uint32 bytesWritten%s = 0;", NameSpace, param.ParamName))
				initCallParameter = fmt.Sprintf("0, &bytesNeeded%s, nullptr", param.ParamName);
				functionCodeLines = append(functionCodeLines, fmt.Sprintf("std::vector<char> buffer%s;", param.ParamName))
				functionCodeLines = append(functionCodeLines, fmt.Sprintf("buffer%s.resize(bytesNeeded%s + 2);", param.ParamName, param.ParamName))
				callParameter = fmt.Sprintf("bytesNeeded%s + 2, &bytesWritten%s, &buffer%s[0]", param.ParamName, param.ParamName, param.ParamName)
				returnCodeLines = append(returnCodeLines, fmt.Sprintf("buffer%s[bytesNeeded%s + 1] = 0;", param.ParamName, param.ParamName))
				returnCodeLines = append(returnCodeLines, fmt.Sprintf("return std::string(&buffer%s[0]);", param.ParamName))
			case "enum":
				callParameter = fmt.Sprintf("&result%s", param.ParamName)
				initCallParameter = callParameter;
				definitionCodeLines = append(definitionCodeLines, fmt.Sprintf("e%s%s result%s = (e%s%s) 0;", NameSpace, param.ParamClass, param.ParamName, NameSpace, param.ParamClass))
				returnCodeLines = append(returnCodeLines, fmt.Sprintf("return result%s;", param.ParamName))
			case "struct":
				callParameter = fmt.Sprintf("&result%s", param.ParamName)
				initCallParameter = callParameter;
				definitionCodeLines = append(definitionCodeLines, fmt.Sprintf("s%s%s result%s;", NameSpace, param.ParamClass, param.ParamName))
				returnCodeLines = append(returnCodeLines, fmt.Sprintf("return result%s;", param.ParamName))
			case "handle":
				// Receive a raw handle and wrap it into a shared_ptr result.
				definitionCodeLines = append(definitionCodeLines, fmt.Sprintf("%sHandle h%s = nullptr;", NameSpace, param.ParamName))
				callParameter = fmt.Sprintf("&h%s", param.ParamName)
				initCallParameter = callParameter;
				returnCodeLines = append(returnCodeLines, fmt.Sprintf("return std::make_shared<%s%s> (h%s);", cppClassPrefix, param.ParamClass, param.ParamName))
			case "basicarray":
				return fmt.Errorf("can not return basicarray \"%s\" for %s.%s (%s)", param.ParamPass, ClassName, method.MethodName, param.ParamName)
			case "structarray":
				return fmt.Errorf("can not return structarray \"%s\" for %s.%s (%s)", param.ParamPass, ClassName, method.MethodName, param.ParamName)
			default:
				return fmt.Errorf("invalid method parameter type \"%s\" for %s.%s (%s)", param.ParamType, ClassName, method.MethodName, param.ParamName)
			}
		default:
			return fmt.Errorf("invalid method parameter passing \"%s\" for %s.%s (%s)", param.ParamPass, ClassName, method.MethodName, param.ParamName)
		}
		// Accumulate the C argument lists for the real call and the size-query
		// init call (they differ only for string/array outputs).
		if callParameters != "" {
			callParameters = callParameters + ", "
		}
		callParameters = callParameters + callParameter;
		if (initCallParameters != "") {
			initCallParameters = initCallParameters + ", ";
		}
		initCallParameters = initCallParameters + initCallParameter;
	}
	// Header: doxygen comment plus the method declaration.
	w.Writeln("")
	w.Writeln(" /**")
	w.Writeln(" * %s::%s - %s", cppClassName, method.MethodName, method.MethodDescription)
	w.Writelns(" ", commentcodeLines)
	w.Writeln(" */")
	w.Writeln(" %s%s %s (%s);", staticPrefix, returntype, method.MethodName, parameters)
	// Implementation: comment, signature, optional size-query call, glue code,
	// the real C call wrapped in CheckError, and post-call/return code.
	cppimplw.Writeln("")
	cppimplw.Writeln("/**")
	cppimplw.Writeln("* %s::%s - %s", cppClassName, method.MethodName, method.MethodDescription)
	cppimplw.Writelns("", commentcodeLines)
	cppimplw.Writeln("*/")
	cppimplw.Writeln("%s %s::%s (%s)", returntype, cppClassName, method.MethodName, parameters)
	cppimplw.Writeln("{")
	cppimplw.Writelns(" ", definitionCodeLines)
	if (requiresInitCall) {
		cppimplw.Writeln(" %s %s (%s) );", checkErrorCode, CMethodName, initCallParameters)
	}
	cppimplw.Writelns(" ", functionCodeLines)
	cppimplw.Writeln(" %s %s (%s) );", checkErrorCode, CMethodName, callParameters)
	cppimplw.Writelns(" ", postCallCodeLines)
	cppimplw.Writelns(" ", returnCodeLines)
	cppimplw.Writeln("}")
	return nil
}
// buildCppExample writes a minimal C++ example program for the generated
// binding: it queries the library version via the generated wrapper, prints
// it, and converts any thrown exception into a non-zero exit code.
//
// outputFolder is unused here but kept for signature symmetry with the other
// example builders. Cleanup: removed a stray trailing semicolon (gofmt).
func buildCppExample(componentdefinition ComponentDefinition, w LanguageWriter, outputFolder string) error {
	NameSpace := componentdefinition.NameSpace
	BaseName := componentdefinition.BaseName
	w.Writeln("#include <iostream>")
	w.Writeln("#include \"%s.hpp\"", strings.ToLower(BaseName))
	w.Writeln("")
	w.Writeln("")
	w.Writeln("int main()")
	w.Writeln("{")
	w.Writeln("  try")
	w.Writeln("  {")
	w.Writeln("    unsigned int nMajor, nMinor, nMicro;")
	w.Writeln("    %s::C%sWrapper::GetLibraryVersion(nMajor, nMinor, nMicro);", NameSpace, NameSpace)
	w.Writeln("    std::cout << \"%s.Version = \" << nMajor << \".\" << nMinor << \".\" << nMicro << std::endl;", NameSpace)
	w.Writeln("  }")
	w.Writeln("  catch (std::exception &e)")
	w.Writeln("  {")
	w.Writeln("    std::cout << e.what() << std::endl;")
	w.Writeln("    return 1;")
	w.Writeln("  }")
	w.Writeln("  return 0;")
	w.Writeln("}")
	w.Writeln("")
	return nil
}
// buildCppExampleCMake writes a CMakeLists.txt that builds the generated C++
// example executable against the generated binding sources.
//
// componentdefinition supplies NameSpace/BaseName, w receives the file
// content, and outputFolder is the folder containing the generated binding.
//
// Fix: the separator-to-slash conversion was done twice, once with limit -2
// and once with -1 (both mean "no limit" for strings.Replace, but the mixed
// values read like a typo). It is now done once with the conventional -1 and
// the converted path is reused for link_directories as well.
func buildCppExampleCMake(componentdefinition ComponentDefinition, w LanguageWriter, outputFolder string) error {
	NameSpace := componentdefinition.NameSpace
	BaseName := componentdefinition.BaseName
	w.Writeln("cmake_minimum_required(VERSION 3.5)")
	w.Writeln("")
	w.Writeln("project(%sExample_CPP)", NameSpace)
	w.Writeln("set (CMAKE_CXX_STANDARD 11)")
	// CMake expects forward slashes regardless of the host OS.
	// TODO: calculate relative path from ExampleOutputFolder to OUTPUTFOLDER based on CURRENT_SOURCE_DIR
	outputFolder = strings.Replace(outputFolder, string(filepath.Separator), "/", -1)
	w.Writeln("link_directories(\"%s\") # TODO: put the correct path of the import library here", outputFolder)
	w.Writeln("add_executable(%sExample_CPP \"${CMAKE_CURRENT_SOURCE_DIR}/%s_example.cpp\"", NameSpace, NameSpace)
	w.Writeln("  \"%s/%s.cpp\")", outputFolder, BaseName)
	w.Writeln("target_link_libraries(%sExample_CPP %s)", NameSpace, BaseName)
	// TODO: calculate relative path from ExampleOutputFolder to OUTPUTFOLDER based on CURRENT_SOURCE_DIR
	w.Writeln("target_include_directories(%sExample_CPP PRIVATE \"%s\")", NameSpace, outputFolder)
	return nil
}
|
package main
import (
"fmt"
"math"
)
// exported_names prints the value of the exported identifier math.Pi,
// prefixed with this file's name.
// NOTE(review): the underscore name is unidiomatic Go (MixedCaps is the
// convention), but renaming would break any callers elsewhere in the package.
func exported_names() {
	fmt.Println("[exported_names.go]", math.Pi)
}
|
package main
import (
"bytes"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"time"
"wishCollection/models"
"wishCollection/utility"
)
var stop bool
var runing bool
var collectionTime time.Duration
// init seeds the global collection interval with its 2-minute default before
// main runs; requestWishId may later override it from the service's rate.
func init() {
	collectionTime = time.Minute * 2
}
// main starts the collector in the background and serves a trivial HTTP
// endpoint on :7758 that re-triggers collection when it is not already
// running (always answering "ok").
//
// Cleanup: `runing == false` replaced with the idiomatic `!runing`
// (staticcheck S1002); behavior is unchanged.
func main() {
	go requestWishId()
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// NOTE(review): runing is read here and written by requestWishId on
		// other goroutines without synchronization — this is a data race;
		// consider guarding it with a mutex or atomic.Bool.
		if !runing {
			requestWishId()
		}
		w.Write([]byte("ok"))
	})
	s := &http.Server{
		Addr: ":7758",
	}
	log.Fatal(s.ListenAndServe())
}
// CollectionJSON is the response payload of the control service's
// /api/collection endpoint (see requestWishId).
type CollectionJSON struct {
	Code int    `json:"code"` // non-zero means there is currently nothing to collect
	Id   string `json:"id"`   // product id to add to the wish list
	Rate int    `json:"rate"` // collection interval in seconds; <=0 keeps the current value
}
// requestWishId fetches the next product id to collect from the control
// service, updates the global collection interval from the service's "rate"
// field, registers a fresh user, and starts crawling the feed for that id.
// It maintains the global "runing" flag so the HTTP endpoint in main can
// re-trigger collection after a failure or an empty queue.
//
// Fixes: the error from http.NewRequest was silently discarded, and a failed
// client.Do fell through to ioutil.ReadAll(resp.Body) on a nil response,
// which panics — both paths now reset "runing" and return. The goto-based
// registration retry is rewritten as a loop.
func requestWishId() {
	// Fetch the target product id (GET http://.../api/collection).
	runing = true
	client := &http.Client{}
	req, err := http.NewRequest("GET", "http://45.76.220.102:2596/api/collection", nil)
	if err != nil {
		fmt.Println("Failure : ", err)
		runing = false
		return
	}
	resp, err := client.Do(req)
	if err != nil {
		// No response to parse; bail out instead of dereferencing nil below.
		fmt.Println("Failure : ", err)
		runing = false
		return
	}
	defer resp.Body.Close()
	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println(err)
		runing = false
		return
	}
	var cJSON CollectionJSON
	if err := json.Unmarshal(respBody, &cJSON); err != nil {
		// Preserve original behavior: retry the whole request on bad JSON.
		fmt.Println(err)
		requestWishId()
		return
	}
	if cJSON.Code != 0 {
		// Nothing to collect right now.
		runing = false
		return
	}
	if cJSON.Rate > 0 {
		collectionTime = time.Second * time.Duration(cJSON.Rate)
	}
	// Retry registration until it succeeds (replaces the original goto loop).
	for {
		user, err := models.RegisterUser()
		if err == nil {
			getWishIdFromFeed("tabbed_feed_latest", user, cJSON.Id)
			return
		}
	}
}
//13672
// getWishIdFromFeed crawls the given feed category page by page until the
// collection timer fires (global stop flag set by TimeOut), then adds the
// product wishId to the user's first wish list and asks the control service
// for the next id.
//
// Fix: the fallback interval used to be a bare `120`, i.e. a time.Duration of
// 120 *nanoseconds*, which made the ticker fire almost immediately. It is now
// 120 seconds, matching the 2-minute default set in init(). Also replaced the
// non-idiomatic `stop == true` with `stop`.
func getWishIdFromFeed(categoryId string, user models.User, wishId string) {
	if collectionTime <= 0 {
		collectionTime = 120 * time.Second
	}
	c := time.NewTicker(collectionTime)
	go TimeOut(c)
	fmt.Println(wishId)
	page := 0
	for {
		if stop {
			stop = false
			// Timer fired: favorite the product, then fetch the next id.
			if g := models.GetWisList(user); g.Code == 0 {
				if len(g.Data.Wishlists) > 0 {
					if a := models.AddProductToWishList(g.Data.Wishlists[0].ID, wishId, user); a.Code != 0 {
						utility.SendLog(a.Msg)
					}
				} else {
					utility.SendLog(fmt.Sprintln("创建收藏列表失败", wishId))
				}
			}
			requestWishId()
			return
		}
		if err := loadFeed(page, categoryId, user); err != nil {
			// NOTE(review): this retries the same page immediately without a
			// sleep — a persistent error busy-loops; confirm that is intended.
			utility.SendLog(err.Error())
			continue
		}
		time.Sleep(time.Second * 10)
		page++
	}
}
// TimeOut waits for the first tick of c, prints the tick time, raises the
// global stop flag, and stops the ticker.
//
// Fix: the original ranged over c.C and then called c.Stop() inside the loop;
// Stop does not close the channel, so the range blocked forever and leaked
// this goroutine on every collection cycle. Receiving exactly one tick and
// returning keeps the observable behavior identical while letting the
// goroutine exit.
func TimeOut(c *time.Ticker) {
	now := <-c.C
	fmt.Println(now)
	stop = true
	c.Stop()
}
// loadFeed requests one page of the Wish filtered feed for the given user and
// category. It returns an error when the request fails, the server responds
// with a non-200 status, the payload cannot be decoded, or the feed is
// exhausted (API code 10 or an empty product list).
//
// Fixes: a gzip.NewReader failure used to be only logged, after which
// buf.ReadFrom(nil reader) would panic — it now returns the error; the gzip
// reader is closed; buf.ReadFrom's error is checked; and the `else` after
// return / nested conditionals are flattened (staticcheck S1008-style).
func loadFeed(page int, categoryId string, user models.User) error {
	body := feedBodyWith(page, user, categoryId)
	client := &http.Client{}
	req, err := http.NewRequest("POST", "http://www.wish.com/api/feed/get-filtered-feed", body)
	if err != nil {
		return err
	}
	req = headerWish(req, user)
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// The request advertises Accept-Encoding: gzip explicitly (see
	// headerWish), so the body may need manual decompression.
	var reader io.ReadCloser
	switch resp.Header.Get("Content-Encoding") {
	case "gzip":
		reader, err = gzip.NewReader(resp.Body)
		if err != nil {
			utility.Errorln(err)
			return err
		}
		defer reader.Close()
	default:
		reader = resp.Body
	}
	var buf bytes.Buffer
	if _, err := buf.ReadFrom(reader); err != nil {
		return err
	}
	if resp.StatusCode != 200 {
		return fmt.Errorf("StatusCode: %d %s", resp.StatusCode, buf.Bytes())
	}
	var feeds Feeds
	if err = json.Unmarshal(buf.Bytes(), &feeds); err != nil {
		return err
	}
	if feeds.Code == 10 || len(feeds.Data.Products) <= 0 {
		return fmt.Errorf("not more product")
	}
	return nil
}
// feedBodyWith builds the URL-encoded POST body for a filtered-feed request:
// the client capability flags, app/client identification, the per-user
// tokens, and paging (30 products per page, offset = page*30).
//
// Fix: the capability flags were added with params.Set, and Set *replaces*
// the value for a key — so only the last flag ("9") ever reached the server.
// params.Add accumulates them as repeated "_capabilities[]" fields, which is
// what a multi-valued form field requires.
func feedBodyWith(page int, user models.User, category string) *bytes.Buffer {
	params := url.Values{}
	for _, capability := range []string{
		"11", "12", "13", "15", "2", "21", "24", "25", "28", "32",
		"35", "39", "4", "40", "43", "6", "7", "8", "9",
	} {
		params.Add("_capabilities[]", capability)
	}
	params.Set("request_id", category)
	params.Set("_app_type", "wish")
	params.Set("_version", "3.20.6")
	params.Set("_client", "iosapp")
	params.Set("_xsrf", "1")
	params.Set("app_device_model", "iPhone9,2")
	params.Set("advertiser_id", user.AdvertiserId)
	params.Set("_riskified_session_token", user.RiskifiedSessionToken)
	params.Set("app_device_id", user.AppDeviceID)
	//params.Set("_threat_metrix_session_token", user.)
	params.Set("count", "30")
	params.Set("offset", fmt.Sprintf("%d", page*30))
	//params.Set("request_categories", "true")
	return bytes.NewBufferString(params.Encode())
}
// headerWish decorates req with the headers the Wish API expects — accepted
// encodings/language, the session cookie built from the user's tokens, the
// form content type, and the iOS app user agent — and returns the same
// request for chaining.
func headerWish(req *http.Request, user models.User) *http.Request {
	sessionCookie := fmt.Sprintf("_xsrf=1; _timezone=8; _appLocale=zh-Hans-CN; sweeper_session=\"%s\"; bsid=%s", user.SweeperSession, user.Baid)
	headers := [][2]string{
		{"Accept", "*/*"},
		{"Accept-Encoding", "gzip"},
		{"Accept-Language", "zh-Hans-CN;q=1"},
		{"Cookie", sessionCookie},
		{"Content-Type", "application/x-www-form-urlencoded"},
		{"User-Agent", "Wish/3.20.6 (iPhone; iOS 10.3.2; Scale/3.00)"},
	}
	for _, kv := range headers {
		req.Header.Add(kv[0], kv[1])
	}
	return req
}
// Feeds mirrors the JSON payload returned by the Wish filtered-feed endpoint;
// only the fields loadFeed inspects are decoded.
type Feeds struct {
	Msg  string `json:"msg"`  // human-readable status message
	Code int    `json:"code"` // API status; 10 is treated as "no more products" in loadFeed
	// Data wraps the product list; only product ids are decoded.
	Data struct {
		Products []struct {
			ID string `json:"id"`
		} `json:"products"`
	} `json:"data"`
}
|
package structs
// HealthStatus is the coarse health level of a module.
// The zero value is Unhealthy.
type HealthStatus int

const (
	// Down indicates the module is not responding at all.
	Down HealthStatus = iota - 1 // -1
	// Unhealthy indicates the module is reachable but not healthy.
	Unhealthy // 0
	// Healthy indicates the module is fully operational.
	Healthy // 1
)

// Health aggregates the health reports of all modules.
type Health struct {
	Modules []ModuleHealth `json:"modules"`
}

// ModuleHealth is one module's health snapshot.
type ModuleHealth struct {
	ModuleName string       `json:"module_name"`
	Status     HealthStatus `json:"status"`
	StatusCode int          `json:"status_code"` // module-specific numeric detail code
	LastUpdate string       `json:"last_update"` // timestamp of the last report; format set by the producer
}
|
package main
import (
"bufio"
"errors"
"fmt"
"io"
"unicode"
)
// Parser is a predictive recursive-descent translator that converts infix
// expressions over single-digit operands (e.g. "9-5+2") into postfix
// notation written to w.
type Parser struct {
	w         io.Writer     // destination for the emitted postfix output
	r         *bufio.Reader // buffered source of input runes
	lookahead rune          // current lookahead token; 0 once input is exhausted
}
// NewParser constructs a Parser that reads its input tokens from r and
// writes the translated postfix output to w.
func NewParser(w io.Writer, r io.Reader) *Parser {
	p := &Parser{w: w}
	p.r = bufio.NewReader(r)
	return p
}
// Parse translates one infix expression from the parser's input into postfix
// form on p.w. It primes the lookahead with the first rune and then delegates
// to the expr production; any read or syntax error is returned.
func (p *Parser) Parse() error {
	if err := p.next(); err != nil {
		return err
	}
	return p.expr()
}
// next advances the lookahead by reading one rune from the input.
// io.EOF is deliberately swallowed: at end of input ReadRune yields the zero
// rune, so p.lookahead becomes 0, which matches no grammar case and lets
// parsing terminate cleanly. Any other read error is returned.
func (p *Parser) next() error {
	c, _, err := p.r.ReadRune()
	if err != nil && err != io.EOF {
		return err
	}
	p.lookahead = c
	return nil
}
// expr parses the production  expr -> term { ('+'|'-') term }  and emits the
// postfix translation: each operator is written after its right operand.
// It returns nil when the lookahead is neither '+' nor '-' (end of the
// expression), and propagates any syntax or write error.
//
// Cleanup: the '+' and '-' branches were token-for-token identical except for
// the operator rune, so they are merged into one case that captures the
// operator before consuming it. Behavior is unchanged.
func (p *Parser) expr() error {
	if err := p.term(); err != nil {
		return err
	}
	for {
		switch p.lookahead {
		case '+', '-':
			op := p.lookahead
			if err := p.match(op); err != nil {
				return err
			}
			if err := p.term(); err != nil {
				return err
			}
			if _, err := fmt.Fprintf(p.w, "%c", op); err != nil {
				return err
			}
		default:
			return nil
		}
	}
}
// term parses a single-digit operand: the lookahead must be a decimal digit,
// which is echoed to the output writer and then consumed. A non-digit
// lookahead is reported as a syntax error.
func (p *Parser) term() error {
	digit := p.lookahead
	if !unicode.IsDigit(digit) {
		return errors.New("syntax error")
	}
	if _, err := fmt.Fprintf(p.w, "%c", digit); err != nil {
		return err
	}
	return p.match(digit)
}
// match consumes the lookahead when it equals the expected rune c and
// advances to the next input rune; otherwise it reports a syntax error.
func (p *Parser) match(c rune) error {
	if p.lookahead == c {
		return p.next()
	}
	return errors.New("syntax error")
}
|
/*
Copyright 2021 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"context"
"fmt"
"io"
stdlog "log"
"net/http/httptest"
"path/filepath"
"runtime"
"testing"
"github.com/google/go-containerregistry/pkg/crane"
"github.com/google/go-containerregistry/pkg/registry"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/random"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/build/ko"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/config"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/platform"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/latest"
)
// TestBuildAndPushKoImageProgrammatically exercises the ko artifact builder
// end to end against a throwaway in-process registry: it pushes a random base
// image there, builds the `examples/ko` sample app using that base image, and
// pushes the result back to the same registry. Runs only when integration
// tests that do not need GCP are enabled.
func TestBuildAndPushKoImageProgrammatically(t *testing.T) {
	MarkIntegrationTest(t, CanRunWithoutGcp)
	// Start a local registry server.
	// This registry hosts the base image, and it is the target registry for the built image.
	baseimageNamespace := "baseimage"
	registryServer, err := registryServerWithImage(baseimageNamespace)
	if err != nil {
		t.Fatalf("could not create test registry server: %v", err)
	}
	defer registryServer.Close()
	registryAddr := registryServer.Listener.Addr().String()
	baseImage := fmt.Sprintf("%s/%s", registryAddr, baseimageNamespace)
	// Get the directory of the basic ko sample app from the `examples` directory.
	exampleAppDir, err := koExampleAppDir()
	if err != nil {
		t.Fatalf("could not get ko example app dir: %+v", err)
	}
	// Build the artifact.
	// NOTE(review): the meaning of the nil arguments and the `true` flag is
	// not visible here — confirm against the ko.NewArtifactBuilder signature.
	b := ko.NewArtifactBuilder(nil, true, config.RunModes.Build, nil)
	artifact := &latest.Artifact{
		ArtifactType: latest.ArtifactType{
			KoArtifact: &latest.KoArtifact{
				BaseImage: baseImage,
			},
		},
		Workspace: exampleAppDir,
	}
	// Target the same local registry for the built image.
	imageName := fmt.Sprintf("%s/%s", registryAddr, "skaffold-ko")
	_, err = b.Build(context.Background(), nil, artifact, imageName, platform.Matcher{})
	if err != nil {
		t.Fatalf("b.Build(): %+v", err)
	}
}
// registryServerWithImage starts a local registry and pushes a random image.
// Use this to speed up tests, by not having to reach out to a real registry.
// The registry uses a NOP logger to avoid spamming test logs.
// Remember to call `defer Close()` on the returned `httptest.Server`.
//
// Fix: the error from image.ConfigFile() was silently discarded; on failure
// configFile would be nil and the field assignments below would panic. The
// error is now returned.
func registryServerWithImage(namespace string) (*httptest.Server, error) {
	nopLog := stdlog.New(io.Discard, "", 0)
	r := registry.New(registry.Logger(nopLog))
	s := httptest.NewServer(r)
	imageName := fmt.Sprintf("%s/%s", s.Listener.Addr().String(), namespace)
	image, err := random.Image(1024, 1)
	if err != nil {
		return nil, fmt.Errorf("random.Image(): %+v", err)
	}
	// ko 0.12 starts to validate the image platform, and the randomly generated
	// image lacks that info, so we have to set it on the image ourselves.
	// image.ConfigFile() returns a deep copy, so mutate.ConfigFile() is needed
	// to write the platform info back.
	configFile, err := image.ConfigFile()
	if err != nil {
		return nil, fmt.Errorf("image.ConfigFile(): %+v", err)
	}
	configFile.OS = runtime.GOOS
	configFile.Architecture = runtime.GOARCH
	// NOTE(review): runtime.Version() is the Go toolchain version, not an OS
	// version — confirm this is what the platform check expects.
	configFile.OSVersion = runtime.Version()
	image, err = mutate.ConfigFile(image, configFile)
	if err != nil {
		return nil, fmt.Errorf("failed to mutate image: %+v", err)
	}
	err = crane.Push(image, imageName)
	if err != nil {
		return nil, fmt.Errorf("crane.Push(): %+v", err)
	}
	return s, nil
}
// koExampleAppDir returns the directory path of the basic ko builder sample app.
// The path is resolved relative to this source file's own location.
func koExampleAppDir() (string, error) {
	_, currentFile, _, ok := runtime.Caller(0)
	if !ok {
		return "", fmt.Errorf("could not get current filename")
	}
	sourceDir := filepath.Dir(currentFile)
	appDir, err := filepath.Abs(filepath.Join(sourceDir, "examples", "ko"))
	if err != nil {
		return "", fmt.Errorf("could not get absolute path of example from basepath %q: %w", sourceDir, err)
	}
	return appDir, nil
}
|
package sharedobj
import (
"github.com/cyberark/secretless-broker/pkg/secretless/log"
"github.com/cyberark/secretless-broker/pkg/secretless/plugin"
"github.com/cyberark/secretless-broker/pkg/secretless/plugin/connector/http"
"github.com/cyberark/secretless-broker/pkg/secretless/plugin/connector/tcp"
)
// pluginConflictMessage is the Panicf format used when an external plugin's
// ID collides with an internal plugin ID (args: plugin type, plugin ID).
const pluginConflictMessage = "%s plugin ID '%s' conflicts with an existing internal plugin"

// CompatiblePluginAPIVersion indicates what matching API version an external plugin
// must have so that Secretless is capable of loading it.
var CompatiblePluginAPIVersion = "0.1.0"
// IsHTTPPlugin uses AvailablePlugins to determine if a pluginId is an HTTP
// plugin.
func IsHTTPPlugin(availPlugins plugin.AvailablePlugins, pluginID string) bool {
	// HTTPPlugins returns a map keyed by plugin ID, so membership is a
	// single lookup rather than a scan over all keys.
	_, ok := availPlugins.HTTPPlugins()[pluginID]
	return ok
}
// AllAvailablePlugins returns the full list of internal and external plugins
// available to the broker.
//
// It is a convenience wrapper around AllAvailablePluginsWithOptions using the
// default internal (GetInternalPluginsFunc) and external (ExternalPlugins)
// lookup functions.
func AllAvailablePlugins(
	pluginDir string,
	checksumsFile string,
	logger log.Logger,
) (plugin.AvailablePlugins, error) {
	return AllAvailablePluginsWithOptions(
		pluginDir,
		checksumsFile,
		GetInternalPluginsFunc,
		ExternalPlugins,
		logger,
	)
}
// checkPluginIDConflicts asserts that a given plugin ID is not used
// by any internal HTTP or TCP plugin. On a conflict it panics through
// logger.Panicf with pluginConflictMessage.
func checkPluginIDConflicts(
	pluginType string, // "HTTP" or "TCP"
	pluginID string,
	internalPlugins plugin.AvailablePlugins,
	logger log.Logger) {
	if _, found := internalPlugins.HTTPPlugins()[pluginID]; found {
		logger.Panicf(pluginConflictMessage, pluginType, pluginID)
	}
	if _, found := internalPlugins.TCPPlugins()[pluginID]; found {
		logger.Panicf(pluginConflictMessage, pluginType, pluginID)
	}
}
// AllAvailablePluginsWithOptions returns the full list of internal and external
// plugins available to the broker using explicitly-defined lookup functions.
//
// An external plugin whose ID collides with an internal plugin ID causes a
// panic via logger.Panicf (see checkPluginIDConflicts).
func AllAvailablePluginsWithOptions(
	pluginDir string,
	checksumsFile string,
	internalLookupFunc InternalPluginLookupFunc,
	externalLookupFunc ExternalPluginLookupFunc, // renamed from externalLookupfunc for mixedCaps consistency
	logger log.Logger,
) (plugin.AvailablePlugins, error) {
	allHTTPPlugins := map[string]http.Plugin{}
	allTCPPlugins := map[string]tcp.Plugin{}

	// Assemble internal plugins. Plugin IDs for internal plugins are
	// assumed to be unique because their definitions are hardcoded.
	internalPlugins, err := InternalPlugins(internalLookupFunc)
	if err != nil {
		return nil, err
	}
	for pluginID, httpPlugin := range internalPlugins.HTTPPlugins() {
		allHTTPPlugins[pluginID] = httpPlugin
	}
	for pluginID, tcpPlugin := range internalPlugins.TCPPlugins() {
		allTCPPlugins[pluginID] = tcpPlugin
	}

	// Assemble external plugins. Check whether the plugin ID for each
	// external plugin conflicts with any plugin IDs of internal plugins.
	// (Checks for uniqueness among external HTTP and TCP plugins is
	// done elsewhere, i.e. as external plugins are discovered.)
	externalPlugins, err := externalLookupFunc(pluginDir, checksumsFile, logger)
	if err != nil {
		return nil, err
	}
	for pluginID, httpPlugin := range externalPlugins.HTTPPlugins() {
		checkPluginIDConflicts("HTTP", pluginID, internalPlugins, logger)
		allHTTPPlugins[pluginID] = httpPlugin
	}
	for pluginID, tcpPlugin := range externalPlugins.TCPPlugins() {
		checkPluginIDConflicts("TCP", pluginID, internalPlugins, logger)
		allTCPPlugins[pluginID] = tcpPlugin
	}

	return &Plugins{
		HTTPPluginsByID: allHTTPPlugins,
		TCPPluginsByID:  allTCPPlugins,
	}, nil
}
// NewPlugins creates a new instance of Plugins with both maps
// initialized but empty.
func NewPlugins() Plugins {
	return Plugins{
		HTTPPluginsByID: map[string]http.Plugin{},
		TCPPluginsByID:  map[string]tcp.Plugin{},
	}
}
// Plugins represent a holding object for a bundle of plugins of different types.
type Plugins struct {
	// HTTPPluginsByID maps plugin ID to HTTP connector plugin.
	HTTPPluginsByID map[string]http.Plugin
	// TCPPluginsByID maps plugin ID to TCP connector plugin.
	TCPPluginsByID map[string]tcp.Plugin
}
// HTTPPlugins returns only the HTTP plugins in the Plugins struct.
// Note: this returns the internal map itself, not a copy.
func (plugins *Plugins) HTTPPlugins() map[string]http.Plugin {
	return plugins.HTTPPluginsByID
}
// TCPPlugins returns only the TCP plugins in the Plugins struct.
// Note: this returns the internal map itself, not a copy.
func (plugins *Plugins) TCPPlugins() map[string]tcp.Plugin {
	return plugins.TCPPluginsByID
}
|
package checkifbst
import (
"golangexercises/binarytrees/fillfromarray"
"sort"
"testing"
)
// TestBST checks IsBST on two trees filled from the same values: one built
// from an unsorted array (expected: not a BST) and one built after sorting
// the array (expected: a BST).
func TestBST(t *testing.T) {
	intArray := []int{2, 15, 3, 4, 5, 11, 6, 7, 9, 8, 10, 12, 1, 13, 14}
	bstRoot1 := fillfromarray.CreateTree(intArray)
	if IsBST(bstRoot1) {
		// the old message ("Not a BST") described the expectation, not the
		// failure, which made diagnosing a red test confusing
		t.Errorf("IsBST() = true, want false for tree built from unsorted input")
	}
	sort.Ints(intArray)
	bstRoot2 := fillfromarray.CreateTree(intArray)
	if !IsBST(bstRoot2) {
		t.Errorf("IsBST() = false, want true for tree built from sorted input")
	}
}
|
/*
Copyright (c) 2014
Dario Brandes
Thies Johannsen
Paul Kröger
Sergej Mann
Roman Naumann
Sebastian Thobe
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* -*- Mode: Go; indent-tabs-mode: t; c-basic-offset: 4; tab-width: 4 -*- */
package client
import (
"../core/crypto/auth"
"../core/db"
"../external"
"../logger"
"../sync/protocol"
"crypto/rsa"
"errors"
"fmt"
"net"
"sync"
"time"
)
// OnionConnection is a TCP connection to a hidden-service peer, reached
// through the local Tor SOCKS proxy.
type OnionConnection struct {
	*net.TCPConn
	// Onion is the remote peer's onion address.
	Onion string
	// Established is true once the proxy successfully connected to the onion.
	Established bool
}
// ConnectToOnion dials the local Tor SOCKS proxy (localhost:9050) and asks it
// to connect to the given onion address. The returned connection's
// Established flag is true only when both steps succeeded.
func ConnectToOnion(onion string) (OnionConnection, error) {
	// step 1: connect to TOR proxy
	// (the resolve errors were previously discarded, which could pass a nil
	// remote address to DialTCP)
	laddr, err := net.ResolveTCPAddr("tcp", "localhost")
	if err != nil {
		return OnionConnection{nil, onion, false}, err
	}
	raddr, err := net.ResolveTCPAddr("tcp", "localhost:9050")
	if err != nil {
		return OnionConnection{nil, onion, false}, err
	}
	conn, err := net.DialTCP("tcp", laddr, raddr)
	if err != nil {
		return OnionConnection{conn, onion, false}, err
	}
	// step 2: tell TOR proxy to connect to onion address
	if err = socks.Connect(conn, onion); err != nil {
		conn.Close()
		return OnionConnection{conn, onion, false}, err
	}
	// success
	return OnionConnection{conn, onion, true}, nil
}
// Auth performs the challenge/response handshake with the remote peer using
// the given RSA key: it sends an auth request carrying our public key,
// answers the peer's challenge, and waits for a SUCCESS packet.
func (conn OnionConnection) Auth(key *rsa.PrivateKey) error {
	// 1. send auth request (write errors were previously ignored)
	pkg := protocol.EncodeAuth(key.PublicKey)
	if _, err := conn.Write(pkg); err != nil {
		return errors.New("error while sending auth request: " + err.Error())
	}
	// 2. read and check challenge
	header, err := protocol.ReadHeader(conn)
	if err != nil {
		return errors.New("error while reading challenge header: " + err.Error())
	}
	if header.PacketType != protocol.CHALLENGE {
		return errors.New("wrong package type waiting for challenge")
	}
	buffer := make([]byte, 16384)
	if int(header.PacketLength) > len(buffer) {
		// guard: a corrupt/malicious length would make the slice
		// expressions below panic
		return errors.New("challenge payload exceeds buffer size")
	}
	_ = protocol.ReadPayload(conn, buffer[:header.PacketLength])
	challenge, err := protocol.DecodeChallenge(buffer[:header.PacketLength])
	if err != nil {
		return errors.New("could not decode challenge" + err.Error())
	}
	// 3. build and send response
	response, err := auth.GenerateResponse(challenge,
		key,
		conn.Onion)
	if err != nil {
		// was the garbled string "could not build resonionsponse"
		return errors.New("could not build response: " + err.Error())
	}
	pkg = protocol.EncodeResponse(&response)
	if _, err := conn.Write(pkg); err != nil {
		return errors.New("error while sending response: " + err.Error())
	}
	// 4. await success notification
	if header, err = protocol.ReadHeader(conn); err != nil {
		return errors.New("could not receive success notification: " + err.Error())
	}
	if header.PacketType != protocol.SUCCESS {
		return fmt.Errorf("expected success packet, but got %s instead",
			header.PacketType)
	}
	return nil
}
// Pull requests all posts and profiles newer than timestamp from the peer
// and collects them until a SUCCESS packet ends the stream. The receive
// buffer starts at 64 KiB and grows on demand; payloads over 16 MiB abort
// the pull.
func (conn OnionConnection) Pull(timestamp int64) ([]db.Post, []db.Profile, error) {
	posts := []db.Post{}
	profiles := []db.Profile{}
	conn.Write(protocol.EncodePull(timestamp))
	// default length of post: 64 Kilobyte, relocation is implemented
	length := uint32(65536)
	buffer := make([]byte, length)
	for {
		header, err := protocol.ReadHeader(conn)
		if err != nil {
			return posts,
				profiles,
				errors.New("error while receiving posts: " + err.Error())
		}
		if 16777216 < header.PacketLength { // if payload greater than 16 Megabyte
			return posts,
				profiles,
				errors.New("Received payload is greater than 16 Megabyte")
		} else if length < header.PacketLength { // relocate buffer if required
			buffer = nil // garbage collection help
			length = header.PacketLength
			buffer = make([]byte, length)
		}
		if header.PacketType == protocol.SUCCESS {
			// finished, no more replies
			return posts, profiles, nil
		} else if header.PacketType == protocol.PUSH_POST {
			_ = protocol.ReadPayload(conn, buffer[:header.PacketLength])
			post, err := protocol.DecodePushPost(
				buffer[:header.PacketLength],
				conn.Onion)
			if err != nil {
				return posts,
					profiles,
					errors.New("decode of post failed: " + err.Error())
			}
			posts = append(posts, post)
		} else if header.PacketType == protocol.PUSH_PROFILE {
			_ = protocol.ReadPayload(conn, buffer[:header.PacketLength])
			profile, err := protocol.DecodePushProfile(
				buffer[:header.PacketLength],
				conn.Onion)
			if err != nil {
				// was "decode of post failed" — copy-pasted from the post
				// branch; this branch decodes a profile
				return posts,
					profiles,
					errors.New("decode of profile failed: " + err.Error())
			}
			profiles = append(profiles, profile)
		} else {
			return posts,
				profiles,
				fmt.Errorf("expected push post, but got %s\n", header.PacketType)
		}
	}
}
// Trigger sends a TRIGGER packet and waits for the peer to acknowledge it
// with SUCCESS.
func (conn OnionConnection) Trigger() error {
	if _, err := conn.Write(protocol.EncodeTrigger()); err != nil {
		return errors.New("error while sending TRIGGER: " + err.Error())
	}
	header, err := protocol.ReadHeader(conn)
	if err != nil {
		return errors.New("error while waiting for SUCCESS: " + err.Error())
	}
	if header.PacketType != protocol.SUCCESS {
		// fmt.Sprint renders the packet type sensibly whether it is numeric
		// or a string; string(n) on a numeric type yields an unrelated rune
		// (go vet flags that conversion)
		return errors.New("expected SUCCESS, but got " + fmt.Sprint(header.PacketType))
	}
	return nil
}
// ContactRequest sends the given contact request to the peer and waits for a
// SUCCESS acknowledgement.
func (conn OnionConnection) ContactRequest(cr protocol.ContactRequest) error {
	if _, err := conn.Write(protocol.EncodeContactRequest(cr)); err != nil {
		return errors.New("error while sending contact request " + err.Error())
	}
	header, err := protocol.ReadHeader(conn)
	if err != nil {
		return errors.New("error while waiting for SUCCESS message " + err.Error())
	}
	if header.PacketType != protocol.SUCCESS {
		// fmt.Sprint instead of string(...): safe for numeric packet types
		return errors.New("expected success message but received " + fmt.Sprint(header.PacketType))
	}
	return nil
}
// TriggerHandling connects to each given onion, authenticates with our key,
// and sends a TRIGGER packet. Failures on one onion are logged (or, for
// connect failures, skipped silently) and the remaining onions are still
// tried.
func TriggerHandling(dbconn *db.SSNDB, onions []db.Onion) {
	key := dbconn.GetKey()
	for _, onion := range onions {
		onionconn, err := ConnectToOnion(onion.Onion)
		if err != nil {
			// connect failures are intentionally skipped without logging
			continue
		}
		if err = onionconn.Auth(key); err != nil {
			logger.Warning(fmt.Sprintf("authentication fail: to %s addr (TRIGGER)", onion.Onion))
		} else if err = onionconn.Trigger(); err != nil {
			logger.Warning(fmt.Sprintf("could not send a TRIGGER to %s addr", onion.Onion))
		}
		onionconn.Close()
	}
}
// ContactRequestHandling sends a contact request carrying our own onion
// address and the contact's stored request message to the given contact.
func ContactRequestHandling(contact *db.Contact, myOnion *db.Onion) error {
	if contact == nil {
		return errors.New("nil argument")
	}
	logger.Debug(fmt.Sprint("SEND CONTACT REQUEST (", contact.Alias, ")"))
	conn, err := ConnectToOnion(contact.Onion.Onion)
	if err != nil {
		return err
	}
	defer conn.Close()
	request := protocol.ContactRequest{
		Message: contact.RequestMessage,
		Onion:   myOnion.Onion,
	}
	return conn.ContactRequest(request)
}
// PullHandling connects to a contact, authenticates (falling back to an
// unauthenticated PULL when auth fails), and pulls all posts and profiles
// newer than lastActivity. On successful auth the contact's status is
// promoted to SUCCESS.
func PullHandling(dbconn *db.SSNDB, lastActivity int64, contact *db.Contact, key *rsa.PrivateKey) ([]db.Post, []db.Profile, error) {
	posts := []db.Post{}
	profiles := []db.Profile{}
	if contact == nil || key == nil {
		return posts, profiles, errors.New("nil argument")
	}
	onionconn, err := ConnectToOnion(contact.Onion.Onion)
	if err != nil {
		return posts, profiles, err
	}
	// Close whichever connection is current when we return. A plain
	// `defer onionconn.Close()` would bind the first connection at defer
	// time and leak the one opened by the re-connect below.
	defer func() { onionconn.Close() }()
	err = onionconn.Auth(key)
	if logger.ConditionalWarning(err, "(authentication fail, trying to PULL without AUTH..)") {
		// close the connection whose handshake failed before re-dialing
		onionconn.Close()
		onionconn, err = ConnectToOnion(contact.Onion.Onion) // needed for PULL request
		if logger.ConditionalWarning(err, "could not conect to onion addr") {
			return posts, profiles, err
		}
	} else {
		// auth successful, set contact's status to SUCCESS
		if contact.Status != db.SUCCESS {
			SetContactToSuccess(contact, dbconn)
		}
	}
	posts, profiles, err = onionconn.Pull(lastActivity)
	if logger.ConditionalWarning(err, "client could not PULL") {
		return posts, profiles, err
	}
	logger.Debug(fmt.Sprint("RECEIVED(", len(posts), " POSTS, ", len(profiles), " PROFILES) from ", contact.Alias))
	return posts, profiles, nil
}
// SyncAllContacts pulls posts and profiles from every contact whose status is
// SUCCESS, PENDING, or FOLLOWING — one goroutine per contact — and sends a
// contact request to contacts still in PENDING state.
func SyncAllContacts(key *rsa.PrivateKey) {
	dbconn := db.SSNDB{}
	dbconn.Init()
	contacts := []db.Contact{}
	dbconn.Where(db.Contact{Status: db.SUCCESS}).Or(db.Contact{Status: db.PENDING}).Or(db.Contact{Status: db.FOLLOWING}).Find(&contacts)
	myOnion := dbconn.GetSelfOnion()
	// A WaitGroup waits for a collection of goroutines to finish.
	var wg sync.WaitGroup
	wg.Add(len(contacts)) // set the WaitGroup counter.
	for _, contact := range contacts {
		go func(dbconn *db.SSNDB, key *rsa.PrivateKey, contact db.Contact, wg *sync.WaitGroup) {
			// Deferring Done (instead of calling it last) guarantees the
			// counter is decremented even if a sync step panics, so the
			// Wait below cannot deadlock.
			defer wg.Done()
			dbconn.Model(&contact).Related(&contact.Onion, "OnionId")
			lastActivity := dbconn.GetContactsLastActivity(&contact)
			posts, profiles, err := PullHandling(dbconn, lastActivity, &contact, key)
			if err == nil {
				dbconn.AddOrUpdateProfiles(profiles)
				dbconn.AddOrUpdatePosts(posts)
				for _, post := range posts {
					TriggerOnReceivingComment(dbconn, &post)
				}
			}
			if db.PENDING == contact.Status {
				// NOTE(review): the error was already ignored in the
				// original (its logging was commented out); kept best-effort
				ContactRequestHandling(&contact, &myOnion)
			}
		}(&dbconn, key, contact, &wg)
	}
	wg.Wait() // blocks until the WaitGroup counter is zero.
	dbconn.Close()
}
// TriggerOnReceivingComment publishes a received comment when it replies to a
// post that we originated, then triggers a sync of all circles the comment
// belongs to so other members learn about it.
func TriggerOnReceivingComment(dbconn *db.SSNDB, comment *db.Post) {
	// ParentId == 0 means this is a top-level post, not a comment
	if comment.ParentId == 0 {
		return
	}
	var parentPost db.Post
	dbconn.First(&parentPost, comment.ParentId)
	// only comments on our own posts are (re-)published by us
	if parentPost.OriginatorId != dbconn.GetSelfOnion().Id {
		return
	}
	comment.PublishedAt = time.Now()
	comment.Published = true
	dbconn.Save(comment)
	circles := dbconn.GetPostCircles(comment)
	TriggerCircles(dbconn, circles)
}
// SetContactToSuccess marks a contact as successfully authenticated, creates
// an app-managed circle named after the contact (if it does not exist yet),
// adds the contact to it, and flags the Pending record so the UI refreshes.
func SetContactToSuccess(contact *db.Contact, dbconn *db.SSNDB) {
	// set contact status to success
	logger.Info("Updating status of contact " + contact.Alias + " to \"success\"")
	contact.Status = db.SUCCESS
	dbconn.Save(contact)
	// add circle for new contact
	circle := db.Circle{Name: contact.Alias, Creator: db.CREATOR_APP}
	dbconn.Find(&circle, circle)
	// a non-zero Id means Find matched an existing circle
	if circle.Id != 0 {
		logger.Warning("circle \"" + circle.Name + "\" already exists")
		return
	}
	logger.Debug("adding circle \"" + circle.Name + "\"")
	dbconn.Create(&circle)
	// add contact to circle
	logger.Debug("adding user " + contact.Alias + " (" + contact.Nickname + ") to circle \"" + circle.Name + "\"\n")
	dbconn.Model(&circle).Association("Contacts").Append(*contact)
	// mark pending contact changes for the UI layer
	// NOTE(review): assumes a Pending row with primary key 1 exists — confirm
	var p db.Pending
	dbconn.Find(&p, 1)
	p.Contacts = true
	dbconn.Save(&p)
}
// TriggerCircles sends a TRIGGER (in a background goroutine) to every
// distinct onion that is a member of any of the given circles, excluding
// ourselves. The special circle "Public" expands to all known contacts.
func TriggerCircles(dbconn *db.SSNDB, circles []db.Circle) {
	// sync trigger
	// we abuse map type as set here, since golang does not have sets...
	// we are only interested in the KEY, we ignore the value
	self := dbconn.GetSelfOnion()
	onionMap := map[db.Onion]bool{}
	for _, circle := range circles {
		// we need all onions in circle for trigger
		var contacts []db.Contact
		if circle.Name == "Public" {
			dbconn.Find(&contacts)
		} else {
			dbconn.Model(&circle).Related(&circle.Contacts, "Contacts")
			contacts = circle.Contacts
		}
		for _, contact := range contacts {
			var onion db.Onion
			dbconn.Model(&contact).Related(&onion, "Onion")
			if onion.Id != self.Id {
				onionMap[onion] = true
			}
		}
	}
	var onionsToTrigger []db.Onion
	// key-only range form; `for k, _ := range` is redundant (gofmt -s)
	for onionKey := range onionMap {
		onionsToTrigger = append(onionsToTrigger, onionKey)
	}
	logger.Debug("I am going to trigger the following onions in a goroutine now: ")
	logger.Debug(fmt.Sprint(onionsToTrigger))
	go TriggerHandling(dbconn, onionsToTrigger)
}
|
/*
Go client for Intuit's Customer Account Data API
*/
package intuit
import (
"encoding/xml"
"fmt"
"github.com/MattNewberry/oauth"
"time"
)
const (
	// XML namespaces used in Intuit CAD request payloads.
	InstitutionXMLNS = "http://schema.intuit.com/platform/fdatafeed/institutionlogin/v1"
	ChallengeXMLNS   = "http://schema.intuit.com/platform/fdatafeed/challenge/v1"
	// BaseURL is the root endpoint of the Customer Account Data API.
	BaseURL = "https://financialdatafeed.platform.intuit.com/v1/"
	// HTTP method names.
	GET    = "GET"
	POST   = "POST"
	DELETE = "DELETE"
	PUT    = "PUT"
	// challengeContextType values distinguishing which call produced an MFA
	// challenge. NOTE(review): iota counts every spec in this const block,
	// so these evaluate to 8 and 9 — only their distinctness matters here.
	updateLoginType = 1 + iota
	discoverAndAddType
)
// SessionConfiguration holds the client configuration for the current
// session; it is set by Configure and scoped to a customer via Scope.
var SessionConfiguration *Configuration

// challengeContextType records which API call produced an MFA challenge
// (update-login vs. discover-and-add).
type challengeContextType int

// InstitutionLogin is the XML payload for logging in to an institution with
// credentials.
type InstitutionLogin struct {
	XMLName     xml.Name    `xml:"InstitutionLogin"`
	XMLNS       string      `xml:"xmlns,attr"`
	Credentials Credentials `xml:"credentials,omitempty"`
}

// InstitutionLoginMFA is the XML payload answering an MFA challenge.
type InstitutionLoginMFA struct {
	XMLName            xml.Name           `xml:"InstitutionLogin"`
	XMLNS              string             `xml:"xmlns,attr"`
	ChallengeResponses ChallengeResponses `xml:"challengeResponses"`
}

// Credentials wraps the list of login credential fields.
type Credentials struct {
	Credentials []Credential
}

// Credential is a single name/value login field (e.g. username or password).
type Credential struct {
	XMLName xml.Name `xml:"credential"`
	Name    string   `xml:"name"`
	Value   string   `xml:"value"`
}

// ChallengeResponses wraps the list of answers to MFA challenges.
type ChallengeResponses struct {
	ChallengeResponses []ChallengeResponse
}

// Challenge is one MFA question with its (possibly empty) answer choices.
type Challenge struct {
	Question string
	Choices  []Choice
}

// ChallengeResponse is a single MFA answer serialized into the v11 namespace.
type ChallengeResponse struct {
	XMLName xml.Name    `xml:"v11:response"`
	XMLNS   string      `xml:"xmlns:v11,attr"`
	Answer  interface{} `xml:",innerxml"`
}

// Choice is one selectable answer for an MFA challenge.
type Choice struct {
	Value interface{}
	Text  string
}

// ChallengeSession carries the state needed to answer an MFA challenge: the
// session/node IDs returned by the server, the parsed challenges, and the
// caller-supplied answers.
type ChallengeSession struct {
	InstitutionId string
	LoginId       string
	SessionId     string
	NodeId        string
	Challenges    []Challenge
	Answers       []interface{}
	contextType   challengeContextType
}

// Configuration holds the OAuth, SAML, and certificate settings needed to
// access the API on behalf of a customer.
type Configuration struct {
	CustomerId          string
	OAuthConsumerKey    string
	OAuthConsumerSecret string
	oAuthToken          *oauth.AccessToken
	SamlProviderId      string
	CertificatePath     string
}
/*
Configure the client for access to your application.
*/
func Configure(configuration *Configuration) {
	// stored at package level and read by every subsequent API call
	SessionConfiguration = configuration
}
/*
Set the customer Id for the current session.
*/
func Scope(id string) {
	if SessionConfiguration == nil {
		SessionConfiguration = new(Configuration)
	}
	SessionConfiguration.CustomerId = id
}
/*
Discover new accounts for a customer, returning an MFA response if applicable.
In practice, the most efficient workflow is to cache the Institutions list and pass the username and password keys to this method. Without doing so, fetching the institution's details will be required.
*/
// On success, accounts holds the decoded "accounts" list. When the request
// fails but still returns data, the failure is treated as an MFA challenge
// and a ChallengeSession is built from it; err stays non-nil in that case so
// callers can distinguish the two outcomes.
func DiscoverAndAddAccounts(institutionId string, username string, password string, usernameKey string, passwordKey string) (accounts []interface{}, challengeSession *ChallengeSession, err error) {
	userCredential := Credential{Name: usernameKey, Value: username}
	passwordCredential := Credential{Name: passwordKey, Value: password}
	credentials := Credentials{Credentials: []Credential{userCredential, passwordCredential}}
	payload := &InstitutionLogin{Credentials: credentials, XMLNS: InstitutionXMLNS}
	data, err := post(fmt.Sprintf("institutions/%v/logins", institutionId), payload, nil, nil)
	if err == nil {
		// Success
		accounts = data.(map[string]interface{})["accounts"].([]interface{})
	} else if data != nil {
		challengeSession = parseChallengeSession(discoverAndAddType, data, err)
		challengeSession.InstitutionId = institutionId
	}
	return
}
/*
Update login information for an account, returning an MFA response if applicable.
*/
// On success, accounts holds the decoded "accounts" list. A failing request
// that still returns data is interpreted as an MFA challenge; err stays
// non-nil in that case so callers can distinguish the two outcomes.
func UpdateLoginAccount(loginId string, username string, password string, usernameKey string, passwordKey string) (accounts []interface{}, challengeSession *ChallengeSession, err error) {
	userCredential := Credential{Name: usernameKey, Value: username}
	passwordCredential := Credential{Name: passwordKey, Value: password}
	credentials := Credentials{Credentials: []Credential{userCredential, passwordCredential}}
	payload := &InstitutionLogin{Credentials: credentials, XMLNS: InstitutionXMLNS}
	data, err := put(fmt.Sprintf("logins/%v?refresh=true", loginId), payload, nil, nil)
	if err == nil {
		// Success
		accounts = data.(map[string]interface{})["accounts"].([]interface{})
	} else if data != nil {
		challengeSession = parseChallengeSession(updateLoginType, data, err)
		challengeSession.LoginId = loginId
	}
	return
}
/*
Return all accounts stored for the scoped customer.
*/
// LoginAccounts lists the accounts attached to the given login Id.
func LoginAccounts(loginId string) ([]interface{}, error) {
	res, err := get(fmt.Sprintf("logins/%v/accounts", loginId), nil)
	if err != nil {
		// previously res was type-asserted unconditionally, which panics
		// when the request fails and res is nil
		return nil, err
	}
	data := res.(map[string]interface{})
	return data["accounts"].([]interface{}), nil
}
/*
When prompted with an MFA challenge, reply with an answer to the challenges.
*/
// RespondToChallenge serializes session.Answers and re-issues the original
// call (discover-and-add or update-login) with the challenge headers set.
func RespondToChallenge(session *ChallengeSession) (data interface{}, err error) {
	// size by Answers, which is what the loop iterates; sizing by
	// Challenges (as before) panics when more answers than challenges are
	// supplied, and padded empty responses when fewer
	responses := make([]ChallengeResponse, len(session.Answers))
	for i, r := range session.Answers {
		responses[i] = ChallengeResponse{Answer: r, XMLNS: ChallengeXMLNS}
	}
	response := ChallengeResponses{ChallengeResponses: responses}
	payload := &InstitutionLoginMFA{ChallengeResponses: response, XMLNS: InstitutionXMLNS}
	headers := map[string][]string{
		"challengeNodeId":    {session.NodeId},
		"challengeSessionId": {session.SessionId},
	}
	switch session.contextType {
	case discoverAndAddType:
		data, err = post(fmt.Sprintf("institutions/%v/logins", session.InstitutionId), payload, nil, headers)
	case updateLoginType:
		data, err = put(fmt.Sprintf("logins/%v", session.LoginId), payload, nil, headers)
	}
	return
}
/*
Return all accounts stored for the scoped customer.
*/
func Accounts() ([]interface{}, error) {
	res, err := get("accounts", nil)
	if err != nil {
		// guard: asserting a nil res (failed request) would panic
		return nil, err
	}
	data := res.(map[string]interface{})
	return data["accounts"].([]interface{}), nil
}
/*
Return a specific account for the scoped customer, given it's Id.
*/
func Account(accountId string) (map[string]interface{}, error) {
	res, err := get(fmt.Sprintf("accounts/%s", accountId), nil)
	if err != nil {
		// guard: asserting a nil res (failed request) would panic
		return nil, err
	}
	data := res.(map[string]interface{})
	accounts := data["accounts"].([]interface{})
	// the API wraps the single account in an "accounts" list
	return accounts[0].(map[string]interface{}), nil
}
/*
Get all transactions for an account, filtered by the given start and end times.
*/
func Transactions(accountId string, start time.Time, end time.Time) (map[string]interface{}, error) {
	params := make(map[string]string)
	const timeFormat = "2006-01-02"
	params["txnStartDate"] = start.Format(timeFormat)
	// was "tnxEndDate" — a typo the API would ignore, making the end-date
	// filter silently ineffective; the documented parameter is txnEndDate
	params["txnEndDate"] = end.Format(timeFormat)
	res, err := get(fmt.Sprintf("accounts/%s/transactions", accountId), params)
	var data map[string]interface{}
	if err == nil {
		data = res.(map[string]interface{})
	}
	return data, err
}
/*
Retrieve all known institutions.
Given the volume of institutions supported, this call can be very time consuming.
*/
func Institutions() ([]interface{}, error) {
	res, err := get("institutions", nil)
	if err != nil {
		// guard: asserting a nil res (failed request) would panic
		return nil, err
	}
	data := res.(map[string]interface{})
	all := data["institution"].([]interface{})
	return all, nil
}
/*
Retrieve an institution's detailed information.
*/
func Institution(institutionId string) (map[string]interface{}, error) {
	res, err := get(fmt.Sprintf("institutions/%s", institutionId), nil)
	var details map[string]interface{}
	if res != nil {
		details = res.(map[string]interface{})
	}
	return details, err
}
/*
Delete the scoped customer and all related accounts.
*/
func DeleteCustomer() error {
	// only the error matters; the response body is discarded
	_, err := request(DELETE, "customers", "", nil, nil)
	return err
}
/*
Delete an account for the scoped customer.
*/
func DeleteAccount(accountId string) error {
	// only the error matters; the response body is discarded
	_, err := request(DELETE, "accounts/"+accountId, "", nil, nil)
	return err
}
// parseChallengeSession builds a ChallengeSession from an MFA challenge
// response. data is the decoded response body; err must be the
// oauth.HTTPExecuteError returned by the failed request, since its response
// headers carry the challenge session and node IDs.
//
// NOTE(review): the err.(oauth.HTTPExecuteError) assertion panics for any
// other error type — callers must only pass errors from oauth execution.
func parseChallengeSession(contextType challengeContextType, data interface{}, err error) *ChallengeSession {
	challengeData := data.(map[string]interface{})
	httpError := err.(oauth.HTTPExecuteError)
	headers := httpError.ResponseHeaders
	var challengeSession = &ChallengeSession{contextType: contextType}
	challengeSession.SessionId = headers.Get("Challengesessionid")
	challengeSession.NodeId = headers.Get("Challengenodeid")
	challengeSession.Challenges = make([]Challenge, 0)
	challenges := challengeData["challenge"].([]interface{})
	for _, c := range challenges {
		chal := c.(map[string]interface{})
		for _, v := range chal {
			vData := v.([]interface{})
			challenge := Challenge{}
			for i, val := range vData {
				if i == 0 {
					// the first entry is the question text
					challenge.Question = val.(string)
					challenge.Choices = make([]Choice, 0)
				} else {
					// remaining entries are the selectable answer choices
					cData := val.(map[string]interface{})
					choice := Choice{Value: cData["val"].(string), Text: cData["text"].(string)}
					challenge.Choices = append(challenge.Choices, choice)
				}
			}
			challengeSession.Challenges = append(challengeSession.Challenges, challenge)
		}
	}
	return challengeSession
}
|
package gwfunc
import (
assert2 "github.com/stretchr/testify/assert"
"testing"
"time"
)
// TestExec_Normal runs a function that finishes well within the limit;
// Timeout is expected to report false (no timeout occurred).
func TestExec_Normal(t *testing.T) {
	var f = func() {
		time.Sleep(1 * time.Second)
	}
	ok := Timeout(f, 5*time.Second)
	assert2.False(t, ok)
}
// TestExec_Timeout runs a function that outlives the limit; Timeout is
// expected to report true (the deadline fired before f finished).
func TestExec_Timeout(t *testing.T) {
	var f = func() {
		time.Sleep(5 * time.Second)
	}
	ok := Timeout(f, 1*time.Second)
	assert2.True(t, ok)
}
// BenchmarkExec_Normal measures Timeout overhead with a near-instant body
// and an equally short deadline.
func BenchmarkExec_Normal(b *testing.B) {
	var f = func() {
		time.Sleep(1 * time.Nanosecond)
	}
	for i := 0; i < b.N; i++ {
		_ = Timeout(f, 1*time.Nanosecond)
	}
}
// BenchmarkExec_Timeout measures Timeout overhead when the body sleeps
// longer than the deadline, i.e. the timeout path.
func BenchmarkExec_Timeout(b *testing.B) {
	var f = func() {
		time.Sleep(5 * time.Nanosecond)
	}
	for i := 0; i < b.N; i++ {
		_ = Timeout(f, 1*time.Nanosecond)
	}
}
|
/*
*Copyright (c) 2019-2021, Alibaba Group Holding Limited;
*Licensed under the Apache License, Version 2.0 (the "License");
*you may not use this file except in compliance with the License.
*You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*Unless required by applicable law or agreed to in writing, software
*distributed under the License is distributed on an "AS IS" BASIS,
*WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*See the License for the specific language governing permissions and
*limitations under the License.
*/
package events
// EventBody
/**
 * @Title: request body for reporting an event (log entry)
 **/
type EventBody struct {
	// EventId is the unique ID of the event within its business domain.
	EventId string `json:"eventId"`
	// EventCode is the event rule code.
	EventCode EventCode `json:"eventCode"`
	// Source identifies where the event originated.
	Source Source `json:"source"`
	// Level is the severity of the event:
	// - CRITICAL : "irreversible error, equivalent to a system outage"
	// - ERROR : "system error, but the flow can continue"
	// - WARN : "does not affect the normal system flow"
	// - INFO : "informational message"
	Level EventLevel `json:"level"`
	// Time is when the event occurred, as a millisecond timestamp.
	Time int64 `json:"time"`
	// Body is the event content, in JSON format.
	Body Body `json:"body"`
}
// Body is the free-form content of an event.
type Body struct {
	// Describe is a description of the event; it is used when alerting.
	Describe string `json:"describe"`
}
// Source describes where an event originated.
type Source struct {
	// ResourceType is the type of the resource.
	ResourceType string `json:"resourceType"`
	// InsName is the instance name.
	InsName string `json:"insName"`
	// From is the English code of the component that detected the issue
	// (each component chooses its own name).
	From string `json:"from"`
	// Ip is the IP address where the event occurred; it may be left empty
	// when the address cannot be determined.
	Ip string `json:"ip"`
}
|
package timer
// TimerManager provides access to scheduled timer tasks.
type TimerManager interface {
	// GetTimerTasks returns a count for the timer tasks at the given block
	// height. NOTE(review): the exact semantics of the returned uint64 are
	// not visible here — confirm against the implementation.
	GetTimerTasks(blockHeight int64) (uint64, error)
}
|
//
// Copyright (c) SAS Institute Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package server
import (
"context"
"encoding/json"
"errors"
"net/http"
"github.com/sassoftware/relic/v7/internal/httperror"
"github.com/sassoftware/relic/v7/internal/zhttp"
"github.com/sassoftware/relic/v7/signers/sigerrors"
"github.com/sassoftware/relic/v7/token"
)
// handleFunc adapts an error-returning handler into an http.HandlerFunc.
//
// If the enclosing http.Server has a WriteTimeout, the request context is
// cancelled when that timeout elapses. A returned error is rendered by the
// error itself when it implements http.Handler, otherwise mapped to an
// RFC 7807 problem via errToProblem, otherwise reported as unhandled.
func handleFunc(f func(http.ResponseWriter, *http.Request) error) http.HandlerFunc {
	return func(rw http.ResponseWriter, req *http.Request) {
		ctx := req.Context()
		if srv, ok := ctx.Value(http.ServerContextKey).(*http.Server); ok && srv.WriteTimeout > 0 {
			// timeout request context when WriteTimeout is reached
			ctx, cancel := context.WithTimeout(req.Context(), srv.WriteTimeout)
			// the defer fires when this closure returns (not when the
			// if-block ends), so the timeout covers the whole request
			defer cancel()
			req = req.WithContext(ctx)
		}
		err := f(rw, req)
		if err == nil {
			return
		}
		if resp, ok := err.(http.Handler); ok {
			resp.ServeHTTP(rw, req)
		} else if h := errToProblem(err); h != nil {
			h.ServeHTTP(rw, req)
		} else {
			zhttp.WriteUnhandledError(rw, req, err, "")
		}
	}
}
// errToProblem maps well-known token/signer errors to problem-style HTTP
// responses; it returns nil for errors it does not recognize.
func errToProblem(err error) http.Handler {
	usageErr := new(token.KeyUsageError)
	if errors.As(err, usageErr) {
		return httperror.Problem{
			Status: http.StatusBadRequest,
			Type:   httperror.ProblemKeyUsage,
			Title:  "Incorrect Key Usage",
			Detail: usageErr.Error(),
		}
	}
	noCert := new(sigerrors.ErrNoCertificate)
	if errors.As(err, noCert) {
		return httperror.NoCertificateError(noCert.Type)
	}
	return nil
}
// writeJSON marshals data and writes it to rw with an application/json
// Content-Type header. The header is only set once marshalling succeeded.
func writeJSON(rw http.ResponseWriter, data interface{}) error {
	encoded, marshalErr := json.Marshal(data)
	if marshalErr != nil {
		return marshalErr
	}
	rw.Header().Set("Content-Type", "application/json")
	_, writeErr := rw.Write(encoded)
	return writeErr
}
|
package requesttotree
import (
"encoding/json"
"log"
"reflect"
)
// NewTree function return new empty tree.
func NewTree() *Tree {
	return new(Tree)
}
// NewEmptyNode function return new empty node
// it can be either root node or normal node.
func NewEmptyNode(root bool) *Node {
	node := &Node{}
	if root {
		node.name = RootName
		node.isRoot = true
	}
	return node
}
// RootName is the reserved name given to the synthetic root node.
const RootName = "root"

// Tree is a parsed JSON document: a synthetic root node plus the raw source
// text it was loaded from.
type Tree struct {
	// node is the root of the tree (nil until Load is called).
	node *Node
	// source is the raw JSON the tree was built from.
	source string
}
// GetNodeByName function find the node based on given
// node name. It returns the *Node (as interface{}), or nil when the tree has
// no root or no node with that name exists.
func (t *Tree) GetNodeByName(name string) interface{} {
	root := t.GetRootNode()
	if root == nil {
		// an unloaded tree has no root; without this guard the recursive
		// search would dereference a nil node
		return nil
	}
	return t.getNodeByName(name, root)
}
// getNodeByName function find the node based on given
// node name, depth-first from node. A nil node is tolerated and reported as
// not found.
func (t *Tree) getNodeByName(name string, node *Node) interface{} {
	if node == nil {
		// guards against a nil root or nil child entries
		return nil
	}
	if node.name == name {
		return node
	}
	for _, e := range node.children {
		if result := t.getNodeByName(name, e); result != nil {
			return result
		}
	}
	return nil
}
// GetRootNode function return root node
// of tree. It is nil until Load has been called.
func (t *Tree) GetRootNode() *Node {
	return t.node
}
// Node is one element of the parsed JSON tree.
type Node struct {
	// name is the JSON key this node was created from (RootName for root).
	name string
	// leaf marks scalar values with no children.
	leaf bool
	// value is the decoded JSON value for this node.
	value interface{}
	// nodeType is the reflect type of value.
	nodeType reflect.Type
	// children are nested nodes for map/slice values.
	children []*Node
	// parent is the containing node (nil for root).
	parent *Node
	// hasChildren mirrors whether children were built for this node.
	hasChildren bool
	// isRoot is true only for the synthetic root node.
	isRoot bool
}
// Load function load the data in tree format.
// A malformed payload is logged and yields a tree with an empty root.
func (t *Tree) Load(raw []byte) *Tree {
	parsed := map[string]interface{}{}
	if err := json.Unmarshal(raw, &parsed); err != nil {
		log.Println(err.Error())
	}
	root := NewEmptyNode(true)
	children := make([]*Node, 0)
	for key, value := range parsed {
		child := NewEmptyNode(false)
		child.createNode(key, reflect.TypeOf(value), value, root)
		children = append(children, child)
	}
	root.children = children
	root.hasChildren = true
	t.source = string(raw)
	t.node = root
	return t
}
// isRootNode function return true if
// selected node is root node.
func (n *Node) isRootNode() bool {
	return n.isRoot
}
// hasChild function return true if selected
// node has child nodes or not
func (n *Node) hasChild() bool {
	return n.hasChildren
}
// GetParentNode function return parent node of selected
// node. It is nil for the root node.
func (n *Node) GetParentNode() *Node {
	return n.parent
}
// createNode populates n from a decoded JSON value and recursively builds
// child nodes for objects (maps) and arrays (slices). Every non-container
// value becomes a leaf node. It returns n for chaining.
//
// Fixes over the previous version:
//   - string and bool kinds previously hit an empty case body and were
//     never marked as leaves (leaf stayed false); every non-container kind
//     is now classified as a leaf.
//   - the redundant `break` statements and string comparison of reflect
//     kinds were replaced with a direct reflect.Kind switch.
func (n *Node) createNode(key string, t reflect.Type, value interface{}, parent *Node) *Node {
	n.name = key
	n.value = value
	n.nodeType = t
	n.parent = parent

	switch t.Kind() {
	case reflect.Map, reflect.Slice:
		n.hasChildren = true
		n.leaf = false
	default:
		// Scalars: string, bool, float64 (the numeric type produced by
		// encoding/json), etc.
		n.hasChildren = false
		n.leaf = true
	}

	if !n.hasChildren {
		return n
	}

	// addChildren builds one child node per member of a JSON object.
	addChildren := func(obj map[string]interface{}) {
		for childKey, childVal := range obj {
			child := NewEmptyNode(false)
			n.children = append(n.children, child.createNode(childKey, reflect.TypeOf(childVal), childVal, n))
		}
	}

	if t.Kind() == reflect.Slice {
		// Arrays are expected to hold objects; each element's members
		// become children of this node (same assumption as before).
		for _, elem := range value.([]interface{}) {
			addChildren(elem.(map[string]interface{}))
		}
		return n
	}
	addChildren(value.(map[string]interface{}))
	return n
}
|
/*
 * Created on Mon Jan 13 2020 10:17:54
 * Author: WuLC
 * EMail: liangchaowu5@gmail.com
 */
// makeConnected returns the minimum number of cables that must be moved to
// connect all n computers, or -1 when there are fewer than n-1 cables
// (in which case connecting everything is impossible). The answer equals
// the number of connected components minus one.
func makeConnected(n int, connections [][]int) int {
	if len(connections) < n-1 {
		return -1
	}
	adjacency := make(map[int][]int, n)
	for _, cable := range connections {
		adjacency[cable[0]] = append(adjacency[cable[0]], cable[1])
		adjacency[cable[1]] = append(adjacency[cable[1]], cable[0])
	}
	visited := make([]int, n)
	components := 0
	for node := 0; node < n; node++ {
		components += dfs(node, adjacency, visited)
	}
	return components - 1
}
// dfs marks every node reachable from i as visited. It returns 1 when i
// starts a previously unvisited component and 0 when i was already seen.
func dfs(i int, graph map[int][]int, visited []int) int {
	if visited[i] != 0 {
		return 0
	}
	visited[i] = 1
	for _, neighbor := range graph[i] {
		if visited[neighbor] == 0 {
			dfs(neighbor, graph, visited)
		}
	}
	return 1
}
|
// noinspection GoStructTag
package test_select
// goDao: generate
// language=PostgreSQL
type GoDao struct { // DAO skeleton: the go:generate tool implements Add from the SQL carried in the field tag.
	Add func(a, b int64) (int64, error) `
select ($1::int8 + $2::int8)::int8 as sum;`
}
//go:generate go run ../..
|
package token
import "cointhink/db"
import "cointhink/proto"
import "log"
// FindByAccountId looks up the token row for the given account and algorun
// pair. It returns the decoded token, or a nil token plus the error when
// the query fails (including the no-rows case from the underlying driver).
func FindByAccountId(accountId string, algorunId string) (*proto.Token, error) {
	log.Printf("token.FindByAccount accountId %+v algorunId %+v", accountId, algorunId)
	item := &proto.Token{}
	err := db.D.Handle.Get(item,
		"select "+Columns+" from "+Table+" where account_id = $1 and algorun_id = $2",
		accountId, algorunId)
	// Early return keeps the happy path unindented (no else after return).
	if err != nil {
		return nil, err
	}
	return item, nil
}
// FindByToken looks up a token row by its token string.
//
// On failure it now returns a nil item alongside the error instead of a
// partially populated value; callers must not use the result when err is
// non-nil.
func FindByToken(token_str string) (*proto.Token, error) {
	item := &proto.Token{}
	err := db.D.Handle.Get(item,
		"select "+Columns+" from "+Table+" where token = $1", token_str)
	if err != nil {
		return nil, err
	}
	return item, nil
}
|
package Week_03
import (
"fmt"
"testing"
)
// res accumulates the permutations produced by backtrack; it is
// package-level state shared between permute and backtrack.
var res = make([][]int, 0)
func permute(nums []int) [][]int {
r := []int{}
backtrack(nums, r)
return res
}
// backtrack extends the partial permutation r with each unused element of
// nums, recording every complete permutation into the package-level res.
func backtrack(nums []int, r []int) {
	if len(r) == len(nums) {
		// Append a copy: r's backing array is mutated by subsequent
		// iterations (`r = r[:len(r)-1]` followed by append), so storing
		// r itself would corrupt previously saved permutations.
		perm := make([]int, len(r))
		copy(perm, r)
		res = append(res, perm)
		return
	}
	for i := 0; i < len(nums); i++ {
		if inSlice(r, nums[i]) {
			continue
		}
		r = append(r, nums[i])
		backtrack(nums, r)
		r = r[:len(r)-1]
	}
}
// inSlice reports whether e occurs anywhere in s.
func inSlice(s []int, e int) bool {
	for i := range s {
		if s[i] == e {
			return true
		}
	}
	return false
}
// Test_a02 smoke-tests permute by printing every permutation of {1,2,3};
// it makes no assertions and exists for manual inspection of the output.
func Test_a02(t *testing.T) {
	nums := []int{1, 2, 3}
	fmt.Println(permute(nums))
}
|
package ravendb
import (
"errors"
"fmt"
"reflect"
)
// TODO: cleanup, possibly rethink entityToJSON
//
// entityToJSON converts session entities to and from their JSON map form.
type entityToJSON struct {
	session *InMemoryDocumentSessionOperations
	// missingDictionary presumably tracks, per entity, JSON fields that
	// could not be mapped onto the entity — TODO confirm against callers.
	missingDictionary map[interface{}]map[string]interface{}
	//private final Map<Object, Map<string, Object>> _missingDictionary = new TreeMap<>((o1, o2) -> o1 == o2 ? 0 : 1);
}
// newEntityToJSON creates an entityToJSON bound to the given session.
// (The previous comment here described session listeners and did not
// belong to this constructor.)
func newEntityToJSON(session *InMemoryDocumentSessionOperations) *entityToJSON {
	return &entityToJSON{
		session: session,
	}
}
// getMissingDictionary returns the per-entity map of unmapped JSON fields
// (may be nil if nothing has been recorded).
func (e *entityToJSON) getMissingDictionary() map[interface{}]map[string]interface{} {
	return e.missingDictionary
}
// convertEntityToJSON converts entity into its JSON map form, writes the
// document metadata from documentInfo into it, and strips the identity
// property. An entity that is already a map[string]interface{} is returned
// unchanged (and unmodified).
func convertEntityToJSON(entity interface{}, documentInfo *documentInfo) map[string]interface{} {
	// maybe we don't need to do anything?
	if v, ok := entity.(map[string]interface{}); ok {
		return v
	}
	jsonNode := structToJSONMap(entity)
	entityToJSONWriteMetadata(jsonNode, documentInfo)
	tryRemoveIdentityProperty(jsonNode)
	return jsonNode
}
// TODO: verify is correct, write a test
func isTypeObjectNode(entityType reflect.Type) bool {
var v map[string]interface{}
typ := reflect.ValueOf(v).Type()
return typ.String() == entityType.String()
}
// setInterfaceToValue assigns v to the value result points at, i.e. it
// performs *result = v via reflection. It assumes v is ptr-to-struct and
// result is ptr-to-ptr-to-struct (other assignment-compatible shapes also
// work). If result holds a nil pointer, the destination is allocated
// first. An incompatible assignment is reported as an error, not a panic.
func setInterfaceToValue(result interface{}, v interface{}) (err error) {
	// reflect.Value.Set panics on incompatible types; this converts the
	// panic into an error.
	// TODO: a cleaner way would be to check instead suppressing a panic by e.g.
	// lifting implementation of func directlyAssignable(T, V *rtype) bool {
	// from reflect package
	defer func() {
		if res := recover(); res != nil {
			fmt.Printf("setInterfaceToValue: panic, res: %v %T\n", res, res)
			if s, ok := res.(string); ok {
				err = errors.New(s)
			} else if panicErr, ok := res.(error); ok {
				err = panicErr
			} else {
				err = fmt.Errorf("%v", res)
			}
		}
	}()
	out := reflect.ValueOf(result)
	outt := out.Type()
	// Allocate the pointed-to value when the caller passed a nil pointer.
	if outt.Kind() == reflect.Ptr && out.IsNil() {
		out.Set(reflect.New(outt.Elem()))
	}
	if outt.Kind() == reflect.Ptr {
		out = out.Elem()
	}
	vin := reflect.ValueOf(v)
	if !out.CanSet() {
		// Fixed: error strings must not end with a newline (go vet).
		return fmt.Errorf("cannot set out %s", out.String())
	}
	out.Set(vin)
	return
}
// mapDup returns a pointer to a shallow copy of m; mutating the copy does
// not affect the original map.
func mapDup(m map[string]interface{}) *map[string]interface{} {
	dup := make(map[string]interface{}, len(m))
	for key, val := range m {
		dup[key] = val
	}
	return &dup
}
// convertToEntity2 decodes document into result, matching result's type.
// result must be a pointer destination (e.g. **map[string]interface{} or
// pointer-to-struct-pointer); the decoded value is stored through it via
// setInterfaceToValue. Map destinations receive a defensive copy of the
// document.
func (e *entityToJSON) convertToEntity2(result interface{}, id string, document map[string]interface{}) error {
	if _, ok := result.(**map[string]interface{}); ok {
		return setInterfaceToValue(result, mapDup(document))
	}
	if _, ok := result.(map[string]interface{}); ok {
		// TODO: is this code path ever executed?
		return setInterfaceToValue(result, document)
	}
	entityType := reflect.TypeOf(result)
	entity, err := makeStructFromJSONMap(entityType, document)
	if err != nil {
		// fmt.Printf("makeStructFromJSONMap() failed with %s\n. Wanted type: %s, document: %v\n", err, entityType, document)
		return err
	}
	trySetIDOnEntity(entity, id)
	//fmt.Printf("result is: %T, entity is: %T\n", result, entity)
	if entity == nil {
		return newIllegalStateError("decoded entity is nil")
	}
	return setInterfaceToValue(result, entity)
}
// convertToEntity converts a JSON object into an entity of entityType and
// stamps the document id onto it. When entityType is the generic JSON map
// type, the document itself is returned untouched.
// TODO: remove in favor of entityToJSONConvertToEntity
func (e *entityToJSON) convertToEntity(entityType reflect.Type, id string, document map[string]interface{}) (interface{}, error) {
	if isTypeObjectNode(entityType) {
		return document, nil
	}
	entity, err := makeStructFromJSONMap(entityType, document)
	if err != nil {
		return nil, err
	}
	trySetIDOnEntity(entity, id)
	return entity, nil
}
// entityToJSONConvertToEntity converts a JSON object into an entity of
// entityType and stamps the document id onto it. It is the free-function
// twin of entityToJSON.convertToEntity (which it is meant to replace).
func entityToJSONConvertToEntity(entityType reflect.Type, id string, document map[string]interface{}) (interface{}, error) {
	if isTypeObjectNode(entityType) {
		return document, nil
	}
	entity, err := makeStructFromJSONMap(entityType, document)
	if err != nil {
		return nil, err
	}
	trySetIDOnEntity(entity, id)
	return entity, nil
}
// entityToJSONWriteMetadata copies document metadata from documentInfo
// into jsonNode under MetadataKey. Metadata values come from
// documentInfo.metadata when present (deep-copied), otherwise from
// documentInfo.metadataInstance; the collection name, when set, is always
// included. A nil documentInfo is a no-op, and nothing is written when
// there is no metadata at all.
func entityToJSONWriteMetadata(jsonNode map[string]interface{}, documentInfo *documentInfo) {
	if documentInfo == nil {
		return
	}
	metadataNode := map[string]interface{}{}
	writeMetadata := false
	switch {
	case len(documentInfo.metadata) > 0:
		writeMetadata = true
		for key, value := range documentInfo.metadata {
			metadataNode[key] = deepCopy(value)
		}
	case documentInfo.metadataInstance != nil:
		writeMetadata = true
		for key, value := range documentInfo.metadataInstance.EntrySet() {
			metadataNode[key] = value
		}
	}
	if collection := documentInfo.collection; collection != "" {
		writeMetadata = true
		metadataNode[MetadataCollection] = collection
	}
	if writeMetadata {
		jsonNode[MetadataKey] = metadataNode
	}
}
/*
//TBD public static object ConvertToEntity(Type entityType, string id, BlittableJsonReaderObject document, DocumentConventions conventions)
}
*/
// tryRemoveIdentityProperty deletes the identity property key from the
// document. It always returns true, whether or not the key was present.
func tryRemoveIdentityProperty(document map[string]interface{}) bool {
	delete(document, IdentityProperty)
	return true
}
/*
public static Object convertToEntity(Class<?> entityClass, String id, ObjectNode document, DocumentConventions conventions) {
try {
Object defaultValue = InMemoryDocumentSessionOperations.getDefaultValue(entityClass);
Object entity = defaultValue;
String documentType = conventions.getJavaClass(id, document);
if (documentType != null) {
Class<?> clazz = Class.forName(documentType);
if (clazz != null && entityClass.isAssignableFrom(clazz)) {
entity = conventions.getEntityMapper().treeToValue(document, clazz);
}
}
if (entity == null) {
entity = conventions.getEntityMapper().treeToValue(document, entityClass);
}
return entity;
} catch (Exception e) {
throw new IllegalStateException("Could not convert document " + id + " to entity of type " + entityClass);
}
}
*/
|
// SPDX-License-Identifier: MIT
package openapi
import "github.com/caixw/apidoc/v7/core"
// sanitizer is implemented by types that can validate their own data,
// returning a *core.Error describing the problem, or nil when valid.
type sanitizer interface {
	Sanitize() *core.Error
}
|
package main
import (
"fmt"
"net/http"
"os"
"github.com/porter-dev/porter/internal/config"
)
// main is a container health probe: it exits with status 1 when either
// the livez or readyz endpoint of the local API server is unreachable or
// answers with an error status, and exits 0 otherwise.
func main() {
	appConf := config.FromEnv()
	if !probe(fmt.Sprintf("http://localhost:%d/api/livez", appConf.Server.Port)) {
		os.Exit(1)
	}
	if !probe(fmt.Sprintf("http://localhost:%d/api/readyz", appConf.Server.Port)) {
		os.Exit(1)
	}
}

// probe issues a GET against url and reports whether it answered with a
// non-error status. The response body is closed (the previous version
// leaked both response bodies).
func probe(url string) bool {
	resp, err := http.Get(url)
	if err != nil {
		return false
	}
	// Close explicitly rather than defer: main exits via os.Exit, which
	// would skip deferred calls.
	ok := resp.StatusCode < http.StatusBadRequest
	resp.Body.Close()
	return ok
}
|
package internal
import (
"log"
"os"
"path/filepath"
)
// Install moves every file listed in the index found in curdir to its
// destination path (relative to home), creating destination directories
// as needed. Any setup error aborts the process via log.Fatal*.
func Install(curdir, home string) {
	index, err := OpenIndex(curdir)
	if err != nil {
		log.Fatal(err)
	}
	for _, file := range index.ListFiles() {
		path := index.Get(file)
		if err := os.MkdirAll(filepath.Join(home, filepath.Dir(path)), os.ModePerm); err != nil {
			// Fixed: log.Fatal("error mkdir", err) concatenated the
			// message and error without a separator; Fatalf formats them.
			log.Fatalf("error mkdir: %v", err)
		}
		// NOTE(review): MoveFile's result (if any) is discarded here —
		// confirm a failed move cannot pass silently.
		MoveFile(filepath.Join(curdir, file), filepath.Join(home, path))
	}
}
|
package manager
import (
"testing"
"github.com/xuperchain/xupercore/kernel/contract"
_ "github.com/xuperchain/xupercore/kernel/contract/kernel"
"github.com/xuperchain/xupercore/kernel/contract/mock"
"github.com/xuperchain/xupercore/kernel/contract/sandbox"
)
// contractConfig enables the default xkernel driver and routes contract
// logs through the mock logger; it is shared by every test in this file.
var contractConfig = &contract.ContractConfig{
	Xkernel: contract.XkernelConfig{
		Enable: true,
		Driver: "default",
	},
	LogDriver: mock.NewMockLogger(),
}
// TestCreate checks that the test helper (and with it the contract
// manager) can be constructed and torn down cleanly.
func TestCreate(t *testing.T) {
	th := mock.NewTestHelper(contractConfig)
	defer th.Close()
}
// TestCreateSandbox verifies that a state sandbox created by the manager
// records a Put in its write set.
func TestCreateSandbox(t *testing.T) {
	th := mock.NewTestHelper(contractConfig)
	defer th.Close()
	m := th.Manager()
	r := sandbox.NewMemXModel()
	state, err := m.NewStateSandbox(&contract.SandboxConfig{
		XMReader: r,
	})
	if err != nil {
		t.Fatal(err)
	}
	// NOTE(review): Put's result (if any) is ignored — confirm it cannot
	// fail here.
	state.Put("test", []byte("key"), []byte("value"))
	// The write should surface as the first entry of the write set.
	if string(state.RWSet().WSet[0].Value) != "value" {
		t.Error("unexpected value")
	}
}
// TestInvoke registers a kernel method on the $hello contract and checks
// that invoking it through the helper succeeds.
func TestInvoke(t *testing.T) {
	th := mock.NewTestHelper(contractConfig)
	defer th.Close()
	m := th.Manager()
	m.GetKernRegistry().RegisterKernMethod("$hello", "Hi", new(helloContract).Hi)
	resp, err := th.Invoke("xkernel", "$hello", "Hi", map[string][]byte{
		"name": []byte("xuper"),
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("%s", resp.Body)
}
// helloContract is a minimal kernel contract used by TestInvoke.
type helloContract struct {
}
// Hi is a kernel-contract method: it writes a fixed key/value into the
// "test" bucket and greets the caller named by the "name" argument.
func (h *helloContract) Hi(ctx contract.KContext) (*contract.Response, error) {
	name := ctx.Args()["name"]
	// NOTE(review): ctx.Put's result (if any) is discarded — confirm the
	// write cannot fail in this context.
	ctx.Put("test", []byte("k1"), []byte("v1"))
	return &contract.Response{
		Body: []byte("hello " + string(name)),
	}, nil
}
|
package main
// main prints a nil int slice with the builtin print, which emits an
// implementation-defined representation (the slice header) to stderr.
func main() {
	var p []int
	print(p)
}
|
package services
import (
"log"
"models"
"github.com/gosimple/slug"
"github.com/microcosm-cc/bluemonday"
"models/status"
"strings"
"config"
"path/filepath"
"utils"
"repository"
"controllers/viewmodels"
"fmt"
"github.com/dustin/go-humanize"
"strconv"
)
// ArticleService provides persistence and query operations for articles,
// delegating image handling and related lookups to the picture, category
// and taxonomy services.
type ArticleService struct {
	repo *repository.Repository
	cs   *CategoryService
	ts   *TaxonomyService
	ps   *PictureService
}
// NewArticleService builds an ArticleService wired to a fresh repository
// and its collaborating services.
func NewArticleService() *ArticleService {
	return &ArticleService{
		repo: repository.NewRepo(),
		cs:   NewCategoryService(),
		ts:   NewTaxonomyService(),
		ps:   NewPictureService(),
	}
}
// SaveArticle persists a new article — plus its optional featured image
// and its category/taxonomy links — inside a single transaction. The
// title is trimmed and slugified, and the content is sanitized with
// bluemonday's UGC policy before storage.
//
// NOTE(review): when tx.Create fails, art.Picture.Url is read even if no
// featured image was attached; if Picture is a pointer type this can
// dereference nil — confirm. Rollback is also called twice on the
// RemoveFile error path.
func (s *ArticleService) SaveArticle(article *models.NewArticle) (*models.Article, error) {
	title := strings.TrimSpace(article.Title)
	art := &models.Article{
		Title:        title,
		Slug:         slug.Make(title),
		Content:      bluemonday.UGCPolicy().Sanitize(article.Content),
		Excerpt:      article.Excerpt,
		Status:       article.Status.String(),
		AuthorID:     utils.ToUInt32(article.Author.ID),
		LastEditorID: utils.ToUInt32(article.Author.ID),
	}
	tx := s.repo.DB().Begin()
	if article.HasFeaturedImage() {
		// The picture file is written to disk before the DB row exists.
		picture, err := s.ps.CreateAndSavePicture(article.FeaturedImage)
		if err != nil {
			tx.Rollback()
			log.Printf("Error saving article Picture: %v", err)
			return nil, err
		}
		art.Picture = picture
		art.Picture.AuthorID = utils.ToUInt32(article.Author.ID)
		art.Picture.LastEditorID = utils.ToUInt32(article.Author.ID)
	}
	if err := tx.Create(art).Error; err != nil {
		tx.Rollback()
		log.Printf("Error creating Article: %v", err)
		// Best-effort cleanup of the already-written image file.
		filename := filepath.Join(config.BasePath(), art.Picture.Url)
		if err := s.ps.RemoveFile(filename); err != nil {
			tx.Rollback()
			return nil, err
		}
		return nil, err
	}
	// Link the article to its categories.
	for _, id := range article.Categories {
		ac := &repository.ArticleCategory{
			ArticleID:  utils.ToUInt32(art.ID),
			CategoryID: utils.ToUInt32(id)}
		if err := tx.Save(ac).Error; err != nil {
			tx.Rollback()
			log.Printf("Error saving ArticleCategory: %v", err)
			return nil, err
		}
	}
	// Link the article to its taxonomies.
	for _, id := range article.Taxonomies {
		at := &repository.ArticleTaxonomy{
			ArticleID:  utils.ToUInt32(art.ID),
			TaxonomyID: utils.ToUInt32(id)}
		if err := tx.Create(at).Error; err != nil {
			tx.Rollback()
			log.Printf("Error saving ArticleTaxonomy: %v", err)
			return nil, err
		}
	}
	tx.Commit()
	return art, nil
}
// GetArticleForSlug fetches a single article by slug.
//
// NOTE(review): the filter also pins Status to Draft, so this only ever
// matches draft articles — confirm Draft (rather than a published status)
// is really intended here and in GetArticles.
func (s *ArticleService) GetArticleForSlug(slug string) (*models.Article, error) {
	filter := &models.Article{
		Slug:   slug,
		Status: status.Draft.String(),
	}
	var article models.Article
	if err := s.repo.FindOne(filter, &article); err != nil {
		log.Printf("Error finding article for slug %s: %v", slug, err)
		return nil, err
	}
	return &article, nil
}
// GetArticles returns articles matching the query.
//
// NOTE(review): the filter pins Status to Draft, so only drafts are
// returned — confirm this is intended (see GetArticleForSlug).
func (s *ArticleService) GetArticles(query models.Query) ([]*models.Article, error) {
	var articles []*models.Article
	filter := &models.Article{
		Status: status.Draft.String(),
	}
	if err := s.repo.FindByQueryAndFilter(query, filter, &articles); err != nil {
		log.Printf("Error finding articles with Query %+v: %v", query, err)
		return nil, err
	}
	return articles, nil
}
// GetArticlesByQuery reads a page of rows from the articles view and maps
// each denormalised row onto a nested viewmodel.
//
// NOTE(review): query.Sort is interpolated directly into the ORDER BY
// clause — if it can ever carry user input this is a SQL injection
// vector; prefer a whitelist of sortable columns.
// NOTE(review): AltText is populated from FeaturedImageCaption here,
// while GetArticleByID uses FeaturedImageDesc — confirm which is meant.
func (s *ArticleService) GetArticlesByQuery(query models.Query) ([]*viewmodels.Article, error) {
	var result []repository.ArticleResult
	err := s.repo.DB().Table(repository.TableViewArticles).
		Limit(query.Total).
		Offset(query.Offset).
		Order(fmt.Sprintf("a.%s", query.Sort)).
		Find(&result).Error
	if err != nil {
		log.Printf("Error finding Articles by Query %+v: %v", query, err)
		return nil, err
	}
	articles := []*viewmodels.Article{}
	for _, a := range result {
		// Flatten the view row into the nested viewmodel.
		art := &viewmodels.Article{}
		art.ID = strconv.Itoa(int(a.ArticleID))
		art.Title = a.Title
		art.Slug = a.Slug
		art.Content = a.Content
		art.Excerpt = a.Excerpt
		art.FeaturedImage.ID = strconv.Itoa(int(a.FeaturedImageID))
		art.FeaturedImage.Caption = a.FeaturedImageCaption
		art.FeaturedImage.AltText = a.FeaturedImageCaption
		art.FeaturedImage.Url = a.FeaturedImageUrl
		art.Author.ID = strconv.Itoa(int(a.AuthorID))
		art.Author.FullName = a.AuthorName
		art.Author.NickName = a.AuthorNickName
		art.Author.Website = a.AuthorWebsite
		art.Author.Biography = a.AuthorBio
		art.Author.ProfilePictureUrl = a.AuthorProfilePicUrl
		art.Editor.ID = strconv.Itoa(int(a.EditorID))
		art.Editor.FullName = a.EditorName
		art.Editor.NickName = a.EditorNickName
		art.Categories = toIDNameSlugs(a.Categories)
		art.Taxonomies = toIDNameSlugs(a.Taxonomies)
		art.Status = a.Status
		art.CreatedAt = humanize.Time(a.CreatedAt)
		art.UpdatedAt = humanize.Time(a.UpdatedAt)
		articles = append(articles, art)
	}
	return articles, nil
}
// GetArticlesByFilter reads a page of rows from the articles view,
// narrowing by author, editor, category, taxonomy and status where those
// filter fields are set, and maps each row onto a viewmodel.
//
// NOTE(review): the row-to-viewmodel mapping duplicates
// GetArticlesByQuery — consider extracting a shared helper.
func (s *ArticleService) GetArticlesByFilter(f repository.Filter) ([]*viewmodels.Article, error) {
	t := s.repo.DB().
		Table(repository.TableViewArticles).
		Limit(f.Total).
		Offset(f.Offset)
	// Each non-empty filter dimension adds a WHERE clause.
	if len(f.Author) > 0 {
		t = t.Where("`vw_articles`.`author_id` IN (?)", f.Author)
	}
	if len(f.Editor) > 0 {
		t = t.Where("`vw_articles`.`editor_id` IN (?)", f.Editor)
	}
	if len(f.Category) > 0 {
		t = t.Where("`vw_articles`.`categories` IN (?)", f.Category)
	}
	if len(f.Taxonomy) > 0 {
		t = t.Where("`vw_articles`.`taxonomies` IN (?)", f.Taxonomy)
	}
	if f.Status != "" {
		t = t.Where("`vw_articles`.`status` = ?", f.Status)
	}
	var result []repository.ArticleResult
	if err := t.Find(&result).Error; err != nil {
		log.Printf("Error finding Articles by Filter %+v: %v", f, err)
		return nil, err
	}
	articles := []*viewmodels.Article{}
	for _, a := range result {
		// Flatten the view row into the nested viewmodel.
		art := &viewmodels.Article{}
		art.ID = strconv.Itoa(int(a.ArticleID))
		art.Title = a.Title
		art.Slug = a.Slug
		art.Content = a.Content
		art.Excerpt = a.Excerpt
		art.FeaturedImage.ID = strconv.Itoa(int(a.FeaturedImageID))
		art.FeaturedImage.Caption = a.FeaturedImageCaption
		art.FeaturedImage.AltText = a.FeaturedImageCaption
		art.FeaturedImage.Url = a.FeaturedImageUrl
		art.Author.ID = strconv.Itoa(int(a.AuthorID))
		art.Author.FullName = a.AuthorName
		art.Author.NickName = a.AuthorNickName
		art.Author.Website = a.AuthorWebsite
		art.Author.Biography = a.AuthorBio
		art.Author.ProfilePictureUrl = a.AuthorProfilePicUrl
		art.Editor.ID = strconv.Itoa(int(a.EditorID))
		art.Editor.FullName = a.EditorName
		art.Editor.NickName = a.EditorNickName
		art.Categories = toIDNameSlugs(a.Categories)
		art.Taxonomies = toIDNameSlugs(a.Taxonomies)
		art.Status = a.Status
		art.CreatedAt = humanize.Time(a.CreatedAt)
		art.UpdatedAt = humanize.Time(a.UpdatedAt)
		articles = append(articles, art)
	}
	return articles, nil
}
// GetArticleBySlug reads one row from the articles view by slug and maps
// it onto a viewmodel.
//
// NOTE(review): AltText is populated from FeaturedImageCaption here,
// while GetArticleByID uses FeaturedImageDesc — confirm which is meant.
func (s *ArticleService) GetArticleBySlug(slug string) (*viewmodels.Article, error) {
	var result repository.ArticleResult
	if err := s.repo.DB().
		Table(repository.TableViewArticles).
		Where("`vw_articles`.`slug` = ?", slug).
		First(&result).Error; err != nil {
		log.Printf("Error finding Article by slug %s: %v", slug, err)
		return nil, err
	}
	// Flatten the view row into the nested viewmodel.
	article := &viewmodels.Article{}
	article.ID = strconv.Itoa(int(result.ArticleID))
	article.Title = result.Title
	article.Slug = result.Slug
	article.Content = result.Content
	article.Excerpt = result.Excerpt
	article.FeaturedImage.ID = strconv.Itoa(int(result.FeaturedImageID))
	article.FeaturedImage.Caption = result.FeaturedImageCaption
	article.FeaturedImage.AltText = result.FeaturedImageCaption
	article.FeaturedImage.Url = result.FeaturedImageUrl
	article.Author.ID = strconv.Itoa(int(result.AuthorID))
	article.Author.FullName = result.AuthorName
	article.Author.NickName = result.AuthorNickName
	article.Author.Website = result.AuthorWebsite
	article.Author.Biography = result.AuthorBio
	article.Author.ProfilePictureUrl = result.AuthorProfilePicUrl
	article.Editor.ID = strconv.Itoa(int(result.EditorID))
	article.Editor.FullName = result.EditorName
	article.Editor.NickName = result.EditorNickName
	article.Categories = toIDNameSlugs(result.Categories)
	article.Taxonomies = toIDNameSlugs(result.Taxonomies)
	article.Status = result.Status
	article.CreatedAt = humanize.Time(result.CreatedAt)
	article.UpdatedAt = humanize.Time(result.UpdatedAt)
	return article, nil
}
// GetArticleByID reads one row from the articles view by numeric id and
// maps it, including full featured-image details, onto a viewmodel.
func (s *ArticleService) GetArticleByID(id uint32) (*viewmodels.Article, error) {
	var result repository.ArticleResult
	if err := s.repo.DB().
		Table("vw_articles").
		Where("`vw_articles`.`article_id` = ?", id).
		First(&result).Error; err != nil {
		// Fixed: id is a uint32, so the format verb must be %d (the old
		// %s printed "%!s(uint32=…)" and fails `go vet`).
		log.Printf("Error finding Article by ID %d: %v", id, err)
		return nil, err
	}
	// Flatten the view row into the nested viewmodel.
	article := &viewmodels.Article{}
	article.ID = strconv.Itoa(int(result.ArticleID))
	article.Title = result.Title
	article.Slug = result.Slug
	article.Content = result.Content
	article.Excerpt = result.Excerpt
	article.FeaturedImage.ID = strconv.Itoa(int(result.FeaturedImageID))
	article.FeaturedImage.Name = result.FeaturedImageName
	article.FeaturedImage.Caption = result.FeaturedImageCaption
	article.FeaturedImage.AltText = result.FeaturedImageDesc
	article.FeaturedImage.Width = result.FeaturedImageWidth
	article.FeaturedImage.Height = result.FeaturedImageHeight
	article.FeaturedImage.Size = humanize.Bytes(uint64(result.FeaturedImageSize))
	article.FeaturedImage.Url = result.FeaturedImageUrl
	article.Author.ID = strconv.Itoa(int(result.AuthorID))
	article.Author.FullName = result.AuthorName
	article.Author.NickName = result.AuthorNickName
	article.Author.Website = result.AuthorWebsite
	article.Author.Biography = result.AuthorBio
	article.Author.ProfilePictureUrl = result.AuthorProfilePicUrl
	article.Editor.ID = strconv.Itoa(int(result.EditorID))
	article.Editor.FullName = result.EditorName
	article.Editor.NickName = result.EditorNickName
	article.Categories = toIDNameSlugs(result.Categories)
	article.Taxonomies = toIDNameSlugs(result.Taxonomies)
	article.Status = result.Status
	article.CreatedAt = humanize.Time(result.CreatedAt)
	article.UpdatedAt = humanize.Time(result.UpdatedAt)
	return article, nil
}
// toIDNameSlug parses an "id:name:slug" triple. Any input that does not
// contain exactly three colon-separated fields yields the zero value.
func toIDNameSlug(s string) struct{ ID, Name, Slug string } {
	parts := strings.Split(s, ":")
	if len(parts) != 3 {
		return struct{ ID, Name, Slug string }{}
	}
	return struct{ ID, Name, Slug string }{
		ID:   parts[0],
		Name: parts[1],
		Slug: parts[2],
	}
}
// toIDNameSlugs parses a comma-separated list of "id:name:slug" triples,
// producing one entry per comma-separated segment (malformed segments
// become zero values, matching toIDNameSlug).
func toIDNameSlugs(s string) []struct{ ID, Name, Slug string } {
	segments := strings.Split(s, ",")
	out := make([]struct{ ID, Name, Slug string }, 0, len(segments))
	for _, segment := range segments {
		out = append(out, toIDNameSlug(segment))
	}
	return out
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package storage
import (
"bytes"
"fmt"
"sort"
"unsafe"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts/ctpb"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
)
// SingleStorage stores and manages closed timestamp updates originating from a
// single source (i.e. node). A SingleStorage internally maintains multiple
// buckets for historical closed timestamp information. The reason for this is
// twofold:
//
// 1. The most recent closed timestamp update is also the hardest to prove a
// read for, since it comes with larger minimum lease applied indexes. In
// situations in which followers are lagging behind with their command
// application, this could lead to a runaway scenario, in which a closed
// timestamp update can never be used until it is replaced by a new one, which
// in turn also will never be used, etc. Instead, a SingleStorage keeps some
// amount of history and upstream systems can try to prove a follower read using
// an older closed timestamp instead.
//
// 2. Follower reads can be used to implement recovery of a consistent
// cluster-wide snapshot after catastrophic loss of quorum. To do this, the
// mechanism must locate at least one replica of every range in the cluster, and
// for each range find the largest possible timestamp at which follower reads
// are possible among the surviving replicas. Of all these per-range timestamps,
// the smallest can be used to read from all ranges, resulting in a consistent
// snapshot. This makes it crucial that every replica can serve at least some
// follower reads, even when regularly outpaced by the closed timestamp
// frontier. Emitted MLAIs may never even be proposed to Raft in the event of
// an ill-timed crash, and so historic information is invaluable.
//
// TODO(tschottdorf): revisit whether this shouldn't be a concrete impl instead,
// with only the buckets abstracted out.
type SingleStorage interface {
	// fmt.Stringer renders a human-readable dump of the buckets; it is
	// used by MultiStorage.String / StringForNodes.
	fmt.Stringer
	// VisitAscending walks through the buckets of the storage in ascending
	// closed timestamp order, until the closure returns true (or all buckets
	// have been visited).
	VisitAscending(func(ctpb.Entry) (done bool))
	// VisitDescending walks through the buckets of the storage in descending
	// closed timestamp order, until the closure returns true (or all buckets
	// have been visited).
	VisitDescending(func(ctpb.Entry) (done bool))
	// Add adds a new Entry to this storage. The entry is added to the most
	// recent bucket and remaining buckets are rotated as indicated by their age
	// relative to the newly added Entry.
	Add(ctpb.Entry)
	// Clear removes all Entries from this storage.
	Clear()
}
// entry is the concrete value stored in MultiStorage's IntMap: it wraps a
// SingleStorage so the interface value can sit behind an unsafe.Pointer.
type entry struct {
	SingleStorage
}
// MultiStorage implements the closedts.Storage interface, lazily
// maintaining one SingleStorage per NodeID.
type MultiStorage struct {
	// constructor creates a SingleStorage whenever one is initialized for a new
	// NodeID.
	constructor func() SingleStorage
	// TODO(tschottdorf): clean up storages that haven't been used for extended
	// periods of time.
	// m maps int64(NodeID) -> *entry.
	m syncutil.IntMap
}

// Compile-time assertion that *MultiStorage satisfies closedts.Storage.
var _ closedts.Storage = (*MultiStorage)(nil)
// NewMultiStorage sets up a MultiStorage which uses the given factory method
// for setting up the SingleStorage used for each individual NodeID for which
// operations are received. Storages are created lazily, on first use.
func NewMultiStorage(constructor func() SingleStorage) *MultiStorage {
	return &MultiStorage{constructor: constructor}
}
// getOrCreate returns the SingleStorage for nodeID, creating and
// registering one via the constructor on first use.
func (ms *MultiStorage) getOrCreate(nodeID roachpb.NodeID) SingleStorage {
	key := int64(nodeID)
	p, found := ms.m.Load(key)
	if found {
		// Fast path that avoids calling f().
		return (*entry)(p).SingleStorage
	}
	ss := ms.constructor()
	// LoadOrStore returns the winner when another goroutine raced us here,
	// so every caller observes the same SingleStorage for a given key.
	p, _ = ms.m.LoadOrStore(key, unsafe.Pointer(&entry{ss}))
	return (*entry)(p).SingleStorage
}
// VisitAscending implements closedts.Storage: it walks nodeID's buckets in
// ascending closed timestamp order, creating the storage if needed.
func (ms *MultiStorage) VisitAscending(nodeID roachpb.NodeID, f func(ctpb.Entry) (done bool)) {
	ms.getOrCreate(nodeID).VisitAscending(f)
}
// VisitDescending implements closedts.Storage: it walks nodeID's buckets
// in descending closed timestamp order, creating the storage if needed.
func (ms *MultiStorage) VisitDescending(nodeID roachpb.NodeID, f func(ctpb.Entry) (done bool)) {
	ms.getOrCreate(nodeID).VisitDescending(f)
}
// Add implements closedts.Storage: it records entry in the storage for
// nodeID, creating that storage if needed.
func (ms *MultiStorage) Add(nodeID roachpb.NodeID, entry ctpb.Entry) {
	ms.getOrCreate(nodeID).Add(entry)
}
// Clear implements closedts.Storage: it wipes the entries of every
// per-node storage (the storages themselves remain registered).
func (ms *MultiStorage) Clear() {
	ms.m.Range(func(_ int64, p unsafe.Pointer) bool {
		(*entry)(p).Clear()
		return true // visit every storage
	})
}
// String prints a tabular rundown of the contents of the MultiStorage,
// covering every known node.
func (ms *MultiStorage) String() string {
	return ms.StringForNodes()
}
// StringForNodes is like String, but restricted to the supplied NodeIDs.
// If none are specified, is equivalent to String().
func (ms *MultiStorage) StringForNodes(nodes ...roachpb.NodeID) string {
	type nodeStorage struct {
		roachpb.NodeID
		SingleStorage
	}
	// An empty filter set means "print every node".
	filter := make(map[roachpb.NodeID]struct{}, len(nodes))
	for _, nodeID := range nodes {
		filter[nodeID] = struct{}{}
	}
	var selected []nodeStorage
	ms.m.Range(func(k int64, p unsafe.Pointer) bool {
		nodeID := roachpb.NodeID(k)
		if _, ok := filter[nodeID]; ok || len(filter) == 0 {
			selected = append(selected, nodeStorage{nodeID, (*entry)(p).SingleStorage})
		}
		return true // keep iterating
	})
	// Sort by NodeID for deterministic output (IntMap iteration order is
	// not specified).
	sort.Slice(selected, func(i, j int) bool {
		return selected[i].NodeID < selected[j].NodeID
	})
	var buf bytes.Buffer
	for _, ns := range selected {
		fmt.Fprintf(&buf, "***** n%d *****\n", ns.NodeID)
		buf.WriteString(ns.SingleStorage.String())
	}
	return buf.String()
}
|
// ------------------------------------------------------ {COPYRIGHT-TOP} ---
// Licensed Materials - Property of IBM
// 5900-AEO
//
// Copyright IBM Corp. 2020, 2021. All Rights Reserved.
//
// US Government Users Restricted Rights - Use, duplication, or
// disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
// ------------------------------------------------------ {COPYRIGHT-END} ---
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"log"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"github.ibm.com/automation-base-pak/abp-demo-cartridge/pkg/config"
"github.ibm.com/automation-base-pak/abp-demo-cartridge/pkg/operator"
"github.ibm.com/automation-base-pak/abp-demo-cartridge/pkg/producer"
"github.ibm.com/automation-base-pak/abp-demo-cartridge/pkg/server"
// +kubebuilder:scaffold:imports
)
// main dispatches to the operator, server, or producer entry point based
// on the configured FUNCTION value.
func main() {
	cfg, err := config.Parse()
	if err != nil {
		// Include the underlying error; it was previously discarded.
		log.Fatalf("failed to parse environment configuration: %v", err)
	}
	switch cfg.Function {
	case "operator":
		operator.Start(cfg)
	case "server":
		server.Start()
	case "producer":
		producer.Start(cfg)
	default:
		log.Fatalf("FUNCTION \"%s\" not recognised", cfg.Function)
	}
}
|
package logs
import (
"github.com/sirupsen/logrus"
)
// Logger wraps a logrus.Logger, embedding it so all logrus methods are
// available directly on the wrapper.
type Logger struct {
	*logrus.Logger
}
// Get returns a Logger configured for the named level (see getLogLevel)
// and with caller reporting enabled.
func Get(level string) *Logger {
	base := logrus.New()
	base.SetLevel(getLogLevel(level))
	base.SetReportCaller(true)
	return &Logger{base}
}
// getLogLevel maps a level name to the corresponding logrus level;
// unknown names fall back to ErrorLevel.
func getLogLevel(ll string) logrus.Level {
	levels := map[string]logrus.Level{
		"panic": logrus.PanicLevel,
		"fatal": logrus.FatalLevel,
		"error": logrus.ErrorLevel,
		"warn":  logrus.WarnLevel,
		"info":  logrus.InfoLevel,
		"debug": logrus.DebugLevel,
		"trace": logrus.TraceLevel,
	}
	if lvl, ok := levels[ll]; ok {
		return lvl
	}
	return logrus.ErrorLevel
}
|
package main
import (
"fmt"
)
// isCancelled reports whether the class is cancelled: it is cancelled
// when fewer than threshold students arrived on time, where a timing
// value <= 0 means on time.
func isCancelled(threshold int, timings []int) bool {
	onTime := 0
	for _, timing := range timings {
		if timing <= 0 {
			onTime++
		}
	}
	return onTime < threshold
}
// main demonstrates isCancelled against a sample set of arrival timings.
func main() {
	arrivalTimes := []int{-1, -3, 4, 2}
	fmt.Println(isCancelled(3, arrivalTimes))
}
|
package model
// Body carries the identifiers used when rendering generated code:
// the package name, the struct name, and its alias.
type Body struct {
	Package string
	Struct  string
	Alias   string
}
|
// description : Echoing the program arguments
// author : Tom Geudens (https://github.com/tomgeudens/)
// modified : 2016/06/26
//
package main
import(
"fmt"
"os"
)
// main echoes the program arguments on one line, separated by spaces.
func main() {
	var line string
	for i, arg := range os.Args[1:] {
		if i > 0 {
			line += " "
		}
		line += arg
	}
	fmt.Println(line)
}
|
package model
// List is a sortable collection of intervals; it implements
// sort.Interface, ordering intervals by Interval.Before.
type List []*Interval

// Len returns the number of intervals in the list.
func (list List) Len() int {
	return len(list)
}

// Less reports whether the interval at i sorts before the one at j.
func (list List) Less(i, j int) bool {
	return list[i].Before(list[j])
}

// Swap exchanges the intervals at positions i and j.
func (list List) Swap(i, j int) {
	list[i], list[j] = list[j], list[i]
}
|
package grpc
import (
"context"
"encoding/json"
"github.com/gogo/protobuf/jsonpb"
"github.com/gogo/protobuf/proto"
"github.com/iancoleman/strcase"
)
// Parse converts a request proto message into model. When model is itself
// a proto.Message, the JSON round-trip goes through jsonpb; otherwise the
// top-level keys are converted to snake_case and decoded with
// encoding/json. It returns the populated model.
//
// Fixed: the intermediate json.Unmarshal/json.Marshal errors were
// previously ignored, so a decode failure could silently produce an
// empty or partial model.
func Parse(ctx context.Context, request interface{}, model interface{}) (interface{}, error) {
	protoMessage := request.(proto.Message)
	m := jsonpb.Marshaler{}
	stringProtoMessage, err := m.MarshalToString(protoMessage)
	if err != nil {
		return nil, err
	}
	mapString := make(map[string]interface{})
	if err = json.Unmarshal([]byte(stringProtoMessage), &mapString); err != nil {
		return nil, err
	}
	if pm, ok := model.(proto.Message); ok {
		jsonByte, err := json.Marshal(mapString)
		if err != nil {
			return nil, err
		}
		if err = jsonpb.UnmarshalString(string(jsonByte), pm); err != nil {
			return nil, err
		}
		return pm, nil
	}
	// Non-proto models use snake_case keys.
	for key, value := range mapString {
		mapString[strcase.ToSnake(key)] = value
	}
	jsonByte, err := json.Marshal(mapString)
	if err != nil {
		return nil, err
	}
	if err = json.Unmarshal(jsonByte, model); err != nil {
		return nil, err
	}
	return model, nil
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
import (
"fmt"
"runtime"
"strings"
"github.com/google/gapid/core/codegen"
"github.com/google/gapid/core/log"
"github.com/google/gapid/gapil/compiler/mangling"
"github.com/google/gapid/gapil/semantic"
)
// refRel bundles the generated reference() and release() functions for a
// single reference-counted type.
type refRel struct {
	name      string
	reference *codegen.Function // void T_reference(T)
	release   *codegen.Function // void T_release(T)
}
// declare forward-declares the reference and release functions (named ref
// and rel) taking a value of type ty. Both are link-once-ODR (duplicate
// declarations across modules collapse to one) and marked inline.
func (f *refRel) declare(c *C, name, ref, rel string, ty codegen.Type) {
	f.reference = c.M.Function(c.T.Void, ref, ty).LinkOnceODR().Inline()
	f.release = c.M.Function(c.T.Void, rel, ty).LinkOnceODR().Inline()
	f.name = name
}
// delegate makes f's reference and release functions forward their calls to
// the corresponding functions of to.
func (f *refRel) delegate(c *C, to refRel) {
	c.Delegate(f.reference, to.reference)
	c.Delegate(f.release, to.release)
}
// build emits the bodies of the previously declared reference and release
// functions using three callbacks:
//   isNull    - returns a predicate that is true when the value holds no object;
//   getRefPtr - returns a pointer to the value's reference-count field;
//   del       - emits the code that destroys the value.
// Both functions are no-ops for null values and fatally log when they detect
// a count of zero on entry (use-after-free / over-release). release destroys
// the object when the count reaches zero.
func (f *refRel) build(
	c *C,
	isNull func(s *S, val *codegen.Value) *codegen.Value,
	getRefPtr func(s *S, val *codegen.Value) *codegen.Value,
	del func(s *S, val *codegen.Value),
) {
	c.Build(f.reference, func(s *S) {
		val := s.Parameter(0)
		// Null values carry no refcount; nothing to do.
		s.If(isNull(s, val), func(s *S) {
			s.Return(nil)
		})
		refPtr := getRefPtr(s, val)
		oldCount := refPtr.Load()
		// A zero count here means the object was already released.
		s.If(s.Equal(oldCount, s.Scalar(uint32(0))), func(s *S) {
			c.Log(s, log.Fatal, "Attempting to reference released "+f.name+" (%p)", refPtr)
		})
		newCount := s.Add(oldCount, s.Scalar(uint32(1)))
		if debugRefCounts {
			c.LogI(s, f.name+" %p ref_count: %d -> %d", refPtr, oldCount, newCount)
		}
		refPtr.Store(newCount)
	})
	c.Build(f.release, func(s *S) {
		val := s.Parameter(0)
		// Null values carry no refcount; nothing to do.
		s.If(isNull(s, val), func(s *S) {
			s.Return(nil)
		})
		refPtr := getRefPtr(s, val)
		oldCount := refPtr.Load()
		// Releasing at zero means an unbalanced release.
		s.If(s.Equal(oldCount, s.Scalar(uint32(0))), func(s *S) {
			c.Log(s, log.Fatal, "Attempting to release "+f.name+" with no remaining references! (%p)", refPtr)
		})
		newCount := s.Sub(oldCount, s.Scalar(uint32(1)))
		if debugRefCounts {
			c.LogI(s, f.name+" %p ref_count: %d -> %d", refPtr, oldCount, newCount)
		}
		refPtr.Store(newCount)
		// Last reference gone: destroy the object.
		s.If(s.Equal(newCount, s.Scalar(uint32(0))), func(s *S) {
			del(s, val)
		})
	})
}
// refRels holds the reference/release function pairs for every
// reference-counted type across all compiled APIs.
type refRels struct {
	tys   map[semantic.Type]refRel // Delegate on to impls
	impls map[semantic.Type]refRel // Implementations of lowered map types
}
// slicePrototype is the shared key under which the common slice
// reference/release implementation is registered for all slice types.
var slicePrototype = &semantic.Slice{}
// declareRefRels declares all the reference type's reference() and release()
// functions.
// Slices, strings, any and message get fixed well-known implementations;
// every other reference-counted API type gets a per-type pair that delegates
// to a shared implementation keyed by the mangled lowered type.
func (c *C) declareRefRels() {
	c.refRels = refRels{
		tys:   map[semantic.Type]refRel{},
		impls: map[semantic.Type]refRel{},
	}
	sli := refRel{}
	sli.declare(c, "slice", "gapil_slice_reference", "gapil_slice_release", c.T.Sli)
	c.refRels.tys[slicePrototype] = sli
	c.refRels.impls[slicePrototype] = sli
	str := refRel{}
	str.declare(c, "string", "gapil_string_reference", "gapil_string_release", c.T.StrPtr)
	c.refRels.tys[semantic.StringType] = str
	c.refRels.impls[semantic.StringType] = str
	any := refRel{name: "any", reference: c.callbacks.anyReference, release: c.callbacks.anyRelease}
	c.refRels.tys[semantic.AnyType] = any
	c.refRels.impls[semantic.AnyType] = any
	msg := refRel{name: "message", reference: c.callbacks.msgReference, release: c.callbacks.msgRelease}
	c.refRels.tys[semantic.MessageType] = msg
	c.refRels.impls[semantic.MessageType] = msg
	// isRefTy reports whether ty (after stripping aliases) is reference
	// counted: strings, slices, references, maps, and classes that
	// transitively contain any of those.
	var isRefTy func(ty semantic.Type) bool
	isRefTy = func(ty semantic.Type) bool {
		ty = semantic.Underlying(ty)
		if ty == semantic.StringType {
			return true
		}
		switch ty := ty.(type) {
		case *semantic.Slice, *semantic.Reference, *semantic.Map:
			return true
		case *semantic.Class:
			for _, f := range ty.Fields {
				if isRefTy(f.Type) {
					return true
				}
			}
		}
		return false
	}
	// Forward declare all the reference types.
	// impls is a map of type mangled type name to the public reference and
	// release functions.
	// This is used to deduplicate types that have the same underlying key and
	// value LLVM types when lowered.
	impls := map[string]refRel{}
	for _, api := range c.APIs {
		declare := func(apiTy semantic.Type) {
			cgTy := c.T.Target(apiTy)
			apiTy = semantic.Underlying(apiTy)
			switch apiTy {
			case semantic.StringType:
				// Already implemented
			default:
				switch apiTy := apiTy.(type) {
				case *semantic.Slice:
					c.refRels.tys[apiTy] = sli
				default:
					if isRefTy(apiTy) {
						name := fmt.Sprintf("%v_%v", api.Name(), apiTy.Name())
						// Use the mangled name of the type to determine whether
						// the reference and release functions have already been
						// declared for the lowered type.
						m := c.Mangle(cgTy)
						mangled := c.Mangler(m)
						impl, seen := impls[mangled]
						if !seen {
							// First instance of this lowered type. Declare it.
							ref := c.Mangler(&mangling.Function{
								Name:       "reference",
								Parent:     m.(mangling.Scope),
								Parameters: []mangling.Type{m},
							})
							rel := c.Mangler(&mangling.Function{
								Name:       "release",
								Parent:     m.(mangling.Scope),
								Parameters: []mangling.Type{m},
							})
							impl.declare(c, name, ref, rel, cgTy)
							impls[mangled] = impl
							c.refRels.impls[apiTy] = impl
						}
						// Delegate the reference and release functions of this type
						// on to the common implementation.
						funcs := refRel{}
						funcs.declare(c, name, name+"_reference", name+"_release", cgTy)
						funcs.delegate(c, impl)
						c.refRels.tys[apiTy] = funcs
					}
				}
			}
		}
		for _, ty := range api.Slices {
			declare(ty)
		}
		for _, ty := range api.Maps {
			declare(ty)
		}
		for _, ty := range api.References {
			declare(ty)
		}
		for _, ty := range api.Classes {
			declare(ty)
		}
	}
}
// buildRefRels implements all the reference type's reference() and release()
// functions.
// Only entries in refRels.impls are built here; per-type functions declared
// by declareRefRels delegate to these shared implementations.
// NOTE(review): the closures below capture the loop variable apiTy; this is
// only correct if c.Build invokes its callback synchronously (or Go >= 1.22
// per-iteration loop variables apply) — appears true from usage, but confirm.
func (c *C) buildRefRels() {
	r := c.refRels.impls
	// Slices: null check / refcount / destructor all go through the pool.
	sli := r[slicePrototype]
	sli.build(c,
		func(s *S, sli *codegen.Value) *codegen.Value {
			poolPtr := sli.Extract(SlicePool)
			return s.Equal(poolPtr, s.Zero(poolPtr.Type()))
		},
		func(s *S, sli *codegen.Value) *codegen.Value {
			poolPtr := sli.Extract(SlicePool)
			return poolPtr.Index(0, PoolRefCount)
		},
		func(s *S, sli *codegen.Value) {
			poolPtr := sli.Extract(SlicePool)
			s.Call(c.callbacks.freePool, poolPtr)
		})
	str := r[semantic.StringType]
	str.build(c,
		func(s *S, strPtr *codegen.Value) *codegen.Value {
			return s.Equal(strPtr, s.Zero(c.T.StrPtr))
		},
		func(s *S, strPtr *codegen.Value) *codegen.Value {
			return strPtr.Index(0, StringRefCount)
		},
		func(s *S, strPtr *codegen.Value) {
			s.Call(c.callbacks.freeString, strPtr)
		})
	for _, api := range c.APIs {
		for _, apiTy := range api.Maps {
			if funcs, ok := r[apiTy]; ok {
				funcs.build(c,
					func(s *S, mapPtr *codegen.Value) *codegen.Value {
						return mapPtr.IsNull()
					},
					func(s *S, mapPtr *codegen.Value) *codegen.Value {
						return mapPtr.Index(0, MapRefCount)
					},
					func(s *S, mapPtr *codegen.Value) {
						// Destructor: clear entries, then free via the map's arena.
						s.Arena = mapPtr.Index(0, MapArena).Load().SetName("arena")
						s.Call(c.T.Maps[apiTy].Clear, mapPtr)
						c.Free(s, mapPtr)
					})
			}
		}
		for _, apiTy := range api.References {
			if funcs, ok := r[apiTy]; ok {
				funcs.build(c,
					func(s *S, refPtr *codegen.Value) *codegen.Value {
						return refPtr.IsNull()
					},
					func(s *S, refPtr *codegen.Value) *codegen.Value {
						return refPtr.Index(0, RefRefCount)
					},
					func(s *S, refPtr *codegen.Value) {
						// Destructor: release the referenced value, then free.
						s.Arena = refPtr.Index(0, RefArena).Load().SetName("arena")
						c.doRelease(s, refPtr.Index(0, RefValue).Load(), apiTy.To)
						c.Free(s, refPtr)
					})
			}
		}
		for _, apiTy := range api.Classes {
			if funcs, ok := r[apiTy]; ok {
				// Classes are not themselves counted: reference/release just
				// forwards to each reference-counted field.
				refFields := []*semantic.Field{}
				for _, f := range apiTy.Fields {
					ty := semantic.Underlying(f.Type)
					if _, ok := c.refRels.tys[ty]; ok {
						refFields = append(refFields, f)
					}
				}
				c.Build(funcs.reference, func(s *S) {
					val := s.Parameter(0)
					for _, f := range refFields {
						c.doReference(s, val.Extract(f.Name()), f.Type)
					}
				})
				c.Build(funcs.release, func(s *S) {
					val := s.Parameter(0)
					for _, f := range refFields {
						c.doRelease(s, val.Extract(f.Name()), f.Type)
					}
				})
			}
		}
	}
}
// caller returns a space-separated list of "file:line" frames describing the
// current call stack, starting two frames above this function and walking at
// most eight frames up. Directory components are stripped from file names.
func caller() string {
	var frames []string
	for depth := 2; depth < 10; depth++ {
		_, file, line, ok := runtime.Caller(depth)
		if !ok {
			break
		}
		if slash := strings.LastIndex(file, "/"); slash > 0 {
			file = file[slash+1:]
		}
		frames = append(frames, fmt.Sprintf("%v:%v", file, line))
	}
	return strings.Join(frames, " ")
}
// reference schedules a reference-count increment for val of type ty.
// It fails fast on a codegen-type mismatch, is a no-op for types that are
// not reference counted, and — unless ref-count optimizations are disabled —
// queues the increment on the scope's pending list so it can cancel against
// a matching release instead of being emitted immediately.
func (c *C) reference(s *S, val *codegen.Value, ty semantic.Type) {
	if got, expect := val.Type(), c.T.Target(ty); got != expect {
		fail("reference() called with a value of an unexpected type. Got %+v, expect %+v", got, expect)
	}
	if !c.isRefCounted(semantic.Underlying(ty)) {
		return
	}
	if debugRefCounts {
		c.LogI(s, fmt.Sprintf("reference(%v: %%v): %v", ty, caller()), val)
	}
	if debugDisableRefCounts {
		// Debug switch: drop all ref-count emission entirely.
		return
	}
	if debugDisableRefCountOpts {
		// Debug switch: emit immediately instead of batching in the scope.
		c.doReference(s, val, ty)
	} else {
		s.pendingRefRels.add(c, val, ty, 1)
	}
}
// release schedules a reference-count decrement for val of type ty.
// Mirrors reference(): fails fast on a codegen-type mismatch, no-ops for
// non-counted types, and queues the decrement on the scope's pending list
// unless ref-count optimizations are disabled.
func (c *C) release(s *S, val *codegen.Value, ty semantic.Type) {
	if got, expect := val.Type(), c.T.Target(ty); got != expect {
		fail("release() called with a value of an unexpected type. Got %+v, expect %+v", got, expect)
	}
	if !c.isRefCounted(semantic.Underlying(ty)) {
		return
	}
	if debugRefCounts {
		c.LogI(s, fmt.Sprintf("release(%v: %%v): %v", ty, caller()), val)
	}
	if debugDisableRefCounts {
		// Debug switch: drop all ref-count emission entirely.
		return
	}
	if debugDisableRefCountOpts {
		// Debug switch: emit immediately instead of batching in the scope.
		c.doRelease(s, val, ty)
	} else {
		s.pendingRefRels.add(c, val, ty, -1)
	}
}
// deferRelease queues a reference-count decrement for val that is emitted
// when the scope's pending changes are applied. Unlike release(), it always
// goes through the pending list — there is no immediate-emission path.
func (c *C) deferRelease(s *S, val *codegen.Value, ty semantic.Type) {
	if got, expect := val.Type(), c.T.Target(ty); got != expect {
		fail("deferRelease() called with a value of an unexpected type. Got %+v, expect %+v", got, expect)
	}
	if !c.isRefCounted(semantic.Underlying(ty)) {
		return
	}
	if debugRefCounts {
		c.LogI(s, fmt.Sprintf("deferRelease(%v: %%v): %v", ty, caller()), val)
	}
	if debugDisableRefCounts {
		// Debug switch: drop all ref-count emission entirely.
		return
	}
	s.pendingRefRels.add(c, val, ty, -1)
}
// pendingRefRels accumulates the reference-count changes queued for a scope
// so that matching increments and decrements on the same value can cancel
// out before any code is emitted.
type pendingRefRels struct {
	c       *C
	indices map[*codegen.Value]int // value -> index into list
	list    []pendingRefRel
}
// pendingRefRel is a pending reference count change when the scope closes.
type pendingRefRel struct {
	val    *codegen.Value
	ty     semantic.Type
	delta  int    // net count change: positive = references, negative = releases
	caller string // capture site; only populated when debugRefCounts is set
}
// add queues a reference-count change of delta for val, folding it into any
// change already pending for the same value. The structure is lazily
// initialized on first use; values of non-reference-counted types are
// ignored.
func (p *pendingRefRels) add(c *C, val *codegen.Value, ty semantic.Type, delta int) {
	if !c.isRefCounted(ty) {
		return
	}
	if p.c == nil {
		// First use in this scope: set up the bookkeeping structures.
		*p = pendingRefRels{
			c:       c,
			indices: map[*codegen.Value]int{},
		}
	}
	if i, seen := p.indices[val]; seen {
		// Already queued: merge the deltas so they can cancel out.
		p.list[i].delta += delta
		return
	}
	p.indices[val] = len(p.list)
	entry := pendingRefRel{
		val:   val,
		ty:    ty,
		delta: delta,
	}
	if debugRefCounts {
		entry.caller = caller()
	}
	p.list = append(p.list, entry)
}
// apply emits the accumulated reference-count changes. All increments are
// emitted before any decrements so a value whose net change is zero can
// never transiently reach a count of zero and be destroyed. Entries with a
// zero delta emit nothing.
func (p *pendingRefRels) apply(s *S) {
	if p.c == nil {
		// add() was never called for this scope; nothing pending.
		return
	}
	if debugRefCounts {
		p.c.LogI(s, "vvvvvvvvvvvvvvvvvvvvvv")
		p.c.LogI(s, "pendingRefRels.apply()")
		for _, r := range p.list {
			if r.delta != 0 {
				p.c.LogI(s, fmt.Sprintf("%+d %v: %%v", r.delta, r.ty), r.val)
			}
		}
		p.c.LogI(s, "^^^^^^^^^^^^^^^^^^^^^^")
	}
	// First pass: one reference() call per positive unit of delta.
	for _, r := range p.list {
		for i := 0; i < r.delta; i++ {
			if f, ok := p.c.refRels.tys[semantic.Underlying(r.ty)]; ok {
				if debugRefCounts {
					p.c.LogI(s, fmt.Sprintf("pending-reference(%v: %%v): %v", r.ty, r.caller), r.val)
				}
				s.Call(f.reference, r.val)
			}
		}
	}
	// Second pass: one release() call per negative unit of delta.
	for _, r := range p.list {
		for i := 0; i > r.delta; i-- {
			if f, ok := p.c.refRels.tys[semantic.Underlying(r.ty)]; ok {
				if debugRefCounts {
					p.c.LogI(s, fmt.Sprintf("pending-release(%v: %%v): %v", r.ty, r.caller), r.val)
				}
				s.Call(f.release, r.val)
			}
		}
	}
}
// doReference immediately emits a call to the reference() function for ty.
// It is a no-op when ty (after stripping aliases) is not reference counted.
func (c *C) doReference(s *S, val *codegen.Value, ty semantic.Type) {
	funcs, ok := c.refRels.tys[semantic.Underlying(ty)]
	if !ok {
		return
	}
	if debugRefCounts {
		c.LogI(s, fmt.Sprintf("doReference(%v: %%v): %v", ty, caller()), val)
	}
	s.Call(funcs.reference, val)
}
// doRelease immediately emits a call to the release() function for ty.
// It is a no-op when ty (after stripping aliases) is not reference counted.
func (c *C) doRelease(s *S, val *codegen.Value, ty semantic.Type) {
	funcs, ok := c.refRels.tys[semantic.Underlying(ty)]
	if !ok {
		return
	}
	if debugRefCounts {
		c.LogI(s, fmt.Sprintf("doRelease(%v: %%v): %v", ty, caller()), val)
	}
	s.Call(funcs.release, val)
}
// isRefCounted reports whether ty (after stripping aliases) has a registered
// reference/release pair, i.e. is a reference-counted type.
func (c *C) isRefCounted(ty semantic.Type) bool {
	_, ok := c.refRels.tys[semantic.Underlying(ty)]
	return ok
}
|
// Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package util
import "reflect"
// EqualPtrFields uses reflection to check two "mirror" structures for matching pointer fields that
// point to the same object. Used to verify cloning/deep copy functions.
//
// Returns the names of equal pointer fields.
func EqualPtrFields(src, dst reflect.Value, prefix string) []string {
t := dst.Type()
if t.Kind() != reflect.Struct {
return nil
}
if srcType := src.Type(); srcType != t {
return nil
}
var res []string
for i := 0; i < t.NumField(); i++ {
srcF, dstF := src.Field(i), dst.Field(i)
switch f := t.Field(i); f.Type.Kind() {
case reflect.Ptr:
if srcF.Interface() == dstF.Interface() {
res = append(res, prefix+f.Name)
}
case reflect.Slice:
if srcF.Pointer() == dstF.Pointer() {
res = append(res, prefix+f.Name)
}
l := dstF.Len()
if srcLen := srcF.Len(); srcLen < l {
l = srcLen
}
for i := 0; i < l; i++ {
res = append(res, EqualPtrFields(srcF.Index(i), dstF.Index(i), f.Name+".")...)
}
case reflect.Struct:
res = append(res, EqualPtrFields(srcF, dstF, f.Name+".")...)
}
}
return res
}
|
/*
Package rpm currently serves no purpose other than to namespace the rpm/spec
package.
In the future, this package may hold functions and structs for working with RPM
packages directly, but for now, it is nothing more than a placeholder.
*/
package rpm
|
/*
* Lean tool - hypothesis testing application
*
* https://github.com/MikaelLazarev/willie/
* Copyright (c) 2020. Mikhail Lazarev
*
*/
package helpers
import (
"bytes"
"encoding/json"
"errors"
"fmt"
errors2 "github.com/MikaelLazarev/willie/server/errors"
"io/ioutil"
"log"
"net"
"net/http"
"time"
)
// apiClient is the concrete ApiClient implementation backed by a shared
// *http.Client.
type apiClient struct {
	host   string // base URL prefixed to every request path
	key    string // bearer token sent with every request
	client *http.Client
}
// ApiClient performs JSON-over-HTTP requests against a fixed host.
type ApiClient interface {
	Request(method, url string, data interface{}, response interface{}) error
}
// NewApiCall builds an ApiClient that talks to host, authenticating every
// request with key as a bearer token. The dial timeout and the overall
// request timeout are both fixed at 10 seconds.
func NewApiCall(host, key string) ApiClient {
	const timeout = 10 * time.Second
	dialer := &net.Dialer{Timeout: timeout}
	httpClient := &http.Client{
		Timeout:   timeout,
		Transport: &http.Transport{DialContext: dialer.DialContext},
	}
	return &apiClient{host: host, key: key, client: httpClient}
}
// Request sends an HTTP request with a JSON body to the configured host and
// decodes the JSON response into response.
//
// data is marshaled as the request body; url is appended to the client's
// host; the client's key is sent as a bearer token. A marshaling failure,
// transport failure, non-2xx status, or decode failure is reported as an
// *errors2.ApiError carrying the relevant status code.
func (s *apiClient) Request(method, url string, data interface{}, response interface{}) error {
	dataJson, err := json.Marshal(data)
	if err != nil {
		return &errors2.ApiError{
			Module:  "helpers/request.go",
			Problem: "Cant marshal data",
			Code:    500,
			Err:     err,
		}
	}
	req, err := http.NewRequest(method, s.host+url, bytes.NewBuffer(dataJson))
	if err != nil {
		return &errors2.ApiError{
			Module:  "helpers/request.go",
			Problem: "Cant make request",
			Code:    500,
			Err:     err,
		}
	}
	// Setting content-type
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+s.key)
	// Making request
	res, err := s.client.Do(req)
	if err != nil {
		log.Println(err)
		return &errors2.ApiError{
			Module:  "helpers/request.go",
			Problem: "Network failure",
			Code:    500,
			Err:     err,
		}
	}
	// Bug fix: the body was never closed, leaking the connection on every
	// call and preventing keep-alive reuse.
	defer res.Body.Close()
	// NOTE(review): logs the full response on every call — looks like a
	// debug leftover; confirm before removing.
	log.Printf("%v", res)
	if res.StatusCode < http.StatusOK || res.StatusCode > 299 {
		var problem string
		switch res.StatusCode {
		case http.StatusNotFound:
			problem = errors2.ErrorRecordNotFound.Error()
		case http.StatusForbidden:
			problem = "Forbidden"
		default:
			problem = "Network error"
		}
		return &errors2.ApiError{
			Module:  "helpers/request.go",
			Problem: problem,
			Code:    res.StatusCode,
			Err:     errors.New(fmt.Sprintf("Network failure with %d code", res.StatusCode)),
		}
	}
	responseBody, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Printf("Error reading body: %v", err)
		return &errors2.ApiError{
			Module:  "helpers/request.go",
			Problem: "Cant get body",
			Code:    500,
			Err:     err,
		}
	}
	err = json.Unmarshal(responseBody, &response)
	if err != nil {
		return &errors2.ApiError{
			Module:  "helpers/request.go",
			Problem: "Cant unmarshal data",
			Code:    500,
			Err:     err,
		}
	}
	return nil
}
|
package engine
import (
"log"
)
// ConcurrentEngine crawls using a pool of worker goroutines coordinated
// through a Scheduler.
type ConcurrentEngine struct {
	Scheduler   Scheduler // distributes requests to workers
	WorkerCount int       // number of worker goroutines started by Run
}
// Scheduler accepts crawl requests and hands them to workers via the channel
// configured with ConfigureMasterWorkerChan.
type Scheduler interface {
	Submit(Request)
	ConfigureMasterWorkerChan(chan Request)
	// WorkerReady(chan Request)
	// Run()
}
// Run starts the engine: it launches WorkerCount workers, submits the seed
// requests to the scheduler, then loops forever consuming parse results,
// logging each item and resubmitting any follow-up requests.
// Note: this method never returns.
func (e *ConcurrentEngine) Run(seeds ...Request) {
	in := make(chan Request)
	out := make(chan ParseResult)
	e.Scheduler.ConfigureMasterWorkerChan(in)
	// Start the worker pool reading from the shared input channel.
	for i := 0; i < e.WorkerCount; i++ {
		createWorker(in, out)
	}
	// Feed the initial requests to the scheduler.
	for _, seed := range seeds {
		e.Scheduler.Submit(seed)
	}
	for {
		result := <-out
		for _, item := range result.Items {
			log.Printf("Got item :%v", item)
		}
		// Follow-up requests discovered while parsing go back to the scheduler.
		for _, request := range result.Requests {
			e.Scheduler.Submit(request)
		}
	}
}
// createWorker launches a goroutine that repeatedly takes a request from in,
// runs the worker on it, and publishes the result on out. Requests that fail
// are logged and skipped. The goroutine runs for the life of the process.
func createWorker(in chan Request, out chan ParseResult) {
	go func() {
		for {
			request := <-in
			result, err := worker(request)
			if err != nil {
				log.Printf("worker error :%s", err.Error())
				continue
			}
			out <- result
		}
	}()
}
|
package main
import (
"database/sql"
"github.com/99designs/gqlgen/handler"
"github.com/deslee/gqlgen_todos"
"github.com/deslee/gqlgen_todos/database"
_ "github.com/mattn/go-sqlite3"
"log"
"net/http"
"os"
)
// defaultPort is used when the PORT environment variable is unset.
const defaultPort = "8080"
// main wires up the todo GraphQL server: it opens the SQLite database,
// ensures the tables exist, and serves the playground at / and the query
// endpoint at /query on $PORT (default 8080). It blocks until the HTTP
// server fails.
func main() {
	db, err := sql.Open("sqlite3", "file:database.db")
	if err != nil {
		panic(err)
	}
	// sql.Open defers connecting; Ping forces a real connection so a bad
	// database path fails fast at startup.
	err = db.Ping()
	if err != nil {
		panic(err)
	}
	todos_db.CreateTablesIfNotExist(db)
	port := os.Getenv("PORT")
	if port == "" {
		port = defaultPort
	}
	http.Handle("/", handler.Playground("GraphQL playground", "/query"))
	http.Handle("/query", handler.GraphQL(gqlgen_todos.NewExecutableSchema(gqlgen_todos.Config{Resolvers: &gqlgen_todos.Resolver{Db: db}})))
	log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
	log.Fatal(http.ListenAndServe(":"+port, nil))
}
|
package fluvio
// Record is a single fluvio record: an offset plus a key/value byte pair.
type Record struct {
	Offset int64  // offset of this record within its stream/partition
	Key    []byte // record key
	Value  []byte // record payload
}
|
package cosmwasm
import (
"encoding/json"
"fmt"
"github.com/CosmWasm/go-cosmwasm/api"
"github.com/CosmWasm/go-cosmwasm/types"
)
// CodeID represents an ID for a given wasm code blob, must be generated from this library
type CodeID []byte
// WasmCode is an alias for raw bytes of the wasm compiled code
type WasmCode []byte
// KVStore is a reference to some sub-kvstore that is valid for one instance of a code
type KVStore = api.KVStore
// GoAPI is a reference to some "precompiles", go callbacks
type GoAPI = api.GoAPI
// Querier lets us make read-only queries on other modules
type Querier = types.Querier
// GasMeter is a read-only version of the sdk gas meter
type GasMeter = api.GasMeter
// Wasmer is the main entry point to this library.
// You should create an instance with its own subdirectory to manage state inside,
// and call it for all cosmwasm code related actions.
type Wasmer struct {
	cache api.Cache // handle to the rust-side cache; released by Cleanup
}
// NewWasmer creates an new binding, with the given dataDir where
// it can store raw wasm and the pre-compile cache.
// cacheSize sets the size of an optional in-memory LRU cache for prepared VMs.
// They allow popular contracts to be executed very rapidly (no loading overhead),
// but require ~32-64MB each in memory usage.
//
// The returned Wasmer holds rust-side resources; call Cleanup when done.
func NewWasmer(dataDir string, supportedFeatures string, cacheSize uint64) (*Wasmer, error) {
	cache, err := api.InitCache(dataDir, supportedFeatures, cacheSize)
	if err != nil {
		return nil, err
	}
	return &Wasmer{cache: cache}, nil
}
// Cleanup should be called when no longer using this to free resources on the rust-side
func (w *Wasmer) Cleanup() {
	api.ReleaseCache(w.cache)
}
// Create will compile the wasm code, and store the resulting pre-compile
// as well as the original code. Both can be referenced later via CodeID
// This must be done one time for given code, after which it can be
// instantiated many times, and each instance called many times.
//
// For example, the code for all ERC-20 contracts should be the same.
// This function stores the code for that contract only once, but it can
// be instantiated with custom inputs in the future.
//
// TODO: return gas cost? Add gas limit??? there is no metering here...
func (w *Wasmer) Create(code WasmCode) (CodeID, error) {
	return api.Create(w.cache, code)
}
// GetCode will load the original wasm code for the given code id.
// This will only succeed if that code id was previously returned from
// a call to Create.
//
// This can be used so that the (short) code id (hash) is stored in the iavl tree
// and the larger binary blobs (wasm and pre-compiles) are all managed by the
// rust library
// It is a thin wrapper over api.GetCode using the shared cache.
func (w *Wasmer) GetCode(code CodeID) (WasmCode, error) {
	return api.GetCode(w.cache, code)
}
// Instantiate creates a new contract instance from the given codeID,
// applying initMsg (the contract "genesis"); the instance then has an
// account and address and can be invoked via Execute many times.
//
// Storage should be a PrefixedKVStore that this code can safely access.
// Under the hood this may recompile the wasm, use a cached native compile,
// or reuse a cached instance for performance.
func (w *Wasmer) Instantiate(
	code CodeID,
	env types.Env,
	initMsg []byte,
	store KVStore,
	goapi GoAPI,
	querier Querier,
	gasMeter GasMeter,
	gasLimit uint64,
) (*types.InitResponse, uint64, error) {
	envBin, err := json.Marshal(env)
	if err != nil {
		return nil, 0, err
	}
	res, gasUsed, err := api.Instantiate(w.cache, code, envBin, initMsg, &gasMeter, store, &goapi, &querier, gasLimit)
	if err != nil {
		return nil, gasUsed, err
	}
	var result types.InitResult
	if err := json.Unmarshal(res, &result); err != nil {
		return nil, gasUsed, err
	}
	// A contract-level failure is carried in the result, not the error.
	if result.Err != nil {
		return nil, gasUsed, fmt.Errorf("%v", result.Err)
	}
	return result.Ok, gasUsed, nil
}
// Execute invokes a previously instantiated contract. Contracts sharing a
// CodeID differ only in their local storage and their outside-world address,
// so no contract ID is needed here — that mapping is the sdk-facing caller's
// concern.
//
// The caller is responsible for passing the correct `store` (which must have
// been initialized exactly once) and an env with relevant info on this
// instance (address, balance, etc).
func (w *Wasmer) Execute(
	code CodeID,
	env types.Env,
	executeMsg []byte,
	store KVStore,
	goapi GoAPI,
	querier Querier,
	gasMeter GasMeter,
	gasLimit uint64,
) (*types.HandleResponse, uint64, error) {
	envBin, err := json.Marshal(env)
	if err != nil {
		return nil, 0, err
	}
	res, gasUsed, err := api.Handle(w.cache, code, envBin, executeMsg, &gasMeter, store, &goapi, &querier, gasLimit)
	if err != nil {
		return nil, gasUsed, err
	}
	var result types.HandleResult
	if err := json.Unmarshal(res, &result); err != nil {
		return nil, gasUsed, err
	}
	// A contract-level failure is carried in the result, not the error.
	if result.Err != nil {
		return nil, gasUsed, fmt.Errorf("%v", result.Err)
	}
	return result.Ok, gasUsed, nil
}
// Query runs a contract-specific read-only query. A non-empty result should
// be valid json-encoded data to hand back to the client; the meaning of the
// message is determined by the contract code (the path is the suffix of the
// abci.QueryRequest.Path).
func (w *Wasmer) Query(
	code CodeID,
	queryMsg []byte,
	store KVStore,
	goapi GoAPI,
	querier Querier,
	gasMeter GasMeter,
	gasLimit uint64,
) ([]byte, uint64, error) {
	res, gasUsed, err := api.Query(w.cache, code, queryMsg, &gasMeter, store, &goapi, &querier, gasLimit)
	if err != nil {
		return nil, gasUsed, err
	}
	var result types.QueryResponse
	if err := json.Unmarshal(res, &result); err != nil {
		return nil, gasUsed, err
	}
	// A contract-level failure is carried in the result, not the error.
	if result.Err != nil {
		return nil, gasUsed, fmt.Errorf("%v", result.Err)
	}
	return result.Ok, gasUsed, nil
}
// Migrate moves an existing contract's data over to a new code binary. It
// takes the storage of the original contract and the CodeID of the
// replacement, letting the new code run a migration step if needed or reject
// the data with an error. migrateMsg carries instructions for the migration.
func (w *Wasmer) Migrate(
	code CodeID,
	env types.Env,
	migrateMsg []byte,
	store KVStore,
	goapi GoAPI,
	querier Querier,
	gasMeter GasMeter,
	gasLimit uint64,
) (*types.MigrateResponse, uint64, error) {
	envBin, err := json.Marshal(env)
	if err != nil {
		return nil, 0, err
	}
	res, gasUsed, err := api.Migrate(w.cache, code, envBin, migrateMsg, &gasMeter, store, &goapi, &querier, gasLimit)
	if err != nil {
		return nil, gasUsed, err
	}
	var result types.MigrateResult
	if err := json.Unmarshal(res, &result); err != nil {
		return nil, gasUsed, err
	}
	// A contract-level failure is carried in the result, not the error.
	if result.Err != nil {
		return nil, gasUsed, fmt.Errorf("%v", result.Err)
	}
	return result.Ok, gasUsed, nil
}
|
package typeutils
import (
"encoding/json"
"fmt"
"testing"
"github.com/stretchr/testify/suite"
)
// These tests demonstrate and validate use of a Registry to marshal/unmarshal JSON.
// JsonTestSuite carries the shared film fixture used by every test case.
type JsonTestSuite struct {
	suite.Suite
	film *filmJson // rebuilt from scratch by SetupTest before each test
}
// testRegistryJson resolves concrete actor types when reconstructing them
// from generic JSON maps.
var testRegistryJson = NewRegistry()
// init registers the fixture types with the registry. Failures are only
// printed because init cannot return an error.
func init() {
	if err := testRegistryJson.Alias("test", filmJson{}); err != nil {
		fmt.Printf("*** Error creating alias: %s\n", err)
	}
	if err := testRegistryJson.Register(&alpha{}); err != nil {
		fmt.Printf("*** Error registering alpha: %s\n", err)
	}
	if err := testRegistryJson.Register(&bravo{}); err != nil {
		fmt.Printf("*** Error registering bravo: %s\n", err)
	}
}
// SetupTest rebuilds the film fixture before each test and installs the
// JSON-based copier hooks used by the registry.
func (suite *JsonTestSuite) SetupTest() {
	copyMapFromItemFn = copyItemToMap
	copyItemFromMapFn = copyMapToItem
	suite.film = &filmJson{Name: "Test JSON", Index: make(map[string]actor)}
	suite.film.Lead = &alpha{Name: "Goober", Percent: 13.23}
	suite.film.addActor("Goober", suite.film.Lead)
	// Two actors carry an unexported 'extra' field that JSON cannot
	// round-trip; TestCycle relies on this.
	suite.film.addActor("Snoofus", &bravo{Finished: false, Iterations: 17, extra: "stuff"})
	suite.film.addActor("Noodle", &alpha{Name: "Noodle", Percent: 19.57, extra: "stuff"})
	suite.film.addActor("Soup", &bravo{Finished: true, Iterations: 79})
}
// TearDownSuite clears the copier hooks once the whole suite has finished.
func (suite *JsonTestSuite) TearDownSuite() {
	copyMapFromItemFn = nil
	copyItemFromMapFn = nil
}
// TestJsonSuite is the go test entry point for JsonTestSuite.
func TestJsonSuite(t *testing.T) {
	suite.Run(t, new(JsonTestSuite))
}
//////////////////////////////////////////////////////////////////////////
// filmJson is the round-trip fixture: a film with a lead actor, a cast
// slice, and a name->actor index.
// NOTE(review): embedding the json.Marshaler/json.Unmarshaler interfaces as
// fields is unusual — the MarshalJSON/UnmarshalJSON methods below are what
// satisfy those interfaces, and the embedded fields appear to stay nil.
// Confirm before changing the struct shape.
type filmJson struct {
	json.Marshaler
	json.Unmarshaler
	Name  string
	Lead  actor
	Cast  []actor
	Index map[string]actor
}
// filmJsonConvert mirrors filmJson with actors replaced by the generic maps
// produced by the registry, giving encoding/json a concrete shape to encode.
type filmJsonConvert struct {
	Name  string
	Lead  interface{}
	Cast  []interface{}
	Index map[string]interface{}
}
// addActor appends act to the cast and indexes it under name.
func (film *filmJson) addActor(name string, act actor) {
	film.Cast = append(film.Cast, act)
	film.Index[name] = act
}
// MarshalJSON implements json.Marshaler for filmJson by converting each
// actor into a registry-tagged map (so its concrete type survives the round
// trip) and then encoding the resulting filmJsonConvert.
func (film *filmJson) MarshalJSON() ([]byte, error) {
	var err error
	convert := filmJsonConvert{
		Name: film.Name,
	}
	if convert.Lead, err = testRegistryJson.ConvertItemToMap(film.Lead); err != nil {
		return nil, fmt.Errorf("converting lead to map: %w", err)
	}
	convert.Cast = make([]interface{}, len(film.Cast))
	for i, member := range film.Cast {
		if convert.Cast[i], err = testRegistryJson.ConvertItemToMap(member); err != nil {
			return nil, fmt.Errorf("converting cast member to map: %w", err)
		}
	}
	convert.Index = make(map[string]interface{}, len(film.Index))
	for key, member := range film.Index {
		if convert.Index[key], err = testRegistryJson.ConvertItemToMap(member); err != nil {
			// Bug fix: this previously said "cast member", making Index
			// failures indistinguishable from Cast failures (UnmarshalJSON
			// already says "index member" for the mirror case).
			return nil, fmt.Errorf("converting index member to map: %w", err)
		}
	}
	return json.Marshal(convert)
}
// UnmarshalJSON implements json.Unmarshaler for filmJson: it decodes into
// the generic filmJsonConvert shape, then rebuilds each actor's concrete
// type from its registry-tagged map via unmarshalActor.
func (film *filmJson) UnmarshalJSON(input []byte) error {
	var err error
	convert := filmJsonConvert{}
	if err = json.Unmarshal(input, &convert); err != nil {
		return fmt.Errorf("unmarshaling input JSON into struct: %w", err)
	}
	film.Name = convert.Name
	if film.Lead, err = film.unmarshalActor(convert.Lead); err != nil {
		return fmt.Errorf("unmarshaling lead actor: %w", err)
	}
	film.Cast = make([]actor, len(convert.Cast))
	for i, member := range convert.Cast {
		if film.Cast[i], err = film.unmarshalActor(member); err != nil {
			return fmt.Errorf("unmarshaling cast member: %w", err)
		}
	}
	film.Index = make(map[string]actor, len(convert.Index))
	for name, member := range convert.Index {
		if film.Index[name], err = film.unmarshalActor(member); err != nil {
			return fmt.Errorf("unmarshaling index member: %w", err)
		}
	}
	return nil
}
// unmarshalActor reconstructs a concrete actor from the generic map produced
// during unmarshaling, using the registry to resolve its original type.
func (film *filmJson) unmarshalActor(input interface{}) (actor, error) {
	actMap, ok := input.(map[string]interface{})
	if !ok {
		return nil, fmt.Errorf("actor input should be map")
	}
	item, err := testRegistryJson.CreateItemFromMap(actMap)
	if err != nil {
		return nil, fmt.Errorf("creating item from map: %w", err)
	}
	act, ok := item.(actor)
	if !ok {
		return nil, fmt.Errorf("item is not an actor")
	}
	return act, nil
}
// copyItemToMap JSON-round-trips fromItem into toMap: the item is marshaled
// and the resulting document is unmarshaled into the supplied map.
func copyItemToMap(toMap map[string]interface{}, fromItem interface{}) error {
	encoded, err := json.Marshal(fromItem)
	if err != nil {
		return fmt.Errorf("marshaling from %v: %w", fromItem, err)
	}
	if err = json.Unmarshal(encoded, &toMap); err != nil {
		return fmt.Errorf("unmarshaling to %v: %w", toMap, err)
	}
	return nil
}
// copyMapToItem JSON-round-trips fromMap into toItem: the map is marshaled
// and the resulting document is unmarshaled into the supplied item, which
// must therefore be a pointer.
func copyMapToItem(toItem interface{}, fromMap map[string]interface{}) error {
	encoded, err := json.Marshal(fromMap)
	if err != nil {
		return fmt.Errorf("marshaling from %v: %w", fromMap, err)
	}
	if err = json.Unmarshal(encoded, toItem); err != nil {
		return fmt.Errorf("unmarshaling to %v: %w", toItem, err)
	}
	return nil
}
//////////////////////////////////////////////////////////////////////////
// TestExample duplicates the YAML test.
// Not directly applicable to this test suite.
// It shows a plain tagged struct round-tripping through Marshal/Unmarshal
// unchanged, without any registry involvement.
func (suite *JsonTestSuite) TestExample() {
	type T struct {
		F int `json:"a,omitempty"`
		B int
	}
	t := T{F: 1, B: 2}
	bytes, err := json.Marshal(t)
	suite.Assert().NoError(err)
	var x T
	suite.Assert().NoError(json.Unmarshal(bytes, &x))
	suite.Assert().Equal(t, x)
}
// TestCycle round-trips the film fixture through MarshalJSON/UnmarshalJSON
// and verifies equality once the unexported (non-serializable) 'extra'
// fields are cleared from the originals.
func (suite *JsonTestSuite) TestCycle() {
	bytes, err := json.Marshal(suite.film)
	suite.Assert().NoError(err)
	//fmt.Printf(">>> marshaled:\n%s\n", string(bytes))
	var film filmJson
	suite.Assert().NoError(json.Unmarshal(bytes, &film))
	suite.Assert().NotEqual(suite.film, &film) // fails due to unexported field 'extra'
	for _, act := range suite.film.Cast {
		// Remove unexported field.
		if alf, ok := act.(*alpha); ok {
			alf.extra = ""
		} else if bra, ok := act.(*bravo); ok {
			bra.extra = ""
		}
	}
	suite.Assert().Equal(suite.film, &film) // succeeds now that unexported fields are gone.
}
|
// osdconfig is a package to work with distributed config parameters
package osdconfig
// A config manager interface allows management of osdconfig parameters.
// It defines setters, getters and callback management functions.
type ConfigManager interface {
	// GetClusterConf fetches cluster configuration data from a backend such as kvdb
	GetClusterConf() (*ClusterConfig, error)
	// GetNodeConf fetches node configuration data using node id
	GetNodeConf(nodeID string) (*NodeConfig, error)
	// SetClusterConf pushes cluster configuration data to the backend
	// It is assumed that the backend will notify the implementor of this interface
	// when a change is triggered
	SetClusterConf(config *ClusterConfig) error
	// SetNodeConf pushes node configuration data to the backend
	// It is assumed that the backend will notify the implementor of this interface
	// when a change is triggered
	SetNodeConf(config *NodeConfig) error
	// WatchCluster registers a user defined function as callback watching for changes
	// in the cluster configuration
	WatchCluster(name string, cb func(config *ClusterConfig) error) error
	// WatchNode registers a user defined function as callback watching for changes
	// in the node configuration
	WatchNode(name string, cb func(config *NodeConfig) error) error
	// Close performs internal cleanup
	Close()
}
|
//go:build !windows
// +build !windows
package fleetyaml
import (
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// TestBundleYaml checks that every path produced by GetFleetYamlPath — with
// both the primary and the fallback extension — is recognized by IsFleetYaml
// and IsFleetYamlSuffix, and that malformed names are rejected.
func TestBundleYaml(t *testing.T) {
	a := assert.New(t)
	dirs := []string{"/foo", "foo", "/foo/", "foo/", "../foo/bar"}
	for _, dir := range dirs {
		// Exercise both the primary extension and the fallback extension.
		candidates := []string{GetFleetYamlPath(dir, false), GetFleetYamlPath(dir, true)}
		for _, full := range candidates {
			a.True(IsFleetYaml(filepath.Base(full)))
			a.True(IsFleetYamlSuffix(full))
		}
	}
	// Names that must not be recognized as fleet YAML files.
	for _, full := range []string{"fleet.yaaaaaaaaaml", "", ".", "weakmonkey.yaml", "../fleet.yaaaaml"} {
		a.False(IsFleetYaml(filepath.Base(full)))
		a.False(IsFleetYamlSuffix(full))
	}
}
|
// Package encoding defines interfaces shared by other packages that are used by
// usrv servers and clients to encode and decode the endpoint-specific request
// and response messages into a byte-level format suitable for transmitting over
// a transport.
package encoding
// Marshaler is a function type that can produce a byte
// representation of another object.
type Marshaler func(interface{}) ([]byte, error)
// Unmarshaler is a function type that can unmarshal a byte
// representation of an object into an object instance.
type Unmarshaler func([]byte, interface{}) error
// Codec is implemented by objects that can produce marshalers and unmarshalers
// for a given object type.
//
// Marshaler returns a Marshaler implementation that can marshal instances of a
// particular type into a byte slice.
//
// Unmarshaler returns a Unmarshaler implementation that can unmarshal instances
// of a particular type from a byte slice.
type Codec interface {
	Marshaler() Marshaler
	Unmarshaler() Unmarshaler
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package model
const (
	// execMMCTL is the mmctl command-line tool.
	execMMCTL = "mmctl"
	// execMattermostCLI is the legacy mattermost CLI.
	execMattermostCLI = "mattermost"
)

// IsValidExecCommand returns whether the provided command is valid or not.
// Only the known executables (mmctl, mattermost) are accepted.
// (Fixed doc typo: "wheather" -> "whether".)
func IsValidExecCommand(command string) bool {
	switch command {
	case execMMCTL, execMattermostCLI:
		return true
	}
	return false
}
|
package main
// Invalid
// Demonstrates that a bare bool literal is not okay to use as an expression
// statement: the compiler rejects `true` as an unused value, so this file
// intentionally does not compile. (Fixed comment typo: "statemnet".)
func main() {
	true;
}
|
package gen
import (
"fmt"
"github.com/felixangell/goof/ast"
"github.com/felixangell/goof/cc/unit"
"github.com/felixangell/goof/types"
"io"
"os"
"os/exec"
"reflect"
"strconv"
)
// TODO:
// there are some inconsistencies
// in generation where we should convert
// to the weird IR abstraction for
// easier generation...
// Section names an assembler directive/section (emitted as ".<section>").
type Section string

// Previously only Data carried the Section type while the rest were untyped
// string constants; all are now consistently typed Section.
const (
	Data   Section = "data"
	Text   Section = "text"
	Global Section = "globl"
	Byte   Section = "byte"
	Short  Section = "short"
	Long   Section = "long"
	Quad   Section = "quad"
)
// FunctionContext tracks the stack offset assigned to each local identifier
// of the function currently being generated.
type FunctionContext struct {
	offsets map[string]int
}

// newFunctionContext returns an empty context with an initialized offset table.
func newFunctionContext() *FunctionContext {
	ctx := &FunctionContext{}
	ctx.offsets = map[string]int{}
	return ctx
}

// getOffset returns the stack offset recorded for name, or -1 when unknown.
func (fc *FunctionContext) getOffset(name string) int {
	offs, ok := fc.offsets[name]
	if !ok {
		return -1
	}
	return offs
}

// addOffset records the stack offset for name. It reports false (and leaves
// the existing entry untouched) when an offset was already registered.
func (fc *FunctionContext) addOffset(name string, offset int) bool {
	if _, exists := fc.offsets[name]; exists {
		// offset already exists for this identifier... should never
		// happen once a semantic analyzer is in place.
		return false
	}
	fc.offsets[name] = offset
	return true
}
// CCRegisterLookup lists, in parameter order, the registers used for the
// first six integer arguments under the System V AMD64 calling convention.
var CCRegisterLookup = [...]*Value{
	RDI, RSI, RDX, RCX, R8, R9,
}
// AMD64_Generator lowers the AST to x86-64 assembly, writing output to out.
// currentContext holds stack offsets for the function being generated.
type AMD64_Generator struct {
	out            io.Writer
	currentContext *FunctionContext
}

// New_AMD64_Generator returns a generator with no output writer set;
// ExecutePhase assigns one before any code is emitted.
func New_AMD64_Generator() *AMD64_Generator {
	return &AMD64_Generator{}
}
// generateGlobal emits a .data entry for a global variable, selecting the
// assembler directive that matches the variable's declared bit width.
func (g *AMD64_Generator) generateGlobal(variable *ast.Variable) {
	g.emit(".data\n")
	g.emit(".globl %s\n", variable.Name)
	g.emit("\t")
	value := variable.Value.String()
	switch variable.Type().BitWidth() {
	case 8:
		g.emitSection(Byte, value)
	case 16:
		g.emitSection(Short, value)
	case 32:
		g.emitSection(Long, value)
	case 64:
		g.emitSection(Quad, value)
	}
}
// generateFunction lowers one function: label and prologue, RBP-relative
// offset assignment for arguments and locals, stack reservation, spilling of
// register arguments, body generation, stack release, and the return sequence.
func (g *AMD64_Generator) generateFunction(fun *ast.Function) {
	g.emitSection(Text)
	// TODO(Felix): This is a hacky fix
	// for rewriting main to _main to make
	// it play nice with GCC
	if fun.Name == "main" {
		fun.Name = "_" + fun.Name
	}
	g.emitSection(Global, fun.Name)
	g.emit("%s:\n", fun.Name)
	// Standard prologue: save the caller's base pointer, establish our frame.
	g.emit(Push.Emit(RBP))
	g.emit(Mov.Emit(RSP, RBP))
	g.currentContext = newFunctionContext()
	offs := 0
	// calculate offsets for arguments
	// NOTE(review): "size += size % 8" does not 8-byte-align size in general
	// (e.g. 3 -> 6, 12 -> 16); presumably alignment was intended -- confirm.
	for _, arg := range fun.Args {
		size := 0
		size = types.GetSize(arg.Type)
		size += size % 8
		offs -= size
		// Argument spill slots are registered under "<name>_arg".
		g.currentContext.addOffset(arg.Name+"_arg", offs)
	}
	// calculate the offsets for all the variables...
	for _, node := range fun.Block {
		switch node := node.(type) {
		case *ast.Variable:
			var size int
			switch typ := node.Type().(type) {
			case *types.Array:
				// One slot per element, registered as "<name>_arr_<i>".
				for i := typ.Length - 1; i >= 0; i-- {
					size = types.GetSize(typ.Base)
					size += size % 8
					offs -= size
					g.currentContext.addOffset(node.Name+"_arr_"+strconv.Itoa(i), offs)
				}
			default:
				size = types.GetSize(node.Type())
				size += size % 8
				offs -= size
				g.currentContext.addOffset(node.Name, offs)
			}
		}
	}
	// set offsets for parameters
	// TODO..
	if offs != 0 {
		// allocate space on stack (offs is negative, hence the negation)
		g.emit(Sub.Emit(NewNumber(-offs), RSP))
	}
	// loads args from registers into their stack slots
	for idx, arg := range fun.Args {
		offs := g.currentContext.getOffset(arg.Name + "_arg")
		g.emit(Mov.Emit(CCRegisterLookup[idx], NewOffset(offs, RBP)))
	}
	for _, node := range fun.Block {
		g.generateBlockLevelNode(node)
	}
	if offs != 0 {
		// free up space
		g.emit(Add.Emit(NewNumber(-offs), RSP))
	}
	// TODO: make this not stupid
	// this is under the assumption that
	// return is the last node appended to
	// a block...
	returnIndex := len(fun.Block) - 1
	g.generateReturn(fun.Block[returnIndex].(*ast.Return))
}
// generateReturn moves the return value (when present) into RAX, restores
// the caller's base pointer, and emits ret.
func (g *AMD64_Generator) generateReturn(ret *ast.Return) {
	if ret.Value != nil {
		returnValue := g.literalToValue(ret.Value)
		g.emit(Mov.Emit(returnValue, RAX))
	}
	g.emit(Pop.Emit(RBP))
	g.emit(Ret.Emit())
}
// literalToValue lowers a literal/expression node to an operand: integer
// literals become immediates, identifiers and array accesses become
// RBP-relative offsets, and calls are generated eagerly with the result in
// RAX. Unsupported node kinds panic.
//
// The trailing unreachable "return nil" (every arm returns or panics, so go
// vet flagged it as dead code) has been removed.
func (g *AMD64_Generator) literalToValue(node ast.Literal) *Value {
	switch node := node.(type) {
	case *ast.IntegerLiteral:
		return NewNumber(int(node.Raw()))
	case *ast.Unresolved:
		// TODO: we can clean this up
		// by having different types of
		// unresolved thingies based on context
		offset := g.currentContext.getOffset(node.String())
		if offset == -1 {
			// Not a local; fall back to the slot reserved for an argument.
			offset = g.currentContext.getOffset(node.String() + "_arg")
		}
		return NewOffset(offset, RBP)
	case *ast.Call:
		g.generateCall(node)
		// The call's result is left in RAX by convention.
		return RAX
	case *ast.ArrayAccess:
		where := node.ArrayName()
		pos := node.Index.(*ast.IntegerLiteral).String()
		offset := g.currentContext.getOffset(where + "_arr_" + pos)
		return NewOffset(offset, RBP)
	default:
		panic("unsupported " + reflect.TypeOf(node).String())
	}
}
// generateCall lowers a function call: integer-typed arguments travel in
// registers, everything else is pushed on the stack right-to-left and popped
// off after the call returns.
func (g *AMD64_Generator) generateCall(call *ast.Call) {
	passedAsReg := []ast.Expr{}
	passedInStack := []ast.Expr{}
	for _, arg := range call.Arguments {
		if arg.Type().IsIntegerType() {
			passedAsReg = append(passedAsReg, arg)
		} else {
			passedInStack = append(passedInStack, arg)
		}
	}
	// Track how many bytes were pushed so the stack can be cleaned up
	// after the call.
	size := 0
	for i := len(passedInStack) - 1; i >= 0; i-- {
		arg := passedInStack[i]
		g.emit(Mov.Emit(g.literalToValue(arg), RAX))
		g.emit(Push.Emit(RAX))
		size += 8
	}
	// Register arguments are staged on the stack, then popped into the
	// calling-convention registers in order (RDI, RSI, ...).
	for i := len(passedAsReg) - 1; i >= 0; i-- {
		arg := passedAsReg[i]
		g.emit(Mov.Emit(g.literalToValue(arg), RAX))
		g.emit(Push.Emit(RAX))
	}
	// Simplified from "for idx, _ := range" (golint S1005).
	for idx := range passedAsReg {
		g.emit(Pop.Emit(CCRegisterLookup[idx]))
	}
	g.emit(Call.Emit(NewString(call.Callee())))
	if size != 0 {
		// Release the stack-passed arguments.
		g.emit(Add.Emit(NewNumber(size), RSP))
	}
}
// generateBlockLevelExpr emits the code that stores the value of node into
// the stack slot(s) owned by parent (a local variable declaration). The
// string return value is currently always "" and unused by its caller.
func (g *AMD64_Generator) generateBlockLevelExpr(node ast.Expr, parent *ast.Variable) string {
	switch node := node.(type) {
	case *ast.ArrayInitializer:
		// Store each initializer element into its "<name>_arr_<i>" slot.
		for idx, val := range node.Values {
			offs := g.currentContext.getOffset(parent.Name + "_arr_" + strconv.Itoa(idx))
			initValue := g.literalToValue(val)
			// we can't do
			//   mov offset, offset
			// so we need to do
			//   mov offset, rax
			//   mov rax, offset
			// TODO: handle more cases
			// like this properly
			if initValue.kind == Offset {
				g.emit(Mov.Emit(initValue, RAX))
				g.emit(Mov.Emit(RAX, NewOffset(offs, RBP)))
			} else {
				g.emit(Mov.Emit(initValue, NewOffset(offs, RBP)))
			}
		}
	case *ast.ArrayAccess:
		// Copy the accessed element through RAX into parent's slot
		// (memory-to-memory mov is not encodable).
		offs := g.currentContext.getOffset(parent.Name)
		arrayAccess := g.literalToValue(node)
		g.emit(Mov.Emit(arrayAccess, RAX))
		g.emit(Mov.Emit(RAX, NewOffset(offs, RBP)))
	case *ast.Call:
		// literalToValue generates the call; the result value is RAX.
		val := g.literalToValue(node)
		offs := g.currentContext.getOffset(parent.Name)
		g.emit(Mov.Emit(val, NewOffset(offs, RBP)))
	case ast.Literal:
		value := g.literalToValue(node)
		offs := g.currentContext.getOffset(parent.Name)
		g.emit(Mov.Emit(value, NewOffset(offs, RBP)))
	default:
		panic("unsupported expr " + reflect.TypeOf(node).String())
	}
	return ""
}
// generateBlockLevelNode dispatches code generation for one statement in a
// function body. Only variable declarations are handled; every other node
// kind is silently ignored.
func (g *AMD64_Generator) generateBlockLevelNode(node ast.Node) {
	if variable, ok := node.(*ast.Variable); ok {
		g.generateBlockLevelExpr(variable.Value, variable)
	}
}
// generateTopLevelNode emits code for a top-level declaration: functions are
// lowered into the text section, variables into data-section globals.
func (g *AMD64_Generator) generateTopLevelNode(node ast.Node) {
	switch n := node.(type) {
	case *ast.Function:
		g.generateFunction(n)
	case *ast.Variable:
		g.generateGlobal(n)
	}
}
// ExecutePhase writes the generated assembly for file to "__<name>.s" and
// invokes gcc on it to produce a binary.
//
// Fixes: the output file is now closed; gcc is run with Run (not Start) so
// we wait for it and only report success when compilation actually finished;
// the redundant "\n" passed to Println is gone.
func (g *AMD64_Generator) ExecutePhase(file *unit.SourceFile) {
	fmt.Println("Generating x86_64 assembly")
	asmName := "__" + file.Name + ".s"
	out, err := os.Create(asmName)
	if err != nil {
		panic("Failed to create file " + asmName)
	}
	g.out = out
	for _, node := range file.Program {
		g.generateTopLevelNode(node)
	}
	// Close before handing the file to gcc so the descriptor isn't leaked.
	if err := out.Close(); err != nil {
		fmt.Println(err.Error())
		return
	}
	// run gcc on the assembly to produce a binary
	cmd := exec.Command("gcc", asmName)
	if err := cmd.Run(); err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Println("Compiled with `gcc`")
}
// emit formats s with mess, mirrors it on stdout for debugging, and writes
// it to the generator's output. A write failure panics with the underlying
// error (the previous panic message was unprofessional and uninformative).
func (g *AMD64_Generator) emit(s string, mess ...interface{}) {
	fmt.Printf(s, mess...)
	if _, err := fmt.Fprintf(g.out, s, mess...); err != nil {
		panic("failed to write assembly output: " + err.Error())
	}
}
// emitSection writes an assembler directive of the form
// ".<section> attr1 attr2\n" built from the section name and its
// space-separated attributes.
func (g *AMD64_Generator) emitSection(section Section, attributes ...string) {
	g.emit(".%s", string(section))
	for _, attribute := range attributes {
		g.emit(" %s", attribute)
	}
	g.emit("\n")
}
|
package main
import (
"bytes"
"fmt"
"math"
"net/smtp"
"path/filepath"
"strconv"
"strings"
"text/tabwriter"
)
// SendMail delivers msg through Gmail's SMTP endpoint using PLAIN auth.
// From/To/Cc/Subject headers are prepended to the body; the recipient list
// is the comma-separated union of to and cc.
func SendMail(from string, to string, cc string, pwd string, title string, msg string) error {
	var header strings.Builder
	header.WriteString("From:" + from + "\n")
	header.WriteString("To:" + to + "\n")
	header.WriteString("Cc:" + cc + "\n")
	header.WriteString("Subject:" + title + "\n")
	header.WriteString("\n")
	msg = header.String() + msg
	auth := smtp.PlainAuth("", from, pwd, "smtp.gmail.com")
	recipients := strings.Split(to, ",")
	if cc != "" {
		recipients = append(recipients, strings.Split(cc, ",")...)
	}
	return smtp.SendMail("smtp.gmail.com:587", auth, from, recipients, []byte(msg))
}
// BuildMailBody renders the disk-usage notification mail (Japanese) and
// returns (title, body). vol describes the monitored volume; list holds, per
// configured target directory, the entries to report. Relies on the
// package-level conf plus the CalcByteToStr/GetDirOwner helpers.
func BuildMailBody(vol *VolumeInfo, list [][]*DirectoryInfo) (string, string) {
	title := "【容量確認のお知らせ : " + filepath.Base(conf.Directory.ROOT_DIRECTORY) + "】"
	msg := ""
	msg += "このメールは自動実行で送信しています。\n"
	msg += "残り容量が少なくなってきています。早めのバックアップをお願いいたします。\n"
	msg += "\n"
	msg += fmt.Sprintf("[ %v ] の現在状況\n", filepath.VolumeName(conf.Directory.ROOT_DIRECTORY))
	msg += fmt.Sprintf("総容量: %v\n", CalcByteToStr(vol.Total))
	msg += fmt.Sprintf("空き容量: %v\n", CalcByteToStr(vol.Free))
	msg += fmt.Sprintf("使用容量: %v\n", CalcByteToStr(vol.Used))
	msg += fmt.Sprintf("使用率: %v", vol.UsedPercent) + "%\n"
	msg += "\n"
	// One report section per configured target directory.
	for i, dir := range strings.Split(conf.Directory.TARGET_DIRECTORIES, ",") {
		total := 0
		for _, d := range list[i] {
			total += d.Size
		}
		msg += "[ " + filepath.Join(conf.Directory.ROOT_DIRECTORY, dir) + " : " + CalcByteToStr(total) + " ] 使用状況順(容量x保存経過時間)\n"
		// Tab-aligned table: rank, directory name, size, mtime, owner.
		buf := new(bytes.Buffer)
		w := new(tabwriter.Writer)
		w.Init(buf, 0, 8, 0, '\t', 0)
		fmt.Fprintln(w, "順\tディレクトリ名\t容量\t更新日時\t所有者")
		// Truncate to the configured maximum number of ranked entries.
		// NOTE(review): this mutates the caller's list in place -- confirm
		// that callers do not reuse list afterwards.
		list[i] = list[i][:int(math.Min(float64(conf.Rank.MAX), float64(len(list[i]))))]
		for j, d := range list[i] {
			n := strconv.Itoa(j + 1)
			s := CalcByteToStr(d.Size)
			t := d.ModTime.Format("2006-01-02")
			o := GetDirOwner(d.Path)
			fmt.Fprintln(w, n+"\t"+d.Name+"\t"+s+"\t"+t+"\t"+o)
		}
		fmt.Fprintln(w)
		w.Flush()
		msg += buf.String()
		msg += "\n"
	}
	return title, msg
}
|
/*
* Copyright 2020 Kaiserpfalz EDV-Service, Roland T. Lichti.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package aws_provider_test
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"net"
"strconv"
)
// Ginkgo suite for CheckIP: an IP assigned to the instance's interface must
// pass; an unassigned IP must produce the documented error. Suite-level
// fixtures (sut, awsDirect, mockCtrl, hostName, hostId, networkInterfaceId,
// mainIP and the createDescribeInstances* helpers) are defined elsewhere in
// this package.
var _ = Describe("CheckIP", func() {
	BeforeEach(func() {
		initMock()
	})
	AfterEach(func() {
		mockCtrl.Finish()
	})
	It("should be fine when the IP is assigned to the specified host", func() {
		// Populate MaxIPsPerInstance-1 secondary IPs: 10.0.1.20, 10.0.1.21, ...
		secondaryIPs := make([]*net.IP, sut.MaxIPsPerInstance-1)
		for i := 0; i < sut.MaxIPsPerInstance-1; i++ {
			s := strconv.Itoa(20 + i)
			ip := net.ParseIP("10.0.1." + s)
			secondaryIPs[i] = &ip
		}
		awsDirect.
			EXPECT().DescribeInstances(createDescribeInstancesInput(hostName)).
			Return(
				createDescribeInstancesOutput(hostName, hostId, networkInterfaceId, mainIP, secondaryIPs),
				nil,
			)
		err := sut.CheckIP(secondaryIPs[1], hostName)
		Expect(err).To(BeNil())
	})
	It("should throw an error when the IP is not assigned to the specified host", func() {
		unassignedIP := net.ParseIP("10.1.2.3")
		expectedErr := fmt.Errorf(
			"ip '%v' is not assigned to instance '%v'",
			unassignedIP.String(), hostId,
		)
		// The mocked instance carries no secondary IPs at all.
		awsDirect.
			EXPECT().DescribeInstances(createDescribeInstancesInput(hostName)).
			Return(
				createDescribeInstancesOutput(hostName, hostId, networkInterfaceId, mainIP, []*net.IP{}),
				nil,
			)
		err := sut.CheckIP(&unassignedIP, hostName)
		Expect(err).To(MatchError(expectedErr))
	})
})
|
/*
* Copyright (C) 2018 eeonevision
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package state
import (
"errors"
"github.com/eeonevision/anychaindb/crypto"
)
//go:generate msgp
// Account struct keeps account related fields.
// Serialized by msgp (see go:generate), JSON, BSON and mapstructure under
// identical key names.
type Account struct {
	// ID is the account identifier, stored as the Mongo "_id".
	ID string `msg:"_id" json:"_id" mapstructure:"_id" bson:"_id"`
	// PubKey is the account's public key in string form.
	PubKey string `msg:"public_key" json:"public_key" mapstructure:"public_key" bson:"public_key"`
}

// accountsCollection is the database collection that stores accounts.
const accountsCollection = "accounts"
// AddAccount method adds new account if all checks were passed: the account
// is inserted only when no account with the same ID already exists.
func (s *State) AddAccount(account *Account) error {
	exists := s.HasAccount(account.ID)
	if exists {
		return errors.New("account exists")
	}
	return s.SetAccount(account)
}
// SetAccount method adds account in state.
// It inserts unconditionally; use AddAccount for duplicate-ID protection.
func (s *State) SetAccount(account *Account) error {
	return s.DB.C(accountsCollection).Insert(account)
}
// HasAccount method reports whether an account with the given id exists in
// the state. Lookup errors are treated as "not found".
func (s *State) HasAccount(id string) bool {
	res, _ := s.GetAccount(id)
	return res != nil
}
// GetAccount method returns account from accounts collection by given account id.
// On a miss, result stays nil and the driver's lookup error is returned.
// (Fixed doc typo: "accoutn".)
func (s *State) GetAccount(id string) (*Account, error) {
	var result *Account
	return result, s.DB.C(accountsCollection).FindId(id).One(&result)
}
// GetAccountPubKey method returns the public key for the given account id,
// looking the account up in state first.
func (s *State) GetAccountPubKey(id string) (*crypto.Key, error) {
	account, err := s.GetAccount(id)
	if err != nil {
		return nil, err
	}
	// Only the public half is known here, hence the empty private part.
	return crypto.NewFromStrings(account.PubKey, "")
}
// ListAccounts method returns all accounts from the state.
func (s *State) ListAccounts() (result []*Account, err error) {
	return result, s.DB.C(accountsCollection).Find(nil).All(&result)
}

// SearchAccounts method returns accounts by given search query, limit and offset parameters.
// query is passed straight to the driver's Find; offset is applied before limit.
func (s *State) SearchAccounts(query interface{}, limit, offset int) (result []*Account, err error) {
	return result, s.DB.C(accountsCollection).Find(query).Skip(offset).Limit(limit).All(&result)
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"path/filepath"
"github.com/btcsuite/btcd/btcjson"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/rpcclient"
"github.com/btcsuite/btcutil"
)
//default:
//"sb1qg6ydpvdx8hhdrtvfs46zx0wx049nyht8xjn7zr",
//"SVBCk1pR524saYZWeUMUiurkR7hj65QeKV",
//"SYAFazGGjch4Afni6AgsgJMACZrtAj1cix"
//
//kingsley:
//"SXavGjPTKSEo7JZoW6gsxu684NTmJmqts1",
//"Sgofz1WsD8oYYkzaJFaHjT7d7f6oMP8JEE"
//
//walker:
//"SQp9JS8c94hCeZYV9UqxogmdGwVtMJbKBo",
//"SdEFkyipUUzNTtRDjVBWK5ECJkRZRniGMb"
// main is a manual simnet walkthrough: unlock the "walker" wallet, list
// accounts, send 9.9 coins from "default" to a hard-coded address, then mine
// blocks so the transaction confirms. The commented-out calls are alternate
// scenarios kept for experimentation.
func main() {
	client := initClient()
	// Unlock the wallet for 600 seconds.
	err := client.WalletPassphrase("walker", 600)
	if err != nil {
		log.Fatal(err)
	}
	client.ListAccounts()
	transferFrom(client, "default", "SdEFkyipUUzNTtRDjVBWK5ECJkRZRniGMb", 9.9)
	//fmt.Println(transfer(client, "SQp9JS8c94hCeZYV9UqxogmdGwVtMJbKBo", 0.5))
	generate(client)
	//fmt.Println(getBalanceByAccount(client,"default"))
	//fmt.Println(getBalanceByAccount(client,"kingsley"))
	//fmt.Println(getBalanceByAccount(client,"walker"))
	//addresses,err :=client.GetAccountAddress("walker")
	//fmt.Println(addresses,err)
}
// initClient connects to a local btcwallet over websockets, installs
// notification handlers, and subscribes to transaction notifications for a
// hard-coded simnet address.
//
// Fixes: NotifyReceived was registered twice with the identical address
// (duplicate removed) and the DecodeAddress error was silently discarded
// (now checked).
func initClient() *rpcclient.Client {
	ntfnHandlers := rpcclient.NotificationHandlers{
		OnAccountBalance: func(account string, balance btcutil.Amount, confirmed bool) {
			log.Printf("New balance for account %s: %v", account,
				balance)
		},
		OnRecvTx: func(transaction *btcutil.Tx, details *btcjson.BlockDetails) {
			log.Printf("RecvTx: %v,%v", transaction, details)
		},
		OnTxAccepted: func(hash *chainhash.Hash, amount btcutil.Amount) {
			log.Printf("TxAccepted : %v,%v", hash, amount)
		},
	}
	// btcwallet generates a self-signed TLS certificate on first run; load
	// it so we can dial the RPC endpoint.
	certHomeDir := btcutil.AppDataDir("btcwallet", false)
	certs, err := ioutil.ReadFile(filepath.Join(certHomeDir, "rpc.cert"))
	if err != nil {
		log.Fatal(err)
	}
	connCfg := &rpcclient.ConnConfig{
		Host:         "localhost:18554",
		Endpoint:     "ws",
		User:         "walker",
		Pass:         "12345",
		Certificates: certs,
	}
	client, err := rpcclient.New(connCfg, &ntfnHandlers)
	if err != nil {
		log.Fatal(err)
	}
	address, err := btcutil.DecodeAddress("SQp9JS8c94hCeZYV9UqxogmdGwVtMJbKBo", &chaincfg.SimNetParams)
	if err != nil {
		log.Fatal(err)
	}
	if err := client.NotifyReceived([]btcutil.Address{address}); err != nil {
		log.Fatal(err)
	}
	if err := client.NotifyNewTransactions(false); err != nil {
		log.Fatal(err)
	}
	return client
}
// transferFrom sends amount coins from the named wallet account to addr and
// reports success. RPC failures terminate the process via log.Fatal.
//
// Fixes: the error was printed twice (fmt.Println + log.Fatal), the
// "return false" after log.Fatal was unreachable, and the decode/amount
// errors were silently discarded.
func transferFrom(client *rpcclient.Client, account, addr string, amount float64) bool {
	address, err := btcutil.DecodeAddress(addr, &chaincfg.SimNetParams)
	if err != nil {
		log.Fatal(err)
	}
	btcAmount, err := btcutil.NewAmount(amount)
	if err != nil {
		log.Fatal(err)
	}
	hash, err := client.SendFrom(account, address, btcAmount)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("txid:", hash)
	return true
}
// transfer unlocks the wallet and sends amount coins to addr, reporting
// success. RPC failures terminate the process via log.Fatal.
func transfer(client *rpcclient.Client, addr string, amount float64) bool {
	if err := client.WalletPassphrase("walker", 600); err != nil {
		log.Fatal(err)
	}
	address, _ := btcutil.DecodeAddress(addr, &chaincfg.SimNetParams)
	btcAmount, _ := btcutil.NewAmount(amount)
	hash, err := client.SendToAddress(address, btcAmount)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("txid:", hash)
	return true
}
// generate mines 12 blocks on the simnet node and reports whether the RPC
// call succeeded.
func generate(client *rpcclient.Client) bool {
	_, err := client.Generate(12)
	return err == nil
}
// getBalanceByAccount unlocks the wallet and returns the balance of the
// named account, or 0 when the balance query fails.
func getBalanceByAccount(client *rpcclient.Client, account string) btcutil.Amount {
	if err := client.WalletPassphrase("walker", 600); err != nil {
		log.Fatal(err)
	}
	amount, err := client.GetBalance(account)
	if err != nil {
		return 0
	}
	return amount
}
|
// Copyright (c) KwanJunWen
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
package estemplate
import "fmt"
// DatatypeFlattened Specialised Datatype that allows an entire JSON object
// to be indexed as a single field. This data type can be useful for indexing
// objects with a large or unknown number of unique keys.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/flattened.html
// for details.
type DatatypeFlattened struct {
	Datatype
	name   string
	copyTo []string
	// fields specific to flattened datatype
	// (pointer fields distinguish "unset" from a zero value; string fields
	// treat "" as unset -- see Source)
	boost                    *float32
	depthLimit               *int
	docValues                *bool
	eagerGlobalOrdinals      *bool
	ignoreAbove              *int
	index                    *bool
	indexOptions             string
	nullValue                string
	similarity               string
	splitQueriesOnWhitespace *bool
}
// NewDatatypeFlattened initializes a new DatatypeFlattened with the given
// field name; every other option stays unset until its fluent setter is used.
func NewDatatypeFlattened(name string) *DatatypeFlattened {
	f := new(DatatypeFlattened)
	f.name = name
	return f
}
// Name returns field key for the Datatype.
func (f *DatatypeFlattened) Name() string {
	return f.name
}

// CopyTo sets the field(s) to copy to which allows the values of multiple fields to be
// queried as a single field. Repeated calls accumulate targets.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/copy-to.html
// for details.
func (f *DatatypeFlattened) CopyTo(copyTo ...string) *DatatypeFlattened {
	f.copyTo = append(f.copyTo, copyTo...)
	return f
}
// Boost sets the field-level query-time boost. Defaults to 1.0.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/mapping-boost.html
// for details.
func (f *DatatypeFlattened) Boost(boost float32) *DatatypeFlattened {
	v := boost
	f.boost = &v
	return f
}

// DepthLimit sets the maximum allowed depth of the flattened object field in
// terms of nested inner objects; `depth_limit` can be updated dynamically
// through the put mapping API. Defaults to 20.
func (f *DatatypeFlattened) DepthLimit(depthLimit int) *DatatypeFlattened {
	v := depthLimit
	f.depthLimit = &v
	return f
}

// DocValues sets whether the field is stored on disk in a column-stride
// fashion for later sorting, aggregations, or scripting. Defaults to true.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/doc-values.html
// for details.
func (f *DatatypeFlattened) DocValues(docValues bool) *DatatypeFlattened {
	v := docValues
	f.docValues = &v
	return f
}

// EagerGlobalOrdinals sets whether global ordinals are loaded eagerly on
// refresh. Defaults to false.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/eager-global-ordinals.html
// for details.
func (f *DatatypeFlattened) EagerGlobalOrdinals(eagerGlobalOrdinals bool) *DatatypeFlattened {
	v := eagerGlobalOrdinals
	f.eagerGlobalOrdinals = &v
	return f
}

// IgnoreAbove sets the length limit for indexed leaf values; longer leaf
// values inside the flattened object are neither indexed nor stored. The
// limit applies per leaf value, not to the whole field.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/ignore-above.html
// for details.
func (f *DatatypeFlattened) IgnoreAbove(ignoreAbove int) *DatatypeFlattened {
	v := ignoreAbove
	f.ignoreAbove = &v
	return f
}

// Index sets whether the field is searchable. Defaults to true.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/mapping-index.html
// for details.
func (f *DatatypeFlattened) Index(index bool) *DatatypeFlattened {
	v := index
	f.index = &v
	return f
}

// IndexOptions sets what is stored in the index for search and highlighting:
// "docs", "freqs", "positions" or "offsets". Defaults to "docs".
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/index-options.html
// for details.
func (f *DatatypeFlattened) IndexOptions(indexOptions string) *DatatypeFlattened {
	f.indexOptions = indexOptions
	return f
}

// NullValue sets a string substituted for explicit null values. Defaults to null.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/null-value.html
// for details.
func (f *DatatypeFlattened) NullValue(nullValue string) *DatatypeFlattened {
	f.nullValue = nullValue
	return f
}

// Similarity sets the scoring algorithm: "BM25", "classic" or "boolean".
// Defaults to "BM25".
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/similarity.html
// for details.
func (f *DatatypeFlattened) Similarity(similarity string) *DatatypeFlattened {
	f.similarity = similarity
	return f
}

// SplitQueriesOnWhitespace sets whether full-text queries split the input on
// whitespace when building a query for this field. Defaults to false.
func (f *DatatypeFlattened) SplitQueriesOnWhitespace(splitQueriesOnWhitespace bool) *DatatypeFlattened {
	v := splitQueriesOnWhitespace
	f.splitQueriesOnWhitespace = &v
	return f
}
// Validate validates DatatypeFlattened.
//
// Fix: the previous loops flagged a value as invalid as soon as it differed
// from ANY allowed value (break on first mismatch), so every setting was
// rejected whenever more than one allowed value exists. Validation is now a
// proper membership check: a non-empty value is invalid only when it matches
// none of the allowed values.
func (f *DatatypeFlattened) Validate(includeName bool) error {
	var invalid []string
	if includeName && f.name == "" {
		invalid = append(invalid, "Name")
	}
	if f.indexOptions != "" {
		found := false
		for _, valid := range validIndexOptions {
			if f.indexOptions == valid {
				found = true
				break
			}
		}
		if !found {
			invalid = append(invalid, "IndexOptions")
		}
	}
	if f.similarity != "" {
		found := false
		for _, valid := range validSimilarity {
			if f.similarity == valid {
				found = true
				break
			}
		}
		if !found {
			invalid = append(invalid, "Similarity")
		}
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields or invalid values: %v", invalid)
	}
	return nil
}
// Source returns the serializable JSON for the source builder.
//
// Fix: "index_options" previously serialized the IndexOptions *method value*
// (f.IndexOptions) instead of the configured field (f.indexOptions), which
// would have produced an unmarshalable func in the output map. Redundant
// `break` statements inside the switch were also removed (Go cases do not
// fall through).
func (f *DatatypeFlattened) Source(includeName bool) (interface{}, error) {
	// {
	// 	"test": {
	// 		"type": "flattened",
	// 		"copy_to": ["field_1", "field_2"],
	// 		"boost": 2,
	// 		"depth_limit": 20,
	// 		"doc_values": true,
	// 		"eager_global_ordinals": true,
	// 		"ignore_above": 256,
	// 		"index": true,
	// 		"index_options": "docs",
	// 		"null_value": "NULL",
	// 		"similarity": "BM25",
	// 		"split_queries_on_whitespace": true
	// 	}
	// }
	options := make(map[string]interface{})
	options["type"] = "flattened"
	if len(f.copyTo) > 0 {
		// A single copy_to target is emitted as a string, multiple as a list.
		var copyTo interface{}
		switch {
		case len(f.copyTo) > 1:
			copyTo = f.copyTo
		case len(f.copyTo) == 1:
			copyTo = f.copyTo[0]
		default:
			copyTo = ""
		}
		options["copy_to"] = copyTo
	}
	if f.boost != nil {
		options["boost"] = f.boost
	}
	if f.depthLimit != nil {
		options["depth_limit"] = f.depthLimit
	}
	if f.docValues != nil {
		options["doc_values"] = f.docValues
	}
	if f.eagerGlobalOrdinals != nil {
		options["eager_global_ordinals"] = f.eagerGlobalOrdinals
	}
	if f.ignoreAbove != nil {
		options["ignore_above"] = f.ignoreAbove
	}
	if f.index != nil {
		options["index"] = f.index
	}
	if f.indexOptions != "" {
		options["index_options"] = f.indexOptions
	}
	if f.nullValue != "" {
		options["null_value"] = f.nullValue
	}
	if f.similarity != "" {
		options["similarity"] = f.similarity
	}
	if f.splitQueriesOnWhitespace != nil {
		options["split_queries_on_whitespace"] = f.splitQueriesOnWhitespace
	}
	if !includeName {
		return options, nil
	}
	source := make(map[string]interface{})
	source[f.name] = options
	return source, nil
}
|
package util
const (
	// prefix namespaces every annotation key declared below.
	prefix = "harvesterhci.io"

	// RemovedPVCsAnnotationKey lists persistent volume claims that were removed.
	RemovedPVCsAnnotationKey = prefix + "/removedPersistentVolumeClaims"
	// AnnotationMigrationTarget names the migration's target node.
	AnnotationMigrationTarget = prefix + "/migrationTargetNodeName"
	// AnnotationMigrationUID identifies a migration operation.
	AnnotationMigrationUID = prefix + "/migrationUID"
	// AnnotationMigrationState records the state of a migration.
	AnnotationMigrationState = prefix + "/migrationState"
	// AnnotationTimestamp carries a timestamp value.
	AnnotationTimestamp = prefix + "/timestamp"
	// AnnotationVolumeClaimTemplates stores volume claim templates.
	AnnotationVolumeClaimTemplates = prefix + "/volumeClaimTemplates"
	// AnnotationImageID references an image by id.
	AnnotationImageID = prefix + "/imageId"

	// LonghornSystemNamespaceName is the namespace Longhorn components run in.
	LonghornSystemNamespaceName = "longhorn-system"
)
|
package lib_gc_conf
import (
"fmt"
"os"
"strings"
)
// init derives the configuration directory from os.Args at package load
// time. The commented-out flag-based variant was left behind by the author;
// presumably flag.Parse here would clash with the host application's own
// flag handling -- TODO confirm before reviving it.
func init() {
	//flag.StringVar(&CONF_PREFIX, "loaderConfigMisc", ".", "Is necessary to spcify the application configuration directory: -loaderConfigMisc ./myconf")
	//flag.Parse()
	setLoaderConf()
}
// Dummy is an empty placeholder value exported by this package.
var Dummy struct{}

// CONF_PREFIX holds the configuration directory extracted from os.Args.
var CONF_PREFIX string = ""

// CONF_PREFIX_NAME is the command-line key introducing the config directory.
const CONF_PREFIX_NAME = "loaderConfig"

// CONF_FILE_NAME is the expected configuration file name.
const CONF_FILE_NAME = "LoaderConfiguration.ini"

// setLoaderConf scans os.Args for an argument containing CONF_PREFIX_NAME
// and stores everything after the key plus one separator character (as in
// "loaderConfig=DIR") into CONF_PREFIX, then logs the chosen directory.
func setLoaderConf() {
	for _, arg := range os.Args {
		idx := strings.Index(arg, CONF_PREFIX_NAME)
		if idx < 0 {
			continue
		}
		CONF_PREFIX = arg[idx+len(CONF_PREFIX_NAME)+1:]
	}
	fmt.Println("Set loader go-misc library configuration directory to: ", CONF_PREFIX)
}
|
package sensor
import "time"
//type properties struct {
// ValueType string `json:"value_type"`
// Value string `json:"value"`
//}
//type sensorReads struct {
// SensorID string `json:"esp8266id"`
// SensorData []properties `json:"sensordatavalues"`
//}
// Record single measurement record
type Record struct {
	PM25        float64   // PM2.5 reading (units not specified here -- confirm with sensor docs)
	PM10        float64   // PM10 reading
	Temperature float64   // temperature reading
	Humidity    float64   // humidity reading
	Date        time.Time // when the measurement was taken
}
|
package main
import (
"errors"
"fmt"
"log"
)
// main demonstrates basic error handling: a call that fails, a call that
// succeeds, and an error built with fmt.Errorf.
func main() {
	result, err := doError()
	if err != nil {
		log.Printf("There was an error: %v\n", err)
	}
	fmt.Println("My message:", result)

	result, _ = doNoError()
	fmt.Println(result)

	if err := moreError(); err != nil {
		log.Println(err)
	}
}
// doError always fails, returning an empty string alongside a fixed error.
func doError() (string, error) {
	err := errors.New("This is my custom error")
	return "", err
}
// doNoError always succeeds with a canned response and a nil error.
func doNoError() (string, error) {
	const response = "My response"
	return response, nil
}
// moreError builds an error with fmt.Errorf, embedding a numeric code.
// Fix: the message said "frmt package"; corrected to "fmt package".
func moreError() error {
	errCode := 401
	return fmt.Errorf("This is my custom error from fmt package: %v", errCode)
}
|
package client
import (
"context"
"crypto/tls"
"errors"
"log"
"net"
"net/http"
"regexp"
"strconv"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
)
const (
	// defaultHTTPTimeout is applied when no (or a sub-millisecond) timeout is configured.
	defaultHTTPTimeout = 10 * time.Second
)

var (
	// ErrInvalidDNSResolver is returned when the DNSResolver string does not
	// match the {proto}://{ip}:{port} format.
	ErrInvalidDNSResolver = errors.New("invalid DNS resolver specified. Required format is {proto}://{ip}:{port}")
	// ErrInvalidDNSResolverPort is returned when the resolver port is outside 1-65535.
	ErrInvalidDNSResolverPort = errors.New("invalid DNS resolver port")
	// ErrInvalidClientOAuth2Config is returned when OAuth2 settings are incomplete.
	ErrInvalidClientOAuth2Config = errors.New("invalid oauth2 configuration: must define all fields for client credentials flow (token-url, client-id, client-secret, scopes)")

	// defaultConfig is copied by GetDefaultConfig; callers never receive a
	// pointer to this value directly.
	defaultConfig = Config{
		Insecure:       false,
		IgnoreRedirect: false,
		Timeout:        defaultHTTPTimeout,
	}
)
// GetDefaultConfig returns a fresh copy of the default configuration, so the
// caller can mutate it without affecting the package-level default.
func GetDefaultConfig() *Config {
	copied := defaultConfig
	return &copied
}
// Config is the configuration for clients
type Config struct {
	// Insecure determines whether to skip verifying the server's certificate chain and host name
	Insecure bool `yaml:"insecure,omitempty"`
	// IgnoreRedirect determines whether to ignore redirects (true) or follow them (false, default)
	IgnoreRedirect bool `yaml:"ignore-redirect,omitempty"`
	// Timeout for the client
	Timeout time.Duration `yaml:"timeout"`
	// DNSResolver override for the HTTP client
	// Expected format is {protocol}://{host}:{port}, e.g. tcp://8.8.8.8:53
	DNSResolver string `yaml:"dns-resolver,omitempty"`
	// OAuth2Config is the OAuth2 configuration used for the client.
	//
	// If non-nil, the http.Client returned by getHTTPClient will automatically retrieve a token if necessary.
	// See configureOAuth2 for more details.
	OAuth2Config *OAuth2Config `yaml:"oauth2,omitempty"`
	// httpClient is lazily built and cached by getHTTPClient; unexported, so
	// it is never serialized to YAML.
	httpClient *http.Client
}
// DNSResolverConfig is the parsed configuration from the DNSResolver config string.
type DNSResolverConfig struct {
	Protocol string // network protocol of the resolver, e.g. "tcp" or "udp"
	Host     string // resolver host or IP
	Port     string // resolver port, kept as a string for dialing
}

// OAuth2Config is the configuration for the OAuth2 client credentials flow
type OAuth2Config struct {
	TokenURL     string   `yaml:"token-url"` // e.g. https://dev-12345678.okta.com/token
	ClientID     string   `yaml:"client-id"`
	ClientSecret string   `yaml:"client-secret"`
	Scopes       []string `yaml:"scopes"` // e.g. ["openid"]
}
// ValidateAndSetDefaults validates the client configuration and sets the
// default values if necessary. Timeouts below one millisecond are treated as
// unset and replaced by the package default; the DNS resolver and OAuth2
// settings are validated eagerly so later calls cannot fail on bad config.
func (c *Config) ValidateAndSetDefaults() error {
	if c.Timeout < time.Millisecond {
		// Use the named default instead of a duplicated magic "10 * time.Second".
		c.Timeout = defaultHTTPTimeout
	}
	if c.HasCustomDNSResolver() {
		// Validate the DNS resolver now to make sure it will not return an error later.
		if _, err := c.parseDNSResolver(); err != nil {
			return err
		}
	}
	if c.HasOAuth2Config() && !c.OAuth2Config.isValid() {
		return ErrInvalidClientOAuth2Config
	}
	return nil
}
// HasCustomDNSResolver reports whether a DNS resolver override is configured.
func (c *Config) HasCustomDNSResolver() bool {
	return c.DNSResolver != ""
}
// dnsResolverPattern matches {proto}://{host}:{port}. Compiled once at
// package scope so parseDNSResolver does not recompile it on every call.
var dnsResolverPattern = regexp.MustCompile(`^(?P<proto>(.*))://(?P<host>[A-Za-z0-9\-\.]+):(?P<port>[0-9]+)?(.*)$`)

// parseDNSResolver parses the DNS resolver into the DNSResolverConfig struct.
// It returns ErrInvalidDNSResolver on a malformed string and
// ErrInvalidDNSResolverPort when the port is outside 1-65535.
func (c *Config) parseDNSResolver() (*DNSResolverConfig, error) {
	matches := dnsResolverPattern.FindStringSubmatch(c.DNSResolver)
	if len(matches) == 0 {
		return nil, ErrInvalidDNSResolver
	}
	// Map named capture groups to their matched values.
	r := make(map[string]string)
	for i, k := range dnsResolverPattern.SubexpNames() {
		if i != 0 && k != "" {
			r[k] = matches[i]
		}
	}
	port, err := strconv.Atoi(r["port"])
	if err != nil {
		return nil, err
	}
	if port < 1 || port > 65535 {
		return nil, ErrInvalidDNSResolverPort
	}
	return &DNSResolverConfig{
		Protocol: r["proto"],
		Host:     r["host"],
		Port:     r["port"],
	}, nil
}
// HasOAuth2Config reports whether OAuth2 client-credentials parameters are configured.
func (c *Config) HasOAuth2Config() bool {
	return c.OAuth2Config != nil
}

// isValid reports whether every field required for the client credentials
// flow (token URL, client id, client secret, scopes) is populated.
func (c *OAuth2Config) isValid() bool {
	if c.TokenURL == "" || c.ClientID == "" || c.ClientSecret == "" {
		return false
	}
	return len(c.Scopes) > 0
}
// getHTTPClient returns the HTTP client matching the Config's parameters,
// building and caching it on first use; later calls reuse the cached client.
// NOTE(review): the lazy init is not guarded by a lock, so concurrent first
// calls could race -- confirm single-goroutine initialization.
func (c *Config) getHTTPClient() *http.Client {
	if c.httpClient == nil {
		c.httpClient = &http.Client{
			Timeout: c.Timeout,
			Transport: &http.Transport{
				MaxIdleConns:        100,
				MaxIdleConnsPerHost: 20,
				Proxy:               http.ProxyFromEnvironment,
				TLSClientConfig: &tls.Config{
					InsecureSkipVerify: c.Insecure,
				},
			},
			CheckRedirect: func(req *http.Request, via []*http.Request) error {
				if c.IgnoreRedirect {
					// Don't follow redirects
					return http.ErrUseLastResponse
				}
				// Follow redirects
				return nil
			},
		}
		if c.HasCustomDNSResolver() {
			dnsResolver, err := c.parseDNSResolver()
			if err != nil {
				// We're ignoring the error, because it should have been validated on startup ValidateAndSetDefaults.
				// It shouldn't happen, but if it does, we'll log it... Better safe than sorry ;)
				log.Println("[client][getHTTPClient] THIS SHOULD NOT HAPPEN. Silently ignoring invalid DNS resolver due to error:", err.Error())
			} else {
				// Route all DNS lookups through the configured resolver
				// instead of the system default.
				dialer := &net.Dialer{
					Resolver: &net.Resolver{
						PreferGo: true,
						Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
							d := net.Dialer{}
							return d.DialContext(ctx, dnsResolver.Protocol, dnsResolver.Host+":"+dnsResolver.Port)
						},
					},
				}
				c.httpClient.Transport.(*http.Transport).DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
					return dialer.DialContext(ctx, network, addr)
				}
			}
		}
		if c.HasOAuth2Config() {
			// Wrap the client so requests transparently carry OAuth2 tokens.
			c.httpClient = configureOAuth2(c.httpClient, *c.OAuth2Config)
		}
	}
	return c.httpClient
}
// configureOAuth2 wraps httpClient in an HTTP client that will obtain and
// refresh OAuth2 client-credentials tokens as necessary.
// The returned Client and its Transport should not be modified.
func configureOAuth2(httpClient *http.Client, c OAuth2Config) *http.Client {
	cfg := clientcredentials.Config{
		TokenURL:     c.TokenURL,
		ClientID:     c.ClientID,
		ClientSecret: c.ClientSecret,
		Scopes:       c.Scopes,
	}
	// Make token requests go through the provided client (its timeout and TLS
	// settings) rather than http.DefaultClient.
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, httpClient)
	return cfg.Client(ctx)
}
|
package service
import (
"github.com/bioxeed/momenton-challenge/db"
"github.com/sirupsen/logrus"
)
// EmployeeService provides methods to work with Employees.
type EmployeeService interface {
	// List returns all employee records, arranged as a hierarchy with each
	// manager's reports nested under Children.
	List() ([]Employee, error)
}
// employeeService is the default EmployeeService implementation, backed by a
// db.EmployeeRepo.
type employeeService struct {
	log  logrus.FieldLogger // structured logger for service-level events
	repo db.EmployeeRepo    // data source for raw employee rows
}
// NewEmployeeService creates and initialises an EmployeeService backed by the
// given logger and repository.
func NewEmployeeService(log logrus.FieldLogger, repo db.EmployeeRepo) EmployeeService {
	svc := &employeeService{}
	svc.log = log
	svc.repo = repo
	return svc
}
// List returns all employee records, arranged into a hierarchy by sort.
// Repository errors are logged and propagated; an empty result set yields
// (nil, nil).
func (s employeeService) List() ([]Employee, error) {
	s.log.Info("Listing employees")
	rows, err := s.repo.List()
	switch {
	case err != nil:
		s.log.WithError(err).Error("Error listing employees")
		return nil, err
	case len(rows) == 0:
		return nil, nil
	default:
		return s.sort(rows)
	}
}
// sort arranges the flat employee rows into a management hierarchy: each
// employee with no known manager becomes a root, with their reports nested
// under Children via mapChildren.
//
// NOTE(review): despite the name, this builds a tree rather than ordering
// rows; output order follows input row order.
func (s employeeService) sort(rows []db.Employee) ([]Employee, error) {
	var results []Employee
	// map by employee_id for lookup
	employees := make(map[int]db.Employee)
	// children maps a manager's ID to the employees reporting to them.
	children := make(map[int][]Employee)
	for _, v := range rows {
		employees[v.ID] = v
		children[v.ManagerID] = append(children[v.ManagerID], Employee{
			ID:   v.ID,
			Name: v.Name,
		})
	}
	for _, v := range rows {
		_, ok := employees[v.ManagerID]
		// Roots: ManagerID == 0 (presumably the "no manager" sentinel —
		// confirm against the schema) or a manager absent from the data set.
		if v.ManagerID == 0 || !ok {
			e := Employee{
				ID:   v.ID,
				Name: v.Name,
			}
			c, err := s.mapChildren(v, employees, children)
			if err != nil {
				// Errors are logged but not propagated; the partial tree is
				// still returned.
				s.log.WithError(err).Error("Error mapping child nodes")
			}
			e.Children = c
			results = append(results, e)
		}
	}
	return results, nil
}
// mapChildren recursively attaches the reports of employee v, using the
// precomputed employees (ID -> row) and children (manager ID -> reports)
// lookup maps, and returns v's direct reports with their own subtrees filled
// in. Employees without reports yield (nil, nil).
func (s employeeService) mapChildren(v db.Employee, employees map[int]db.Employee, children map[int][]Employee) ([]Employee, error) {
	direct := children[v.ID]
	if len(direct) == 0 {
		return nil, nil
	}
	for i := range direct {
		sub, err := s.mapChildren(employees[direct[i].ID], employees, children)
		if err != nil {
			s.log.WithError(err).Error("Error mapping child nodes")
		}
		direct[i].Children = sub
	}
	return direct, nil
}
|
package main
import (
"fmt"
"strings"
)
// bomberman returns the grid ("O" = bomb, "." = empty) after the given number
// of seconds of the bomber-planting/detonation simulation: nothing changes
// before second 2, every even second leaves the board fully planted, and odd
// seconds >= 3 are simulated step by step.
func bomberman(seconds int, grid []string) (output []string) {
	if seconds < 2 {
		// Nothing has been planted or detonated yet; grid is unchanged.
		output = grid
	} else if seconds%2 == 0 {
		// Any even second ends with a bomb in every cell.
		var row string = strings.Repeat("O", len(grid[0]))
		for i := 0; i < len(grid); i++ {
			output = append(output, row)
		}
	} else {
		// Encode each cell as a fuse countdown: 0 = empty, >0 = ticks left.
		nGrid := [][]int{}
		for _, row := range grid {
			nRow := []int{}
			for _, c := range row {
				if string(c) == "." {
					nRow = append(nRow, 0)
				} else {
					nRow = append(nRow, 2)
				}
			}
			nGrid = append(nGrid, nRow)
		}
		R := len(grid)
		C := len(grid[0])
		t := 1
		// Only a bounded number of steps is simulated, depending on
		// seconds mod 4.
		// NOTE(review): the bound 4+seconds%4 assumes the odd-second states
		// repeat with period 4 — confirm against the problem statement.
		for t < 4+seconds%4 {
			t++
			destroyed := [][]int{}
			for r := 0; r < R; r++ {
				for c := 0; c < C; c++ {
					if nGrid[r][c] > 0 {
						nGrid[r][c]--
					}
					if nGrid[r][c] == 0 {
						if t%2 == 0 {
							// Even tick: the bomber plants a fresh bomb here.
							nGrid[r][c] = 3
						} else {
							// Odd tick: this bomb detonates, clearing its own
							// cell and the four orthogonal neighbours.
							destroyed = append(destroyed, []int{r, c})
							if r < R-1 {
								destroyed = append(destroyed, []int{r + 1, c})
							}
							if r > 0 {
								destroyed = append(destroyed, []int{r - 1, c})
							}
							if c < C-1 {
								destroyed = append(destroyed, []int{r, c + 1})
							}
							if c > 0 {
								destroyed = append(destroyed, []int{r, c - 1})
							}
						}
					}
				}
			}
			if len(destroyed) > 0 {
				// Apply detonations: every surviving cell holds a bomb with
				// countdown 2; destroyed cells become empty.
				for r, row := range nGrid {
					for c := range row {
						nGrid[r][c] = 2
					}
				}
				for _, v := range destroyed {
					nGrid[v[0]][v[1]] = 0
				}
			}
		}
		// Render the countdown grid back into "O"/"." strings.
		for _, row := range nGrid {
			var x string
			for _, c := range row {
				if c == 2 {
					x += "O"
				} else {
					x += "."
				}
			}
			output = append(output, x)
		}
	}
	return output
}
// main runs the simulation on a fixed sample grid for 3 seconds and prints
// the resulting board, one row per line.
func main() {
	grid := []string{
		".......",
		"...O...",
		"....O..",
		".......",
		"OO.....",
		"OO.....",
	}
	for _, row := range bomberman(3, grid) {
		fmt.Println(row)
	}
}
|
package main
import (
"github.com/gorilla/mux"
"log"
"net/http"
)
// middleWareHandler wraps a mux.Router with a connection limiter so that only
// a bounded number of requests are served concurrently.
type middleWareHandler struct {
	r *mux.Router  // the wrapped route table
	l *ConnLimiter // gate controlling concurrent request slots
}
// ServeHTTP enforces the connection limit before delegating to the wrapped
// router; requests over the limit get 429 Too Many Requests.
//
// Bug fix: the original registered "defer m.l.ReleaseConn()" AFTER the call
// to m.r.ServeHTTP, so a panic inside a handler would skip registration and
// leak the acquired slot. The defer is now registered immediately after a
// successful GetConn, guaranteeing release on every exit path.
func (m middleWareHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if !m.l.GetConn() {
		w.WriteHeader(http.StatusTooManyRequests)
		w.Write([]byte("Too many request"))
		return
	}
	defer m.l.ReleaseConn()
	m.r.ServeHTTP(w, r)
}
// NewMiddleWareHandler wraps router r with a middleware that allows at most
// limit concurrent requests.
func NewMiddleWareHandler(r *mux.Router, limit int) http.Handler {
	return middleWareHandler{
		r: r,
		l: NewConnLimiter(limit),
	}
}
// RegisterHandlers builds the service's route table: GET streams a video by
// ID, POST uploads one.
func RegisterHandlers() *mux.Router {
	router := mux.NewRouter()
	router.HandleFunc("/videos/{videoId}", streamHandler).Methods("GET")
	router.HandleFunc("/videos", uploadHandler).Methods("POST")
	return router
}
func main(){
router := RegisterHandlers()
m := NewMiddleWareHandler(router,2)
log.Fatal(http.ListenAndServe(":8080", m))
}
|
package algorun
// Owns reports whether the algolog identified by algologId belongs to the
// account identified by accountId. A failed lookup is treated as "not owned".
//
// Fixes: removed the unreachable trailing "return false" (both if/else
// branches already returned), dropped the non-idiomatic else-after-return,
// and renamed the local away from "log" (which shadows the conventional
// logger package name).
func Owns(algologId string, accountId string) bool {
	algolog, err := Find(algologId)
	if err != nil {
		return false
	}
	return algolog.AccountId == accountId
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.