text stringlengths 11 4.05M |
|---|
package logger
import (
"fmt"
"go.uber.org/zap"
)
// Logger wraps a *zap.Logger to expose a leveled, printf-style logging
// API (Debug/Info/Warn/... and their *f variants) similar to logrus.
type Logger struct {
	// Log is the underlying zap logger all methods delegate to.
	Log *zap.Logger
}

// ZapLogger is an alias of Logger kept for backward compatibility.
type ZapLogger = Logger
// InitZapLogger wraps the given *zap.Logger in a Logger and returns it.
func InitZapLogger(log *zap.Logger) *Logger {
	return &Logger{
		log,
	}
}
// Debug logs a message at level Debug on the ZapLogger.
// Arguments are joined in the manner of fmt.Sprint.
func (l *Logger) Debug(args ...interface{}) {
	l.Log.Debug(fmt.Sprint(args...))
}

// Debugf logs a formatted message at level Debug on the ZapLogger.
func (l *Logger) Debugf(template string, args ...interface{}) {
	l.Log.Debug(fmt.Sprintf(template, args...))
}

// Info logs a message at level Info on the ZapLogger.
// Arguments are joined in the manner of fmt.Sprint.
func (l *Logger) Info(args ...interface{}) {
	l.Log.Info(fmt.Sprint(args...))
}

// Infof logs a formatted message at level Info on the ZapLogger.
func (l *Logger) Infof(template string, args ...interface{}) {
	l.Log.Info(fmt.Sprintf(template, args...))
}
// Warn logs a message at level Warn on the ZapLogger.
// Arguments are joined in the manner of fmt.Sprint.
func (l *Logger) Warn(args ...interface{}) {
	l.Log.Warn(fmt.Sprint(args...))
}

// Warning logs a message at level Warn on the ZapLogger.
// It is an alias of Warn, provided for logrus API compatibility.
func (l *Logger) Warning(args ...interface{}) {
	l.Log.Warn(fmt.Sprint(args...))
}

// Warnf logs a formatted message at level Warn on the ZapLogger.
func (l *Logger) Warnf(template string, args ...interface{}) {
	l.Log.Warn(fmt.Sprintf(template, args...))
}

// Warningf logs a formatted message at level Warn on the ZapLogger.
// It is an alias of Warnf, provided for logrus API compatibility.
func (l *Logger) Warningf(template string, args ...interface{}) {
	l.Log.Warn(fmt.Sprintf(template, args...))
}
// Error logs a message at level Error on the ZapLogger.
func (l *Logger) Error(args ...interface{}) {
	l.Log.Error(fmt.Sprint(args...))
}

// Errorf logs a formatted message at level Error on the ZapLogger.
func (l *Logger) Errorf(template string, args ...interface{}) {
	l.Log.Error(fmt.Sprintf(template, args...))
}

// Fatal logs a message at level Fatal on the ZapLogger, then calls
// os.Exit (zap's Fatal behavior).
func (l *Logger) Fatal(args ...interface{}) {
	l.Log.Fatal(fmt.Sprint(args...))
}

// Fatalf logs a formatted message at level Fatal on the ZapLogger, then
// calls os.Exit (zap's Fatal behavior).
func (l *Logger) Fatalf(template string, args ...interface{}) {
	l.Log.Fatal(fmt.Sprintf(template, args...))
}

// Panic logs a message at level Panic on the ZapLogger, then panics
// (zap's Panic behavior).
func (l *Logger) Panic(args ...interface{}) {
	l.Log.Panic(fmt.Sprint(args...))
}

// Panicf logs a formatted message at level Panic on the ZapLogger, then
// panics (zap's Panic behavior).
func (l *Logger) Panicf(template string, args ...interface{}) {
	l.Log.Panic(fmt.Sprintf(template, args...))
}
// With returns a new Logger with an extra structured field attached.
// The receiver is not modified.
func (l *Logger) With(key string, value interface{}) *Logger {
	return &ZapLogger{l.Log.With(zap.Any(key, value))}
}

// Printf logs a formatted message at level Info on the ZapLogger
// (stdlib log.Printf-compatible signature).
func (l *Logger) Printf(format string, args ...interface{}) {
	l.Log.Info(fmt.Sprintf(format, args...))
}

// Print logs a message at level Info on the ZapLogger
// (stdlib log.Print-compatible signature).
func (l *Logger) Print(args ...interface{}) {
	l.Log.Info(fmt.Sprint(args...))
}

// WithField returns a new Logger with an extra structured field attached.
// NOTE(review): identical to With; kept for logrus API compatibility.
func (l *Logger) WithField(key string, value interface{}) *Logger {
	return &ZapLogger{l.Log.With(zap.Any(key, value))}
}
// WithFields returns a new Logger with all the given extra fields
// attached. The receiver is not modified. Field order in the output is
// unspecified (map iteration order), matching the previous behavior.
//
// All fields are attached with a single underlying zap With call rather
// than one chained logger per field. If fields is empty, the receiver
// itself is returned; the previous implementation returned a nil
// *Logger in that case, which made any subsequent logging call panic.
func (l *Logger) WithFields(fields map[string]interface{}) *Logger {
	if len(fields) == 0 {
		return l
	}
	zfs := make([]zap.Field, 0, len(fields))
	for k, v := range fields {
		zfs = append(zfs, zap.Any(k, v))
	}
	return &Logger{l.Log.With(zfs...)}
}
|
package gnet
import (
exp "github.com/jholowczak/guacamole_client_go"
"github.com/jholowczak/guacamole_client_go/gio"
guid "github.com/satori/go.uuid"
)
// InternalDataOpcode is the Guacamole protocol instruction opcode
// reserved for arbitrary internal use by tunnel implementations. Its
// value is guaranteed to be the empty string (""). Tunnel
// implementations may use this opcode for any purpose. It is currently
// used by the HTTP tunnel to mark the end of the HTTP response, and by
// the WebSocket tunnel to transmit the tunnel UUID.
const InternalDataOpcode = ""
// GuacamoleTunnel provides a unique identifier and synchronized access
// to the GuacamoleReader and GuacamoleWriter associated with a
// GuacamoleSocket.
type GuacamoleTunnel interface {
	// AcquireReader acquires exclusive read access to the Guacamole
	// instruction stream and returns a GuacamoleReader for reading
	// from that stream.
	AcquireReader() gio.GuacamoleReader
	// ReleaseReader relinquishes exclusive read access to the
	// Guacamole instruction stream. It should be called whenever a
	// thread finishes using the tunnel's GuacamoleReader.
	ReleaseReader()
	// HasQueuedReaderThreads reports whether threads are waiting for
	// read access to the Guacamole instruction stream.
	HasQueuedReaderThreads() bool
	// AcquireWriter acquires exclusive write access to the Guacamole
	// instruction stream and returns a GuacamoleWriter for writing to
	// that stream.
	AcquireWriter() gio.GuacamoleWriter
	// ReleaseWriter relinquishes exclusive write access to the
	// Guacamole instruction stream. It should be called whenever a
	// thread finishes using the tunnel's GuacamoleWriter.
	ReleaseWriter()
	// HasQueuedWriterThreads reports whether threads are waiting for
	// write access to the Guacamole instruction stream.
	HasQueuedWriterThreads() bool
	// GetUUID returns the unique identifier associated with this
	// GuacamoleTunnel.
	GetUUID() guid.UUID
	// GetSocket returns the GuacamoleSocket used by this
	// GuacamoleTunnel for reading and writing.
	GetSocket() GuacamoleSocket
	// Close releases all resources allocated to this GuacamoleTunnel.
	// It returns a non-nil ExceptionInterface if an error occurs while
	// releasing resources.
	Close() exp.ExceptionInterface
	// IsOpen reports whether this GuacamoleTunnel is open; it returns
	// false once the tunnel has been closed.
	IsOpen() bool
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package camera
import (
"context"
"math/rand"
"regexp"
"strconv"
"strings"
"time"
"chromiumos/tast/common/media/caps"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/camera/cca"
"chromiumos/tast/local/camera/testutil"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/display"
"chromiumos/tast/testing"
)
// init registers the CCAUIStress test with the tast framework, with one
// parameterized variant per camera backend (real / vivid / fake) plus a
// long-timeout "manual" variant for interactive stress runs.
func init() {
	testing.AddTest(&testing.Test{
		Func:         CCAUIStress,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Opens CCA and stress testing common functions randomly",
		Contacts:     []string{"shik@chromium.org", "pihsun@chromium.org", "chromeos-camera-eng@google.com"},
		SoftwareDeps: []string{"camera_app", "chrome", caps.BuiltinOrVividCamera},
		Vars: []string{
			// Number of iterations to test.
			"iterations",
			// Skip first skip_iterations iterations for reproducing failures faster.
			"skip_iterations",
			// The seed for deterministically generating the random action sequence.
			"seed",
			// The action filter regular expression. Only action names match
			// the filter will be stressed.
			"action_filter",
			// The list of comma separated actions(more than 1 action) that will be stressed.
			// In a single iteration, these actions will be stressed in the same order as given.
			"action_sequence",
			// Optional. Expecting "tablet".
			"mode",
		},
		Params: []testing.Param{{
			Name:              "real",
			ExtraSoftwareDeps: []string{caps.BuiltinCamera},
			ExtraAttr:         []string{"group:mainline", "informational", "group:camera-libcamera"},
			Fixture:           "ccaLaunched",
			Timeout:           5 * time.Minute,
			Val:               testutil.UseRealCamera,
		}, {
			Name:              "vivid",
			ExtraSoftwareDeps: []string{caps.VividCamera},
			ExtraAttr:         []string{"group:mainline", "informational", "group:camera-libcamera"},
			Fixture:           "ccaLaunched",
			Timeout:           5 * time.Minute,
			Val:               testutil.UseVividCamera,
		}, {
			Name:      "fake",
			ExtraAttr: []string{"group:mainline", "informational", "group:camera-libcamera"},
			Fixture:   "ccaLaunchedWithFakeCamera",
			Timeout:   5 * time.Minute,
			Val:       testutil.UseFakeCamera,
		}, {
			// For stress testing manually with real camera and longer timeout.
			Name:              "manual",
			ExtraSoftwareDeps: []string{caps.BuiltinCamera},
			Fixture:           "ccaLaunched",
			Timeout:           30 * 24 * time.Hour,
			Val:               testutil.UseRealCamera,
		}},
	})
}
// stressAction is one named operation that the stress loop can perform
// against the camera app.
type stressAction struct {
	// name identifies the action in logs and is matched against the
	// action_filter / action_sequence test variables.
	name string
	// perform runs the action; the passed context carries the
	// per-action timeout.
	perform func(context.Context) error
}
// intVar reads the runtime variable name as an integer, returning
// defaultValue when the variable is unset. A value that is set but not
// a valid integer aborts the test.
func intVar(s *testing.State, name string, defaultValue int) int {
	raw, present := s.Var(name)
	if !present {
		return defaultValue
	}
	parsed, err := strconv.Atoi(raw)
	if err != nil {
		s.Fatalf("Failed to parse integer variable %v: %v", name, err)
	}
	return parsed
}
// stringVar reads the runtime variable name as a string, returning
// defaultValue when the variable is unset.
func stringVar(s *testing.State, name, defaultValue string) string {
	if raw, present := s.Var(name); present {
		return raw
	}
	return defaultValue
}
// switchToRearCamera ensures the rear (back-facing) camera is active.
// If a user-facing camera is currently open it switches cameras and
// verifies the new facing; if the rear camera is already open it does
// nothing.
func switchToRearCamera(ctx context.Context, app cca.App) error {
	current, err := app.GetFacing(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to get camera facing")
	}
	// Nothing to do when the rear camera is already selected.
	if current == cca.FacingBack {
		return nil
	}
	if err := app.SwitchCamera(ctx); err != nil {
		return errors.Wrap(err, "failed to switch camera")
	}
	if err := app.CheckFacing(ctx, cca.FacingBack); err != nil {
		return errors.Wrap(err, "failed to verify camera facing back")
	}
	return nil
}
// CCAUIStress opens CCA and stresses common camera-app operations
// (restart, take photo, record video, switch mode/camera). Actions are
// either drawn at random from a seeded PRNG, filtered by action_filter,
// or executed as the fixed list given in action_sequence.
func CCAUIStress(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(cca.FixtureData).Chrome
	app := s.FixtValue().(cca.FixtureData).App()
	tb := s.FixtValue().(cca.FixtureData).TestBridge()
	s.FixtValue().(cca.FixtureData).SetDebugParams(cca.DebugParams{SaveScreenshotWhenFail: true})
	const defaultIterations = 20
	const defaultSkipIterations = 0
	const defaultSeed = 1
	const actionTimeout = 30 * time.Second
	const cleanupTimeout = 20 * time.Second
	iterations := intVar(s, "iterations", defaultIterations)
	skipIterations := intVar(s, "skip_iterations", defaultSkipIterations)
	actionFilter, err := regexp.Compile(stringVar(s, "action_filter", ".*"))
	if err != nil {
		s.Fatal("Failed to compile action_filter as a regexp")
	}
	actionSequences := strings.Split(stringVar(s, "action_sequence", ""), ",")
	seed := intVar(s, "seed", defaultSeed)
	rand.Seed(int64(seed))
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to connect to test API: ", err)
	}
	// Reserve time at the end of the test for the deferred cleanups.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()
	var tabletMode bool
	if mode, ok := s.Var("mode"); ok {
		tabletMode = mode == "tablet"
		cleanup, err := ash.EnsureTabletModeEnabled(ctx, tconn, tabletMode)
		if err != nil {
			s.Fatalf("Failed to enable tablet mode to %v: %v", tabletMode, err)
		}
		defer cleanup(cleanupCtx)
	} else {
		// Use default screen mode of the DUT.
		tabletMode, err = ash.TabletModeEnabled(ctx, tconn)
		if err != nil {
			s.Fatal("Failed to get DUT default screen mode: ", err)
		}
	}
	s.Log("Running test with tablet mode: ", tabletMode)
	if tabletMode {
		cleanup, err := display.RotateToLandscape(ctx, tconn)
		if err != nil {
			s.Fatal("Failed to rotate display to landscape: ", err)
		}
		defer cleanup(cleanupCtx)
	}
	// TODO(b/182248415): Add variables to control per action parameters, like
	// how many photo should be taken consecutively or how long the video
	// recording should be.
	allActions := []stressAction{
		{
			name: "restart-app",
			perform: func(ctx context.Context) error {
				return app.Restart(ctx, tb)
			},
		},
		{
			name: "take-photo",
			perform: func(ctx context.Context) error {
				if err := app.SwitchMode(ctx, cca.Photo); err != nil {
					return err
				}
				_, err := app.TakeSinglePhoto(ctx, cca.TimerOff)
				return err
			},
		},
		{
			name: "record-video",
			perform: func(ctx context.Context) error {
				if err := app.SwitchMode(ctx, cca.Video); err != nil {
					return err
				}
				_, err := app.RecordVideo(ctx, cca.TimerOff, 3*time.Second)
				return err
			},
		},
		{
			name: "switch-photo",
			// BUG FIX: previously this closure captured the outer ctx
			// instead of using its parameter, bypassing the per-action
			// timeout applied by the stress loop.
			perform: func(ctx context.Context) error {
				return app.SwitchMode(ctx, cca.Photo)
			},
		},
		{
			name: "switch-video",
			// BUG FIX: same outer-ctx capture as switch-photo.
			perform: func(ctx context.Context) error {
				return app.SwitchMode(ctx, cca.Video)
			},
		},
	}
	numCameras, err := app.GetNumOfCameras(ctx)
	if err != nil {
		s.Fatal("Failed to get number of cameras: ", err)
	}
	// Camera-switching actions only make sense on multi-camera devices.
	if numCameras > 1 {
		allActions = append(allActions,
			stressAction{
				name: "switch-camera",
				perform: func(ctx context.Context) error {
					return app.SwitchCamera(ctx)
				},
			},
			stressAction{
				name: "switch-photo-rear",
				perform: func(ctx context.Context) error {
					if err := switchToRearCamera(ctx, *app); err != nil {
						return errors.Wrap(err, "failed to switch to rear camera")
					}
					return app.SwitchMode(ctx, cca.Photo)
				},
			},
			stressAction{
				name: "switch-video-rear",
				perform: func(ctx context.Context) error {
					if err := switchToRearCamera(ctx, *app); err != nil {
						return errors.Wrap(err, "failed to switch to rear camera")
					}
					return app.SwitchMode(ctx, cca.Video)
				},
			},
		)
	}
	// Resolve the action set: either the explicit ordered sequence, or
	// all actions whose names match action_filter.
	var actions []stressAction
	if len(actionSequences) > 1 {
		for _, actionSeq := range actionSequences {
			for _, action := range allActions {
				if actionSeq == action.name {
					actions = append(actions, action)
					break
				}
			}
		}
	} else {
		for _, action := range allActions {
			if actionFilter.MatchString(action.name) {
				actions = append(actions, action)
			}
		}
	}
	// Guard against an empty action set; previously rand.Intn(0) would
	// panic in the random branch.
	if len(actions) == 0 {
		s.Fatal("No actions selected; check action_filter / action_sequence")
	}
	// runAction performs one action under the per-action timeout; the
	// closure keeps the cancel deferred per action rather than per test.
	runAction := func(i int, action stressAction) {
		s.Logf("Iteration %d/%d: Performing action %s", i, iterations, action.name)
		actionCtx, actionCancel := context.WithTimeout(ctx, actionTimeout)
		defer actionCancel()
		if err := action.perform(actionCtx); err != nil {
			s.Fatalf("Failed to perform action %v: %v", action.name, err)
		}
	}
	s.Logf("Start stressing for %v iterations with seed = %v, skipIterations = %v", iterations, seed, skipIterations)
	// TODO(b/182248415): Clear camera/ folder periodically, otherwise the disk
	// might be full after running many iterations.
	for i := 1; i <= iterations; i++ {
		if len(actionSequences) > 1 {
			// BUG FIX: skip_iterations was silently ignored in sequence
			// mode. The sequence is deterministic (no PRNG involved), so
			// skipped iterations can simply be dropped.
			if i <= skipIterations {
				continue
			}
			for _, action := range actions {
				runAction(i, action)
			}
		} else {
			action := actions[rand.Intn(len(actions))]
			if i <= skipIterations {
				// We still need to call rand.Intn() to advance the internal state of PRNG.
				continue
			}
			runAction(i, action)
		}
	}
}
|
package bill
import (
"github.com/life-assistant-go/base"
)
// Bill is the model for a bill record, embedding the shared database
// fields from base.Database.
type Bill struct {
	base.Database
	// billNo is the bill number. NOTE(review): unexported fields are
	// invisible to reflection-based ORMs/JSON encoders — confirm this
	// is intended.
	billNo string
}

// TableName returns the database table name for Bill ("bills").
func (Bill) TableName() string {
	return "bills"
}
|
package martinier
import (
"github.com/go-martini/martini"
"github.com/martini-contrib/binding"
"github.com/martini-contrib/render"
"gopkg.in/mgo.v2"
"net/http"
)
// NewServer builds a martini engine wired with pretty-printed JSON
// rendering, the given database middleware, and the /users routes.
func NewServer(db *DatabaseConnection) *martini.Martini {
	engine := martini.New()
	// Render JSON responses with indentation.
	engine.Use(render.Renderer(render.Options{IndentJSON: true}))
	// Inject a *mgo.Database into every request's handler chain.
	engine.Use(db.Database())
	router := martini.NewRouter()
	// NOTE(review): binding.Json on a GET is unusual — presumably the
	// request body narrows the query; confirm against callers.
	router.Get("/users", binding.Json(User{}), func(user User, render render.Render, db *mgo.Database) {
		render.JSON(http.StatusOK, user.all(db))
	})
	router.Post("/users", binding.Json(User{}), func(user User, render render.Render, db *mgo.Database) {
		render.JSON(http.StatusCreated, user.store(db, user))
	})
	//router.Get("/users/:id", handler)
	//router.Put("/users/:id", handler)
	//router.Delete("/users/:id", handler)
	engine.Action(router.Handle)
	return engine
}
|
package menu
// ManageMenus lists the front-end permission menu nodes, mapping a
// permission key (route + HTTP method) to its controller handler.
var ManageMenus = []map[string]string{
	{
		"key":   "index.index.get",
		"value": "IndexController.Index",
	},
	{
		"key":   "index.index.post",
		"value": "IndexController.Index1",
	},
	{
		"key":   "common.get_role", // fetch all permissions
		"value": "CommonController.GetRole",
	},
	{
		"key":   "package.levels.get", // list industry packages
		"value": "PackageController.Levels",
	},
	{
		"key":   "package.levels.post", // add an industry package
		"value": "PackageController.AddLevel",
	},
	{
		"key":   "package.levels.put", // update an industry package
		"value": "PackageController.UpdateLevels",
	},
	{
		"key":   "package.levels.delete", // delete an industry package
		"value": "PackageController.DelLevels",
	},
	{
		"key":   "package.roles.get", // fetch permission nodes the current user already has
		"value": "AuthController.GetRoles",
	},
}
//var ManageMenus MenusMap
//var ManageMenus initial.MenusMap = initial.MenusMap{
// initial.Map{
// "cn_name": "首页",
// "en_name": "welcome",
// "icon": "icon icon-edit",
// "sort": 1, //这里必须是数字才能排序
// "level":2,//一级还是二级菜单
// "sub": []map[string]string{
// {
// "name": "首页",
// "path": "IndexController.Index",
// "icon": "icon icon-index",
// },
// },
// },
// initial.Map{
// "cn_name": "站点管理",
// "en_name": "system",
// "icon": "icon icon-edit",
// "sort": 2, //这里必须是数字才能排序
// "level": 1,//一级还是二级菜单
// "sub": []map[string]string{
// {
// "name": "站点设置",
// "path": "IndexController.Index",
//
// },
// },
// },
//}
// init is intentionally empty: sorting of ManageMenus is currently
// disabled (see the commented-out sort call below).
func init() {
	//sort.Sort(ManageMenus)
}
|
/*
Copyright 2018 The Chronologist Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
"go.uber.org/zap"
core_v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"github.com/hypnoglow/chronologist/internal/helm"
"github.com/hypnoglow/chronologist/internal/zaplog"
)
// setupSecretsInformer creates a shared informer that lists and watches
// secrets (across all namespaces) matching releaseLabelSelector, and
// registers the controller's add/update/delete handlers, which enqueue
// matching secrets for processing.
func (c *Controller) setupSecretsInformer(kube kubernetes.Interface) {
	// informer watches for secrets with label OWNER=TILLER
	// and invokes handlers that add those secrets to the queue.
	c.informer = cache.NewSharedInformer(
		// TODO: It would be great if we could filter outdated secrets here, and not
		// in handler funcs. But this seems impossible currently.
		&cache.ListWatch{
			ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) {
				options.LabelSelector = releaseLabelSelector
				return kube.CoreV1().Secrets(meta_v1.NamespaceAll).List(options)
			},
			WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) {
				options.LabelSelector = releaseLabelSelector
				return kube.CoreV1().Secrets(meta_v1.NamespaceAll).Watch(options)
			},
		},
		&core_v1.Secret{},
		releasesResyncPeriod,
	)
	c.informer.AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc:    c.addSecret,
			UpdateFunc: c.updateSecret,
			DeleteFunc: c.deleteSecret,
		},
	)
}
// addSecret is the informer add handler: it enqueues the secret unless
// it is older than the configured maxAge (0 disables the age check).
func (c *Controller) addSecret(obj interface{}) {
	sec := obj.(*core_v1.Secret)

	// We operate on secrets that are not outdated.
	if c.maxAge != 0 && time.Now().Add(-c.maxAge).After(sec.CreationTimestamp.Time) {
		// CONSISTENCY FIX: log namespace/name in that order, matching
		// the Infof below (the arguments were previously swapped).
		c.log.Sugar().Debugf("addSecret: Secret %s/%s is too old, skip", sec.Namespace, sec.Name)
		return
	}

	c.log.Sugar().Infof("Adding Secret %s/%s", sec.Namespace, sec.Name)
	c.enqueueSecret(sec)
}
// updateSecret is the informer update handler: it enqueues the new
// version of the secret unless it is older than the configured maxAge
// (0 disables the age check). The old version is ignored.
func (c *Controller) updateSecret(old, new interface{}) {
	sec := new.(*core_v1.Secret)

	// We operate on secrets that are not outdated.
	if c.maxAge != 0 && time.Now().Add(-c.maxAge).After(sec.CreationTimestamp.Time) {
		// CONSISTENCY FIX: log namespace/name in that order, matching
		// the Infof below (the arguments were previously swapped).
		c.log.Sugar().Debugf("updateSecret: Secret %s/%s is too old, skip", sec.Namespace, sec.Name)
		return
	}

	c.log.Sugar().Infof("Updating Secret %s/%s", sec.Namespace, sec.Name)
	c.enqueueSecret(sec)
}
// deleteSecret is the informer delete handler. If the deleted object is
// a secret (and not older than maxAge) it is enqueued. Delete events
// may instead carry a cache.DeletedFinalStateUnknown tombstone; in that
// case the tombstone contents are only validated.
func (c *Controller) deleteSecret(obj interface{}) {
	sec, ok := obj.(*core_v1.Secret)
	if ok {
		// We operate on secrets that are not outdated.
		if c.maxAge != 0 && time.Now().Add(-c.maxAge).After(sec.CreationTimestamp.Time) {
			// CONSISTENCY FIX: log namespace/name in that order, matching
			// the Infof below (the arguments were previously swapped).
			c.log.Sugar().Debugf("deleteSecret: Secret %s/%s is too old, skip", sec.Namespace, sec.Name)
			return
		}
		c.log.Sugar().Infof("Deleting Secret %s/%s", sec.Namespace, sec.Name)
		c.enqueueSecret(sec)
		return
	}

	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("failed to get object from tombstone %#v", obj))
		return
	}

	_, ok = tombstone.Obj.(*core_v1.Secret)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Secret %#v", obj))
		return
	}
	// NOTE(review): a secret recovered from a tombstone is never
	// enqueued here — the usual informer pattern would process it.
	// Preserving existing behavior; confirm whether this is intentional.
}
// enqueueSecret computes the cache key for sec and adds it to the work
// queue so a sync worker will pick it up. Key derivation failures are
// reported through the runtime error handler instead of being queued.
func (c *Controller) enqueueSecret(sec *core_v1.Secret) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(sec)
	if err == nil {
		c.queue.Add(key)
		return
	}
	utilruntime.HandleError(fmt.Errorf("failed to get key for Secret %s/%s: %v", sec.Namespace, sec.Name, err))
}
// syncSecret method contains logic that is responsible for synchronizing
// a specific secret with a relevant annotation.
//
// key is a cache key of the form namespace/name; the secret's name
// encodes the release name and revision. If the secret no longer
// exists in the informer store, the corresponding release event is
// deleted; otherwise a release event is rebuilt from the secret's raw
// "release" payload and synced.
func (c *Controller) syncSecret(key string) error {
	log := c.log.With(zap.String("secret", key))

	// Log the wall-clock duration of the whole sync.
	startTime := time.Now()
	log.Sugar().Infof("Started syncing Secret at %v", startTime.Format(time.RFC3339Nano))
	defer func() {
		log.Sugar().Infof("Finished syncing Secret in %v", time.Since(startTime))
	}()

	// secrets are always named after release revision.
	name, revision, err := c.keyToRelease(key)
	if err != nil {
		return err
	}

	// Carry release/revision on the context so downstream logging can
	// attach them as structured fields.
	ctx := zaplog.WithFields(
		context.Background(),
		zap.String("release", name),
		zap.String("revision", revision),
	)

	item, exists, err := c.informer.GetStore().GetByKey(key)
	if err != nil {
		return errors.Wrap(err, "get from store by key")
	}
	if !exists {
		// Secret is gone: propagate the deletion to the release event.
		return c.deleteReleaseEvent(ctx, name, revision)
	}

	sec := item.(*core_v1.Secret)
	re, err := helm.EventFromRawRelease(string(sec.Data["release"]))
	if err != nil {
		return errors.Wrap(err, "create a release event from raw helm release data")
	}

	return c.syncReleaseEvent(ctx, re, name, revision)
}
|
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT License was not distributed with this
// file, you can obtain one at https://opensource.org/licenses/MIT.
//
// Copyright (c) DUSK NETWORK. All rights reserved.
package wallet
import (
"bytes"
"context"
"crypto/rand"
"os"
"testing"
"time"
"github.com/dusk-network/dusk-blockchain/harness/tests"
"github.com/dusk-network/dusk-protobuf/autogen/go/rusk"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"github.com/dusk-network/dusk-blockchain/pkg/core/data/database"
"github.com/dusk-network/dusk-blockchain/pkg/core/data/ipc/keys"
assert "github.com/stretchr/testify/require"
)
// dbPath is the on-disk location of the throwaway test database.
const dbPath = "testDb"

const (
	// seedFile / secretFile are wallet artifacts written by the tests
	// and removed afterwards.
	seedFile   = "seed.dat"
	secretFile = "key.dat"
)

// address is where the mock rusk gRPC server listens.
const address = "127.0.0.1:5051"
// TestMain starts the mock rusk gRPC server required by every test in
// this package, then runs the test suite.
func TestMain(m *testing.M) {
	// start rusk mock rpc server
	tests.StartMockServer(address)

	// Start all tests
	code := m.Run()
	os.Exit(code)
}
// createRPCConn dials the mock rusk server (3s timeout, no TLS) and
// returns a KeysClient plus the underlying connection, which the caller
// must Close.
func createRPCConn(t *testing.T) (client rusk.KeysClient, conn *grpc.ClientConn) {
	assert := assert.New(t)
	dialCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	var err error
	conn, err = grpc.DialContext(dialCtx, address, grpc.WithInsecure())
	assert.NoError(err)
	return rusk.NewKeysClient(conn), conn
}
// TestNewWallet creates a wallet from a fresh seed and secret key,
// then verifies that loading it back from file fails with the wrong
// password and round-trips all key material with the right one.
func TestNewWallet(t *testing.T) {
	assert := assert.New(t)
	netPrefix := byte(1)

	db, err := database.New(dbPath)
	assert.NoError(err)

	// Clean up all on-disk artifacts created by the test.
	defer os.RemoveAll(dbPath)
	defer os.Remove(seedFile)
	defer os.Remove(secretFile)

	client, conn := createRPCConn(t)
	defer conn.Close()

	seed, err := GenerateNewSeed(nil)
	assert.NoError(err)

	ctx := context.Background()
	secretKey, err := client.GenerateKeys(ctx, &rusk.GenerateKeysRequest{})
	assert.NoError(err)

	// Since the dusk-protobuf mocks currently do not fill up the scalars,
	// we will have to do it ourselves.
	require.Nil(t, fillSecretKey(secretKey.Sk))

	sk := keys.NewSecretKey()
	keys.USecretKey(secretKey.Sk, sk)

	assert.NotNil(sk)
	assert.NotNil(sk.A)
	assert.NotNil(sk.B)

	w, err := New(nil, seed, netPrefix, db, "pass", seedFile, sk)
	assert.NoError(err)

	// wrong wallet password
	loadedWallet, err := LoadFromFile(netPrefix, db, "wrongPass", seedFile)
	assert.NotNil(err)
	assert.Nil(loadedWallet)

	// correct wallet password
	loadedWallet, err = LoadFromFile(netPrefix, db, "pass", seedFile)
	assert.Nil(err)

	// The reloaded wallet must carry identical key material.
	assert.Equal(w.SecretKey.A, loadedWallet.SecretKey.A)
	assert.Equal(w.SecretKey.B, loadedWallet.SecretKey.B)
	assert.Equal(w.consensusKeys.BLSSecretKey, loadedWallet.consensusKeys.BLSSecretKey)
	assert.True(bytes.Equal(w.consensusKeys.BLSPubKeyBytes, loadedWallet.consensusKeys.BLSPubKeyBytes))
}
// TestCatchEOF creates 1000 wallets back to back, regenerating the seed
// and secret key each time, to shake out EOF/stream errors in wallet
// creation and the mock key service.
func TestCatchEOF(t *testing.T) {
	netPrefix := byte(1)

	client, conn := createRPCConn(t)
	defer conn.Close()

	db, err := database.New(dbPath)
	assert.Nil(t, err)

	// Clean up all on-disk artifacts created by the test.
	defer os.RemoveAll(dbPath)
	defer os.Remove(seedFile)
	defer os.Remove(secretFile)

	// Generate 1000 new wallets
	for i := 0; i < 1000; i++ {
		seed, err := GenerateNewSeed(nil)
		require.Nil(t, err)

		ctx := context.Background()
		secretKey, err := client.GenerateKeys(ctx, &rusk.GenerateKeysRequest{})
		require.Nil(t, err)
		require.Nil(t, fillSecretKey(secretKey.Sk))

		sk := keys.NewSecretKey()
		keys.USecretKey(secretKey.Sk, sk)

		_, err = New(nil, seed, netPrefix, db, "pass", seedFile, sk)
		assert.Nil(t, err)

		// Remove per-iteration files so the next wallet starts clean.
		os.Remove(seedFile)
		os.Remove(secretFile)
	}
}
// fillSecretKey populates both scalars of sk with 32 bytes of
// cryptographically secure randomness each. The dusk-protobuf mocks do
// not fill the scalars, so tests must do it themselves.
func fillSecretKey(sk *rusk.SecretKey) error {
	a := make([]byte, 32)
	if _, err := rand.Read(a); err != nil {
		return err
	}
	sk.A = a

	b := make([]byte, 32)
	// BUG FIX: the original called rand.Read on the first buffer again,
	// so sk.B was assigned an all-zero slice.
	if _, err := rand.Read(b); err != nil {
		return err
	}
	sk.B = b
	return nil
}
|
package log4g_test
import (
"github.com/kcmvp/log4g"
"log"
"testing"
)
// TestLogToFile exercises file logging with an hourly time-rolling
// policy (24 compressed backups) at INFO threshold: INFO and ERROR
// lines should reach hello.log, while DEBUG lines are filtered out.
func TestLogToFile(t *testing.T) {
	rf := log4g.NewRollingFile("hello.log", "", &log4g.TimeRollingPolicy{
		Pattern: log4g.Hourly,
		BasicPolicy: log4g.BasicPolicy{
			Backups:  24,
			Compress: true,
		},
	})
	logger := log4g.NewLogger(rf, log4g.INFO, log.Ldate|log.Lmicroseconds|log.Lshortfile, "")
	for i := 0; i < 5; i++ {
		logger.Info("Hello, so nice to meet you!")
	}
	// Below the INFO threshold; these should not appear in the file.
	for i := 0; i < 100; i++ {
		logger.Debug("Debug ...")
	}
	for i := 0; i < 5; i++ {
		logger.Error("Error ...")
	}
}
|
package air
import (
"bufio"
"bytes"
"compress/gzip"
"crypto/tls"
"encoding/base64"
"encoding/binary"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"html/template"
"io"
"io/ioutil"
"mime"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/BurntSushi/toml"
"github.com/aofei/mimesniffer"
"github.com/cespare/xxhash/v2"
"github.com/gorilla/websocket"
"github.com/vmihailenco/msgpack"
"golang.org/x/net/html"
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2"
"google.golang.org/protobuf/proto"
"gopkg.in/yaml.v2"
)
// Response is an HTTP response.
//
// The `Response` not only represents HTTP/1.x responses, but also represents
// HTTP/2 responses, and always acts as HTTP/2 responses.
type Response struct {
	// Air is where the response belongs.
	Air *Air

	// Status is the status code.
	//
	// See RFC 7231, section 6.
	//
	// For HTTP/1.x, it will be put in the response-line.
	//
	// For HTTP/2, it will be the ":status" pseudo-header.
	//
	// E.g.: 200
	Status int

	// Header is the header map.
	//
	// By setting the Trailer header to the names of the trailers which will
	// come later. In this case, those names of the header map are treated
	// as if they were trailers.
	//
	// See RFC 7231, section 7.
	//
	// The `Header` is basically the same for both HTTP/1.x and HTTP/2. The
	// only difference is that HTTP/2 requires header names to be lowercase
	// (for aesthetic reasons, this framework decided to follow this rule
	// implicitly, so please use the header name in the HTTP/1.x way). See
	// RFC 7540, section 8.1.2.
	//
	// E.g.: {"Foo": ["bar"]}
	Header http.Header

	// Body is the message body. It can be used to write a streaming
	// response.
	Body io.Writer

	// ContentLength records the length of the `Body`. The value -1
	// indicates that the length is unknown (it will continue to increase
	// as the data written to the `Body` increases). Values >= 0 indicate
	// that the given number of bytes has been written to the `Body`.
	ContentLength int64

	// Written indicates whether at least one byte has been written to the
	// client, or the underlying connection has been hijacked.
	Written bool

	// Minified indicates whether the `Body` has been minified.
	Minified bool

	// Gzipped indicates whether the `Body` has been gzipped.
	Gzipped bool

	// req is the request this response answers.
	req *Request
	// hrw is the underlying http.ResponseWriter; Header and Body are
	// derived from it (see SetHTTPResponseWriter).
	hrw http.ResponseWriter
	// servingContent is true while Write delegates to http.ServeContent,
	// with any error it produced captured in serveContentError.
	servingContent    bool
	serveContentError error
	// reverseProxying indicates the response is being produced by the
	// reverse proxy path.
	reverseProxying bool
	// deferredFuncs run after the response is fully handled.
	deferredFuncs []func()
}
// HTTPResponseWriter returns the underlying `http.ResponseWriter` of the r.
//
// ATTENTION: You should never call this method unless you know what you are
// doing. And, be sure to call the `SetHTTPResponseWriter` of the r when you
// have modified it.
func (r *Response) HTTPResponseWriter() http.ResponseWriter {
	return r.hrw
}
// SetHTTPResponseWriter sets the hrw to the underlying
// `http.ResponseWriter` of the r, re-deriving the `Header` and `Body`
// of the r from it.
//
// ATTENTION: You should never call this method unless you know what you
// are doing.
func (r *Response) SetHTTPResponseWriter(hrw http.ResponseWriter) {
	r.hrw = hrw
	r.Header = hrw.Header()
	r.Body = hrw
}
// SetCookie sets the c to the `Header` of the r. Invalid cookies will
// be silently dropped.
func (r *Response) SetCookie(c *http.Cookie) {
	v := c.String()
	if v == "" {
		// Invalid cookie; drop it without error.
		return
	}
	r.Header.Add("Set-Cookie", v)
}
// Write writes the content to the client.
//
// The main benefit of the `Write` over the `io.Copy` with the `Body` of the r
// is that it handles range requests properly, sets the Content-Type response
// header, and handles the If-Match, If-Unmodified-Since, If-None-Match,
// If-Modified-Since and If-Range request headers.
func (r *Response) Write(content io.ReadSeeker) error {
	if content == nil { // No content, no benefit
		if !r.Written {
			// Nothing sent yet: flush status line and headers only.
			r.hrw.WriteHeader(r.Status)
		}
		return nil
	}
	if r.Written {
		// Headers already sent: just stream the body (HEAD sends none).
		if r.req.Method != http.MethodHead {
			io.Copy(r.hrw, content)
		}
		return nil
	}
	if r.Header.Get("Content-Type") == "" {
		// Sniff the Content-Type from the leading bytes, then rewind so
		// the full content can still be written.
		b := r.Air.contentTypeSnifferBufferPool.Get().([]byte)
		defer r.Air.contentTypeSnifferBufferPool.Put(b)
		n, err := io.ReadFull(content, b)
		if err != nil &&
			!errors.Is(err, io.EOF) &&
			!errors.Is(err, io.ErrUnexpectedEOF) {
			return err
		}
		if _, err := content.Seek(0, io.SeekStart); err != nil {
			return err
		}
		r.Header.Set("Content-Type", mimesniffer.Sniff(b[:n]))
	}
	if !r.Minified && r.Air.MinifierEnabled {
		// Minify in memory when the MIME type is configured for it.
		mt, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
		if stringSliceContains(r.Air.MinifierMIMETypes, mt, true) {
			b, err := ioutil.ReadAll(content)
			if err != nil {
				return err
			}
			if b, err = r.Air.minifier.minify(mt, b); err != nil {
				return err
			}
			content = bytes.NewReader(b)
			r.Minified = true
			// Roll the flag back if nothing ends up being written.
			defer func() {
				if !r.Written {
					r.Minified = false
				}
			}()
		}
	}
	if r.Status < http.StatusBadRequest {
		// Success/redirect statuses: delegate to http.ServeContent so
		// range requests and conditional headers are handled properly.
		lm := time.Time{}
		if lmh := r.Header.Get("Last-Modified"); lmh != "" {
			lm, _ = http.ParseTime(lmh)
		}
		r.servingContent = true
		r.serveContentError = nil
		http.ServeContent(r.hrw, r.req.HTTPRequest(), "", lm, content)
		r.servingContent = false
		return r.serveContentError
	}
	// Error statuses: write the content directly, computing
	// Content-Length by seeking unless an encoding is already applied.
	if r.Header.Get("Content-Encoding") == "" {
		cl, err := content.Seek(0, io.SeekEnd)
		if err != nil {
			return err
		}
		if _, err := content.Seek(0, io.SeekStart); err != nil {
			return err
		}
		r.Header.Set("Content-Length", strconv.FormatInt(cl, 10))
	}
	// Caching validators make no sense on error responses.
	r.Header.Del("ETag")
	r.Header.Del("Last-Modified")
	if r.req.Method == http.MethodHead {
		r.hrw.WriteHeader(r.Status)
	} else {
		io.Copy(r.hrw, content)
	}
	return nil
}
// WriteString writes the s as a "text/plain" (UTF-8) content to the
// client via the `Write` of the r.
func (r *Response) WriteString(s string) error {
	r.Header.Set("Content-Type", "text/plain; charset=utf-8")
	return r.Write(strings.NewReader(s))
}
// WriteJSON writes an "application/json" content encoded from the v to
// the client. In debug mode the JSON is pretty-printed with tab
// indentation.
func (r *Response) WriteJSON(v interface{}) error {
	marshal := json.Marshal
	if r.Air.DebugMode {
		marshal = func(v interface{}) ([]byte, error) {
			return json.MarshalIndent(v, "", "\t")
		}
	}

	data, err := marshal(v)
	if err != nil {
		return err
	}

	r.Header.Set("Content-Type", "application/json; charset=utf-8")
	return r.Write(bytes.NewReader(data))
}
// WriteXML writes an "application/xml" content encoded from the v to
// the client, prefixed with the standard XML header. In debug mode the
// XML is pretty-printed with tab indentation.
func (r *Response) WriteXML(v interface{}) error {
	var (
		data []byte
		err  error
	)
	switch {
	case r.Air.DebugMode:
		data, err = xml.MarshalIndent(v, "", "\t")
	default:
		data, err = xml.Marshal(v)
	}
	if err != nil {
		return err
	}

	r.Header.Set("Content-Type", "application/xml; charset=utf-8")
	return r.Write(strings.NewReader(xml.Header + string(data)))
}
// WriteProtobuf writes an "application/protobuf" content encoded from the v to
// the client. The v must implement `proto.Message`.
func (r *Response) WriteProtobuf(v interface{}) error {
	m := v.(proto.Message)
	b, err := proto.Marshal(m)
	if err != nil {
		return err
	}
	body := bytes.NewReader(b)
	r.Header.Set("Content-Type", "application/protobuf")
	return r.Write(body)
}
// WriteMsgpack writes an "application/msgpack" content encoded from the v to
// the client.
func (r *Response) WriteMsgpack(v interface{}) error {
	b, err := msgpack.Marshal(v)
	if err != nil {
		return err
	}
	body := bytes.NewReader(b)
	r.Header.Set("Content-Type", "application/msgpack")
	return r.Write(body)
}
// WriteTOML writes an "application/toml" content encoded from the v to the
// client.
func (r *Response) WriteTOML(v interface{}) error {
	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(v); err != nil {
		return err
	}
	r.Header.Set("Content-Type", "application/toml; charset=utf-8")
	return r.Write(bytes.NewReader(buf.Bytes()))
}
// WriteYAML writes an "application/yaml" content encoded from the v to the
// client.
func (r *Response) WriteYAML(v interface{}) error {
	var buf bytes.Buffer
	if err := yaml.NewEncoder(&buf).Encode(v); err != nil {
		return err
	}
	r.Header.Set("Content-Type", "application/yaml; charset=utf-8")
	return r.Write(bytes.NewReader(buf.Bytes()))
}
// WriteHTML writes the h as a "text/html" content to the client.
//
// When auto push is enabled and the request arrived over HTTP/2, the HTML is
// parsed and every local (absolute-path) resource referenced by a <link href>,
// <img src> or <script src> attribute is server-pushed before the body is
// written. <link> elements whose rel is "preload" or "icon" are skipped.
func (r *Response) WriteHTML(h string) error {
	if r.Air.AutoPushEnabled && r.req.HTTPRequest().ProtoMajor == 2 {
		tree, err := html.Parse(strings.NewReader(h))
		if err != nil {
			return err
		}
		// f walks the parse tree depth-first, collecting a push
		// target from each eligible element.
		var f func(*html.Node)
		f = func(n *html.Node) {
			if n.Type == html.ElementNode {
				avoid, target := false, ""
				switch strings.ToLower(n.Data) {
				case "link":
					relChecked := false
				LinkLoop:
					for _, a := range n.Attr {
						switch strings.ToLower(a.Key) {
						case "rel":
							switch strings.ToLower(
								a.Val,
							) {
							case "preload", "icon":
								// Not worth
								// pushing.
								avoid = true
								break LinkLoop
							}
							relChecked = true
						case "href":
							target = a.Val
							// Stop early once both
							// rel and href have
							// been seen.
							if relChecked {
								break LinkLoop
							}
						}
					}
				case "img", "script":
				ImgScriptLoop:
					for _, a := range n.Attr {
						switch strings.ToLower(a.Key) {
						case "src":
							target = a.Val
							break ImgScriptLoop
						}
					}
				}
				// Only absolute paths are pushable; the push
				// error is deliberately ignored (best effort).
				if !avoid && path.IsAbs(target) {
					r.Push(target, nil)
				}
			}
			for c := n.FirstChild; c != nil; c = c.NextSibling {
				f(c)
			}
		}
		f(tree)
	}
	r.Header.Set("Content-Type", "text/html; charset=utf-8")
	return r.Write(strings.NewReader(h))
}
// Render renders one or more HTML templates with the m and writes the results
// as a "text/html" content to the client. The results rendered by the former
// can be inherited by accessing the `m["InheritedHTML"]`.
func (r *Response) Render(m map[string]interface{}, templates ...string) error {
	var buf bytes.Buffer
	for _, name := range templates {
		// Expose the previous template's output to the next one.
		if buf.Len() > 0 {
			if m == nil {
				m = make(map[string]interface{}, 1)
			}
			m["InheritedHTML"] = template.HTML(buf.String())
		}
		buf.Reset()
		if err := r.Air.renderer.render(
			&buf,
			name,
			m,
			r.req.LocalizedString,
		); err != nil {
			return err
		}
	}
	return r.WriteHTML(buf.String())
}
// WriteFile writes a file content targeted by the filename to the client.
//
// Directory targets are redirected to their slash-terminated URL (so relative
// links resolve) and then served via the directory's "index.html". When the
// coffer is enabled, the cached (possibly minified/gzipped) asset is served;
// otherwise the file is read from disk. Content-Type, ETag and Last-Modified
// headers are filled in when not already set by the caller.
func (r *Response) WriteFile(filename string) error {
	filename, err := filepath.Abs(filename)
	if err != nil {
		return err
	} else if fi, err := os.Stat(filename); err != nil {
		return err
	} else if fi.IsDir() {
		p := r.req.RawPath()
		if !strings.HasSuffix(p, "/") {
			p = fmt.Sprint(path.Base(p), "/")
			if q := r.req.RawQuery(); q != "" {
				p = fmt.Sprint(p, "?", q)
			}
			r.Status = http.StatusMovedPermanently
			return r.Redirect(p)
		}
		// Fix: `filepath.Abs` returns a cleaned path without a
		// trailing separator, so plain concatenation produced paths
		// like "/srv/wwwindex.html". Join inserts the separator.
		filename = filepath.Join(filename, "index.html")
	}
	var (
		c  io.ReadSeeker // content to serve
		ct string        // Content-Type, if known
		et []byte        // ETag digest, if known
		mt time.Time     // modification time
	)
	if r.Air.CofferEnabled {
		if a, err := r.Air.coffer.asset(filename); err != nil {
			return err
		} else if a != nil {
			r.Minified = a.minified
			var ac []byte
			// Prefer the pre-gzipped variant when the client
			// accepts gzip.
			if r.Air.GzipEnabled && a.gzippedDigest != nil &&
				httpguts.HeaderValuesContainsToken(
					r.req.Header["Accept-Encoding"],
					"gzip",
				) {
				if ac = a.content(true); ac != nil {
					r.Gzipped = true
				}
			} else {
				ac = a.content(false)
			}
			if ac != nil {
				c = bytes.NewReader(ac)
				ct = a.mimeType
				et = a.digest
				mt = a.modTime
			}
		}
	}
	// Fall back to the file system when the coffer had no usable asset.
	if c == nil {
		f, err := os.Open(filename)
		if err != nil {
			return err
		}
		defer f.Close()
		fi, err := f.Stat()
		if err != nil {
			return err
		}
		c = f
		mt = fi.ModTime()
	}
	if r.Header.Get("Content-Type") == "" {
		if ct == "" {
			ct = mime.TypeByExtension(filepath.Ext(filename))
		}
		r.Header.Set("Content-Type", ct)
	}
	if r.Header.Get("ETag") == "" {
		if et == nil {
			// Hash the content to derive a strong ETag, then
			// rewind so the body can still be served.
			h := xxhash.New()
			if _, err := io.Copy(h, c); err != nil {
				return err
			}
			if _, err := c.Seek(0, io.SeekStart); err != nil {
				return err
			}
			et = h.Sum(nil)
		}
		r.Header.Set("ETag", fmt.Sprintf(
			"%q",
			base64.StdEncoding.EncodeToString(et),
		))
	}
	if r.Header.Get("Last-Modified") == "" {
		r.Header.Set("Last-Modified", mt.UTC().Format(http.TimeFormat))
	}
	return r.Write(c)
}
// Redirect writes the url as a redirection to the client. Note that the
// `Status` of the r will be the `http.StatusFound` if it is not a redirection
// status.
func (r *Response) Redirect(url string) error {
	if r.Written {
		return errors.New("air: request has been written")
	}
	isRedirection := r.Status >= http.StatusMultipleChoices &&
		r.Status < http.StatusBadRequest
	if !isRedirection {
		r.Status = http.StatusFound
	}
	http.Redirect(r.hrw, r.req.HTTPRequest(), url, r.Status)
	return nil
}
// WebSocket switches the connection of the r to the WebSocket protocol. See RFC
// 6455.
func (r *Response) WebSocket() (*WebSocket, error) {
	if r.Written {
		return nil, errors.New("air: request has been written")
	}
	wsu := &websocket.Upgrader{
		HandshakeTimeout: r.Air.WebSocketHandshakeTimeout,
		// Record the failure status on the response; the upgrader
		// writes the error to the client itself.
		Error: func(
			_ http.ResponseWriter,
			_ *http.Request,
			status int,
			_ error,
		) {
			r.Status = status
		},
		// Origin checking is deliberately disabled here; applications
		// are expected to enforce it themselves if needed.
		CheckOrigin: func(*http.Request) bool {
			return true
		},
	}
	if len(r.Air.WebSocketSubprotocols) > 0 {
		wsu.Subprotocols = r.Air.WebSocketSubprotocols
	}
	r.Status = http.StatusSwitchingProtocols
	conn, err := wsu.Upgrade(r.hrw, r.req.HTTPRequest(), r.Header)
	if err != nil {
		return nil, err
	}
	ws := &WebSocket{
		conn: conn,
	}
	// Default close handling: mark the socket closed, then either call
	// the application's handler or echo a close frame back.
	conn.SetCloseHandler(func(status int, reason string) error {
		ws.Closed = true
		if ws.ConnectionCloseHandler != nil {
			return ws.ConnectionCloseHandler(status, reason)
		}
		conn.WriteControl(
			websocket.CloseMessage,
			websocket.FormatCloseMessage(status, ""),
			time.Now().Add(time.Second),
		)
		return nil
	})
	// Default ping handling: reply with a pong carrying the same payload,
	// ignoring "close already sent" and temporary network errors.
	conn.SetPingHandler(func(appData string) error {
		if ws.PingHandler != nil {
			return ws.PingHandler(appData)
		}
		err := conn.WriteControl(
			websocket.PongMessage,
			[]byte(appData),
			time.Now().Add(time.Second),
		)
		if errors.Is(err, websocket.ErrCloseSent) {
			return nil
		}
		var ne net.Error
		if errors.As(err, &ne) && ne.Temporary() {
			return nil
		}
		return err
	})
	// Pongs are ignored unless the application installed a handler.
	conn.SetPongHandler(func(appData string) error {
		if ws.PongHandler != nil {
			return ws.PongHandler(appData)
		}
		return nil
	})
	return ws, nil
}
// Push initiates an HTTP/2 server push. This constructs a synthetic request
// using the target and pos, serializes that request into a "PUSH_PROMISE"
// frame, then dispatches that request using the server's request handler. If
// pos is nil, default options are used.
//
// The target must either be an absolute path (like "/path") or an absolute URL
// that contains a valid authority and the same scheme as the parent request. If
// the target is a path, it will inherit the scheme and authority of the parent
// request.
//
// It returns `http.ErrNotSupported` if the client has disabled push or if push
// is not supported on the underlying connection.
func (r *Response) Push(target string, pos *http.PushOptions) error {
	pusher, ok := r.hrw.(http.Pusher)
	if !ok {
		// The underlying writer cannot push (e.g. HTTP/1.x).
		return http.ErrNotSupported
	}
	return pusher.Push(target, pos)
}
// ProxyPass passes the request to the target and writes the response from the
// target to the client by using the reverse proxy technique. If the rpm is
// non-nil, then it will be used to modify the request to the target and
// response from the target.
//
// The target must be based on the HTTP protocol (such as HTTP(S), WebSocket and
// gRPC). So, the scheme of the target must be "http", "https", "ws", "wss",
// "grpc" or "grpcs".
func (r *Response) ProxyPass(target string, rpm *ReverseProxyModifier) error {
	if r.Written {
		return errors.New("air: request has been written")
	}
	// Substitute a zero-value modifier so the code below only has to
	// nil-check the individual hooks.
	if rpm == nil {
		rpm = &ReverseProxyModifier{}
	}
	targetMethod := r.req.Method
	if mrm := rpm.ModifyRequestMethod; mrm != nil {
		m, err := mrm(targetMethod)
		if err != nil {
			return err
		}
		targetMethod = m
	}
	targetURL, err := url.Parse(target)
	if err != nil {
		return err
	}
	targetURL.Scheme = strings.ToLower(targetURL.Scheme)
	switch targetURL.Scheme {
	case "http", "https", "ws", "wss", "grpc", "grpcs":
	default:
		return fmt.Errorf(
			"air: unsupported reverse proxy scheme: %s",
			targetURL.Scheme,
		)
	}
	targetURL.Host = strings.ToLower(targetURL.Host)
	reqPath := r.req.Path
	if mrp := rpm.ModifyRequestPath; mrp != nil {
		p, err := mrp(reqPath)
		if err != nil {
			return err
		}
		reqPath = p
	}
	if reqPath == "" {
		reqPath = "/"
	}
	// The request path may carry a query part; parse to split it off.
	reqURL, err := url.ParseRequestURI(reqPath)
	if err != nil {
		return err
	}
	targetURL.Path = path.Join(targetURL.Path, reqURL.Path)
	targetURL.RawPath = path.Join(targetURL.RawPath, reqURL.RawPath)
	// Merge the query strings of the target and the request, joining
	// with "&" only when both are non-empty.
	if targetURL.RawQuery == "" || reqURL.RawQuery == "" {
		targetURL.RawQuery = fmt.Sprint(
			targetURL.RawQuery,
			reqURL.RawQuery,
		)
	} else {
		targetURL.RawQuery = fmt.Sprint(
			targetURL.RawQuery,
			"&",
			reqURL.RawQuery,
		)
	}
	targetHeader := r.req.Header.Clone()
	if mrh := rpm.ModifyRequestHeader; mrh != nil {
		h, err := mrh(targetHeader)
		if err != nil {
			return err
		}
		targetHeader = h
	}
	if _, ok := targetHeader["User-Agent"]; !ok {
		// Explicitly disable the User-Agent header so it's not set to
		// default value.
		targetHeader.Set("User-Agent", "")
	}
	targetBody := r.req.Body
	if mrb := rpm.ModifyRequestBody; mrb != nil {
		b, err := mrb(targetBody)
		if err != nil {
			return err
		}
		targetBody = b
	}
	var reverseProxyError error
	rp := &httputil.ReverseProxy{
		Director: func(req *http.Request) {
			req.Method = targetMethod
			req.URL = targetURL
			req.Header = targetHeader
			req.Body = targetBody
			// TODO: Remove the following line when the
			// "net/http/httputil" of the minimum supported Go
			// version of Air has fixed this bug.
			req.Host = ""
		},
		FlushInterval: 100 * time.Millisecond,
		Transport:     r.Air.reverseProxyTransport,
		ErrorLog:      r.Air.ErrorLogger,
		BufferPool:    r.Air.reverseProxyBufferPool,
		ModifyResponse: func(res *http.Response) error {
			if mrs := rpm.ModifyResponseStatus; mrs != nil {
				s, err := mrs(res.StatusCode)
				if err != nil {
					return err
				}
				res.StatusCode = s
			}
			if mrh := rpm.ModifyResponseHeader; mrh != nil {
				h, err := mrh(res.Header)
				if err != nil {
					return err
				}
				res.Header = h
			}
			if mrb := rpm.ModifyResponseBody; mrb != nil {
				b, err := mrb(res.Body)
				if err != nil {
					return err
				}
				res.Body = b
			}
			// Mirror the upstream's body encoding on the local
			// response.
			r.Gzipped = httpguts.HeaderValuesContainsToken(
				res.Header["Content-Encoding"],
				"gzip",
			)
			return nil
		},
		ErrorHandler: func(
			_ http.ResponseWriter,
			_ *http.Request,
			err error,
		) {
			if r.Status < http.StatusBadRequest {
				r.Status = http.StatusBadGateway
			}
			reverseProxyError = err
		},
	}
	switch targetURL.Scheme {
	case "grpc", "grpcs":
		rp.FlushInterval /= 100 // For gRPC streaming
	}
	// `httputil.ReverseProxy` panics with `http.ErrAbortHandler` to abort
	// a broken transfer; swallow only that value and re-panic the rest.
	defer func() {
		r := recover()
		if r == nil || r == http.ErrAbortHandler {
			return
		}
		panic(r)
	}()
	switch targetURL.Scheme {
	case "ws", "wss":
		r.Status = http.StatusSwitchingProtocols
	}
	r.reverseProxying = true
	rp.ServeHTTP(r.hrw, r.req.HTTPRequest())
	r.reverseProxying = false
	return reverseProxyError
}
// Defer pushes the f onto the stack of functions that will be called after
// responding. Nil functions will be silently dropped.
func (r *Response) Defer(f func()) {
	if f == nil {
		return
	}
	r.deferredFuncs = append(r.deferredFuncs, f)
}
// ReverseProxyModifier is used by the `Response.ProxyPass` to modify the
// request to the target and response from the target.
//
// Note that any field in the `ReverseProxyModifier` can be nil, which means
// there is no need to modify that value.
type ReverseProxyModifier struct {
	// ModifyRequestMethod modifies the method of the request to the target.
	ModifyRequestMethod func(method string) (string, error)
	// ModifyRequestPath modifies the path of the request to the target.
	//
	// Note that the path contains the query part (anyway, the HTTP/2
	// specification says so). Therefore, the returned path must also be in
	// this format.
	ModifyRequestPath func(path string) (string, error)
	// ModifyRequestHeader modifies the header of the request to the target.
	ModifyRequestHeader func(header http.Header) (http.Header, error)
	// ModifyRequestBody modifies the body of the request to the target.
	//
	// The `Response.ProxyPass` (i.e. the caller of this hook) is
	// responsible for closing the returned `io.ReadCloser`.
	ModifyRequestBody func(body io.ReadCloser) (io.ReadCloser, error)
	// ModifyResponseStatus modifies the status of the response from the
	// target.
	ModifyResponseStatus func(status int) (int, error)
	// ModifyResponseHeader modifies the header of the response from the
	// target.
	ModifyResponseHeader func(header http.Header) (http.Header, error)
	// ModifyResponseBody modifies the body of the response from the target.
	//
	// The `Response.ProxyPass` (i.e. the caller of this hook) is
	// responsible for closing the returned `io.ReadCloser`.
	ModifyResponseBody func(body io.ReadCloser) (io.ReadCloser, error)
}
// responseWriter is used to tie the `Response` and `http.ResponseWriter`
// together.
type responseWriter struct {
	sync.Mutex                     // serializes header and body writes
	r          *Response           // the high-level response this writer backs
	rw         http.ResponseWriter // the underlying response writer
	w          *countWriter        // counts body bytes into r.ContentLength
	gw         *gzip.Writer        // non-nil while the body is being gzipped
	gwn        int                 // bytes written since the last gzip flush
	b64wc      io.WriteCloser      // non-nil for grpc-web-text (base64) bodies
}
// Header implements the `http.ResponseWriter` by delegating to the
// underlying writer's header map.
func (rw *responseWriter) Header() http.Header {
	h := rw.rw.Header()
	return h
}
// WriteHeader implements the `http.ResponseWriter`.
func (rw *responseWriter) WriteHeader(status int) {
	rw.Lock()
	defer rw.Unlock()
	// The header may only be written once.
	if rw.r.Written {
		return
	}
	if rw.r.servingContent {
		if status == http.StatusOK {
			// Let the status already chosen on the `Response`
			// win over `http.ServeContent`'s default.
			status = rw.r.Status
		} else if status >= http.StatusBadRequest {
			// An error from `http.ServeContent`: record it and
			// suppress the write so the caller can handle it.
			rw.r.Status = status
			rw.r.Header.Del("Content-Type")
			rw.r.Header.Del("X-Content-Type-Options")
			return
		}
	}
	// From here on, every body byte is counted into ContentLength.
	rw.w = &countWriter{
		w: rw.rw,
		c: &rw.r.ContentLength,
	}
	rw.handleGzip()
	rw.handleReverseProxy()
	rw.rw.WriteHeader(status)
	rw.r.Status = status
	rw.r.ContentLength = 0
	rw.r.Written = true
}
// Write implements the `http.ResponseWriter`.
func (rw *responseWriter) Write(b []byte) (int, error) {
	// Ensure the header has been written before the body.
	if !rw.r.Written {
		rw.WriteHeader(rw.r.Status)
	}
	rw.Lock()
	defer rw.Unlock()
	if rw.r.servingContent && rw.r.Status >= http.StatusBadRequest {
		// `http.ServeContent` is writing its error text; capture it
		// as an error instead of sending it to the client.
		rw.r.serveContentError = errors.New(string(b))
		return 0, nil
	}
	// Route through the base64 and/or gzip wrappers when active; base64
	// takes precedence since it already wraps the gzip writer if both
	// are in play.
	w := io.Writer(rw.w)
	if rw.b64wc != nil {
		w = rw.b64wc
	} else if rw.gw != nil {
		w = rw.gw
	}
	n, err := w.Write(b)
	// Flush the gzip writer periodically so long responses are not
	// buffered indefinitely.
	if n > 0 && w == rw.gw && rw.r.Air.GzipFlushThreshold > 0 {
		rw.gwn += n
		if rw.gwn >= rw.r.Air.GzipFlushThreshold {
			rw.gwn = 0
			rw.gw.Flush()
		}
	}
	return n, err
}
// Flush implements the `http.Flusher`.
func (rw *responseWriter) Flush() {
	if rw.b64wc != nil {
		// Closing the base64 encoder emits any buffered partial
		// quantum; a fresh encoder is installed for later writes.
		rw.b64wc.Close()
		w := io.Writer(rw.w)
		if rw.gw != nil {
			w = rw.gw
		}
		rw.b64wc = base64.NewEncoder(base64.StdEncoding, w)
	}
	if rw.gw != nil {
		rw.gw.Flush()
	}
	rw.rw.(http.Flusher).Flush()
}
// handleGzip handles the gzip feature for the rw.
//
// It decides whether the body should be gzipped (based on declared length,
// MIME type and the client's Accept-Encoding), installs a pooled
// `gzip.Writer` when so, and adjusts the Content-Encoding, Content-Length,
// ETag and Vary headers accordingly.
func (rw *responseWriter) handleGzip() {
	if !rw.r.Air.GzipEnabled {
		return
	}
	if !rw.r.Gzipped {
		// Too small to be worth compressing?
		if cl, _ := strconv.ParseInt(
			rw.r.Header.Get("Content-Length"),
			10,
			64,
		); cl < rw.r.Air.GzipMinContentLength {
			return
		}
		// Only compress configured MIME types.
		if mt, _, _ := mime.ParseMediaType(
			rw.r.Header.Get("Content-Type"),
		); !stringSliceContains(rw.r.Air.GzipMIMETypes, mt, true) {
			return
		}
		if httpguts.HeaderValuesContainsToken(
			rw.r.req.Header["Accept-Encoding"],
			"gzip",
		) {
			rw.gw, _ = rw.r.Air.gzipWriterPool.Get().(*gzip.Writer)
			if rw.gw == nil {
				return
			}
			rw.gw.Reset(rw.w)
			// Close and return the writer to the pool after
			// responding.
			rw.r.Defer(func() {
				if rw.r.ContentLength == 0 {
					// Nothing was written; avoid sending
					// a bare gzip trailer to the client.
					rw.gw.Reset(ioutil.Discard)
				}
				rw.gw.Close()
				rw.r.Air.gzipWriterPool.Put(rw.gw)
				rw.gw = nil
			})
			rw.r.Gzipped = true
		}
	}
	if rw.r.Gzipped {
		if !httpguts.HeaderValuesContainsToken(
			rw.r.Header["Content-Encoding"],
			"gzip",
		) {
			rw.r.Header.Add("Content-Encoding", "gzip")
		}
		// The compressed length is not known in advance.
		rw.r.Header.Del("Content-Length")
		// The entity-tag must differ per representation; see RFC
		// 7232, section 2.3. (A previous comment cited RFC 2732,
		// which is about IPv6 URL literals — wrong reference.)
		if et := rw.r.Header.Get("ETag"); et != "" {
			et = strings.TrimSuffix(et, `"`)
			et = fmt.Sprint(et, `-gzip"`)
			rw.r.Header.Set("ETag", et)
		}
	}
	if !httpguts.HeaderValuesContainsToken(
		rw.r.Header["Vary"],
		"Accept-Encoding",
	) {
		rw.r.Header.Add("Vary", "Accept-Encoding")
	}
}
// handleReverseProxy handles the reverse proxy feature for the rw.
//
// It performs the response side of the gRPC-Web translation: rewriting the
// Content-Type from "application/grpc" back to the gRPC-Web variant the
// client spoke, base64-encoding the body for "application/grpc-web-text",
// exposing the response headers for CORS, and re-emitting HTTP trailers as
// an in-body trailer frame.
func (rw *responseWriter) handleReverseProxy() {
	if !rw.r.reverseProxying {
		return
	}
	reqct := rw.r.req.Header.Get("Content-Type")
	if !strings.HasPrefix(reqct, "application/grpc-web") {
		return
	}
	reqmt := "application/grpc-web-text"
	if strings.HasSuffix(reqct, reqmt) {
		// The text variant carries the body base64-encoded.
		w := io.Writer(rw.w)
		if rw.gw != nil {
			w = rw.gw
		}
		rw.b64wc = base64.NewEncoder(base64.StdEncoding, w)
	} else {
		reqmt = "application/grpc-web"
	}
	rw.r.Header.Set("Content-Type", strings.Replace(
		rw.r.Header.Get("Content-Type"),
		"application/grpc",
		reqmt,
		1,
	))
	// Announced trailers will move into the body; drop the header.
	tns := strings.Split(rw.r.Header.Get("Trailer"), ", ")
	rw.r.Header.Del("Trailer")
	hns := make([]string, 0, len(rw.r.Header))
	for n := range rw.r.Header {
		hns = append(hns, n)
	}
	rw.r.Header.Set(
		"Access-Control-Expose-Headers",
		strings.Join(hns, ", "),
	)
	// After the body, append the gRPC-Web trailer frame: one flag byte
	// with the MSB set, a 4-byte big-endian length, then the serialized
	// trailers.
	rw.r.Defer(func() {
		ts := make(http.Header, len(tns))
		for _, tn := range tns {
			ltn := strings.ToLower(tn)
			ts[ltn] = append(ts[ltn], rw.r.Header[tn]...)
			rw.r.Header.Del(tn)
		}
		// Also collect unannounced trailers (http.TrailerPrefix).
		for n, vs := range rw.r.Header {
			if !strings.HasPrefix(n, http.TrailerPrefix) {
				continue
			}
			ltn := strings.ToLower(n[len(http.TrailerPrefix):])
			ts[ltn] = append(ts[ltn], vs...)
			rw.r.Header.Del(n)
		}
		tb := bytes.Buffer{}
		ts.Write(&tb)
		th := []byte{1 << 7, 0, 0, 0, 0}
		binary.BigEndian.PutUint32(th[1:5], uint32(tb.Len()))
		rw.Write(th)
		rw.Write(tb.Bytes())
		rw.Flush()
	})
}
// responseHijacker is used to tie the `Response` and `http.Hijacker` together.
type responseHijacker struct {
	r *Response     // response to mark as written after a successful hijack
	h http.Hijacker // the underlying hijacker
}
// Hijack implements the `http.Hijacker`. On success the response is marked
// as written, since the connection now belongs to the caller.
func (rh *responseHijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	conn, brw, err := rh.h.Hijack()
	if err == nil {
		rh.r.Written = true
	}
	return conn, brw, err
}
// countWriter is used to count the number of bytes written to the underlying
// `io.Writer`.
type countWriter struct {
	w io.Writer // destination writer
	c *int64    // accumulator for the number of bytes written
}
// Write implements the `io.Writer`, forwarding to the wrapped writer and
// adding the written byte count to the shared counter.
func (cw *countWriter) Write(b []byte) (int, error) {
	written, err := cw.w.Write(b)
	*cw.c += int64(written)
	return written, err
}
// reverseProxyTransport is a transport with the reverse proxy support.
type reverseProxyTransport struct {
	hTransport   *http.Transport  // HTTP/1.x transport (also serves ws/wss)
	h2Transport  *http2.Transport // HTTP/2 over TLS, used for "grpcs"
	h2cTransport *http2.Transport // HTTP/2 cleartext (h2c), used for "grpc"
}
// newReverseProxyTransport returns a new instance of the
// `reverseProxyTransport`.
//
// Compression is disabled on all three transports because the proxy streams
// upstream bodies through unchanged and handles gzip itself.
func newReverseProxyTransport() *reverseProxyTransport {
	return &reverseProxyTransport{
		hTransport: &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			DialContext: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
				// NOTE(review): DualStack is deprecated in
				// modern Go (dual-stack dialing is the
				// default); kept for compatibility.
				DualStack: true,
			}).DialContext,
			TLSClientConfig:       &tls.Config{},
			DisableCompression:    true,
			MaxIdleConnsPerHost:   200,
			IdleConnTimeout:       90 * time.Second,
			TLSHandshakeTimeout:   10 * time.Second,
			ExpectContinueTimeout: 1 * time.Second,
		},
		h2Transport: &http2.Transport{
			TLSClientConfig:    &tls.Config{},
			DisableCompression: true,
		},
		// The h2c transport dials plain TCP despite the "TLS" hook,
		// enabling cleartext HTTP/2 for the "grpc" scheme.
		h2cTransport: &http2.Transport{
			DialTLS: func(
				network string,
				address string,
				_ *tls.Config,
			) (net.Conn, error) {
				return net.Dial(network, address)
			},
			DisableCompression: true,
			AllowHTTP:          true,
		},
	}
}
// RoundTrip implements the `http.RoundTripper`.
//
// It maps the pseudo schemes used by `Response.ProxyPass` onto real HTTP
// transports: "ws"/"wss" become "http"/"https" over the HTTP/1.x transport,
// "grpc" uses cleartext HTTP/2 (h2c) and "grpcs" uses HTTP/2 over TLS. For
// gRPC-Web requests the body and Content-Type are translated to plain gRPC
// before dialing upstream.
func (rpt *reverseProxyTransport) RoundTrip(
	req *http.Request,
) (*http.Response, error) {
	var (
		transport     http.RoundTripper
		handleGRPCWeb bool
	)
	switch req.URL.Scheme {
	case "ws":
		req.URL.Scheme = "http"
		transport = rpt.hTransport
	case "wss":
		req.URL.Scheme = "https"
		transport = rpt.hTransport
	case "grpc":
		req.URL.Scheme = "http"
		transport = rpt.h2cTransport
		handleGRPCWeb = true
	case "grpcs":
		req.URL.Scheme = "https"
		transport = rpt.h2Transport
		handleGRPCWeb = true
	default:
		transport = rpt.hTransport
	}
	if handleGRPCWeb {
		ct := req.Header.Get("Content-Type")
		if strings.HasPrefix(ct, "application/grpc-web") {
			mt := "application/grpc-web-text"
			if strings.HasSuffix(ct, mt) {
				// The text variant is base64-encoded; decode
				// on the fly while keeping the original body
				// as the closer.
				req.Body = (&struct {
					io.Reader
					io.Closer
				}{
					base64.NewDecoder(
						base64.StdEncoding,
						req.Body,
					),
					req.Body,
				})
			} else {
				mt = "application/grpc-web"
			}
			// Upstream speaks plain gRPC.
			req.Header.Set(
				"Content-Type",
				strings.Replace(ct, mt, "application/grpc", 1),
			)
		}
	}
	return transport.RoundTrip(req)
}
// reverseProxyBufferPool is a buffer pool for the reverse proxy.
type reverseProxyBufferPool struct {
	pool *sync.Pool // holds 32 MiB []byte scratch buffers
}
// newReverseProxyBufferPool returns a new instance of the
// `reverseProxyBufferPool`.
func newReverseProxyBufferPool() *reverseProxyBufferPool {
	pool := &sync.Pool{
		New: func() interface{} {
			// 32 MiB scratch buffers for reverse proxy copies.
			return make([]byte, 32<<20)
		},
	}
	return &reverseProxyBufferPool{pool: pool}
}
// Get implements the `httputil.BufferPool`, handing out a pooled buffer.
func (rpbp *reverseProxyBufferPool) Get() []byte {
	buf := rpbp.pool.Get()
	return buf.([]byte)
}
// Put implements the `httputil.BufferPool`, returning a buffer to the pool.
// The parameter is named b rather than bytes to avoid shadowing the standard
// library "bytes" package within this function.
func (rpbp *reverseProxyBufferPool) Put(b []byte) {
	rpbp.pool.Put(b)
}
|
package controller
import (
"bookkeeping/config"
"bookkeeping/logic"
"net/http"
"github.com/gin-gonic/gin"
)
// Auth is a gin middleware that authenticates the request via the auth
// cookie. It aborts with 401 Unauthorized when the cookie is missing, the
// token fails to parse, or the referenced person no longer exists; otherwise
// it stores the username in the context and passes control on.
func Auth(c *gin.Context) {
	// All failure paths respond identically; factor them out.
	unauthorized := func() {
		c.JSON(http.StatusUnauthorized, nil)
		c.Abort()
	}
	authCookie, err := c.Cookie(config.AuthCookieName)
	if err != nil {
		unauthorized()
		return
	}
	mc, err := logic.ParseToken(authCookie)
	if err != nil {
		unauthorized()
		return
	}
	if !logic.HasPersonByID(mc.Username) {
		unauthorized()
		return
	}
	c.Set(config.AuthMidUserNameKey, mc.Username)
	c.Next()
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package wilco
import (
"context"
"encoding/json"
"chromiumos/tast/local/bundles/cros/wilco/wilcoextension"
"chromiumos/tast/local/policyutil/fixtures"
"chromiumos/tast/local/wilco"
"chromiumos/tast/testing"
dtcpb "chromiumos/wilco_dtc"
)
// init registers the test with the tast framework; it runs at package load.
func init() {
	testing.AddTest(&testing.Test{
		Func:         APISendMessageToUIEnrolled,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Test sending a message from the Wilco DTC VM to the Chromium extension",
		Contacts: []string{
			"chromeos-oem-services@google.com", // Use team email for tickets.
			"bkersting@google.com",
			"lamzin@google.com",
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"vm_host", "wilco", "chrome"},
		Fixture:      "wilcoDTCAllowed",
	})
}
// APISendMessageToUIEnrolled tests Wilco DTC SendMessageToUi gRPC API.
func APISendMessageToUIEnrolled(ctx context.Context, s *testing.State) {
cr := s.FixtValue().(*fixtures.FixtData).Chrome()
wConn, err := wilcoextension.NewConnectionToWilcoExtension(ctx, cr)
if err != nil {
s.Fatal("Failed to create connection to extension: ", err)
}
defer wConn.CloseTarget(ctx)
defer wConn.Close()
if err := wConn.CreatePort(ctx); err != nil {
s.Fatal("Failed to create port to built-in application: ", err)
}
if err := wConn.StartListener(ctx); err != nil {
s.Fatal("Failed to start listener: ", err)
}
type testMsg struct {
Test int
}
uiResponse := testMsg{
Test: 8,
}
if err := wConn.AddReply(ctx, &uiResponse); err != nil {
s.Fatal("Failed to set reply: ", err)
}
vmRequest := testMsg{
Test: 5,
}
marshaled, err := json.Marshal(vmRequest)
if err != nil {
s.Fatal("Failed to marshal message: ", err)
}
s.Log("Sending message to extension")
request := dtcpb.SendMessageToUiRequest{
JsonMessage: string(marshaled),
}
response := dtcpb.SendMessageToUiResponse{}
if err := wilco.DPSLSendMessage(ctx, "SendMessageToUi", &request, &response); err != nil {
s.Error("Failed to send message to UI: ", err)
}
var vmResponse testMsg
if err := json.Unmarshal([]byte(response.ResponseJsonMessage), &vmResponse); err != nil {
s.Logf("Response JSON message: %q", response.ResponseJsonMessage)
s.Fatal("Failed to unmarshal message: ", err)
}
if uiResponse != vmResponse {
s.Errorf("Unexpected reply received = got %v, want %v", vmResponse, uiResponse)
}
s.Log("Waiting for message")
var uiRequest testMsg
if err := wConn.WaitForMessage(ctx, &uiRequest); err != nil {
s.Fatal("Failed to wait for a message received by extension: ", err)
}
if vmRequest != uiRequest {
s.Errorf("Unexpected request received = got %v, want %v", uiRequest, vmRequest)
}
}
|
package atgo
import (
"context"
)
type (
	// Bank exposes the Africa's Talking bank payment operations:
	// checkout (collection), checkout validation, and transfer
	// (disbursement).
	Bank interface {
		// Collect money into your payment wallet.
		BankCheckout(ctx context.Context, p *BankCheckoutPayload) (res *BankCheckoutResponse, err error)
		// Validate a bank checkout charge request.
		BankCheckoutValidate(ctx context.Context, p *BankCheckoutValidatePayload) (res *BankCheckoutValidateResponse, err error)
		// Initiate a bank transfer request.
		BankTransfer(ctx context.Context, p *BankTransferPayload) (res *BankTransferResponse, err error)
	}
)
// BankCheckoutPayload is the payload type of the africastalking service Bank
// Checkout method.
// BankCheckoutPayload is the payload type of the africastalking service Bank
// Checkout method.
type BankCheckoutPayload struct {
	// Africa’s Talking application username.
	Username string `form:"username" json:"username" xml:"username"`
	// Africa’s Talking Payment Product to initiate this transaction.
	ProductName string `form:"productName" json:"productName" xml:"productName"`
	// Bank account from which the money is collected.
	BankAccount BankAccount `form:"bankAccount" json:"bankAccount" xml:"bankAccount"`
	// 3-digit ISO format currency code.
	CurrencyCode string `form:"currencyCode" json:"currencyCode" xml:"currencyCode"`
	// Amount client is expected to confirm, in the currency given above.
	Amount float64 `form:"amount" json:"amount" xml:"amount"`
	// Short description of the transaction displayed on the clients statement.
	Narration string `form:"narration" json:"narration" xml:"narration"`
	// A map of any metadata that you would like us to associate with the request.
	Metadata map[string]string `form:"metadata,omitempty" json:"metadata,omitempty" xml:"metadata,omitempty"`
}
// BankCheckoutResponse is the result type of the africastalking service Bank
// Checkout method.
type BankCheckoutResponse struct {
	// This corresponds to the status of the request.
	Status string `form:"status,omitempty" json:"status,omitempty" xml:"status,omitempty"`
	// A detailed description of the request status.
	Description string `form:"description,omitempty" json:"description,omitempty" xml:"description,omitempty"`
	// Unique ID that our API generates for successful requests; used later
	// to validate the checkout.
	TransactionID string `form:"transactionId,omitempty" json:"transactionId,omitempty" xml:"transactionId,omitempty"`
}
// BankCheckoutValidatePayload is the payload type of the africastalking
// service BankCheckoutValidate method.
type BankCheckoutValidatePayload struct {
	// Africa’s Talking application Username.
	Username string `form:"username" json:"username" xml:"username"`
	// The ID of the transaction to be validated (from the checkout response).
	TransactionID string `form:"transactionId" json:"transactionId" xml:"transactionId"`
	// One Time Password the bank sent to the client.
	Otp string `form:"otp" json:"otp" xml:"otp"`
}
// BankCheckoutValidateResponse is the result type of the africastalking
// service BankCheckoutValidate method.
type BankCheckoutValidateResponse struct {
	// The final status of this request.
	Status string `form:"status,omitempty" json:"status,omitempty" xml:"status,omitempty"`
	// A detailed description of the request status.
	Description string `form:"description,omitempty" json:"description,omitempty" xml:"description,omitempty"`
}
// BankTransferPayload is the payload type of the africastalking service
// BankTransfer method.
type BankTransferPayload struct {
	// Africa’s Talking application username.
	Username string `form:"username" json:"username" xml:"username"`
	// Africa’s Talking Payment product to initiate this transaction.
	ProductName string `form:"productName" json:"productName" xml:"productName"`
	// Transfer recipients, one entry per bank transfer to perform.
	Recipients []TransferRecipients `form:"recipients" json:"recipients" xml:"recipients"`
}
// BankTransferResponse is the result type of the africastalking service
// BankTransfer method.
type BankTransferResponse struct {
	// Transfer entries, one per requested recipient.
	Entries []TransferEntries `form:"entries,omitempty" json:"entries,omitempty" xml:"entries,omitempty"`
	// Error message if the ENTIRE request was rejected by the API.
	ErrorMessage string `form:"errorMessage,omitempty" json:"errorMessage,omitempty" xml:"errorMessage,omitempty"`
}
// BankAccount describes the bank account to charge in a bank checkout.
type BankAccount struct {
	// Bank account name.
	AccountName string `form:"accountName" json:"accountName" xml:"accountName"`
	// Bank account number.
	AccountNumber string `form:"accountNumber" json:"accountNumber" xml:"accountNumber"`
	// 6-Digit Integer code for the bank that we allocate.
	BankCode int `form:"bankCode" json:"bankCode" xml:"bankCode"`
	// Date of birth of the account owner.
	DateOfBirth string `form:"dateOfBirth" json:"dateOfBirth" xml:"dateOfBirth"`
}
// TransferRecipients is a single Recipient element corresponding to one bank
// transfer transaction request.
type TransferRecipients struct {
	// Details of a bank account to receive the bank transfer payment.
	BankAccount string `form:"bankAccount" json:"bankAccount" xml:"bankAccount"`
	// Bank account name.
	AccountName string `form:"accountName,omitempty" json:"accountName,omitempty" xml:"accountName,omitempty"`
	// Bank account number.
	AccountNumber string `form:"accountNumber" json:"accountNumber" xml:"accountNumber"`
	// 6-Digit Integer code for the bank that we allocate.
	BankCode string `form:"bankCode" json:"bankCode" xml:"bankCode"`
	// Date of birth of the account owner.
	DateOfBirth string `form:"dateOfBirth,omitempty" json:"dateOfBirth,omitempty" xml:"dateOfBirth,omitempty"`
	// 3-digit ISO format currency code.
	CurrencyCode string `form:"currencyCode" json:"currencyCode" xml:"currencyCode"`
	// Amount client is expected to receive, in the currency given above.
	Amount string `form:"amount" json:"amount" xml:"amount"`
	// Short description of the transaction displayed on the clients statement.
	Narration string `form:"narration,omitempty" json:"narration,omitempty" xml:"narration,omitempty"`
	// A map of any metadata associated with the request.
	Metadata map[string]string `form:"metadata,omitempty" json:"metadata,omitempty" xml:"metadata,omitempty"`
}
// TransferEntries is a single bank transfer entry in a transfer response.
type TransferEntries struct {
	// The account number of the bank transfer recipient.
	AccountNumber string `form:"accountNumber,omitempty" json:"accountNumber,omitempty" xml:"accountNumber,omitempty"`
	// Status of this individual transfer request (e.g. accepted and queued
	// for processing by the payment provider).
	Status string `form:"status,omitempty" json:"status,omitempty" xml:"status,omitempty"`
	// A unique ID that our API generates for successful requests.
	TransactionID string `form:"transactionId,omitempty" json:"transactionId,omitempty" xml:"transactionId,omitempty"`
	// Transaction fee charged by Africa’s Talking for this transaction.
	TransactionFee string `form:"transactionFee,omitempty" json:"transactionFee,omitempty" xml:"transactionFee,omitempty"`
	// A more descriptive error message for the status of this transaction.
	ErrorMessage string `form:"errorMessage,omitempty" json:"errorMessage,omitempty" xml:"errorMessage,omitempty"`
}
|
package infrastructure
import (
"context"
"fmt"
apihttp "github.com/denis-sukhoverkhov/calendar/internal/infrastructure/api/http"
"github.com/denis-sukhoverkhov/calendar/internal/interfaces"
"github.com/denis-sukhoverkhov/calendar/internal/interfaces/repositories"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
"go.uber.org/zap"
"net/http"
"os"
"os/signal"
"syscall"
"time"
)
// AppServer bundles the application's logger, repository layer, and the
// embedded *http.Server that serves the calendar API.
type AppServer struct {
	l    *zap.Logger                         // application logger
	repo *repositories.RepositoryInteractor  // data-access layer
	*http.Server                             // the embedded HTTP server
}
// NewServer builds the application HTTP server from the configuration:
// logger, database pool, repositories, and the chi router, wired into an
// *http.Server with read/write/idle timeouts.
func NewServer(config Configuration) (*AppServer, error) {
	logger := NewLogger(config.Logs.Level, config.Logs.PathToLogFile)
	listenAddr := fmt.Sprintf("%s:%d", config.HttpListen.Ip, config.HttpListen.Port)
	// Adapter so net/http internals log through zap at error level.
	// (Answers the old FIXME asking whether this line is needed: yes —
	// without it http.Server errors bypass zap.) The error was previously
	// discarded; propagate it now that we have an error return.
	errorLog, err := zap.NewStdLogAt(logger, zap.ErrorLevel)
	if err != nil {
		return nil, err
	}
	pgPool := NewPgPool(config, logger)
	repos := interfaces.InitRepositories(pgPool)
	srv := http.Server{
		Addr:         listenAddr,
		Handler:      NewHttpApi(repos, logger),
		ErrorLog:     errorLog,
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 10 * time.Second,
		IdleTimeout:  15 * time.Second,
	}
	return &AppServer{logger, repos, &srv}, nil
}
// Start launches the HTTP listener in a background goroutine and then blocks
// in gracefulShutdown waiting for a termination signal. Log buffers are
// flushed when Start returns.
func (s *AppServer) Start() {
	s.l.Info("Starting server")
	defer s.l.Sync()

	go func() {
		err := s.ListenAndServe()
		// ErrServerClosed is the expected result of a graceful Shutdown.
		if err != nil && err != http.ErrServerClosed {
			s.l.Fatal("Could not listen on", zap.String("addr", s.Addr), zap.Error(err))
		}
	}()

	s.l.Info("Server is ready to handle requests", zap.String("addr", s.Addr))
	s.gracefulShutdown()
}
// gracefulShutdown blocks until SIGINT or SIGTERM arrives, then drains the
// server, giving in-flight requests up to 30 seconds to finish.
func (s *AppServer) gracefulShutdown() {
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGINT, syscall.SIGTERM)

	received := <-stop
	s.l.Info("Server is shutting down", zap.String("reason", received.String()))

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Stop accepting keep-alive connections, then drain.
	s.SetKeepAlivesEnabled(false)
	if err := s.Shutdown(ctx); err != nil {
		s.l.Fatal("Could not gracefully shutdown the server", zap.Error(err))
	}
	s.l.Info("Server stopped")
}
// NewHttpApi builds the chi router with standard middleware (request id,
// zap request logging, panic recovery) and registers the user/event REST
// endpoints backed by the given repositories.
func NewHttpApi(repos *repositories.RepositoryInteractor, logger *zap.Logger) *chi.Mux {
	r := chi.NewRouter()
	r.Use(middleware.RequestID)
	r.Use(zapLogger(logger))
	r.Use(middleware.Recoverer)
	r.Get("/", apihttp.Main)
	r.Get("/hello", apihttp.Hello)
	// User CRUD endpoints.
	r.Get("/user/{userId:[0-9]+}", apihttp.GetUserHandler(repos))
	r.Get("/user", apihttp.GetUsersHandler(repos))
	r.Post("/user", apihttp.PostUserHandler(repos))
	r.Delete("/user/{userId:[0-9]+}", apihttp.DeleteUserHandler(repos))
	// Event CRUD endpoints. NOTE(review): "GetVentHandler" looks like a typo
	// for GetEventHandler, but the name is declared in the apihttp package,
	// so it must be renamed there first.
	r.Get("/event/{eventId:[0-9]+}", apihttp.GetVentHandler(repos))
	r.Get("/event", apihttp.GetEventsHandler(repos))
	r.Post("/event", apihttp.PostEventHandler(repos))
	r.Delete("/event/{eventId:[0-9]+}", apihttp.DeleteEventHandler(repos))
	r.Get("/events_for_day", apihttp.GetEventsForDayHandler(repos))
	// Dump the registered routes through the logger for visibility.
	logRoutes(r, logger)
	return r
}
|
package main
import (
"fmt"
)
type gridgame struct{ grid grid }
// nextTile computes the next generation of a single tile at grid position
// (x, y). Each bit of the tile is one cell; a bit is flipped (via XOR) when
// Conway's rules say its state changes: a live cell with fewer than 2 or
// more than 3 live neighbors dies, and a dead cell with exactly 3 live
// neighbors becomes alive.
func (game gridgame) nextTile(t tile, x, y int64) tile {
	nextTile := t
	eachBit(func(b bit) {
		neighbors := game.grid.countLivingNeighbors(b, x, y)
		alive := t.bitAlive(b)
		// XOR toggles exactly the cells whose state must change.
		if alive && (neighbors < 2 || neighbors > 3) || !alive && neighbors == 3 {
			nextTile ^= tile(b)
		}
	})
	return nextTile
}
// next advances the whole board one generation, computing the new grid from
// the old one and then swapping it in.
func (game *gridgame) next() {
	updated := grid{}
	for y, row := range game.grid {
		fresh := gridrow{}
		for x, t := range row {
			fresh[x] = game.nextTile(t, x, y)
		}
		updated[y] = fresh
	}
	game.grid = updated
}
// start runs the animation loop: every tick it clears the screen, prints the
// board and advances one generation. The `15` argument is the tick rate
// passed to the loop helper — presumably frames per second; confirm against
// loop's definition.
func (game gridgame) start() {
	loop(func() {
		clear()
		fmt.Println("Life.")
		fmt.Println(game.grid)
		game.next()
	}, 15)
}
|
package handlers
import (
"net/http"
"github.com/kiali/kiali/log"
"github.com/kiali/kiali/prometheus"
)
// aladdin
// InfraDashboard is the API handler to fetch Istio dashboard, related to a single service
func InfraDashboard(w http.ResponseWriter, r *http.Request) {
// prometheus에 client 등록
prom, err := defaultPromClientSupplier()
if err != nil {
log.Error(err)
RespondWithError(w, http.StatusServiceUnavailable, "Prometheus client error: "+err.Error())
}
// params를 types.go 파일의 InfraOptionMetricsQuery형태로 초기화
params := prometheus.InfraOptionMetricsQuery{}
// 실제 query의 param 정보를 파싱해서 params에 넣음
error := extractInfraMetricsQueryParams(r, ¶ms)
if error != nil {
RespondWithError(w, http.StatusBadRequest, err.Error())
return
}
// params의 정보를 가지고 프로메테우스에 보낼 query를 만듬
metrics := prom.GetInfraMetrics(¶ms)
RespondWithJSON(w, http.StatusOK, metrics)
}
|
package handlers
import (
"html/template"
"net/http"
)
func ViewHandle(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
t, _ := template.ParseFiles("./views/error.gohtml")
myvar := map[string]interface{}{"MyVar": "404 Page was not found"}
t.Execute(w, myvar)
} else {
t, _ := template.ParseFiles("./views/index.gohtml")
myvar := map[string]interface{}{"MyVar": "Hello World"}
t.Execute(w, myvar)
}
}
func DashboardView(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/dashboard/" {
t, _ := template.ParseFiles("./views/error.gohtml")
myvar := map[string]interface{}{"MyVar": "404 Page was not found"}
t.Execute(w, myvar)
} else {
t, _ := template.ParseFiles("./views/helth/Doctor/dashboard.gohtml")
myvar := map[string]interface{}{"MyVar": "Hello World"}
t.Execute(w, myvar)
}
}
func UserView(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/user/" {
t, _ := template.ParseFiles("./views/error.gohtml")
myvar := map[string]interface{}{"MyVar": "404 Page was not found"}
t.Execute(w, myvar)
} else {
t, _ := template.ParseFiles("./views/helth/Doctor/user.gohtml")
myvar := map[string]interface{}{"MyVar": "Hello World"}
t.Execute(w, myvar)
}
}
func PatDetails(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/PatDetails/" {
t, _ := template.ParseFiles("./views/error.gohtml")
myvar := map[string]interface{}{"MyVar": "404 Page was not found"}
t.Execute(w, myvar)
} else {
t, _ := template.ParseFiles("./views/helth/Doctor/patient.gohtml")
myvar := map[string]interface{}{"MyVar": "Hello World"}
t.Execute(w, myvar)
}
}
/*func AddHis(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/AddHis/" {
t, _ := template.ParseFiles("./views/error.html")
myvar := map[string]interface{}{"MyVar": "404 Page was not found"}
t.Execute(w, myvar)
} else {
t, _ := template.ParseFiles("./views/helth/Doctor/table.html")
myvar := map[string]interface{}{"MyVar": "Hello World"}
t.Execute(w, myvar)
}
}
func ShowHis(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/ShowHis/" {
t, _ := template.ParseFiles("./views/error.html")
myvar := map[string]interface{}{"MyVar": "404 Page was not found"}
t.Execute(w, myvar)
} else {
t, _ := template.ParseFiles("./views/helth/Doctor/history.html")
myvar := map[string]interface{}{"MyVar": "Hello World"}
t.Execute(w, myvar)
}
}
*/
|
package diagnostics
import "net/http"
func setEncodedHeader(req *http.Request) {
if req.Method == "GET" {
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
}
return
}
|
package odoo
import (
"fmt"
)
// UtmMedium represents utm.medium model.
//
// NOTE(review): every tag spells "omptempty" instead of "omitempty". Whether
// that matters depends on the xmlrpc tag parser — confirm before changing,
// since fixing the tags would alter the wire encoding.
type UtmMedium struct {
	LastUpdate *Time `xmlrpc:"__last_update,omptempty"`
	Active *Bool `xmlrpc:"active,omptempty"`
	CreateDate *Time `xmlrpc:"create_date,omptempty"`
	CreateUid *Many2One `xmlrpc:"create_uid,omptempty"`
	DisplayName *String `xmlrpc:"display_name,omptempty"`
	Id *Int `xmlrpc:"id,omptempty"`
	Name *String `xmlrpc:"name,omptempty"`
	WriteDate *Time `xmlrpc:"write_date,omptempty"`
	WriteUid *Many2One `xmlrpc:"write_uid,omptempty"`
}
// UtmMediums represents array of utm.medium model.
type UtmMediums []UtmMedium
// UtmMediumModel is the odoo model name.
const UtmMediumModel = "utm.medium"
// Many2One convert UtmMedium to *Many2One.
// NOTE(review): dereferences um.Id unconditionally — presumably panics when
// Id is nil; confirm callers always populate it.
func (um *UtmMedium) Many2One() *Many2One {
	return NewMany2One(um.Id.Get(), "")
}
// CreateUtmMedium creates a new utm.medium model and returns its id.
// A return of (-1, nil) means the server reported success but no id.
func (c *Client) CreateUtmMedium(um *UtmMedium) (int64, error) {
	ids, err := c.CreateUtmMediums([]*UtmMedium{um})
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		return -1, nil
	default:
		return ids[0], nil
	}
}
// CreateUtmMediums creates new utm.medium models and returns their ids.
// (Fixes copy-pasted doc that named the singular function.)
func (c *Client) CreateUtmMediums(ums []*UtmMedium) ([]int64, error) {
	var vv []interface{}
	for _, v := range ums {
		vv = append(vv, v)
	}
	return c.Create(UtmMediumModel, vv)
}
// UpdateUtmMedium updates an existing utm.medium record.
// NOTE(review): dereferences um.Id — presumably panics when Id is nil.
func (c *Client) UpdateUtmMedium(um *UtmMedium) error {
	return c.UpdateUtmMediums([]int64{um.Id.Get()}, um)
}
// UpdateUtmMediums updates existing utm.medium records.
// All records (represented by ids) will be updated by um values.
func (c *Client) UpdateUtmMediums(ids []int64, um *UtmMedium) error {
	return c.Update(UtmMediumModel, ids, um)
}
// DeleteUtmMedium deletes an existing utm.medium record.
func (c *Client) DeleteUtmMedium(id int64) error {
	return c.DeleteUtmMediums([]int64{id})
}
// DeleteUtmMediums deletes existing utm.medium records.
func (c *Client) DeleteUtmMediums(ids []int64) error {
	return c.Delete(UtmMediumModel, ids)
}
// GetUtmMedium gets utm.medium existing record. It returns an error when the
// id does not exist.
func (c *Client) GetUtmMedium(id int64) (*UtmMedium, error) {
	records, err := c.GetUtmMediums([]int64{id})
	if err != nil {
		return nil, err
	}
	if records == nil || len(*records) == 0 {
		return nil, fmt.Errorf("id %v of utm.medium not found", id)
	}
	return &((*records)[0]), nil
}
// GetUtmMediums gets utm.medium existing records.
func (c *Client) GetUtmMediums(ids []int64) (*UtmMediums, error) {
	ums := &UtmMediums{}
	if err := c.Read(UtmMediumModel, ids, nil, ums); err != nil {
		return nil, err
	}
	return ums, nil
}
// FindUtmMedium finds utm.medium record by querying it with criteria. Only
// the first match (Limit(1)) is returned; no match is an error.
func (c *Client) FindUtmMedium(criteria *Criteria) (*UtmMedium, error) {
	result := &UtmMediums{}
	err := c.SearchRead(UtmMediumModel, criteria, NewOptions().Limit(1), result)
	if err != nil {
		return nil, err
	}
	if result == nil || len(*result) == 0 {
		return nil, fmt.Errorf("utm.medium was not found with criteria %v", criteria)
	}
	return &((*result)[0]), nil
}
// FindUtmMediums finds utm.medium records by querying it
// and filtering it with criteria and options.
func (c *Client) FindUtmMediums(criteria *Criteria, options *Options) (*UtmMediums, error) {
	ums := &UtmMediums{}
	if err := c.SearchRead(UtmMediumModel, criteria, options, ums); err != nil {
		return nil, err
	}
	return ums, nil
}
// FindUtmMediumIds finds records ids by querying it
// and filtering it with criteria and options.
// On error an empty (non-nil) slice is returned alongside the error.
func (c *Client) FindUtmMediumIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(UtmMediumModel, criteria, options)
	if err != nil {
		return []int64{}, err
	}
	return ids, nil
}
// FindUtmMediumId finds record id by querying it with criteria. When several
// records match, the first id is returned; no match is an error.
func (c *Client) FindUtmMediumId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(UtmMediumModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		return -1, fmt.Errorf("utm.medium was not found with criteria %v and options %v", criteria, options)
	}
	return ids[0], nil
}
|
package service
import (
"context"
"errors"
"jxc/models"
"go.mongodb.org/mongo-driver/bson"
)
// UserRules is the request validator for user input (username, password and
// phone), with gin binding constraints on length and presence.
type UserRules struct {
	UserName string `form:"username" json:"username" binding:"required,min=5,max=20"`
	Password string `form:"password" json:"password" binding:"required,min=8,max=20"`
	Phone    string `form:"phone" json:"phone" binding:"required,min=6,max=20"`
}
// Login authenticates a user: it looks the user up by company id and
// username in the "users" collection and compares the stored password.
// Error messages are user-facing Chinese strings ("no such user" /
// "wrong password") and are left untouched.
//
// NOTE(review): passwords are compared in plain text here — confirm the
// stored value is hashed upstream; if not, this stores raw passwords.
func Login(comId, userName, password string) (models.User, error) {
	User := models.User{}
	collection := models.Client.Collection("users")
	err := collection.FindOne(context.TODO(), bson.D{{"com_id", comId}, {"username", userName}}).Decode(&User)
	if err != nil {
		// No such user.
		return models.User{}, errors.New("无此用户")
	}
	if User.Password != password {
		// Wrong password.
		return models.User{}, errors.New("密码错误")
	}
	return User, nil
}
// CreateLoginLog inserts one login-log record (user id, source ip, message).
// Insertion failures are deliberately swallowed: logging must not break the
// login flow. The commented-out checkErr call is kept as-is.
func CreateLoginLog(user_id, ip, msg string) {
	loginLog := models.LoginLogData{
		UserId:  user_id,
		Ip:      ip,
		Message: msg,
	}
	_, err := models.Collection.InsertOne(context.TODO(), loginLog)
	if err != nil {
		// Insert failed — intentionally ignored (best-effort logging).
		//checkErr(err)
	}
}
// FindUser loads every user whose user_id is in user_id and whose company is
// com_id, returning them as a map keyed by user id.
func FindUser(user_id []int64, com_id int64) (map[int64]models.User, error) {
	var user models.User
	users := make(map[int64]models.User) // map[user_id]user

	filter := bson.M{}
	filter["user_id"] = bson.M{"$in": user_id}
	filter["com_id"] = com_id

	collection := models.Client.Collection("users")
	cur, err := collection.Find(context.TODO(), filter)
	if err != nil {
		return nil, err
	}
	// BUG FIX: the cursor was never closed, leaking server-side resources.
	defer cur.Close(context.TODO())

	for cur.Next(context.TODO()) {
		if err := cur.Decode(&user); err != nil {
			return nil, err
		}
		users[user.UserID] = user
	}
	// BUG FIX: surface iteration errors that were previously ignored.
	if err := cur.Err(); err != nil {
		return nil, err
	}
	return users, nil
}
// FindOneUser loads a single user by user id and company id, then attaches
// the user's permitted URL routes: a hard-coded base set that every user
// gets, plus the routes of every auth_note document whose auth_id appears
// in user.Authority.
func FindOneUser(userId int64, comId int64) (*models.User, error) {
	var user models.User
	filter := bson.M{}
	filter["user_id"] = userId
	filter["com_id"] = comId
	collection := models.Client.Collection("users")
	err := collection.FindOne(context.TODO(), filter).Decode(&user)
	if err != nil {
		return nil, err
	}

	// Resolve the user's permission route-node ids to URL routes. The list
	// below is always granted regardless of authority.
	filter = bson.M{}
	auth_note := models.AuthNote{}
	urls := []string{"/api/v1/customer_settlement/getcustomer",
		"/api/v1/customer_settlement/getsettlement",
		"/api/v1/customer_settlement/create",
		"/api/v1/customer_settlement/detail",
		"/api/v1/customer_settlement/confirm",
		"/api/v1/supplier_settlement/list",
		"/api/v1/supplier_settlement/getsupplier",
		"/api/v1/supplier_settlement/getsettlement",
		"/api/v1/supplier_settlement/create",
		"/api/v1/supplier_settlement/detail",
		"/api/v1/supplier_settlement/confirm",
		"/upload_images"}
	filter["auth_id"] = bson.M{"$in": user.Authority}
	cur, err := models.Client.Collection("auth_note").Find(context.TODO(), filter)
	if err != nil {
		// No matching auth data: return the user with only the base URLs.
		return &user, nil
	}
	// BUG FIX: the cursor was never closed, leaking server-side resources.
	defer cur.Close(context.TODO())

	//defaultUrl := []string{"/api/v1/units"}
	for cur.Next(context.TODO()) {
		if err := cur.Decode(&auth_note); err != nil {
			// Keep the original best-effort behavior: skip undecodable docs.
			continue
		}
		urls = append(urls, auth_note.Urls...)
	}
	user.Urls = urls
	return &user, nil
}
// UpdateUser updates a user record. Despite the generic name, only the
// password field is written ($set on "password"); other fields of user are
// ignored. The returned error message ("failed to change password") is a
// user-facing Chinese string and is left untouched.
func UpdateUser(user models.User, user_id string) error {
	_, err := models.Collection.UpdateOne(context.TODO(), bson.M{"user_id": user_id}, bson.M{"$set": bson.M{"password": user.Password}})
	if err != nil {
		return errors.New("修改密码失败")
	}
	return nil
}
// AddUser inserts a new user document into the "users" collection of the
// invoicing database. It returns a nil error on success; the string result
// is currently always empty (kept for interface compatibility).
func AddUser(user models.User) (string, error) {
	// Database: invoicing, collection: users.
	collection := models.Client.Collection("users")
	_, err := collection.InsertOne(context.TODO(), user)
	if err != nil {
		return "", err
	}
	// BUG FIX: the original returned errors.New("") here, so every
	// successful insert still looked like a failure to callers that check
	// err != nil.
	return "", nil
}
|
package main
import "fmt"
// configModel groups the application's runtime settings: MongoDB connection,
// token signing parameters and the HTTP listen address.
type configModel struct {
	mongoUri string
	mongoDb string
	tokenSecret string
	tokenExp string
	serveUri string
}
// config holds the (currently hard-coded) application configuration.
// NOTE(review): the token secret is committed in source — move it to an
// environment variable before production use.
var config = configModel{
	mongoUri: fmt.Sprintf("mongodb://%v:27017/eks", "localhost"), // mongodb://mongodb:27017/eks
	mongoDb: "eks", // DB name
	tokenSecret: "secret", // Secret to use in Tokens
	tokenExp: "1h", // Expiration of Token
	serveUri: ":8080", // Serve
}
|
package main
// Leetcode 5404. (easy)
// buildArray returns the "Push"/"Pop" stack operations that, applied to the
// stream 1..n, leave exactly the elements of target (an increasing list of
// values <= n) on the stack. Each absent value is pushed then immediately
// popped.
func buildArray(target []int, n int) []string {
	strs := []string{"Push", "Pop"}
	res := []string{}
	// BUG FIX: guard against an empty target, which previously panicked on
	// target[idx] below.
	if len(target) == 0 {
		return res
	}
	idx := 0
	for i := 1; i <= n; i++ {
		if i == target[idx] {
			res = append(res, strs[0])
			idx++
			if idx == len(target) {
				// All target values produced; later stream values are moot.
				break
			}
		} else {
			// i is not wanted: push it and immediately pop it.
			res = append(res, strs...)
		}
	}
	return res
}
|
// Package main (02_access_token_auth) demonstrates how to make authenticated
// requests to Mondo. To follow this example:
//
// 1. Log into https://developers.getmondo.co.uk/api/playground;
// 2. Copy the "Access token" shown on;
// 3. Run the this program with:
// MONDO_ACCESS_TOKEN=<paste> go run main.go
//
// (Note that the environment variables a program runs with can be read, so be
// wary of the security implications of this approach.)
package main
import (
"github.com/icio/mondo"
"github.com/icio/mondo/mondodomain"
"github.com/icio/mondo/mondohttp"
"log"
"net/http"
"os"
)
// main demonstrates authenticated Mondo API access: it builds a client with
// an explicit HTTP transport and an access-token auth source, fetches the
// account list and logs it.
func main() {
	client := &mondo.Client{
		// The mondohttp package assumes the Production API host and uses Go's
		// default User-Agent, but we can override these before each request is
		// made when using mondo.HTTPClient.
		HTTPClient: &mondo.HTTPClient{
			Client: http.DefaultClient,
			Host: "api.getmondo.co.uk",
			UserAgent: "example/0.1 (+https://github.com/icio/mondo)",
		},
		// When we give mondo.Client a mondo.auth, any Authorization headers
		// present in requests are updated before the request is made.
		Auth: mondo.NewAccessTokenAuth(os.Getenv("MONDO_ACCESS_TOKEN")),
	}
	// mondo.Client is going to override the Authorization header, so we just
	// provide an empty string for the access token when creating the request.
	accounts := new(mondodomain.AccountCollection)
	err := client.DoInto(mondohttp.NewAccountsRequest(""), accounts)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("Accounts: %#v\n", accounts)
}
|
package http
import (
"fmt"
"net/http"
"github.com/gorilla/mux"
)
// userHandler groups the user-facing HTTP handlers; it currently carries no
// state.
type userHandler struct {
}
// NewUserHandler registers the user routes on r under the /v1 prefix.
// Currently only GET /v1/test is exposed.
func NewUserHandler(r *mux.Router) {
	handler := &userHandler{}
	v1 := r.PathPrefix("/v1").Subrouter()
	v1.HandleFunc("/test", handler.Test).Methods(http.MethodGet)
}
// Test is a trivial health-check endpoint that echoes the request URL.
func (h *userHandler) Test(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "success %s", r.URL)
}
|
package lib_gc_cache_source
import (
"errors"
"sync"
LOG "github.com/theskyinflames/go-misc/com.theskyinflames.go.misc/lib_gc_log"
)
// mutex guards writes in AddData; dmutex guards deletions in DeleteData.
// NOTE(review): two separate locks over the same map do not exclude each
// other, and reads are unguarded — confirm callers serialize access.
var mutex *sync.Mutex = &sync.Mutex{}
var dmutex *sync.Mutex = &sync.Mutex{}
// init creates the map-backed cache and registers it in the cache source
// container under the "MAP" key.
func init() {
	ICacheMap = &MapCache{make(map[string][]byte)}
	// Register the cache source
	CacheSourceContainer.AddICacheSoure("MAP", ICacheMap)
	LOG.Info.Println("lib_cache_source (map adapter) package initialized. ")
}
// ICacheMap is the package-level map-backed cache source instance.
var ICacheMap ICacheSource
// MapCache is an in-memory ICacheSource backed by a plain Go map from
// stringified keys to raw byte values.
type MapCache struct {
	cache map[string][]byte
}
// InitSource is a no-op: the map-backed cache needs no external setup.
func (mp *MapCache) InitSource() error { return nil }
// CloseSource is a no-op: there is no external resource to release.
func (mp *MapCache) CloseSource() error { return nil }
// AddToHash is part of ICacheSource but unsupported by the map adapter.
func (mp *MapCache) AddToHash(hash *string, key, value *[]byte) error {
	return errors.New("method not implemented !!!")
}
// GetKeysFromHash is part of ICacheSource but unsupported by the map adapter.
func (mp *MapCache) GetKeysFromHash(hash *string) (*[][]byte, error) {
	return nil, errors.New("method not implemented !!!")
}
// RemoveKeyFromHash is part of ICacheSource but unsupported by the map adapter.
func (mp *MapCache) RemoveKeyFromHash(hash *string, key *[]byte) error {
	return errors.New("method not implemented !!!")
}
// RemoveHash is part of ICacheSource but unsupported by the map adapter.
func (mp *MapCache) RemoveHash(hash *string) error {
	return errors.New("method not implemented !!!")
}
// AddData stores value under key, overwriting any previous entry. Writes are
// serialized with the package-level mutex.
func (mp *MapCache) AddData(key, value *[]byte) error {
	mutex.Lock()
	defer mutex.Unlock()
	mp.cache[string(*key)] = *value
	return nil
}
// AddMData is part of ICacheSource but unsupported by the map adapter.
func (mp *MapCache) AddMData(data *[][]byte) error {
	return errors.New("method not implemented !!!")
}
// GetData returns the value stored under key, or an error when the key is
// absent.
//
// NOTE(review): reads are not guarded by the mutexes used in AddData and
// DeleteData, so concurrent use races on the map — confirm callers
// serialize access.
func (mp *MapCache) GetData(key *[]byte) (*[]byte, error) {
	data, ok := mp.cache[string(*key)]
	if !ok {
		// BUG FIX: the original message rendered as "The key<key> does not
		// exists" — missing space and wrong verb form.
		return nil, errors.New("the key " + string(*key) + " does not exist !!!")
	}
	return &data, nil
}
// MGetData is part of ICacheSource but unsupported by the map adapter.
func (mp *MapCache) MGetData(keys *[][]byte) (*[][]byte, error) {
	return nil, errors.New("method not implemented !!!")
}
// DeleteData removes key from the cache, reporting whether the key was
// present. Deletions are serialized with the package-level dmutex.
func (mp *MapCache) DeleteData(key *[]byte) (bool, error) {
	dmutex.Lock()
	defer dmutex.Unlock()

	skey := string(*key)
	_, found := mp.cache[skey]
	if found {
		delete(mp.cache, skey)
	}
	return found, nil
}
|
package routers
import (
"beego.demo/controllers"
"github.com/astaxie/beego"
)
// init registers the application routes: the main controller at "/", and a
// "/v1" namespace exposing login and home endpoints (all HTTP methods mapped
// via the "*:" method selector).
func init() {
	beego.Router("/", &controllers.MainController{})
	v1ns := beego.NewNamespace("/v1",
		beego.NSRouter("/login", &controllers.AdminController{}, "*:Login"),
		beego.NSRouter("/home", &controllers.HomeController{}, "*:Index"),
	)
	beego.AddNamespace(v1ns)
}
|
package main
import "fmt"
// main prints "Hello World" one thousand times to stdout.
func main() {
	const repetitions = 1000
	for i := 0; i < repetitions; i++ {
		fmt.Println("Hello World")
	}
}
|
package host
import (
"crypto/sha1"
"encoding/json"
"fmt"
"github.com/infraboard/mcube/types/ftime"
)
// Vendor values enumerate the supported cloud providers; the zero value is
// the private IDC. NOTE(review): "ProvateIDC" is a typo for "PrivateIDC",
// but the name is exported — renaming requires updating all callers.
const (
	ProvateIDC Vendor = iota
	Tencent
	Aliyun
	HuaWei
)
// Vendor identifies a host's cloud provider, implemented as an int enum.
type Vendor int
// NewDefaultHost returns a Host whose embedded sections are allocated but
// zero-valued, ready to be populated.
func NewDefaultHost() *Host {
	return &Host{
		Base:     &Base{},
		Resource: &Resource{},
		Describe: &Describe{},
	}
}
// Host composes the three sections of a host record — identity (Base),
// commercial/resource data (Resource) and hardware description (Describe) —
// via embedded pointers, so their fields are promoted onto Host.
type Host struct {
	*Base
	*Resource
	*Describe
}
// Put performs a full (PUT-style) update: the Resource and Describe sections
// are replaced wholesale with the request's values, the update timestamp is
// refreshed and the content hashes recomputed.
// NOTE(review): the error from GenHash is discarded — confirm that is
// acceptable for callers.
func (h *Host) Put(req *UpdateHostData) {
	// Pointer fields: assigning replaces the whole section.
	h.Resource = req.Resource
	h.Describe = req.Describe
	h.UpdateAt = ftime.Now().Timestamp() // 13-digit millisecond timestamp
	h.GenHash()
}
// Patch performs a partial (JSON merge-patch style) update: only the fields
// present in req are overlaid onto the host's Resource and Describe
// sections, then the update timestamp and hashes are refreshed.
func (h *Host) Patch(req *UpdateHostData) error {
	if err := ObjectPatch(h.Resource, req.Resource); err != nil {
		return err
	}
	if err := ObjectPatch(h.Describe, req.Describe); err != nil {
		return err
	}
	h.UpdateAt = ftime.Now().Timestamp()
	h.GenHash()
	return nil
}
// ObjectPatch overlays the fields present in patch onto old by round-tripping
// patch through JSON: {a: 1, b: 2} patched with {b: 20} yields {a: 1, b: 20}.
// old must be a pointer. Renamed the second parameter from `new`, which
// shadowed the builtin.
func ObjectPatch(old, patch interface{}) error {
	// Serialize only the fields the patch carries, e.g. {b: 20}.
	encoded, err := json.Marshal(patch)
	if err != nil {
		return err
	}
	// Unmarshal into old overwrites only the keys present in encoded.
	return json.Unmarshal(encoded, old)
}
// GenHash recomputes ResourceHash and DescribeHash as hex-encoded SHA-1
// digests of the JSON encodings of the Resource and Describe sections.
func (h *Host) GenHash() error {
	hash := sha1.New()

	b, err := json.Marshal(h.Resource)
	if err != nil {
		return err
	}
	hash.Write(b)
	h.ResourceHash = fmt.Sprintf("%x", hash.Sum(nil))

	b, err = json.Marshal(h.Describe)
	if err != nil {
		return err
	}
	// Reset so the second digest covers only the Describe payload.
	hash.Reset()
	hash.Write(b)
	h.DescribeHash = fmt.Sprintf("%x", hash.Sum(nil))
	return nil
}
// Base carries the identity and provenance of a host record.
// NOTE(review): ResourceHash's json tag is "resource_id", which collides
// with Describe.ResourceId's tag and looks like a copy-paste error; fixing
// it changes the JSON wire format, so confirm with API consumers first.
type Base struct {
	Id string `json:"id"` // globally unique id
	SyncAt int64 `json:"sync_at"` // last sync time
	Vendor Vendor `json:"vendor"` // cloud vendor
	Region string `json:"region"` // region
	Zone string `json:"zone"` // availability zone
	CreateAt int64 `json:"create_at"` // creation time
	InstanceId string `json:"instance_id"` // instance id
	ResourceHash string `json:"resource_id"` // hash of the Resource section
	DescribeHash string `json:"describe_hash"` // hash of the Describe section
}
// Resource carries the commercial and network attributes of a host.
type Resource struct {
	ExpireAt int64 `json:"expire_at"` // expiry time
	Category string `json:"category"` // category
	Type string `json:"type"` // instance spec/type
	Name string `json:"name"` // name
	Description string `json:"description"` // description
	Status string `json:"status"` // status at the provider
	Tags map[string]string `json:"tags"` // tags
	UpdateAt int64 `json:"update_at"` // last update time
	SyncAccount string `json:"sync_account"` // account used for syncing
	PublicIP string `json:"public_ip"` // public IP
	PrivateIP string `json:"private_ip"` // private IP
	PayType string `json:"pay_type"` // billing mode
}
// Describe carries the hardware/OS description of a host, linked back to its
// Resource record via ResourceId.
type Describe struct {
	ResourceId string `json:"resource_id"` // associated Resource id
	CPU int `json:"cpu"` // CPU core count
	Memory int `json:"memory"` // memory
	GPUAmount int `json:"gpu_amount"` // GPU count
	GPUSpec string `json:"gpu_spec"` // GPU type
	OSType string `json:"os_type"` // operating system type
	OSName string `json:"os_name"` // operating system name
	SerialNumber string `json:"serial_number"` // serial number
	ImageID string `json:"image_id"` // image id
	InternetMaxBandwidthOut int `json:"internet_max_bandwidth_out"` // outbound public bandwidth
	InternetMaxBandwidthIn int `json:"internet_max_bandwidth_in"` // inbound public bandwidth
	KeyPairName string `json:"key_pair_name"` // key pair name
	SecurityGroups string `json:"security_groups"` // security groups
}
// NewHostSet returns an empty, ready-to-append HostSet.
func NewHostSet() *HostSet {
	return &HostSet{
		Items: []*Host{},
	}
}
// HostSet is a paged collection of hosts: Items holds the current page and
// Total the overall record count.
type HostSet struct {
	Items []*Host `json:"items"`
	Total int `json:"total"`
}
// Add appends item to the set.
// NOTE(review): Total is not incremented here — confirm whether callers set
// it separately (e.g. from a COUNT query) or whether this is an omission.
func (s *HostSet) Add(item *Host) {
	s.Items = append(s.Items, item)
}
|
package pkgset
import (
"sync"
"golang.org/x/tools/go/packages"
)
// stdpkgs caches the standard-library package set; stdonce guarantees it is
// loaded at most once.
var stdpkgs Set
var stdonce sync.Once
// LoadStd preloads the std package list.
// The load runs once; subsequent calls return immediately. A load failure
// panics, since every query in this package depends on the result.
func LoadStd() {
	stdonce.Do(func() {
		standard, err := packages.Load(&packages.Config{
			Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports | packages.NeedModule,
			Tests: true,
		}, "std")
		if err != nil {
			panic(err)
		}
		stdpkgs = New(standard...)
	})
}
// IsStd returns whether *packages.Package is a std package
func IsStd(p *packages.Package) bool {
	LoadStd()
	return IsStdName(p.ID)
}
// IsStdName returns whether id corresponds to a standard package
func IsStdName(id string) bool {
	LoadStd()
	_, ok := stdpkgs[id]
	return ok
}
// Std returns the standard package set
// A clone is returned so callers cannot mutate the cached set.
func Std() Set {
	LoadStd()
	return stdpkgs.Clone()
}
|
package git
import (
"testing"
"github.com/stretchr/testify/assert"
"go.starlark.net/starlark"
"github.com/tilt-dev/tilt/internal/tiltfile/starkit"
)
// TestGitRepoPath verifies that local_git_repo(...).paths() joins a
// repo-relative path using OS-native separators.
func TestGitRepoPath(t *testing.T) {
	f := NewFixture(t)
	f.UseRealFS()
	f.File("Tiltfile", `
print(local_git_repo('.').paths('.git/index'))
`)
	f.File(".git/index", "HEAD")
	_, err := f.ExecFile("Tiltfile")
	assert.NoError(t, err)
	assert.Contains(t, f.PrintOutput(), f.JoinPath(".git", "index"))
}
// TestGitRepoBadMethodCall verifies that calling an undefined method on a
// git.Repo produces a Starlark error with a useful backtrace.
func TestGitRepoBadMethodCall(t *testing.T) {
	f := NewFixture(t)
	f.UseRealFS()
	f.File("Tiltfile", `
local_git_repo('.').asdf()
`)
	f.File(".git/index", "HEAD")
	_, err := f.ExecFile("Tiltfile")
	if assert.Error(t, err) {
		msg := err.(*starlark.EvalError).Backtrace()
		assert.Contains(t, msg, "Tiltfile:2:20: in <toplevel>")
		assert.Contains(t, msg, "Error: git.Repo has no .asdf field or method")
	}
}
// NewFixture builds a starkit test fixture with the git plugin installed.
func NewFixture(tb testing.TB) *starkit.Fixture {
	return starkit.NewFixture(tb, NewPlugin())
}
|
package main
import "fmt"
// main demonstrates a multi-value-returning closure: half halves its
// argument (integer division) and reports whether the input was even.
func main() {
	half := func(value int) (int, bool) {
		return value / 2, value%2 == 0
	}
	fmt.Println(half(5))
}
//package main
//
//import "fmt"
//
//func half(x int) (int, bool) {
//
// div := x / 2
//
// if x%2 == 0 {
// return div, true
// } else {
// return div, false
// }
//
//}
//
//func main() {
// fmt.Println(half(3))
//}
|
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package cache provides an LRU cache implementation to be used by the RLS LB
// policy to cache RLS response data.
package cache
import (
"container/list"
"sync"
"time"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/backoff"
)
var logger = grpclog.Component("rls")
// Key represents the cache key used to uniquely identify a cache entry.
// It contains only comparable fields so it can itself be used as a map key.
type Key struct {
	// Path is the full path of the incoming RPC request.
	Path string
	// KeyMap is a stringified version of the RLS request keys built using the
	// RLS keyBuilder. Since map is not a Type which is comparable in Go, it
	// cannot be part of the key for another map (the LRU cache is implemented
	// using a native map type).
	KeyMap string
}
// Entry wraps all the data to be stored in a cache entry.
type Entry struct {
	// Mu synchronizes access to this particular cache entry. The LB policy
	// will also hold another mutex to synchronize access to the cache as a
	// whole. To avoid holding the top-level mutex for the whole duration for
	// which one particular cache entry is acted upon, we use this entry mutex.
	Mu sync.Mutex
	// ExpiryTime is the absolute time at which the data cached as part of this
	// entry stops being valid. When an RLS request succeeds, this is set to
	// the current time plus the max_age field from the LB policy config. An
	// entry with this field in the past is not used to process picks.
	ExpiryTime time.Time
	// BackoffExpiryTime is the absolute time at which an entry which has gone
	// through backoff stops being valid.  When an RLS request fails, this is
	// set to the current time plus twice the backoff time. The cache expiry
	// timer will only delete entries for which both ExpiryTime and
	// BackoffExpiryTime are in the past.
	BackoffExpiryTime time.Time
	// StaleTime is the absolute time after which this entry will be
	// proactively refreshed if we receive a request for it. When an RLS
	// request succeeds, this is set to the current time plus the stale_age
	// from the LB policy config.
	StaleTime time.Time
	// BackoffTime is the absolute time at which the backoff period for this
	// entry ends. The backoff timer is setup with this value. No new RLS
	// requests are sent out for this entry until the backoff period ends.
	BackoffTime time.Time
	// EarliestEvictTime is the absolute time before which this entry should
	// not be evicted from the cache. This is set to a default value of 5
	// seconds when the entry is created. This is required to make sure that a
	// new entry added to the cache is not evicted before the RLS response
	// arrives (usually when the cache is too small).
	EarliestEvictTime time.Time
	// CallStatus stores the RPC status of the previous RLS request for this
	// entry. Picks for entries with a non-nil value for this field are failed
	// with the error stored here.
	CallStatus error
	// Backoff contains all backoff related state. When an RLS request
	// succeeds, backoff state is reset.
	Backoff BackoffState
	// HeaderData is received in an RLS response and is to be sent in the
	// X-Google-RLS-Data header for matching RPCs.
	HeaderData string
	// ChildPicker is a very thin wrapper around the child policy wrapper.
	// The type is declared as a Picker interface since the users of
	// the cache only care about the picker provided by the child policy, and
	// this makes it easy for testing.
	ChildPicker balancer.Picker
	// size stores the size of this cache entry. Uses only a subset of the
	// fields. See `entrySize` for how this is computed.
	size int64
	// key contains the cache key corresponding to this entry. This is required
	// from methods like `removeElement` which only have a pointer to the
	// list.Element which contains a reference to the cache.Entry. But these
	// methods need the cache.Key to be able to remove the entry from the
	// underlying map.
	key Key
}
// BackoffState wraps all backoff related state associated with a cache entry.
// It is held by value inside cache.Entry.
type BackoffState struct {
	// Retries keeps track of the number of RLS failures, to be able to
	// determine the amount of time to backoff before the next attempt.
	Retries int
	// Backoff is an exponential backoff implementation which returns the
	// amount of time to backoff, given the number of retries.
	Backoff backoff.Strategy
	// Timer fires when the backoff period ends and incoming requests after
	// this will trigger a new RLS request.
	Timer *time.Timer
	// Callback provided by the LB policy to be notified when the backoff timer
	// expires. This will trigger a new picker to be returned to the
	// ClientConn, to force queued up RPCs to be retried.
	Callback func()
}
// LRU is a cache with a least recently used eviction policy. It is not safe
// for concurrent access.
// usedSize tracks the sum of entrySize over all stored entries; ll orders
// entries most- to least-recently used, and cache maps each Key to its list
// element.
type LRU struct {
	maxSize int64
	usedSize int64
	onEvicted func(Key, *Entry)
	ll *list.List
	cache map[Key]*list.Element
}
// NewLRU creates a cache.LRU with a size limit of maxSize and the provided
// eviction callback.
//
// Currently, only the cache.Key and the HeaderData field from cache.Entry
// count towards the size of the cache (other overhead per cache entry is not
// counted). The cache could temporarily exceed the configured maxSize because
// we want the entries to spend a configured minimum amount of time in the
// cache before they are LRU evicted (so that all the work performed in sending
// an RLS request and caching the response is not a total waste).
//
// The provided onEvicted callback must not attempt to re-add the entry inline
// and the RLS LB policy does not have a need to do that.
//
// The cache package trusts the RLS policy (its only user) to supply a default
// minimum non-zero maxSize, in the event that the ServiceConfig does not
// provide a value for it.
func NewLRU(maxSize int64, onEvicted func(Key, *Entry)) *LRU {
	return &LRU{
		maxSize: maxSize,
		onEvicted: onEvicted,
		ll: list.New(),
		cache: make(map[Key]*list.Element),
	}
}
// Resize sets the size limit of the LRU to newMaxSize and removes older
// entries, if required, to comply with the new limit.
func (lru *LRU) Resize(newMaxSize int64) {
	lru.maxSize = newMaxSize
	// A newSize of 0 makes removeToFit evict purely to satisfy the new limit.
	lru.removeToFit(0)
}
// entrySize returns the accounted size of a cache entry: the lengths of the
// key strings plus the entry's HeaderData. Other fields are not counted.
// TODO(easwars): If required, make this function more sophisticated.
func entrySize(key Key, value *Entry) int64 {
	return int64(len(key.Path) + len(key.KeyMap) + len(value.HeaderData))
}
// removeToFit removes older entries from the cache to make room for a new
// entry of size newSize.
func (lru *LRU) removeToFit(newSize int64) {
	now := time.Now()
	for lru.usedSize+newSize > lru.maxSize {
		elem := lru.ll.Back()
		if elem == nil {
			// This is a corner case where the cache is empty, but the new entry
			// to be added is bigger than maxSize.
			logger.Info("rls: newly added cache entry exceeds cache maxSize")
			return
		}

		entry := elem.Value.(*Entry)
		// BUG FIX: the original condition was `t.Before(now)`, which aborted
		// eviction for entries whose minimum-residency deadline had already
		// PASSED and evicted brand-new ones — the opposite of the documented
		// intent. Abort only while EarliestEvictTime is still in the future.
		if t := entry.EarliestEvictTime; !t.IsZero() && now.Before(t) {
			// When the oldest entry is too new (it hasn't even spent a default
			// minimum amount of time in the cache), we abort and allow the
			// cache to grow bigger than the configured maxSize.
			logger.Info("rls: LRU eviction finds oldest entry to be too new. Allowing cache to exceed maxSize momentarily")
			return
		}
		lru.removeElement(elem)
	}
}
// Add adds a new entry to the cache.
// For a brand-new key the entry is pushed to the front after making room;
// for an existing key the entry is replaced in place, the size accounting is
// adjusted by the delta, and the element is promoted to most-recently used.
func (lru *LRU) Add(key Key, value *Entry) {
	size := entrySize(key, value)
	elem, ok := lru.cache[key]
	if !ok {
		lru.removeToFit(size)
		lru.usedSize += size
		value.size = size
		value.key = key
		elem := lru.ll.PushFront(value)
		lru.cache[key] = elem
		return
	}

	existing := elem.Value.(*Entry)
	sizeDiff := size - existing.size
	lru.removeToFit(sizeDiff)
	value.size = size
	elem.Value = value
	lru.ll.MoveToFront(elem)
	lru.usedSize += sizeDiff
}
// Remove removes a cache entry with key key, if one exists.
func (lru *LRU) Remove(key Key) {
	if elem, ok := lru.cache[key]; ok {
		lru.removeElement(elem)
	}
}
// removeElement unlinks e from the LRU list, deletes its map entry, adjusts
// the size accounting and fires the eviction callback, if any.
func (lru *LRU) removeElement(e *list.Element) {
	entry := e.Value.(*Entry)
	lru.ll.Remove(e)
	delete(lru.cache, entry.key)
	lru.usedSize -= entry.size
	if lru.onEvicted != nil {
		lru.onEvicted(entry.key, entry)
	}
}
// Get returns a cache entry with key key.
// A hit promotes the entry to most-recently used; a miss returns nil.
func (lru *LRU) Get(key Key) *Entry {
	elem, ok := lru.cache[key]
	if !ok {
		return nil
	}
	lru.ll.MoveToFront(elem)
	return elem.Value.(*Entry)
}
|
package review
type CreateReviewInput struct {
UserID uint
MovieID string `json:"movie_id" binding:"required"`
Review string `json:"review" binding:"required"`
Rate string `json:"rate" binding:"required"`
}
// UpdateReviewInput is the request payload for updating an existing review.
type UpdateReviewInput struct {
	MovieID string `json:"movie_id" binding:"required"`
	Review  string `json:"review" binding:"required"`
	Rate    string `json:"rate" binding:"required"`
}
// GetReviewUriInput binds the review id from the request URI path.
type GetReviewUriInput struct {
	ID int `uri:"id" binding:"required"`
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package brightness
import (
"context"
"fmt"
"strconv"
"strings"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
)
// Percent gets the current brightness of the system, as reported by
// backlight_tool, as a percentage in [0, 100].
func Percent(ctx context.Context) (float64, error) {
	raw, err := testexec.CommandContext(ctx, "backlight_tool", "--get_brightness_percent").Output()
	if err != nil {
		return 0.0, errors.Wrap(err, "failed to execute brightness command")
	}
	text := strings.TrimSpace(string(raw))
	brightness, err := strconv.ParseFloat(text, 64)
	if err != nil {
		return 0.0, errors.Wrap(err, "failed to parse string into float64")
	}
	return brightness, nil
}
// SetPercent sets the brightness of the system to the given percentage via
// backlight_tool.
func SetPercent(ctx context.Context, percent float64) error {
	arg := fmt.Sprintf("--set_brightness_percent=%f", percent)
	cmd := testexec.CommandContext(ctx, "backlight_tool", arg)
	if err := cmd.Run(); err != nil {
		return errors.Wrapf(err, "failed to set %f%% brightness", percent)
	}
	return nil
}
|
package ymdRedisServer
import (
"testing"
"sort"
"github.com/orestonce/ymd/ymdAssert"
"github.com/orestonce/ymd/ymdRedis/ymdRedisProtocol"
)
// TestRedisCore_SAdd verifies SAdd returns the number of NEW members added
// and does not count duplicates again.
func TestRedisCore_SAdd(t *testing.T) {
	core := newDebugRedisCore()
	add, errMsg := core.SAdd(`k`, `m1`, `m2`, `m3`)
	ymdAssert.True(errMsg == `` && add == 3)
	// m2 and m3 already exist, so only m8 counts as newly added.
	add, errMsg = core.SAdd(`k`, `m2`, `m3`, `m8`)
	ymdAssert.True(errMsg == `` && add == 1)
}
// TestRedisCore_SCard verifies SCard returns the set's cardinality.
func TestRedisCore_SCard(t *testing.T) {
	core := newDebugRedisCore()
	core.SAdd(`k`, `m1`, `m2`, `m3`)
	size, errMsg := core.SCard(`k`)
	ymdAssert.True(errMsg == `` && size == 3)
}
// TestRedisCore_SDiff verifies SDiff returns exactly the members of the
// first set that appear in none of the following sets.
func TestRedisCore_SDiff(t *testing.T) {
	core := newDebugRedisCore()
	core.SAdd(`k1`, `a`, `b`, `c`, `d`)
	core.SAdd(`k2`, `c`)
	core.SAdd(`k3`, `a`, `c`, `e`)
	reply, errMsg := core.SDiff(`k1`, `k2`, `k3`)
	ymdAssert.True(errMsg == `` && len(reply.GetValueSlice()) == 2)
	// Expected difference is {b, d}; consume each returned member from the
	// map so an unexpected or duplicated member would be caught.
	data := map[string]struct{}{
		`b`: {}, `d`: {},
	}
	for _, one := range reply.GetValueSlice() {
		_, ok := data[string(one.Value)]
		ymdAssert.True(ok)
		delete(data, string(one.Value))
	}
	ymdAssert.True(len(data) == 0)
}
// TestRedisCore_SDiff2 covers SDiff on missing keys (empty result) and on a
// key holding a non-set value (type error).
func TestRedisCore_SDiff2(t *testing.T) {
	core := newDebugRedisCore()
	reply, errMsg := core.SDiff(`k1`, `k2`)
	ymdAssert.True(errMsg == `` && len(reply.GetValueSlice()) == 0)
	// `key` holds a plain string, so set operations on it must fail.
	core.Set(`key`, `value`)
	_, errMsg = core.SDiff(`key`, `k1`)
	ymdAssert.True(errMsg == ymdRedisProtocol.EMInvalidType)
}
// TestRedisCore_SDiff3 verifies that missing keys and repeated keys in the
// subtrahend list are tolerated and still yield an empty difference when
// the first set is fully covered.
func TestRedisCore_SDiff3(t *testing.T) {
	core := newDebugRedisCore()
	core.SAdd(`k1`, `m1`, `m2`, `m3`)
	core.SAdd(`k2`, `m1`, `m2`)
	core.SAdd(`k3`, `m3`, `m4`)
	// k4 does not exist; k2 and k3 together cover all of k1.
	reply, errMsg := core.SDiff(`k1`, `k4`, `k2`, `k3`)
	ymdAssert.True(errMsg == `` && len(reply.GetValueSlice()) == 0)
	// Repeating k2 must not change the result.
	reply, errMsg = core.SDiff(`k1`, `k2`, `k3`, `k2`)
	ymdAssert.True(errMsg == `` && len(reply.GetValueSlice()) == 0)
}
// TestRedisCore_SInter covers SInter: type errors for non-set keys, a
// non-empty intersection, and the empty intersection when a missing key is
// included.
func TestRedisCore_SInter(t *testing.T) {
	core := newDebugRedisCore()
	// k0 holds a plain string, so intersecting it is a type error.
	core.Set(`k0`, `v0`)
	_, errMsg := core.SInter(`k0`, `k2`)
	ymdAssert.True(errMsg == ymdRedisProtocol.EMInvalidType)
	core.SAdd(`k1`, `a`, `b`, `c`, `d`)
	core.SAdd(`k2`, `c`)
	core.SAdd(`k3`, `a`, `c`, `e`)
	core.SAdd(`k4`, `e`, `f`, `g`)
	core.SAdd(`k5`, `e`, `g`, `z`)
	reply, errMsg := core.SInter(`k1`, `k2`, `k3`)
	ymdAssert.True(errMsg == `` && len(reply.GetValueSlice()) == 1)
	ymdAssert.True(string(reply.GetValueSlice()[0].Value) == `c`)
	// k6 does not exist, so the intersection collapses to empty.
	reply, errMsg = core.SInter(`k1`, `k2`, `k3`, `k4`, `k5`, `k6`)
	ymdAssert.True(errMsg == `` && len(reply.GetValueSlice()) == 0)
	// The type error must surface even when the bad key is not first.
	_, errMsg = core.SInter(`k1`, `k0`)
	ymdAssert.True(errMsg == ymdRedisProtocol.EMInvalidType, errMsg)
	reply, errMsg = core.SInter(`k1`, `k8`)
	ymdAssert.True(errMsg == `` && len(reply.GetValueSlice()) == 0)
}
// TestRedisCore_SIsMember covers membership hits, misses, missing keys
// (treated as empty sets), and the type error for non-set values.
func TestRedisCore_SIsMember(t *testing.T) {
	core := newDebugRedisCore()
	core.SAdd(`k1`, `a`, `b`, `c`, `d`)
	is, errMsg := core.SIsMember(`k1`, `a`)
	ymdAssert.True(errMsg == `` && is == 1)
	is, errMsg = core.SIsMember(`k1`, `e`)
	ymdAssert.True(errMsg == `` && is == 0)
	// A missing key behaves like an empty set.
	is, errMsg = core.SIsMember(`k0`, `e`)
	ymdAssert.True(errMsg == `` && is == 0)
	// k2 holds a plain string; only the error matters here, so discard the
	// count instead of leaving a dead assignment to `is` (staticcheck SA4006).
	core.Set(`k2`, `v`)
	_, errMsg = core.SIsMember(`k2`, `e`)
	ymdAssert.True(errMsg == ymdRedisProtocol.EMInvalidType)
}
// TestRedisCore_SMembers verifies SMembers returns every member of the set.
// The reply order is unspecified, so the list is sorted before comparing.
func TestRedisCore_SMembers(t *testing.T) {
	core := newDebugRedisCore()
	core.SAdd(`k1`, `a`, `b`, `c`, `d`)
	reply, errMsg := core.SMembers(`k1`)
	ymdAssert.True(errMsg == `` && len(reply.GetValueSlice()) == 4)
	list := reply.ToStringList()
	sort.Strings(list)
	ymdAssert.True(list[0] == `a`)
	ymdAssert.True(list[1] == `b`)
	ymdAssert.True(list[2] == `c`)
	ymdAssert.True(list[3] == `d`)
}
// TestRedisCore_SMove verifies a successful move removes the member from
// the source set, and that moving from a missing key is a no-op (count 0).
func TestRedisCore_SMove(t *testing.T) {
	core := newDebugRedisCore()
	core.SAdd(`k1`, `a`, `b`, `c`, `d`)
	cnt, errMsg := core.SMove(`k1`, `k2`, `b`)
	ymdAssert.True(errMsg == `` && cnt == 1)
	// After the move, b must be gone from k1.
	reply, errMsg := core.SMembers(`k1`)
	strList := reply.ToStringList()
	sort.Strings(strList)
	ymdAssert.True(errMsg == `` && len(strList) == 3)
	ymdAssert.True(strList[0] == `a`)
	ymdAssert.True(strList[1] == `c`)
	ymdAssert.True(strList[2] == `d`)
	// Moving from a non-existent source set moves nothing.
	cnt, errMsg = core.SMove(`k9`, `k8`, `f`)
	ymdAssert.True(errMsg == `` && cnt == 0)
}
// TestRedisCore_SMove2 covers SMove edge cases: a member absent from the
// source, a destination holding a non-set value, and the source set being
// deleted once its last member moves out.
func TestRedisCore_SMove2(t *testing.T) {
	core := newDebugRedisCore()
	core.SAdd(`k1`, `a`, `b`, `c`)
	core.SAdd(`k2`, `a`)
	cnt, errMsg := core.SMove(`k1`, `k2`, `d`)
	ymdAssert.True(errMsg == `` && cnt == 0)
	// Destination k3 holds a plain string: type error, nothing moved.
	core.Set(`k3`, `v3`)
	cnt, errMsg = core.SMove(`k1`, `k3`, `a`)
	ymdAssert.True(errMsg == ymdRedisProtocol.EMInvalidType && cnt == 0)
	cnt, errMsg = core.SMove(`k2`, `k4`, `a`)
	ymdAssert.True(errMsg == `` && cnt == 1)
	// k2 became empty and should have been removed entirely.
	_, ok := core.data[`k2`]
	ymdAssert.True(!ok)
}
// TestRedisCore_SRem verifies SRem counts only members actually removed,
// deletes the key once the set is empty, and rejects non-set values.
func TestRedisCore_SRem(t *testing.T) {
	core := newDebugRedisCore()
	core.SAdd(`k1`, `a`, `b`, `c`)
	// d is not a member, so only b and c count.
	cnt, errMsg := core.SRem(`k1`, `b`, `c`, `d`)
	ymdAssert.True(errMsg == `` && cnt == 2)
	cnt, errMsg = core.SRem(`k1`, `a`, `e`)
	ymdAssert.True(errMsg == `` && cnt == 1)
	// The now-empty set must have been deleted from the keyspace.
	_, ok := core.data[`k1`]
	ymdAssert.True(!ok)
	core.Set(`k2`, `v`)
	_, errMsg = core.SRem(`k2`, `v`)
	ymdAssert.True(errMsg == ymdRedisProtocol.EMInvalidType)
}
// TestRedisCore_LSet verifies LSet with a negative (from-the-end) index,
// and the error cases: missing key, wrong value type, and out-of-range
// index.
func TestRedisCore_LSet(t *testing.T) {
	core := newDebugRedisCore()
	core.RPush(`list`, `a`, `b`, `c`)
	// Index -2 addresses the middle element.
	errMsg := core.LSet(`list`, -2, `d`)
	ymdAssert.True(errMsg == ``)
	listEqual(core, `list`, `a`, `d`, `c`)
	errMsg = core.LSet(`list2`, 1, `1`)
	ymdAssert.True(errMsg == ymdRedisProtocol.EMNoKey)
	core.Set(`key`, `value`)
	errMsg = core.LSet(`key`, 1, `1`)
	ymdAssert.True(errMsg == ymdRedisProtocol.EMInvalidType)
	errMsg = core.LSet(`list`, 1000, `x`)
	ymdAssert.True(errMsg == ymdRedisProtocol.EMIndex)
}
// TestRedisCore_SUnion verifies SUnion merges all sets (missing keys are
// treated as empty) and deduplicates members. Order is unspecified, so the
// result is sorted before comparing.
func TestRedisCore_SUnion(t *testing.T) {
	core := newDebugRedisCore()
	core.SAdd(`k1`, `a`, `b`, `c`, `d`)
	core.SAdd(`k2`, `c`)
	core.SAdd(`k3`, `a`, `c`, `e`)
	reply, errMsg := core.SUnion(`k1`, `k2`, `k3`, `k4`)
	ymdAssert.True(errMsg == ``)
	strList := reply.ToStringList()
	sort.Strings(strList)
	ymdAssert.True(len(strList) == 5)
	ymdAssert.True(strList[0] == `a`)
	ymdAssert.True(strList[1] == `b`)
	ymdAssert.True(strList[2] == `c`)
	ymdAssert.True(strList[3] == `d`)
	ymdAssert.True(strList[4] == `e`)
}
|
package metal
import (
"github.com/ionous/sashimi/util/ident"
)
// CallbackList holds an ordered list of callback identifiers.
type CallbackList struct {
	callbacks []ident.Id
}
// NumCallback returns how many callback ids are stored in the list.
func (cl CallbackList) NumCallback() int {
	return len(cl.callbacks)
}
// CallbackNum returns the i-th callback id. The index is not range-checked,
// so i must be in [0, NumCallback()).
func (cl CallbackList) CallbackNum(i int) ident.Id {
	p := cl.callbacks[i]
	return p // CallbackWrapper(p)
}
|
package strategy
import (
"github.com/joshprzybyszewski/cribbage/logic/pegging"
"github.com/joshprzybyszewski/cribbage/model"
)
// PegHighestCardNow picks the card from hand that scores the most pegging
// points right now. The second return value is true when no card can be
// legally played (every card would push the count past 31), signalling a
// "go". A scoring error yields the zero card and false.
func PegHighestCardNow(hand []model.Card, prevPegs []model.PeggedCard, curPeg int) (model.Card, bool) {
	var best model.Card
	bestScore := -1
	unplayable := 0
	for _, card := range hand {
		// Skip cards that would exceed the 31-point pegging limit.
		if curPeg+card.PegValue() > 31 {
			unplayable++
			continue
		}
		pts, err := pegging.PointsForCard(prevPegs, card)
		if err != nil {
			return model.Card{}, false
		}
		if pts > bestScore {
			best = card
			bestScore = pts
		}
	}
	if unplayable == len(hand) {
		return model.Card{}, true
	}
	return best, false
}
|
package entity
import "sync"
// UserInfo is a user record as exchanged over JSON.
// NOTE(review): Password is serialized in JSON here — confirm this struct
// is never returned to clients, or strip the field before responding.
type UserInfo struct {
	Id        uint64 `json:"id"`
	Username  string `json:"username"`
	SayHello  string `json:"sayHello"`
	Password  string `json:"password"`
	CreatedAt string `json:"createdAt"`
	UpdatedAt string `json:"updatedAt"`
}
// UserList is an in-memory user store keyed by user id. Lock must be held
// for any access to IdMap when used from multiple goroutines.
type UserList struct {
	Lock  *sync.Mutex
	IdMap map[uint64]*UserInfo
}
|
package repository
import (
"../entity"
)
// repo is a stateless Firestore-backed implementation of PostRepository.
type repo struct{}
// NewFirestoreRepository creates a PostRepository backed by Firestore.
func NewFirestoreRepository() PostRepository {
	var r repo
	return &r
}
// Save persists a post to Firestore.
// Currently a stub: it ignores its input and returns an empty post.
func (*repo) Save(post *entity.Post) (*entity.Post, error) {
	saved := new(entity.Post)
	return saved, nil
}
// FindAll retrieves every post from Firestore.
// Currently a stub: it returns an empty (non-nil) slice.
func (*repo) FindAll() ([]entity.Post, error) {
	posts := make([]entity.Post, 0)
	return posts, nil
}
|
/*
Copyright 2014 Huawei Technologies Co., Ltd. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integratetest
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"testing"
"github.com/satori/go.uuid"
"github.com/stretchr/testify/assert"
"github.com/containerops/dockyard/updateservice/client"
"github.com/containerops/dockyard/utils"
)
// getTestURL builds a unique test repository URL under the dockyard server
// named by US_TEST_SERVER, or returns "" when that variable is unset (the
// caller treats this as "environment not ready" and skips the test).
func getTestURL() string {
	// Start a dockyard web server and set the environment like this:
	// $ export US_TEST_SERVER=http://localhost:1234
	server := os.Getenv("US_TEST_SERVER")
	if server == "" {
		return ""
	}
	// Random UUIDs keep concurrent test runs from colliding.
	// TODO: need to clean the repo in the server after finish the testing
	ns := "namespace-" + uuid.NewV4().String()
	repo := "repository-" + uuid.NewV4().String()
	return fmt.Sprintf("%s/%s/%s", server, ns, repo)
}
// TestOper tests add/get/getmeta/getmetasign/list
func TestOper(t *testing.T) {
	var appV1Repo client.UpdateClientAppV1Repo
	validURL := getTestURL()
	// Skip the test if the testing enviornment is not ready
	if validURL == "" {
		fmt.Printf("Skip the '%s' test since the testing enviornment is not ready.\n", "List")
		return
	}
	testFiles := []string{"osA/archA/appA", "osB/archB/appB"}
	f, _ := appV1Repo.New(validURL)
	// Clean up the uploaded files even when an assertion fails mid-test.
	defer func() {
		for _, tf := range testFiles {
			err := f.Delete(tf)
			assert.Nil(t, err, "Fail to delete file")
		}
	}()
	// Init the data and also test the put function.
	// runtime.Caller(0) locates this source file so testdata/ can be found
	// relative to it.
	_, path, _, _ := runtime.Caller(0)
	for _, tf := range testFiles {
		file := filepath.Join(filepath.Dir(path), "testdata", tf)
		content, _ := ioutil.ReadFile(file)
		err := f.Put(tf, content, utils.EncryptNone)
		assert.Nil(t, err, "Fail to put file")
	}
	// Test list
	l, err := f.List()
	assert.Nil(t, err, "Fail to list")
	assert.Equal(t, len(l), 2, "Fail to list or something wrong in put")
	// Listing order is not guaranteed, so accept either permutation.
	ok := (l[0] == testFiles[0] && l[1] == testFiles[1]) || (l[0] == testFiles[1] && l[1] == testFiles[0])
	assert.Equal(t, true, ok, "Fail to list the correct data")
	// Test get file
	fileBytes, err := f.GetFile(testFiles[0])
	assert.Nil(t, err, "Fail to get file")
	expectedBytes, _ := ioutil.ReadFile(filepath.Join(filepath.Dir(path), "testdata", testFiles[0]))
	assert.Equal(t, fileBytes, expectedBytes, "Fail to get the correct data")
	// Test get meta
	metaBytes, err := f.GetMeta()
	assert.Nil(t, err, "Fail to get meta file")
	// Test get metasign
	signBytes, err := f.GetMetaSign()
	assert.Nil(t, err, "Fail to get meta signature file")
	// Test get public key
	pubkeyBytes, err := f.GetPublicKey()
	assert.Nil(t, err, "Fail to get public key file")
	// VIP: Verify meta/sign with public to make real sure that everything works perfect
	err = utils.SHA256Verify(pubkeyBytes, metaBytes, signBytes)
	assert.Nil(t, err, "Fail to verify the meta data")
}
|
package main
import (
"fmt"
)
// main wires a producer (gen) to a consumer (recibir) over two channels:
// one carrying data and one signalling completion.
func main() {
	salir := make(chan int)
	datos := gen(salir)
	recibir(datos, salir)
	fmt.Println("A punto de finalizar.")
}
// recibir consumes values from c, printing each one, until a value arrives
// on c2, which is treated as the exit signal.
func recibir(c, c2 <-chan int) {
	for {
		select {
		case msg := <-c:
			fmt.Println("received message1 ", msg)
		case <-c2:
			fmt.Println("received exit")
			return
		}
	}
}
// gen returns an unbuffered channel on which a background goroutine sends
// 0..99; it then signals completion on salir and closes the data channel.
func gen(salir chan<- int) <-chan int {
	out := make(chan int)
	go func() {
		// close runs after the completion signal, matching the original
		// send-then-close ordering.
		defer close(out)
		for i := 0; i < 100; i++ {
			out <- i
		}
		salir <- 1
	}()
	return out
}
|
package health
import (
"net/url"
"github.com/cerana/cerana/acomm"
"github.com/cerana/cerana/pkg/errors"
"github.com/cerana/cerana/provider"
)
// Mock is a mock Health provider. Each check passes or fails according to
// the corresponding flag in Data.
type Mock struct {
	Data MockData
}
// MockData is mock data for the Mock provider. A true flag makes the
// matching health check succeed; false makes it return an error.
type MockData struct {
	Uptime      bool
	File        bool
	TCPResponse bool
	HTTPStatus  bool
}
// NewMock creates a new mock provider with every health check initialized
// to the passing state.
func NewMock() *Mock {
	data := MockData{
		Uptime:      true,
		File:        true,
		TCPResponse: true,
		HTTPStatus:  true,
	}
	return &Mock{Data: data}
}
// RegisterTasks registers all of the Mock health task handlers with the
// server, one task per mock check.
func (m *Mock) RegisterTasks(server *provider.Server) {
	server.RegisterTask("health-uptime", m.Uptime)
	server.RegisterTask("health-file", m.File)
	server.RegisterTask("health-tcp-response", m.TCPResponse)
	server.RegisterTask("health-http-status", m.HTTPStatus)
}
// Uptime is a mock uptime health check; it fails only when Data.Uptime is
// false.
func (m *Mock) Uptime(req *acomm.Request) (interface{}, *url.URL, error) {
	if m.Data.Uptime {
		return nil, nil, nil
	}
	return nil, nil, errors.New("uptime less than expected")
}
// File is a mock file health check; it fails only when Data.File is false.
func (m *Mock) File(req *acomm.Request) (interface{}, *url.URL, error) {
	if m.Data.File {
		return nil, nil, nil
	}
	return nil, nil, errors.New("file does not exist")
}
// TCPResponse is a mock tcp response health check; it fails only when
// Data.TCPResponse is false.
func (m *Mock) TCPResponse(req *acomm.Request) (interface{}, *url.URL, error) {
	if m.Data.TCPResponse {
		return nil, nil, nil
	}
	return nil, nil, errors.New("response did not match")
}
// HTTPStatus is a mock http status health check; it fails only when
// Data.HTTPStatus is false.
func (m *Mock) HTTPStatus(req *acomm.Request) (interface{}, *url.URL, error) {
	if m.Data.HTTPStatus {
		return nil, nil, nil
	}
	return nil, nil, errors.New("unexpected response status code")
}
|
package component
import (
"github.com/maxence-charriere/go-app/v7/pkg/app"
"github.com/pelly-ryu/minim/app/internal"
)
// NoteList is a collapsible sidebar component listing all stored notes.
type NoteList struct {
	app.Compo
	// opened tracks whether the sidebar is expanded.
	opened bool
}
// NewNoteList creates a NoteList component in the closed state.
func NewNoteList() *NoteList {
	var l NoteList
	// The zero value of opened is false, i.e. the list starts closed.
	return &l
}
// Render builds the note-list UI. While closed it renders only a clickable
// collapsed aside; while open it loads every note from storage and renders
// one list item per note.
func (l *NoteList) Render() app.UI {
	if !l.opened {
		return app.Aside().ID("note-list").Class("closed").
			OnClick(func(ctx app.Context, e app.Event) {
				l.Toggle()
			})
	}
	ids, err := internal.StorageListNoteId()
	if err != nil {
		panic(err)
	}
	var noteListEl []app.UI
	for _, id := range ids {
		n, err := internal.StorageGetNote(id)
		if err != nil {
			panic(err)
		}
		// Preview text: at most the first 50 bytes of the note body.
		short := n.Body
		if len(n.Body) > 50 {
			short = n.Body[:50]
		}
		// NOTE(review): "Tilo Mitra" looks like a leftover placeholder from
		// a CSS template — confirm whether it should show the note author.
		el := app.Div().Class("note-list-item note-list-item-selected").Body(
			app.H5().Class("note-list-name").Text("Tilo Mitra"),
			app.H4().Class("note-list-subject").Text(n.Title),
			app.P().Class("note-list-desc").Text(short),
		)
		noteListEl = append(noteListEl, el)
	}
	return app.Aside().ID("note-list").Class("pure-u-1").Body(
		noteListEl...
	)
}
// Toggle flips the open/closed state and triggers a re-render.
func (l *NoteList) Toggle() {
	l.opened = !l.opened
	l.Update()
}
// Opened reports whether the note list is currently expanded.
func (l *NoteList) Opened() bool {
	return l.opened
}
|
package api
import (
"encoding/json"
"net/http"
"github.com/jacexh/golang-ddd-template/internal/application"
"github.com/jacexh/golang-ddd-template/internal/transport/dto"
)
// CreateUser handles an HTTP request that creates a user: it decodes the
// JSON body, delegates to the application layer, and echoes the user back
// as JSON.
func CreateUser(w http.ResponseWriter, r *http.Request) {
	u := new(dto.User)
	// Reject malformed bodies instead of silently creating a zero user.
	if err := json.NewDecoder(r.Body).Decode(u); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Surface creation failures instead of always reporting success.
	if err := application.User.CreateUser(r.Context(), u); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// BUG FIX: headers must be set BEFORE WriteHeader — net/http ignores
	// header changes made after the status line is written, so the original
	// order never actually sent the Content-Type.
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	_ = json.NewEncoder(w).Encode(u)
}
|
package stapi
// RESTURL is the base URL of the stapi.co REST API (v1).
const RESTURL = "http://stapi.co/api/v1/rest"

// maxToleranceResult caps acceptable result-set size: if a result is bigger
// than this tolerance, ErrorTooManyResults is returned.
const maxToleranceResult = 10
|
package v2
import (
"errors"
"fmt"
"log"
"net/http"
"net/url"
"github.com/google/uuid"
"github.com/labstack/echo/v4"
"github.com/traPtitech/trap-collection-server/pkg/types"
"github.com/traPtitech/trap-collection-server/src/domain"
"github.com/traPtitech/trap-collection-server/src/domain/values"
"github.com/traPtitech/trap-collection-server/src/handler/v2/openapi"
"github.com/traPtitech/trap-collection-server/src/service"
)
// Edition is the HTTP handler set for the /editions API, delegating all
// business logic to the edition service.
type Edition struct {
	editionService service.Edition
}
// NewEdition creates an Edition handler with its service dependency.
func NewEdition(editionService service.Edition) *Edition {
	e := &Edition{editionService: editionService}
	return e
}
// GetEditions returns the list of all editions.
// (GET /editions)
func (edition *Edition) GetEditions(c echo.Context) error {
	editions, err := edition.editionService.GetEditions(c.Request().Context())
	if err != nil {
		log.Printf("error: failed to get editions: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to get editions")
	}
	res := make([]openapi.Edition, 0, len(editions))
	for _, edition := range editions {
		// ErrNoQuestionnaire just means this edition has no questionnaire;
		// only other errors are fatal here.
		questionnaireURL, err := edition.GetQuestionnaireURL()
		if err != nil && !errors.Is(err, domain.ErrNoQuestionnaire) {
			log.Printf("error: failed to get questionnaire url: %v\n", err)
			return echo.NewHTTPError(http.StatusInternalServerError, "failed to get questionnaire url")
		}
		// Leave the URL nil in the response when there is no questionnaire.
		var strQuestionnaireURL *string
		if !errors.Is(err, domain.ErrNoQuestionnaire) {
			v := (*url.URL)(questionnaireURL).String()
			strQuestionnaireURL = &v
		}
		res = append(res, openapi.Edition{
			Id:            uuid.UUID(edition.GetID()),
			Name:          string(edition.GetName()),
			Questionnaire: strQuestionnaireURL,
			CreatedAt:     edition.GetCreatedAt(),
		})
	}
	return c.JSON(http.StatusOK, res)
}
// PostEdition creates a new edition.
// (POST /editions)
func (edition *Edition) PostEdition(c echo.Context) error {
	var req openapi.NewEdition
	err := c.Bind(&req)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, "invalid request")
	}
	name := values.NewLauncherVersionName(req.Name)
	if err := name.Validate(); err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid name: %v", err.Error()))
	}
	// The questionnaire URL is optional; the Option stays empty when the
	// request omits it.
	var optionQuestionnaireURL types.Option[values.LauncherVersionQuestionnaireURL]
	if req.Questionnaire != nil {
		urlValue, err := url.Parse(*req.Questionnaire)
		if err != nil {
			return echo.NewHTTPError(http.StatusBadRequest, "invalid questionnaire url")
		}
		optionQuestionnaireURL = types.NewOption(values.NewLauncherVersionQuestionnaireURL(urlValue))
	}
	gameVersionIDs := make([]values.GameVersionID, 0, len(req.GameVersions))
	for _, gameVersionID := range req.GameVersions {
		gameVersionIDs = append(gameVersionIDs, values.NewGameVersionIDFromUUID(gameVersionID))
	}
	domainEdition, err := edition.editionService.CreateEdition(
		c.Request().Context(),
		name,
		optionQuestionnaireURL,
		gameVersionIDs,
	)
	// Map service-level validation failures to 400s; anything else is a 500.
	switch {
	case errors.Is(err, service.ErrInvalidGameVersionID):
		return echo.NewHTTPError(http.StatusBadRequest, "invalid game version id")
	case errors.Is(err, service.ErrDuplicateGameVersion):
		return echo.NewHTTPError(http.StatusBadRequest, "duplicate game version")
	case errors.Is(err, service.ErrDuplicateGame):
		return echo.NewHTTPError(http.StatusBadRequest, "duplicate game")
	case err != nil:
		log.Printf("error: failed to create edition: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to create edition")
	}
	// ErrNoQuestionnaire is expected when no questionnaire was supplied.
	questionnaireURL, err := domainEdition.GetQuestionnaireURL()
	if err != nil && !errors.Is(err, domain.ErrNoQuestionnaire) {
		log.Printf("error: failed to get questionnaire url: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to get questionnaire url")
	}
	var strQuestionnaireURL *string
	if !errors.Is(err, domain.ErrNoQuestionnaire) {
		v := (*url.URL)(questionnaireURL).String()
		strQuestionnaireURL = &v
	}
	return c.JSON(http.StatusCreated, openapi.Edition{
		Id:            uuid.UUID(domainEdition.GetID()),
		Name:          string(domainEdition.GetName()),
		Questionnaire: strQuestionnaireURL,
		CreatedAt:     domainEdition.GetCreatedAt(),
	})
}
// DeleteEdition deletes the edition identified by editionID.
// (DELETE /editions/{editionID})
func (edition *Edition) DeleteEdition(ctx echo.Context, editionID openapi.EditionIDInPath) error {
	id := values.NewLauncherVersionIDFromUUID(editionID)
	switch err := edition.editionService.DeleteEdition(ctx.Request().Context(), id); {
	case errors.Is(err, service.ErrInvalidEditionID):
		return echo.NewHTTPError(http.StatusBadRequest, "invalid edition id")
	case err != nil:
		log.Printf("error: failed to delete edition: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to delete edition")
	}
	return ctx.NoContent(http.StatusOK)
}
// GetEdition returns the details of a single edition.
// (GET /editions/{editionID})
func (edition *Edition) GetEdition(ctx echo.Context, editionID openapi.EditionIDInPath) error {
	domainEdition, err := edition.editionService.GetEdition(ctx.Request().Context(), values.NewLauncherVersionIDFromUUID(editionID))
	if errors.Is(err, service.ErrInvalidEditionID) {
		return echo.NewHTTPError(http.StatusBadRequest, "invalid edition id")
	}
	if err != nil {
		log.Printf("error: failed to get edition: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to get edition")
	}
	// ErrNoQuestionnaire simply means no questionnaire is attached; only
	// other errors are fatal.
	questionnaireURL, err := domainEdition.GetQuestionnaireURL()
	if err != nil && !errors.Is(err, domain.ErrNoQuestionnaire) {
		log.Printf("error: failed to get questionnaire url: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to get questionnaire url")
	}
	var strQuestionnaireURL *string
	if !errors.Is(err, domain.ErrNoQuestionnaire) {
		v := (*url.URL)(questionnaireURL).String()
		strQuestionnaireURL = &v
	}
	return ctx.JSON(http.StatusOK, openapi.Edition{
		Id:            uuid.UUID(domainEdition.GetID()),
		Name:          string(domainEdition.GetName()),
		Questionnaire: strQuestionnaireURL,
		CreatedAt:     domainEdition.GetCreatedAt(),
	})
}
// PatchEdition updates an edition's name and questionnaire URL.
// (PATCH /editions/{editionID})
func (edition *Edition) PatchEdition(ctx echo.Context, editionID openapi.EditionIDInPath) error {
	var req openapi.PatchEdition
	err := ctx.Bind(&req)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, "invalid request")
	}
	name := values.NewLauncherVersionName(req.Name)
	if err := name.Validate(); err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("invalid name: %v", err.Error()))
	}
	// The questionnaire URL is optional; the Option stays empty when the
	// request omits it.
	var optionQuestionnaireURL types.Option[values.LauncherVersionQuestionnaireURL]
	if req.Questionnaire != nil {
		urlValue, err := url.Parse(*req.Questionnaire)
		if err != nil {
			return echo.NewHTTPError(http.StatusBadRequest, "invalid questionnaire url")
		}
		optionQuestionnaireURL = types.NewOption(values.NewLauncherVersionQuestionnaireURL(urlValue))
	}
	domainEdition, err := edition.editionService.UpdateEdition(
		ctx.Request().Context(),
		values.NewLauncherVersionIDFromUUID(editionID),
		name,
		optionQuestionnaireURL,
	)
	if errors.Is(err, service.ErrInvalidEditionID) {
		return echo.NewHTTPError(http.StatusBadRequest, "invalid edition id")
	}
	if err != nil {
		log.Printf("error: failed to update edition: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to update edition")
	}
	// ErrNoQuestionnaire simply means no questionnaire is attached; only
	// other errors are fatal.
	questionnaireURL, err := domainEdition.GetQuestionnaireURL()
	if err != nil && !errors.Is(err, domain.ErrNoQuestionnaire) {
		log.Printf("error: failed to get questionnaire url: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to get questionnaire url")
	}
	var strQuestionnaireURL *string
	if !errors.Is(err, domain.ErrNoQuestionnaire) {
		v := (*url.URL)(questionnaireURL).String()
		strQuestionnaireURL = &v
	}
	return ctx.JSON(http.StatusOK, openapi.Edition{
		Id:            uuid.UUID(domainEdition.GetID()),
		Name:          string(domainEdition.GetName()),
		Questionnaire: strQuestionnaireURL,
		CreatedAt:     domainEdition.GetCreatedAt(),
	})
}
// GetEditionGames returns the games (each with its assigned game version)
// linked to an edition.
// (GET /editions/{editionID}/games)
func (edition *Edition) GetEditionGames(ctx echo.Context, editionID openapi.EditionIDInPath) error {
	gameVersions, err := edition.editionService.GetEditionGameVersions(ctx.Request().Context(), values.NewLauncherVersionIDFromUUID(editionID))
	if errors.Is(err, service.ErrInvalidEditionID) {
		return echo.NewHTTPError(http.StatusBadRequest, "invalid edition id")
	}
	if err != nil {
		log.Printf("error: failed to get games: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to get games")
	}
	res := make([]openapi.EditionGameResponse, 0, len(gameVersions))
	for _, gameVersion := range gameVersions {
		// A version is delivered either via an external URL or via uploaded
		// platform files; both are optional in the response.
		var resURL *openapi.GameURL
		urlValue, ok := gameVersion.GameVersion.Assets.URL.Value()
		if ok {
			v := (*url.URL)(urlValue).String()
			resURL = &v
		}
		// Only include the files object when at least one platform asset
		// exists.
		var resFiles *openapi.GameVersionFiles
		windows, windowsOk := gameVersion.GameVersion.Assets.Windows.Value()
		mac, macOk := gameVersion.GameVersion.Assets.Mac.Value()
		jar, jarOk := gameVersion.GameVersion.Assets.Jar.Value()
		if windowsOk || macOk || jarOk {
			resFiles = &openapi.GameVersionFiles{}
			if windowsOk {
				v := (uuid.UUID)(windows)
				resFiles.Win32 = &v
			}
			if macOk {
				v := (uuid.UUID)(mac)
				resFiles.Darwin = &v
			}
			if jarOk {
				v := (uuid.UUID)(jar)
				resFiles.Jar = &v
			}
		}
		res = append(res, openapi.EditionGameResponse{
			Id:          uuid.UUID(gameVersion.Game.GetID()),
			Name:        string(gameVersion.Game.GetName()),
			Description: string(gameVersion.Game.GetDescription()),
			CreatedAt:   gameVersion.Game.GetCreatedAt(),
			Version: openapi.GameVersion{
				Id:          uuid.UUID(gameVersion.GameVersion.GetID()),
				Name:        string(gameVersion.GameVersion.GetName()),
				Description: string(gameVersion.GameVersion.GetDescription()),
				CreatedAt:   gameVersion.GameVersion.GetCreatedAt(),
				ImageID:     uuid.UUID(gameVersion.GameVersion.ImageID),
				VideoID:     uuid.UUID(gameVersion.GameVersion.VideoID),
				Url:         resURL,
				Files:       resFiles,
			},
		})
	}
	return ctx.JSON(http.StatusOK, res)
}
// PostEditionGame replaces the set of game versions linked to an edition
// and returns the resulting game list.
// (PATCH /editions/{editionID}/games)
// NOTE(review): the comment says PATCH while the method is named
// PostEditionGame — confirm which verb the generated route actually uses.
func (edition *Edition) PostEditionGame(c echo.Context, editionID openapi.EditionIDInPath) error {
	var req openapi.PatchEditionGameRequest
	err := c.Bind(&req)
	if err != nil {
		return echo.NewHTTPError(http.StatusBadRequest, "invalid request")
	}
	gameVersionIDs := make([]values.GameVersionID, 0, len(req.GameVersionIDs))
	for _, gameVersionID := range req.GameVersionIDs {
		gameVersionIDs = append(gameVersionIDs, values.NewGameVersionIDFromUUID(gameVersionID))
	}
	gameVersions, err := edition.editionService.UpdateEditionGameVersions(
		c.Request().Context(),
		values.NewLauncherVersionIDFromUUID(editionID),
		gameVersionIDs,
	)
	// Map service-level validation failures to 400s; anything else is a 500.
	switch {
	case errors.Is(err, service.ErrInvalidEditionID):
		return echo.NewHTTPError(http.StatusBadRequest, "invalid edition id")
	case errors.Is(err, service.ErrDuplicateGameVersion):
		return echo.NewHTTPError(http.StatusBadRequest, "duplicate game version")
	case errors.Is(err, service.ErrDuplicateGame):
		return echo.NewHTTPError(http.StatusBadRequest, "duplicate game")
	case err != nil:
		log.Printf("error: failed to update edition games: %v\n", err)
		return echo.NewHTTPError(http.StatusInternalServerError, "failed to update edition games")
	}
	res := make([]openapi.EditionGameResponse, 0, len(gameVersions))
	for _, gameVersion := range gameVersions {
		// A version is delivered either via an external URL or via uploaded
		// platform files; both are optional in the response.
		var resURL *openapi.GameURL
		urlValue, ok := gameVersion.GameVersion.Assets.URL.Value()
		if ok {
			v := (*url.URL)(urlValue).String()
			resURL = &v
		}
		// Only include the files object when at least one platform asset
		// exists.
		var resFiles *openapi.GameVersionFiles
		windows, windowsOk := gameVersion.GameVersion.Assets.Windows.Value()
		mac, macOk := gameVersion.GameVersion.Assets.Mac.Value()
		jar, jarOk := gameVersion.GameVersion.Assets.Jar.Value()
		if windowsOk || macOk || jarOk {
			resFiles = &openapi.GameVersionFiles{}
			if windowsOk {
				v := (uuid.UUID)(windows)
				resFiles.Win32 = &v
			}
			if macOk {
				v := (uuid.UUID)(mac)
				resFiles.Darwin = &v
			}
			if jarOk {
				v := (uuid.UUID)(jar)
				resFiles.Jar = &v
			}
		}
		res = append(res, openapi.EditionGameResponse{
			Id:          uuid.UUID(gameVersion.Game.GetID()),
			Name:        string(gameVersion.Game.GetName()),
			Description: string(gameVersion.Game.GetDescription()),
			CreatedAt:   gameVersion.Game.GetCreatedAt(),
			Version: openapi.GameVersion{
				Id:          uuid.UUID(gameVersion.GameVersion.GetID()),
				Name:        string(gameVersion.GameVersion.GetName()),
				Description: string(gameVersion.GameVersion.GetDescription()),
				CreatedAt:   gameVersion.GameVersion.GetCreatedAt(),
				ImageID:     uuid.UUID(gameVersion.GameVersion.ImageID),
				VideoID:     uuid.UUID(gameVersion.GameVersion.VideoID),
				Url:         resURL,
				Files:       resFiles,
			},
		})
	}
	return c.JSON(http.StatusOK, res)
}
|
package main
import "fmt"
// 给定一个整数数组 nums 和一个目标值 target,请你在该数组中找出和为目标值的那 两个 整数,并返回他们的数组下标。
//
//你可以假设每种输入只会对应一个答案。但是,数组中同一个元素不能使用两遍
/*
给定 nums = [2, 7, 11, 15], target = 9
因为 nums[0] + nums[1] = 2 + 7 = 9
所以返回 [0, 1]
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/two-sum
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
*/
// main demonstrates twoSum on a sample input and prints the index pair.
func main() {
	nums := []int{2, 5, 4, 6, 7, 8}
	target := 10
	fmt.Println(twoSum(nums, target))
}
// twoSum returns the indices of the two distinct elements of nums that sum
// to target, or nil when no such pair exists. One pass with a value→index
// map: for each element, check whether its complement was already seen.
func twoSum(nums []int, target int) []int {
	seen := make(map[int]int, len(nums))
	for idx, num := range nums {
		if prev, ok := seen[target-num]; ok {
			return []int{prev, idx}
		}
		seen[num] = idx
	}
	return nil
}
|
package code
import (
"math/rand"
"github.com/telecoda/pico-go/console"
)
// Code must implement console.Cartridge interface
// cartridge embeds BaseCartridge, which supplies the common console
// plumbing; only Init/Update/Render are customized here.
type cartridge struct {
	*console.BaseCartridge
}
// NewCart initialises a struct implementing the console.Cartridge interface.
func NewCart() console.Cartridge {
	cart := &cartridge{
		BaseCartridge: console.NewBaseCart(),
	}
	return cart
}
/* This is the original tweetcart code
s={}w=128 r=rnd for i=1,w do s[i]={}p=s[i]p[1]=r(w)end::a::cls()for i=1,w do p=s[i]pset(p[1],i,i%3+5)p[1]=(p[1]-i%3)%w end flip()goto a
*/
// Init - called once when cart is initialised. This cart needs no one-time
// setup; all state is built inside Render.
func (c *cartridge) Init() {
}
// Update - called once every frame. Unused: Render runs its own loop and
// updates the star positions itself.
func (c *cartridge) Update() {
}
// Render - called once every frame. Ports the PICO-8 tweetcart: one star
// per screen row drifts left at a speed of (row % 3) pixels per frame,
// wrapping around the 128-pixel-wide screen. Note this method loops until
// the console stops rather than returning each frame.
func (c *cartridge) Render() {
	c.ClsWithColor(console.PICO8_BLACK)
	// init stars
	/*
		s={}
		w=128
		r=rnd
		for i=1,w do
			s[i]={}
			p=s[i]
			p[1]=r(w)
		end
	*/
	w := 128
	// s[i] is the x position of the star on row i, starting at random x.
	s := make([]int, w, w)
	for i := 0; i < w; i++ {
		s[i] = rand.Intn(w)
	}
	/*
		cls()
		for i=1,w do
			p=s[i]
			pset(p[1],i,i%3+5)
			p[1]=(p[1]-i%3)%w
		end
	*/
	for c.IsRunning() {
		c.Cls()
		for i := 0; i < w; i++ {
			c.PSetWithColor(s[i], i, console.Color(i%3+5))
			// Go's % can yield a negative result, unlike Lua's; wrap it
			// back into [0, w).
			s[i] = (s[i] - (i % 3)) % w
			if s[i] < 0 {
				s[i] += w
			}
		}
		c.Flip()
	}
}
|
package main
import (
"github.com/globalsign/mgo"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
"github.com/go-chi/render"
"github.com/thimalw/note-ninja-api/user"
)
// routes builds the API router: common middleware first, then the v1
// resource routes mounted under /v1.
func routes(db *mgo.Database) *chi.Mux {
	router := chi.NewRouter()
	// Middleware runs in registration order.
	router.Use(render.SetContentType(render.ContentTypeJSON))
	router.Use(middleware.Logger)
	router.Use(middleware.DefaultCompress)
	router.Use(middleware.RedirectSlashes)
	router.Use(middleware.Recoverer)
	router.Route("/v1", func(r chi.Router) {
		r.Mount("/user", user.Routes(db))
	})
	return router
}
|
package model
import (
"database/sql"
"fmt"
_ "github.com/go-sql-driver/mysql"
help "../helper"
config "../config"
)
// Bdd is the shared MySQL connection handle, initialised by InitBdd.
var Bdd *sql.DB
// TestSql prints a marker line to confirm the package is wired up.
func TestSql() {
	msg := "test sql"
	fmt.Println(msg)
}
// InitBdd opens the MySQL connection using the configured DSN and stores it
// in the package-level Bdd handle. sql.Open does not dial immediately, so
// connection problems surface on first use; help.CheckErr decides how an
// Open error is handled.
func InitBdd() {
	var err error
	Bdd, err = sql.Open("mysql", config.DSN())
	help.CheckErr(err)
}
/** Liste des requêtes :
Récupérer une ressource entière (pour Hydrater)
SELECT
champ.clef AS clef,
champ.id AS champ_id,
regle.nom AS regle,
regle.id AS regle_id,
CONCAT(
'[',
GROUP_CONCAT(
CONCAT(
'{\"id\": ', champ_parametre.id, ', '
'\"type\": \"', parametre.nom, '\", '
'\"value\": \"', champ_parametre.valeur, '\"}'
) ORDER BY regle_parametre.id
),
']'
) AS parametres
FROM ressource
LEFT OUTER JOIN champ ON champ.ressource_id = ressource.id
LEFT OUTER JOIN champ_parametre ON champ_parametre.champ_id = champ.id
LEFT OUTER JOIN regle_parametre ON champ_parametre.regle_parametre_id = regle_parametre.id
LEFT OUTER JOIN regle ON regle_parametre.regle_id = regle.id
LEFT OUTER JOIN parametre ON regle_parametre.parametre_id = parametre.id
WHERE ressource.id = ?
GROUP BY champ_parametre.champ_id
ORDER BY clef
Récupérer un champ entier :
SELECT
champ.clef AS clef,
champ.id AS champ_id,
regle.nom AS regle,
regle.id AS regle_id,
CONCAT(
'[',
GROUP_CONCAT(
CONCAT(
'{\"id\": ', champ_parametre.id, ', '
'\"type\": \"', parametre.nom, '\", '
'\"value\": \"', champ_parametre.valeur, '\"}'
) ORDER BY regle_parametre.id
),
']'
) AS parametres
FROM champ
LEFT OUTER JOIN champ_parametre ON champ_parametre.champ_id = champ.id
LEFT OUTER JOIN regle_parametre ON champ_parametre.regle_parametre_id = regle_parametre.id
LEFT OUTER JOIN regle ON regle_parametre.regle_id = regle.id
LEFT OUTER JOIN parametre ON regle_parametre.parametre_id = parametre.id
WHERE champ.id = ?
GROUP BY champ_parametre.champ_id
Récupérer l'ensemble des règles et leurs paramètres :
SELECT
regle.nom AS regle,
regle.id AS regle_id,
CONCAT(
'[',
GROUP_CONCAT(
CONCAT(
'{\"id\": ', parametre.id, ', '
'\"type\": \"', parametre.nom, '\"}'
) ORDER BY regle_parametre.id
),
']'
) AS parametres
FROM regle
LEFT OUTER JOIN regle_parametre ON regle.id = regle_parametre.regle_id
LEFT OUTER JOIN parametre ON regle_parametre.parametre_id = parametre.id
GROUP BY regle_parametre.regle_id
*/ |
package config
import (
"fmt"
"io/ioutil"
"log"
"regexp"
"sort"
"strings"
"github.com/openshift-scale/perf-analyzer/pkg/result"
"github.com/openshift-scale/perf-analyzer/pkg/utils"
"github.com/openshift/origin/test/extended/cluster/metrics"
)
// ScrapeConfig carries the command line / invocation options for a scrape
// run; it is translated into the internal config by NewConfig.
type ScrapeConfig struct {
	EnablePrometheusFlag bool
	EnablePbenchFlag     bool // when false, NewConfig returns a zero config
	InsecureTLSFlag      bool
	DurationFlag         int
	BlockString          string // comma-separated block device names to extract
	NetString            string // comma-separated network device names to extract
	ProcessString        string // comma-separated process names to extract
	ResultDir            string // where results are written
	SearchDir            string // where pbench results are searched
	StepFlag             string
	TokenFlag            string
	UrlFlag              string
}

// config is the internal runtime state built from a ScrapeConfig.
type config struct {
	searchDir  string              // pbench search dir, normalized to a trailing slash
	resultDir  string              // output dir, normalized to a trailing slash
	fileHeader map[string][]string // CSV filename -> column headers to extract
	hosts      []result.Host       // per-host results, populated by Init and Process
	Metrics    []metrics.Metrics   // run metrics loaded by Process
	keys       []string            // sorted fileHeader keys, for deterministic output
}
// NewConfig returns a new configuration struct that contains all fields that
// we need. When pbench scraping is disabled, the zero-value config is
// returned unchanged.
func NewConfig(cfg ScrapeConfig) config {
	if !cfg.EnablePbenchFlag {
		return config{}
	}
	c := config{
		searchDir:  utils.TrailingSlash(cfg.SearchDir),
		resultDir:  utils.TrailingSlash(cfg.ResultDir),
		fileHeader: map[string][]string{},
	}
	c.addHeaders(cfg.BlockString, cfg.NetString, cfg.ProcessString)
	return c
}
// addHeaders maps the result CSV filenames to the device/process column
// headers we want to extract, based on the comma-separated command line
// flags. Empty flag values are skipped entirely: strings.Split("", ",")
// yields [""] (length 1), so the previous len() > 0 checks always passed and
// registered a bogus empty header for every file.
func (c *config) addHeaders(blockString, netString, processString string) {
	// If no block devices were passed, don't add to search.
	if blockString != "" {
		c.fileHeader["disk_IOPS.csv"] = strings.Split(blockString, ",")
	}
	// If no network devices were passed, don't add to search.
	if netString != "" {
		netDevices := strings.Split(netString, ",")
		c.fileHeader["network_l2_network_packets_sec.csv"] = netDevices
		c.fileHeader["network_l2_network_Mbits_sec.csv"] = netDevices
	}
	// If no process names were passed, don't add to search.
	if processString != "" {
		processList := strings.Split(processString, ",")
		c.fileHeader["cpu_usage_percent_cpu.csv"] = processList
		c.fileHeader["memory_usage_resident_set_size.csv"] = processList
	}
}
// Init creates the initial host structures with the Kind and ResultDir for
// each pbench host result directory found directly under searchDir.
// (Comment previously referred to a stale "InitHosts" name.)
func (c *config) Init() {
	// This regexp matches the prefix to each pbench host result directory name
	// which indicates host type. (ie. svt-master-1:pbench-benchmark-001/)
	hostRegex := regexp.MustCompile(`svt[_-][ceilmn]\w*[_-]\d`)
	// Return directory listing of searchDir
	dirList, err := ioutil.ReadDir(c.searchDir)
	if err != nil {
		log.Fatal(err)
	}
	// Iterate over directory contents
	for _, item := range dirList {
		// Match subdirectory that follows our pattern
		if hostRegex.MatchString(item.Name()) && item.IsDir() {
			// The part before ':' (e.g. "svt-master-1") identifies the host kind.
			Kind := strings.Split(item.Name(), ":")
			newHost := result.Host{
				Kind:      Kind[0],
				ResultDir: c.searchDir + item.Name(),
			}
			c.hosts = append(c.hosts, newHost)
		}
	}
}
// addKeys builds a sorted slice of fileHeader's keys so CSV output columns
// appear in a deterministic order across executions (Go map iteration order
// is random). The slice is rebuilt from scratch so a repeated call cannot
// append duplicate keys, and it is pre-sized to avoid growth copies.
func (c *config) addKeys() {
	c.keys = make([]string, 0, len(c.fileHeader))
	for k := range c.fileHeader {
		c.keys = append(c.keys, k)
	}
	sort.Strings(c.keys)
}
// Process does the bulk of the math: for every host and every registered
// CSV it reads the raw data, extracts the requested columns and stores the
// computed stats on the host, then loads the run's Metrics JSON from the
// search directory.
func (c *config) Process() {
	c.addKeys()
	for i, host := range c.hosts {
		// Find each raw data CSV
		for _, key := range c.keys {
			fileList := utils.FindFile(host.ResultDir, key)
			// FindFile returns slice, though there should only be one file
			for _, file := range fileList {
				// Parse file into 2d-string slice
				sliceResult, err := utils.ReadCSV(file)
				if err != nil {
					fmt.Printf("Error reading %v: %v\n", file, err)
					continue
				}
				// In a single file we have multiple headers to extract
				for _, header := range c.fileHeader[key] {
					// Extract single column of data that we want
					newResult, err := result.NewSlice(sliceResult, header)
					if err != nil {
						// Deliberately no continue here: the column list must
						// stay identical for all host types, so the (empty)
						// result is still recorded below.
						//continue
						fmt.Printf("NewSlice returned error: %v\n", err)
					}
					// Mutate host to add calculated stats to object
					c.hosts[i].AddResult(newResult, file, header, key)
				}
			}
		}
	}
	// Metrics are optional: a failure is logged but does not abort processing.
	var m []metrics.Metrics
	err := utils.GetMetrics(c.searchDir, &m)
	if err != nil {
		fmt.Printf("Error getting Metrics: %v\n", err)
	} else {
		c.Metrics = m
	}
}
// WriteToDisk persists the computed results under the configured result
// directory, first as a CSV and then as a JSON file. The first failure is
// returned immediately.
func (c *config) WriteToDisk() error {
	if err := utils.WriteCSV(c.resultDir, c.keys, c.fileHeader, c.hosts); err != nil {
		return err
	}
	return utils.WriteJSON(c.resultDir, result.Result{Hosts: c.hosts, Metrics: c.Metrics})
}
|
/**
* @Author: lzw5399
* @Date: 2021/1/14 22:13
* @Desc: 流程的定义
*/
package model
// Process is the workflow definition table.
type Process struct {
	EntityBase
	Code     string `json:"code" gorm:"uniqueIndex"`
	Name     string `json:"name"`              // workflow name
	Category string `json:"category"`          // workflow category
	Version  int    `json:"version,omitempty"` // version number
	Resource string `json:"resource"`          // complete BPMN XML string defining the workflow
}
|
package dsky
import (
"fmt"
"strings"
"testing"
"github.com/spf13/cobra"
)
// fakeCLI assembles a small CLI for testing: a root command plus two
// subcommands, each registered under its own help topic (cmd1 primary,
// cmd2 not).
func fakeCLI() *CLI {
	root := &cobra.Command{
		Short: "root short",
		Use:   "root",
		Run: func(cmd *cobra.Command, args []string) {
			cmd.Help()
		},
	}
	cli := New(root)
	for _, spec := range []struct {
		name    string
		primary bool
	}{
		{"cmd1", true},
		{"cmd2", false},
	} {
		cmd := &cobra.Command{
			Use:   spec.name,
			Short: spec.name + "-short",
			Run: func(cmd *cobra.Command, args []string) {
				fmt.Printf("inside cmd with args: %v\n", args)
			},
		}
		cli.AddCommand(cmd).AddTopic(spec.name, spec.name+"-topic", spec.primary)
	}
	return cli.Bind()
}
// TestUsageFunc verifies the generated usage string mentions both command
// topics and the root usage line.
func TestUsageFunc(t *testing.T) {
	usage := fakeCLI().Root().UsageString()
	for _, want := range []string{"cmd1-topic", "cmd2-topic", "Usage: root"} {
		if !strings.Contains(usage, want) {
			t.Fatalf("expected %s, got:\n%v\n", want, usage)
		}
	}
}
|
package main
import (
"fmt"
"os"
"path"
"strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
ref "k8s.io/client-go/tools/reference"
)
// finalizerName is attached to PVCs we manage so their deletion blocks until
// our cleanup (data directory and PV removal) has run.
const finalizerName = "nect.com/rook-cephfs-provisioner"

// Provisioner is a k8s client which is able to provision persistent volumes for rook's cephfs.
type Provisioner struct {
	client           clientset.Interface
	storageClass     string // the storage class this provisioner serves
	fsName           string // rook cephfs filesystem name, passed to the flexvolume driver
	clusterNamespace string // namespace of the rook cluster
	localPath        string // local mount point under which per-PVC dirs are created
}
// NewProvisioner creates a new k8s volume provisioner for rook's cephfs.
func NewProvisioner(client clientset.Interface, storageClass string, fsName string, clusterNamespace string, localPath string) *Provisioner {
	p := &Provisioner{}
	p.client = client
	p.storageClass = storageClass
	p.fsName = fsName
	p.clusterNamespace = clusterNamespace
	p.localPath = localPath
	return p
}
// Handle handles the passed pvc: claims for other (or missing) storage
// classes are ignored, deleted claims are cleaned up, and pending claims get
// a matching pv created. Claims in any other phase are skipped.
func (p *Provisioner) Handle(pvc *v1.PersistentVolumeClaim) error {
	if pvc.Spec.StorageClassName == nil {
		glog.Warningf("ignored pvc `%s` in namespace `%s` since it's selected storage class is nil", pvc.Name, pvc.Namespace)
		// A nil class can never match ours; previously this fell through and
		// processed the claim anyway despite logging "ignored".
		return nil
	}
	if *pvc.Spec.StorageClassName != p.storageClass {
		// Dereference the pointer so the log shows the class name, not an address.
		glog.V(3).Infof("ignored pvc `%s` in namespace `%s` since it's selected storage class `%s` doesn't match `%s`", pvc.Name, pvc.Namespace, *pvc.Spec.StorageClassName, p.storageClass)
		return nil
	}
	if pvc.GetDeletionTimestamp() != nil { // TODO: check if our finalizer is first in list
		err := p.handleDeletion(pvc)
		if err != nil {
			return fmt.Errorf("couldn't delete pvc, see: %v", err)
		}
		return nil
	}
	if pvc.Status.Phase != v1.ClaimPending {
		glog.V(3).Infof("skipping pvc `%s` in namespace `%s` since it's not in phase pending", pvc.Name, pvc.Namespace)
		return nil
	}
	return p.handleUnboundPVC(pvc)
}
// handleUnboundPVC provisions storage for a pending claim: it attaches our
// finalizer, creates the backing directory on the local cephfs mount and
// deploys a matching PV to the cluster.
func (p *Provisioner) handleUnboundPVC(pvc *v1.PersistentVolumeClaim) error {
	glog.V(3).Infof("started binding of pvc `%s` in namespace `%s`", pvc.Name, pvc.Namespace)
	// Attach our finalizer so deletion waits for our cleanup (a no-op when
	// the finalizer is already present).
	pvc, err := p.addFinalizerToPVC(pvc)
	if err != nil {
		return fmt.Errorf("couldn't append finalizer to pvc, see: %v", err)
	}
	// Create /storage/pvc-fooo-bar-baz-qux
	err = p.createLocalPathForPVC(pvc)
	if err != nil {
		return fmt.Errorf("couldn't create localPath, see: %v", err)
	}
	// Create PV and deploy to k8s
	_, err = p.createPVforPVC(pvc)
	if err != nil {
		return fmt.Errorf("couldn't create pv for pvc, see: %v", err)
	}
	glog.V(3).Infof("finished binding of pvc `%s` in namespace `%s`", pvc.Name, pvc.Namespace)
	return nil
}
// createLocalPathForPVC ensures the backing directory for the pvc exists
// below the local cephfs mount; creation is skipped when it is already there.
func (p *Provisioner) createLocalPathForPVC(pvc *v1.PersistentVolumeClaim) error {
	localPath := p.getLocalPathForPVC(pvc)
	if _, err := os.Stat(localPath); !os.IsNotExist(err) {
		glog.V(4).Infof("local path `%s` already existing for pvc `%s` in namespace `%s`", localPath, pvc.Name, pvc.Namespace)
		return nil
	}
	glog.V(4).Infof("creating local path `%s` for pvc `%s` in namespace `%s`", localPath, pvc.Name, pvc.Namespace)
	if err := os.Mkdir(localPath, 0777); err != nil {
		return fmt.Errorf("couldn't create pvc folder at `%s`, see: %v", localPath, err)
	}
	glog.V(4).Infof("created local path `%s` for pvc `%s` in namespace `%s`", localPath, pvc.Name, pvc.Namespace)
	return nil
}
// isPVAlreadyExistsError reports whether err indicates that the given pv
// already exists in the cluster. The apiserver message is matched by
// substring; strings.Contains replaces the old Index != -1 form, and a nil
// err short-circuits before the message is even built.
func (p *Provisioner) isPVAlreadyExistsError(pv *v1.PersistentVolume, err error) bool {
	if err == nil {
		return false
	}
	errMsg := fmt.Sprintf("persistentvolumes \"%s\" already exists", pv.Name)
	return strings.Contains(err.Error(), errMsg)
}
// createPVforPVC builds the PV for pvc and deploys it to the cluster. An
// "already exists" answer from the apiserver is treated as success, making
// the call effectively idempotent. (The old TOOD note about handling that
// case was stale — it is handled below.)
func (p *Provisioner) createPVforPVC(pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
	glog.V(4).Infof("creating pv for pvc `%s` in namespace `%s`", pvc.Name, pvc.Namespace)
	pv, err := p.newPVforPVC(pvc)
	if err != nil {
		return nil, fmt.Errorf("couldn't create pv, see: %v", err)
	}
	newPV, err := p.client.CoreV1().PersistentVolumes().Create(pv)
	if p.isPVAlreadyExistsError(pv, err) {
		// NOTE: the locally built pv is returned here, not the object stored
		// in the cluster.
		glog.V(4).Infof("skipping creation of pv `%s` for pvc `%s` in namespace `%s` since it already exists", pv.Name, pvc.Name, pvc.Namespace)
		return pv, nil
	} else if err != nil {
		return nil, fmt.Errorf("couldn't deploy pv, see: %v", err)
	}
	glog.V(4).Infof("created pv `%s` for pvc `%s` in namespace `%s`", pv.Name, pvc.Name, pvc.Namespace)
	return newPV, nil
}
// addFinalizerToPVC ensures our finalizer is present on the pvc, pushing an
// update to the API server only when it had to be appended. The (possibly
// updated) pvc is returned.
func (p *Provisioner) addFinalizerToPVC(pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
	for _, f := range pvc.GetFinalizers() {
		if f == finalizerName {
			// Already attached; nothing to do.
			return pvc, nil
		}
	}
	pvc.Finalizers = append(pvc.Finalizers, finalizerName)
	updated, err := p.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(pvc)
	if err != nil {
		return nil, fmt.Errorf("couldn't update pvc, see: %v", err)
	}
	return updated, nil
}
// tryRemoveFinalizerFromPVC removes our finalizer from the pvc when it sits
// at the head of the finalizer list (the only position handleDeletion acts
// on) and pushes the change to the API server. Note the Update call is
// issued even when no finalizer was removed.
func (p *Provisioner) tryRemoveFinalizerFromPVC(pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
	finalizers := pvc.GetFinalizers()
	if len(finalizers) == 1 && finalizers[0] == finalizerName {
		pvc.Finalizers = nil
	} else if len(finalizers) > 1 && finalizers[0] == finalizerName {
		pvc.Finalizers = pvc.Finalizers[1:]
	}
	newPVC, err := p.client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(pvc)
	if err != nil {
		return nil, fmt.Errorf("couldn't update pvc, see: %v", err)
	}
	return newPVC, nil
}
// handleDeletion tears down everything we provisioned for a pvc that is
// being deleted: the data directory on cephfs, the PV object, and finally
// our finalizer, which unblocks the pvc's actual deletion.
func (p *Provisioner) handleDeletion(pvc *v1.PersistentVolumeClaim) error {
	// Check whether it's our time to finalize the pvc: we only act once our
	// finalizer is at the head of the list.
	finalizers := pvc.GetFinalizers()
	if len(finalizers) > 0 && finalizers[0] != finalizerName {
		glog.V(3).Infof("skipping cleanup of pvc `%s` in namespace `%s` since there are other finalizers", pvc.Name, pvc.Namespace)
		return nil
	}
	glog.V(3).Infof("started cleanup of pvc `%s` in namespace `%s`", pvc.Name, pvc.Namespace)
	// Remove files from cephfs
	path := p.getLocalPathForPVC(pvc)
	err := os.RemoveAll(path)
	if err != nil {
		return fmt.Errorf("couldn't delete `%s`, see: %v", path, err)
	}
	// Remove PV
	err = p.client.CoreV1().PersistentVolumes().Delete(p.getPVNameForPVC(pvc), &metav1.DeleteOptions{})
	if err != nil {
		return fmt.Errorf("couldn't delete pv, see: %v", err)
	}
	// Remove finalizer last, so a failure in either step above leaves the
	// pvc in place for a retry.
	pvc, err = p.tryRemoveFinalizerFromPVC(pvc)
	if err != nil {
		return fmt.Errorf("couldn't remove finalizer from pvc, see: %v", err)
	}
	glog.V(3).Infof("finished cleanup of pvc `%s` in namespace `%s`", pvc.Name, pvc.Namespace)
	return nil
}
// getLocalPathForPVC returns the on-disk directory backing the pvc, rooted
// at the provisioner's local cephfs mount.
func (p *Provisioner) getLocalPathForPVC(pvc *v1.PersistentVolumeClaim) string {
	name := p.getPVNameForPVC(pvc)
	return path.Join(p.localPath, name)
}

// getPVNameForPVC derives the deterministic PV name ("pvc-<uid>") for the
// given claim.
func (p *Provisioner) getPVNameForPVC(pvc *v1.PersistentVolumeClaim) string {
	return "pvc-" + string(pvc.UID)
}
// newPVforPVC builds (but does not deploy) the PersistentVolume object for
// pvc: a rook cephfs flexvolume sized from the claim's storage request and
// pre-bound to the claim via ClaimRef.
func (p *Provisioner) newPVforPVC(pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
	claimRef, err := ref.GetReference(scheme.Scheme, pvc)
	if err != nil {
		return nil, fmt.Errorf("couldn't get reference to pvc, see: %v", err)
	}
	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: p.getPVNameForPVC(pvc),
		},
		Spec: v1.PersistentVolumeSpec{
			StorageClassName: p.storageClass,
			AccessModes:      pvc.Spec.AccessModes,
			Capacity: v1.ResourceList{
				// TODO set ceph.quota.max_bytes on volume path.
				v1.ResourceName(v1.ResourceStorage): pvc.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)],
			},
			ClaimRef: claimRef,
			PersistentVolumeSource: v1.PersistentVolumeSource{
				FlexVolume: &v1.FlexPersistentVolumeSource{
					Driver: "ceph.rook.io/rook",
					FSType: "ceph",
					Options: map[string]string{
						"clusterNamespace": p.clusterNamespace,
						"fsName":           p.fsName,
						// The volume lives at /<pv-name> inside the filesystem.
						"path": "/" + p.getPVNameForPVC(pvc),
					},
				},
			},
		},
	}, nil
}
|
package action
import (
"context"
"strings"
"github.com/hidayatullahap/go-monorepo-example/cmd/auth_service/entity"
"github.com/hidayatullahap/go-monorepo-example/pkg"
"github.com/hidayatullahap/go-monorepo-example/pkg/errors"
"github.com/hidayatullahap/go-monorepo-example/pkg/grpc/codes"
"google.golang.org/grpc/status"
)
// Login authenticates a user by username/password, issues a fresh token,
// stores it via the auth repository and returns it. A repository NotFound is
// translated into the same InvalidArgument error as a password mismatch, so
// callers cannot distinguish unknown usernames from wrong passwords.
func (a *AuthAction) Login(ctx context.Context, user entity.User) (string, error) {
	user.Username = strings.ToLower(user.Username)
	stored, err := a.authRepo.FindUser(ctx, user)
	if err != nil {
		if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound {
			return "", errors.InvalidArgument("username or password not match")
		}
		return "", err
	}
	if !pkg.ComparePasswords(stored.Password, []byte(user.Password)) {
		return "", errors.InvalidArgument("username or password not match")
	}
	token, err := pkg.GenerateToken(user.Username)
	if err != nil {
		return "", err
	}
	upsert := entity.Token{
		UserID:   stored.ID,
		Username: user.Username,
		Token:    token,
	}
	if err := a.authRepo.UpdateToken(ctx, upsert); err != nil {
		return token, err
	}
	return token, nil
}
|
package level_ip
import (
"io"
"log"
"os"
"os/exec"
"strings"
"syscall"
"unsafe"
)
// Flag values for the TUNSETIFF ioctl, mirroring linux/if_tun.h.
const (
	cIFF_TUN         = 0x0001 // TUN device (IP packets, no Ethernet header)
	cIFF_TAP         = 0x0002 // TAP device (raw Ethernet frames)
	cIFF_NOPI        = 0x1000 // do not prepend packet information
	cIFF_MULTI_QUEUE = 0x0100 // multiqueue interface
)

// TunTap wraps the open device file of a tun/tap interface.
type TunTap struct {
	Dev *os.File
}

// ifReq mirrors the C struct ifreq handed to TUNSETIFF: a 16-byte interface
// name, the flag word, and padding up to the 0x28-byte union size.
type ifReq struct {
	Name  [0x10]byte
	Flags uint16
	pad   [0x28 - 0x10 - 2]byte
}

// TunInterface is a created tun/tap interface: its kernel-assigned name plus
// the read/write handle for the packet stream.
type TunInterface struct {
	isTap bool
	io.ReadWriteCloser
	name string
}
// createInterface issues TUNSETIFF on the open clone-device fd to create (or
// attach to) the interface described by name and flags. The kernel may
// adjust the requested name; the actual name is returned with trailing NUL
// bytes stripped.
func createInterface(fd uintptr, name string, flags uint16) (createdIFName string, err error) {
	var req ifReq
	req.Flags = flags
	copy(req.Name[:], name)
	// ioctl is a helper defined elsewhere in this package.
	err = ioctl(fd, syscall.TUNSETIFF, uintptr(unsafe.Pointer(&req)))
	if err != nil {
		return
	}
	createdIFName = strings.Trim(string(req.Name[:]), "\x00")
	return
}
// newTap creates a TAP network interface with the requested name and returns
// a handle wrapping its file descriptor.
func newTap(name string) (ifce *TunInterface, err error) {
	// TAP devices are created through the TUN/TAP clone device /dev/net/tun
	// with the IFF_TAP flag; the previously used /dev/net/tap node does not
	// exist on stock kernels.
	file, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0)
	if err != nil {
		return nil, err
	}
	flags := uint16(cIFF_TAP | cIFF_NOPI)
	createdName, err := createInterface(file.Fd(), name, flags)
	if err != nil {
		// Don't leak the clone-device fd when the ioctl fails.
		file.Close()
		return nil, err
	}
	// optional: setting device options
	// not implemented
	ifce = &TunInterface{name: createdName, isTap: true, ReadWriteCloser: file}
	return ifce, nil
}
// newTun would create a TUN (IP-level) interface; only TAP devices are
// supported so far, so calling this aborts the process.
func newTun(name string) (ifce *TunInterface, err error) {
	log.Fatal("newTun not implemented")
	return nil, nil
}
// setIfUp brings the named link up via the iproute2 "ip" tool.
func setIfUp(devName string) error {
	return exec.Command("ip", "link", "set", "dev", devName, "up").Run()
}

// setIfRoute routes the given CIDR block through the named device via the
// iproute2 "ip" tool.
func setIfRoute(devName string, cidr string) error {
	return exec.Command("ip", "route", "add", "dev", devName, cidr).Run()
}
// tunInit creates a TAP device named devName, brings it up and routes
// 10.0.0.0/24 through it. The device is closed again when any setup step
// fails, so no fd is leaked on the error paths.
func tunInit(devName string) (*TunInterface, error) {
	ifce, err := newTap(devName)
	if err != nil {
		return nil, err
	}
	if err := setIfUp(ifce.name); err != nil {
		ifce.Close()
		return nil, err
	}
	if err := setIfRoute(ifce.name, "10.0.0.0/24"); err != nil {
		ifce.Close()
		return nil, err
	}
	return ifce, nil
}
|
package main
import (
"flag"
"log"
"os"
"strconv"
"github.com/hattorious/echoserver/http"
"github.com/hattorious/echoserver/tcp"
"github.com/hattorious/echoserver/udp"
"github.com/hattorious/echoserver/version"
)
var (
	// verbose enables verbose logging in every echo server.
	verbose bool
	// ports collects the listen ports per protocol; the HTTP ones stay
	// strings (they are handed to the HTTP servers as-is), TCP/UDP are
	// numeric.
	ports struct {
		http1 string
		http2 string
		tcp   int
		udp   int
	}
)
// init registers the command line flags, defaulting each port from its
// environment variable before falling back to the hard-coded value.
func init() {
	flag.BoolVar(&verbose, "verbose", false, "enable verbose logging")
	flag.StringVar(&ports.http1, "http1", getStringEnv("ECHO_HTTP1_PORT", "8001"), "give me a port number for HTTP/1")
	flag.StringVar(&ports.http2, "http2", getStringEnv("ECHO_HTTP2_CLEARTEXT_PORT", "8002"), "give me a port number for HTTP/2 over clear-text")
	// Help text previously said "UDP" here — copy/paste slip.
	flag.IntVar(&ports.tcp, "tcp", getIntEnv("ECHO_TCP_PORT", 8006), "give me a port number for TCP")
	flag.IntVar(&ports.udp, "udp", getIntEnv("ECHO_UDP_PORT", 8007), "give me a port number for UDP")
}
// main starts the four echo servers. HTTP/1, h2c and TCP run on their own
// goroutines; the UDP server runs in the foreground and keeps the process
// alive.
func main() {
	flag.Parse()
	log.Println(version.ServerID)
	go http.StartHttpServer(ports.http1, verbose)
	go http.StartHttp2CleartextServer(ports.http2, verbose)
	go tcp.StartTCPServer(ports.tcp, verbose)
	udp.StartUDPServer(ports.udp, verbose)
}
// getStringEnv returns the value of the environment variable key, or
// fallback when the variable is unset or empty.
func getStringEnv(key string, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}
// getIntEnv returns the value of the environment variable key parsed as an
// int, or fallback when the variable is unset or empty. A set-but-unparsable
// value aborts the process via log.Fatalf.
func getIntEnv(key string, fallback int) int {
	raw := os.Getenv(key)
	if raw == "" {
		return fallback
	}
	n, err := strconv.Atoi(raw)
	if err != nil {
		log.Fatalf("ERROR: couldn't decode environment variable %s=%s to a port number; %s", key, raw, err)
	}
	return n
}
|
package types
import (
"gopkg.in/mgo.v2/bson"
)
// Item is a stock item as stored in MongoDB and serialized over the API.
type Item struct {
	ID       bson.ObjectId `bson:"_id,omitempty" json:"id,omitempty"`
	Name     string        `bson:"name" json:"name,omitempty"`
	Price    int           `bson:"price" json:"price,omitempty"`
	Quantity int           `bson:"quantity" json:"quantity,omitempty"`
}

// ItemList is a wrapper document holding a collection of Items.
type ItemList struct {
	Items []Item `bson:"items" json:"items,omitempty"`
}
|
package parser
import (
"unicode"
"unicode/utf8"
)
// isLetter reports whether ch may appear in an identifier: ASCII letters,
// underscore, or any non-ASCII Unicode letter.
func isLetter(ch rune) bool {
	switch {
	case ch == '_':
		return true
	case 'a' <= ch && ch <= 'z', 'A' <= ch && ch <= 'Z':
		return true
	default:
		return ch >= utf8.RuneSelf && unicode.IsLetter(ch)
	}
}
// isDigit reports whether ch is an ASCII digit or any non-ASCII Unicode
// decimal digit.
func isDigit(ch rune) bool {
	if '0' <= ch && ch <= '9' {
		return true
	}
	return ch >= utf8.RuneSelf && unicode.IsDigit(ch)
}
const bom = 0xFEFF // byte order mark, only permitted as very first char

// Scanner is a minimal lexer over a byte slice, producing bracket and
// identifier tokens via Scan.
type Scanner struct {
	rdOffset int // reading offset (position after current character)
	offset   int // character offset
	src      []byte
	ch       rune // current character; -1 means end-of-file
}

// NewScanner returns a Scanner over src, primed on the first character.
func NewScanner(src []byte) *Scanner {
	s := &Scanner{src: src}
	s.next()
	return s
}
// Token is the set of lexical tokens produced by the Scanner.
type Token int

const (
	ILLEGAL Token = iota // unrecognized character
	EOF                  // end of input
	LBRACK               // [
	RBRACK               // ]
	IDENT                // identifier or number
)

// tokenNames maps each Token value to its printable name; the indexes line
// up with the constants above.
var tokenNames = [...]string{
	ILLEGAL: "ILLEGAL",
	EOF:     "EOF",
	LBRACK:  "LBRACK",
	RBRACK:  "RBRACK",
	IDENT:   "IDENT",
}

// String returns the name of the token, or "UNKNOWN" for values outside the
// declared range.
func (t Token) String() string {
	if t < 0 || int(t) >= len(tokenNames) {
		return "UNKNOWN"
	}
	return tokenNames[t]
}
// next reads the next Unicode char into s.ch. s.ch < 0 means end-of-file.
// The commented-out fragments are the line/error bookkeeping of the
// go/scanner code this was trimmed down from.
func (s *Scanner) next() {
	if s.rdOffset < len(s.src) {
		s.offset = s.rdOffset
		//if s.ch == '\n' {
		//	s.lineOffset = s.offset
		//	s.file.AddLine(s.offset)
		//}
		r, w := rune(s.src[s.rdOffset]), 1
		switch {
		case r == 0:
			// NUL passes through silently in this trimmed version.
			//s.error(s.offset, "illegal character NUL")
		case r >= utf8.RuneSelf:
			// not ASCII
			r, w = utf8.DecodeRune(s.src[s.rdOffset:])
			if r == utf8.RuneError && w == 1 {
				//s.error(s.offset, "illegal UTF-8 encoding")
			} else if r == bom && s.offset > 0 {
				//s.error(s.offset, "illegal byte order mark")
			}
		}
		s.rdOffset += w
		s.ch = r
	} else {
		s.offset = len(s.src)
		//if s.ch == '\n' {
		//	s.lineOffset = s.offset
		//	s.file.AddLine(s.offset)
		//}
		s.ch = -1 // eof
	}
}
// skipWhitespace advances past spaces, tabs, newlines and carriage returns.
func (s *Scanner) skipWhitespace() {
	for {
		switch s.ch {
		case ' ', '\t', '\n', '\r':
			s.next()
		default:
			return
		}
	}
}
// scanIdentifier consumes a run of letters and digits starting at the
// current character and returns the consumed bytes. The caller must ensure
// s.ch is a letter or digit on entry.
func (s *Scanner) scanIdentifier() []byte {
	offs := s.offset
	for isLetter(s.ch) || isDigit(s.ch) {
		s.next()
	}
	return s.src[offs:s.offset]
}
// Scan returns the next token and, for IDENT, its literal bytes.
// Identifiers and numbers both come back as IDENT; '[' and ']' map to
// LBRACK/RBRACK; end of input yields EOF. Any other character leaves tok at
// its zero value (ILLEGAL), and the scanner still advances so callers cannot
// loop forever on bad input.
func (s *Scanner) Scan() (tok Token, lit []byte) {
	s.skipWhitespace()
	switch ch := s.ch; {
	case isLetter(ch):
		tok = IDENT
		lit = s.scanIdentifier()
	case isDigit(ch):
		// Numbers are folded into IDENT as well.
		tok = IDENT
		lit = s.scanIdentifier()
	default:
		s.next() // always make progress
		switch ch {
		case -1:
			tok = EOF
		case '[':
			tok = LBRACK
		case ']':
			tok = RBRACK
		}
	}
	return tok, lit
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package accountmanager
import (
"context"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/accountmanager"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/testing"
)
// init registers the AddAccountFromOGB test with ash and lacros browser
// variants on both ARC container ("android_p") and ARC VM boards.
func init() {
	testing.AddTest(&testing.Test{
		Func:         AddAccountFromOGB,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Verify that a secondary account can be added from One Google Bar",
		Contacts:     []string{"anastasiian@chromium.org", "team-dent@google.com"},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Params: []testing.Param{{
			ExtraSoftwareDeps: []string{"android_p"},
			Fixture:           "loggedInToChromeAndArc",
			Val:               browser.TypeAsh,
		}, {
			Name:              "vm",
			ExtraSoftwareDeps: []string{"android_vm"},
			Fixture:           "loggedInToChromeAndArc",
			Val:               browser.TypeAsh,
		}, {
			Name:              "lacros",
			ExtraSoftwareDeps: []string{"android_p", "lacros"},
			Fixture:           "loggedInToChromeAndArcWithLacros",
			Val:               browser.TypeLacros,
		}, {
			Name:              "vm_lacros",
			ExtraSoftwareDeps: []string{"android_vm", "lacros"},
			Fixture:           "loggedInToChromeAndArcWithLacros",
			Val:               browser.TypeLacros,
		}},
		// Credentials for the secondary account come from runner variables.
		VarDeps: []string{"accountmanager.username1", "accountmanager.password1"},
		Timeout: 7 * time.Minute,
	})
}
// AddAccountFromOGB adds a secondary Google account through the One Google
// Bar "Add another account" flow, then verifies the account shows up in OS
// Settings, in OGB, and checks its expected presence in ARC (absent for
// Lacros variants).
func AddAccountFromOGB(ctx context.Context, s *testing.State) {
	username := s.RequiredVar("accountmanager.username1")
	password := s.RequiredVar("accountmanager.password1")
	// Reserve one minute for various cleanup.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, time.Minute)
	defer cancel()
	cr := s.FixtValue().(accountmanager.FixtureData).Chrome()
	// Setup the browser.
	br, closeBrowser, err := browserfixt.SetUp(ctx, cr, s.Param().(browser.Type))
	if err != nil {
		s.Fatal("Failed to setup chrome: ", err)
	}
	defer closeBrowser(cleanupCtx)
	// Connect to Test API to use it with the UI library.
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to connect Test API: ", err)
	}
	defer func(ctx context.Context) {
		s.Log("Running test cleanup")
		if err := accountmanager.TestCleanup(ctx, tconn, cr); err != nil {
			s.Fatal("Failed to do cleanup: ", err)
		}
	}(cleanupCtx)
	defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "add_account_from_ogb")
	ui := uiauto.New(tconn).WithTimeout(time.Minute)
	// Prepare an ARC UI Automator session so account presence in Android can
	// be verified at the end.
	a := s.FixtValue().(accountmanager.FixtureData).ARC
	defer a.DumpUIHierarchyOnError(ctx, s.OutDir(), s.HasError)
	arcDevice, err := a.NewUIDevice(ctx)
	if err != nil {
		s.Fatal("Failed to initialize UI Automator: ", err)
	}
	defer arcDevice.Close(ctx)
	if err := accountmanager.OpenOneGoogleBar(ctx, tconn, br); err != nil {
		s.Fatal("Failed to open OGB: ", err)
	}
	if err := clickAddAccount(ctx, ui); err != nil {
		s.Fatal("Failed to find add account link: ", err)
	}
	// ARC toggle should NOT be checked.
	if err := accountmanager.CheckARCToggleStatus(ctx, tconn, s.Param().(browser.Type), false); err != nil {
		s.Fatal("Failed to check ARC toggle status: ", err)
	}
	s.Log("Adding a secondary Account")
	if err := accountmanager.AddAccount(ctx, tconn, username, password); err != nil {
		s.Fatal("Failed to add a secondary Account: ", err)
	}
	s.Log("Verifying that account is present in OS Settings")
	// Find "More actions, <email>" button to make sure that account was added.
	moreActionsButton := nodewith.Name("More actions, " + username).Role(role.Button)
	if err := uiauto.Combine("Click Add Google Account button",
		accountmanager.OpenAccountManagerSettingsAction(tconn, cr),
		ui.WaitUntilExists(moreActionsButton),
	)(ctx); err != nil {
		s.Fatal("Failed to find More actions button: ", err)
	}
	// Check that account is present in OGB.
	s.Log("Verifying that account is present in OGB")
	secondaryAccountListItem := nodewith.NameContaining(username).Role(role.Link)
	if err := accountmanager.CheckOneGoogleBar(ctx, tconn, br, ui.WaitUntilExists(secondaryAccountListItem)); err != nil {
		s.Fatal("Failed to check that account is present in OGB: ", err)
	}
	// Account is expected to be not present in ARC only if browser type is Lacros. The feature is being applied only if Lacros is enabled.
	expectedPresentInArc := s.Param().(browser.Type) != browser.TypeLacros
	if err := accountmanager.CheckIsAccountPresentInARCAction(tconn, arcDevice,
		accountmanager.NewARCAccountOptions(username).ExpectedPresentInARC(expectedPresentInArc))(ctx); err != nil {
		s.Fatalf("Failed to check if account is present in ARC, expected '%t', err: %v", expectedPresentInArc, err)
	}
}
// clickAddAccount clicks the OGB "Add another account" link repeatedly until
// the account addition dialog shows up.
func clickAddAccount(ctx context.Context, ui *uiauto.Context) error {
	link := nodewith.Name("Add another account").Role(role.Link)
	dialog := accountmanager.AddAccountDialog()
	action := uiauto.Combine("Click add account",
		ui.WaitUntilExists(link),
		ui.WithInterval(time.Second).LeftClickUntil(link, ui.Exists(dialog)),
	)
	if err := action(ctx); err != nil {
		return errors.Wrap(err, "failed to find and click add account link")
	}
	return nil
}
|
package cert
import v1 "k8s.io/api/core/v1"
// IsValidTLSSecret reports whether secret is non-nil and carries both the
// certificate (tls.crt) and private key (tls.key) entries expected of a
// kubernetes.io/tls secret.
func IsValidTLSSecret(secret *v1.Secret) bool {
	if secret == nil {
		return false
	}
	_, hasCert := secret.Data[v1.TLSCertKey]
	_, hasKey := secret.Data[v1.TLSPrivateKeyKey]
	return hasCert && hasKey
}
|
package webui
import (
"encoding/json"
"io"
"net/http"
"github.com/dkotik/cuebook"
)
// CRUDQ provides a create, retrieve, update, and delete HTTP interface with a
// search and list Query capability over the given cuebook.
//
// Errors are funneled through the deferred handler: io.EOF maps to 404 Not
// Found, anything else to 500. The previous version declared per-case
// variables with `:=`, which shadowed the captured `err` so the deferred
// handler never saw POST/PUT/DELETE failures; it also wrote the 500 response
// on top of the 404 one because the io.EOF case fell through to the
// unconditional http.Error call.
func CRUDQ(b *cuebook.Book) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var err error
		defer func() {
			switch err {
			case nil:
				// Success: the handler already wrote its response.
			case io.EOF:
				http.Error(w, "Not Found", http.StatusNotFound)
			default:
				http.Error(w, err.Error(), http.StatusInternalServerError)
			}
		}()
		switch r.Method {
		case http.MethodOptions:
			w.Header().Add("Access-Control-Allow-Methods", "GET,PUT,POST,DELETE,OPTIONS")
			w.WriteHeader(http.StatusOK)
			return
		case http.MethodPost:
			m, e := parseMap(r)
			if e != nil {
				err = e
				return
			}
			uuid, e := b.Create(r.URL.Path, m)
			if e != nil {
				err = e
				return
			}
			http.Redirect(w, r, uuid, http.StatusTemporaryRedirect)
			return
		case http.MethodPut:
			m, e := parseMap(r)
			if e != nil {
				err = e
				return
			}
			uuid, e := parseUUID(r)
			if e != nil {
				err = e
				return
			}
			err = b.Update(uuid, m)
			return
		case http.MethodDelete:
			uuid, e := parseUUID(r)
			if e != nil {
				err = e
				return
			}
			err = b.Delete(uuid)
			return
		}
		// GET (and any other method): retrieve a single record as JSON.
		uuid, e := parseUUID(r)
		if e != nil {
			err = e
			return
		}
		m, e := b.Retrieve(uuid)
		if e != nil {
			err = e
			return
		}
		payload, e := json.Marshal(m)
		if e != nil {
			err = e
			return
		}
		w.Write(payload)
	}
}
|
package main
import (
"github.com/davecgh/go-spew/spew"
)
// main fills primeTable with the primes found in 2..100 by trial division
// and prints them space-separated via spew.
func main() {
	var primeTable [100]int
	primeTable[0] = 2 // seed with the only even prime
	primeSize := 1
	for n := 3; n <= len(primeTable); n += 2 { // test each odd n for primality
		isPrime := true // assume prime until a divisor is found
		for i := 1; i < primeSize; i++ { // trial division by the odd primes found so far (i starts at 1: n is odd, so 2 never divides it)
			p := primeTable[i]
			if p*p > n { // divisors beyond sqrt(n) cannot exist, stop early
				break
			}
			if n%p == 0 {
				isPrime = false
				break
			}
		}
		if isPrime {
			primeTable[primeSize] = n // record the newly found prime
			primeSize++               // and advance the insertion index
		}
	}
	for i := 0; i < primeSize; i++ {
		spew.Print(primeTable[i], " ")
	}
}
|
package queryparams
import (
"bytes"
"fmt"
"reflect"
"github.com/k81/kate/utils"
)
// QueryParams carries list-endpoint query options: field filters, sort order
// and pagination.
type QueryParams struct {
	filters map[string]interface{} // field name -> filter value
	orderBy []string               // order-by expressions, applied in order
	page    int                    // 1-based page number (see Offset)
	perPage int                    // rows per page
}

// queryRequiredFields lists the fields (and their exact types) that every
// struct passed to NewQueryParamsFromTag must declare.
var queryRequiredFields = map[string]reflect.Type{
	"Page":    reflect.TypeOf(int(0)),
	"PerPage": reflect.TypeOf(int(0)),
	"Sort":    reflect.TypeOf([]string{}),
}
// NewQueryParams returns an empty QueryParams with an initialized filter map.
func NewQueryParams() *QueryParams {
	return &QueryParams{filters: map[string]interface{}{}}
}
// NewQueryParamsFromTag builds a QueryParams from a request struct pointer.
//
// The struct must declare Page (int), PerPage (int) and Sort ([]string)
// fields; a non-empty Sort and non-zero Page/PerPage pair are copied into
// the result, and every other field carrying a `filter:"name"` tag (nil
// pointer fields are skipped) becomes a filter. Violating the contract is a
// programming error and panics. The panic messages previously referenced a
// stale "GetQueryParams" function name.
func NewQueryParamsFromTag(ptr interface{}) *QueryParams {
	params := NewQueryParams()
	val := reflect.ValueOf(ptr)
	ind := reflect.Indirect(val)
	typ := ind.Type()
	fullName := typ.PkgPath() + "." + typ.Name()
	if val.Kind() != reflect.Ptr {
		panic(fmt.Errorf("NewQueryParamsFromTag: cannot use non-ptr struct `%s`", fullName))
	}
	if typ.Kind() != reflect.Struct {
		panic(fmt.Errorf("NewQueryParamsFromTag: only allow ptr of struct"))
	}
	for name, expectedType := range queryRequiredFields {
		if !utils.IsType(ind.FieldByName(name), expectedType) {
			panic(fmt.Errorf("`%s` field should be defined as `%s`", name, expectedType))
		}
	}
	var (
		sort    = reflect.Indirect(ind.FieldByName("Sort"))
		page    = reflect.Indirect(ind.FieldByName("Page"))
		perPage = reflect.Indirect(ind.FieldByName("PerPage"))
	)
	if !utils.IsEmptyValue(sort) {
		if sorts, ok := sort.Interface().([]string); ok {
			params.SetOrderBy(sorts)
		}
	}
	if !utils.IsEmptyValue(page) && !utils.IsEmptyValue(perPage) {
		params.SetPagination(int(page.Int()), int(perPage.Int()))
	}
	for i := 0; i < ind.NumField(); i++ {
		structField := ind.Type().Field(i)
		field := ind.Field(i)
		kind := field.Kind()
		if kind == reflect.Ptr && field.IsNil() {
			// Unset optional filters are simply skipped.
			continue
		}
		filter := structField.Tag.Get("filter")
		if filter == "" {
			continue
		}
		value := reflect.Indirect(field).Interface()
		params.SetFilter(filter, value)
	}
	return params
}
// GetFilters returns the filter map (field name -> value).
func (p *QueryParams) GetFilters() map[string]interface{} {
	return p.filters
}

// SetFilter adds a filter condition, overwriting any previous value for name.
func (p *QueryParams) SetFilter(name string, value interface{}) {
	p.filters[name] = value
}

// SetOrderBy sets the order-by expressions.
func (p *QueryParams) SetOrderBy(orderBy []string) {
	p.orderBy = orderBy
}

// SetPagination sets the pagination info (1-based page, rows per page).
func (p *QueryParams) SetPagination(page, perPage int) {
	p.page = page
	p.perPage = perPage
}

// Offset returns the SQL offset implied by the pagination settings.
func (p *QueryParams) Offset() int {
	return (p.page - 1) * p.perPage
}

// Limit returns the SQL limit (rows per page).
func (p *QueryParams) Limit() int {
	return p.perPage
}

// OrderBy returns the order-by expressions.
func (p *QueryParams) OrderBy() []string {
	return p.orderBy
}
// String renders the params for logging, e.g.
// "Filters:[a=1,b=2];OrderBy:[id desc];Page:1;PerPage:20". Filter order
// follows map iteration and is therefore not stable across calls.
// nolint:errcheck
func (p *QueryParams) String() string {
	buf := &bytes.Buffer{}
	buf.WriteString("Filters:[")
	needComma := false
	for filter, value := range p.filters {
		if needComma {
			buf.WriteByte(',')
		}
		needComma = true
		fmt.Fprintf(buf, "%s=%v", filter, value)
	}
	buf.WriteString("];OrderBy:[")
	for i, orderBy := range p.orderBy {
		if i > 0 {
			buf.WriteByte(',')
		}
		buf.WriteString(orderBy)
	}
	fmt.Fprintf(buf, "];Page:%d;PerPage:%d", p.page, p.perPage)
	return buf.String()
}
|
package main
import (
"ekfet-golang/gorm/dao"
"github.com/jinzhu/gorm"
)
// Product is the basic gorm example model; gorm.Model embeds the standard
// ID/timestamp columns.
type Product struct {
	gorm.Model
	Code  string
	Price uint
}

// SysCountry maps one row of the t_sys_country lookup table (see TableName).
type SysCountry struct {
	//gorm.Model
	Id   uint   `gorm:"column:id"`
	Cn   string `gorm:"column:cn"`
	En   string `gorm:"column:en"`
	Code string `gorm:"column:code"`
}
// TableName maps SysCountry onto its non-default database table.
func (SysCountry) TableName() string {
	const table = "t_sys_country"
	return table
}
// main is a gorm playground; most experiments are kept commented out and
// only the read-by-sex query is currently active.
func main() {
	//db := helper.InitDB()
	//defer db.Close()
	// Auto migration mode
	//db.AutoMigrate(&Product{})
	// Create
	//db.Create(&Product{Code: "L1212", Price: 1000})
	//info := dao.UserInfo{Username: "zhangsan", Password: "password", CreateTime: time.Now(), ModifyTime: time.Now()}
	//dao.CreateUser(info)
	/*info := dao.GetUser(7);
	fmt.Println(*info)
	dao.DeleteUser(3)*/
	//dao.SelectUser("zhangsan3")
	// Read
	dao.GetUserBySex(0)
	//var county1 SysCountry
	//db.First(&county1) // query the product with id 1
	//db.find
	//fmt.Printf("%x", county1.Code)
	//db.First(&product, "code = ?", "L1212") // query the product with code L1212
	//// Update - set the product's price to 2000
	//db.Model(&product).Update("Price", 2000)
	//
	//// Delete - delete the product
	//db.Delete(&product)
}
|
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"sort"
"strings"
)
// Command-line flags: -p selects the root directory, -f toggles file output.
var (
	path       = flag.String("p", ".", "the path for the program")
	printFiles = flag.Bool("f", true, "do you need to print files?")
)

// interfaceElements holds the box-drawing fragments used to render the tree.
// NOTE(review): the keys "Т" and "Г" are Cyrillic letters used as mnemonics
// (tee and corner); "-" maps to the vertical continuation bar.
var interfaceElements = map[string]string{
	"Т": "├───",
	"Г": "└───",
	"-": "│",
}
// dirTree writes an ASCII tree of the directory at path to out.
// When needFiles is false only directories are shown; hidden (dot-prefixed)
// entries are always skipped. Entries are sorted by name and files are
// annotated with their size in bytes, or "(empty)" when zero-length.
func dirTree(out io.Writer, path string, needFiles bool) error {
	var crawler func(path, indent string) error
	crawler = func(path, indent string) error {
		file, err := os.Open(path)
		if err != nil {
			return fmt.Errorf("have problem with path, error: %w", err)
		}
		// Read the whole directory, then release the handle before recursing
		// so deep trees do not hold one descriptor per level.
		// BUG FIX: the Readdir error was previously ignored and the file
		// handle was never closed.
		folderContent, err := file.Readdir(0)
		file.Close()
		if err != nil {
			return fmt.Errorf("have problem reading dir %q: %w", path, err)
		}
		type data struct {
			name  string
			size  int
			isDir bool
		}
		// Keep directories always; keep files only when requested; drop hidden.
		var filteredContent []data
		for _, item := range folderContent {
			hidden := strings.HasPrefix(item.Name(), ".")
			if !item.IsDir() && !needFiles || hidden {
				continue
			}
			filteredContent = append(filteredContent, data{
				item.Name(),
				int(item.Size()),
				item.IsDir(),
			})
		}
		sort.Slice(filteredContent, func(i, j int) bool {
			return filteredContent[i].name < filteredContent[j].name
		})
		for i, item := range filteredContent {
			row := indent
			if i == len(filteredContent)-1 {
				row += interfaceElements["Г"]
			} else {
				row += interfaceElements["Т"]
			}
			row += item.name
			if !item.isDir {
				if item.size == 0 {
					row += " (empty)"
				} else {
					row += fmt.Sprintf(" (%vb)", item.size)
				}
			}
			fmt.Fprintf(out, "%v", row+"\n")
			if item.isDir {
				// The last entry gets a plain tab indent; earlier entries keep
				// a vertical bar so siblings below stay visually connected.
				childIndent := indent + "\t"
				if i < len(filteredContent)-1 {
					childIndent = indent + interfaceElements["-"] + "\t"
				}
				// BUG FIX: errors from the recursive calls were discarded.
				if err := crawler(path+"/"+item.name+"/", childIndent); err != nil {
					return err
				}
			}
		}
		return nil
	}
	return crawler(path, "")
}
// main parses the CLI flags and renders the directory tree to stdout,
// aborting with a fatal log on any traversal error.
func main() {
	flag.Parse()
	if err := dirTree(os.Stdout, *path, *printFiles); err != nil {
		log.Fatal(err)
	}
}
|
package core
import "fmt"
// errMessages maps API error codes to their (Chinese) descriptions.
// Hoisted to package level so the map is built once, not on every call.
var errMessages = map[int]string{
	40001: "app_key或app_secret不合法",
	40002: "access_token过期",
	40003: "wxcode不合法",
	40005: "access_token无效",
	40006: "school_code不存在",
	40007: "电子卡号不存在",
	40008: "权限不足",
	40009: "参数缺失",
	40010: "参数错误",
	40018: "该主体没有开启应用",
}

// errorMessage returns the description for code. Unknown codes yield a
// generic "unknown error code" message instead of the empty string the
// previous implementation produced.
func errorMessage(code int) string {
	if msg, ok := errMessages[code]; ok {
		return msg
	}
	return fmt.Sprintf("unknown error code: %d", code)
}

// ErrorHandler prints the description of the given error code to stdout.
func ErrorHandler(code int) {
	fmt.Println(errorMessage(code))
}
|
package get
import (
"encoding/json"
"net/http"
"github.com/ocoscope/face/db"
"github.com/ocoscope/face/utils"
"github.com/ocoscope/face/utils/answer"
)
// Lunch returns the company lunch menu for an authenticated user.
// It decodes {CompanyID, UserID, AccessToken} from the request body,
// resolves the company database, verifies the access token, then responds
// with the lunch data or an appropriate error status.
func Lunch(w http.ResponseWriter, r *http.Request) {
	var req struct {
		CompanyID, UserID uint
		AccessToken       string
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		utils.Message(w, answer.WRONG_DATA, 400)
		return
	}
	database, err := db.CopmanyDB(int64(req.CompanyID))
	if err != nil {
		utils.Message(w, answer.NOT_FOUND_COMPANY, 400)
		return
	}
	if err := db.CheckUserAccessToken(database, int64(req.UserID), req.AccessToken); err != nil {
		utils.Message(w, answer.UNAUTHORIZED, 401)
		return
	}
	lunch, err := db.GetCompanyLunch(req.CompanyID)
	if err != nil {
		utils.Message(w, answer.F_SERVER, 500)
		return
	}
	utils.MessageResult(w, lunch, 200)
}
|
package handler
import (
"movie-app/helper"
"movie-app/movie"
"github.com/gin-gonic/gin"
)
// MovieHandler exposes the HTTP endpoints for listing and creating movies.
type MovieHandler interface {
	GetAllMovie(c *gin.Context)
	CreateMovie(c *gin.Context)
}

// movieHandler implements MovieHandler on top of the movie service layer.
type movieHandler struct {
	movieService movie.Service
}
// NewMovieHandler wires a movie service into a ready-to-use HTTP handler.
func NewMovieHandler(movieService movie.Service) *movieHandler {
	handler := &movieHandler{movieService: movieService}
	return handler
}
// GetAllMovie responds with every movie known to the service, formatted for
// the API, or an error payload when the lookup fails.
func (h *movieHandler) GetAllMovie(c *gin.Context) {
	movies, err := h.movieService.GetAll()
	if err != nil {
		helper.ErrorHandling(c, err, "Failed to Showing All Movies")
		return
	}
	helper.SuccessHandling(c, movie.FormatMovies(movies), "Successfully showing all movies")
}
// CreateMovie binds the JSON request body, delegates creation to the
// service, and responds with the formatted new movie or an error payload.
func (h *movieHandler) CreateMovie(c *gin.Context) {
	var input movie.CreateMovieInput
	if err := c.ShouldBindJSON(&input); err != nil {
		helper.ErrorValidation(c, err, "Failed to Create a Movie")
		return
	}
	newMovie, err := h.movieService.Create(input)
	if err != nil {
		helper.ErrorHandling(c, err, "Failed to Create a Movie")
		return
	}
	helper.SuccessHandling(c, movie.FormatMovie(newMovie), "Successfully create a movie")
}
|
package requests
import (
"net/url"
"github.com/google/go-querystring/query"
"github.com/atomicjolt/canvasapi"
)
// SearchAccountDomains Returns a list of up to 5 matching account domains
//
// Partial matches on name / domain are supported
// https://canvas.instructure.com/doc/api/account_domain_lookups.html
//
// Query Parameters:
// # Query.Name (Optional) campus name
// # Query.Domain (Optional) no description
// # Query.Latitude (Optional) no description
// # Query.Longitude (Optional) no description
//
// SearchAccountDomains carries the query parameters for the account-domain
// lookup endpoint; every field is optional.
type SearchAccountDomains struct {
	Query struct {
		Name      string  `json:"name" url:"name,omitempty"`           // (Optional)
		Domain    string  `json:"domain" url:"domain,omitempty"`       // (Optional)
		Latitude  float64 `json:"latitude" url:"latitude,omitempty"`   // (Optional)
		Longitude float64 `json:"longitude" url:"longitude,omitempty"` // (Optional)
	} `json:"query"`
}

// GetMethod returns the HTTP verb used for this request.
func (t *SearchAccountDomains) GetMethod() string {
	return "GET"
}

// GetURLPath returns the request path; empty because this endpoint takes no
// path parameters.
func (t *SearchAccountDomains) GetURLPath() string {
	return ""
}
// GetQuery encodes the Query struct into a URL query string.
func (t *SearchAccountDomains) GetQuery() (string, error) {
	values, err := query.Values(t.Query)
	if err != nil {
		return "", err
	}
	return values.Encode(), nil
}
// GetBody returns no form body; parameters travel in the query string.
func (t *SearchAccountDomains) GetBody() (url.Values, error) {
	return nil, nil
}

// GetJSON returns no JSON payload for this GET request.
func (t *SearchAccountDomains) GetJSON() ([]byte, error) {
	return nil, nil
}

// HasErrors reports client-side validation problems; none are possible here
// since every parameter is optional.
func (t *SearchAccountDomains) HasErrors() error {
	return nil
}
// Do executes the request against the given Canvas client, discarding the
// response and surfacing only transport/API errors.
func (t *SearchAccountDomains) Do(c *canvasapi.Canvas) error {
	if _, err := c.SendRequest(t); err != nil {
		return err
	}
	return nil
}
|
package user
import (
"fmt"
log "github.com/sirupsen/logrus"
"github.com/opsbot/cli-go/utils"
"github.com/opsbot/zerotier/api"
"github.com/spf13/cobra"
)
// GetCommand returns a cobra command
// GetCommand builds the "get" subcommand, which fetches a user identified by
// the --username flag and prints the response body, or writes it to the file
// given with -O/--output-document.
func GetCommand() *cobra.Command {
	var outputPath string
	cmd := &cobra.Command{
		Use:   "get",
		Short: "get user",
		PreRun: func(cmd *cobra.Command, args []string) {
			// Fail fast when the mandatory flag is missing.
			if cmd.Flag("username").Value.String() == "" {
				log.Fatal("you must provide a username")
			}
		},
		Run: func(cmd *cobra.Command, args []string) {
			username := cmd.Flag("username").Value.String()
			log.Tracef("get called for user id %v\n", username)
			data := api.UserGet(username)
			if outputPath == "" {
				fmt.Println(data.Body)
				return
			}
			utils.FileWrite(outputPath, []byte(data.Body))
		},
	}
	cmd.Flags().StringVarP(&outputPath, "output-document", "O", "", "Write output to a file")
	return cmd
}
|
package main
import "fmt"
// main demonstrates appending one slice to another and the classic
// "delete by re-append" idiom for removing a single element.
func main() {
	mySlice := []string{"Monday", "Tuesday"}
	myOtherSlice := []string{"Wednesday", "Thursday", "Friday"}
	mySlice = append(mySlice, myOtherSlice...)
	fmt.Println(mySlice)
	mySlice = append(mySlice[:2], mySlice[3:]...) // trick: just skip the position you don't want
	// This drops index 2 ("Wednesday"): append keeps indices 0 and 1, skips
	// index 2, then copies from index 3 (the 4th position) to the end.
	fmt.Println(mySlice)
}
|
package bitwarden
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"sync"
"github.com/sirupsen/logrus"
)
// cliClient drives the Bitwarden "bw" command-line tool. The item list is
// cached in savedItems at login time and kept in sync by the mutating
// methods; the embedded mutex guards login state.
type cliClient struct {
	username string
	password string
	sync.Mutex
	session    string
	savedItems []Item
	// run executes one bw invocation and returns its stdout (injectable for
	// tests).
	run func(args ...string) ([]byte, error)
	// addSecrets registers sensitive strings (e.g. the session token) so the
	// caller can censor them from logs.
	addSecrets func(s ...string)
	// onCreate is called before secrets are created by client methods, allowing
	// user code to default/validate created items
	onCreate func(*Item) error
}
// newCliClient builds a Client that shells out to the "bw" binary. It logs
// in immediately (via newCliClientWithRun) and caches the item list. The
// supplied runner captures stdout/stderr separately and logs both on use.
func newCliClient(username, password string, addSecrets func(s ...string)) (Client, error) {
	return newCliClientWithRun(username, password, addSecrets, func(args ...string) ([]byte, error) {
		// bw-password is protected, session in args is not
		logger := logrus.WithField("args", args)
		logger.Debug("running bitwarden command ...")
		cmd := exec.Command("bw", args...)
		stderr, err := cmd.StderrPipe()
		if err != nil {
			logger.WithError(err).Error("could not open stderr pipe")
			return nil, err
		}
		stdout, err := cmd.StdoutPipe()
		if err != nil {
			logger.WithError(err).Error("could not open stdout pipe")
			return nil, err
		}
		if err := cmd.Start(); err != nil {
			logger.WithError(err).Error("could not start command")
			return nil, err
		}
		stdoutContents, err := ioutil.ReadAll(stdout)
		if err != nil {
			logger.WithError(err).Error("could not read stdout pipe")
			return nil, err
		}
		stderrContents, err := ioutil.ReadAll(stderr)
		if err != nil {
			// BUG FIX: this message previously said "stdout" (copy/paste).
			logger.WithError(err).Error("could not read stderr pipe")
			return nil, err
		}
		err = cmd.Wait()
		logger = logger.WithFields(logrus.Fields{
			"stdout": string(stdoutContents),
			"stderr": string(stderrContents),
		})
		if err != nil {
			logger.WithError(err).Error("bitwarden command failed")
		}
		return stdoutContents, err
	})
}
// newCliClientWithRun builds a cliClient around an injectable runner and
// performs the initial login + item-list fetch before returning.
func newCliClientWithRun(username, password string, addSecrets func(s ...string), run func(args ...string) (bytes []byte, err error)) (Client, error) {
	c := &cliClient{
		username:   username,
		password:   password,
		run:        run,
		addSecrets: addSecrets,
	}
	return c, c.loginAndListItems()
}
// bwLoginResponse mirrors the JSON emitted by `bw login --response`;
// Data.Raw carries the session token used for subsequent calls.
type bwLoginResponse struct {
	Success bool `json:"success"`
	Data    struct {
		Raw string `json:"raw"`
	} `json:"data"`
}
// runWithSession invokes bw with the cached --session token prepended to the
// given arguments.
func (c *cliClient) runWithSession(args ...string) ([]byte, error) {
	full := append([]string{"--session", c.session}, args...)
	return c.run(full...)
}
// loginAndListItems logs into bw, records the session token, registers it as
// a secret, and caches the full item list in c.savedItems.
func (c *cliClient) loginAndListItems() error {
	c.Lock()
	defer c.Unlock()
	output, err := c.run("login", c.username, c.password, "--response")
	if err != nil {
		return fmt.Errorf("failed to log in: %w", err)
	}
	var resp bwLoginResponse
	if err := json.Unmarshal(output, &resp); err != nil {
		return fmt.Errorf("failed to parse bw login output %s: %w", output, err)
	}
	if !resp.Success {
		// should never happen
		return errors.New("bw login failed without error from CLI")
	}
	if resp.Data.Raw == "" {
		// should never happen
		return errors.New("bw login succeeded with empty '.data.raw'")
	}
	c.session = resp.Data.Raw
	c.addSecrets(c.session)
	out, err := c.runWithSession("list", "items")
	if err != nil {
		return err
	}
	var items []Item
	if err := json.Unmarshal(out, &items); err != nil {
		return fmt.Errorf("failed to parse bw item list output %s: %w", out, err)
	}
	c.savedItems = items
	return nil
}
// GetFieldOnItem returns the value of the named custom field on the named
// item, searching the cached item list.
func (c *cliClient) GetFieldOnItem(itemName, fieldName string) ([]byte, error) {
	for _, item := range c.savedItems {
		if item.Name != itemName {
			continue
		}
		for _, field := range item.Fields {
			if field.Name == fieldName {
				return []byte(field.Value), nil
			}
		}
	}
	return nil, fmt.Errorf("failed to find field %s in item %s", fieldName, itemName)
}
// GetAllItems returns the item list cached at login time.
func (c *cliClient) GetAllItems() []Item {
	return c.savedItems
}

// HasItem reports whether an item with the given name exists in the cache.
// The error return is always nil here (presumably kept for interface
// compatibility).
func (c *cliClient) HasItem(itemName string) (bool, error) {
	for _, item := range c.savedItems {
		if itemName == item.Name {
			return true, nil
		}
	}
	return false, nil
}
// GetAttachmentOnItem downloads the named attachment of the named item into
// a temporary file and returns its contents. The temporary file is removed
// afterwards; a removal failure is reported only when no earlier error is
// already being returned.
func (c *cliClient) GetAttachmentOnItem(itemName, attachmentName string) (bytes []byte, retErr error) {
	file, err := ioutil.TempFile("", "attachmentName")
	if err != nil {
		return nil, err
	}
	// BUG FIX: the handle returned by TempFile was never closed; only the
	// file's name is needed below, so close it immediately.
	if err := file.Close(); err != nil {
		return nil, err
	}
	defer func() {
		// BUG FIX: do not let a cleanup failure mask an earlier download error.
		if err := os.Remove(file.Name()); err != nil && retErr == nil {
			retErr = err
		}
	}()
	return c.getAttachmentOnItemToFile(itemName, attachmentName, file.Name())
}
// getAttachmentOnItemToFile downloads the named attachment of the named item
// to filename via `bw get attachment` and returns the file contents.
func (c *cliClient) getAttachmentOnItemToFile(itemName, attachmentName, filename string) ([]byte, error) {
	for _, item := range c.savedItems {
		if item.Name != itemName {
			continue
		}
		for _, attachment := range item.Attachments {
			if attachment.FileName != attachmentName {
				continue
			}
			if _, err := c.runWithSession("get", "attachment", attachment.ID, "--itemid", item.ID, "--output", filename); err != nil {
				return nil, err
			}
			return ioutil.ReadFile(filename)
		}
	}
	return nil, fmt.Errorf("failed to find attachment %s in item %s", attachmentName, itemName)
}
// GetPassword returns the login password stored on the named item; items
// without a login section are skipped.
func (c *cliClient) GetPassword(itemName string) ([]byte, error) {
	for _, item := range c.savedItems {
		if item.Name == itemName && item.Login != nil {
			return []byte(item.Login.Password), nil
		}
	}
	return nil, fmt.Errorf("failed to find password in item %s", itemName)
}
// Logout runs `bw logout` and returns its raw output.
func (c *cliClient) Logout() ([]byte, error) {
	return c.run("logout")
}
// createItem runs the onCreate hook (when set), creates item via
// `bw create item` and decodes the CLI's response into targetItem.
func (c *cliClient) createItem(item Item, targetItem *Item) error {
	if c.onCreate != nil {
		if err := c.onCreate(&item); err != nil {
			return fmt.Errorf("OnCreate() failed on item: %w", err)
		}
	}
	itemBytes, err := json.Marshal(item)
	if err != nil {
		return fmt.Errorf("failed to serialize item: %w", err)
	}
	// the bitwarden cli expects the item to be base64 encoded
	out, err := c.runWithSession("create", "item", base64.StdEncoding.EncodeToString(itemBytes))
	if err != nil {
		return err
	}
	return json.Unmarshal(out, targetItem)
}
// createAttachment uploads fileContents as an attachment named fileName on
// the item with itemID, decoding the CLI response into newAttachment. The
// payload is staged through a temporary directory that is removed on return.
func (c *cliClient) createAttachment(fileContents []byte, fileName string, itemID string, newAttachment *Attachment) (retError error) {
	// Not tested
	tempDir, err := ioutil.TempDir("", "attachment")
	if err != nil {
		return fmt.Errorf("failed to create temporary file for new attachment: %w", err)
	}
	defer func() {
		// BUG FIX: only surface the cleanup failure when no earlier error is
		// being returned, so the original error is never masked.
		if err := os.RemoveAll(tempDir); err != nil && retError == nil {
			retError = fmt.Errorf("failed to delete temporary file after use: %w", err)
		}
	}()
	filePath := filepath.Join(tempDir, fileName)
	if err := ioutil.WriteFile(filePath, fileContents, 0644); err != nil {
		return fmt.Errorf("failed to create temporary file for new attachment: %w", err)
	}
	out, err := c.runWithSession("create", "attachment", "--itemid", itemID, "--file", filePath)
	if err != nil {
		return fmt.Errorf("bw create failed: %w", err)
	}
	if err = json.Unmarshal(out, newAttachment); err != nil {
		return fmt.Errorf("failed to parse bw output %s: %w", out, err)
	}
	return nil
}
// createEmptyItem creates a type-1 (login) item with an empty login section.
func (c *cliClient) createEmptyItem(itemName string, targetItem *Item) error {
	item := Item{
		Type:  1,
		Name:  itemName,
		Login: &Login{},
	}
	return c.createItem(item, targetItem)
}

// createItemWithPassword creates a login item whose password is pre-set.
func (c *cliClient) createItemWithPassword(itemName string, password []byte, targetItem *Item) error {
	item := Item{
		Type:  1,
		Name:  itemName,
		Login: &Login{string(password)},
	}
	return c.createItem(item, targetItem)
}

// createItemWithNotes creates a login item carrying the given notes text.
func (c *cliClient) createItemWithNotes(itemName, notes string, targetItem *Item) error {
	item := Item{
		Type:  1,
		Name:  itemName,
		Notes: notes,
		Login: &Login{},
	}
	return c.createItem(item, targetItem)
}
// editItem pushes the full state of targetItem to bw via `bw edit item`,
// base64-encoding the JSON payload as the CLI expects.
func (c *cliClient) editItem(targetItem Item) error {
	payload, err := json.Marshal(targetItem)
	if err != nil {
		return fmt.Errorf("failed to marshal object: %w", err)
	}
	_, err = c.runWithSession("edit", "item", targetItem.ID, base64.StdEncoding.EncodeToString(payload))
	return err
}
// deleteAttachment removes an attachment from an item via
// `bw delete attachment`.
func (c *cliClient) deleteAttachment(attachmentID, itemID string) error {
	if _, err := c.runWithSession("delete", "attachment", attachmentID, "--itemid", itemID); err != nil {
		return fmt.Errorf("failed to delete attachment, attachmentID: %s, itemID: %s: %w", attachmentID, itemID, err)
	}
	return nil
}
// UpdateNotesOnItem sets the notes on the named item, creating the item when
// it does not exist. An unchanged or empty notes value issues no edit
// (empty notes deliberately never overwrite an existing value).
func (c *cliClient) UpdateNotesOnItem(itemName, notes string) error {
	var targetItem *Item
	for index, item := range c.savedItems {
		if itemName == item.Name {
			targetItem = &c.savedItems[index]
			break
		}
	}
	if targetItem == nil {
		newItem := &Item{}
		if err := c.createItemWithNotes(itemName, notes, newItem); err != nil {
			return fmt.Errorf("failed to create new bw entry: %w", err)
		}
		c.savedItems = append(c.savedItems, *newItem)
		return nil
	}
	if targetItem.Notes != notes && notes != "" {
		targetItem.Notes = notes
		if err := c.editItem(*targetItem); err != nil {
			// BUG FIX: the error previously claimed a password update.
			return fmt.Errorf("failed to set notes for %s: %w", itemName, err)
		}
	}
	return nil
}
// SetFieldOnItem sets a custom field on the named item, creating the item
// and/or the field as needed. An edit is only sent to bw when the value
// actually changes (or the field is new).
func (c *cliClient) SetFieldOnItem(itemName, fieldName string, fieldValue []byte) error {
	var targetItem *Item
	var targetField *Field
	for index, item := range c.savedItems {
		if itemName == item.Name {
			// Take pointers into the cached slices so the updates below
			// mutate the cache in place.
			targetItem = &c.savedItems[index]
			for fieldIndex, field := range item.Fields {
				if field.Name == fieldName {
					targetField = &c.savedItems[index].Fields[fieldIndex]
					break
				}
			}
			break
		}
	}
	if targetItem == nil {
		newItem := &Item{}
		if err := c.createEmptyItem(itemName, newItem); err != nil {
			return fmt.Errorf("failed to create new bw entry: %w", err)
		}
		c.savedItems = append(c.savedItems, *newItem)
		// The pointer is taken after the append, so it stays valid even if
		// the append reallocated the backing array.
		targetItem = &c.savedItems[len(c.savedItems)-1]
	}
	isNewField := false
	if targetField == nil {
		targetItem.Fields = append(targetItem.Fields, Field{fieldName, string(fieldValue)})
		targetField = &targetItem.Fields[len(targetItem.Fields)-1]
		isNewField = true
	}
	if isNewField || targetField.Value != string(fieldValue) {
		targetField.Value = string(fieldValue)
		if err := c.editItem(*targetItem); err != nil {
			return fmt.Errorf("failed to set field, itemName: %s, fieldName: %s - %w", itemName, fieldName, err)
		}
	}
	return nil
}
// SetAttachmentOnItem ensures the named item carries an attachment with the
// given name and contents: the item is created if missing, an existing
// attachment with identical contents is left alone, and a differing one is
// deleted and re-created (bw has no in-place attachment update).
func (c *cliClient) SetAttachmentOnItem(itemName, attachmentName string, fileContents []byte) (errorMsg error) {
	var targetItem *Item
	var targetAttachment *Attachment
	var targetAttachmentIndex int
	for index, item := range c.savedItems {
		if itemName != item.Name {
			continue
		}
		// Pointers into the cached slices so updates below mutate the cache.
		targetItem = &c.savedItems[index]
		for attachmentIndex, attachment := range item.Attachments {
			if attachment.FileName == attachmentName {
				targetAttachmentIndex = attachmentIndex
				targetAttachment = &c.savedItems[index].Attachments[attachmentIndex]
				break
			}
		}
		break
	}
	if targetItem == nil {
		newItem := &Item{}
		if err := c.createEmptyItem(itemName, newItem); err != nil {
			return fmt.Errorf("failed to create new bw entry: %w", err)
		}
		c.savedItems = append(c.savedItems, *newItem)
		targetItem = &c.savedItems[len(c.savedItems)-1]
	}
	if targetAttachment != nil {
		// Download the current attachment to compare contents.
		tempDir, err := ioutil.TempDir("", "attachment")
		if err != nil {
			return fmt.Errorf("failed to create temporary file for getting: %w", err)
		}
		defer func() {
			// NOTE(review): this overwrites errorMsg even when an earlier
			// error is being returned — confirm whether masking is intended.
			if err := os.RemoveAll(tempDir); err != nil {
				errorMsg = fmt.Errorf("failed to delete temporary file after use: %w", err)
			}
		}()
		filePath := filepath.Join(tempDir, attachmentName)
		existingFileContents, err := c.getAttachmentOnItemToFile(itemName, attachmentName, filePath)
		if err != nil {
			return fmt.Errorf("error reading attachment: %w", err)
		}
		// Identical contents: nothing to do.
		if bytes.Equal(fileContents, existingFileContents) {
			return nil
		}
		// If attachment exists delete it
		if err := c.deleteAttachment(targetAttachment.ID, targetItem.ID); err != nil {
			return fmt.Errorf("failed to delete current attachment on item: %w", err)
		}
		targetItem.Attachments = append(targetItem.Attachments[:targetAttachmentIndex], targetItem.Attachments[targetAttachmentIndex+1:]...)
	}
	newAttachment := &Attachment{}
	// attachment is also considered to be changed if it hadnt existed earlier
	if err := c.createAttachment(fileContents, attachmentName, targetItem.ID, newAttachment); err != nil {
		return fmt.Errorf("error creating attachment: %w", err)
	}
	targetItem.Attachments = append(targetItem.Attachments, *newAttachment)
	return nil
}
// SetPassword sets the login password on the named item, creating the item
// when it does not exist. No edit is issued when the password is unchanged.
func (c *cliClient) SetPassword(itemName string, password []byte) error {
	var targetItem *Item
	for index, item := range c.savedItems {
		if itemName == item.Name {
			targetItem = &c.savedItems[index]
			break
		}
	}
	if targetItem == nil {
		newItem := &Item{}
		if err := c.createItemWithPassword(itemName, password, newItem); err != nil {
			return fmt.Errorf("failed to create new bw entry: %w", err)
		}
		c.savedItems = append(c.savedItems, *newItem)
		return nil
	}
	if targetItem.Login == nil {
		// BUG FIX: an existing item without a login section previously caused
		// a nil pointer dereference on the comparison below (GetPassword
		// guards against nil Login; this path did not).
		targetItem.Login = &Login{}
	}
	if targetItem.Login.Password != string(password) {
		targetItem.Login.Password = string(password)
		if err := c.editItem(*targetItem); err != nil {
			return fmt.Errorf("failed to set password for %s: %w", itemName, err)
		}
	}
	return nil
}
// OnCreate registers a callback invoked on each item before creation,
// letting callers default or validate fields.
func (c *cliClient) OnCreate(callback func(*Item) error) {
	c.onCreate = callback
}

// dryRunCliClient is a Client that never calls bw: reads return zero values
// and writes only describe what they would do into file.
type dryRunCliClient struct {
	file *os.File
}
// GetFieldOnItem is a no-op in dry-run mode.
func (d *dryRunCliClient) GetFieldOnItem(_, _ string) ([]byte, error) {
	return nil, nil
}

// GetAllItems returns nothing in dry-run mode.
func (d *dryRunCliClient) GetAllItems() []Item {
	return nil
}

// HasItem always reports false in dry-run mode.
func (d *dryRunCliClient) HasItem(itemName string) (bool, error) {
	return false, nil
}

// GetAttachmentOnItem is a no-op in dry-run mode.
func (d *dryRunCliClient) GetAttachmentOnItem(_, _ string) ([]byte, error) {
	return nil, nil
}

// GetPassword is a no-op in dry-run mode.
func (d *dryRunCliClient) GetPassword(_ string) ([]byte, error) {
	return nil, nil
}

// Logout closes the dry-run transcript file.
func (d *dryRunCliClient) Logout() ([]byte, error) {
	return nil, d.file.Close()
}
// SetFieldOnItem records the intended field write in the transcript file.
func (d *dryRunCliClient) SetFieldOnItem(itemName, fieldName string, fieldValue []byte) error {
	_, err := fmt.Fprintf(d.file, "ItemName: %s\n\tField: \n\t\t %s: %s\n", itemName, fieldName, string(fieldValue))
	return err
}

// SetAttachmentOnItem records the intended attachment write in the
// transcript file.
func (d *dryRunCliClient) SetAttachmentOnItem(itemName, attachmentName string, fileContents []byte) error {
	_, err := fmt.Fprintf(d.file, "ItemName: %s\n\tAttachment: \n\t\t %s: %s\n", itemName, attachmentName, string(fileContents))
	return err
}

// SetPassword records the intended password change in the transcript file.
func (d *dryRunCliClient) SetPassword(itemName string, password []byte) error {
	_, err := fmt.Fprintf(d.file, "ItemName: %s\n\tAttribute: \n\t\t Password: %s\n", itemName, string(password))
	return err
}

// UpdateNotesOnItem records the intended notes change in the transcript file.
func (d *dryRunCliClient) UpdateNotesOnItem(itemName, notes string) error {
	_, err := fmt.Fprintf(d.file, "ItemName: %s\n\tNotes: %s\n", itemName, notes)
	return err
}

// OnCreate ignores the callback: nothing is created in dry-run mode.
func (d *dryRunCliClient) OnCreate(func(*Item) error) {}

// newDryRunClient returns a Client that writes its transcript to file.
func newDryRunClient(file *os.File) (Client, error) {
	return &dryRunCliClient{
		file: file,
	}, nil
}

// Compile-time check that dryRunCliClient satisfies Client.
var _ Client = &dryRunCliClient{}
|
package wxapi
// APPAuthInfoResp is the authorizer's authorization-info response.
type APPAuthInfoResp struct {
	AuthorizationInfo struct {
		AuthorizerAppid        string `json:"authorizer_appid"`         // authorizer appid
		AuthorizerAccessToken  string `json:"authorizer_access_token"`  // authorizer API access token
		ExpiresIn              string `json:"expires_in"`               // validity period
		AuthorizerRefreshToken string `json:"authorizer_refresh_token"` // refresh token
		FuncInfo               []Fuc  `json:"func_info"`                // permission sets granted to the developer
	} `json:"authorization_info"` // authorization info
	*BaseResp
}

// Fuc describes a single granted permission set.
// NOTE(review): the name looks like a typo for "Func" and the field name is
// non-idiomatic Go; renaming would break callers, so it is only flagged here.
type Fuc struct {
	Funcscope_category string `json:"funcscope_category"` // permission-set ID
}
// APPUserInfoResp holds the authorizer official account's detail info.
type APPUserInfoResp struct {
	Nick_name           string `json:"nick_name"`
	Head_img            string `json:"head_img"`
	Service_type_info   string `json:"service_type_info"`
	Verify_type_info    string `json:"verify_type_info"`
	User_name           string `json:"user_name"`
	Signature           string `json:"signature"`
	Principal_name      string `json:"principal_name"`
	Business_info       string `json:"business_info"`
	Alias               string `json:"alias"`
	Qrcode_url          string `json:"qrcode_url"`
	Authorization_info  string `json:"authorization_info"`
	Authorization_appid string `json:"authorization_appid"`
	Func_info           []Fuc  `json:"func_info"`
	*BaseResp
}
// Structures for parsing third-party platform authorization callbacks.
type APPAuthMsg struct {
	AppId                        string `xml:"AppId"`      // third-party platform appid
	CreateTime                   string `xml:"CreateTime"` // timestamp
	InfoType                     string `xml:"InfoType"`   // "unauthorized" = revoked, "updateauthorized" = updated, "authorized" = granted
	AuthorizerAppid              string // official account or mini-program appid
	AuthorizationCode            string // auth code, exchangeable for the account's API credentials
	AuthorizationCodeExpiredTime string // auth code expiry time
	PreAuthCode                  string // pre-authorization code
	ComponentVerifyTicket        string `xml:"ComponentVerifyTicket"`
}

// APPAuthtokenResp is the third-party platform access-token response.
// NOTE(review): the fields below are unexported, so encoding/json and
// encoding/xml cannot populate them — confirm how these structs are filled.
type APPAuthtokenResp struct {
	component_access_token string // third-party platform access_token
	expires_in             string // validity period
	*BaseResp
}

// APPPreResp is the pre-authorization-code response.
// NOTE(review): same unexported-field concern as APPAuthtokenResp.
type APPPreResp struct {
	pre_auth_code string // pre-authorization code
	expires_in    string // validity period
	*BaseResp
}

// APPOptionResp is the account-option query response.
// NOTE(review): same unexported-field concern as APPAuthtokenResp.
type APPOptionResp struct {
	authorizer_appid string // appid of the authorized official account or mini-program
	option_name      string // option name
	option_value     string // option value
	*BaseResp
}
|
package db
import (
"github.com/chadweimer/gomp/models"
"github.com/jmoiron/sqlx"
)
// sqlAppConfigurationDriver reads and updates the single-row
// app_configuration table through a sqlx database handle.
type sqlAppConfigurationDriver struct {
	Db *sqlx.DB
}
// Read loads the application configuration row from the database.
func (d *sqlAppConfigurationDriver) Read() (*models.AppConfiguration, error) {
	return get(d.Db, func(db sqlx.Queryer) (*models.AppConfiguration, error) {
		var cfg models.AppConfiguration
		if err := sqlx.Get(db, &cfg, "SELECT * FROM app_configuration"); err != nil {
			return nil, err
		}
		return &cfg, nil
	})
}
// Update persists the configuration inside a transaction.
func (d *sqlAppConfigurationDriver) Update(cfg *models.AppConfiguration) error {
	return tx(d.Db, func(db sqlx.Ext) error {
		return d.updateImpl(cfg, db)
	})
}

// updateImpl writes the title column; it runs on whatever executor (plain
// connection or transaction) the caller supplies.
func (*sqlAppConfigurationDriver) updateImpl(cfg *models.AppConfiguration, db sqlx.Execer) error {
	_, err := db.Exec("UPDATE app_configuration SET title = $1", cfg.Title)
	return err
}
|
/*
Copyright 2021 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"context"
"errors"
"io"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/inspect"
namespaces "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/inspect/namespaces"
olog "github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/output/log"
)
// cmdNamespaces builds the parent `inspect namespaces` command, which only
// groups its subcommands (currently just `list`).
func cmdNamespaces() *cobra.Command {
	return NewCmd("namespaces").
		WithDescription("View skaffold namespace information for resources it manages").
		WithCommands(cmdNamespacesList())
}
// cmdNamespacesList builds `inspect namespaces list`, which requires exactly
// one manifest file path argument and delegates to listNamespaces.
func cmdNamespacesList() *cobra.Command {
	return NewCmd("list").
		WithExample("Get list of namespaces", "inspect namespaces list --format json").
		WithExample("Get list of namespaces targeting a specific configuration", "inspect namespaces list --profile local --format json").
		WithDescription("Print the list of namespaces that would be run for a given configuration (default skaffold configuration, specific module, specific profile, etc).").
		WithFlagAdder(cmdNamespacesListFlags).
		WithArgs(func(cmd *cobra.Command, args []string) error {
			// Both log and return the message so the user sees it and cobra
			// aborts the command.
			if len(args) != 1 {
				olog.Entry(context.TODO()).Errorf("`inspect namespaces list` requires exactly one manifest file path argument")
				return errors.New("`inspect namespaces list` requires exactly one manifest file path argument")
			}
			return nil
		}, listNamespaces)
}

// NOTE:
// - currently kubecontext namespaces are not handled as they were not expected for the
// initial use cases involving this command
// - also this code currently does not account for the possibility of the -n flag passed
// additionally to a skaffold command (eg: skaffold apply -n foo)
// listNamespaces forwards the manifest path (args[0]) and the shared inspect
// flags to the namespaces printer.
func listNamespaces(ctx context.Context, out io.Writer, args []string) error {
	return namespaces.PrintNamespacesList(ctx, out, args[0], inspect.Options{
		Filename:          inspectFlags.filename,
		RepoCacheDir:      inspectFlags.repoCacheDir,
		OutFormat:         inspectFlags.outFormat,
		Modules:           inspectFlags.modules,
		Profiles:          inspectFlags.profiles,
		PropagateProfiles: inspectFlags.propagateProfiles,
	})
}
// cmdNamespacesListFlags registers the flags specific to
// `inspect namespaces list`.
func cmdNamespacesListFlags(f *pflag.FlagSet) {
	f.StringSliceVarP(&inspectFlags.profiles, "profile", "p", nil, `Profile names to activate`)
	// Typo fix in the user-visible help text: "This mean" -> "This means".
	f.BoolVar(&inspectFlags.propagateProfiles, "propagate-profiles", true, `Setting '--propagate-profiles=false' disables propagating profiles set by the '--profile' flag across config dependencies. This means that only profiles defined directly in the target 'skaffold.yaml' file are activated.`)
	f.StringSliceVarP(&inspectFlags.modules, "module", "m", nil, "Names of modules to filter target action by.")
}
|
// Copyright 2019 Contentsquare
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package localprovider
import (
"bytes"
"context"
"github.com/contentsquare/gospal/gospal"
"io"
"io/ioutil"
"os"
"path"
"reflect"
"strings"
"testing"
)
// TestNew checks that New produces a *provider for a basic local
// configuration without error.
func TestNew(t *testing.T) {
	type args struct {
		ctx    context.Context
		bucket string
		config *gospal.ProviderConfig
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "Should create a local provider",
			args: args{
				ctx:    context.Background(),
				bucket: "/tmp/bladibla",
				config: &gospal.ProviderConfig{},
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := New(tt.args.ctx, tt.args.bucket, tt.args.config)
			if (err != nil) != tt.wantErr {
				t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			// Only the concrete type is asserted; field values are covered by
			// the other tests in this file.
			if reflect.TypeOf(got) != reflect.TypeOf(&provider{}) {
				t.Errorf("New() got = %v, want %v", reflect.TypeOf(got), reflect.TypeOf(provider{}))
			}
		})
	}
}
// Test_provider_DeleteKey verifies DeleteKey removes an existing key and
// errors on a missing one, using a real temp file under os.TempDir().
func Test_provider_DeleteKey(t *testing.T) {
	tmpFile, err := ioutil.TempFile(os.TempDir(), "gospalTests")
	if err != nil {
		t.Errorf("unable to create temporary file for tests. err=%v", err.Error())
		return
	}
	// BUG FIX: the write error was previously ignored.
	if _, err := tmpFile.WriteString("bladibla some content"); err != nil {
		t.Errorf("unable to write temporary file for tests. err=%v", err.Error())
		return
	}
	tmpFile.Close()
	defer os.Remove(tmpFile.Name())
	type fields struct {
		provider *provider
	}
	type args struct {
		fileName string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{
			// Typo fix: "spacified" -> "specified".
			name: "Should remove the specified key",
			fields: fields{
				provider: &provider{
					context:              context.Background(),
					kind:                 "local",
					directory:            os.TempDir(),
					noSuchKeyErrorString: os.ErrNotExist.Error(),
					config:               &gospal.ProviderConfig{},
				},
			},
			args: args{
				fileName: path.Base(tmpFile.Name()),
			},
			wantErr: false,
		},
		{
			name: "Should raise on non existing key",
			fields: fields{
				provider: &provider{
					context:              context.Background(),
					kind:                 "local",
					directory:            os.TempDir(),
					noSuchKeyErrorString: os.ErrNotExist.Error(),
					config:               &gospal.ProviderConfig{},
				},
			},
			args: args{
				fileName: "bladibla",
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := tt.fields.provider.DeleteKey(tt.args.fileName); (err != nil) != tt.wantErr {
				t.Errorf("DeleteKey() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// Test_provider_GetKind verifies GetKind returns the kind the provider was
// constructed with.
func Test_provider_GetKind(t *testing.T) {
	type fields struct {
		context              context.Context
		kind                 string
		directory            string
		noSuchKeyErrorString string
		config               *gospal.ProviderConfig
	}
	tests := []struct {
		name   string
		fields fields
		want   string
	}{
		{
			name: "Should return the provider kind",
			fields: fields{
				context:              context.Background(),
				kind:                 "local",
				directory:            os.TempDir(),
				noSuchKeyErrorString: os.ErrNotExist.Error(),
				config:               &gospal.ProviderConfig{},
			},
			want: "local",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &provider{
				context:              tt.fields.context,
				kind:                 tt.fields.kind,
				directory:            tt.fields.directory,
				noSuchKeyErrorString: tt.fields.noSuchKeyErrorString,
				config:               tt.fields.config,
			}
			if got := p.GetKind(); got != tt.want {
				t.Errorf("GetKind() = %v, want %v", got, tt.want)
			}
		})
	}
}

// Test_provider_GetNoSuchKeyErrorString verifies the provider reports the
// os.ErrNotExist text as its "no such key" marker string.
func Test_provider_GetNoSuchKeyErrorString(t *testing.T) {
	type fields struct {
		context              context.Context
		kind                 string
		directory            string
		noSuchKeyErrorString string
		config               *gospal.ProviderConfig
	}
	tests := []struct {
		name   string
		fields fields
		want   string
	}{
		{
			name: "Should return the proper error string",
			fields: fields{
				context:              context.Background(),
				kind:                 "local",
				directory:            os.TempDir(),
				noSuchKeyErrorString: os.ErrNotExist.Error(),
				config:               &gospal.ProviderConfig{},
			},
			want: os.ErrNotExist.Error(),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &provider{
				context:              tt.fields.context,
				kind:                 tt.fields.kind,
				directory:            tt.fields.directory,
				noSuchKeyErrorString: tt.fields.noSuchKeyErrorString,
				config:               tt.fields.config,
			}
			if got := p.GetNoSuchKeyErrorString(); got != tt.want {
				t.Errorf("GetNoSuchKeyErrorString() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_provider_GetStream verifies GetStream returns a readable stream for
// an existing key and errors for a missing one.
func Test_provider_GetStream(t *testing.T) {
	tmpFile, err := ioutil.TempFile(os.TempDir(), "gospalTests")
	if err != nil {
		t.Errorf("unable to create temporary file for tests. err=%v", err.Error())
		return
	}
	// BUG FIX: the write error was previously ignored.
	if _, err := tmpFile.WriteString("bladibla some content"); err != nil {
		t.Errorf("unable to write temporary file for tests. err=%v", err.Error())
		return
	}
	tmpFile.Close()
	defer os.Remove(tmpFile.Name())
	type fields struct {
		context              context.Context
		kind                 string
		directory            string
		noSuchKeyErrorString string
		config               *gospal.ProviderConfig
	}
	type args struct {
		filePath string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    string
		wantErr bool
	}{
		{
			name: "Should return the proper stream",
			fields: fields{
				context:              context.Background(),
				kind:                 "local",
				directory:            os.TempDir(),
				noSuchKeyErrorString: os.ErrNotExist.Error(),
				config:               &gospal.ProviderConfig{},
			},
			args: args{
				filePath: path.Base(tmpFile.Name()),
			},
			want:    "bladibla some content",
			wantErr: false,
		},
		{
			// Typo fix: "exising" -> "existing".
			name: "Should raise for non existing file",
			fields: fields{
				context:              context.Background(),
				kind:                 "local",
				directory:            os.TempDir(),
				noSuchKeyErrorString: os.ErrNotExist.Error(),
				config:               &gospal.ProviderConfig{},
			},
			args: args{
				filePath: "bladibla",
			},
			want:    "",
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &provider{
				context:              tt.fields.context,
				kind:                 tt.fields.kind,
				directory:            tt.fields.directory,
				noSuchKeyErrorString: tt.fields.noSuchKeyErrorString,
				config:               tt.fields.config,
			}
			got, _, err := p.GetStream(tt.args.filePath)
			if (err != nil) != tt.wantErr {
				t.Errorf("GetStream() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if err == nil {
				var bb bytes.Buffer
				// BUG FIX: the io.Copy error was previously ignored.
				if _, err := io.Copy(&bb, got); err != nil {
					t.Errorf("GetStream() failed to read stream: %v", err)
					return
				}
				if bb.String() != tt.want {
					t.Errorf("GetStream() got = %v, want %v", bb.String(), tt.want)
				}
			}
		})
	}
}
// Test_provider_ListKeys verifies that ListKeys walks the directory tree
// and returns slash-prefixed relative keys, and errors for a missing root.
func Test_provider_ListKeys(t *testing.T) {
	tmpDirectory, err := ioutil.TempDir(os.TempDir(), "gospalTest")
	if err != nil {
		t.Errorf("unable to create temporary directory for tests. err=%v", err.Error())
		return
	}
	defer os.RemoveAll(tmpDirectory)
	err = os.MkdirAll(path.Join(tmpDirectory, "otherpath"), 0700)
	if err != nil {
		t.Errorf("unable to create otherpath directory for tests. err=%v", err.Error())
		return
	}
	// Fix: the original copy-pasted creation code reported "file3" in the
	// error for file4; a loop removes the duplication and the wrong name.
	for _, name := range []string{"file1.txt", "file2.txt", "file3.txt", "otherpath/file4.txt"} {
		f, err := os.Create(path.Join(tmpDirectory, name))
		if err != nil {
			t.Errorf("unable to create %s for tests. err=%v", name, err.Error())
			return
		}
		f.Close()
	}
	type fields struct {
		context              context.Context
		kind                 string
		directory            string
		noSuchKeyErrorString string
		config               *gospal.ProviderConfig
	}
	type args struct {
		pathName []string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    []string
		wantErr bool
	}{
		{
			name: "Should return all keys in local directory",
			fields: fields{
				context:              context.Background(),
				kind:                 "local",
				directory:            tmpDirectory,
				noSuchKeyErrorString: os.ErrNotExist.Error(),
				config:               &gospal.ProviderConfig{},
			},
			args: args{
				pathName: nil,
			},
			want:    []string{"/file1.txt", "/file2.txt", "/file3.txt", "/otherpath/file4.txt"},
			wantErr: false,
		},
		{
			name: "Should raise when directory does not exists",
			fields: fields{
				context:              context.Background(),
				kind:                 "local",
				directory:            "/bl/adi/bla",
				noSuchKeyErrorString: os.ErrNotExist.Error(),
				config:               &gospal.ProviderConfig{},
			},
			args: args{
				pathName: nil,
			},
			want:    nil,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &provider{
				context:              tt.fields.context,
				kind:                 tt.fields.kind,
				directory:            tt.fields.directory,
				noSuchKeyErrorString: tt.fields.noSuchKeyErrorString,
				config:               tt.fields.config,
			}
			got, err := p.ListKeys(tt.args.pathName...)
			if (err != nil) != tt.wantErr {
				t.Errorf("ListKeys() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("ListKeys() got = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_provider_PutStream verifies that PutStream writes the reader's bytes
// to the backing directory and reports the number of bytes written.
func Test_provider_PutStream(t *testing.T) {
	type fields struct {
		context              context.Context
		kind                 string
		directory            string
		noSuchKeyErrorString string
		config               *gospal.ProviderConfig
	}
	type args struct {
		fileName string
		reader   io.Reader
	}
	tests := []struct {
		name        string
		fields      fields
		args        args
		want        int64
		wantContent string
		wantErr     bool
	}{
		{
			name: "Should put the specified stream",
			fields: fields{
				context:              context.Background(),
				kind:                 "local",
				directory:            os.TempDir(),
				noSuchKeyErrorString: "",
				config:               &gospal.ProviderConfig{},
			},
			args: args{
				fileName: "bladibla_file1.out",
				reader:   strings.NewReader(`{"configuration": {"main_color": "#333"}, "screens": []}`),
			},
			want:        56,
			wantContent: `{"configuration": {"main_color": "#333"}, "screens": []}`,
			wantErr:     false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			p := &provider{
				context:              tt.fields.context,
				kind:                 tt.fields.kind,
				directory:            tt.fields.directory,
				noSuchKeyErrorString: tt.fields.noSuchKeyErrorString,
				config:               tt.fields.config,
			}
			got, err := p.PutStream(tt.args.fileName, tt.args.reader)
			if (err != nil) != tt.wantErr {
				t.Errorf("PutStream() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("PutStream() got = %v, want %v", got, tt.want)
			}
			if err == nil {
				// Read the file back for comparison.
				data, err := ioutil.ReadFile(path.Join(os.TempDir(), tt.args.fileName))
				if err != nil {
					// Fix: this message previously printed tt.want instead of the error.
					t.Errorf("PutStream() unable to open local data file %v. err=%v", path.Join(os.TempDir(), tt.args.fileName), err)
				}
				if string(data) != tt.wantContent {
					t.Errorf("PutStream() content = %v, wantContent %v", string(data), tt.wantContent)
				}
			}
		})
	}
}
|
package requirements
import "testing"
// TestStruct pairs a triangle's three side lengths with its expected area.
type TestStruct struct {
	lengths []int   // the three side lengths, in order a, b, c
	res     float64 // expected area for those sides
}

// toTest holds valid triangles and their known areas (3-4-5 has area 6).
var toTest = []TestStruct{
	{
		lengths: []int{3, 4, 5},
		res:     6.0,
	},
}

// panicTest1 violates the triangle inequality (3 + 4 = 7) and is expected
// to make getTriangleArea panic.
var panicTest1 = []int{3, 4, 7}

// panicTest2 has a negative side length and is expected to panic as well.
var panicTest2 = []int{-3, 4, 5}
// TestGetTriangleArea checks getTriangleArea against known side/area pairs.
func TestGetTriangleArea(t *testing.T) {
	for _, tc := range toTest {
		got := getTriangleArea(tc.lengths[0], tc.lengths[1], tc.lengths[2])
		if got != tc.res {
			t.Errorf("Calculated area %f didn't match expected area %f", got, tc.res)
		}
	}
}
// TestPanic1 expects a panic when the sides cannot form a triangle
// (degenerate case: 3 + 4 equals 7).
func TestPanic1(t *testing.T) {
	defer func() {
		if recover() == nil {
			t.Errorf("The code did not panic")
		}
	}()
	// Code under test: must panic before returning.
	getTriangleArea(panicTest1[0], panicTest1[1], panicTest1[2])
}
// TestPanic2 expects a panic when a side length is negative.
func TestPanic2(t *testing.T) {
	defer func() {
		if recover() == nil {
			t.Errorf("The code did not panic")
		}
	}()
	// Code under test: must panic before returning.
	getTriangleArea(panicTest2[0], panicTest2[1], panicTest2[2])
}
|
package main
import "fmt"
// main demonstrates Go's four flavors of the for loop.
func main() {
	// while-style loop with only a condition: prints 1, 2, 3.
	n := 1
	for n <= 3 {
		fmt.Println(n)
		n++
	}

	// classic three-component loop: prints 7, 8, 9.
	for v := 7; v <= 9; v++ {
		fmt.Println(v)
	}

	// infinite loop, exited immediately with break: prints "loop" once.
	for {
		fmt.Println("loop")
		break
	}

	// continue skips the even numbers: prints 1, 3, 5.
	for v := 0; v <= 5; v++ {
		if v%2 == 0 {
			continue
		}
		fmt.Println(v)
	}
}
|
package client
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"strings"
"sync"
"time"
"gorpc/codec"
"gorpc/option"
"gorpc/server"
)
// Call represents one in-flight RPC invocation.
type Call struct {
	Seq           uint64      // sequence number of the request
	ServiceMethod string      // formatted as "<service>.<method>"
	Args          interface{} // arguments to the remote method
	Reply         interface{} // reply from the remote method
	Error         error       // set when the call fails
	Done          chan *Call  // receives the call itself upon completion
}
// done notifies the caller that this call has completed by sending
// the call on its Done channel.
func (c *Call) done() {
	c.Done <- c
}
// Client represents an RPC client.
// A single client may have multiple outstanding calls and may be used
// by multiple goroutines simultaneously.
type Client struct {
	cc       codec.Codec      // codec: serializes outgoing requests, deserializes incoming responses
	opt      *option.Option   // rpc options, carrying the magic number and codec.Type
	sending  sync.Mutex       // serializes sending so request frames are not interleaved on the wire
	header   codec.Header     // request header; sends are exclusive, so one header per client suffices
	mu       sync.Mutex       // guards the mutable state below
	seq      uint64           // next request sequence number; every request gets a unique one
	pending  map[uint64]*Call // outstanding calls keyed by sequence number
	closing  bool             // connection is closing: the user called Close
	shutdown bool             // connection is shut down, generally because an error occurred
}
// Compile-time check that *Client implements io.Closer.
var _ io.Closer = (*Client)(nil)

// ErrShutdown is returned when operating on a connection that is closed
// or in the process of closing.
var ErrShutdown = errors.New("连接已经关闭")
// Close marks the client as closing and closes the underlying codec.
// Calling Close a second time returns ErrShutdown.
func (c *Client) Close() error {
	c.mu.Lock()
	alreadyClosing := c.closing
	c.closing = true
	c.mu.Unlock()
	if alreadyClosing {
		return ErrShutdown
	}
	return c.cc.Close()
}
// IsAvailable reports whether the client is still usable, i.e. it has
// neither been shut down by an error nor closed by the user.
func (c *Client) IsAvailable() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.shutdown || c.closing {
		return false
	}
	return true
}
// registerCall assigns the next sequence number to call, records it in
// the pending map, and returns that number. It fails with ErrShutdown
// once the client is closing or shut down.
func (c *Client) registerCall(call *Call) (uint64, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.shutdown || c.closing {
		return 0, ErrShutdown
	}
	seq := c.seq
	c.seq++
	call.Seq = seq
	c.pending[seq] = call
	return seq, nil
}
// removeCall removes the call with the given sequence number from the
// pending map and returns it; the result is nil when no such call exists.
func (c *Client) removeCall(seq uint64) *Call {
	c.mu.Lock()
	defer c.mu.Unlock()
	if call, ok := c.pending[seq]; ok {
		delete(c.pending, seq)
		return call
	}
	return nil
}
// terminateCalls is invoked when an error occurs on either side of the
// connection: it marks the client as shut down and completes every
// pending call with err.
// Lock order is sending before mu, matching send(), to avoid deadlock.
func (c *Client) terminateCalls(err error) {
	c.sending.Lock()
	defer c.sending.Unlock()
	c.mu.Lock()
	defer c.mu.Unlock()
	c.shutdown = true
	for _, call := range c.pending {
		call.Error = err
		call.done()
	}
}
// receive reads responses off the connection until an error occurs.
// Each response falls into one of three cases:
//   - the call no longer exists: the request was partially sent or was
//     cancelled, yet the server processed it anyway — discard the body;
//   - the server reported a failure (h.Error non-empty) — fail the call;
//   - normal response — decode the body into call.Reply.
//
// When the loop exits, every remaining pending call is terminated with
// the error that broke the loop.
func (c *Client) receive() {
	var err error
	// Keep receiving until the stream ends or an error occurs.
	for err == nil {
		var h codec.Header
		if err = c.cc.ReadHeader(&h); err != nil {
			break
		}
		// Remove the call from the pending queue as soon as its header arrives.
		call := c.removeCall(h.Seq)
		switch {
		case call == nil:
			// The request was incomplete or cancelled, but the server
			// still replied; drain and discard the body.
			err = c.cc.ReadBody(nil)
		case h.Error != "":
			// Server-side failure. Fix: use errors.New rather than
			// fmt.Errorf(h.Error) — h.Error is not a format string and a
			// literal '%' in it would garble the message (go vet: printf).
			call.Error = errors.New(h.Error)
			err = c.cc.ReadBody(nil)
			call.done()
		default:
			if err = c.cc.ReadBody(call.Reply); err != nil {
				call.Error = errors.New("读取消息体出错 " + err.Error())
			}
			call.done()
		}
	}
	// The connection is broken; fail all outstanding calls.
	c.terminateCalls(err)
}
// send registers the call, writes its header and arguments to the
// connection, and leaves the response to be handled by receive().
func (c *Client) send(call *Call) {
	// Hold sending for the whole write so concurrent requests cannot
	// interleave their header/body bytes on the wire.
	c.sending.Lock()
	defer c.sending.Unlock()
	// Register first so receive() can look the call up by sequence number.
	seq, err := c.registerCall(call)
	if err != nil {
		call.Error = err
		call.done()
		return
	}
	// The shared header is safe to reuse here because sends are exclusive.
	c.header.ServiceMethod = call.ServiceMethod
	c.header.Seq = seq
	c.header.Error = ""
	// Encode and send the request.
	if err = c.cc.Write(&c.header, call.Args); err != nil {
		call = c.removeCall(seq)
		// call may be nil when a partial write still elicited a server
		// response and receive() already completed the call.
		if call != nil {
			call.Error = err
			call.done()
		}
	}
}
// Go invokes the service method asynchronously and returns the Call
// structure representing the invocation. done, if non-nil, must be
// buffered; an unbuffered channel triggers a deliberate panic because
// receive() would otherwise block on completion.
//
// Fix: the parameter was named ServiceMethod (exported-style), violating
// Go naming conventions and the lower-case spelling used by Call.
func (c *Client) Go(serviceMethod string, args, reply interface{}, done chan *Call) *Call {
	switch {
	case done == nil:
		done = make(chan *Call, 1) // buffered channel
	case cap(done) == 0:
		log.Panic("rpc client: done channel 为无缓冲通道")
	}
	call := &Call{
		ServiceMethod: serviceMethod,
		Args:          args,
		Reply:         reply,
		Done:          done,
	}
	c.send(call)
	return call
}
// Call invokes the service method synchronously, blocking until the call
// completes or ctx is cancelled. On cancellation the pending call is
// dropped and a wrapped context error is returned.
func (c *Client) Call(ctx context.Context, serviceMethod string, args, reply interface{}) error {
	done := make(chan *Call, 1)
	call := c.Go(serviceMethod, args, reply, done)
	select {
	case <-ctx.Done():
		c.removeCall(call.Seq)
		return errors.New("rpc client: 调用失败 " + ctx.Err().Error())
	case finished := <-done:
		return finished.Error
	}
}
// NewClient creates an RPC client on conn: it validates the codec type,
// sends the options as a JSON preamble, then hands the connection to the
// codec-backed client.
func NewClient(conn net.Conn, opt *option.Option) (*Client, error) {
	// Resolve the codec constructor from opt.CodecType.
	newCodec := codec.NewCodecFuncMap[opt.CodecType]
	if newCodec == nil {
		err := fmt.Errorf("非法的codec类型 %s", opt.CodecType)
		log.Println("rpc client: codec error ", err)
		return nil, err
	}
	// The options travel first, JSON-encoded, so the server can pick a codec.
	if err := json.NewEncoder(conn).Encode(opt); err != nil {
		log.Println("rpc client: option encode error ", err)
		_ = conn.Close()
		return nil, err
	}
	return newClientCodec(newCodec(conn), opt), nil
}
// newClientCodec builds a Client around an established codec and starts
// the background goroutine that receives responses.
func newClientCodec(cc codec.Codec, opt *option.Option) *Client {
	c := &Client{
		cc:      cc,
		opt:     opt,
		seq:     1, // sequence numbers start at 1; 0 marks an invalid call
		pending: make(map[uint64]*Call),
	}
	go c.receive()
	return c
}
// parseOptions makes Option an optional argument: no option (or a nil
// one) yields the default, exactly one is normalized and returned, and
// more than one is an error.
//
// Fix: the length check now runs first; previously a slice like
// [nil, opt] slipped past it and silently returned the defaults.
func parseOptions(opts ...*option.Option) (*option.Option, error) {
	if len(opts) > 1 {
		return nil, errors.New("opts 参数数量不能超过1")
	}
	if len(opts) == 0 || opts[0] == nil {
		return option.DefaultOption, nil
	}
	opt := opts[0]
	// The magic number is not user-configurable.
	opt.MagicNumber = option.DefaultOption.MagicNumber
	if opt.CodecType == "" {
		opt.CodecType = option.DefaultOption.CodecType
	}
	return opt, nil
}
// clientResult bundles the outcome of an asynchronous client
// construction so it can be delivered over a channel.
type clientResult struct {
	client *Client
	err    error
}

// newClientFunc is the constructor signature shared by NewClient and
// NewHTTPClient, allowing dialTimeout to work with either.
type newClientFunc func(conn net.Conn, opt *option.Option) (client *Client, err error)
// dialTimeout connects to an RPC server at network/address and applies
// opt.ConnectTimeout to the dial plus client handshake. f constructs the
// client once the raw connection is up.
//
// Fixes: ch is now buffered so the handshake goroutine can always
// deliver its result and exit instead of leaking when the timeout fires
// first; the goroutine also uses its own locals rather than assigning to
// the named returns, which raced with the timeout return path.
func dialTimeout(f newClientFunc, network, address string, opts ...*option.Option) (client *Client, err error) {
	opt, err := parseOptions(opts...)
	if err != nil {
		return nil, err
	}
	conn, err := net.DialTimeout(network, address, opt.ConnectTimeout)
	if err != nil {
		return nil, err
	}
	// Close the connection on any later failure, including timeout.
	defer func() {
		if err != nil {
			_ = conn.Close()
		}
	}()
	ch := make(chan clientResult, 1)
	go func() {
		cl, cerr := f(conn, opt)
		ch <- clientResult{client: cl, err: cerr}
	}()
	// A zero timeout means wait indefinitely.
	if opt.ConnectTimeout == 0 {
		result := <-ch
		return result.client, result.err
	}
	select {
	case <-time.After(opt.ConnectTimeout):
		return nil, fmt.Errorf("rpc client: 连接超时, 超时时间为%s", opt.ConnectTimeout)
	case result := <-ch:
		return result.client, result.err
	}
}
// Dial connects to an RPC server at the given network and address using
// the plain RPC handshake, honoring the configured connect timeout.
func Dial(network, address string, opts ...*option.Option) (*Client, error) {
	return dialTimeout(NewClient, network, address, opts...)
}
// NewHTTPClient creates a Client over an HTTP CONNECT tunnel: it writes a
// CONNECT request for the default RPC path and, once the server replies
// with the expected status, switches the connection to the RPC protocol.
func NewHTTPClient(conn net.Conn, opt *option.Option) (*Client, error) {
	_, _ = io.WriteString(conn, fmt.Sprintf("CONNECT %s HTTP/1.0\n\n", server.DefaultRpcPath))
	// Require a successful HTTP response before switching to RPC.
	resp, err := http.ReadResponse(bufio.NewReader(conn), &http.Request{Method: "CONNECT"})
	if err == nil && resp.Status == server.CONNECTED {
		return NewClient(conn, opt)
	}
	if err == nil {
		// The server answered, but not with the expected CONNECTED status.
		err = errors.New("unexpected HTTP response: " + resp.Status)
	}
	return nil, err
}
// DialHTTP connects to an RPC server at the given network and address
// using the HTTP CONNECT handshake, honoring the connect timeout.
func DialHTTP(network, address string, opts ...*option.Option) (*Client, error) {
	return dialTimeout(NewHTTPClient, network, address, opts...)
}
// XDial picks the dialing strategy from the protocol part of rpcAddr,
// which uses the generic "protocol@addr" form, e.g. http@10.0.0.1:7001,
// tcp@10.0.0.1:9999 or unix@/tmp/gorpc.sock.
func XDial(rpcAddr string, opts ...*option.Option) (*Client, error) {
	parts := strings.Split(rpcAddr, "@")
	if len(parts) != 2 {
		return nil, fmt.Errorf("rpc client: 错误的格式 '%v', 应当为 protocol@addr", rpcAddr)
	}
	protocol, addr := parts[0], parts[1]
	// HTTP gets the CONNECT handshake; everything else (tcp, unix, ...)
	// dials the raw transport directly.
	if protocol == "http" {
		return DialHTTP("tcp", addr, opts...)
	}
	return Dial(protocol, addr, opts...)
}
|
package book
import (
"fmt"
"github.com/gorilla/mux"
"net/http"
)
// LoadRoutes registers the book routes on router under the given prefix:
// the collection index at prefix and a single book at prefix/{id}.
func LoadRoutes(prefix string, router *mux.Router) {
	router.HandleFunc(prefix, index)
	router.HandleFunc(prefix+"/{id}", show)
}
// index handles requests for the book collection.
// Currently a stub: it only logs to stdout and writes nothing to writer.
func index(writer http.ResponseWriter, request *http.Request) {
	fmt.Println("Book index")
}
// show handles requests for a single book.
// Currently a stub: it only logs to stdout and writes nothing to writer.
func show(writer http.ResponseWriter, request *http.Request) {
	fmt.Println("Book show")
}
|
package supervisor_test
import (
"fmt"
"net/http"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
// sql drivers
_ "github.com/mattn/go-sqlite3"
"github.com/starkandwayne/shield/db"
. "github.com/starkandwayne/shield/supervisor"
)
// Specs for the /v1/stores HTTP API: listing, filtering (name, plugin,
// unused, exact), single-store retrieval, create/update/delete, and
// method/UUID validation.
var _ = Describe("/v1/stores API", func() {
	var API http.Handler
	var resyncChan chan int

	// Fixture UUIDs used throughout the specs.
	STORE_REDIS := `66be7c43-6c57-4391-8ea9-e770d6ab5e9e`
	STORE_S3 := `05c3d005-f968-452f-bd59-bee8e79ab982`
	STORE_S3_PLUS := `e62ecbe7-d0d1-4573-b0f6-1a3c0c95b92f`
	NIL := `00000000-0000-0000-0000-000000000000`

	// Seed data: two stores plus one job referencing STORE_S3, which
	// makes STORE_S3 "used" and STORE_REDIS "unused".
	databaseEntries := []string{
		`INSERT INTO stores (uuid, name, summary, plugin, endpoint) VALUES
			("` + STORE_REDIS + `",
			 "redis-shared",
			 "Shared Redis services for CF",
			 "redis",
			 "<<redis-configuration>>")`,
		`INSERT INTO stores (uuid, name, summary, plugin, endpoint) VALUES
			("` + STORE_S3 + `",
			 "s3",
			 "Amazon S3 Blobstore",
			 "s3",
			 "<<s3-configuration>>")`,
		`INSERT INTO jobs (uuid, store_uuid, target_uuid, schedule_uuid, retention_uuid) VALUES
			("abc-def",
			 "` + STORE_S3 + `", "` + NIL + `", "` + NIL + `", "` + NIL + `")`,
	}
	var data *db.DB

	// Fresh database and resync channel for each spec.
	BeforeEach(func() {
		var err error
		data, err = Database(databaseEntries...)
		Ω(err).ShouldNot(HaveOccurred())
		resyncChan = make(chan int, 1)
	})
	JustBeforeEach(func() {
		API = StoreAPI{
			Data:       data,
			ResyncChan: resyncChan,
		}
	})
	AfterEach(func() {
		close(resyncChan)
		resyncChan = nil
	})

	It("should retrieve all stores", func() {
		res := GET(API, "/v1/stores")
		Ω(res.Body.String()).Should(MatchJSON(`[
			{
				"uuid"     : "` + STORE_REDIS + `",
				"name"     : "redis-shared",
				"summary"  : "Shared Redis services for CF",
				"plugin"   : "redis",
				"endpoint" : "<<redis-configuration>>"
			},
			{
				"uuid"     : "` + STORE_S3 + `",
				"name"     : "s3",
				"summary"  : "Amazon S3 Blobstore",
				"plugin"   : "s3",
				"endpoint" : "<<s3-configuration>>"
			}
		]`))
		Ω(res.Code).Should(Equal(200))
	})
	It("should retrieve all stores named 'redis'", func() {
		res := GET(API, "/v1/stores?name=redis")
		Ω(res.Body.String()).Should(MatchJSON(`[
			{
				"uuid"     : "` + STORE_REDIS + `",
				"name"     : "redis-shared",
				"summary"  : "Shared Redis services for CF",
				"plugin"   : "redis",
				"endpoint" : "<<redis-configuration>>"
			}
		]`))
		Ω(res.Code).Should(Equal(200))
	})

	Context("Without exact matching", func() {
		It("should retrieve all stores with partially matching names", func() {
			res := GET(API, "/v1/stores?name=red")
			Ω(res.Body.String()).Should(MatchJSON(`[
				{
					"uuid"     : "` + STORE_REDIS + `",
					"name"     : "redis-shared",
					"summary"  : "Shared Redis services for CF",
					"plugin"   : "redis",
					"endpoint" : "<<redis-configuration>>"
				}
			]`))
			Ω(res.Code).Should(Equal(200))
		})
	})

	Context("With exact matching", func() {
		// Add a third store whose name merely contains "s3" so exact
		// matching has something to exclude.
		BeforeEach(func() {
			var err error
			data, err = Database(append(databaseEntries,
				`INSERT INTO stores (uuid, name, summary, plugin, endpoint) VALUES
					("`+STORE_S3_PLUS+`",
					 "s3 PLUS",
					 "Amazon S3 Blobstore",
					 "s3",
					 "<<s3-configuration>>")`,
			)...)
			Ω(err).ShouldNot(HaveOccurred())
		})
		It("should not retrieve stores with only partially matching names", func() {
			res := GET(API, "/v1/stores?name=s3&exact=t")
			Ω(res.Body.String()).Should(MatchJSON(`[
				{
					"uuid"     : "` + STORE_S3 + `",
					"name"     : "s3",
					"summary"  : "Amazon S3 Blobstore",
					"plugin"   : "s3",
					"endpoint" : "<<s3-configuration>>"
				}
			]`))
			Ω(res.Code).Should(Equal(200))
		})
		It("should not retrieve any stores if none match exactly", func() {
			res := GET(API, "/v1/stores?name=red&exact=t")
			Ω(res.Body.String()).Should(MatchJSON(`[]`))
			Ω(res.Code).Should(Equal(200))
		})
	})

	It("should retrieve only unused stores ?unused=t", func() {
		res := GET(API, "/v1/stores?unused=t")
		Ω(res.Body.String()).Should(MatchJSON(`[
			{
				"uuid"     : "` + STORE_REDIS + `",
				"name"     : "redis-shared",
				"summary"  : "Shared Redis services for CF",
				"plugin"   : "redis",
				"endpoint" : "<<redis-configuration>>"
			}
		]`))
		Ω(res.Code).Should(Equal(200))
	})
	It("should retrieve only unused stores named s3 for ?unused=t and ?name=s3", func() {
		res := GET(API, "/v1/stores?unused=t&name=s3")
		Ω(res.Body.String()).Should(MatchJSON(`[]`))
		Ω(res.Code).Should(Equal(200))
	})
	It("should retrieve only used stores for ?unused=f", func() {
		res := GET(API, "/v1/stores?unused=f")
		Ω(res.Body.String()).Should(MatchJSON(`[
			{
				"uuid"     : "` + STORE_S3 + `",
				"name"     : "s3",
				"summary"  : "Amazon S3 Blobstore",
				"plugin"   : "s3",
				"endpoint" : "<<s3-configuration>>"
			}
		]`))
		Ω(res.Code).Should(Equal(200))
	})
	It("should filter stores by plugin name", func() {
		res := GET(API, "/v1/stores?plugin=redis")
		Ω(res.Body.String()).Should(MatchJSON(`[
			{
				"uuid"     : "` + STORE_REDIS + `",
				"name"     : "redis-shared",
				"summary"  : "Shared Redis services for CF",
				"plugin"   : "redis",
				"endpoint" : "<<redis-configuration>>"
			}
		]`))
		Ω(res.Code).Should(Equal(200))

		res = GET(API, "/v1/stores?plugin=s3")
		Ω(res.Body.String()).Should(MatchJSON(`[
			{
				"uuid"     : "` + STORE_S3 + `",
				"name"     : "s3",
				"summary"  : "Amazon S3 Blobstore",
				"plugin"   : "s3",
				"endpoint" : "<<s3-configuration>>"
			}
		]`))
		Ω(res.Code).Should(Equal(200))

		res = GET(API, "/v1/stores?plugin=enoent")
		Ω(res.Body.String()).Should(MatchJSON(`[]`))
		Ω(res.Code).Should(Equal(200))
	})
	It("should filter by combinations of `plugin' and `unused' parameters", func() {
		res := GET(API, "/v1/stores?plugin=s3&unused=f")
		Ω(res.Body.String()).Should(MatchJSON(`[
			{
				"uuid"     : "` + STORE_S3 + `",
				"name"     : "s3",
				"summary"  : "Amazon S3 Blobstore",
				"plugin"   : "s3",
				"endpoint" : "<<s3-configuration>>"
			}
		]`))
		Ω(res.Code).Should(Equal(200))

		res = GET(API, "/v1/stores?plugin=s3&unused=t")
		Ω(res.Body.String()).Should(MatchJSON(`[]`))
		Ω(res.Code).Should(Equal(200))
	})

	It("can retrieve a single store by UUID", func() {
		res := GET(API, "/v1/store/"+STORE_S3)
		Ω(res.Code).Should(Equal(200))
		Ω(res.Body.String()).Should(MatchJSON(`{
			"uuid"     : "` + STORE_S3 + `",
			"name"     : "s3",
			"summary"  : "Amazon S3 Blobstore",
			"plugin"   : "s3",
			"endpoint" : "<<s3-configuration>>"
		}`))
	})
	It("returns a 404 for unknown UUIDs", func() {
		res := GET(API, "/v1/store/de33cdc2-2502-457b-97d8-1bed423b85ac")
		Ω(res.Code).Should(Equal(404))
	})

	It("can create new stores", func() {
		res := POST(API, "/v1/stores", WithJSON(`{
			"name"     : "New Store",
			"summary"  : "A new one",
			"plugin"   : "s3",
			"endpoint" : "[ENDPOINT]"
		}`))
		Ω(res.Code).Should(Equal(200))
		Ω(res.Body.String()).Should(MatchRegexp(`{"ok":"created","uuid":"[a-z0-9-]+"}`))
		// Creation must trigger a supervisor resync.
		Eventually(resyncChan).Should(Receive())
	})
	It("requires the `name', `plugin', and `endpoint' keys to create a new store", func() {
		res := POST(API, "/v1/stores", "{}")
		Ω(res.Code).Should(Equal(400))
		Ω(res.Body.String()).Should(Equal(`{"missing":["name","plugin","endpoint"]}`))
	})

	It("can update existing store", func() {
		res := PUT(API, "/v1/store/"+STORE_REDIS, WithJSON(`{
			"name"     : "Renamed",
			"summary"  : "UPDATED!",
			"plugin"   : "redis",
			"endpoint" : "{NEW-ENDPOINT}"
		}`))
		Ω(res.Code).Should(Equal(200))
		Ω(res.Body.String()).Should(MatchJSON(`{"ok":"updated"}`))
		Eventually(resyncChan).Should(Receive())

		res = GET(API, "/v1/stores")
		Ω(res.Body.String()).Should(MatchJSON(`[
			{
				"uuid"     : "` + STORE_REDIS + `",
				"name"     : "Renamed",
				"summary"  : "UPDATED!",
				"plugin"   : "redis",
				"endpoint" : "{NEW-ENDPOINT}"
			},
			{
				"uuid"     : "` + STORE_S3 + `",
				"name"     : "s3",
				"summary"  : "Amazon S3 Blobstore",
				"plugin"   : "s3",
				"endpoint" : "<<s3-configuration>>"
			}
		]`))
		Ω(res.Code).Should(Equal(200))
	})
	It("requires the `name', `plugin', and `endpoint' keys to update an existing store", func() {
		res := PUT(API, "/v1/store/"+STORE_REDIS, "{}")
		Ω(res.Code).Should(Equal(400))
		Ω(res.Body.String()).Should(Equal(`{"missing":["name","plugin","endpoint"]}`))
	})

	It("can delete unused stores", func() {
		res := DELETE(API, "/v1/store/"+STORE_REDIS)
		Ω(res.Code).Should(Equal(200))
		Ω(res.Body.String()).Should(MatchJSON(`{"ok":"deleted"}`))
		Eventually(resyncChan).Should(Receive())

		res = GET(API, "/v1/stores")
		Ω(res.Body.String()).Should(MatchJSON(`[
			{
				"uuid"     : "` + STORE_S3 + `",
				"name"     : "s3",
				"summary"  : "Amazon S3 Blobstore",
				"plugin"   : "s3",
				"endpoint" : "<<s3-configuration>>"
			}
		]`))
		Ω(res.Code).Should(Equal(200))
	})
	It("refuses to delete a store that is in use", func() {
		// STORE_S3 is referenced by the seeded job, so deletion is forbidden.
		res := DELETE(API, "/v1/store/"+STORE_S3)
		Ω(res.Code).Should(Equal(403))
		Ω(res.Body.String()).Should(Equal(""))
	})

	It("validates JSON payloads", func() {
		JSONValidated(API, "POST", "/v1/stores")
		JSONValidated(API, "PUT", "/v1/store/"+STORE_S3)
	})

	It("ignores other HTTP methods", func() {
		for _, method := range []string{"PUT", "DELETE", "PATCH", "OPTIONS", "TRACE"} {
			NotImplemented(API, method, "/v1/stores", nil)
		}
		for _, method := range []string{"GET", "HEAD", "POST", "PATCH", "OPTIONS", "TRACE"} {
			NotImplemented(API, method, "/v1/stores/sub/requests", nil)
			NotImplemented(API, method, "/v1/store/sub/requests", nil)
		}
	})
	It("ignores malformed UUIDs", func() {
		for _, id := range []string{"malformed-uuid-01234", "", "(abcdef-01234-56-789)"} {
			NotImplemented(API, "GET", fmt.Sprintf("/v1/store/%s", id), nil)
			NotImplemented(API, "PUT", fmt.Sprintf("/v1/store/%s", id), nil)
		}
	})
})
|
package main
import (
"net/http"
"os"
api "github.com/Financial-Times/api-endpoint"
"github.com/Financial-Times/http-handlers-go/httphandlers"
"github.com/Financial-Times/photo-tron/annotations"
"github.com/Financial-Times/photo-tron/fotoware"
"github.com/Financial-Times/photo-tron/health"
"github.com/Financial-Times/photo-tron/suggest"
status "github.com/Financial-Times/service-status-go/httphandlers"
"github.com/husobee/vestigo"
"github.com/jawher/mow.cli"
"github.com/rcrowley/go-metrics"
log "github.com/sirupsen/logrus"
)
const appDescription = "PAC Draft Annotations API"
// main wires up CLI configuration, constructs the API clients and
// handlers, and starts the HTTP server.
func main() {
	app := cli.App("photo-tron", appDescription)

	appSystemCode := app.String(cli.StringOpt{
		Name:   "app-system-code",
		Value:  "photo-tron",
		Desc:   "System Code of the application",
		EnvVar: "APP_SYSTEM_CODE",
	})
	appName := app.String(cli.StringOpt{
		Name:   "app-name",
		Value:  "photo-tron",
		Desc:   "Application name",
		EnvVar: "APP_NAME",
	})
	port := app.String(cli.StringOpt{
		Name:   "port",
		Value:  "8080",
		Desc:   "Port to listen on",
		EnvVar: "APP_PORT",
	})
	annotationsEndpoint := app.String(cli.StringOpt{
		Name:   "annotations-endpoint",
		Value:  "http://test.api.ft.com/content/%v/annotations",
		Desc:   "Endpoint to get annotations from UPP",
		EnvVar: "ANNOTATIONS_ENDPOINT",
	})
	uppAPIKey := app.String(cli.StringOpt{
		Name:   "upp-api-key",
		Value:  "",
		Desc:   "API key to access UPP",
		EnvVar: "UPP_APIKEY",
	})
	apiYml := app.String(cli.StringOpt{
		Name:   "api-yml",
		Value:  "./api.yml",
		Desc:   "Location of the API Swagger YML file.",
		EnvVar: "API_YML",
	})
	fotowareAPIKey := app.String(cli.StringOpt{
		Name:   "fotoware-api-key",
		Value:  "",
		Desc:   "",
		EnvVar: "FW_APIKEY",
	})
	// Fix: this option previously reused Name "upp-api-key" (copy-paste),
	// colliding with the UPP option declared above.
	suggestAPIKey := app.String(cli.StringOpt{
		Name: "suggest-api-key",
		Value: "",
		// NOTE(review): Desc still says UPP (copied from the option above);
		// confirm the intended wording before changing user-facing help text.
		Desc:   "API key to access UPP",
		EnvVar: "SUGGEST_APIKEY",
	})

	log.SetLevel(log.InfoLevel)
	log.Infof("[Startup] %v is starting", *appSystemCode)

	app.Action = func() {
		log.Infof("System code: %s, App Name: %s, Port: %s", *appSystemCode, *appName, *port)
		fwAPI := fotoware.NewFotowareAPI(*fotowareAPIKey)
		suggestAPI := suggest.NewSuggestAPI(*suggestAPIKey)
		annotationsAPI := annotations.NewAnnotationsAPI(*annotationsEndpoint, *uppAPIKey)
		annotationsHandler := annotations.NewHandler(annotationsAPI, fwAPI)
		suggestHandler := annotations.NewSuggestHandler(suggestAPI, fwAPI)
		healthService := health.NewHealthService(*appSystemCode, *appName, appDescription, annotationsAPI)
		// Blocks until the server stops.
		serveEndpoints(*port, apiYml, annotationsHandler, suggestHandler, healthService)
	}

	err := app.Run(os.Args)
	if err != nil {
		log.Errorf("App could not start, error=[%s]\n", err)
		return
	}
}
// serveEndpoints wires up all HTTP routes and blocks serving on port.
// Business routes go through the vestigo router (wrapped with logging and
// metrics); admin endpoints are registered on the default mux directly.
func serveEndpoints(port string, apiYml *string, handler *annotations.Handler, suggestHandler *annotations.SuggestHandler, healthService *health.HealthService) {
	r := vestigo.NewRouter()
	r.Get("/photos-by-uuid/:uuid", handler.ServeHTTP)
	r.Post("/photos-by-text", suggestHandler.SuggestServeHTTP)

	// Wrap the router with transaction-aware request logging and metrics.
	var monitoringRouter http.Handler = r
	monitoringRouter = httphandlers.TransactionAwareRequestLoggingHandler(log.StandardLogger(), monitoringRouter)
	monitoringRouter = httphandlers.HTTPMetricsHandler(metrics.DefaultRegistry, monitoringRouter)

	// Admin endpoints bypass the monitored router.
	http.HandleFunc("/__health", healthService.HealthCheckHandleFunc())
	http.HandleFunc(status.GTGPath, status.NewGoodToGoHandler(healthService.GTG))
	http.HandleFunc(status.BuildInfoPath, status.BuildInfoHandler)
	http.Handle("/", monitoringRouter)

	if apiYml != nil {
		apiEndpoint, err := api.NewAPIEndpointForFile(*apiYml)
		if err != nil {
			// Missing/invalid Swagger file is non-fatal; the service still runs.
			log.WithError(err).WithField("file", *apiYml).Warn("Failed to serve the API Endpoint for this service. Please validate the Swagger YML and the file location")
		} else {
			r.Get(api.DefaultPath, apiEndpoint.ServeHTTP)
		}
	}

	// Blocks; a nil handler means the default mux configured above.
	if err := http.ListenAndServe(":"+port, nil); err != nil {
		log.Fatalf("Unable to start: %v", err)
	}
}
|
// Copyright (c) KwanJunWen
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
package estemplate
import "fmt"
// DatatypeInteger Core Datatype for numeric value.
// A signed 32-bit integer with a minimum value of -2³¹ and a maximum value of 2³¹-1.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/number.html
// for details.
type DatatypeInteger struct {
	Datatype
	name   string   // field key in the mapping
	copyTo []string // targets for the "copy_to" mapping option

	// fields specific to integer datatype; pointers distinguish
	// "unset" (nil) from an explicit false/zero value
	coerce          *bool
	boost           *float32
	docValues       *bool
	ignoreMalformed *bool
	index           *bool
	nullValue       *int
	store           *bool
}
// NewDatatypeInteger initializes a new DatatypeInteger with the given
// field name; all mapping options start unset.
func NewDatatypeInteger(name string) *DatatypeInteger {
	d := &DatatypeInteger{}
	d.name = name
	return d
}
// Name returns the field key for the Datatype.
func (i *DatatypeInteger) Name() string {
	return i.name
}
// CopyTo appends target field(s) for the "copy_to" option, which allows
// the values of multiple fields to be queried as a single field.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/copy-to.html
// for details.
func (i *DatatypeInteger) CopyTo(copyTo ...string) *DatatypeInteger {
	for _, target := range copyTo {
		i.copyTo = append(i.copyTo, target)
	}
	return i
}
// Coerce sets whether dirty values should be cleaned up to fit the
// datatype. Defaults to true.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/coerce.html
// for details.
func (i *DatatypeInteger) Coerce(coerce bool) *DatatypeInteger {
	v := coerce
	i.coerce = &v
	return i
}
// Boost sets the mapping field-level query time boosting. Defaults to 1.0.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/mapping-boost.html
// for details.
func (i *DatatypeInteger) Boost(boost float32) *DatatypeInteger {
	v := boost
	i.boost = &v
	return i
}
// DocValues sets whether the field should be stored on disk in a
// column-stride fashion for later use in sorting, aggregations, or
// scripting. Defaults to true.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/doc-values.html
// for details.
func (i *DatatypeInteger) DocValues(docValues bool) *DatatypeInteger {
	v := docValues
	i.docValues = &v
	return i
}
// IgnoreMalformed sets whether malformed numbers should be ignored
// rather than rejected. Defaults to false.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/ignore-malformed.html
// for details.
func (i *DatatypeInteger) IgnoreMalformed(ignoreMalformed bool) *DatatypeInteger {
	v := ignoreMalformed
	i.ignoreMalformed = &v
	return i
}
// Index sets whether the field should be searchable. Defaults to true.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/mapping-index.html
// for details.
func (i *DatatypeInteger) Index(index bool) *DatatypeInteger {
	v := index
	i.index = &v
	return i
}
// NullValue sets a numeric value substituted for any explicit null
// values. Defaults to null.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/null-value.html
// for details.
func (i *DatatypeInteger) NullValue(nullValue int) *DatatypeInteger {
	v := nullValue
	i.nullValue = &v
	return i
}
// Store sets whether the field value should be stored and retrievable
// separately from the `_source` field. Defaults to false.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/mapping-store.html
// for details.
func (i *DatatypeInteger) Store(store bool) *DatatypeInteger {
	v := store
	i.store = &v
	return i
}
// Validate checks that all required fields of DatatypeInteger are set.
func (i *DatatypeInteger) Validate(includeName bool) error {
	var missing []string
	if includeName && i.name == "" {
		missing = append(missing, "Name")
	}
	if len(missing) == 0 {
		return nil
	}
	return fmt.Errorf("missing required fields: %v", missing)
}
// Source returns the serializable JSON for the source builder.
//
// Example output with includeName == true:
//
//	{
//	  "test": {
//	    "type": "integer",
//	    "copy_to": ["field_1", "field_2"],
//	    "coerce": true,
//	    "boost": 2,
//	    "doc_values": true,
//	    "ignore_malformed": true,
//	    "index": true,
//	    "null_value": 0,
//	    "store": true
//	  }
//	}
//
// Fix: removed the redundant `break` statements (Go switch cases do not
// fall through; staticcheck S1023) and the unreachable default branch,
// collapsing the copy_to selection into a simple if/else.
func (i *DatatypeInteger) Source(includeName bool) (interface{}, error) {
	options := make(map[string]interface{})
	options["type"] = "integer"

	// A single copy_to target serializes as a bare string, several as a list.
	if len(i.copyTo) > 0 {
		if len(i.copyTo) == 1 {
			options["copy_to"] = i.copyTo[0]
		} else {
			options["copy_to"] = i.copyTo
		}
	}
	// Only explicitly-set options (non-nil pointers) are emitted.
	if i.coerce != nil {
		options["coerce"] = i.coerce
	}
	if i.boost != nil {
		options["boost"] = i.boost
	}
	if i.docValues != nil {
		options["doc_values"] = i.docValues
	}
	if i.ignoreMalformed != nil {
		options["ignore_malformed"] = i.ignoreMalformed
	}
	if i.index != nil {
		options["index"] = i.index
	}
	if i.nullValue != nil {
		options["null_value"] = i.nullValue
	}
	if i.store != nil {
		options["store"] = i.store
	}

	if !includeName {
		return options, nil
	}
	source := make(map[string]interface{})
	source[i.name] = options
	return source, nil
}
|
// https://leetcode.com/problems/leaf-similar-trees/
package leetcode_go
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// leafSimilar reports whether the two trees produce identical
// left-to-right sequences of leaf values.
func leafSimilar(root1 *TreeNode, root2 *TreeNode) bool {
	seq1, seq2 := []int{}, []int{}
	helperP872(root1, &seq1)
	helperP872(root2, &seq2)
	if len(seq1) != len(seq2) {
		return false
	}
	for idx, v := range seq1 {
		if v != seq2[idx] {
			return false
		}
	}
	return true
}
// helperP872 appends root's leaf values to leafSeq in left-to-right
// (DFS) order. A nil tree contributes no leaves.
func helperP872(root *TreeNode, leafSeq *[]int) {
	// Fix: guard against a nil (sub)tree; the original dereferenced
	// root.Left unconditionally and panicked on an empty tree.
	if root == nil {
		return
	}
	if root.Left == nil && root.Right == nil {
		*leafSeq = append(*leafSeq, root.Val)
		return
	}
	if root.Left != nil {
		helperP872(root.Left, leafSeq)
	}
	if root.Right != nil {
		helperP872(root.Right, leafSeq)
	}
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package vkb contains shared code to interact with the virtual keyboard.
package vkb
import (
"context"
"fmt"
"regexp"
"strconv"
"strings"
"time"
"github.com/mafredri/cdp/protocol/target"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/internal/driver"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/mouse"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/chrome/uiauto/touch"
"chromiumos/tast/local/chrome/useractions"
"chromiumos/tast/local/chrome/webutil"
"chromiumos/tast/local/coords"
"chromiumos/tast/testing"
)
// VirtualKeyboardContext represents a context of virtual keyboard.
type VirtualKeyboardContext struct {
	ui    *uiauto.Context  // UI automation helper bound to tconn.
	tconn *chrome.TestConn // Test API connection, used for Chrome private API calls.
	cr    *chrome.Chrome   // Chrome instance, used to open DevTools connections to VK pages.
}
// NewContext creates a new context of virtual keyboard.
func NewContext(cr *chrome.Chrome, tconn *chrome.TestConn) *VirtualKeyboardContext {
	vkbCtx := &VirtualKeyboardContext{
		ui:    uiauto.New(tconn),
		tconn: tconn,
		cr:    cr,
	}
	return vkbCtx
}
// localStorageKey defines the key used in virtual keyboard local storage.
type localStorageKey string

const (
	// voicePrivacyInfo key is defined in http://google3/i18n/input/javascript/chos/message/name.ts.
	// Set to 'true' before switching to voice input (see SwitchToVoiceInput),
	// presumably to suppress the voice privacy dialog — confirm against the extension.
	voicePrivacyInfo localStorageKey = "voice_privacy_info"
	// showLongformEdu key is defined in http://google3/i18n/input/javascript/chos/ui/widget/longform_dialog_view.ts.
	// Set to 'true' before switching to handwriting to override the longform
	// first-time tutorial prompt (see SwitchToHandwriting).
	showLongformEdu localStorageKey = "shownLongformEdu"
)
// Finder of virtual keyboard root node.
var vkRootFinder = nodewith.Role(role.RootWebArea).Name("Chrome OS Virtual Keyboard")

// NodeFinder returns a finder of node on virtual keyboard.
// It finds nodes with `offscreen:false` property to avoid
// finding cached offscreen nodes.
var NodeFinder = nodewith.Ancestor(vkRootFinder).Onscreen().Visible()

// DragPointFinder returns the finder of the float VK drag button.
// Its presence is used elsewhere as the signal that the VK is in floating mode.
var DragPointFinder = NodeFinder.Role(role.Button).NameContaining("drag to reposition the keyboard")

// KeyFinder returns a finder of keys on virtual keyboard.
var KeyFinder = NodeFinder.Role(role.Button)

// MultipasteItemFinder returns a finder of multipaste item on virtual keyboard.
var MultipasteItemFinder = NodeFinder.HasClass("scrim")

// MultipasteSuggestionFinder returns a finder of multipaste suggestion on virtual keyboard header bar.
var MultipasteSuggestionFinder = NodeFinder.HasClass("chip")

// MultipasteTrashFinder returns a finder for the multipaste delete button.
// The regex covers both the legacy ("trash-button") and newer ("skv-Bin") class names.
var MultipasteTrashFinder = NodeFinder.ClassNameRegex(regexp.MustCompile("trash-button|skv-Bin"))
// KeyByNameIgnoringCase returns a virtual keyboard Key button finder with the name ignoring case.
// The key name is matched as a whole (anchored) literal, case-insensitively.
func KeyByNameIgnoringCase(keyName string) *nodewith.Finder {
	pattern := regexp.MustCompile(`(?i)^` + regexp.QuoteMeta(keyName) + `$`)
	return KeyFinder.NameRegex(pattern)
}
// UIConn returns a connection to the virtual keyboard HTML page,
// where JavaScript can be executed to simulate interactions with the UI.
// The connection is lazily created, and this function will block until the
// extension is loaded or ctx's deadline is reached. The caller should close
// the returned connection.
func (vkbCtx *VirtualKeyboardContext) UIConn(ctx context.Context) (*chrome.Conn, error) {
	const extURLPrefix = "chrome-extension://jkghodnilhceideoidjikpgommlajknk/inputview.html"
	matchInputView := func(t *target.Info) bool {
		return strings.HasPrefix(t.URL, extURLPrefix)
	}
	return vkbCtx.cr.NewConnForTarget(ctx, matchInputView)
}
// BackgroundConn returns a connection to the virtual keyboard background page,
// where JavaScript can be executed to simulate interactions with IME.
func (vkbCtx *VirtualKeyboardContext) BackgroundConn(ctx context.Context) (*chrome.Conn, error) {
	const bgPageURLPrefix = "chrome-extension://jkghodnilhceideoidjikpgommlajknk/background"
	matchBackground := func(t *driver.Target) bool {
		return strings.HasPrefix(t.URL, bgPageURLPrefix)
	}
	// Background target from login persists for a few seconds, causing 2 background targets.
	// Polling until connected to the unique target.
	var conn *chrome.Conn
	pollOpts := testing.PollOptions{Timeout: 60 * time.Second, Interval: 3 * time.Second}
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		var err error
		conn, err = vkbCtx.cr.NewConnForTarget(ctx, matchBackground)
		return err
	}, &pollOpts); err != nil {
		return nil, errors.Wrap(err, "failed to wait for unique virtual keyboard background target")
	}
	return conn, nil
}
// ShowVirtualKeyboard returns an action forcing the virtual keyboard show up via Chrome API.
// It is not recommended to use on testing a real user input through the virtual keyboard.
// Virtual keyboard should be normally triggered by focusing an input field.
// Usage: It can be used to test Layout and UI interaction in a quick way.
// For example, testing switch layout.
func (vkbCtx *VirtualKeyboardContext) ShowVirtualKeyboard() uiauto.Action {
	evalShow := func(ctx context.Context) error {
		return vkbCtx.tconn.Eval(ctx, `tast.promisify(chrome.inputMethodPrivate.showInputView)()`, nil)
	}
	return uiauto.RetrySilently(3,
		uiauto.Combine("force show virtual keyboard via Chrome API",
			evalShow,
			vkbCtx.WaitLocationStable(),
		))
}
// HideVirtualKeyboard returns an action forcing the virtual keyboard to be hidden via Chrome API.
// It is not recommended to use on testing a real user input through the virtual keyboard.
// Virtual keyboard should be normally triggered by defocusing an input field.
// Usage: It can be used in test cleanup.
func (vkbCtx *VirtualKeyboardContext) HideVirtualKeyboard() uiauto.Action {
	evalHide := func(ctx context.Context) error {
		return vkbCtx.tconn.Eval(ctx, `tast.promisify(chrome.inputMethodPrivate.hideInputView)()`, nil)
	}
	return uiauto.RetrySilently(3,
		uiauto.Combine("force hide virtual keyboard via Chrome API",
			evalHide,
			vkbCtx.WaitUntilHidden(),
		))
}
// IsShown immediately checks whether the virtual keyboard is shown,
// by probing for the VK root node in the a11y tree without waiting.
// TODO (b/182408845) re-implement the function in case an error happens.
func (vkbCtx *VirtualKeyboardContext) IsShown(ctx context.Context) (bool, error) {
	return vkbCtx.ui.IsNodeFound(ctx, vkRootFinder)
}
// IsKeyShown immediately checks whether the given key is shown.
// The key name is matched case-sensitively.
// TODO (b/182408845) re-implement the function in case an error happens.
func (vkbCtx *VirtualKeyboardContext) IsKeyShown(ctx context.Context, keyName string) (bool, error) {
	return vkbCtx.ui.IsNodeFound(ctx, KeyFinder.Name(keyName))
}
// WaitLocationStable returns an action
// waiting for the virtual keyboard to appear and stable.
// It waits up to 5 seconds for the VK root node's location to settle.
func (vkbCtx *VirtualKeyboardContext) WaitLocationStable() uiauto.Action {
	return vkbCtx.ui.WithTimeout(5 * time.Second).WaitForLocation(vkRootFinder)
}
// Location returns stable location of the virtual keyboard,
// i.e. the bounds of the VK root node.
func (vkbCtx *VirtualKeyboardContext) Location(ctx context.Context) (*coords.Rect, error) {
	return vkbCtx.ui.Location(ctx, vkRootFinder)
}
// WaitUntilHidden returns an action waiting for the virtual keyboard to hide.
// It waits until the node is gone from a11y tree, up to 3 seconds.
func (vkbCtx *VirtualKeyboardContext) WaitUntilHidden() uiauto.Action {
	return vkbCtx.ui.WithTimeout(3 * time.Second).WaitUntilGone(vkRootFinder)
}
// TapKey returns an action simulating a mouse click event on the middle of the specified key via a touch event.
// The key name is case sensitive. It can be any letter of the alphabet, "space" or "backspace".
func (vkbCtx *VirtualKeyboardContext) TapKey(keyName string) uiauto.Action {
	return vkbCtx.tapKeyFunc(keyName, false)
}
// TapKeyIgnoringCase returns an action simulating a mouse click event on the middle of the specified key via a touch event.
// The key name can either be case sensitive or not. It can be any letter of the alphabet, "space" or "backspace".
func (vkbCtx *VirtualKeyboardContext) TapKeyIgnoringCase(keyName string) uiauto.Action {
	return vkbCtx.tapKeyFunc(keyName, true)
}
// tapKeyFunc builds the finder for the key (case-sensitive or not) and taps it.
func (vkbCtx *VirtualKeyboardContext) tapKeyFunc(keyName string, ignoreCase bool) uiauto.Action {
	var finder *nodewith.Finder
	if ignoreCase {
		finder = KeyByNameIgnoringCase(keyName)
	} else {
		finder = KeyFinder.Name(keyName)
	}
	return vkbCtx.TapNode(finder)
}
// TapNode returns an action to tap on a node.
// In most cases, TapKey should be primary function for tapping key.
// This function should only be used when a node can not be unique identified by Name.
// TODO(b/196273235): Refactor vkb.TapKey function to distinguish keyboard, suggestion bar, node.
func (vkbCtx *VirtualKeyboardContext) TapNode(finder *nodewith.Finder) uiauto.Action {
	// Note: Must use mouse Move + Press + Sleep + Release here instead of Click.
	// Mouse click is simulated by calling Chrome private api `chrome.autotestPrivate.mouseClick`.
	// It works for most cases except virtual keyboard.
	// In vkb extension, it listens to keyPress to send vk layout event to decoder
	// before sending the actual key tap event.
	// Mouse click is too quick and causes a racing issue that decoder receives tap key without layout info.
	// The 50ms press duration below gives the extension time to deliver the layout event.
	return uiauto.Combine("move mouse to node center point and click",
		vkbCtx.ui.MouseMoveTo(finder, 10*time.Millisecond),
		mouse.Press(vkbCtx.tconn, mouse.LeftButton),
		uiauto.Sleep(50*time.Millisecond),
		mouse.Release(vkbCtx.tconn, mouse.LeftButton),
	)
}
// DoubleTapNode returns an action to double tap on a node.
// Note: DoubleTapNode cannot be replaced by calling TapNode twice.
// vkbCtx.ui.MouseMoveTo function waits for the node location to be stable.
// It can take ~500ms and causing long sleep between 2 clicks.
func (vkbCtx *VirtualKeyboardContext) DoubleTapNode(finder *nodewith.Finder) uiauto.Action {
	// One mouse move, then two press/release pairs 50ms apart,
	// mirroring the timing rationale documented in TapNode.
	return uiauto.Combine("move mouse to node center point and click",
		vkbCtx.ui.MouseMoveTo(finder, 10*time.Millisecond),
		mouse.Press(vkbCtx.tconn, mouse.LeftButton),
		uiauto.Sleep(50*time.Millisecond),
		mouse.Release(vkbCtx.tconn, mouse.LeftButton),
		uiauto.Sleep(50*time.Millisecond),
		mouse.Press(vkbCtx.tconn, mouse.LeftButton),
		uiauto.Sleep(50*time.Millisecond),
		mouse.Release(vkbCtx.tconn, mouse.LeftButton),
	)
}
// TapKeys return an action simulating tap events in the middle of the specified sequence of keys via touch event.
// Each key can be any letter of the alphabet, "space" or "backspace".
// Keys are case sensitive.
func (vkbCtx *VirtualKeyboardContext) TapKeys(keys []string) uiauto.Action {
	return vkbCtx.tapKeysFunc(keys, false)
}
// TapKeysIgnoringCase return an action simulating tap events in the middle of the specified sequence of keys via touch event.
// Each key can be any letter of the alphabet, "space" or "backspace".
// Keys are case insensitive.
func (vkbCtx *VirtualKeyboardContext) TapKeysIgnoringCase(keys []string) uiauto.Action {
	return vkbCtx.tapKeysFunc(keys, true)
}
// tapKeysFunc returns an action tapping each key in sequence, with a short
// pause between taps, optionally matching key names case-insensitively.
func (vkbCtx *VirtualKeyboardContext) tapKeysFunc(keys []string, ignoreCase bool) uiauto.Action {
	return uiauto.NamedAction(
		fmt.Sprintf("vkbCtx.TapKeys(keys []string) with keys=%v", keys),
		func(ctx context.Context) error {
			for _, key := range keys {
				if err := vkbCtx.tapKeyFunc(key, ignoreCase)(ctx); err != nil {
					return err
				}
				// Pause between taps so each key event is processed separately.
				// Wrap (rather than discard) the sleep error so the underlying
				// cause — typically ctx cancellation — is preserved.
				if err := testing.Sleep(ctx, 200*time.Millisecond); err != nil {
					return errors.Wrap(err, "failed to sleep between tapping keys")
				}
			}
			return nil
		})
}
// TapKeyJS returns an action simulating a tap event on the middle of the specified key via javascript. The key can
// be any letter of the alphabet, "space" or "backspace".
func (vkbCtx *VirtualKeyboardContext) TapKeyJS(key string) uiauto.Action {
	return func(ctx context.Context) error {
		kconn, err := vkbCtx.UIConn(ctx)
		if err != nil {
			return err
		}
		defer kconn.Close()
		return kconn.Call(ctx, nil, `(key) => {
			// Multiple keys can have the same aria label but only one is visible.
			const keys = document.querySelectorAll('[aria-label=' + key + ']')
			// querySelectorAll always returns a (truthy) NodeList, so the old
			// check "if (!keys)" could never fire; test the length instead.
			if (keys.length === 0) {
				throw new Error('Key ' + key + ' not found. No element with aria-label ' + key +'.');
			}
			// Dispatch pointerdown/pointerup on the first visible candidate.
			for (const key of keys) {
				const rect = key.getBoundingClientRect();
				if (rect.width <= 0 || rect.height <= 0) {
					continue;
				}
				const e = new Event('pointerdown');
				e.clientX = rect.x + rect.width / 2;
				e.clientY = rect.y + rect.height / 2;
				key.dispatchEvent(e);
				key.dispatchEvent(new Event('pointerup'));
				return;
			}
			throw new Error('Key ' + key + ' not clickable. Found elements with aria-label ' + key + ', but they were not visible.');
		}`, key)
	}
}
// TapKeysJS returns an action simulating tap events on the middle of the specified sequence of keys via javascript.
// Each keys can be any letter of the alphabet, "space" or "backspace".
func (vkbCtx *VirtualKeyboardContext) TapKeysJS(keys []string) uiauto.Action {
	return uiauto.NamedAction(
		fmt.Sprintf("vkbCtx.TapKeysJS(keys []string) with keys=%v", keys),
		func(ctx context.Context) error {
			for _, key := range keys {
				if err := vkbCtx.TapKeyJS(key)(ctx); err != nil {
					return err
				}
				// The sleep error was previously discarded; propagate it so
				// ctx cancellation aborts the sequence promptly.
				if err := testing.Sleep(ctx, 100*time.Millisecond); err != nil {
					return errors.Wrap(err, "failed to sleep between tapping keys")
				}
			}
			return nil
		})
}
// ShowAccessPoints returns an action showing the access points panel.
// If the panel is already shown (the "Hide access points" key exists),
// the action is a no-op.
func (vkbCtx *VirtualKeyboardContext) ShowAccessPoints() uiauto.Action {
	return func(ctx context.Context) error {
		// Wait for the keyboard container to be in a stable location first.
		if err := vkbCtx.ui.WaitForLocation(NodeFinder.HasClass("keyboard"))(ctx); err != nil {
			return err
		}
		if err := vkbCtx.ui.WithTimeout(time.Second).WaitUntilExists(KeyFinder.Name("Hide access points"))(ctx); err == nil {
			// "err == nil" means the access points panel is shown.
			return nil
		}
		return vkbCtx.ui.LeftClick(KeyFinder.Name("Show access points"))(ctx)
	}
}
// SetFloatingMode returns an action changing the virtual keyboard to floating/dock layout.
// The switch is a no-op when the flip button is absent (e.g. the VK is
// already in the requested mode). On success, the user context's FloatVK
// attribute is updated to reflect the new mode.
func (vkbCtx *VirtualKeyboardContext) SetFloatingMode(uc *useractions.UserContext, enabled bool) uiauto.Action {
	var switchMode uiauto.Action
	var actionName string
	if enabled {
		actionName = "Switch VK to floating mode"
		flipButtonFinder := KeyFinder.Name("make virtual keyboard movable")
		switchMode = uiauto.IfSuccessThen(
			vkbCtx.ui.WithTimeout(5*time.Second).WaitUntilExists(flipButtonFinder),
			// Switching to float VK is lagging (b/223081262).
			// Using long interval to check VK locationed.
			// The drag button appearing is the signal that floating mode is active.
			vkbCtx.ui.LeftClickUntil(flipButtonFinder,
				vkbCtx.ui.WithTimeout(10*time.Second).WithInterval(2*time.Second).WaitForLocation(DragPointFinder),
			),
		)
	} else {
		actionName = "Switch VK to dock mode"
		flipButtonFinder := KeyFinder.Name("dock virtual keyboard")
		// The drag button disappearing is the signal that docked mode is active.
		switchMode = uiauto.IfSuccessThen(
			vkbCtx.ui.WithTimeout(5*time.Second).WaitUntilExists(flipButtonFinder),
			vkbCtx.ui.LeftClickUntil(flipButtonFinder, vkbCtx.ui.WithTimeout(10*time.Second).WaitUntilGone(DragPointFinder)),
		)
	}
	return uiauto.UserAction(
		actionName,
		uiauto.Combine("switch VK mode",
			vkbCtx.ShowAccessPoints(),
			switchMode,
			vkbCtx.WaitLocationStable(),
		),
		uc,
		&useractions.UserActionCfg{
			Attributes: map[string]string{
				useractions.AttributeFeature: useractions.FeatureFloatVK,
			},
			Tags: []useractions.ActionTag{
				useractions.ActionTagEssentialInputs,
			},
			// Record the resulting mode on the user context, but only when
			// the switch actually succeeded.
			Callback: func(ctx context.Context, actionError error) error {
				if actionError == nil {
					uc.SetAttribute(useractions.AttributeFloatVK, strconv.FormatBool(enabled))
				}
				return nil
			},
		},
	)
}
// TapKeyboardLayout returns an action clicking keyboard layout to switch.
// The key name is 'Back' in A11y tree.
func (vkbCtx *VirtualKeyboardContext) TapKeyboardLayout() uiauto.Action {
	return vkbCtx.ui.LeftClick(KeyFinder.Name("Back"))
}
// TapAccessPoints returns an action clicking access points button to switch the suggestion bar to layout icons.
// Unlike ShowAccessPoints, this clicks unconditionally without checking current state.
func (vkbCtx *VirtualKeyboardContext) TapAccessPoints() uiauto.Action {
	return vkbCtx.ui.LeftClick(KeyFinder.Name("Show access points"))
}
// WaitForKeysExist returns an action waiting for a list of keys to appear on virtual keyboard.
// Note: Should not use FindKeyNode in a loop to implement this function, because it waits for each key within a timeout.
func (vkbCtx *VirtualKeyboardContext) WaitForKeysExist(keys []string) uiauto.Action {
	return func(ctx context.Context) error {
		pollOpts := testing.PollOptions{Interval: 1 * time.Second, Timeout: 15 * time.Second}
		// Each poll pass checks every key once and reports all missing keys together.
		return testing.Poll(ctx, func(ctx context.Context) error {
			var missing []string
			for _, key := range keys {
				shown, err := vkbCtx.IsKeyShown(ctx, key)
				if err != nil {
					return err
				}
				if !shown {
					missing = append(missing, key)
				}
			}
			if len(missing) > 0 {
				return errors.Errorf("these keys are not found: %v", missing)
			}
			return nil
		}, &pollOpts)
	}
}
// GetSuggestions returns suggestions that are currently displayed by the
// virtual keyboard.
// It reads the text content of all '.candidate-span' elements in the VK page.
func (vkbCtx *VirtualKeyboardContext) GetSuggestions(ctx context.Context) ([]string, error) {
	var suggestions []string
	kconn, err := vkbCtx.UIConn(ctx)
	if err != nil {
		return suggestions, err
	}
	defer kconn.Close()
	err = kconn.Eval(ctx, `
	(() => {
		const elems = document.querySelectorAll('.candidate-span');
		return Array.prototype.map.call(elems, x => x.textContent);
	})()
	`, &suggestions)
	return suggestions, err
}
// WaitForDecoderEnabled returns an action waiting for decoder to be enabled or disabled.
// NOTE: the `enabled` parameter is currently ignored; the action is a fixed
// 10-second sleep regardless of the desired state (see TODO below).
func (vkbCtx *VirtualKeyboardContext) WaitForDecoderEnabled(enabled bool) uiauto.Action {
	// TODO(b/157686038) A better solution to identify decoder status.
	// Decoder works async in returning status to frontend IME and self loading.
	// Using sleep temporarily before a reliable evaluation api provided in cl/339837443.
	return func(ctx context.Context) error {
		return testing.Sleep(ctx, 10*time.Second)
	}
}
// closeInfoDialogue closes a information dialogue if it exists in a handwriting canvas.
// If the close button is absent within 1s the action is a no-op.
func (vkbCtx *VirtualKeyboardContext) closeInfoDialogue(buttonName string) uiauto.Action {
	closeButton := KeyFinder.Name(buttonName)
	waitGone := vkbCtx.ui.WithTimeout(500 * time.Millisecond).WaitUntilGone(closeButton)
	// Close the information dialogue if it shows.
	return uiauto.IfSuccessThen(
		vkbCtx.ui.WithTimeout(time.Second).WaitUntilExists(closeButton),
		vkbCtx.ui.LeftClickUntil(closeButton, waitGone),
	)
}
// ClickUntilVKShown returns an action retrying left clicks the node until the vk is shown with no error.
// This is useful for situations where there is no indication of whether the node is ready to receive clicks.
// The interval between clicks and the timeout can be specified using testing.PollOptions.
func (vkbCtx *VirtualKeyboardContext) ClickUntilVKShown(nodeFinder *nodewith.Finder) uiauto.Action {
	opts := testing.PollOptions{Interval: 2 * time.Second, Timeout: 10 * time.Second}
	clicker := vkbCtx.ui.WithPollOpts(opts)
	return uiauto.RetrySilently(5, clicker.LeftClickUntil(nodeFinder, vkbCtx.WaitLocationStable()))
}
// SwitchToKeyboard returns an action changing to keyboard layout.
// If the access points panel is collapsed, it is expanded first; then the
// 'Back' key returns to the keyboard layout.
// TODO(b/195366402): Use test api for switching to keyboard/handwriting mode for VK.
func (vkbCtx *VirtualKeyboardContext) SwitchToKeyboard() uiauto.Action {
	showAccessPointsBtn := KeyFinder.Name("Show access points")
	return uiauto.Combine("switch back to keyboard",
		uiauto.IfSuccessThen(
			vkbCtx.ui.WithTimeout(500*time.Millisecond).WaitUntilExists(showAccessPointsBtn),
			vkbCtx.ui.LeftClick(showAccessPointsBtn),
		),
		vkbCtx.ui.LeftClick(KeyFinder.Name("Back")),
	)
}
// SwitchToVoiceInput returns an action changing virtual keyboard to voice input layout.
// It sets the voice privacy local-storage key before switching, then waits
// for the voice mic node to confirm the layout is active.
func (vkbCtx *VirtualKeyboardContext) SwitchToVoiceInput() uiauto.Action {
	// Call background API to switch.
	callSwitchAPI := func(ctx context.Context) error {
		bconn, err := vkbCtx.BackgroundConn(ctx)
		if err != nil {
			return err
		}
		// Close the DevTools connection when done; it was previously
		// leaked on every (possibly retried) invocation.
		defer bconn.Close()
		if err := bconn.Call(ctx, nil, `(info) => {
			window.localStorage.setItem(info, 'true');
			background.getTestOnlyApi().switchToVoiceInput();
		}`, voicePrivacyInfo); err != nil {
			return errors.Wrap(err, "failed to call switchToVoiceInput()")
		}
		return nil
	}
	// This node indicates if the voice input is active.
	voiceActiveNode := NodeFinder.HasClass("voice-mic-img")
	return uiauto.Retry(3, uiauto.Combine("tap voice button and close privacy dialogue",
		callSwitchAPI,
		vkbCtx.ui.WithTimeout(5*time.Second).WaitUntilExists(voiceActiveNode),
	))
}
// SwitchToHandwriting changes to handwriting layout and returns a handwriting context.
func (vkbCtx *VirtualKeyboardContext) SwitchToHandwriting(ctx context.Context) (*HandwritingContext, error) {
	// Set local storage to override the LF first time tutorial prompt.
	// It does not apply to legacy handwriting.
	bconn, err := vkbCtx.BackgroundConn(ctx)
	if err != nil {
		return nil, err
	}
	// Close the DevTools connection when done; it was previously leaked.
	defer bconn.Close()
	if err := bconn.Call(ctx, nil, `(info) => {
		window.localStorage.setItem(info, 'true');
	}`, showLongformEdu); err != nil {
		return nil, errors.Wrap(err, "failed to set local storage")
	}
	// Click the handwriting switch key if present (name differs across VK versions).
	if err := vkbCtx.leftClickIfExist(KeyFinder.NameRegex(regexp.MustCompile("(switch to handwriting.*)|(handwriting)")))(ctx); err != nil {
		return nil, err
	}
	// The handwriting canvas appearing confirms the layout switch.
	if err := vkbCtx.ui.WaitUntilExists(NodeFinder.Role(role.Canvas))(ctx); err != nil {
		return nil, err
	}
	return vkbCtx.NewHandwritingContext(ctx)
}
// SwitchToSymbolNumberLayout returns an action changing to symbol number layout,
// by tapping the "switch to symbols" key.
func (vkbCtx *VirtualKeyboardContext) SwitchToSymbolNumberLayout() uiauto.Action {
	return vkbCtx.TapKey("switch to symbols")
}
// SwitchToMultipaste returns an action changing to multipaste layout:
// expand the access points panel, then click the multipaste clipboard key.
func (vkbCtx *VirtualKeyboardContext) SwitchToMultipaste() uiauto.Action {
	return uiauto.Combine("switch to multipaste keyboard",
		vkbCtx.ShowAccessPoints(),
		vkbCtx.ui.LeftClick(KeyFinder.Name("Multipaste clipboard")),
	)
}
// TapMultipasteItem returns an action tapping the item corresponding to itemName in multipaste virtual keyboard.
func (vkbCtx *VirtualKeyboardContext) TapMultipasteItem(itemName string) uiauto.Action {
	return vkbCtx.ui.LeftClick(MultipasteItemFinder.Name(itemName))
}
// DeleteMultipasteItem returns an action selecting a multipaste item via longpress and deleting it.
// The item must disappear within 3s for the action to succeed.
func (vkbCtx *VirtualKeyboardContext) DeleteMultipasteItem(touchCtx *touch.Context, itemName string) uiauto.Action {
	itemFinder := MultipasteItemFinder.Name(itemName)
	return uiauto.Combine("Delete item in multipaste virtual keyboard",
		touchCtx.LongPress(itemFinder),
		touchCtx.Tap(MultipasteTrashFinder),
		vkbCtx.ui.WithTimeout(3*time.Second).WaitUntilGone(itemFinder))
}
// TapMultipasteSuggestion returns an action tapping the item corresponding to itemName in multipaste suggestion bar.
func (vkbCtx *VirtualKeyboardContext) TapMultipasteSuggestion(itemName string) uiauto.Action {
	return vkbCtx.ui.LeftClick(MultipasteSuggestionFinder.Name(itemName))
}
// EnableA11yVirtualKeyboard returns an action enabling or disabling
// accessibility mode of the virtual keyboard.
// When disabled, the tablet non-a11y virtual keyboard will be used.
// It toggles the "settings.a11y.virtual_keyboard" pref via autotestPrivate.
func (vkbCtx *VirtualKeyboardContext) EnableA11yVirtualKeyboard(enabled bool) uiauto.Action {
	return func(ctx context.Context) error {
		return vkbCtx.tconn.Call(ctx, nil, `tast.promisify(chrome.autotestPrivate.setAllowedPref)`, "settings.a11y.virtual_keyboard", enabled)
	}
}
// SelectFromSuggestion returns an action waiting for suggestion candidate (Case Sensitive) to appear and clicks it to select.
func (vkbCtx *VirtualKeyboardContext) SelectFromSuggestion(candidateText string) uiauto.Action {
	return vkbCtx.selectFromSuggestionFunc(candidateText, false)
}
// SelectFromSuggestionIgnoringCase returns an action waiting for suggestion candidate (Case Insensitive) to appear and clicks it to select.
func (vkbCtx *VirtualKeyboardContext) SelectFromSuggestionIgnoringCase(candidateText string) uiauto.Action {
	return vkbCtx.selectFromSuggestionFunc(candidateText, true)
}
// selectFromSuggestionFunc waits for a suggestion candidate ("sk" class) to
// appear and clicks it, matching the text case-sensitively or not.
func (vkbCtx *VirtualKeyboardContext) selectFromSuggestionFunc(candidateText string, ignoringCase bool) uiauto.Action {
	var suggestionFinder *nodewith.Finder
	if ignoringCase {
		suggestionFinder = KeyByNameIgnoringCase(candidateText).HasClass("sk")
	} else {
		suggestionFinder = KeyFinder.Name(candidateText).HasClass("sk")
	}
	ac := vkbCtx.ui.WithPollOpts(testing.PollOptions{Timeout: 3 * time.Second, Interval: 500 * time.Millisecond})
	return uiauto.Combine("wait for suggestion and select",
		ac.WaitUntilExists(suggestionFinder),
		ac.LeftClick(suggestionFinder))
}
// leftClickIfExist returns an action that checks the existence of a node within a short timeout (2s),
// then clicks it if it exists and does nothing if not.
func (vkbCtx *VirtualKeyboardContext) leftClickIfExist(finder *nodewith.Finder) uiauto.Action {
	return uiauto.IfSuccessThenWithLog(
		vkbCtx.ui.WithTimeout(2*time.Second).WaitUntilExists(finder),
		vkbCtx.ui.LeftClick(finder))
}
// ShiftState describes the shift state of the virtual keyboard.
type ShiftState int

// Available virtual keyboard shift state.
// Use ShiftStateUnknown when any errors happen in fetching shift state.
const (
	ShiftStateNone ShiftState = iota
	ShiftStateShifted
	ShiftStateLocked
	ShiftStateUnknown
)

// String returns the key representative string content of the shift state.
// Any value outside the three known states maps to "unknown".
func (shiftState ShiftState) String() string {
	switch shiftState {
	case ShiftStateLocked:
		return "shift-locked"
	case ShiftStateShifted:
		return "shifted"
	case ShiftStateNone:
		return "none"
	default:
		return "unknown"
	}
}
// ShiftState identifies and returns the current VK shift state using left-shift key 'data-key' attribute.
// Note: It only works on English(US).
// It works even the VK is not on screen.
// ShiftLeft: VK is not shifted.
// ShiftLeft-shift: Vk is Shifted.
// ShiftLeft-shiftlock: Vk is Shift locked.
// TODO(b/196272947): Support other input methods other than English(US).
func (vkbCtx *VirtualKeyboardContext) ShiftState(ctx context.Context) (ShiftState, error) {
	inputViewConn, err := vkbCtx.UIConn(ctx)
	if err != nil {
		return ShiftStateUnknown, errors.Wrap(err, "failed to connect to input view page")
	}
	// UIConn documents that the caller should close the returned connection;
	// it was previously leaked here.
	defer inputViewConn.Close()
	var shiftLeftKeyAttr string
	expr := fmt.Sprintf(`shadowPiercingQuery(%q).getAttribute("data-key")`, `div.shift-key`)
	if err := webutil.EvalWithShadowPiercer(ctx, inputViewConn, expr, &shiftLeftKeyAttr); err != nil {
		return ShiftStateUnknown, errors.Wrap(err, "failed to get ShiftLeftKey status")
	}
	switch shiftLeftKeyAttr {
	case "ShiftLeft":
		return ShiftStateNone, nil
	case "ShiftLeft-shift":
		return ShiftStateShifted, nil
	case "ShiftLeft-shiftlock":
		return ShiftStateLocked, nil
	}
	// err is nil at this point, so the old errors.Wrapf(err, ...) returned a
	// nil error and silently reported success for unknown states. Errorf
	// always produces a non-nil error.
	return ShiftStateUnknown, errors.Errorf("VK shift status %q is unknown", shiftLeftKeyAttr)
}
// WaitUntilShiftStatus waits for up to 5s until the expected VK shift state.
// Note: It only works on US-en.
func (vkbCtx *VirtualKeyboardContext) WaitUntilShiftStatus(expectedShiftState ShiftState) uiauto.Action {
	return func(ctx context.Context) error {
		// Poll because the shift state changes asynchronously after a tap.
		return testing.Poll(ctx, func(ctx context.Context) error {
			if currentShiftState, err := vkbCtx.ShiftState(ctx); err != nil {
				return errors.Wrap(err, "failed to get current VK shift status")
			} else if currentShiftState != expectedShiftState {
				return errors.Errorf("unexpected VK shift status: got %q, want %q", currentShiftState, expectedShiftState)
			}
			return nil
		}, &testing.PollOptions{Timeout: 5 * time.Second})
	}
}
// GlideTyping returns a user action to simulate glide typing on virtual keyboard.
// It works on both tablet VK and A11y VK.
// The finger swipes from the first key through each subsequent key,
// then validateResultFunc checks the committed text.
func (vkbCtx *VirtualKeyboardContext) GlideTyping(keys []string, validateResultFunc uiauto.Action) uiauto.Action {
	return func(ctx context.Context) error {
		if len(keys) < 2 {
			return errors.New("glide typing only works on multiple keys")
		}
		touchCtx, err := touch.New(ctx, vkbCtx.tconn)
		if err != nil {
			return errors.Wrap(err, "fail to get touch screen")
		}
		defer touchCtx.Close()
		ui := uiauto.New(vkbCtx.tconn)
		// The swipe starts from the center of the first key.
		initKeyLoc, err := ui.Location(ctx, KeyByNameIgnoringCase(keys[0]))
		if err != nil {
			return errors.Wrap(err, "fail to find the location of first key")
		}
		var gestures []uiauto.Action
		for i := 1; i < len(keys); i++ {
			// Perform a swipe in 50ms and stop 200ms on each key.
			gestures = append(gestures, uiauto.Sleep(200*time.Millisecond))
			if keys[i] == keys[i-1] {
				// For a repeated key, first detour to the key's top-left corner —
				// presumably so the decoder registers two separate visits to the
				// same key rather than one long dwell (TODO confirm).
				keyLoc, err := ui.Location(ctx, KeyByNameIgnoringCase(keys[i]))
				if err != nil {
					return errors.Wrapf(err, "fail to find the location of key: %q", keys[i])
				}
				gestures = append(gestures, touchCtx.SwipeTo(keyLoc.TopLeft(), 50*time.Millisecond))
			}
			gestures = append(gestures, touchCtx.SwipeToNode(KeyByNameIgnoringCase(keys[i]), 50*time.Millisecond))
		}
		return uiauto.Combine("swipe to glide typing and validate result",
			touchCtx.Swipe(initKeyLoc.CenterPoint(), gestures...),
			validateResultFunc,
		)(ctx)
	}
}
|
package 前缀和
// numberOfSubarrays counts the subarrays of nums containing exactly k odd
// numbers, using a prefix-count of odd elements (LeetCode 1248).
func numberOfSubarrays(nums []int, k int) int {
	// prefixCount[c] = number of prefixes seen so far with c odd elements.
	prefixCount := make(map[int]int, len(nums)+1)
	prefixCount[0] = 1
	odd, total := 0, 0
	for _, v := range nums {
		odd += v & 1
		// Every earlier prefix with (odd-k) odds ends a valid subarray here.
		if c, ok := prefixCount[odd-k]; ok {
			total += c
		}
		prefixCount[odd]++
	}
	return total
}
|
package nanoid
import (
	"fmt"

	gonanoid "github.com/matoous/go-nanoid/v2"
)
// IDGenerator produces unique string identifiers.
type IDGenerator interface {
	// Generate returns a newly generated ID.
	Generate() string
}
// nanoid is the default IDGenerator implementation backed by go-nanoid.
type nanoid struct{}

// New returns an IDGenerator that produces nanoid strings.
func New() IDGenerator {
	return new(nanoid)
}
// Generate returns a new 11-character nanoid.
// It panics if the underlying generator fails (e.g. the OS entropy source is
// unavailable), since the IDGenerator interface has no error return.
func (*nanoid) Generate() string {
	id, err := gonanoid.New(11)
	if err != nil {
		// Include the underlying error; the previous fixed message made
		// failures undiagnosable.
		panic(fmt.Sprintf("nanoid: can't generate nanoid: %v", err))
	}
	return id
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package keys
import (
"bytes"
"math"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/errors"
)
// MakeTenantPrefix creates the key prefix associated with the specified tenant.
// The system tenant has no prefix, so nil is returned for it.
func MakeTenantPrefix(tenID roachpb.TenantID) roachpb.Key {
	if tenID != roachpb.SystemTenantID {
		return encoding.EncodeUvarintAscending(TenantPrefix, tenID.ToUint64())
	}
	return nil
}
// DecodeTenantPrefix determines the tenant ID from the key prefix, returning
// the remainder of the key (with the prefix removed) and the decoded tenant ID.
func DecodeTenantPrefix(key roachpb.Key) ([]byte, roachpb.TenantID, error) {
	switch {
	case len(key) == 0: // key.Equal(roachpb.RKeyMin)
		return nil, roachpb.SystemTenantID, nil
	case key[0] != tenantPrefixByte:
		// Keys without the tenant prefix byte belong to the system tenant.
		return key, roachpb.SystemTenantID, nil
	}
	rem, id, err := encoding.DecodeUvarintAscending(key[1:])
	if err != nil {
		return nil, roachpb.TenantID{}, err
	}
	return rem, roachpb.MakeTenantID(id), nil
}
// SQLCodec provides methods for encoding SQL table keys bound to a given
// tenant. The generator also provides methods for efficiently decoding keys
// previously generated by it. The generated keys are safe to use indefinitely
// and the generator is safe to use concurrently.
type SQLCodec struct {
	sqlEncoder
	sqlDecoder
	// The zero-width func field makes SQLCodec incomparable with ==,
	// preventing accidental comparison of codecs that share buffer pointers.
	_ func() // incomparable
}
// sqlEncoder implements the encoding logic for SQL keys.
//
// The type is expressed as a pointer to a slice instead of a slice directly so
// that its zero value is not usable. Any attempt to use the methods on the zero
// value of a sqlEncoder will panic.
type sqlEncoder struct {
	buf *roachpb.Key // tenant key prefix; empty for the system tenant
}
// sqlDecoder implements the decoding logic for SQL keys.
// (The comment previously said "sqlEncoder" — a copy-paste typo.)
//
// The type is expressed as a pointer to a slice instead of a slice directly so
// that its zero value is not usable. Any attempt to use the methods on the zero
// value of a sqlDecoder will panic.
type sqlDecoder struct {
	buf *roachpb.Key // tenant key prefix; empty for the system tenant
}
// MakeSQLCodec creates a new SQLCodec suitable for manipulating SQL keys.
func MakeSQLCodec(tenID roachpb.TenantID) SQLCodec {
	k := MakeTenantPrefix(tenID)
	// Bound the capacity to the length so that appends to keys derived from
	// this prefix always reallocate instead of mutating the shared buffer.
	k = k[:len(k):len(k)] // bound capacity, avoid aliasing
	return SQLCodec{
		sqlEncoder: sqlEncoder{&k},
		sqlDecoder: sqlDecoder{&k},
	}
}
// SystemSQLCodec is a SQL key codec for the system tenant.
var SystemSQLCodec = MakeSQLCodec(roachpb.SystemTenantID)

// TODOSQLCodec is a SQL key codec. It is equivalent to SystemSQLCodec, but
// should be used when it is unclear which tenant should be referenced by the
// surrounding context.
var TODOSQLCodec = MakeSQLCodec(roachpb.SystemTenantID)
// ForSystemTenant returns whether the encoder is bound to the system tenant.
// Only the system tenant has an empty tenant prefix (see MakeTenantPrefix).
func (e sqlEncoder) ForSystemTenant() bool {
	return len(e.TenantPrefix()) == 0
}
// TenantPrefix returns the key prefix used for the tenants's data.
// The returned slice has its capacity bounded (see MakeSQLCodec), so
// appending to it never mutates the codec's shared buffer.
func (e sqlEncoder) TenantPrefix() roachpb.Key {
	return *e.buf
}
// TablePrefix returns the key prefix used for the table's data.
func (e sqlEncoder) TablePrefix(tableID uint32) roachpb.Key {
	return encoding.EncodeUvarintAscending(e.TenantPrefix(), uint64(tableID))
}
// IndexPrefix returns the key prefix used for the index's data.
func (e sqlEncoder) IndexPrefix(tableID, indexID uint32) roachpb.Key {
	return encoding.EncodeUvarintAscending(e.TablePrefix(tableID), uint64(indexID))
}
// DescMetadataPrefix returns the key prefix for all descriptors in the
// system.descriptor table (its primary index).
func (e sqlEncoder) DescMetadataPrefix() roachpb.Key {
	return e.IndexPrefix(DescriptorTableID, DescriptorTablePrimaryKeyIndexID)
}
// DescMetadataKey returns the key for the descriptor in the system.descriptor
// table.
func (e sqlEncoder) DescMetadataKey(descID uint32) roachpb.Key {
	key := encoding.EncodeUvarintAscending(e.DescMetadataPrefix(), uint64(descID))
	return MakeFamilyKey(key, DescriptorTableDescriptorColFamID)
}
// TenantMetadataKey returns the key for the tenant metadata in the
// system.tenants table.
func (e sqlEncoder) TenantMetadataKey(tenID roachpb.TenantID) roachpb.Key {
	key := e.IndexPrefix(TenantsTableID, TenantsTablePrimaryKeyIndexID)
	key = encoding.EncodeUvarintAscending(key, tenID.ToUint64())
	return MakeFamilyKey(key, 0)
}
// SequenceKey returns the key used to store the value of a sequence.
func (e sqlEncoder) SequenceKey(tableID uint32) roachpb.Key {
k := e.IndexPrefix(tableID, SequenceIndexID)
k = encoding.EncodeUvarintAscending(k, 0) // Primary key value
k = MakeFamilyKey(k, SequenceColumnFamilyID) // Column family
return k
}
// DescIDSequenceKey returns the key used for the descriptor ID sequence.
func (e sqlEncoder) DescIDSequenceKey() roachpb.Key {
	if e.ForSystemTenant() {
		// To maintain backwards compatibility, the system tenant uses a
		// separate, non-SQL, key to store its descriptor ID sequence.
		return descIDGenerator
	}
	// Secondary tenants store it as an ordinary SQL sequence.
	return e.SequenceKey(DescIDSequenceID)
}
// ZoneKeyPrefix returns the key prefix for id's row in the system.zones table.
// It panics when the encoder is bound to a non-system tenant, because zone
// keys only exist in the system tenant's keyspace.
func (e sqlEncoder) ZoneKeyPrefix(id uint32) roachpb.Key {
	if !e.ForSystemTenant() {
		panic("zone keys only exist in the system tenant's keyspace")
	}
	k := e.IndexPrefix(ZonesTableID, ZonesTablePrimaryIndexID)
	return encoding.EncodeUvarintAscending(k, uint64(id))
}

// ZoneKey returns the key for id's entry in the system.zones table.
// ZoneKeyPrefix already enforces (via panic) that the encoder is bound to
// the system tenant, so the guard previously duplicated here was removed.
func (e sqlEncoder) ZoneKey(id uint32) roachpb.Key {
	k := e.ZoneKeyPrefix(id)
	return MakeFamilyKey(k, uint32(ZonesTableConfigColumnID))
}
// MigrationKeyPrefix returns the key prefix to store all migration details.
func (e sqlEncoder) MigrationKeyPrefix() roachpb.Key {
	return append(e.TenantPrefix(), MigrationPrefix...)
}

// MigrationLeaseKey returns the key that nodes must take a lease on in order to
// run system migrations on the cluster.
func (e sqlEncoder) MigrationLeaseKey() roachpb.Key {
	return append(e.TenantPrefix(), MigrationLease...)
}
// tenantPrefix returns the key prefix used for the tenant's data. It is
// unexported to avoid colliding with sqlEncoder.TenantPrefix, which is
// embedded alongside this decoder in SQLCodec.
func (d sqlDecoder) tenantPrefix() roachpb.Key {
	return *d.buf
}
// StripTenantPrefix validates that the given key has the proper tenant ID
// prefix, returning the remainder of the key with the prefix removed. The
// method returns an error if the key has a different tenant ID prefix than
// would be generated by the generator.
func (d sqlDecoder) StripTenantPrefix(key roachpb.Key) ([]byte, error) {
	tenPrefix := d.tenantPrefix()
	if !bytes.HasPrefix(key, tenPrefix) {
		return nil, errors.Errorf("invalid tenant id prefix: %q", key)
	}
	// For the system tenant the prefix is empty and the key is returned
	// unchanged.
	return key[len(tenPrefix):], nil
}
// DecodeTablePrefix validates that the given key has a table prefix, returning
// the remainder of the key (with the prefix removed) and the decoded descriptor
// ID of the table.
func (d sqlDecoder) DecodeTablePrefix(key roachpb.Key) ([]byte, uint32, error) {
	key, err := d.StripTenantPrefix(key)
	if err != nil {
		return nil, 0, err
	}
	// The table ID must be encoded as an ascending uvarint.
	if encoding.PeekType(key) != encoding.Int {
		return nil, 0, errors.Errorf("invalid key prefix: %q", key)
	}
	key, tableID, err := encoding.DecodeUvarintAscending(key)
	return key, uint32(tableID), err
}
// DecodeIndexPrefix validates that the given key has a table ID followed by an
// index ID, returning the remainder of the key (with the table and index prefix
// removed) and the decoded IDs of the table and index, respectively.
func (d sqlDecoder) DecodeIndexPrefix(key roachpb.Key) ([]byte, uint32, uint32, error) {
	key, tableID, err := d.DecodeTablePrefix(key)
	if err != nil {
		return nil, 0, 0, err
	}
	// The index ID must follow as another ascending uvarint.
	if encoding.PeekType(key) != encoding.Int {
		return nil, 0, 0, errors.Errorf("invalid key prefix: %q", key)
	}
	key, indexID, err := encoding.DecodeUvarintAscending(key)
	return key, tableID, uint32(indexID), err
}
// DecodeDescMetadataID decodes a descriptor ID from a descriptor metadata key.
// It returns an error if the key does not belong to the system.descriptor
// table or if the decoded ID does not fit in a uint32.
func (d sqlDecoder) DecodeDescMetadataID(key roachpb.Key) (uint32, error) {
	// Extract table and index ID from key.
	remaining, tableID, _, err := d.DecodeIndexPrefix(key)
	if err != nil {
		return 0, err
	}
	if tableID != DescriptorTableID {
		return 0, errors.Errorf("key is not a descriptor table entry: %v", key)
	}
	// Extract the descriptor ID.
	_, id, err := encoding.DecodeUvarintAscending(remaining)
	if err != nil {
		return 0, err
	}
	// The uvarint decodes as uint64; guard the narrowing conversion.
	if id > math.MaxUint32 {
		return 0, errors.Errorf("descriptor ID %d exceeds uint32 bounds", id)
	}
	return uint32(id), nil
}
// DecodeTenantMetadataID decodes a tenant ID from a tenant metadata key.
// It returns an error if the key does not belong to the system.tenants table.
func (d sqlDecoder) DecodeTenantMetadataID(key roachpb.Key) (roachpb.TenantID, error) {
	// Extract table and index ID from key.
	remaining, tableID, _, err := d.DecodeIndexPrefix(key)
	if err != nil {
		return roachpb.TenantID{}, err
	}
	if tableID != TenantsTableID {
		return roachpb.TenantID{}, errors.Errorf("key is not a tenant table entry: %v", key)
	}
	// Extract the tenant ID. No bounds check is needed: tenant IDs are
	// full uint64 values, unlike the uint32 descriptor IDs above.
	_, id, err := encoding.DecodeUvarintAscending(remaining)
	if err != nil {
		return roachpb.TenantID{}, err
	}
	return roachpb.MakeTenantID(id), nil
}
|
package main
import (
	"fmt"
	"strconv"
	"sync"
	"time"
)
// main runs a writer and a reader goroutine against a shared map.
// Go maps are not safe for concurrent use: the original version raced a
// writer against a reader on m and crashes with "concurrent map read and
// map write" (or fails -race). A mutex now guards every access.
func main() {
	m := map[string]string{}
	var mu sync.Mutex // guards m
	go func() {
		for i := 0; i < 200000; i++ {
			istr := strconv.Itoa(i)
			mu.Lock()
			m[istr] = istr
			mu.Unlock()
		}
	}()
	go func() {
		for i := 0; i < 200000; i++ {
			mu.Lock()
			_, ok := m["02"]
			mu.Unlock()
			if ok {
				fmt.Println("aa")
			}
		}
	}()
	// Crude synchronization: give both goroutines time to finish.
	time.Sleep(3 * time.Second)
}
|
package util
import (
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"sync"
)
// info is the concrete type behind ServerTask, APIInfo and ServerInfo.
// Each constructor below populates only the fields it needs.
type info struct {
	ObjectMeta
	GitServerURL *gitURL     // The git server address
	GitAPIURL    *releaseURL // The address of the git api
	GitHome      string      // The base path of the stored files
	GitRevision  string      // Git SHA revision
	Token        string      // Auth token
}
// ServerTask allows starting administrative tasks on git repositories:
// resolving repository/release paths, initializing repositories and
// releases, and manipulating branch refs.
type ServerTask interface {
	ObjectMeta
	BaseReleasePath() string
	FullReleasePath() string
	BaseRepoPath() string
	FullRepoPath() string
	InitRepository() (bool, error)
	InitRelease(revision string) (bool, error)
	RemoveBranchRef(refName string) error
	WriteBranchRef(refPath, rev string) error
}

// APIInfo exposes information about the git API.
type APIInfo interface {
	ObjectMeta
	ReleaseURL() *releaseURL
}

// ServerInfo exposes information about the git server.
type ServerInfo interface {
	ObjectMeta
	GetCloneURL() *gitURL
}
// NewServerTask creates a new ServerTask rooted at gitHome.
func NewServerTask(gitHome string, meta ObjectMeta) ServerTask {
	return &info{GitHome: gitHome, ObjectMeta: meta}
}

// NewAPIInfo creates a new APIInfo for the given git API host.
func NewAPIInfo(gitAPIHost string, meta ObjectMeta) APIInfo {
	return &info{GitAPIURL: &releaseURL{addr: gitAPIHost}, ObjectMeta: meta}
}
// NewServerInfo creates a new ServerInfo. It parses gitServerAddr and
// returns an error when the address is not a valid URL.
func NewServerInfo(gitServerAddr string, meta ObjectMeta) (ServerInfo, error) {
	parsed, err := url.Parse(gitServerAddr)
	if err != nil {
		return nil, err
	}
	return &info{
		ObjectMeta: meta,
		GitServerURL: &gitURL{
			addr:  parsed,
			user:  meta.GetAuthUser(),
			token: meta.GetAuthToken(),
			repo:  meta.GetRepository(),
		},
	}, nil
}
// BaseRepoPath returns the absolute path for the base repository on the
// server (GitHome joined with the repository prefix).
func (i *info) BaseRepoPath() string {
	return filepath.Join(i.GitHome, repoPrefix)
}

// BaseReleasePath returns the absolute path for the base release folder on
// the server (GitHome joined with the release prefix).
func (i *info) BaseReleasePath() string {
	return filepath.Join(i.GitHome, releasePrefix)
}
// GetRepository returns the full repository name, e.g. owner/repo.
// It forwards explicitly to the embedded ObjectMeta implementation.
func (i *info) GetRepository() string {
	return i.ObjectMeta.GetRepository()
}
// FullRepoPath returns the full absolute path of the repository
// (BaseRepoPath joined with owner/repo).
func (i *info) FullRepoPath() string {
	return filepath.Join(i.BaseRepoPath(), i.GetRepository())
}

// FullReleasePath returns the full absolute path of the release folder for
// this repository (BaseReleasePath joined with owner/repo).
func (i *info) FullReleasePath() string {
	return filepath.Join(i.BaseReleasePath(), i.GetRepository())
}
// InitRepository initializes a new git bare repository at FullRepoPath.
// NOTE(review): a fresh &sync.Mutex{} is passed on every call, so this
// lock cannot serialize concurrent callers — confirm whether createRepo
// expects a shared mutex.
func (i *info) InitRepository() (bool, error) {
	return createRepo(i.FullRepoPath(), true, &sync.Mutex{})
}

// InitRelease initializes the repository folder where the releases for the
// given revision are going to be stored. Same per-call-mutex caveat as
// InitRepository.
func (i *info) InitRelease(revision string) (bool, error) {
	return createRepo(filepath.Join(i.FullReleasePath(), revision), false, &sync.Mutex{})
}
// RemoveBranchRef deletes the target refPath in a git repository.
// https://git-scm.com/docs/githooks#update
func (i *info) RemoveBranchRef(refPath string) error {
	return os.RemoveAll(filepath.Join(i.FullRepoPath(), refPath))
}

// WriteBranchRef writes a revision (followed by a newline, as git expects)
// into the ref file at refPath.
func (i *info) WriteBranchRef(refPath, rev string) error {
	p := filepath.Join(i.FullRepoPath(), refPath)
	return ioutil.WriteFile(p, []byte(fmt.Sprintf("%s\n", rev)), 0644)
}
// ReleaseURL returns the URL for the given repository;
// if GitRevision is set it will be used at the end of the URL.
// NOTE(review): i.GitAPIURL is a *releaseURL formatted with %s, which
// relies on it implementing fmt.Stringer — confirm.
func (i *info) ReleaseURL() *releaseURL {
	pathURL := filepath.Join(releasePrefix, i.GetRepository(), i.GitRevision)
	return &releaseURL{addr: fmt.Sprintf("%s/%s", i.GitAPIURL, pathURL)}
}

// GetCloneURL retrieves the url for cloning a repository.
func (i *info) GetCloneURL() *gitURL {
	return i.GitServerURL
}
|
package main
import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"math/rand"
	"os"
	"strings"
)
// Key is one half of an RSA key: the exponent and the modulus n = p*q.
type Key struct {
	key int // exponent (e for the public key, d for the private key)
	n   int // modulus
}

// KeyPair bundles the private and public keys produced by GenerateKeypair.
type KeyPair struct {
	Private Key
	Public  Key
}
// isPrime reports whether n is prime by trial division up to sqrt(n).
// Values below 2 (0, 1 and negatives) are rejected up front; the previous
// version never entered the loop for them and wrongly reported them prime.
// The sqrt bound is now computed once instead of on every iteration.
func isPrime(n int) bool {
	if n < 2 {
		return false
	}
	limit := int(math.Sqrt(float64(n)))
	for i := 2; i <= limit; i++ {
		if n%i == 0 {
			return false
		}
	}
	return true
}
// gcd returns the greatest common divisor of a and b using the standard
// Euclidean algorithm. Unlike the previous mutual-modulo loop, this form
// cannot divide by zero when one operand is 0: gcd(a, 0) == a.
func gcd(a int, b int) int {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}
// multiplicativeInverse returns e^-1 mod phi, i.e. the r in [0, phi) with
// e*r ≡ 1 (mod phi), assuming gcd(e, phi) == 1.
//
// This is the iterative extended Euclidean algorithm. It computes the same
// Bezout coefficient as the previous table-building version but in O(1)
// space instead of allocating a row slice per division step.
func multiplicativeInverse(e int, phi int) int {
	t, newT := 0, 1
	r, newR := phi, e
	for newR != 0 {
		q := r / newR
		t, newT = newT, t-q*newT
		r, newR = newR, r-q*newR
	}
	// Normalize the coefficient into [0, phi).
	res := t % phi
	if res < 0 {
		res += phi
	}
	return res
}
// GenerateKeypair builds an RSA key pair from two distinct primes p and q.
// The exponents satisfy e*d ≡ 1 (mod (p-1)(q-1)). Error strings now follow
// Go convention (lower case, no trailing period).
//
// NOTE(review): math/rand is never seeded here, so exponent selection is
// deterministic across runs on Go versions before 1.20 — confirm intent.
func GenerateKeypair(p int, q int) (KeyPair, error) {
	if !(isPrime(p) && isPrime(q)) {
		return KeyPair{}, errors.New("both numbers must be prime")
	}
	if p == q {
		return KeyPair{}, errors.New("p and q can't be equal")
	}
	n := p * q
	phi := (p - 1) * (q - 1)
	// Draw candidate exponents in [1, phi-1] until one is coprime with phi.
	e := rand.Intn(phi-1) + 1
	for gcd(e, phi) != 1 {
		e = rand.Intn(phi-1) + 1
	}
	d := multiplicativeInverse(e, phi)
	return KeyPair{Key{e, n}, Key{d, n}}, nil
}
// Encrypt computes c = m^key mod n for every rune m of plaintext.
//
// It uses the three-argument form of big.Int.Exp, which performs modular
// exponentiation. The previous two-step Exp-then-Mod computed the full
// power first, whose size grows with the exponent and can exhaust memory.
func Encrypt(pk Key, plaintext string) []int {
	exp := big.NewInt(int64(pk.key))
	mod := big.NewInt(int64(pk.n))
	cipher := []int{}
	for _, ch := range plaintext {
		c := new(big.Int).Exp(big.NewInt(int64(ch)), exp, mod)
		cipher = append(cipher, int(c.Int64()))
	}
	return cipher
}
// Decrypt reverses Encrypt: m = c^key mod n for every ciphertext value,
// rebuilding the plaintext rune by rune.
//
// Uses three-argument big.Int.Exp for modular exponentiation (the old
// Exp-then-Mod computed a potentially huge intermediate power) and a
// strings.Builder instead of quadratic string concatenation.
func Decrypt(pk Key, cipher []int) string {
	exp := big.NewInt(int64(pk.key))
	mod := big.NewInt(int64(pk.n))
	var b strings.Builder
	for _, ch := range cipher {
		m := new(big.Int).Exp(big.NewInt(int64(ch)), exp, mod)
		b.WriteRune(rune(m.Int64()))
	}
	return b.String()
}
// main reads two primes and a single word from stdin, then prints the
// word's RSA encryption and its round-tripped decryption.
func main() {
	var q, p int
	fmt.Print("Enter a number: ")
	fmt.Scan(&q)
	fmt.Print("Enter another number: ")
	fmt.Scan(&p)
	pk, err := GenerateKeypair(p, q)
	if err != nil {
		// Report why key generation failed instead of exiting silently.
		// The previous os.Exit(404) also truncated to 148: exit codes
		// are only 8 bits wide.
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Print("Enter text to encrypt: ")
	var text string
	fmt.Scan(&text) // reads a single whitespace-delimited word
	encrypted := Encrypt(pk.Private, text)
	decrypted := Decrypt(pk.Public, encrypted)
	fmt.Println(encrypted)
	fmt.Println(decrypted)
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package health
import (
"context"
"chromiumos/tast/errors"
"chromiumos/tast/local/croshealthd"
"chromiumos/tast/testing"
)
// audioInfo mirrors the JSON emitted by cros_healthd for the audio
// telemetry category.
type audioInfo struct {
	InputDeviceName  string `json:"input_device_name"`
	InputGain        int    `json:"input_gain"`
	InputMute        bool   `json:"input_mute"`
	OutputDeviceName string `json:"output_device_name"`
	OutputMute       bool   `json:"output_mute"`
	OutputVolume     int    `json:"output_volume"`
	SevereUnderruns  int    `json:"severe_underruns"`
	Underruns        int    `json:"underruns"`
}
// init registers the ProbeAudioInfo test with the tast framework. It
// depends on a running cros_healthd (crosHealthdRunning fixture) and the
// chrome + diagnostics software features.
func init() {
	testing.AddTest(&testing.Test{
		Func:         ProbeAudioInfo,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Check that we can probe cros_healthd for audio info",
		Contacts:     []string{"cros-tdm-tpe-eng@google.com"},
		Attr:         []string{"group:mainline"},
		SoftwareDeps: []string{"chrome", "diagnostics"},
		Fixture:      "crosHealthdRunning",
	})
}
// validateAudioData sanity-checks the probed audio telemetry: device names
// must be non-empty, gain/volume must lie in [0, 100], and underrun
// counters must be non-negative. It returns the first violation found.
func validateAudioData(audio *audioInfo) error {
	switch {
	case audio.InputDeviceName == "":
		return errors.New("Failed. input_device_name field is empty")
	case audio.InputGain < 0 || audio.InputGain > 100:
		return errors.Errorf("Failed. input_gain is not in a legal range [0, 100]: %d", audio.InputGain)
	case audio.OutputDeviceName == "":
		return errors.New("Failed. output_device_name field is empty")
	case audio.OutputVolume < 0 || audio.OutputVolume > 100:
		return errors.Errorf("Failed. output_volume is not in a legal range [0, 100]: %d", audio.OutputVolume)
	case audio.SevereUnderruns < 0:
		return errors.Errorf("Failed. severe_underruns is smaller than zero: %d", audio.SevereUnderruns)
	case audio.Underruns < 0:
		return errors.Errorf("Failed. underruns is smaller than zero: %d", audio.Underruns)
	}
	return nil
}
// ProbeAudioInfo asks cros_healthd for audio telemetry, decodes the JSON
// into audioInfo, and fails the test if any field is missing or out of
// range.
func ProbeAudioInfo(ctx context.Context, s *testing.State) {
	params := croshealthd.TelemParams{Category: croshealthd.TelemCategoryAudio}
	var audio audioInfo
	if err := croshealthd.RunAndParseJSONTelem(ctx, params, s.OutDir(), &audio); err != nil {
		s.Fatal("Failed to get audio telemetry info: ", err)
	}
	if err := validateAudioData(&audio); err != nil {
		s.Fatalf("Failed to validate audio data, err [%v]", err)
	}
}
|
package operator
import (
"context"
"fmt"
"reflect"
"strconv"
"strings"
"time"
"github.com/ghodss/yaml"
operatorv1 "github.com/openshift/api/operator/v1"
deschedulerv1beta1 "github.com/openshift/cluster-kube-descheduler-operator/pkg/apis/descheduler/v1beta1"
operatorconfigclientv1beta1 "github.com/openshift/cluster-kube-descheduler-operator/pkg/generated/clientset/versioned/typed/descheduler/v1beta1"
operatorclientinformers "github.com/openshift/cluster-kube-descheduler-operator/pkg/generated/informers/externalversions/descheduler/v1beta1"
"github.com/openshift/cluster-kube-descheduler-operator/pkg/operator/operatorclient"
"github.com/openshift/cluster-kube-descheduler-operator/pkg/operator/v410_00_assets"
"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
"github.com/openshift/library-go/pkg/operator/resource/resourceread"
"github.com/openshift/library-go/pkg/operator/v1helpers"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api/v1alpha1"
)
// DefaultImage is the descheduler container image used when none is configured.
const DefaultImage = "quay.io/openshift/origin-descheduler:latest"
// validStrategies is the set of accepted strategy names (compared
// case-insensitively). It currently supports the actual strategy names as
// defined in sigs.k8s.io/descheduler, as well as matching shortnames we
// have provided in the past for our operator and continue to support for
// now.
var validStrategies = sets.NewString(
	"duplicates",
	"removeduplicates",
	"interpodantiaffinity",
	"removepodsviolatinginterpodantiaffinity",
	"lownodeutilization",
	"nodeaffinity",
	"removepodsviolatingnodeaffinity",
	"nodetaints",
	"removepodsviolatingnodetaints",
	"removepodshavingtoomanyrestarts",
	"podlifetime")
// DeschedulerCommand provides the descheduler command with the policy
// config file mounted as a volume and a log-level flag for backwards
// compatibility with 3.11.
var DeschedulerCommand = []string{"/bin/descheduler", "--policy-config-file", "/policy-dir/policy.yaml", "--v", "2"}
// TargetConfigReconciler keeps the descheduler ConfigMap and Deployment in
// sync with the KubeDescheduler custom resource, driven by a rate-limited
// workqueue.
type TargetConfigReconciler struct {
	ctx               context.Context
	operatorClient    operatorconfigclientv1beta1.KubedeschedulersV1beta1Interface
	deschedulerClient *operatorclient.DeschedulerClient
	kubeClient        kubernetes.Interface
	eventRecorder     events.Recorder
	queue             workqueue.RateLimitingInterface
}
// NewTargetConfigReconciler builds a reconciler and wires the
// KubeDescheduler informer so that any add/update/delete enqueues a sync.
func NewTargetConfigReconciler(
	ctx context.Context,
	operatorConfigClient operatorconfigclientv1beta1.KubedeschedulersV1beta1Interface,
	operatorClientInformer operatorclientinformers.KubeDeschedulerInformer,
	deschedulerClient *operatorclient.DeschedulerClient,
	kubeClient kubernetes.Interface,
	eventRecorder events.Recorder,
) *TargetConfigReconciler {
	c := &TargetConfigReconciler{
		ctx:               ctx,
		operatorClient:    operatorConfigClient,
		deschedulerClient: deschedulerClient,
		kubeClient:        kubeClient,
		eventRecorder:     eventRecorder,
		queue:             workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "TargetConfigReconciler"),
	}
	operatorClientInformer.Informer().AddEventHandler(c.eventHandler())
	return c
}
// sync reads the KubeDescheduler config, validates it, applies the policy
// ConfigMap and Deployment, and records the deployment generation in the
// operator status. The receiver is now a pointer for consistency with
// every other TargetConfigReconciler method (all callers already hold a
// *TargetConfigReconciler) and to avoid copying the struct per sync.
func (c *TargetConfigReconciler) sync() error {
	descheduler, err := c.operatorClient.KubeDeschedulers(operatorclient.OperatorNamespace).Get(c.ctx, operatorclient.OperatorConfigName, metav1.GetOptions{})
	if err != nil {
		klog.ErrorS(err, "unable to get operator configuration", "namespace", operatorclient.OperatorNamespace, "kubedescheduler", operatorclient.OperatorConfigName)
		return err
	}
	if descheduler.Spec.DeschedulingIntervalSeconds == nil {
		return fmt.Errorf("descheduler should have an interval set")
	}
	if err := validateStrategies(descheduler.Spec.Strategies); err != nil {
		return err
	}
	// Redeploy when the policy ConfigMap changed.
	_, forceDeployment, err := c.manageConfigMap(descheduler)
	if err != nil {
		return err
	}
	deployment, _, err := c.manageDeployment(descheduler, forceDeployment)
	if err != nil {
		return err
	}
	_, _, err = v1helpers.UpdateStatus(c.deschedulerClient, func(status *operatorv1.OperatorStatus) error {
		resourcemerge.SetDeploymentGeneration(&status.Generations, deployment)
		return nil
	})
	return err
}
// validateStrategies ensures at least one strategy is requested, that no
// more than the number of known strategies is requested, and that every
// requested name (compared case-insensitively) is a known strategy.
// Fixes the "atleast" typo in the user-facing error message.
func validateStrategies(strategies []deschedulerv1beta1.Strategy) error {
	if len(strategies) == 0 {
		return fmt.Errorf("descheduler should have at least one strategy enabled and it should be one of %v", strings.Join(validStrategies.List(), ","))
	}
	if len(strategies) > len(validStrategies) {
		return fmt.Errorf("descheduler can have a maximum of %v strategies enabled at this point of time", len(validStrategies))
	}
	invalidStrategies := make([]string, 0, len(strategies))
	for _, strategy := range strategies {
		if !validStrategies.Has(strings.ToLower(strategy.Name)) {
			invalidStrategies = append(invalidStrategies, strategy.Name)
		}
	}
	if len(invalidStrategies) > 0 {
		return fmt.Errorf("expected one of the %v to be enabled but found following invalid strategies %v",
			strings.Join(validStrategies.List(), ","), strings.Join(invalidStrategies, ","))
	}
	return nil
}
// manageConfigMap renders the descheduler policy from the requested
// strategies into the bundled configmap asset and applies it. The second
// return value reports whether the ConfigMap changed (used to force a
// redeploy).
func (c *TargetConfigReconciler) manageConfigMap(descheduler *deschedulerv1beta1.KubeDescheduler) (*v1.ConfigMap, bool, error) {
	required := resourceread.ReadConfigMapV1OrDie(v410_00_assets.MustAsset("v4.1.0/kube-descheduler/configmap.yaml"))
	required.Name = descheduler.Name
	required.Namespace = descheduler.Namespace
	// Owned by the KubeDescheduler CR so it is garbage-collected with it.
	required.OwnerReferences = []metav1.OwnerReference{
		{
			APIVersion: "v1beta1",
			Kind:       "KubeDescheduler",
			Name:       descheduler.Name,
			UID:        descheduler.UID,
		},
	}
	configMapString, err := generateConfigMapString(descheduler.Spec.Strategies)
	if err != nil {
		return nil, false, err
	}
	required.Data = map[string]string{"policy.yaml": configMapString}
	return resourceapply.ApplyConfigMap(c.kubeClient.CoreV1(), c.eventRecorder, required)
}
// generateNamespaces collects include/exclude namespace lists from the
// strategy params (names matched case-insensitively, values split on
// commas); unknown names are logged and skipped.
func generateNamespaces(params []deschedulerv1beta1.Param) *deschedulerapi.Namespaces {
	ns := deschedulerapi.Namespaces{}
	for _, p := range params {
		switch strings.ToLower(p.Name) {
		case "includenamespaces":
			ns.Include = strings.Split(p.Value, ",")
		case "excludenamespaces":
			ns.Exclude = strings.Split(p.Value, ",")
		default:
			klog.Warningf("unknown Namespaces value: %s", p.Name)
		}
	}
	return &ns
}
// isPriorityThresholdParam reports whether the param (case-insensitively)
// is one of the two priority-threshold settings.
func isPriorityThresholdParam(param deschedulerv1beta1.Param) bool {
	name := strings.ToLower(param.Name)
	return name == "thresholdpriority" || name == "thresholdpriorityclassname"
}
// generatePriorityThreshold extracts the priority-threshold settings from
// the strategy params: a class name and/or a numeric priority (last
// occurrence of each wins). Setting both is rejected.
func generatePriorityThreshold(params []deschedulerv1beta1.Param) (string, *int32, error) {
	var className string
	var numericPriority *int32
	for _, p := range params {
		switch strings.ToLower(p.Name) {
		case "thresholdpriority":
			parsed, err := strconv.Atoi(p.Value)
			if err != nil {
				return "", nil, err
			}
			v := int32(parsed)
			numericPriority = &v
		case "thresholdpriorityclassname":
			className = p.Value
		default:
			klog.Warningf("unknown PriorityThreshold value: %s", p.Name)
		}
	}
	if len(className) > 0 && numericPriority != nil {
		return "", nil, fmt.Errorf("cannot set both thresholdPriorityClassName and thresholdPriority")
	}
	return className, numericPriority, nil
}
// generateConfigMapString renders the requested strategies into an
// upstream DeschedulerPolicy and returns it as YAML for the policy
// ConfigMap.
// TODO(@damemi): Deprecate this in favor of an actual upstream Descheduler policy, see https://github.com/openshift/cluster-kube-descheduler-operator/issues/119
//
// BUG FIX: the lownodeutilization switch below matches on the lowercased
// param name, so the previous `case "nodes", "numberOfNodes"` could never
// match "numberOfNodes"; it is now "numberofnodes".
func generateConfigMapString(requestedStrategies []deschedulerv1beta1.Strategy) (string, error) {
	policy := &deschedulerapi.DeschedulerPolicy{Strategies: make(deschedulerapi.StrategyList)}
	// There is no need to do validation here. By the time we reach here,
	// validation would have already happened (see validateStrategies).
	for _, strategy := range requestedStrategies {
		switch strings.ToLower(strategy.Name) {
		case "duplicates", "removeduplicates":
			removeDuplicates := deschedulerapi.RemoveDuplicates{}
			for _, param := range strategy.Params {
				switch strings.ToLower(param.Name) {
				case "excludeownerkinds":
					removeDuplicates.ExcludeOwnerKinds = strings.Split(param.Value, ",")
				}
			}
			priorityClassName, priority, err := generatePriorityThreshold(strategy.Params)
			if err != nil {
				return "", err
			}
			policy.Strategies["RemoveDuplicates"] = deschedulerapi.DeschedulerStrategy{Enabled: true,
				Params: &deschedulerapi.StrategyParameters{
					RemoveDuplicates:           &removeDuplicates,
					ThresholdPriority:          priority,
					ThresholdPriorityClassName: priorityClassName,
				},
			}
		case "interpodantiaffinity", "removepodsviolatinginterpodantiaffinity":
			priorityClassName, priority, err := generatePriorityThreshold(strategy.Params)
			if err != nil {
				return "", err
			}
			policy.Strategies["RemovePodsViolatingInterPodAntiAffinity"] = deschedulerapi.DeschedulerStrategy{Enabled: true,
				Params: &deschedulerapi.StrategyParameters{
					ThresholdPriorityClassName: priorityClassName,
					ThresholdPriority:          priority,
					Namespaces:                 generateNamespaces(strategy.Params),
				},
			}
		case "lownodeutilization":
			utilizationThresholds := deschedulerapi.NodeResourceUtilizationThresholds{NumberOfNodes: 0}
			thresholds := deschedulerapi.ResourceThresholds{}
			targetThresholds := deschedulerapi.ResourceThresholds{}
			for _, param := range strategy.Params {
				// Priority params are handled by generatePriorityThreshold
				// below and are not numeric thresholds.
				if isPriorityThresholdParam(param) {
					continue
				}
				value, err := strconv.Atoi(param.Value)
				if err != nil {
					return "", err
				}
				switch strings.ToLower(param.Name) {
				case "cputhreshold":
					thresholds[v1.ResourceCPU] = deschedulerapi.Percentage(value)
				case "memorythreshold":
					thresholds[v1.ResourceMemory] = deschedulerapi.Percentage(value)
				case "podsthreshold":
					thresholds[v1.ResourcePods] = deschedulerapi.Percentage(value)
				case "cputargetthreshold":
					targetThresholds[v1.ResourceCPU] = deschedulerapi.Percentage(value)
				case "memorytargetthreshold":
					targetThresholds[v1.ResourceMemory] = deschedulerapi.Percentage(value)
				case "podstargetthreshold":
					targetThresholds[v1.ResourcePods] = deschedulerapi.Percentage(value)
				case "nodes", "numberofnodes": // was "numberOfNodes": unreachable after ToLower
					utilizationThresholds.NumberOfNodes = value
				}
			}
			if len(thresholds) > 0 {
				utilizationThresholds.Thresholds = thresholds
			}
			if len(targetThresholds) > 0 {
				utilizationThresholds.TargetThresholds = targetThresholds
			}
			priorityClassName, priority, err := generatePriorityThreshold(strategy.Params)
			if err != nil {
				return "", err
			}
			policy.Strategies["LowNodeUtilization"] = deschedulerapi.DeschedulerStrategy{Enabled: true,
				Params: &deschedulerapi.StrategyParameters{
					NodeResourceUtilizationThresholds: &utilizationThresholds,
					ThresholdPriorityClassName:        priorityClassName,
					ThresholdPriority:                 priority,
				},
			}
		case "nodeaffinity", "removepodsviolatingnodeaffinity":
			priorityClassName, priority, err := generatePriorityThreshold(strategy.Params)
			if err != nil {
				return "", err
			}
			policy.Strategies["RemovePodsViolatingNodeAffinity"] = deschedulerapi.DeschedulerStrategy{Enabled: true,
				Params: &deschedulerapi.StrategyParameters{
					NodeAffinityType:           []string{"requiredDuringSchedulingIgnoredDuringExecution"},
					ThresholdPriorityClassName: priorityClassName,
					ThresholdPriority:          priority,
					Namespaces:                 generateNamespaces(strategy.Params),
				},
			}
		case "nodetaints", "removepodsviolatingnodetaints":
			priorityClassName, priority, err := generatePriorityThreshold(strategy.Params)
			if err != nil {
				return "", err
			}
			policy.Strategies["RemovePodsViolatingNodeTaints"] = deschedulerapi.DeschedulerStrategy{Enabled: true,
				Params: &deschedulerapi.StrategyParameters{
					ThresholdPriorityClassName: priorityClassName,
					ThresholdPriority:          priority,
					Namespaces:                 generateNamespaces(strategy.Params),
				},
			}
		case "removepodshavingtoomanyrestarts":
			podsHavingTooManyRestarts := deschedulerapi.PodsHavingTooManyRestarts{}
			for _, param := range strategy.Params {
				switch strings.ToLower(param.Name) {
				case "podrestartthreshold":
					value, err := strconv.Atoi(param.Value)
					if err != nil {
						return "", err
					}
					podsHavingTooManyRestarts.PodRestartThreshold = int32(value)
				case "includinginitcontainers":
					value, err := strconv.ParseBool(param.Value)
					if err != nil {
						return "", err
					}
					podsHavingTooManyRestarts.IncludingInitContainers = value
				}
			}
			priorityClassName, priority, err := generatePriorityThreshold(strategy.Params)
			if err != nil {
				return "", err
			}
			policy.Strategies["RemovePodsHavingTooManyRestarts"] = deschedulerapi.DeschedulerStrategy{Enabled: true,
				Params: &deschedulerapi.StrategyParameters{
					PodsHavingTooManyRestarts:  &podsHavingTooManyRestarts,
					ThresholdPriorityClassName: priorityClassName,
					ThresholdPriority:          priority,
					Namespaces:                 generateNamespaces(strategy.Params),
				},
			}
		case "podlifetime":
			var lifetimeSeconds *uint
			for _, param := range strategy.Params {
				switch strings.ToLower(param.Name) {
				case "maxpodlifetimeseconds":
					value, err := strconv.Atoi(param.Value)
					if err != nil {
						return "", err
					}
					val := uint(value)
					lifetimeSeconds = &val
				}
			}
			priorityClassName, priority, err := generatePriorityThreshold(strategy.Params)
			if err != nil {
				return "", err
			}
			policy.Strategies["PodLifeTime"] = deschedulerapi.DeschedulerStrategy{Enabled: true,
				Params: &deschedulerapi.StrategyParameters{
					MaxPodLifeTimeSeconds:      lifetimeSeconds,
					ThresholdPriorityClassName: priorityClassName,
					ThresholdPriority:          priority,
					Namespaces:                 generateNamespaces(strategy.Params),
				},
			}
		default:
			klog.Warningf("not using unknown strategy '%s'", strategy.Name)
		}
	}
	policyBytes, err := yaml.Marshal(policy)
	if err != nil {
		return "", err
	}
	return string(policyBytes), nil
}
// manageDeployment renders the bundled deployment asset for the given
// KubeDescheduler (image, interval flag, extra flags, policy ConfigMap
// mount) and applies it, forcing a rollout when the ConfigMap changed, the
// deployment is missing, or the rendered spec differs from the live one.
func (c *TargetConfigReconciler) manageDeployment(descheduler *deschedulerv1beta1.KubeDescheduler, forceDeployment bool) (*appsv1.Deployment, bool, error) {
	required := resourceread.ReadDeploymentV1OrDie(v410_00_assets.MustAsset("v4.1.0/kube-descheduler/deployment.yaml"))
	required.Name = descheduler.Name
	required.Namespace = descheduler.Namespace
	// Owned by the KubeDescheduler CR so it is garbage-collected with it.
	required.OwnerReferences = []metav1.OwnerReference{
		{
			APIVersion: "v1beta1",
			Kind:       "KubeDescheduler",
			Name:       descheduler.Name,
			UID:        descheduler.UID,
		},
	}
	required.Spec.Template.Spec.Containers[0].Image = descheduler.Spec.Image
	// "%ss" appends the seconds unit: e.g. 3600 -> "--descheduling-interval=3600s".
	required.Spec.Template.Spec.Containers[0].Args = append(required.Spec.Template.Spec.Containers[0].Args,
		fmt.Sprintf("--descheduling-interval=%ss", strconv.Itoa(int(*descheduler.Spec.DeschedulingIntervalSeconds))))
	required.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name = descheduler.Name
	// Add any additional flags that were specified
	if len(descheduler.Spec.Flags) > 0 {
		required.Spec.Template.Spec.Containers[0].Args = append(required.Spec.Template.Spec.Containers[0].Args, descheduler.Spec.Flags...)
	}
	if !forceDeployment {
		existingDeployment, err := c.kubeClient.AppsV1().Deployments(required.Namespace).Get(c.ctx, descheduler.Name, metav1.GetOptions{})
		if err != nil {
			if apierrors.IsNotFound(err) {
				forceDeployment = true
			} else {
				return nil, false, err
			}
		} else {
			forceDeployment = deploymentChanged(existingDeployment, required)
		}
	}
	// FIXME: this method will disappear in 4.6 so we need to fix this ASAP
	return resourceapply.ApplyDeploymentWithForce(
		c.kubeClient.AppsV1(),
		c.eventRecorder,
		required,
		resourcemerge.ExpectedDeploymentGeneration(required, descheduler.Status.Generations),
		forceDeployment)
}
// deploymentChanged reports whether the existing deployment differs from
// the desired one in name, namespace, container image, policy ConfigMap
// reference, or container args (compared as sets, so argument order is
// ignored).
// NOTE(review): sets.String.Equal would be the idiomatic comparison here,
// but replacing reflect.DeepEqual would orphan the package-level reflect
// import — keep them in sync if this changes.
func deploymentChanged(existing, new *appsv1.Deployment) bool {
	newArgs := sets.NewString(new.Spec.Template.Spec.Containers[0].Args...)
	existingArgs := sets.NewString(existing.Spec.Template.Spec.Containers[0].Args...)
	return existing.Name != new.Name ||
		existing.Namespace != new.Namespace ||
		existing.Spec.Template.Spec.Containers[0].Image != new.Spec.Template.Spec.Containers[0].Image ||
		existing.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name != new.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap.LocalObjectReference.Name ||
		!reflect.DeepEqual(newArgs, existingArgs)
}
// Run starts the TargetConfigReconciler and blocks until stopCh is closed.
// (The previous comment referred to the kube-scheduler; this controller
// only drives the descheduler reconciliation loop.)
func (c *TargetConfigReconciler) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer c.queue.ShutDown()
	klog.Infof("Starting TargetConfigReconciler")
	defer klog.Infof("Shutting down TargetConfigReconciler")
	// doesn't matter what workers say, only start one.
	go wait.Until(c.runWorker, time.Second, stopCh)
	<-stopCh
}
// runWorker drains the workqueue until it is shut down.
func (c *TargetConfigReconciler) runWorker() {
	for c.processNextWorkItem() {
	}
}
// processNextWorkItem handles one key from the queue: it runs sync,
// forgets the key on success, and re-queues it with rate limiting on
// failure. It returns false only when the queue has been shut down.
func (c *TargetConfigReconciler) processNextWorkItem() bool {
	key, quit := c.queue.Get()
	if quit {
		return false
	}
	defer c.queue.Done(key)
	if err := c.sync(); err != nil {
		utilruntime.HandleError(fmt.Errorf("%v failed with : %v", key, err))
		c.queue.AddRateLimited(key)
		return true
	}
	c.queue.Forget(key)
	return true
}
// eventHandler queues the operator to check spec and status. All three
// event types enqueue the same fixed workQueueKey (defined elsewhere in
// this package), coalescing bursts into a single sync.
func (c *TargetConfigReconciler) eventHandler() cache.ResourceEventHandler {
	return cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { c.queue.Add(workQueueKey) },
		UpdateFunc: func(old, new interface{}) { c.queue.Add(workQueueKey) },
		DeleteFunc: func(obj interface{}) { c.queue.Add(workQueueKey) },
	}
}
|
package main
import (
"database/sql"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strconv"
_ "github.com/go-sql-driver/mysql"
"github.com/gorilla/mux"
)
// Todo mirrors one row of the `todo` table and the JSON payload exchanged
// with clients.
type Todo struct {
	UserID    int    `json:"userId"`
	ID        int    `json:"id"`
	Title     string `json:"title"`
	Completed bool   `json:"completed"`
}
// Database connection settings.
//
// SECURITY(review): credentials are hard-coded in source; move DBPassword
// (at minimum) into an environment variable or secret store.
const (
	// DBDriver is the database/sql driver name.
	DBDriver = "mysql"
	// DBName is the schema name.
	DBName = "todo"
	// DBUser is the database username.
	DBUser = "root"
	// DBPassword is the database password.
	DBPassword = "dhirajpatel"
	// DBURL is the DSN used to open the connection.
	DBURL = DBUser + ":" + DBPassword + "@/" + DBName
)

// GetDB opens a connection pool and verifies connectivity with Ping.
// On any failure the pool is closed and a nil *sql.DB is returned, so
// callers cannot leak a half-open pool (the previous version returned the
// unusable handle and never closed it after a failed Ping).
func GetDB() (*sql.DB, error) {
	db, err := sql.Open(DBDriver, DBURL)
	if err != nil {
		return nil, err
	}
	if err := db.Ping(); err != nil {
		db.Close()
		return nil, err
	}
	return db, nil
}
// main wires the REST routes for the todo service and serves on :5000.
// http.ListenAndServe only returns on failure, so its error is fatal; the
// previous version silently discarded it.
func main() {
	router := mux.NewRouter()
	router.HandleFunc("/tasks", post).Methods("POST")
	router.HandleFunc("/tasks", get).Methods("GET")
	router.HandleFunc("/tasks/{uid}/{id}", put).Methods("PUT")
	router.HandleFunc("/tasks/{uid}/{id}", delete).Methods("DELETE")
	log.Fatal(http.ListenAndServe(":5000", router))
}
func get(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
bodyBytes, _ := ioutil.ReadAll(r.Body)
fmt.Println(bodyBytes)
bodyString := string(bodyBytes)
fmt.Println("API Response as String:\n" + bodyString)
var todoStruct Todo
json.Unmarshal(bodyBytes, &todoStruct)
todos := make([]Todo, 0)
db, err := GetDB()
if err != nil {
fmt.Println(err)
return
}
defer db.Close()
rows, err := db.Query(
`SELECT userId,
id,
title,
completed
FROM todo;
`)
if err != nil {
fmt.Println(err)
return
}
for rows.Next() {
t := Todo{}
rows.Scan(&t.UserID, &t.ID, &t.Title, &t.Completed)
todos = append(todos, t)
}
log.Println("todos: ", todos)
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(todos)
}
// post inserts a todo parsed from the JSON request body and echoes it back.
// Bad input yields 400; database failures yield 500.
func post(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()

	bodyBytes, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "cannot read request body", http.StatusBadRequest)
		return
	}
	var todoStruct Todo
	if err := json.Unmarshal(bodyBytes, &todoStruct); err != nil {
		// The original ignored unmarshal errors and inserted zero values.
		http.Error(w, "invalid JSON body", http.StatusBadRequest)
		return
	}

	db, err := GetDB()
	if err != nil {
		fmt.Println(err)
		http.Error(w, "database unavailable", http.StatusInternalServerError)
		return
	}
	// The original never closed the pool, leaking it on every request.
	defer db.Close()

	_, err = db.Exec(
		`INSERT INTO todo (userId, id, title, completed)
		VALUES (?, ?, ?, ?)`,
		todoStruct.UserID, todoStruct.ID, todoStruct.Title, todoStruct.Completed)
	if err != nil {
		fmt.Println(err)
		http.Error(w, "insert failed", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(todoStruct); err != nil {
		fmt.Println(err)
	}
}
// put updates the title/completed fields of the todo addressed by the
// {uid}/{id} path parameters and echoes the updated entity as JSON.
func put(w http.ResponseWriter, r *http.Request) {
	userid := mux.Vars(r)["uid"]
	id := mux.Vars(r)["id"]
	defer r.Body.Close()

	// Validate path parameters up front; they are echoed back as ints.
	ID, err := strconv.Atoi(id)
	if err != nil {
		http.Error(w, "id must be an integer", http.StatusBadRequest)
		return
	}
	// BUG FIX: the original parsed `id` a second time here, so the userId in
	// the response was wrong whenever uid != id.
	UID, err := strconv.Atoi(userid)
	if err != nil {
		http.Error(w, "uid must be an integer", http.StatusBadRequest)
		return
	}

	bodyBytes, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "cannot read request body", http.StatusBadRequest)
		return
	}
	var todoStruct Todo
	if err := json.Unmarshal(bodyBytes, &todoStruct); err != nil {
		http.Error(w, "invalid JSON body", http.StatusBadRequest)
		return
	}

	db, err := GetDB()
	if err != nil {
		fmt.Println(err)
		http.Error(w, "database unavailable", http.StatusInternalServerError)
		return
	}
	// The original never closed the pool, leaking it on every request.
	defer db.Close()

	_, err = db.Exec(
		`UPDATE todo SET title=? , completed=? WHERE (userId=? AND id=?)`,
		todoStruct.Title, todoStruct.Completed, userid, id)
	if err != nil {
		fmt.Println(err)
		http.Error(w, "update failed", http.StatusInternalServerError)
		return
	}

	todoStruct.ID = ID
	todoStruct.UserID = UID
	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(todoStruct); err != nil {
		fmt.Println(err)
	}
}
// delete removes the todo addressed by the {uid}/{id} path parameters.
// NOTE(review): the function name shadows the builtin `delete`; renaming
// would touch the router registration, so it is only flagged here.
func delete(w http.ResponseWriter, r *http.Request) {
	userid := mux.Vars(r)["uid"]
	id := mux.Vars(r)["id"]

	db, err := GetDB()
	if err != nil {
		fmt.Println(err)
		http.Error(w, "database unavailable", http.StatusInternalServerError)
		return
	}
	// The original never closed the pool, leaking it on every request.
	defer db.Close()

	_, err = db.Exec(`DELETE FROM todo WHERE (userId=? AND id=?)`, userid, id)
	if err != nil {
		fmt.Println(err)
		http.Error(w, "delete failed", http.StatusInternalServerError)
		return
	}

	// The original called WriteHeader(204) AFTER Write, which is a no-op
	// (the first Write already sent 200) and 204 must not carry a body
	// anyway. Keep the 200 + message wire behavior and drop the dead call.
	w.Write([]byte("Data Deleted !"))
}
|
package main
import "fmt"
// Constants study: p is a package-level string constant.
// (Original comment "常量 学习" = "constants, learning".)
const p ="date & taxes"
// main prints the package-level constant p and a function-local constant.
func main() {
	const answer = 42
	fmt.Println(p)
	fmt.Println(answer)
}
|
package lexers
import (
"regexp"
)
// TODO(moorereason): can this be factored away?
//
// bashAnalyserRe matches a shebang line invoking bash, zsh, sh, or ksh,
// either directly (#!/bin/bash) or via env (#!/usr/bin/env bash).
var bashAnalyserRe = regexp.MustCompile(`(?m)^#!.*/bin/(?:env |)(?:bash|zsh|sh|ksh)`)
// init registers a content analyser for the bash lexer: text beginning with
// a shell shebang scores a certain match, anything else scores zero.
func init() { // nolint: gochecknoinits
	Get("bash").SetAnalyser(func(text string) float32 {
		if bashAnalyserRe.MatchString(text) {
			return 1.0
		}
		return 0.0
	})
}
|
package datastore
import (
"context"
"errors"
"cloud.google.com/go/datastore"
"github.com/go-kit/kit/log"
"google.golang.org/api/iterator"
"github.com/revas/animo-service/pkg"
)
// GoogleDatastoreAnimoService implements animo.AnimoService backed by
// Google Cloud Datastore.
type GoogleDatastoreAnimoService struct {
	Logger log.Logger        // logger for non-fatal errors
	Client *datastore.Client // handle to the Datastore backend
}
// Ensure GoogleDatastoreAnimoService implements the animo.AnimoService interface.
var _ animo.AnimoService = &GoogleDatastoreAnimoService{}
// GetOrCreateProfile returns the profile bound to the given identity,
// creating a fresh one when no existing profile matches.
func (svc *GoogleDatastoreAnimoService) GetOrCreateProfile(ctx context.Context, identity string) (*animo.Profile, error) {
	profile, err := findProfileByIdentity(ctx, svc, identity)
	if err != nil {
		return nil, err
	}
	// An empty ID marks "not found". Also guard against a nil profile so a
	// (nil, nil) result from the lookup cannot panic here.
	if profile == nil || profile.ID == "" {
		profile, err = createProfileFromIdentity(ctx, svc.Client, identity)
		if err != nil {
			return nil, err
		}
	}
	return profile, nil
}
// ResolveProfilesAliases maps each alias to a profile ID. The reserved alias
// "me" resolves via the "Identity" value carried in the context; all other
// aliases are looked up in the datastore.
func (svc *GoogleDatastoreAnimoService) ResolveProfilesAliases(ctx context.Context, profilesAliases []string) ([]string, error) {
	// NOTE: the parameter was renamed from `context` — it shadowed the
	// context package.
	profilesIds := make([]string, 0, len(profilesAliases))
	for _, alias := range profilesAliases {
		var profile *animo.Profile
		var err error
		if alias == "me" {
			// The original asserted ctx.Value(...).(string) unchecked, which
			// panics when the value is absent; fail with an error instead.
			identity, ok := ctx.Value("Identity").(string)
			if !ok {
				return nil, errors.New("identity is missing from context")
			}
			profile, err = svc.GetOrCreateProfile(ctx, identity)
		} else {
			profile, err = findProfileByAlias(ctx, svc, alias)
		}
		if err != nil {
			return nil, err
		}
		profilesIds = append(profilesIds, profile.ID)
	}
	return profilesIds, nil
}
// GetProfiles loads the profiles stored under the given ids, in order.
func (svc *GoogleDatastoreAnimoService) GetProfiles(ctx context.Context, profilesIds []string) ([]*animo.Profile, error) {
	keys := makeDatastoreKeysFromIds(profilesIds)
	result := make([]*animo.Profile, len(keys))
	if err := svc.Client.GetMulti(ctx, keys, result); err != nil {
		return nil, err
	}
	return result, nil
}
// SearchProfiles returns up to five profiles whose alias sorts at or after
// the given filter, ordered by alias.
func (svc *GoogleDatastoreAnimoService) SearchProfiles(ctx context.Context, filter string) ([]*animo.Profile, error) {
	query := datastore.NewQuery("Profiles").
		Filter("Alias >=", filter).
		Order("Alias").
		Limit(5)
	it := svc.Client.Run(ctx, query)
	var profiles []*animo.Profile
	for {
		var profile animo.Profile
		_, err := it.Next(&profile)
		if err == iterator.Done {
			break
		}
		if err != nil {
			// The original only logged here, then appended the half-read
			// profile and kept iterating — a persistent iterator error could
			// loop forever. Fail fast and let the caller handle it.
			return nil, err
		}
		profiles = append(profiles, &profile)
	}
	return profiles, nil
}
// UpdateProfiles overwrites the stored profiles identified by profilesIds
// with the mutable fields (Alias, Name, Email, Picture) of the given
// profiles, and returns the persisted result.
//
// NOTE(review): profilesIds and profiles are assumed to be parallel slices
// of equal length — confirm callers guarantee this, otherwise the index
// below can panic.
func (svc *GoogleDatastoreAnimoService) UpdateProfiles(ctx context.Context, profilesIds []string, profiles []*animo.Profile) ([]*animo.Profile, error) {
	profilesKeys := makeDatastoreKeysFromIds(profilesIds)
	persistedProfiles := make([]*animo.Profile, len(profilesKeys))
	// Load the current state of every profile being updated.
	err := svc.Client.GetMulti(ctx, profilesKeys, persistedProfiles)
	if err != nil {
		return nil, err
	}
	for index, persistedProfile := range persistedProfiles {
		updatedProfile := profiles[index]
		// Alias changes must not collide with an existing profile; the
		// reserved alias "me" is never persisted.
		if persistedProfile.Alias != updatedProfile.Alias && updatedProfile.Alias != "me" {
			profile, err := findProfileByAlias(ctx, svc, updatedProfile.Alias)
			if err != nil {
				return nil, err
			}
			// A non-empty ID means another profile already owns this alias.
			if profile.ID != "" {
				return nil, errors.New("profile alias is not available")
			}
			persistedProfile.Alias = updatedProfile.Alias
		}
		// Name and email are mandatory; reject the whole batch otherwise.
		if updatedProfile.Name == "" || updatedProfile.Email == "" {
			return nil, errors.New("profile values are empty")
		}
		persistedProfile.Name = updatedProfile.Name
		persistedProfile.Email = updatedProfile.Email
		persistedProfile.Picture = updatedProfile.Picture
	}
	// Write all updates back in a single batch.
	_, err = svc.Client.PutMulti(ctx, profilesKeys, persistedProfiles)
	if err != nil {
		return nil, err
	}
	return persistedProfiles, nil
}
|
package interfaces
import "github.com/t-ash0410/tdd-sample/backend/internal/api/todo/entities"
// IListUsecase lists the existing tasks.
type IListUsecase interface {
	// Handle fills result with the current tasks; returns an error on failure.
	Handle(result *[]entities.Task) error
}
// IAddUsecase adds a new task.
type IAddUsecase interface {
	// Handle creates a task with the given name and description; returns an
	// error on failure.
	Handle(name string, description string) error
}
|
package main
import "fmt"
// Directon enumerates the four compass directions.
// NOTE(review): the name looks like a typo for "Direction"; renaming would
// touch every use site, so it is only flagged here.
type Directon int
// Compass directions in clockwise order; North is the zero value.
const (
	North Directon = iota // 0
	East                  // 1
	South                 // 2
	West                  // 3
)
// String returns the direction's name, implementing fmt.Stringer. Values
// outside the declared range are reported as "Directon(n)" instead of
// panicking on the array index, as the original did.
func (d Directon) String() string {
	names := [...]string{"North", "East", "South", "West"}
	if d < 0 || int(d) >= len(names) {
		return fmt.Sprintf("Directon(%d)", int(d))
	}
	return names[d]
}
// main prints a sample direction via its Stringer implementation.
func main() {
	d := South
	fmt.Println(d)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.