text stringlengths 11 4.05M |
|---|
package main
//58. 最后一个单词的长度
//给你一个字符串 s,由若干单词组成,单词前后用一些空格字符隔开。返回字符串中最后一个单词的长度。
//
//单词 是指仅由字母组成、不包含任何空格字符的最大子字符串。
//
//
//
//示例 1:
//
//输入:s = "Hello World"
//输出:5
//示例 2:
//
//输入:s = " fly me to the moon "
//输出:4
//示例 3:
//
//输入:s = "luffy is still joyboy"
//输出:6
//
//
//提示:
//
//1 <= s.length <= 104
//s 仅有英文字母和空格 ' ' 组成
//s 中至少存在一个单词
// lengthOfLastWord returns the length of the last word in s, where a
// word is a maximal run of non-space characters (LeetCode 58).
//
// It scans backwards from the end: first past trailing spaces, then
// over the final word, counting its bytes. The added index >= 0 guard
// in the first loop makes the function total: it returns 0 for an
// empty or all-space string instead of panicking on s[-1] (the
// original relied on the problem's "at least one word" guarantee).
func lengthOfLastWord(s string) int {
	index := len(s) - 1
	// Skip trailing spaces, stopping at the front of the string.
	for index >= 0 && s[index] == ' ' {
		index--
	}
	// Count the characters of the last word.
	var ans int
	for index >= 0 && s[index] != ' ' {
		ans++
		index--
	}
	return ans
}
|
package indexer
import (
"fmt"
"os"
"unsafe"
"github.com/edsrzf/mmap-go"
"github.com/gdbu/atoms"
"github.com/hatchify/errors"
)
// New will return a new Indexer backed by the given file. The file is
// created if necessary, sized via setSize, and memory-mapped so the
// index counter lives directly in the mapped bytes.
func New(filename string) (ip *Indexer, err error) {
	var i Indexer
	if i.f, err = os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0744); err != nil {
		err = fmt.Errorf("error opening file \"%s\": %v", filename, err)
		return
	}
	// Close the file handle if any later initialization step fails.
	defer func() {
		if err != nil {
			i.f.Close()
		}
	}()
	if err = i.setSize(); err != nil {
		err = fmt.Errorf("error setting size: %v", err)
		return
	}
	// Pass mmap.RDWR (the mmap-go protection flag), not os.O_RDWR (an
	// os.OpenFile flag): the two constant sets are unrelated, and the
	// original call only behaved correctly because their numeric values
	// happen to coincide.
	if i.mm, err = mmap.Map(i.f, mmap.RDWR, 0); err != nil {
		err = fmt.Errorf("error initializing MMAP: %v", err)
		return
	}
	// Set underlying index bytes as MMAP bytes
	i.index = (*atoms.Uint64)(unsafe.Pointer(&i.mm[0]))
	// Associate returning pointer to created Indexer
	ip = &i
	return
}
// Indexer manages indexes
type Indexer struct {
	// f is the backing file for the memory map.
	f *os.File
	// mm is the memory-mapped view of f.
	mm mmap.MMap
	// index points at the first 8 bytes of mm, so atomic loads and
	// stores read and write the mapped file directly.
	index *atoms.Uint64
	// closed latches to true once Close has run, guarding double-close.
	closed atoms.Bool
}
// Get will get the current Indexer value (an atomic load from the
// memory-mapped counter).
func (i *Indexer) Get() (value uint64) {
	return i.index.Load()
}
// Next will increment the Indexer value and return the value held
// before the increment (post-increment semantics: Add(1) yields the
// new value, so subtracting 1 gives the caller's reserved index).
func (i *Indexer) Next() (next uint64) {
	return i.index.Add(1) - 1
}
// Set will set the current Indexer value (an atomic store into the
// memory-mapped counter).
func (i *Indexer) Set(value uint64) {
	i.index.Store(value)
}
// Flush will force a flush of the memory-mapped region to disk.
// Note: The OS handles this automatically for MMAP data. This isn't necessary for most use-cases.
// This can be used to for situations where ACID compliance needs to be 100% guaranteed
func (i *Indexer) Flush() (err error) {
	return i.mm.Flush()
}
// Close will close an Indexer: it flushes and unmaps the MMAP region
// first, then closes the underlying file. Repeated calls return
// errors.ErrIsClosed.
func (i *Indexer) Close() (err error) {
	if !i.closed.Set(true) {
		return errors.ErrIsClosed
	}
	var errs errors.ErrorList
	// Flush and unmap while the file descriptor is still open, then
	// close the file last. (The original closed the file before
	// flushing/unmapping, inverting the teardown order.)
	errs.Push(i.mm.Flush())
	errs.Push(i.mm.Unmap())
	errs.Push(i.f.Close())
	return errs.Err()
}
// setSize ensures the backing file is exactly 64 bytes so the mapped
// region always contains the 8-byte index value.
// NOTE(review): only the first 8 bytes are used; 64 looks like a
// cache-line-sized allocation — confirm intent before changing.
func (i *Indexer) setSize() (err error) {
	var fi os.FileInfo
	if fi, err = i.f.Stat(); err != nil {
		err = fmt.Errorf("error getting file information: %v", err)
		return
	}
	// Already the expected size; nothing to do.
	if fi.Size() == 64 {
		return
	}
	// Grow (or shrink) the file to the fixed 64-byte size.
	return i.f.Truncate(64)
}
|
package BLC
import (
"bytes"
"encoding/hex"
"fmt"
)
// PHBTXInput represents a transaction input.
type PHBTXInput struct {
	PHBTxHash []byte // hash of the transaction being spent
	PHBVout int // index of the referenced TXOutput within that transaction's Vout
	PHBSignature []byte // digital signature
	PHBPublicKey []byte // public key (from the wallet)
}
// PHBPrintInfo dumps the input's fields (transaction hash, output
// index, signature and public key) to stdout for debugging.
func (txInput *PHBTXInput) PHBPrintInfo() {
	encodedHash := hex.EncodeToString(txInput.PHBTxHash)
	fmt.Printf("txHash:%s\n", encodedHash)
	fmt.Printf("Vout:%d\n", txInput.PHBVout)
	fmt.Printf("Signature:%x\n", txInput.PHBSignature)
	fmt.Printf("PublicKey:%x\n", txInput.PHBPublicKey)
}
// PHBUnLockRipemd160Hash reports whether this input belongs to the
// owner of the given public-key hash: it hashes the input's public key
// via PHBRipemd160Hash and compares the result to ripemd160Hash.
func (txInput *PHBTXInput) PHBUnLockRipemd160Hash(ripemd160Hash []byte) bool {
	publicKey := PHBRipemd160Hash(txInput.PHBPublicKey)
	// bytes.Equal is the idiomatic, clearer form of
	// bytes.Compare(a, b) == 0 (staticcheck S1004).
	return bytes.Equal(publicKey, ripemd160Hash)
}
|
/*
Package handlers : handle MQTT message and deploy object to kubernetes.
license: Apache license 2.0
copyright: Nobuyuki Matsui <nobuyuki.matsui@gmail.com>
*/
package handlers
import (
"fmt"
"go.uber.org/zap"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry"
)
// secretHandler applies and deletes v1 Secrets through the Kubernetes
// API in response to incoming messages.
type secretHandler struct {
	kubeClient kubernetes.Interface // client used for Secret operations
	logger *zap.SugaredLogger // structured logger for result messages
}
// newSecretHandler wraps the given clientset and logger in a handler
// that applies and deletes v1 Secrets.
func newSecretHandler(clientset *kubernetes.Clientset, logger *zap.SugaredLogger) *secretHandler {
	handler := new(secretHandler)
	handler.kubeClient = clientset
	handler.logger = logger
	return handler
}
// Apply creates the given Secret in the default namespace, or updates
// it in place when it already exists. The human-readable result
// message is both logged and returned.
func (h *secretHandler) Apply(rawData runtime.Object) string {
	secret := rawData.(*apiv1.Secret)
	secretsClient := h.kubeClient.CoreV1().Secrets(apiv1.NamespaceDefault)
	name := secret.ObjectMeta.Name
	current, getErr := secretsClient.Get(name, metav1.GetOptions{})
	if current != nil && getErr == nil {
		err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
			// Re-read the latest object on every attempt: retrying the
			// Update with the initially-fetched (now stale) object keeps
			// the old resourceVersion and would conflict forever. This is
			// the pattern recommended by client-go's retry documentation.
			latest, err := secretsClient.Get(name, metav1.GetOptions{})
			if err != nil {
				return err
			}
			latest.ObjectMeta.Labels = secret.ObjectMeta.Labels
			latest.ObjectMeta.Annotations = secret.ObjectMeta.Annotations
			latest.Type = secret.Type
			latest.Data = secret.Data
			_, err = secretsClient.Update(latest)
			return err
		})
		if err != nil {
			msg := fmt.Sprintf("update secret err -- %s", name)
			h.logger.Errorf("%s: %s", msg, err.Error())
			return msg
		}
		msg := fmt.Sprintf("update secret -- %s", name)
		h.logger.Infof(msg)
		return msg
	} else if errors.IsNotFound(getErr) {
		result, err := secretsClient.Create(secret)
		if err != nil {
			msg := fmt.Sprintf("create secret err -- %s", name)
			h.logger.Errorf("%s: %s", msg, err.Error())
			return msg
		}
		msg := fmt.Sprintf("create secret -- %s", result.GetObjectMeta().GetName())
		h.logger.Infof(msg)
		return msg
	} else {
		msg := fmt.Sprintf("get secret err -- %s", name)
		h.logger.Errorf("%s: %s", msg, getErr.Error())
		return msg
	}
}
// Delete removes the given Secret from the default namespace if it
// exists, and returns a human-readable result message, which is also
// written to the logger.
func (h *secretHandler) Delete(rawData runtime.Object) string {
	secret := rawData.(*apiv1.Secret)
	secretsClient := h.kubeClient.CoreV1().Secrets(apiv1.NamespaceDefault)
	name := secret.ObjectMeta.Name
	current, getErr := secretsClient.Get(name, metav1.GetOptions{})
	switch {
	case current != nil && getErr == nil:
		// Foreground propagation: dependents are removed before the
		// Secret itself is deleted.
		deletePolicy := metav1.DeletePropagationForeground
		opts := &metav1.DeleteOptions{PropagationPolicy: &deletePolicy}
		if err := secretsClient.Delete(name, opts); err != nil {
			msg := fmt.Sprintf("delete secret err -- %s", name)
			h.logger.Errorf("%s: %s", msg, err.Error())
			return msg
		}
		msg := fmt.Sprintf("delete secret -- %s", name)
		h.logger.Infof(msg)
		return msg
	case errors.IsNotFound(getErr):
		msg := fmt.Sprintf("secret does not exist -- %s", name)
		h.logger.Infof(msg)
		return msg
	default:
		msg := fmt.Sprintf("get secret err -- %s", name)
		h.logger.Errorf("%s: %s", msg, getErr.Error())
		return msg
	}
}
|
// Copyright 2020 The VectorSQL Authors.
//
// Code is licensed under Apache License, Version 2.0.
package executors
import (
"databases"
"planners"
)
// InsertExecutor executes an INSERT plan by wiring the target table's
// output stream into the result.
type InsertExecutor struct {
	ctx *ExecutorContext // execution context (log, conf, session)
	plan *planners.InsertPlan // the INSERT plan to execute
}
// NewInsertExecutor builds an InsertExecutor for the given plan.
// The plan argument must be a *planners.InsertPlan.
func NewInsertExecutor(ctx *ExecutorContext, plan planners.IPlan) IExecutor {
	executor := &InsertExecutor{ctx: ctx}
	executor.plan = plan.(*planners.InsertPlan)
	return executor
}
// Execute resolves the storage backing the INSERT's target table and
// attaches its output stream to the returned result.
func (executor *InsertExecutor) Execute() (*Result, error) {
	var (
		ectx = executor.ctx
		plan = executor.plan
	)
	// Use the session's current database unless the plan names a
	// schema explicitly.
	schema := ectx.session.GetDatabase()
	if plan.Schema != "" {
		schema = plan.Schema
	}
	databaseCtx := databases.NewDatabaseContext(ectx.log, ectx.conf)
	storage, err := databases.GetStorage(databaseCtx, schema, plan.Table)
	if err != nil {
		return nil, err
	}
	output, err := storage.GetOutputStream(ectx.session)
	if err != nil {
		return nil, err
	}
	result := NewResult()
	result.SetOutput(output)
	return result, nil
}
// String implements the executor stringer; InsertExecutor has no
// textual representation and always returns the empty string.
func (executor *InsertExecutor) String() string {
	return ""
}
|
package lemon
import (
"flag"
"fmt"
"io/ioutil"
"regexp"
"time"
"github.com/mitchellh/go-homedir"
"github.com/monochromegane/conflag"
)
// FlagParse classifies the invocation (alias vs. explicit subcommand)
// and then parses the command-line flags.
func (c *CLI) FlagParse(args []string, skip bool) error {
	style, err := c.getCommandType(args)
	if err != nil {
		return err
	}
	// getCommandType removed the subcommand by shifting the remaining
	// args left and blanking the final slot; drop that empty slot.
	if style == SUBCOMMAND {
		args = args[:len(args)-1]
	}
	return c.parse(args, skip)
}
// getCommandType determines how lemonade was invoked: as a symlinked
// alias (xdg-open/pbpaste/pbcopy) or with an explicit subcommand
// (open/paste/copy/server). On the subcommand path the matched word is
// removed from args in place (tail shifted left, last slot blanked) so
// the caller can trim the slice. Returns an error when no subcommand
// matches.
func (c *CLI) getCommandType(args []string) (s CommandStyle, err error) {
	s = ALIAS
	// Alias invocation: the executable's own name selects the command.
	switch {
	case regexp.MustCompile(`/?xdg-open$`).MatchString(args[0]):
		c.Type = OPEN
		return
	case regexp.MustCompile(`/?pbpaste$`).MatchString(args[0]):
		c.Type = PASTE
		return
	case regexp.MustCompile(`/?pbcopy$`).MatchString(args[0]):
		c.Type = COPY
		return
	}
	// del removes args[i+1] (element i of args[1:]) by shifting the
	// tail left one slot and clearing the now-duplicated last entry.
	del := func(i int) {
		copy(args[i+1:], args[i+2:])
		args[len(args)-1] = ""
	}
	s = SUBCOMMAND
	for i, v := range args[1:] {
		switch v {
		case "open":
			c.Type = OPEN
			del(i)
			return
		case "paste":
			c.Type = PASTE
			del(i)
			return
		case "copy":
			c.Type = COPY
			del(i)
			return
		case "server":
			c.Type = SERVER
			del(i)
			return
		}
	}
	return s, fmt.Errorf("Unknown SubCommand\n\n" + Usage)
}
// flags builds the flag set shared by all lemonade commands, binding
// each flag directly to the corresponding CLI field.
func (c *CLI) flags() *flag.FlagSet {
	flags := flag.NewFlagSet("lemonade", flag.ContinueOnError)
	flags.IntVar(&c.Port, "port", 2489, "TCP port number")
	flags.StringVar(&c.Allow, "allow", "0.0.0.0/0,::/0", "Allow IP range")
	flags.StringVar(&c.Host, "host", "localhost", "Destination host name.")
	flags.BoolVar(&c.Help, "help", false, "Show this message")
	flags.BoolVar(&c.TransLoopback, "trans-loopback", true, "Translate loopback address")
	flags.BoolVar(&c.TransLocalfile, "trans-localfile", true, "Translate local file")
	flags.StringVar(&c.LineEnding, "line-ending", "", "Convert Line Endings (CR/CRLF)")
	flags.BoolVar(&c.NoFallbackMessages, "no-fallback-messages", false, "Do not show fallback messages")
	flags.DurationVar(&c.Timeout, "rpc-timeout", 100*time.Millisecond, "RPC timeout")
	flags.IntVar(&c.LogLevel, "log-level", 1, "Log level")
	return flags
}
// parse parses flags for the resolved command, layering defaults from
// ~/.config/lemonade.toml (unless skip) underneath the real command
// line. For commands that take a payload, the remaining positional
// argument (or stdin when none is given) becomes the data source.
func (c *CLI) parse(args []string, skip bool) error {
	flags := c.flags()
	confPath, err := homedir.Expand("~/.config/lemonade.toml")
	if err == nil && !skip {
		// Best-effort: config-file values are parsed first so the real
		// command-line flags parsed below override them.
		if confArgs, err := conflag.ArgsFrom(confPath); err == nil {
			flags.Parse(confArgs)
		}
	}
	var arg string
	err = flags.Parse(args[1:])
	if err != nil {
		return err
	}
	if c.Type == PASTE || c.Type == SERVER {
		// These commands take no data-source argument.
		return nil
	}
	// Allow flags to appear after positional arguments: repeatedly take
	// the first remaining positional as the data-source candidate and
	// re-parse the rest as flags until no positionals remain.
	for 0 < flags.NArg() {
		arg = flags.Arg(0)
		err := flags.Parse(flags.Args()[1:])
		if err != nil {
			return err
		}
	}
	if c.Help {
		return nil
	}
	if arg != "" {
		c.DataSource = arg
	} else {
		// No positional argument: read the payload from stdin.
		b, err := ioutil.ReadAll(c.In)
		if err != nil {
			return err
		}
		c.DataSource = string(b)
	}
	return nil
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rowenc_test
import (
"bytes"
"fmt"
"reflect"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/randgen"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/leanovate/gopter"
"github.com/leanovate/gopter/prop"
"github.com/stretchr/testify/require"
)
// genColumnType is a gopter generator producing random SQL column types.
func genColumnType() gopter.Gen {
	return func(genParams *gopter.GenParameters) *gopter.GenResult {
		columnType := randgen.RandColumnType(genParams.Rng)
		return gopter.NewGenResult(columnType, gopter.NoShrinker)
	}
}
// genRandomArrayType is a gopter generator producing random SQL array types.
func genRandomArrayType() gopter.Gen {
	return func(genParams *gopter.GenParameters) *gopter.GenResult {
		arrType := randgen.RandArrayType(genParams.Rng)
		return gopter.NewGenResult(arrType, gopter.NoShrinker)
	}
}
// genDatum is a gopter generator producing a random datum of a random
// column type. The false argument presumably disables NULL datums —
// confirm against randgen.RandDatum.
func genDatum() gopter.Gen {
	return func(genParams *gopter.GenParameters) *gopter.GenResult {
		return gopter.NewGenResult(randgen.RandDatum(genParams.Rng, randgen.RandColumnType(genParams.Rng),
			false), gopter.NoShrinker)
	}
}
// genDatumWithType is a gopter generator producing a random datum of
// the given column type (passed as interface{} for use with FlatMap).
func genDatumWithType(columnType interface{}) gopter.Gen {
	return func(genParams *gopter.GenParameters) *gopter.GenResult {
		datum := randgen.RandDatum(genParams.Rng, columnType.(*types.T), false)
		return gopter.NewGenResult(datum, gopter.NoShrinker)
	}
}
// genArrayDatumWithType is a gopter generator producing a random array
// datum of the given array type, with possibly-NULL contents.
func genArrayDatumWithType(arrTyp interface{}) gopter.Gen {
	return func(genParams *gopter.GenParameters) *gopter.GenResult {
		// Mark the array contents to have a 1 in 10 chance of being null.
		datum := randgen.RandArray(genParams.Rng, arrTyp.(*types.T), 10)
		return gopter.NewGenResult(datum, gopter.NoShrinker)
	}
}
// genEncodingDirection is a gopter generator producing a random key
// encoding direction: a value in [1, Descending], i.e. Ascending or
// Descending.
func genEncodingDirection() gopter.Gen {
	return func(genParams *gopter.GenParameters) *gopter.GenResult {
		return gopter.NewGenResult(
			encoding.Direction((genParams.Rng.Int()%int(encoding.Descending))+1),
			gopter.NoShrinker)
	}
}
// hasKeyEncoding reports whether datums of the given type can be
// round-tripped through the table key encoding. Only some type
// families are key-encodable; an array is encodable exactly when its
// element type is.
func hasKeyEncoding(typ *types.T) bool {
	family := typ.Family()
	// Arrays inherit encodability from their contents.
	if family == types.ArrayFamily {
		return hasKeyEncoding(typ.ArrayContents())
	}
	switch family {
	case types.JsonFamily, types.CollatedStringFamily, types.TupleFamily,
		types.DecimalFamily, types.GeographyFamily, types.GeometryFamily:
		return false
	default:
		return true
	}
}
// TestEncodeTableValue property-tests that a random datum survives a
// round trip through the value encoding (EncodeTableValue followed by
// DecodeTableValue) with no leftover bytes and an equal result.
func TestEncodeTableValue(t *testing.T) {
	a := &rowenc.DatumAlloc{}
	ctx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings())
	parameters := gopter.DefaultTestParameters()
	parameters.MinSuccessfulTests = 10000
	properties := gopter.NewProperties(parameters)
	var scratch []byte
	// The property function returns "" on success, or a description of
	// the failure otherwise (gopter's string-property convention).
	properties.Property("roundtrip", prop.ForAll(
		func(d tree.Datum) string {
			b, err := rowenc.EncodeTableValue(nil, 0, d, scratch)
			if err != nil {
				return "error: " + err.Error()
			}
			newD, leftoverBytes, err := rowenc.DecodeTableValue(a, d.ResolvedType(), b)
			if len(leftoverBytes) > 0 {
				return "Leftover bytes"
			}
			if err != nil {
				return "error: " + err.Error()
			}
			if newD.Compare(ctx, d) != 0 {
				return "unequal"
			}
			return ""
		},
		genDatum(),
	))
	properties.TestingRun(t)
}
// TestEncodeTableKey property-tests the table key encoding: datums
// must round-trip through encode/decode, and the byte comparison of
// two encoded keys must agree with the datum comparison (respecting
// the encoding direction). Both properties are also run on array
// datums that may contain NULLs.
func TestEncodeTableKey(t *testing.T) {
	a := &rowenc.DatumAlloc{}
	ctx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings())
	parameters := gopter.DefaultTestParameters()
	parameters.MinSuccessfulTests = 10000
	properties := gopter.NewProperties(parameters)
	// roundtripDatum returns "" on success, or a failure description.
	roundtripDatum := func(d tree.Datum, dir encoding.Direction) string {
		b, err := rowenc.EncodeTableKey(nil, d, dir)
		if err != nil {
			return "error: " + err.Error()
		}
		newD, leftoverBytes, err := rowenc.DecodeTableKey(a, d.ResolvedType(), b, dir)
		if len(leftoverBytes) > 0 {
			return "Leftover bytes"
		}
		if err != nil {
			return "error: " + err.Error()
		}
		if newD.Compare(ctx, d) != 0 {
			return "unequal"
		}
		return ""
	}
	properties.Property("roundtrip", prop.ForAll(
		roundtripDatum,
		genColumnType().
			SuchThat(hasKeyEncoding).
			FlatMap(genDatumWithType, reflect.TypeOf((*tree.Datum)(nil)).Elem()),
		genEncodingDirection(),
	))
	// Also run the property on arrays possibly containing NULL values.
	// The random generator in the property above does not generate NULLs.
	properties.Property("roundtrip-arrays", prop.ForAll(
		roundtripDatum,
		genRandomArrayType().
			SuchThat(hasKeyEncoding).
			FlatMap(genArrayDatumWithType, reflect.TypeOf((*tree.Datum)(nil)).Elem()),
		genEncodingDirection(),
	))
	// generateAndCompareDatums checks order preservation: the encoded
	// bytes must compare the same way the datums do (inverted when the
	// direction is descending).
	generateAndCompareDatums := func(datums []tree.Datum, dir encoding.Direction) string {
		d1 := datums[0]
		d2 := datums[1]
		b1, err := rowenc.EncodeTableKey(nil, d1, dir)
		if err != nil {
			return "error: " + err.Error()
		}
		b2, err := rowenc.EncodeTableKey(nil, d2, dir)
		if err != nil {
			return "error: " + err.Error()
		}
		expectedCmp := d1.Compare(ctx, d2)
		cmp := bytes.Compare(b1, b2)
		if expectedCmp == 0 {
			if cmp != 0 {
				return fmt.Sprintf("equal inputs produced inequal outputs: \n%v\n%v", b1, b2)
			}
			// If the inputs are equal and so are the outputs, no more checking to do.
			return ""
		}
		// Ascending: byte order must match datum order; descending: it
		// must be inverted.
		cmpsMatch := expectedCmp == cmp
		dirIsAscending := dir == encoding.Ascending
		if cmpsMatch != dirIsAscending {
			return fmt.Sprintf("non-order preserving encoding: \n%v\n%v", b1, b2)
		}
		return ""
	}
	properties.Property("order-preserving", prop.ForAll(
		generateAndCompareDatums,
		// For each column type, generate two datums of that type.
		genColumnType().
			SuchThat(hasKeyEncoding).
			FlatMap(
				func(t interface{}) gopter.Gen {
					colTyp := t.(*types.T)
					return gopter.CombineGens(
						genDatumWithType(colTyp),
						genDatumWithType(colTyp))
				}, reflect.TypeOf([]interface{}{})).
			Map(func(datums []interface{}) []tree.Datum {
				ret := make([]tree.Datum, len(datums))
				for i, d := range datums {
					ret[i] = d.(tree.Datum)
				}
				return ret
			}),
		genEncodingDirection(),
	))
	// Also run the property on arrays possibly containing NULL values.
	// The random generator in the property above does not generate NULLs.
	properties.Property("order-preserving-arrays", prop.ForAll(
		generateAndCompareDatums,
		// For each column type, generate two datums of that type.
		genRandomArrayType().
			SuchThat(hasKeyEncoding).
			FlatMap(
				func(t interface{}) gopter.Gen {
					colTyp := t.(*types.T)
					return gopter.CombineGens(
						genArrayDatumWithType(colTyp),
						genArrayDatumWithType(colTyp))
				}, reflect.TypeOf([]interface{}{})).
			Map(func(datums []interface{}) []tree.Datum {
				ret := make([]tree.Datum, len(datums))
				for i, d := range datums {
					ret[i] = d.(tree.Datum)
				}
				return ret
			}),
		genEncodingDirection(),
	))
	properties.TestingRun(t)
}
// TestSkipTableKey property-tests that SkipTableKey consumes exactly
// one encoded key column: after encoding a single datum, skipping it
// must leave zero bytes.
func TestSkipTableKey(t *testing.T) {
	parameters := gopter.DefaultTestParameters()
	parameters.MinSuccessfulTests = 10000
	properties := gopter.NewProperties(parameters)
	properties.Property("correctness", prop.ForAll(
		func(d tree.Datum, dir encoding.Direction) string {
			b, err := rowenc.EncodeTableKey(nil, d, dir)
			if err != nil {
				return "error: " + err.Error()
			}
			res, err := rowenc.SkipTableKey(b)
			if err != nil {
				return "error: " + err.Error()
			}
			if len(res) != 0 {
				// Debug aid: dump the offending bytes and datum type.
				fmt.Println(res, len(res), d.ResolvedType(), d.ResolvedType().Family())
				return "expected 0 bytes remaining"
			}
			return ""
		},
		genColumnType().
			SuchThat(hasKeyEncoding).FlatMap(genDatumWithType, reflect.TypeOf((*tree.Datum)(nil)).Elem()),
		genEncodingDirection(),
	))
	properties.TestingRun(t)
}
// TestMarshalColumnValueRoundtrip property-tests that a datum survives
// MarshalColumnValue followed by UnmarshalColumnValue for a random
// column type.
func TestMarshalColumnValueRoundtrip(t *testing.T) {
	a := &rowenc.DatumAlloc{}
	ctx := tree.NewTestingEvalContext(cluster.MakeTestingClusterSettings())
	parameters := gopter.DefaultTestParameters()
	parameters.MinSuccessfulTests = 10000
	properties := gopter.NewProperties(parameters)
	properties.Property("roundtrip",
		prop.ForAll(
			func(typ *types.T) string {
				// Sample a concrete datum of the generated type.
				d, ok := genDatumWithType(typ).Sample()
				if !ok {
					return "error generating datum"
				}
				datum := d.(tree.Datum)
				desc := descpb.ColumnDescriptor{
					Type: typ,
				}
				value, err := rowenc.MarshalColumnValue(&desc, datum)
				if err != nil {
					return "error marshaling: " + err.Error()
				}
				outDatum, err := rowenc.UnmarshalColumnValue(a, typ, value)
				if err != nil {
					return "error unmarshaling: " + err.Error()
				}
				if datum.Compare(ctx, outDatum) != 0 {
					return fmt.Sprintf("datum didn't roundtrip.\ninput: %v\noutput: %v", datum, outDatum)
				}
				return ""
			},
			genColumnType(),
		),
	)
	properties.TestingRun(t)
}
// TestDecodeTableKeyOutOfRangeTimestamp deliberately tests out of range timestamps
// can still be decoded from disk. See #46973.
func TestDecodeTableKeyOutOfRangeTimestamp(t *testing.T) {
	// math.MinInt64 seconds: far outside the supported timestamp range.
	for _, d := range []tree.Datum{
		&tree.DTimestamp{Time: timeutil.Unix(-9223372036854775808, 0).In(time.UTC)},
		&tree.DTimestampTZ{Time: timeutil.Unix(-9223372036854775808, 0).In(time.UTC)},
	} {
		// Both key encoding directions must round-trip.
		for _, dir := range []encoding.Direction{encoding.Ascending, encoding.Descending} {
			t.Run(fmt.Sprintf("%s/direction:%d", d.String(), dir), func(t *testing.T) {
				encoded, err := rowenc.EncodeTableKey([]byte{}, d, dir)
				require.NoError(t, err)
				a := &rowenc.DatumAlloc{}
				decoded, _, err := rowenc.DecodeTableKey(a, d.ResolvedType(), encoded, dir)
				require.NoError(t, err)
				require.Equal(t, d, decoded)
			})
		}
	}
}
// TestDecodeTableValueOutOfRangeTimestamp deliberately tests out of range timestamps
// can still be decoded from disk. See #46973.
func TestDecodeTableValueOutOfRangeTimestamp(t *testing.T) {
	// math.MinInt64 seconds: far outside the supported timestamp range.
	for _, d := range []tree.Datum{
		&tree.DTimestamp{Time: timeutil.Unix(-9223372036854775808, 0).In(time.UTC)},
		&tree.DTimestampTZ{Time: timeutil.Unix(-9223372036854775808, 0).In(time.UTC)},
	} {
		t.Run(d.String(), func(t *testing.T) {
			var b []byte
			colID := descpb.ColumnID(1)
			encoded, err := rowenc.EncodeTableValue(b, colID, d, []byte{})
			require.NoError(t, err)
			a := &rowenc.DatumAlloc{}
			decoded, _, err := rowenc.DecodeTableValue(a, d.ResolvedType(), encoded)
			require.NoError(t, err)
			require.Equal(t, d, decoded)
		})
	}
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"github.com/google/gapid/core/app"
"github.com/google/gapid/core/log"
"github.com/google/gapid/gapis/service"
)
// dumpVerb implements the "dump" command; it embeds the shared
// DumpFlags for its flag definitions.
type dumpVerb struct{ DumpFlags }

// init registers the dump verb with the application framework.
func init() {
	verb := &dumpVerb{}
	app.AddVerb(&app.Verb{
		Name: "dump",
		ShortHelp: "Dump a textual representation of a .gfxtrace file",
		Action: verb,
	})
}
// Run executes the dump verb: it loads the capture named by the single
// positional argument, optionally prints device/ABI info as JSON, and
// otherwise prints every command in the trace.
func (verb *dumpVerb) Run(ctx context.Context, flags flag.FlagSet) error {
	if flags.NArg() != 1 {
		app.Usage(ctx, "Exactly one gfx trace file expected, got %d", flags.NArg())
		return nil
	}
	client, cp, err := getGapisAndLoadCapture(ctx, verb.Gapis, verb.Gapir, flags.Arg(0), verb.CaptureFileFlags)
	if err != nil {
		return err
	}
	defer client.Close()
	boxedCapture, err := client.Get(ctx, cp.Path(), nil)
	if err != nil {
		return log.Err(ctx, err, "Failed to load the capture")
	}
	c := boxedCapture.(*service.Capture)
	boxedCommands, err := client.Get(ctx, cp.Commands().Path(), nil)
	if err != nil {
		return log.Err(ctx, err, "Failed to acquire the capture's commands")
	}
	commands := boxedCommands.(*service.Commands).List
	if verb.ShowDeviceInfo {
		dev, err := json.MarshalIndent(c.Device, "", " ")
		if err != nil {
			return log.Err(ctx, err, "Failed to marshal capture device to JSON")
		}
		fmt.Printf("Device Information:\n%s\n", string(dev))
	}
	if verb.ShowABIInfo {
		abi, err := json.MarshalIndent(c.ABI, "", " ")
		if err != nil {
			return log.Err(ctx, err, "Failed to marshal capture abi to JSON")
		}
		fmt.Printf("Trace ABI Information:\n%s\n", string(abi))
	}
	if verb.ShowDeviceInfo || verb.ShowABIInfo {
		return nil // That's all that was requested
	}
	// Default behavior: print every command in the capture.
	for _, c := range commands {
		if err := getAndPrintCommand(ctx, client, c, verb.Observations); err != nil {
			return err
		}
	}
	return nil
}
|
// Copyright 2019 Liquidata, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package doltdb
import (
"context"
"io/ioutil"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/dolthub/dolt/go/libraries/doltcore/dbfactory"
"github.com/dolthub/dolt/go/libraries/doltcore/ref"
"github.com/dolthub/dolt/go/libraries/doltcore/schema"
"github.com/dolthub/dolt/go/libraries/utils/filesys"
"github.com/dolthub/dolt/go/libraries/utils/test"
"github.com/dolthub/dolt/go/store/hash"
"github.com/dolthub/dolt/go/store/types"
)
// Column tags for the columns of the test schema created below.
const (
	idTag = 0
	firstTag = 1
	lastTag = 2
	isMarriedTag = 3
	ageTag = 4
	emptyTag = 5
)

// Names of the test schema's secondary indexes.
const testSchemaIndexName = "idx_name"
const testSchemaIndexAge = "idx_age"
// createTestSchema builds a six-column schema (keyed on id) and
// attaches two non-unique secondary indexes: one over (first, last)
// and one over age.
func createTestSchema(t *testing.T) schema.Schema {
	colColl, _ := schema.NewColCollection(
		schema.NewColumn("id", idTag, types.UUIDKind, true, schema.NotNullConstraint{}),
		schema.NewColumn("first", firstTag, types.StringKind, false, schema.NotNullConstraint{}),
		schema.NewColumn("last", lastTag, types.StringKind, false, schema.NotNullConstraint{}),
		schema.NewColumn("is_married", isMarriedTag, types.BoolKind, false),
		schema.NewColumn("age", ageTag, types.UintKind, false),
		schema.NewColumn("empty", emptyTag, types.IntKind, false),
	)
	sch := schema.SchemaFromCols(colColl)
	_, err := sch.Indexes().AddIndexByColTags(testSchemaIndexName, []uint64{firstTag, lastTag}, schema.IndexProperties{IsUnique: false, Comment: ""})
	require.NoError(t, err)
	_, err = sch.Indexes().AddIndexByColTags(testSchemaIndexAge, []uint64{ageTag}, schema.IndexProperties{IsUnique: false, Comment: ""})
	require.NoError(t, err)
	return sch
}
// TestEmptyInMemoryRepoCreation creates an empty in-memory repo and
// verifies the initial commit can be resolved both by branch name
// ("master") and by its own hash.
func TestEmptyInMemoryRepoCreation(t *testing.T) {
	ddb, err := LoadDoltDB(context.Background(), types.Format_7_18, InMemDoltDB)
	if err != nil {
		t.Fatal("Failed to load db")
	}
	err = ddb.WriteEmptyRepo(context.Background(), "Bill Billerson", "bigbillieb@fake.horse")
	if err != nil {
		t.Fatal("Unexpected error creating empty repo", err)
	}
	// Resolve the initial commit by branch name.
	cs, _ := NewCommitSpec("master")
	commit, err := ddb.Resolve(context.Background(), cs, nil)
	if err != nil {
		t.Fatal("Could not find commit")
	}
	// Resolve the same commit again, this time by hash.
	h, err := commit.HashOf()
	assert.NoError(t, err)
	cs2, _ := NewCommitSpec(h.String())
	_, err = ddb.Resolve(context.Background(), cs2, nil)
	if err != nil {
		t.Fatal("Failed to get commit by hash")
	}
}
// TestLoadNonExistentLocalFSRepo verifies that loading from a
// directory with no dolt data dir fails with an error and a nil DB.
func TestLoadNonExistentLocalFSRepo(t *testing.T) {
	_, err := test.ChangeToTestDir("TestLoadRepo")
	if err != nil {
		panic("Couldn't change the working directory to the test directory.")
	}
	ddb, err := LoadDoltDB(context.Background(), types.Format_7_18, LocalDirDoltDB)
	assert.Nil(t, ddb, "Should return nil when loading a non-existent data dir")
	assert.Error(t, err, "Should see an error here")
}
// TestLoadBadLocalFSRepo verifies that loading a repo whose data "dir"
// is actually a regular file fails with an error and a nil DB.
func TestLoadBadLocalFSRepo(t *testing.T) {
	testDir, err := test.ChangeToTestDir("TestLoadRepo")
	if err != nil {
		panic("Couldn't change the working directory to the test directory.")
	}
	contents := []byte("not a directory")
	// Check the setup write instead of silently ignoring its error: if
	// the file were never created, the assertions below would pass for
	// the wrong reason (missing dir rather than non-directory file).
	err = ioutil.WriteFile(filepath.Join(testDir, dbfactory.DoltDataDir), contents, 0644)
	require.NoError(t, err)
	ddb, err := LoadDoltDB(context.Background(), types.Format_7_18, LocalDirDoltDB)
	assert.Nil(t, ddb, "Should return nil when loading a non-directory data dir file")
	assert.Error(t, err, "Should see an error here")
}
// TestLDNoms exercises the lifecycle of an on-disk repo: create an
// empty repo, reopen it and write a table root without committing,
// then reopen again and commit the value, verifying metadata,
// ancestry, and schema along the way.
func TestLDNoms(t *testing.T) {
	testDir, err := test.ChangeToTestDir("TestLoadRepo")
	if err != nil {
		panic("Couldn't change the working directory to the test directory.")
	}
	committerName := "Bill Billerson"
	committerEmail := "bigbillieb@fake.horse"
	// Create an empty repo in a temp dir on the filesys
	{
		err := filesys.LocalFS.MkDirs(filepath.Join(testDir, dbfactory.DoltDataDir))
		if err != nil {
			t.Fatal("Failed to create noms directory")
		}
		ddb, _ := LoadDoltDB(context.Background(), types.Format_7_18, LocalDirDoltDB)
		err = ddb.WriteEmptyRepo(context.Background(), committerName, committerEmail)
		if err != nil {
			t.Fatal("Unexpected error creating empty repo", err)
		}
	}
	//read the empty repo back and add a new table. Write the value, but don't commit
	var valHash hash.Hash
	var tbl *Table
	{
		ddb, _ := LoadDoltDB(context.Background(), types.Format_7_18, LocalDirDoltDB)
		cs, _ := NewCommitSpec("master")
		commit, err := ddb.Resolve(context.Background(), cs, nil)
		if err != nil {
			t.Fatal("Couldn't find commit")
		}
		meta, err := commit.GetCommitMeta()
		assert.NoError(t, err)
		if meta.Name != committerName || meta.Email != committerEmail {
			t.Error("Unexpected metadata")
		}
		root, err := commit.GetRootValue()
		assert.NoError(t, err)
		names, err := root.GetTableNames(context.Background())
		assert.NoError(t, err)
		if len(names) != 0 {
			t.Fatal("There should be no tables in empty db")
		}
		tSchema := createTestSchema(t)
		rowData, _ := createTestRowData(t, ddb.db, tSchema)
		tbl, err = createTestTable(ddb.db, tSchema, rowData)
		if err != nil {
			t.Fatal("Failed to create test table with data")
		}
		root, err = root.PutTable(context.Background(), "test", tbl)
		assert.NoError(t, err)
		valHash, err = ddb.WriteRootValue(context.Background(), root)
		assert.NoError(t, err)
	}
	// reopen the db and commit the value. Perform a couple checks for
	{
		ddb, _ := LoadDoltDB(context.Background(), types.Format_7_18, LocalDirDoltDB)
		meta, err := NewCommitMeta(committerName, committerEmail, "Sample data")
		if err != nil {
			// Fatal (not Error): a nil meta would be dereferenced below.
			// Also fixes the "Failled" typo and describes the actual step.
			t.Fatal("Failed to create commit metadata")
		}
		commit, err := ddb.Commit(context.Background(), valHash, ref.NewBranchRef("master"), meta)
		if err != nil {
			// Fatal (not Error): a nil commit would be dereferenced below.
			t.Fatal("Failed to commit")
		}
		numParents, err := commit.NumParents()
		assert.NoError(t, err)
		if numParents != 1 {
			t.Error("Unexpected ancestry")
		}
		root, err := commit.GetRootValue()
		assert.NoError(t, err)
		readTable, ok, err := root.GetTable(context.Background(), "test")
		assert.NoError(t, err)
		if !ok {
			t.Error("Could not retrieve test table")
		}
		has, err := readTable.HasTheSameSchema(tbl)
		assert.NoError(t, err)
		if !has {
			t.Error("Unexpected schema")
		}
	}
}
|
package steps
import (
"errors"
s "github.com/pganalyze/collector/setup/state"
)
// CheckReplicationStatus aborts guided setup when the target Postgres
// server is a replica (pg_is_in_recovery() returns true), since setup
// on replicas is not supported.
var CheckReplicationStatus = &s.Step{
	ID: "check_replication_status",
	Description: "Check whether the database is a replica, which is currently unsupported by pganalyze guided setup",
	Check: func(state *s.SetupState) (bool, error) {
		result, err := state.QueryRunner.QueryRow("SELECT pg_is_in_recovery()")
		if err != nil {
			return false, err
		}
		isInRecovery := result.GetBool(0)
		if isInRecovery {
			return false, errors.New("Postgres server is a replica; this is currently not supported")
		}
		return true, nil
	},
}
|
package heapsort
// heapify restores the max-heap property for the subtree of arr rooted
// at index i, assuming both child subtrees are already valid max-heaps.
// size bounds the portion of arr treated as the heap, which lets
// HeapSort shrink the heap as sorted elements accumulate at the tail.
// (Iterative sift-down; the original used tail recursion.)
func heapify(arr []int, size, i int) {
	for {
		left, right := 2*i+1, 2*i+2
		largest := i
		// A child index >= size means that child does not exist.
		if left < size && arr[left] > arr[largest] {
			largest = left
		}
		if right < size && arr[right] > arr[largest] {
			largest = right
		}
		if largest == i {
			// Neither child is bigger: the heap property holds here.
			return
		}
		// Swap the root with its larger child and keep sifting down
		// until the moved value settles (possibly at a leaf).
		arr[i], arr[largest] = arr[largest], arr[i]
		i = largest
	}
}

// HeapSort sorts arr in place in ascending order using an in-place
// binary max-heap: O(n log n), not stable.
func HeapSort(arr []int) {
	n := len(arr)
	// Build the max-heap bottom-up. Nodes from n/2 onward are leaves
	// (nothing to sift down), so start at the last internal node.
	for root := n/2 - 1; root >= 0; root-- {
		heapify(arr, n, root)
	}
	// Repeatedly move the current maximum (arr[0]) to the end of the
	// unsorted region, then re-heapify the shortened prefix.
	for end := n - 1; end > 0; end-- {
		arr[0], arr[end] = arr[end], arr[0]
		heapify(arr, end, 0)
	}
}
|
package cmd
import (
"fmt"
"os"
"github.com/kairen/kubeconfig-generator/pkg/client"
"github.com/spf13/cobra"
)
// ldapCmd generates a kubeconfig containing an LDAP user token,
// validating the supplied DN/password against the target LDAP server.
var ldapCmd = &cobra.Command{
	Use: "ldap",
	Short: "Generate the Kubernetes config for LDAP user token.",
	Run: func(cmd *cobra.Command, args []string) {
		c := client.NewClient(flags)
		// Failures are reported to stderr; the process is not aborted.
		if err := c.GenerateKubeconfig(output); err != nil {
			fmt.Fprintf(os.Stderr, "%v.\n", err)
		}
	},
}
// init wires the mandatory LDAP credential flags onto the ldap command.
func init() {
	ldapCmd.Flags().StringVarP(&flags.DN, "dn", "", "", "Use the given DN to validate the target's LDAP server.")
	ldapCmd.Flags().StringVarP(&flags.Password, "password", "", "", "Use the given password to validate the target's LDAP server.")
	// MarkFlagRequired can only fail for an unknown flag name, which
	// cannot happen here, so the returned errors are ignored.
	ldapCmd.MarkFlagRequired("dn")
	ldapCmd.MarkFlagRequired("password")
}
|
package model
import "time"
// UserConfigState captures the user-supplied argument list along with
// the time it last changed.
type UserConfigState struct {
	ArgsChangeTime time.Time // when Args was last replaced via WithArgs
	Args []string // the current argument list
}
// NewUserConfigState returns a UserConfigState seeded with args. The
// change timestamp stays at its zero value until WithArgs is called.
func NewUserConfigState(args []string) UserConfigState {
	var state UserConfigState
	state.Args = args
	return state
}
// WithArgs returns a copy of ucs carrying the new args and a refreshed
// ArgsChangeTime. The value receiver means the original is untouched.
func (ucs UserConfigState) WithArgs(args []string) UserConfigState {
	updated := ucs
	updated.Args = args
	updated.ArgsChangeTime = time.Now()
	return updated
}
|
package snmp
import (
"fmt"
"net"
"os"
"os/exec"
"strings"
"sync"
"testing"
"time"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/testutil"
"github.com/influxdata/toml"
"github.com/soniah/gosnmp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// mockExecCommand builds an *exec.Cmd that re-invokes the current test
// binary targeting TestMockExecCommand, so commands the plugin would
// exec are intercepted and answered with canned output.
func mockExecCommand(arg0 string, args ...string) *exec.Cmd {
	argv := make([]string, 0, len(args)+3)
	argv = append(argv, "-test.run=TestMockExecCommand", "--", arg0)
	argv = append(argv, args...)
	cmd := exec.Command(os.Args[0], argv...)
	// Surface the mocked command's stderr so failures show in test output.
	cmd.Stderr = os.Stderr
	return cmd
}
// TestMockExecCommand is not a real test: when the test binary is
// re-invoked by mockExecCommand (selected via -test.run), it parses
// everything after the "--" separator as a command line, prints the
// canned snmptranslate/snmptable output for it, and exits the process.
// When run as part of a normal `go test` pass (no "--" in os.Args) it
// returns immediately.
func TestMockExecCommand(t *testing.T) {
	var cmd []string
	// Collect the args that follow the "--" separator.
	for _, arg := range os.Args {
		if string(arg) == "--" {
			cmd = []string{}
			continue
		}
		if cmd == nil {
			continue
		}
		cmd = append(cmd, string(arg))
	}
	// No "--" present: running as an ordinary test; do nothing.
	if cmd == nil {
		return
	}
	// will not properly handle args with spaces, but it's good enough
	cmdStr := strings.Join(cmd, " ")
	switch cmdStr {
	case "snmptranslate -m all .1.0.0.0":
		fmt.Printf("TEST::testTable\n")
	case "snmptranslate -m all .1.0.0.0.1.1":
		fmt.Printf("server\n")
	case "snmptranslate -m all .1.0.0.0.1.1.0":
		fmt.Printf("server.0\n")
	case "snmptranslate -m all .1.0.0.1.1":
		fmt.Printf("hostname\n")
	case "snmptranslate -m all .999":
		fmt.Printf(".999\n")
	case "snmptranslate -m all -On TEST::testTable":
		fmt.Printf(".1.0.0.0\n")
	case "snmptranslate -m all -On TEST::hostname":
		fmt.Printf(".1.0.0.1.1\n")
	case "snmptranslate -m all -On TEST::server":
		fmt.Printf(".1.0.0.0.1.1\n")
	case "snmptranslate -m all -On TEST::connections":
		fmt.Printf(".1.0.0.0.1.2\n")
	case "snmptranslate -m all -On TEST::latency":
		fmt.Printf(".1.0.0.0.1.3\n")
	case "snmptranslate -m all -On TEST::server.0":
		fmt.Printf(".1.0.0.0.1.1.0\n")
	case "snmptranslate -m all -Td .1.0.0.0.1":
		fmt.Printf(`TEST::testTableEntry
testTableEntry OBJECT-TYPE
-- FROM TEST
MAX-ACCESS not-accessible
STATUS current
INDEX { server }
::= { iso(1) 2 testOID(3) testTable(0) 1 }
`)
	case "snmptable -m all -Ch -Cl -c public 127.0.0.1 .1.0.0.0":
		fmt.Printf(`server connections latency
TEST::testTable: No entries
`)
	default:
		fmt.Fprintf(os.Stderr, "Command not mocked: `%s`\n", cmdStr)
		// you get the expected output by running the missing command with `-M testdata` in the plugin directory.
		os.Exit(1)
	}
	os.Exit(0)
}
// init swaps the package's execCommand hook for the mock so that every
// snmptranslate/snmptable invocation in these tests re-executes the
// test binary (TestMockExecCommand) instead of real external binaries.
func init() {
	execCommand = mockExecCommand
}
// testSNMPConnection is an in-memory snmpConnection stub: Get and Walk
// answer from the values map instead of hitting the network.
type testSNMPConnection struct {
	// host is returned verbatim by Host().
	host string
	// values maps OID (with leading dot) to the PDU value returned for it.
	values map[string]interface{}
}
// Host reports the fake agent host name configured on this stub.
func (tsc *testSNMPConnection) Host() (host string) {
	host = tsc.host
	return
}
// Get answers each requested OID from the stub's values map. Unknown
// OIDs yield a NoSuchObject PDU, mirroring a real agent's response.
// The error result is always nil.
func (tsc *testSNMPConnection) Get(oids []string) (*gosnmp.SnmpPacket, error) {
	packet := &gosnmp.SnmpPacket{}
	for _, oid := range oids {
		var pdu gosnmp.SnmpPDU
		if value, known := tsc.values[oid]; known {
			pdu = gosnmp.SnmpPDU{Name: oid, Value: value}
		} else {
			pdu = gosnmp.SnmpPDU{Name: oid, Type: gosnmp.NoSuchObject}
		}
		packet.Variables = append(packet.Variables, pdu)
	}
	return packet, nil
}
// Walk invokes wf for the OID itself and for every value strictly
// beneath it (i.e. whose name begins with oid + "."), stopping at the
// first callback error. Iteration order is map order — deliberately
// unordered, like a stub should be.
func (tsc *testSNMPConnection) Walk(oid string, wf gosnmp.WalkFunc) error {
	childPrefix := oid + "."
	for name, value := range tsc.values {
		if name != oid && !strings.HasPrefix(name, childPrefix) {
			continue
		}
		if err := wf(gosnmp.SnmpPDU{Name: name, Value: value}); err != nil {
			return err
		}
	}
	return nil
}
// tsc is the shared stub connection used by the table/gather tests.
// Keys under ".1.0.0.0.1.<col>.<row>" are table cells; keys under
// ".1.0.0.1.<n>" are scalar values.
var tsc = &testSNMPConnection{
	host: "tsc",
	values: map[string]interface{}{
		".1.0.0.0.1.1.0": "foo",
		".1.0.0.0.1.1.1": []byte("bar"),
		".1.0.0.0.1.102": "bad", // stray entry, exercises the OID prefix matching
		".1.0.0.0.1.2.0": 1,
		".1.0.0.0.1.2.1": 2,
		".1.0.0.0.1.3.0": "0.123",
		".1.0.0.0.1.3.1": "0.456",
		".1.0.0.0.1.3.2": "9.999",
		".1.0.0.0.1.4.0": 123456,
		".1.0.0.1.1":     "baz",
		".1.0.0.1.2":     234,
		".1.0.0.1.3":     []byte("byte slice"),
	},
}
// TestSampleConfig verifies that the plugin's SampleConfig text parses
// as TOML and decodes to exactly the expected Snmp struct.
func TestSampleConfig(t *testing.T) {
	conf := struct {
		Inputs struct {
			Snmp []*Snmp
		}
	}{}
	// Parse the sample config exactly as telegraf would.
	err := toml.Unmarshal([]byte("[[inputs.snmp]]\n"+(*Snmp)(nil).SampleConfig()), &conf)
	assert.NoError(t, err)

	// The struct the sample config is expected to decode to.
	s := Snmp{
		Agents:         []string{"127.0.0.1:161"},
		Timeout:        internal.Duration{Duration: 5 * time.Second},
		Version:        2,
		Community:      "public",
		MaxRepetitions: 50,

		Name: "system",
		Fields: []Field{
			{Name: "hostname", Oid: ".1.0.0.1.1"},
			{Name: "uptime", Oid: ".1.0.0.1.2"},
			{Name: "load", Oid: ".1.0.0.1.3"},
			{Oid: "HOST-RESOURCES-MIB::hrMemorySize"},
		},
		Tables: []Table{
			{
				Name:        "remote_servers",
				InheritTags: []string{"hostname"},
				Fields: []Field{
					{Name: "server", Oid: ".1.0.0.0.1.0", IsTag: true},
					{Name: "connections", Oid: ".1.0.0.0.1.1"},
					{Name: "latency", Oid: ".1.0.0.0.1.2"},
				},
			},
			{
				Oid: "HOST-RESOURCES-MIB::hrNetworkTable",
			},
		},
	}
	assert.Equal(t, s, *conf.Inputs.Snmp[0])
}
// TestFieldInit checks Field.init() name/OID resolution: numeric OIDs
// are kept as-is, symbolic names are resolved via snmptranslate (the
// mock above), and an explicitly set Name always wins over the derived
// one.
func TestFieldInit(t *testing.T) {
	// Each case: input OID/name and the expected post-init values.
	translations := []struct {
		inputOid     string
		inputName    string
		expectedOid  string
		expectedName string
	}{
		{".1.0.0.0.1.1", "", ".1.0.0.0.1.1", "server"},
		{".1.0.0.0.1.1.0", "", ".1.0.0.0.1.1.0", "server.0"},
		{".999", "", ".999", ".999"},
		{"TEST::server", "", ".1.0.0.0.1.1", "server"},
		{"TEST::server.0", "", ".1.0.0.0.1.1.0", "server.0"},
		{"TEST::server", "foo", ".1.0.0.0.1.1", "foo"},
	}

	for _, txl := range translations {
		f := Field{Oid: txl.inputOid, Name: txl.inputName}
		err := f.init()
		if !assert.NoError(t, err, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName) {
			continue
		}
		assert.Equal(t, txl.expectedOid, f.Oid, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName)
		assert.Equal(t, txl.expectedName, f.Name, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName)
	}
}
// TestTableInit checks Table.init(): the table name is resolved from
// its OID, the columns discovered from the MIB are appended to the
// explicit field list, and index columns are marked as tags.
func TestTableInit(t *testing.T) {
	tbl := Table{
		Oid:    ".1.0.0.0",
		Fields: []Field{{Oid: ".999", Name: "foo"}},
	}
	err := tbl.init()
	require.NoError(t, err)

	assert.Equal(t, "testTable", tbl.Name)

	// One explicit field plus the three columns from the mocked MIB.
	assert.Len(t, tbl.Fields, 4)
	assert.Contains(t, tbl.Fields, Field{Oid: ".999", Name: "foo", initialized: true})
	assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true})
	assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true})
	assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", initialized: true})
}
// TestSnmpInit checks Snmp.init(): symbolic table and field OIDs are
// translated, table columns are filled in from the MIB, and top-level
// fields get their resolved OID and name.
func TestSnmpInit(t *testing.T) {
	s := &Snmp{
		Tables: []Table{
			{Oid: "TEST::testTable"},
		},
		Fields: []Field{
			{Oid: "TEST::hostname"},
		},
	}

	err := s.init()
	require.NoError(t, err)

	assert.Len(t, s.Tables[0].Fields, 3)
	assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true})
	assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true})
	assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", initialized: true})
	assert.Equal(t, Field{
		Oid:         ".1.0.0.1.1",
		Name:        "hostname",
		initialized: true,
	}, s.Fields[0])
}
// TestGetSNMPConnection_v2 checks v2c connection setup: the agent
// address is split into target and port, and the port defaults to the
// standard SNMP port 161 when omitted.
func TestGetSNMPConnection_v2(t *testing.T) {
	s := &Snmp{
		Timeout:   internal.Duration{Duration: 3 * time.Second},
		Retries:   4,
		Version:   2,
		Community: "foo",
	}

	gsc, err := s.getConnection("1.2.3.4:567")
	require.NoError(t, err)
	gs := gsc.(gosnmpWrapper)
	assert.Equal(t, "1.2.3.4", gs.Target)
	assert.EqualValues(t, 567, gs.Port)
	assert.Equal(t, gosnmp.Version2c, gs.Version)
	assert.Equal(t, "foo", gs.Community)

	// No explicit port in the address: expect the default, 161.
	gsc, err = s.getConnection("1.2.3.4")
	require.NoError(t, err)
	gs = gsc.(gosnmpWrapper)
	assert.Equal(t, "1.2.3.4", gs.Target)
	assert.EqualValues(t, 161, gs.Port)
}
// TestGetSNMPConnection_v3 checks SNMPv3 connection setup: every
// configured security parameter (USM user, auth/priv protocols and
// passphrases, engine ID/boots/time, context) must be mapped onto the
// gosnmp connection.
func TestGetSNMPConnection_v3(t *testing.T) {
	s := &Snmp{
		Version:        3,
		MaxRepetitions: 20,
		ContextName:    "mycontext",
		SecLevel:       "authPriv",
		SecName:        "myuser",
		AuthProtocol:   "md5",
		AuthPassword:   "password123",
		PrivProtocol:   "des",
		PrivPassword:   "321drowssap",
		EngineID:       "myengineid",
		EngineBoots:    1,
		EngineTime:     2,
	}

	gsc, err := s.getConnection("1.2.3.4")
	require.NoError(t, err)
	gs := gsc.(gosnmpWrapper)
	assert.Equal(t, gs.Version, gosnmp.Version3)
	sp := gs.SecurityParameters.(*gosnmp.UsmSecurityParameters)
	assert.Equal(t, "1.2.3.4", gsc.Host())
	assert.Equal(t, 20, gs.MaxRepetitions)
	assert.Equal(t, "mycontext", gs.ContextName)
	// "authPriv" must set both the auth and priv message flags.
	assert.Equal(t, gosnmp.AuthPriv, gs.MsgFlags&gosnmp.AuthPriv)
	assert.Equal(t, "myuser", sp.UserName)
	assert.Equal(t, gosnmp.MD5, sp.AuthenticationProtocol)
	assert.Equal(t, "password123", sp.AuthenticationPassphrase)
	assert.Equal(t, gosnmp.DES, sp.PrivacyProtocol)
	assert.Equal(t, "321drowssap", sp.PrivacyPassphrase)
	assert.Equal(t, "myengineid", sp.AuthoritativeEngineID)
	assert.EqualValues(t, 1, sp.AuthoritativeEngineBoots)
	assert.EqualValues(t, 2, sp.AuthoritativeEngineTime)
}
// TestGetSNMPConnection_caching checks that connections are cached per
// agent address: the same address yields the identical connection
// object, a different address yields a new one.
func TestGetSNMPConnection_caching(t *testing.T) {
	s := &Snmp{}
	gs1, err := s.getConnection("1.2.3.4")
	require.NoError(t, err)
	gs2, err := s.getConnection("1.2.3.4")
	require.NoError(t, err)
	gs3, err := s.getConnection("1.2.3.5")
	require.NoError(t, err)
	// Pointer identity, not equality: the cache must hand back the
	// same object for the same agent.
	assert.True(t, gs1 == gs2)
	assert.False(t, gs2 == gs3)
}
// TestGosnmpWrapper_walk_retry exercises the Walk retry path: a local
// UDP "agent" answers every request with one garbage byte, forcing a
// decode error. The wrapper is expected to reconnect and retry, so the
// server should see (Retries+1)*2 requests in total and the underlying
// connection object should have been replaced.
func TestGosnmpWrapper_walk_retry(t *testing.T) {
	srvr, err := net.ListenUDP("udp4", &net.UDPAddr{})
	// Check the error BEFORE deferring Close: previously Close was
	// deferred first, so a ListenUDP failure left srvr nil and the
	// deferred Close would panic instead of failing the test cleanly.
	require.NoError(t, err)
	defer srvr.Close()
	reqCount := 0
	// Set up a WaitGroup to wait for the server goroutine to exit and protect
	// reqCount.
	// Even though simultaneous access is impossible because the server will be
	// blocked on ReadFrom, without this the race detector gets unhappy.
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		buf := make([]byte, 256)
		for {
			_, addr, err := srvr.ReadFrom(buf)
			if err != nil {
				return
			}
			reqCount++
			srvr.WriteTo([]byte{'X'}, addr) // will cause decoding error
		}
	}()

	gs := &gosnmp.GoSNMP{
		Target:    srvr.LocalAddr().(*net.UDPAddr).IP.String(),
		Port:      uint16(srvr.LocalAddr().(*net.UDPAddr).Port),
		Version:   gosnmp.Version2c,
		Community: "public",
		Timeout:   time.Millisecond * 10,
		Retries:   1,
	}
	err = gs.Connect()
	require.NoError(t, err)
	conn := gs.Conn

	gsw := gosnmpWrapper{gs}
	err = gsw.Walk(".1.0.0", func(_ gosnmp.SnmpPDU) error { return nil })
	srvr.Close()
	wg.Wait()
	assert.Error(t, err)
	// The wrapper should have re-dialed after the failure...
	assert.False(t, gs.Conn == conn)
	// ...and sent Retries+1 requests on each of the two connections.
	assert.Equal(t, (gs.Retries+1)*2, reqCount)
}
// TestGosnmpWrapper_get_retry is the Get counterpart of
// TestGosnmpWrapper_walk_retry: garbage replies force a decode error,
// and the wrapper must reconnect and retry, issuing (Retries+1)*2
// requests in total.
func TestGosnmpWrapper_get_retry(t *testing.T) {
	srvr, err := net.ListenUDP("udp4", &net.UDPAddr{})
	// Check the error BEFORE deferring Close: previously Close was
	// deferred first, so a ListenUDP failure left srvr nil and the
	// deferred Close would panic instead of failing the test cleanly.
	require.NoError(t, err)
	defer srvr.Close()
	reqCount := 0
	// Set up a WaitGroup to wait for the server goroutine to exit and protect
	// reqCount.
	// Even though simultaneous access is impossible because the server will be
	// blocked on ReadFrom, without this the race detector gets unhappy.
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		buf := make([]byte, 256)
		for {
			_, addr, err := srvr.ReadFrom(buf)
			if err != nil {
				return
			}
			reqCount++
			srvr.WriteTo([]byte{'X'}, addr) // will cause decoding error
		}
	}()

	gs := &gosnmp.GoSNMP{
		Target:    srvr.LocalAddr().(*net.UDPAddr).IP.String(),
		Port:      uint16(srvr.LocalAddr().(*net.UDPAddr).Port),
		Version:   gosnmp.Version2c,
		Community: "public",
		Timeout:   time.Millisecond * 10,
		Retries:   1,
	}
	err = gs.Connect()
	require.NoError(t, err)
	conn := gs.Conn

	gsw := gosnmpWrapper{gs}
	_, err = gsw.Get([]string{".1.0.0"})
	srvr.Close()
	wg.Wait()
	assert.Error(t, err)
	// The wrapper should have re-dialed after the failure...
	assert.False(t, gs.Conn == conn)
	// ...and sent Retries+1 requests on each of the two connections.
	assert.Equal(t, (gs.Retries+1)*2, reqCount)
}
// TestTableBuild_walk builds a table by walking the stub connection:
// two rows (indices .0 and .1) are expected, with the IsTag field
// landing in Tags and the "float" conversion applied to myfield3.
func TestTableBuild_walk(t *testing.T) {
	tbl := Table{
		Name: "mytable",
		Fields: []Field{
			{
				Name:  "myfield1",
				Oid:   ".1.0.0.0.1.1",
				IsTag: true,
			},
			{
				Name: "myfield2",
				Oid:  ".1.0.0.0.1.2",
			},
			{
				Name:       "myfield3",
				Oid:        ".1.0.0.0.1.3",
				Conversion: "float",
			},
		},
	}

	// walk=true: enumerate rows via Walk rather than direct Get.
	tb, err := tbl.Build(tsc, true)
	require.NoError(t, err)

	assert.Equal(t, tb.Name, "mytable")
	rtr1 := RTableRow{
		Tags:   map[string]string{"myfield1": "foo"},
		Fields: map[string]interface{}{"myfield2": 1, "myfield3": float64(0.123)},
	}
	rtr2 := RTableRow{
		Tags:   map[string]string{"myfield1": "bar"},
		Fields: map[string]interface{}{"myfield2": 2, "myfield3": float64(0.456)},
	}
	assert.Len(t, tb.Rows, 2)
	assert.Contains(t, tb.Rows, rtr1)
	assert.Contains(t, tb.Rows, rtr2)
}
// TestTableBuild_noWalk builds a "table" from scalar OIDs (walk=false):
// a single row is expected, and a field marked IsTag ends up in Tags
// with its value stringified.
func TestTableBuild_noWalk(t *testing.T) {
	tbl := Table{
		Name: "mytable",
		Fields: []Field{
			{
				Name:  "myfield1",
				Oid:   ".1.0.0.1.1",
				IsTag: true,
			},
			{
				Name: "myfield2",
				Oid:  ".1.0.0.1.2",
			},
			{
				// Same OID as myfield2 but as a tag: the int value 234
				// is expected to be rendered as the string "234".
				Name:  "myfield3",
				Oid:   ".1.0.0.1.2",
				IsTag: true,
			},
		},
	}

	tb, err := tbl.Build(tsc, false)
	require.NoError(t, err)

	rtr := RTableRow{
		Tags:   map[string]string{"myfield1": "baz", "myfield3": "234"},
		Fields: map[string]interface{}{"myfield2": 234},
	}
	assert.Len(t, tb.Rows, 1)
	assert.Contains(t, tb.Rows, rtr)
}
// TestGather runs a full Gather against the stub connection and checks
// both the top-level metric and a sub-table metric, including tag
// inheritance (myfield1 propagates into the myOtherTable metric) and
// that metric timestamps fall inside the gather window.
func TestGather(t *testing.T) {
	s := &Snmp{
		Agents: []string{"TestGather"},
		Name:   "mytable",
		Fields: []Field{
			{
				Name:  "myfield1",
				Oid:   ".1.0.0.1.1",
				IsTag: true,
			},
			{
				Name: "myfield2",
				Oid:  ".1.0.0.1.2",
			},
			{
				// NOTE(review): no leading dot here, unlike every other
				// OID in these tests — presumably normalized by the
				// plugin since "baz" is still expected below; confirm
				// this is intentional.
				Name: "myfield3",
				Oid:  "1.0.0.1.1",
			},
		},
		Tables: []Table{
			{
				Name:        "myOtherTable",
				InheritTags: []string{"myfield1"},
				Fields: []Field{
					{
						Name: "myOtherField",
						Oid:  ".1.0.0.0.1.4",
					},
				},
			},
		},

		// Pre-seed the cache so "TestGather" resolves to the stub.
		connectionCache: map[string]snmpConnection{
			"TestGather": tsc,
		},
	}
	acc := &testutil.Accumulator{}

	tstart := time.Now()
	// NOTE(review): Gather's error return is ignored here; the
	// accumulator contents are asserted instead.
	s.Gather(acc)
	tstop := time.Now()

	require.Len(t, acc.Metrics, 2)

	m := acc.Metrics[0]
	assert.Equal(t, "mytable", m.Measurement)
	assert.Equal(t, "tsc", m.Tags["agent_host"])
	assert.Equal(t, "baz", m.Tags["myfield1"])
	assert.Len(t, m.Fields, 2)
	assert.Equal(t, 234, m.Fields["myfield2"])
	assert.Equal(t, "baz", m.Fields["myfield3"])
	assert.True(t, tstart.Before(m.Time))
	assert.True(t, tstop.After(m.Time))

	m2 := acc.Metrics[1]
	assert.Equal(t, "myOtherTable", m2.Measurement)
	assert.Equal(t, "tsc", m2.Tags["agent_host"])
	assert.Equal(t, "baz", m2.Tags["myfield1"])
	assert.Len(t, m2.Fields, 1)
	assert.Equal(t, 123456, m2.Fields["myOtherField"])
}
// TestGather_host checks the special "host" tag: a field named host
// with IsTag set becomes the metric's host tag.
func TestGather_host(t *testing.T) {
	s := &Snmp{
		Agents: []string{"TestGather"},
		Name:   "mytable",
		Fields: []Field{
			{
				Name:  "host",
				Oid:   ".1.0.0.1.1",
				IsTag: true,
			},
			{
				Name: "myfield2",
				Oid:  ".1.0.0.1.2",
			},
		},

		connectionCache: map[string]snmpConnection{
			"TestGather": tsc,
		},
	}

	acc := &testutil.Accumulator{}

	s.Gather(acc)

	require.Len(t, acc.Metrics, 1)
	m := acc.Metrics[0]
	assert.Equal(t, "baz", m.Tags["host"])
}
// TestFieldConvert table-tests fieldConvert: "" passes values through
// (with []byte becoming string), "float"/"float(N)" produce a float64
// scaled by 10^-N, and "int" produces an int64 — each across the full
// range of numeric input types.
func TestFieldConvert(t *testing.T) {
	testTable := []struct {
		input    interface{}
		conv     string
		expected interface{}
	}{
		{[]byte("foo"), "", string("foo")},
		{"0.123", "float", float64(0.123)},
		{[]byte("0.123"), "float", float64(0.123)},
		{float32(0.123), "float", float64(float32(0.123))},
		{float64(0.123), "float", float64(0.123)},
		{123, "float", float64(123)},
		{123, "float(0)", float64(123)},
		{123, "float(4)", float64(0.0123)},
		{int8(123), "float(3)", float64(0.123)},
		{int16(123), "float(3)", float64(0.123)},
		{int32(123), "float(3)", float64(0.123)},
		{int64(123), "float(3)", float64(0.123)},
		{uint(123), "float(3)", float64(0.123)},
		{uint8(123), "float(3)", float64(0.123)},
		{uint16(123), "float(3)", float64(0.123)},
		{uint32(123), "float(3)", float64(0.123)},
		{uint64(123), "float(3)", float64(0.123)},
		{"123", "int", int64(123)},
		{[]byte("123"), "int", int64(123)},
		{float32(12.3), "int", int64(12)},
		{float64(12.3), "int", int64(12)},
		{int(123), "int", int64(123)},
		{int8(123), "int", int64(123)},
		{int16(123), "int", int64(123)},
		{int32(123), "int", int64(123)},
		{int64(123), "int", int64(123)},
		{uint(123), "int", int64(123)},
		{uint8(123), "int", int64(123)},
		{uint16(123), "int", int64(123)},
		{uint32(123), "int", int64(123)},
		{uint64(123), "int", int64(123)},
	}

	for _, tc := range testTable {
		act := fieldConvert(tc.conv, tc.input)
		assert.EqualValues(t, tc.expected, act, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected)
	}
}
// TestError checks the NestedError helper: Errorf wraps the inner
// error, exposes it via NestedErr, and both messages appear in the
// combined Error() string.
func TestError(t *testing.T) {
	e := fmt.Errorf("nested error")
	err := Errorf(e, "top error %d", 123)
	require.Error(t, err)

	ne, ok := err.(NestedError)
	require.True(t, ok)
	assert.Equal(t, e, ne.NestedErr)

	assert.Contains(t, err.Error(), "top error 123")
	assert.Contains(t, err.Error(), "nested error")
}
|
// +build darwin
package main
// commands used by the compilation process might have different file names on macOS than those used on Linux.
var commands = map[string]string{
"ar": "llvm-ar",
"clang": "clang-7",
"ld.lld": "ld.lld-7",
"wasm-ld": "wasm-ld-7",
}
|
package stormpathweb
import (
	"fmt"
	"html/template"
	"io"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os/exec"
	"strings"
	"testing"

	"github.com/jarias/stormpath-sdk-go"
)
// mainTemplate is the HTML page served at "/": it shows a logout form
// for an authenticated account, and a "Hello World" page with a login
// link otherwise.
var mainTemplate = `
<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
    <title>Example</title>

    <!-- Bootstrap -->
    <!-- Latest compiled and minified CSS -->
    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css" integrity="sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7"
    crossorigin="anonymous">
    <link rel="stylesheet" href="/stormpath/assets/css/stormpath.css">
    <!-- HTML5 shim and Respond.js for IE8 support of HTML5 elements and media queries -->
    <!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
    <!--[if lt IE 9]>
      <script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script>
      <script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
    <![endif]-->
</head>

<body>
    <div class="container">
	{{ if .account }}
	<h1>Hello {{ .account.FullName }}</h1>
	<h4>Provider: {{ .account.ProviderData.ProviderID }}</h4>
	<form id="logoutForm" action="{{ .logoutUri }}" method="post">
		<input type="submit" class="btn btn-danger" value="Logout"/>
	</form>
	{{ else }}
	<h1>Hello World</h1>
	<a href="{{ .loginUri }}" class="btn btn-primary">Login</a>
	{{ end }}
    </div>
</body>

</html>
`
// GetTestServer builds an httptest server wrapping the Stormpath
// middleware plus a root handler that renders mainTemplate. It returns
// the running server and the middleware's application href (the TCK
// test needs the latter).
func GetTestServer() (*httptest.Server, string) {
	mux := http.NewServeMux()
	stormpathMiddleware := NewStormpathMiddleware(mux, nil)
	mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/" {
			http.NotFound(w, r)
			return
		}
		account := stormpathMiddleware.GetAuthenticatedAccount(w, r)

		w.Header().Add("Content-Type", "text/html")
		// tmpl, not "template": the original shadowed the
		// html/template package name with this local variable.
		tmpl, err := template.New("main").Parse(mainTemplate)
		if err != nil {
			fmt.Fprint(w, err)
			return
		}

		model := map[string]interface{}{
			"account":   account,
			"loginUri":  Config.LoginURI,
			"logoutUri": Config.LogoutURI,
		}

		if account != nil {
			model["name"] = account.GivenName
		}

		// Headers/status are already written by this point, so there is
		// nothing useful to do on a render failure; the error is
		// deliberately dropped (now explicit rather than silent).
		_ = tmpl.Execute(w, model)
	}))

	return httptest.NewServer(stormpathMiddleware), stormpathMiddleware.Application.Href
}
// BenchmarkGETLoginHTML measures GET /login rendered as HTML.
func BenchmarkGETLoginHTML(b *testing.B) {
	ts, _ := GetTestServer()
	defer ts.Close()

	for i := 0; i < b.N; i++ {
		req, _ := http.NewRequest(http.MethodGet, ts.URL+"/login", nil)
		req.Header.Set(stormpath.AcceptHeader, stormpath.TextHTML)

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		// Drain and close the body so the transport can reuse the
		// connection; the original discarded the response without
		// closing it, leaking a body (and connection) per iteration.
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
	}
}
// BenchmarkGETLoginJSON measures GET /login rendered as JSON.
func BenchmarkGETLoginJSON(b *testing.B) {
	ts, _ := GetTestServer()
	defer ts.Close()

	for i := 0; i < b.N; i++ {
		req, _ := http.NewRequest(http.MethodGet, ts.URL+"/login", nil)
		req.Header.Set(stormpath.AcceptHeader, stormpath.ApplicationJSON)

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		// Drain and close the body so the transport can reuse the
		// connection; the original discarded the response without
		// closing it, leaking a body (and connection) per iteration.
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
	}
}
// TestTCK runs the external Stormpath TCK shell script against a local
// test server, passing it the server's port and the application href.
func TestTCK(t *testing.T) {
	ts, applicationHref := GetTestServer()
	defer ts.Close()

	// u, not "url": the original shadowed the net/url package name.
	u, _ := url.Parse(ts.URL)
	port := u.Host[strings.Index(u.Host, ":")+1:]

	cmd := exec.Command("./tck.sh", port, applicationHref)
	if err := cmd.Start(); err != nil {
		// Fatal, not Error: the original fell through to cmd.Wait()
		// even when Start failed, producing a second bogus error from
		// waiting on a process that never started.
		t.Fatalf("Failed to start tck.sh script: %s", err)
	}
	if err := cmd.Wait(); err != nil {
		t.Errorf("tck.sh fail: %s", err)
	}
}
|
package web
import "github.com/junhwong/goost/security"
// AuthenticationFilter authenticates a request carried by a Context.
type AuthenticationFilter interface {
	// Filter performs authentication for the given Context and returns
	// the resulting Authentication, or an error if the request cannot
	// be authenticated.
	Filter(Context) (security.Authentication, error)
}
|
package bidscube
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strconv"
"github.com/buger/jsonparser"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/adapters"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/errortypes"
"github.com/prebid/prebid-server/openrtb_ext"
)
// adapter is the BidsCube bidder adapter.
type adapter struct {
	// endpoint is the URL every outgoing bid request is POSTed to.
	endpoint string
}
// MakeRequests fans the incoming bid request out into one outgoing
// request per impression. For each impression the generic ext envelope
// is replaced by its inner "bidder" object before marshalling.
// request.Imp is temporarily narrowed to the single impression for
// serialization and restored before returning.
func (a *adapter) MakeRequests(request *openrtb2.BidRequest, _ *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) {
	headers := http.Header{}
	headers.Add("Content-Type", "application/json;charset=utf-8")
	headers.Add("Accept", "application/json")

	originalImps := request.Imp
	requests := make([]*adapters.RequestData, 0, len(originalImps))
	var errs []error

	for _, imp := range originalImps {
		var impExt map[string]json.RawMessage
		if err := json.Unmarshal(imp.Ext, &impExt); err != nil {
			errs = append(errs, err)
			continue
		}

		bidderExt, ok := impExt["bidder"]
		if !ok || len(bidderExt) == 0 {
			errs = append(errs, errors.New("bidder parameters required"))
			continue
		}

		// Serialize the request as if it carried only this impression,
		// with the bidder-specific ext in place of the envelope.
		imp.Ext = bidderExt
		request.Imp = []openrtb2.Imp{imp}

		body, err := json.Marshal(request)
		if err != nil {
			errs = append(errs, err)
			continue
		}

		requests = append(requests, &adapters.RequestData{
			Method:  "POST",
			Uri:     a.endpoint,
			Body:    body,
			Headers: headers,
		})
	}

	// Put the full impression list back so the caller's request is intact.
	request.Imp = originalImps
	return requests, errs
}
// MakeBids parses the bidder's HTTP response: 204 means no bids, 400 is
// reported as BadInput, any other non-200 as BadServerResponse. Each
// returned bid's media type is read from bid.ext.prebid.type; bids with
// an unreadable type are skipped and reported as errors.
func (a *adapter) MakeBids(request *openrtb2.BidRequest, _ *adapters.RequestData, responseData *adapters.ResponseData) (*adapters.BidderResponse, []error) {
	if responseData.StatusCode == http.StatusNoContent {
		return nil, nil
	}
	if responseData.StatusCode == http.StatusBadRequest {
		return nil, []error{&errortypes.BadInput{
			Message: "unexpected status code: " + strconv.Itoa(responseData.StatusCode),
		}}
	}
	if responseData.StatusCode != http.StatusOK {
		return nil, []error{&errortypes.BadServerResponse{
			Message: "unexpected status code: " + strconv.Itoa(responseData.StatusCode),
		}}
	}

	var bidResponse openrtb2.BidResponse
	if err := json.Unmarshal(responseData.Body, &bidResponse); err != nil {
		return nil, []error{&errortypes.BadServerResponse{
			Message: err.Error(),
		}}
	}

	var errs []error
	response := adapters.NewBidderResponseWithBidsCapacity(len(request.Imp))
	for _, seatBid := range bidResponse.SeatBid {
		// Index rather than range-value so &seatBid.Bid[i] points at
		// the slice element, not a loop-local copy.
		for i := range seatBid.Bid {
			bidType, err := jsonparser.GetString(seatBid.Bid[i].Ext, "prebid", "type")
			if err != nil {
				errs = append(errs, fmt.Errorf("unable to read bid.ext.prebid.type: %v", err))
				continue
			}
			response.Bids = append(response.Bids, &adapters.TypedBid{
				Bid:     &seatBid.Bid[i],
				BidType: getMediaTypeForImp(bidType),
			})
		}
	}
	return response, errs
}
// getMediaTypeForImp maps a bid.ext.prebid.type string onto a BidType.
// "banner" and any unrecognized value both resolve to banner.
func getMediaTypeForImp(bidType string) openrtb_ext.BidType {
	switch bidType {
	case "video":
		return openrtb_ext.BidTypeVideo
	case "native":
		return openrtb_ext.BidTypeNative
	default:
		return openrtb_ext.BidTypeBanner
	}
}
// Builder builds a new instance of the BidsCube adapter for the given bidder with the given config.
// The bidderName and server parameters are required by the standard
// adapter builder signature but are not used by this adapter. The
// returned error is always nil.
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {
	bidder := &adapter{
		endpoint: config.Endpoint,
	}
	return bidder, nil
}
|
package pkg
import (
"github.com/gin-gonic/gin"
"snippetBox-microservice/news/pkg/domain"
)
// SetupRoutes wires the news controller into a fresh gin engine with
// request logging, panic recovery, and the SecureHeaders middleware
// applied to every route, and returns the engine ready to run.
func SetupRoutes(controller domain.NewsController) *gin.Engine {
	r := gin.New()
	r.Use(gin.Logger(), gin.Recovery(), SecureHeaders())
	// GET /           -> home page
	// GET /news/:id   -> a single news item by id
	r.GET("/", controller.Home)
	r.GET("/news/:id", controller.ShowNews)
	return r
}
|
package main
import (
"bufio"
"fmt"
"os"
"strconv"
)
// main repeatedly prompts for an integer on stdin and prints its hex
// representation, until the user types EXIT.
func main() {
	scanner := bufio.NewScanner(os.Stdin)
	var input string
	for input != "EXIT" {
		fmt.Print("What integer would you like to convert to a hex value? (Type 'EXIT' to leave)")
		scanner.Scan()
		input = scanner.Text()
		if input == "EXIT" {
			continue
		}
		// err, not "error": the original shadowed the builtin error
		// type with its variable name.
		integer, err := strconv.Atoi(input)
		if err != nil {
			fmt.Print("Please enter an integer...\n")
			continue
		}
		hex := fmt.Sprintf("%x", integer)
		fmt.Printf("Hex conversion of '%d' is '%s'\n", integer, hex)
	}
}
|
// Copyright 2020 Ye Zi Jie. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: FishGoddess
// Email: fishgoddess@qq.com
// Created at 2020/03/02 20:51:29
package main
import (
"testing"
//"time"
"github.com/FishGoddess/logit"
//"github.com/FishGoddess/logit/files"
//"github.com/kataras/golog"
//"github.com/sirupsen/logrus"
//"go.uber.org/zap"
//"go.uber.org/zap/zapcore"
)
const (
	// timeFormat is the timestamp layout every benchmarked logger is
	// configured with (Go reference time 2006-01-02 15:04:05).
	timeFormat = "2006-01-02 15:04:05"
)
// nopWriter discards everything written to it. It reports the full
// length as consumed, as the io.Writer contract requires: the previous
// `return 0, nil` signaled a short write with no error, which breaks
// any writer (e.g. bufio, io.Copy) that checks for short writes.
type nopWriter struct{}

// Write discards p and reports it as fully written.
func (w *nopWriter) Write(p []byte) (n int, err error) {
	return len(p), nil
}
/*
BenchmarkLogitLogger-8 6429907 1855 ns/op 384 B/op 8 allocs/op
BenchmarkGologLogger-8 3361483 3589 ns/op 712 B/op 24 allocs/op
BenchmarkZapLogger-8 2971119 4066 ns/op 448 B/op 16 allocs/op
BenchmarkLogrusLogger-8 1553419 7869 ns/op 1633 B/op 52 allocs/op
***************************************************************************************************************
BenchmarkLogitFile-8 1000000 10604 ns/op 384 B/op 8 allocs/op
BenchmarkGologFile-8 600966 20385 ns/op 712 B/op 24 allocs/op
BenchmarkZapFile-8 828692 13586 ns/op 448 B/op 16 allocs/op
BenchmarkLogrusFile-8 632258 16950 ns/op 1633 B/op 52 allocs/op
*/
// BenchmarkLogitLogger measures the logit logger's speed.
func BenchmarkLogitLogger(b *testing.B) {
	// Logger under test, writing to a no-op sink.
	logger := logit.NewLogger(logit.DebugLevel, logit.NewStandardHandler(&nopWriter{}, logit.TextEncoder(), timeFormat))

	// One benchmark iteration: one message at each level.
	logTask := func() {
		logger.Debug("debug...")
		logger.Info("info...")
		logger.Warn("warning...")
		logger.Error("error...")
	}

	b.ReportAllocs()
	// ResetTimer, not StartTimer: the benchmark timer is already
	// running when the function is entered, so StartTimer was a no-op
	// and the logger setup above was being counted in the measurement.
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		logTask()
	}
}
// 测试 golog 日志记录器的速度
//func BenchmarkGologLogger(b *testing.B) {
//
// logger := golog.New()
// logger.SetOutput(&nopWriter{})
// logger.SetLevel("debug")
// logger.SetTimeFormat(timeFormat)
//
// // 测试用的日志任务
// logTask := func() {
// logger.Debug("debug...")
// logger.Info("info...")
// logger.Warn("warning...")
// logger.Error("error...")
// }
//
// // 开始性能测试
// b.ReportAllocs()
// b.StartTimer()
//
// for i := 0; i < b.N; i++ {
// logTask()
// }
//}
// 测试 zap 日志记录器的速度
//func BenchmarkZapLogger(b *testing.B) {
//
// // 测试用的日志记录器
// config := zap.NewProductionEncoderConfig()
// config.EncodeTime = func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
// enc.AppendString(t.Format(timeFormat))
// }
// encoder := zapcore.NewConsoleEncoder(config)
// nopWriteSyncer := zapcore.AddSync(&nopWriter{})
// core := zapcore.NewCore(encoder, nopWriteSyncer, zapcore.DebugLevel)
// logger := zap.New(core)
// defer logger.Sync()
//
// // 测试用的日志任务
// logTask := func() {
// logger.Debug("debug...")
// logger.Info("info...")
// logger.Warn("warning...")
// logger.Error("error...")
// }
//
// // 开始性能测试
// b.ReportAllocs()
// b.StartTimer()
//
// for i := 0; i < b.N; i++ {
// logTask()
// }
//}
// 测试 logrus 日志记录器的速度
//func BenchmarkLogrusLogger(b *testing.B) {
//
// logger := logrus.New()
// logger.SetOutput(&nopWriter{})
// logger.SetLevel(logrus.DebugLevel)
// logger.SetFormatter(&logrus.TextFormatter{
// TimestampFormat: timeFormat,
// })
//
// // 测试用的日志任务
// logTask := func() {
// logger.Debug("debug...")
// logger.Info("info...")
// logger.Warn("warning...")
// logger.Error("error...")
// }
//
// b.ReportAllocs()
// b.StartTimer()
//
// for i := 0; i < b.N; i++ {
// logTask()
// }
//}
// ******************************************************
//// 测试 logit 文件日志记录器的速度
//func BenchmarkLogitFile(b *testing.B) {
//
// file, _ := files.CreateFileOf("D:/BenchmarkLogitFile.log")
// logger := logit.NewLogger(logit.DebugLevel, logit.NewStandardHandler(file, logit.TextEncoder(), timeFormat))
//
// // 测试用的日志任务
// logTask := func() {
// logger.Debug("debug...")
// logger.Info("info...")
// logger.Warn("warning...")
// logger.Error("error...")
// }
//
// b.ReportAllocs()
// b.StartTimer()
//
// for i := 0; i < b.N; i++ {
// logTask()
// }
//}
//
//// 测试 golog 文件日志记录器的速度
//func BenchmarkGologFile(b *testing.B) {
//
// logger := golog.New()
// file, _ := files.CreateFileOf("D:/BenchmarkGologFile.log")
// logger.SetOutput(file)
// logger.SetLevel("debug")
// logger.SetTimeFormat(timeFormat)
//
// // 测试用的日志任务
// logTask := func() {
// logger.Debug("debug...")
// logger.Info("info...")
// logger.Warn("warning...")
// logger.Error("error...")
// }
//
// // 开始性能测试
// b.ReportAllocs()
// b.StartTimer()
//
// for i := 0; i < b.N; i++ {
// logTask()
// }
//}
//
//// 测试 zap 文件日志记录器的速度
//func BenchmarkZapFile(b *testing.B) {
//
// // 测试用的日志记录器
// config := zap.NewProductionEncoderConfig()
// config.EncodeTime = func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
// enc.AppendString(t.Format(timeFormat))
// }
// encoder := zapcore.NewConsoleEncoder(config)
// file, _ := files.CreateFileOf("D:/BenchmarkZapFile.log")
// writeSyncer := zapcore.AddSync(file)
// core := zapcore.NewCore(encoder, writeSyncer, zapcore.DebugLevel)
// logger := zap.New(core)
// defer logger.Sync()
//
// // 测试用的日志任务
// logTask := func() {
// logger.Debug("debug...")
// logger.Info("info...")
// logger.Warn("warning...")
// logger.Error("error...")
// }
//
// // 开始性能测试
// b.ReportAllocs()
// b.StartTimer()
//
// for i := 0; i < b.N; i++ {
// logTask()
// }
//}
//
//// 测试 logrus 文件日志记录器的速度
//func BenchmarkLogrusFile(b *testing.B) {
//
// logger := logrus.New()
// file, _ := files.CreateFileOf("D:/BenchmarkLogrusFile.log")
// logger.SetOutput(file)
// logger.SetLevel(logrus.DebugLevel)
// logger.SetFormatter(&logrus.TextFormatter{
// TimestampFormat: timeFormat,
// })
//
// // 测试用的日志任务
// logTask := func() {
// logger.Debug("debug...")
// logger.Info("info...")
// logger.Warn("warning...")
// logger.Error("error...")
// }
//
// b.ReportAllocs()
// b.StartTimer()
//
// for i := 0; i < b.N; i++ {
// logTask()
// }
//}
|
package plex
import (
"net/http"
)
// StatusService provides access to the Plex status endpoints, e.g.
// /transcode/sessions and current-session listings.
type StatusService service
// TranscodeSessionsResponse is the result for transcode session endpoint /transcode/sessions
type TranscodeSessionsResponse struct {
	// Children holds one entry per active transcode session.
	Children []struct {
		ElementType   string  `json:"_elementType"`
		AudioChannels int     `json:"audioChannels"`
		AudioCodec    string  `json:"audioCodec"`
		AudioDecision string  `json:"audioDecision"`
		Container     string  `json:"container"`
		Context       string  `json:"context"`
		Duration      int     `json:"duration"`
		Height        int     `json:"height"`
		Key           string  `json:"key"`
		// Progress is the transcode completion percentage as reported
		// by the server.
		Progress      float64 `json:"progress"`
		Protocol      string  `json:"protocol"`
		Remaining     int     `json:"remaining"`
		Speed         float64 `json:"speed"`
		Throttled     bool    `json:"throttled"`
		VideoCodec    string  `json:"videoCodec"`
		VideoDecision string  `json:"videoDecision"`
		Width         int     `json:"width"`
	} `json:"_children"`
	ElementType string `json:"_elementType"`
}
// CurrentSessionsVideo are current video sessions
//
// Scalar fields are typed as string because Plex reports these values as XML
// attributes (and with dynamic types in JSON) — see the note on
// CurrentSessions.
type CurrentSessionsVideo struct {
	AddedAt               string `json:"addedAt" xml:"addedAt,attr"`
	Art                   string `json:"art" xml:"art,attr"`
	ChapterSource         string `json:"chapterSource" xml:"chapterSource,attr"`
	ContentRating         string `json:"contentRating" xml:"contentRating,attr"`
	Duration              string `json:"duration" xml:"duration,attr"`
	GUID                  string `json:"guid" xml:"guid,attr"`
	Key                   string `json:"key" xml:"key,attr"`
	LibrarySectionID      string `json:"librarySectionID" xml:"librarySectionID,attr"`
	OriginallyAvailableAt string `json:"originallyAvailableAt" xml:"originallyAvailableAt,attr"`
	PrimaryExtraKey       string `json:"primaryExtraKey" xml:"primaryExtraKey,attr"`
	Rating                string `json:"rating" xml:"rating,attr"`
	RatingKey             string `json:"ratingKey" xml:"ratingKey,attr"`
	SessionKey            string `json:"sessionKey" xml:"sessionKey,attr"`
	Studio                string `json:"studio" xml:"studio,attr"`
	Summary               string `json:"summary" xml:"summary,attr"`
	Tagline               string `json:"tagline" xml:"tagline,attr"`
	Thumb                 string `json:"thumb" xml:"thumb,attr"`
	Title                 string `json:"title" xml:"title,attr"`
	TitleSort             string `json:"titleSort" xml:"titleSort,attr"`
	Type                  string `json:"type" xml:"type,attr"`
	UpdatedAt             string `json:"updatedAt" xml:"updatedAt,attr"`
	ViewOffset            string `json:"viewOffset" xml:"viewOffset,attr"`
	Year                  string `json:"year" xml:"year,attr"`
	// Media describes the source files backing this session, including the
	// parts and individual streams within each part.
	Media []struct {
		AspectRatio           string `json:"aspectRatio" xml:"aspectRatio,attr"`
		AudioChannels         string `json:"audioChannels" xml:"audioChannels,attr"`
		AudioCodec            string `json:"audioCodec" xml:"audioCodec,attr"`
		AudioProfile          string `json:"audioProfile" xml:"audioProfile,attr"`
		Bitrate               string `json:"bitrate" xml:"bitrate,attr"`
		Container             string `json:"container" xml:"container,attr"`
		Duration              string `json:"duration" xml:"duration,attr"`
		Has64bitOffsets       string `json:"has64bitOffsets" xml:"has64bitOffsets,attr"`
		Height                string `json:"height" xml:"height,attr"`
		ID                    string `json:"id" xml:"id,attr"`
		OptimizedForStreaming string `json:"optimizedForStreaming" xml:"optimizedForStreaming,attr"`
		VideoCodec            string `json:"videoCodec" xml:"videoCodec,attr"`
		VideoFrameRate        string `json:"videoFrameRate" xml:"videoFrameRate,attr"`
		VideoProfile          string `json:"videoProfile" xml:"videoProfile,attr"`
		VideoResolution       string `json:"videoResolution" xml:"videoResolution,attr"`
		Width                 string `json:"width" xml:"width,attr"`
		Part []struct {
			AudioProfile          string `json:"audioProfile" xml:"audioProfile,attr"`
			Container             string `json:"container" xml:"container,attr"`
			Duration              string `json:"duration" xml:"duration,attr"`
			File                  string `json:"file" xml:"file,attr"`
			Has64bitOffsets       string `json:"has64bitOffsets" xml:"has64bitOffsets,attr"`
			ID                    string `json:"id" xml:"id,attr"`
			Indexes               string `json:"indexes" xml:"indexes,attr"`
			Key                   string `json:"key" xml:"key,attr"`
			OptimizedForStreaming string `json:"optimizedForStreaming" xml:"optimizedForStreaming,attr"`
			Size                  string `json:"size" xml:"size,attr"`
			VideoProfile          string `json:"videoProfile" xml:"videoProfile,attr"`
			// Stream mixes video, audio and subtitle stream attributes in a
			// single shape; unrelated fields are simply empty for a given
			// stream type.
			Stream []struct {
				BitDepth           string `json:"bitDepth" xml:"bitDepth,attr"`
				Bitrate            string `json:"bitrate" xml:"bitrate,attr"`
				Cabac              string `json:"cabac" xml:"cabac,attr"`
				ChromaSubsampling  string `json:"chromaSubsampling" xml:"chromaSubsampling,attr"`
				Codec              string `json:"codec" xml:"codec,attr"`
				CodecID            string `json:"codecID" xml:"codecID,attr"`
				ColorRange         string `json:"colorRange" xml:"colorRange,attr"`
				ColorSpace         string `json:"colorSpace" xml:"colorSpace,attr"`
				Default            string `json:"default" xml:"default,attr"`
				Duration           string `json:"duration" xml:"duration,attr"`
				FrameRate          string `json:"frameRate" xml:"frameRate,attr"`
				FrameRateMode      string `json:"frameRateMode" xml:"frameRateMode,attr"`
				HasScalingMatrix   string `json:"hasScalingMatrix" xml:"hasScalingMatrix,attr"`
				Height             string `json:"height" xml:"height,attr"`
				ID                 string `json:"id" xml:"id,attr"`
				Index              string `json:"index" xml:"index,attr"`
				Level              string `json:"level" xml:"level,attr"`
				PixelFormat        string `json:"pixelFormat" xml:"pixelFormat,attr"`
				Profile            string `json:"profile" xml:"profile,attr"`
				RefFrames          string `json:"refFrames" xml:"refFrames,attr"`
				ScanType           string `json:"scanType" xml:"scanType,attr"`
				StreamIdentifier   string `json:"streamIdentifier" xml:"streamIdentifier,attr"`
				StreamType         string `json:"streamType" xml:"streamType,attr"`
				Width              string `json:"width" xml:"width,attr"`
				AudioChannelLayout string `json:"audioChannelLayout" xml:"audioChannelLayout,attr"`
				BitrateMode        string `json:"bitrateMode" xml:"bitrateMode,attr"`
				Channels           string `json:"channels" xml:"channels,attr"`
				Language           string `json:"language" xml:"language,attr"`
				LanguageCode       string `json:"languageCode" xml:"languageCode,attr"`
				SamplingRate       string `json:"samplingRate" xml:"samplingRate,attr"`
				Selected           string `json:"selected" xml:"selected,attr"`
				Format             string `json:"format" xml:"format,attr"`
				Key                string `json:"key" xml:"key,attr"`
			} `json:"Stream" xml:"Stream"`
		} `json:"Part" xml:"Part"`
	} `json:"Media" xml:"Media"`
	// Tag collections attached to the item (genre, cast, crew, ...).
	Genre []struct {
		Count string `json:"count" xml:"count,attr"`
		ID    string `json:"id" xml:"id,attr"`
		Tag   string `json:"tag" xml:"tag,attr"`
	} `json:"Genre" xml:"Genre"`
	Writer []struct {
		ID    string `json:"id" xml:"id,attr"`
		Tag   string `json:"tag" xml:"tag,attr"`
		Count string `json:"count" xml:"count,attr"`
	} `json:"Writer" xml:"Writer"`
	Director []struct {
		Count string `json:"count" xml:"count,attr"`
		ID    string `json:"id" xml:"id,attr"`
		Tag   string `json:"tag" xml:"tag,attr"`
	} `json:"Director" xml:"Director"`
	Producer []struct {
		Count string `json:"count" xml:"count,attr"`
		ID    string `json:"id" xml:"id,attr"`
		Tag   string `json:"tag" xml:"tag,attr"`
	} `json:"Producer" xml:"Producer"`
	Country []struct {
		Count string `json:"count" xml:"count,attr"`
		ID    string `json:"id" xml:"id,attr"`
		Tag   string `json:"tag" xml:"tag,attr"`
	} `json:"Country" xml:"Country"`
	Role []struct {
		Count string `json:"count" xml:"count,attr"`
		ID    string `json:"id" xml:"id,attr"`
		Role  string `json:"role" xml:"role,attr"`
		Tag   string `json:"tag" xml:"tag,attr"`
	} `json:"Role" xml:"Role"`
	Similar []struct {
		Count  string `json:"count"`
		Filter string `json:"filter"`
		ID     string `json:"id"`
		Tag    string `json:"tag"`
	} `json:"Similar"`
	Collection struct {
		Count string `json:"count" xml:"count,attr"`
		ID    string `json:"id" xml:"id,attr"`
		Tag   string `json:"tag" xml:"tag,attr"`
	} `json:"Collection" xml:"Collection"`
	Label struct {
		ID  string `json:"id" xml:"id,attr"`
		Tag string `json:"tag" xml:"tag,attr"`
	} `json:"Label" xml:"Label"`
	Field struct {
		Locked string `json:"locked" xml:"locked,attr"`
		Name   string `json:"name" xml:"name,attr"`
	} `json:"Field" xml:"Field"`
	// User and Player identify who is watching and on which device.
	User struct {
		ID    string `json:"id" xml:"id,attr"`
		Title string `json:"title" xml:"title,attr"`
		Thumb string `json:"thumb" xml:"thumb,attr"`
	} `json:"User" xml:"User"`
	Player struct {
		Address           string `json:"address" xml:"address,attr"`
		Device            string `json:"device" xml:"device,attr"`
		MachineIdentifier string `json:"machineIdentifier" xml:"machineIdentifier,attr"`
		Model             string `json:"model" xml:"model,attr"`
		Platform          string `json:"platform" xml:"platform,attr"`
		PlatformVersion   string `json:"platformVersion" xml:"platformVersion,attr"`
		Product           string `json:"product" xml:"product,attr"`
		Profile           string `json:"profile" xml:"profile,attr"`
		State             string `json:"state" xml:"state,attr"`
		Title             string `json:"title" xml:"title,attr"`
		Vendor            string `json:"vendor" xml:"vendor,attr"`
		Version           string `json:"version" xml:"version,attr"`
	} `json:"Player" xml:"Player"`
	// Grandparent*/Parent* fields locate episodes within their show/season
	// hierarchy; empty for movies.
	GrandparentArt       string `json:"grandparentArt" xml:"grandparentArt,attr"`
	GrandparentKey       string `json:"grandparentKey" xml:"grandparentKey,attr"`
	GrandparentRatingKey string `json:"grandparentRatingKey" xml:"grandparentRatingKey,attr"`
	GrandparentTheme     string `json:"grandparentTheme" xml:"grandparentTheme,attr"`
	GrandparentThumb     string `json:"grandparentThumb" xml:"grandparentThumb,attr"`
	GrandparentTitle     string `json:"grandparentTitle" xml:"grandparentTitle,attr"`
	Index                string `json:"index" xml:"index,attr"`
	LastViewedAt         string `json:"lastViewedAt" xml:"lastViewedAt,attr"`
	ParentIndex          string `json:"parentIndex" xml:"parentIndex,attr"`
	ParentKey            string `json:"parentKey" xml:"parentKey,attr"`
	ParentRatingKey      string `json:"parentRatingKey" xml:"parentRatingKey,attr"`
	ParentThumb          string `json:"parentThumb" xml:"parentThumb,attr"`
	ViewCount            string `json:"viewCount" xml:"viewCount,attr"`
	Session struct {
		ID        string `json:"id" xml:"id,attr"`
		Bandwidth int    `json:"bandwidth" xml:"bandwidth,attr"`
		Location  string `json:"location" xml:"location,attr"`
	} `json:"Session" xml:"Session"`
	// TranscodeSession is populated only when the server is transcoding this
	// stream rather than direct-playing it.
	TranscodeSession struct {
		Key           string `json:"key" xml:"key,attr"`
		Throttled     string `json:"throttled" xml:"throttled,attr"`
		Progress      string `json:"progress" xml:"progress,attr"`
		Speed         string `json:"speed" xml:"speed,attr"`
		Duration      string `json:"duration" xml:"duration,attr"`
		Remaining     string `json:"remaining" xml:"remaining,attr"`
		Context       string `json:"context" xml:"context,attr"`
		VideoDecision string `json:"videoDecision" xml:"videoDecision,attr"`
		AudioDecision string `json:"audioDecision" xml:"audioDecision,attr"`
		Protocol      string `json:"protocol" xml:"protocol,attr"`
		Container     string `json:"container" xml:"container,attr"`
		VideoCodec    string `json:"videoCodec" xml:"videoCodec,attr"`
		AudioCodec    string `json:"audioCodec" xml:"audioCodec,attr"`
		AudioChannels string `json:"audioChannels" xml:"audioChannels,attr"`
		Width         string `json:"width" xml:"width,attr"`
		Height        string `json:"height" xml:"height,attr"`
	} `json:"TranscodeSession" xml:"TranscodeSession"`
}
// CurrentSessions is xml because plex returns a dynamic type (string or number) for the duration field
//
// Video entries use the shared CurrentSessionsVideo type; Track entries (music
// sessions) are decoded via XML attributes only.
type CurrentSessions struct {
	MediaContainer struct {
		Size  int                     `json:"size" xml:"size,attr"`
		Video []*CurrentSessionsVideo `json:"Video" xml:"Video"`
		Track []struct {
			AddedAt              string `xml:"addedAt,attr"`
			Art                  string `xml:"art,attr"`
			ChapterSource        string `xml:"chapterSource,attr"`
			Duration             string `xml:"duration,attr"`
			GrandparentArt       string `xml:"grandparentArt,attr"`
			GrandparentKey       string `xml:"grandparentKey,attr"`
			GrandparentRatingKey string `xml:"grandparentRatingKey,attr"`
			GrandparentThumb     string `xml:"grandparentThumb,attr"`
			GrandparentTitle     string `xml:"grandparentTitle,attr"`
			GUID                 string `xml:"guid,attr"`
			Index                string `xml:"index,attr"`
			Key                  string `xml:"key,attr"`
			LastViewedAt         string `xml:"lastViewedAt,attr"`
			LibrarySectionID     string `xml:"librarySectionID,attr"`
			ParentIndex          string `xml:"parentIndex,attr"`
			ParentKey            string `xml:"parentKey,attr"`
			ParentRatingKey      string `xml:"parentRatingKey,attr"`
			ParentTitle          string `xml:"parentTitle,attr"`
			RatingKey            string `xml:"ratingKey,attr"`
			SessionKey           string `xml:"sessionKey,attr"`
			Summary              string `xml:"summary,attr"`
			Tagline              string `xml:"tagline,attr"`
			Thumb                string `xml:"thumb,attr"`
			Title                string `xml:"title,attr"`
			Type                 string `xml:"type,attr"`
			UpdatedAt            string `xml:"updatedAt,attr"`
			ViewCount            int    `xml:"viewCount,attr"`
			ViewOffset           int    `xml:"viewOffset,attr"`
			Media struct {
				AudioChannels string `xml:"audioChannels,attr"`
				AudioCodec    string `xml:"audioCodec,attr"`
				Bitrate       string `xml:"bitrate,attr"`
				Container     string `xml:"container,attr"`
				Duration      string `xml:"duration,attr"`
				ID            string `xml:"id,attr"`
				Part struct {
					Container string `xml:"container,attr"`
					Duration  string `xml:"duration,attr"`
					File      string `xml:"file,attr"`
					ID        string `xml:"id,attr"`
					Key       string `xml:"key,attr"`
					Size      string `xml:"size,attr"`
					Stream []struct {
						AudioChannelLayout string `xml:"audioChannelLayout,attr"`
						Bitrate            string `xml:"bitrate,attr"`
						BitrateMode        string `xml:"bitrateMode,attr"`
						Channels           string `xml:"channels,attr"`
						Codec              string `xml:"codec,attr"`
						Duration           string `xml:"duration,attr"`
						ID                 string `xml:"id,attr"`
						Index              string `xml:"index,attr"`
						SamplingRate       string `xml:"samplingRate,attr"`
						Selected           string `xml:"selected,attr"`
						StreamType         string `xml:"streamType,attr"`
					} `xml:"Stream"`
				} `xml:"Part"`
			} `xml:"Media"`
			User struct {
				ID    int    `xml:"id,attr"`
				Title string `xml:"title,attr"`
				Thumb string `xml:"thumb,attr"`
			} `xml:"User"`
			Player struct {
				Address           string `xml:"address,attr"`
				Device            string `xml:"device,attr"`
				MachineIdentifier string `xml:"machineIdentifier,attr"`
				Model             string `xml:"model,attr"`
				Platform          string `xml:"platform,attr"`
				PlatformVersion   string `xml:"platformVersion,attr"`
				Product           string `xml:"product,attr"`
				Profile           string `xml:"profile,attr"`
				State             string `xml:"state,attr"`
				Title             string `xml:"title,attr"`
				Vendor            string `xml:"vendor,attr"`
				Version           string `xml:"version,attr"`
			} `xml:"Player"`
			TranscodeSession struct {
				Key           string `xml:"key,attr"`
				Throttled     string `xml:"throttled,attr"`
				Progress      string `xml:"progress,attr"`
				Speed         string `xml:"speed,attr"`
				Duration      string `xml:"duration,attr"`
				Remaining     string `xml:"remaining,attr"`
				Context       string `xml:"context,attr"`
				VideoDecision string `xml:"videoDecision,attr"`
				AudioDecision string `xml:"audioDecision,attr"`
				Protocol      string `xml:"protocol,attr"`
				Container     string `xml:"container,attr"`
				VideoCodec    string `xml:"videoCodec,attr"`
				AudioCodec    string `xml:"audioCodec,attr"`
				AudioChannels string `xml:"audioChannels,attr"`
				Width         string `xml:"width,attr"`
				Height        string `xml:"height,attr"`
			} `xml:"TranscodeSession"`
		} `json:"Track" xml:"Track"`
	} `json:"MediaContainer"`
}
// History is the container returned by /status/sessions/history/all; watched
// items are grouped by their media kind.
type History struct {
	MediaContainer struct {
		Size     int             `json:"size"`
		Video    []*HistoricView `json:"Video"`
		Playlist []*HistoricView `json:"Playlist"`
		Track    []*HistoricView `json:"Track"`
	}
}
// HistoricView is a single watched-history entry; it appears under the Video,
// Playlist or Track list of History depending on the media kind.
type HistoricView struct {
	Key              string `json:"key"`
	ParentKey        string `json:"parentKey"`
	GrandparentKey   string `json:"grandparentKey"`
	Title            string `json:"title"`
	GrandparentTitle string `json:"grandparentTitle"`
	Type             string `json:"type"`
	Thumb            string `json:"thumb"`
	ParentThumb      string `json:"parentThumb"`
	GrandparentThumb string `json:"grandparentThumb"`
	GrandparentArt   string `json:"grandparentArt"`
	Index            int    `json:"index"`
	ParentIndex      int    `json:"parentIndex"`
	// ViewedAt is a Unix timestamp — presumably seconds; TODO confirm against
	// the Plex API.
	ViewedAt int `json:"viewedAt"`
	User struct {
		ID    string `json:"id"`
		Thumb string `json:"thumb"`
		Title string `json:"title"`
	} `json:"User"`
}
func (s *StatusService) ListCurrentSessions() (*CurrentSessions, *http.Response, error) {
req, err := s.client.NewRequest("GET", "status/sessions", nil)
if err != nil {
return nil, nil, err
}
currentSessions := new(CurrentSessions)
resp, err := s.client.Do(req, ¤tSessions)
if err != nil {
return nil, nil, err
}
return currentSessions, resp, nil
}
// ListAllHistoryViews retrieves the complete watch history of the Plex server
// (GET status/sessions/history/all).
func (s *StatusService) ListAllHistoryViews() (*History, *http.Response, error) {
	req, err := s.client.NewRequest("GET", "status/sessions/history/all", nil)
	if err != nil {
		return nil, nil, err
	}
	history := new(History)
	// history is already a *History; passing it directly avoids the redundant
	// pointer-to-pointer the old "&history" created.
	resp, err := s.client.Do(req, history)
	if err != nil {
		return nil, nil, err
	}
	return history, resp, nil
}
|
package config
import (
"errors"
"io/ioutil"
"gopkg.in/yaml.v2"
)
// Values of the configuration
//
// The Stripe section maps the "stripe" block of config.yaml: the publishable
// key, the secret API key, and the signing secret used to verify webhooks.
type Values struct {
	Stripe struct {
		PublicKey     string `yaml:"public_key"`
		SecretKey     string `yaml:"secret_key"`
		WebhookSecret string `yaml:"webhook_secret"`
	} `yaml:"stripe"`
}
// Read loads ./config.yaml from the working directory, parses it, and returns
// the configuration values.
func Read() (Values, error) {
	data, err := ioutil.ReadFile("./config.yaml")
	if err != nil {
		// Include the underlying cause; the old message silently discarded it.
		return Values{}, errors.New("could not read configuration file: " + err.Error())
	}
	v := Values{}
	// data is already a []byte; no conversion is needed before unmarshalling.
	if err := yaml.Unmarshal(data, &v); err != nil {
		return Values{}, errors.New("could not parse configuration file: " + err.Error())
	}
	return v, nil
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package arc
import (
"context"
"encoding/json"
"strconv"
"strings"
"time"
"chromiumos/tast/common/android/ui"
"chromiumos/tast/common/perf"
"chromiumos/tast/errors"
"chromiumos/tast/local/arc"
"chromiumos/tast/testing"
)
// init registers the SensorPerf test with the Tast framework, with one variant
// per supported Android container type.
func init() {
	testing.AddTest(&testing.Test{
		Func:         SensorPerf,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Test ARC sensor system performance",
		Contacts:     []string{"arc-performance@google.com", "wvk@google.com"},
		Attr:         []string{"group:crosbolt", "crosbolt_perbuild"},
		SoftwareDeps: []string{"chrome"},
		Params: []testing.Param{{
			ExtraSoftwareDeps: []string{"android_p"},
		}, {
			Name:              "vm",
			ExtraSoftwareDeps: []string{"android_vm"},
		}},
		Fixture: "arcBooted",
		Timeout: 2 * time.Minute,
	})
}
// latencyResult represents the average latency for a single sensor device in
// Android. It mirrors the JSON emitted by the ArcSensorLatencyTest app.
type latencyResult struct {
	Name string `json:"name"` // sensor name as reported by Android
	Type string `json:"type"` // sensor type string
	// NumEvents is the number of sensor events recorded during the run.
	NumEvents int `json:"numEvents"`
	// AvgLatencyNs / AvgDelayNs are averages in nanoseconds (Ns suffix).
	AvgLatencyNs float64 `json:"avgLatencyNs"`
	AvgDelayNs   float64 `json:"avgDelayNs"`
}
// SensorPerf installs the ArcSensorLatencyTest app, records at least 10000
// sensor events, and reports the per-sensor average latency to crosbolt.
func SensorPerf(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(*arc.PreData).Chrome
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Could not open Test API connection: ", err)
	}
	a := s.FixtValue().(*arc.PreData).ARC
	d := s.FixtValue().(*arc.PreData).UIDevice

	const (
		apkName      = "ArcSensorLatencyTest.apk"
		appName      = "org.chromium.arc.testapp.sensorlatency"
		activityName = ".MainActivity"
	)
	s.Log("Installing " + apkName)
	if err := a.Install(ctx, arc.APKPath(apkName)); err != nil {
		s.Fatal("Failed to install the APK: ", err)
	}
	s.Logf("Launching %s/%s", appName, activityName)
	act, err := arc.NewActivity(a, appName, activityName)
	if err != nil {
		s.Fatalf("Unable to create new activity %s/%s: %v", appName, activityName, err)
	}
	defer act.Close()
	if err := act.StartWithDefaultOptions(ctx, tconn); err != nil {
		s.Fatalf("Unable to launch %s/%s: %v", appName, activityName, err)
	}
	defer act.Stop(ctx, tconn)

	// Start recording sensor events in the app.
	s.Log("Recording sensor events")
	startButton := d.Object(ui.ID("org.chromium.arc.testapp.sensorlatency:id/start_button"))
	if err := startButton.Click(ctx); err != nil {
		s.Fatal("Unable to click start button: ", err)
	}
	// Poll until the event count >= minEvents.
	const minEvents = 10000
	countView := d.Object(ui.ID("org.chromium.arc.testapp.sensorlatency:id/count"))
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		txt, err := countView.GetText(ctx)
		if err != nil {
			return err
		}
		num, err := strconv.ParseInt(txt, 10, 64)
		if err != nil {
			return err
		}
		if num < minEvents {
			// Message matches the condition above: we need at least minEvents.
			return errors.Errorf("not enough events; got %d, want >=%d", num, minEvents)
		}
		return nil
	}, &testing.PollOptions{Interval: time.Second}); err != nil {
		s.Fatal("Failed to wait for events: ", err)
	}
	s.Log("Stopping recording")
	stopButton := d.Object(ui.ID("org.chromium.arc.testapp.sensorlatency:id/stop_button"))
	if err := stopButton.Click(ctx); err != nil {
		s.Fatal("Unable to click stop button: ", err)
	}
	// Poll until results view is non-empty: the app fills it with JSON once it
	// has finished aggregating.
	resultsView := d.Object(ui.ID("org.chromium.arc.testapp.sensorlatency:id/results"))
	var resultTxt string
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		txt, err := resultsView.GetText(ctx)
		if err != nil {
			return err
		}
		if len(txt) == 0 {
			return errors.New("results view is empty")
		}
		resultTxt = txt
		return nil
	}, nil); err != nil {
		s.Fatal("Failed to wait for results: ", err)
	}
	var results []latencyResult
	if err := json.Unmarshal([]byte(resultTxt), &results); err != nil {
		s.Logf("Unable to unmarshal text: %q", resultTxt)
		s.Fatal("Failed to unmarshal latency results: ", err)
	}

	// Report one crosbolt metric per sensor, converting ns to ms.
	pv := perf.NewValues()
	for _, result := range results {
		s.Logf("%s(%s): n %d, latency %fms", result.Name, result.Type, result.NumEvents, result.AvgLatencyNs/float64(time.Millisecond))
		metricName := strings.ReplaceAll(result.Name, " ", "")
		pv.Set(perf.Metric{
			Name:      metricName,
			Unit:      "milliseconds",
			Direction: perf.SmallerIsBetter,
		}, result.AvgLatencyNs/float64(time.Millisecond))
	}
	if err := pv.Save(s.OutDir()); err != nil {
		s.Fatal("Failed saving perf data: ", err)
	}
}
|
package main
import (
"bufio"
"fmt"
"net"
"os"
)
// main connects to a TCP server on localhost:8080, forwards every line typed
// on stdin to the server, and concurrently echoes server replies to stdout
// via readConn. It exits when stdin or the connection fails.
func main() {
	conn, err := net.Dial("tcp", ":8080")
	if err != nil {
		fmt.Printf("net.Dial error:%s", err)
		os.Exit(1)
	}
	defer conn.Close()
	inputReader := bufio.NewReader(os.Stdin)
	// Print incoming server messages while we keep reading stdin.
	go readConn(conn)
	for {
		input, err := inputReader.ReadString('\n')
		if err != nil {
			// Fixed typos in the message ("form" -> "from", "faild" -> "failed").
			fmt.Printf("read from console failed, err:%s", err)
			break
		}
		fmt.Printf("send msg:%s", input)
		if _, err = conn.Write([]byte(input)); err != nil {
			fmt.Printf("write failed, err:%s", err)
			break
		}
	}
}
func readConn(conn net.Conn) {
reader := bufio.NewReader(conn)
for {
msg, err := reader.ReadString('\n')
if err != nil {
break
}
fmt.Print(msg)
}
}
|
package metricstore_client_test
import (
"bytes"
"context"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"net/url"
"sync"
"testing"
"time"
metricstore_client "github.com/cloudfoundry/metric-store-release/src/pkg/client"
rpc "github.com/cloudfoundry/metric-store-release/src/pkg/rpc/metricstore_v1"
"google.golang.org/grpc"
test "github.com/cloudfoundry/metric-store-release/src/pkg/testing"
)
// TestClientPromQLRange checks that PromQLRange hits /api/v1/query_range with
// the query/start/end/step parameters and decodes the matrix result.
func TestClientPromQLRange(t *testing.T) {
	t.Parallel()
	metricStore := newStubMetricStore()
	client := metricstore_client.NewClient(metricStore.addr())
	hourAgo := time.Now().Truncate(time.Hour)
	result, err := client.PromQLRange(
		context.Background(),
		`some-query`,
		metricstore_client.WithPromQLStart(hourAgo),
		metricstore_client.WithPromQLEnd(hourAgo.Add(time.Minute)),
		metricstore_client.WithPromQLStep("5m"),
	)
	if err != nil {
		t.Fatal(err.Error())
	}
	// Expected values come from the canned matrix JSON in newStubMetricStore.
	series := result.GetMatrix().GetSeries()
	if len(series) != 1 {
		t.Fatalf("expected to receive 1 series, got %d", len(series))
	}
	if series[0].GetPoints()[0].Value != 99 || series[0].GetPoints()[0].Time != 1234 {
		t.Fatalf("point[0] is incorrect; got %v", series[0].GetPoints()[0])
	}
	if series[0].GetPoints()[1].Value != 100 || series[0].GetPoints()[1].Time != 5678 {
		t.Fatalf("point[1] is incorrect; got %v", series[0].GetPoints()[1])
	}
	if len(metricStore.reqs) != 1 {
		t.Fatalf("expected have 1 request, have %d", len(metricStore.reqs))
	}
	if metricStore.reqs[0].URL.Path != "/api/v1/query_range" {
		t.Fatalf("expected Path '/api/v1/query_range' but got '%s'", metricStore.reqs[0].URL.Path)
	}
	assertQueryParam(t, metricStore.reqs[0].URL, "query", "some-query")
	assertQueryParam(t, metricStore.reqs[0].URL, "step", "5m")
	assertQueryParam(t, metricStore.reqs[0].URL, "start", test.FormatTimeWithDecimalMillis(hourAgo))
	assertQueryParam(t, metricStore.reqs[0].URL, "end", test.FormatTimeWithDecimalMillis(hourAgo.Add(time.Minute)))
	// Exactly the four expected parameters, nothing more.
	if len(metricStore.reqs[0].URL.Query()) != 4 {
		t.Fatalf("expected only a single query parameter, but got %d", len(metricStore.reqs[0].URL.Query()))
	}
}
// TestClientPromQL checks that PromQL hits /api/v1/query with only the query
// parameter and decodes the vector result.
func TestClientPromQL(t *testing.T) {
	t.Parallel()
	metricStore := newStubMetricStore()
	client := metricstore_client.NewClient(metricStore.addr())
	result, err := client.PromQL(
		context.Background(),
		`some-query`,
	)
	if err != nil {
		t.Fatal(err.Error())
	}
	// Expected values come from the canned vector JSON in newStubMetricStore.
	samples := result.GetVector().GetSamples()
	if len(samples) != 1 {
		t.Fatalf("expected to receive 1 sample, got %d", len(samples))
	}
	if samples[0].Point.Value != 99 || samples[0].Point.Time != 1234 {
		t.Fatalf("samples[0].Point is incorrect; got %v", samples[0].GetPoint())
	}
	if len(metricStore.reqs) != 1 {
		t.Fatalf("expected have 1 request, have %d", len(metricStore.reqs))
	}
	if metricStore.reqs[0].URL.Path != "/api/v1/query" {
		t.Fatalf("expected Path '/api/v1/query' but got '%s'", metricStore.reqs[0].URL.Path)
	}
	assertQueryParam(t, metricStore.reqs[0].URL, "query", "some-query")
	if len(metricStore.reqs[0].URL.Query()) != 1 {
		t.Fatalf("expected only a single query parameter, but got %d", len(metricStore.reqs[0].URL.Query()))
	}
}
// TestClientPromQLWithOptions checks that WithPromQLTime adds a decimal-millis
// "time" parameter alongside the query.
func TestClientPromQLWithOptions(t *testing.T) {
	t.Parallel()
	metricStore := newStubMetricStore()
	client := metricstore_client.NewClient(metricStore.addr())
	_, err := client.PromQL(
		context.Background(),
		"some-query",
		metricstore_client.WithPromQLTime(time.Unix(101, 123000000)),
	)
	if err != nil {
		t.Fatal(err.Error())
	}
	if len(metricStore.reqs) != 1 {
		t.Fatalf("expected have 1 request, have %d", len(metricStore.reqs))
	}
	if metricStore.reqs[0].URL.Path != "/api/v1/query" {
		t.Fatalf("expected Path '/api/v1/query' but got '%s'", metricStore.reqs[0].URL.Path)
	}
	// 101s + 123ms formats as "101.123".
	assertQueryParam(t, metricStore.reqs[0].URL, "time", "101.123")
	if len(metricStore.reqs[0].URL.Query()) != 2 {
		t.Fatalf("expected 2 query parameters, but got %d", len(metricStore.reqs[0].URL.Query()))
	}
}
// TestClientPromQLNon200 verifies that a non-2xx status from the metric store
// surfaces as an error from PromQL.
func TestClientPromQLNon200(t *testing.T) {
	t.Parallel()
	store := newStubMetricStore()
	store.statusCode = 500
	c := metricstore_client.NewClient(store.addr())
	if _, err := c.PromQL(context.Background(), "some-query"); err == nil {
		t.Fatal("expected an error")
	}
}
// TestClientPromQLInvalidResponse verifies that a body that is not valid JSON
// surfaces as an error from PromQL.
func TestClientPromQLInvalidResponse(t *testing.T) {
	t.Parallel()
	store := newStubMetricStore()
	store.result["GET/api/v1/query"] = []byte("invalid")
	c := metricstore_client.NewClient(store.addr())
	if _, err := c.PromQL(context.Background(), "some-query"); err == nil {
		t.Fatal("expected an error")
	}
}
// TestClientPromQLUnknownAddr verifies that an unreachable host surfaces as an
// error from PromQL.
func TestClientPromQLUnknownAddr(t *testing.T) {
	t.Parallel()
	c := metricstore_client.NewClient("http://invalid.url")
	if _, err := c.PromQL(context.Background(), "some-query"); err == nil {
		t.Fatal("expected an error")
	}
}
// TestClientPromQLInvalidAddr verifies that a syntactically invalid address
// surfaces as an error from PromQL.
func TestClientPromQLInvalidAddr(t *testing.T) {
	t.Parallel()
	c := metricstore_client.NewClient("-:-invalid")
	if _, err := c.PromQL(context.Background(), "some-query"); err == nil {
		t.Fatal("expected an error")
	}
}
// TestClientPromQLCancelling verifies that a pre-cancelled context makes
// PromQL fail even when the server would block forever.
func TestClientPromQLCancelling(t *testing.T) {
	t.Parallel()
	store := newStubMetricStore()
	store.block = true
	c := metricstore_client.NewClient(store.addr())
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	if _, err := c.PromQL(ctx, "some-query"); err == nil {
		t.Fatal("expected an error")
	}
}
// TestGrpcClientPromQL checks that the gRPC transport sends an instant query
// with the formatted time and decodes the scalar result.
// NOTE(review): grpc.WithInsecure is deprecated in newer grpc-go releases in
// favor of insecure transport credentials — confirm the pinned grpc version.
func TestGrpcClientPromQL(t *testing.T) {
	t.Parallel()
	metricStore := newStubGrpcMetricStore()
	client := metricstore_client.NewClient(metricStore.addr(), metricstore_client.WithViaGRPC(grpc.WithInsecure()))
	result, err := client.PromQL(context.Background(), "some-query",
		metricstore_client.WithPromQLTime(time.Unix(99, 123000000)),
	)
	if err != nil {
		t.Fatal(err.Error())
	}
	// The stub always answers with the scalar (99, 101).
	scalar := result.GetScalar()
	if scalar.Time != 99 || scalar.Value != 101 {
		t.Fatalf("wrong scalar")
	}
	if len(metricStore.promInstantReqs) != 1 {
		t.Fatalf("expected have 1 request, have %d", len(metricStore.promInstantReqs))
	}
	if metricStore.promInstantReqs[0].Query != "some-query" {
		t.Fatalf("expected Query (%s) to equal %s", metricStore.promInstantReqs[0].Query, "some-query")
	}
	if metricStore.promInstantReqs[0].Time != "99.123" {
		t.Fatalf("expected Time (%s) to equal %s", metricStore.promInstantReqs[0].Time, "99.123")
	}
}
// TestGrpcClientPromQLCancelling verifies that a pre-cancelled context makes
// the gRPC PromQL call fail even when the server would block forever.
func TestGrpcClientPromQLCancelling(t *testing.T) {
	t.Parallel()
	store := newStubGrpcMetricStore()
	store.block = true
	c := metricstore_client.NewClient(store.addr(), metricstore_client.WithViaGRPC(grpc.WithInsecure()))
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	if _, err := c.PromQL(ctx, "some-query"); err == nil {
		t.Fatal("expected an error")
	}
}
// TestClientAlwaysClosesBody verifies that the client closes the HTTP
// response body regardless of the call's outcome.
func TestClientAlwaysClosesBody(t *testing.T) {
	t.Parallel()
	httpClient := newSpyHTTPClient()
	c := metricstore_client.NewClient("", metricstore_client.WithHTTPClient(httpClient))
	// The result is irrelevant here; only the body lifecycle matters.
	c.PromQL(context.Background(), "some-query")
	if !httpClient.body.closed {
		t.Fatal("expected body to be closed")
	}
}
// stubMetricStore is an in-process HTTP fake of the metric store. It records
// every request and body it receives and replies with canned JSON keyed by
// method+path.
// NOTE(review): reqs/bodies are appended without a mutex; that is fine while
// each test issues requests sequentially, but would race if a single test ran
// concurrent requests — confirm before reusing it that way.
type stubMetricStore struct {
	statusCode int
	server     *httptest.Server
	reqs       []*http.Request
	bodies     [][]byte
	result     map[string][]byte
	block      bool
}

// newStubMetricStore starts a stub server preloaded with one vector response
// for /api/v1/query and one matrix response for /api/v1/query_range.
func newStubMetricStore() *stubMetricStore {
	s := &stubMetricStore{
		statusCode: http.StatusOK,
		result: map[string][]byte{
			"GET/api/v1/query": []byte(`
	{
	  "status": "success",
	  "data": {
	    "resultType": "vector",
	    "result": [
	      {
	        "metric": {
	          "deployment": "cf"
	        },
	        "value": [ 1.234, "99" ]
	      }
	    ]
	  }
	}
	`),
			"GET/api/v1/query_range": []byte(`
	{
	  "status": "success",
	  "data": {
	    "resultType": "matrix",
	    "result": [
	      {
	        "metric": {
	          "deployment": "cf"
	        },
	        "values": [
	          [ 1.234, "99" ],
	          [ 5.678, "100" ]
	        ]
	      }
	    ]
	  }
	}
	`),
		},
	}
	s.server = httptest.NewServer(s)
	return s
}

// addr returns the base URL of the stub server.
func (s *stubMetricStore) addr() string {
	return s.server.URL
}

// ServeHTTP records the request, then answers with the configured status code
// and the canned payload for method+path (empty when none is registered).
func (s *stubMetricStore) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if s.block {
		// Receiving from a nil channel blocks forever, simulating a hung
		// server; the handler goroutine is leaked for the rest of the run.
		var block chan struct{}
		<-block
	}
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		panic(err)
	}
	s.bodies = append(s.bodies, body)
	s.reqs = append(s.reqs, r)
	w.WriteHeader(s.statusCode)
	w.Write(s.result[r.Method+r.URL.Path])
}
func assertQueryParam(t *testing.T, u *url.URL, name string, values ...string) {
t.Helper()
for _, value := range values {
var found bool
for _, actual := range u.Query()[name] {
if actual == value {
found = true
break
}
}
if !found {
t.Fatalf("expected query parameter '%s' to contain '%s', but got '%v'", name, value, u.Query()[name])
}
}
}
// stubGrpcMetricStore is an in-process gRPC fake of the metric store's PromQL
// API. It records instant/range requests (guarded by mu) and answers with
// fixed results; the remaining PromQLAPIServer methods are unimplemented.
type stubGrpcMetricStore struct {
	mu              sync.Mutex
	promInstantReqs []*rpc.PromQL_InstantQueryRequest
	promRangeReqs   []*rpc.PromQL_RangeQueryRequest
	lis             net.Listener
	block           bool
}

// newStubGrpcMetricStore starts the stub on a random local TCP port.
func newStubGrpcMetricStore() *stubGrpcMetricStore {
	s := &stubGrpcMetricStore{}
	lis, err := net.Listen("tcp", ":0")
	if err != nil {
		panic(err)
	}
	s.lis = lis
	srv := grpc.NewServer()
	rpc.RegisterPromQLAPIServer(srv, s)
	// Serve's error is intentionally ignored; the server lives for the whole
	// test run.
	go srv.Serve(lis)
	return s
}

// addr returns the host:port the stub is listening on.
func (s *stubGrpcMetricStore) addr() string {
	return s.lis.Addr().String()
}

// InstantQuery records the request and returns the fixed scalar (99, 101).
func (s *stubGrpcMetricStore) InstantQuery(c context.Context, r *rpc.PromQL_InstantQueryRequest) (*rpc.PromQL_InstantQueryResult, error) {
	if s.block {
		// Receiving from a nil channel blocks forever, simulating a hung
		// server for cancellation tests.
		var block chan struct{}
		<-block
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	s.promInstantReqs = append(s.promInstantReqs, r)
	return &rpc.PromQL_InstantQueryResult{
		Result: &rpc.PromQL_InstantQueryResult_Scalar{
			Scalar: &rpc.PromQL_Point{
				Time:  99,
				Value: 101,
			},
		},
	}, nil
}

// RangeQuery records the request and returns a fixed single-series matrix.
func (s *stubGrpcMetricStore) RangeQuery(c context.Context, r *rpc.PromQL_RangeQueryRequest) (*rpc.PromQL_RangeQueryResult, error) {
	if s.block {
		var block chan struct{}
		<-block
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	s.promRangeReqs = append(s.promRangeReqs, r)
	return &rpc.PromQL_RangeQueryResult{
		Result: &rpc.PromQL_RangeQueryResult_Matrix{
			Matrix: &rpc.PromQL_Matrix{
				Series: []*rpc.PromQL_Series{
					{
						Metric: map[string]string{
							"__name__": "test",
						},
						Points: []*rpc.PromQL_Point{
							{
								Time:  99,
								Value: 101,
							},
						},
					},
				},
			},
		},
	}, nil
}

// SeriesQuery is not implemented by this stub.
func (s *stubGrpcMetricStore) SeriesQuery(ctx context.Context, req *rpc.PromQL_SeriesQueryRequest) (*rpc.PromQL_SeriesQueryResult, error) {
	panic("stub :(")
}

// LabelsQuery is not implemented by this stub.
func (s *stubGrpcMetricStore) LabelsQuery(ctx context.Context, req *rpc.PromQL_LabelsQueryRequest) (*rpc.PromQL_LabelsQueryResult, error) {
	panic("stub :(")
}

// LabelValuesQuery is not implemented by this stub.
func (s *stubGrpcMetricStore) LabelValuesQuery(ctx context.Context, req *rpc.PromQL_LabelValuesQueryRequest) (*rpc.PromQL_LabelValuesQueryResult, error) {
	panic("stub :(")
}
// stubBufferCloser is an io.ReadWriteCloser backed by a bytes.Buffer that
// records whether Close was called.
type stubBufferCloser struct {
	*bytes.Buffer
	closed bool
}

// newStubBufferCloser returns a ready-to-use stubBufferCloser.
// The embedded buffer is initialized here: the previous version left it nil,
// so any promoted Read/Write call through the stub panicked with a nil
// pointer dereference.
func newStubBufferCloser() *stubBufferCloser {
	return &stubBufferCloser{Buffer: new(bytes.Buffer)}
}

// Close records that the body was closed and always succeeds.
func (s *stubBufferCloser) Close() error {
	s.closed = true
	return nil
}
// spyHTTPClient is an HTTP-doer stub whose responses always carry the same
// observable body, letting tests check whether the caller closed it.
type spyHTTPClient struct {
	body *stubBufferCloser
}

// newSpyHTTPClient returns a spy with a fresh observable body.
func newSpyHTTPClient() *spyHTTPClient {
	return &spyHTTPClient{
		body: newStubBufferCloser(),
	}
}

// Do ignores the request and returns a response containing only the spy body.
// All other response fields (including StatusCode) are zero values.
func (s *spyHTTPClient) Do(req *http.Request) (*http.Response, error) {
	return &http.Response{
		Body: s.body,
	}, nil
}
|
package mappy
import (
"encoding/json"
"fmt"
)
type innerJsonObject struct {
innerMap map[string]interface{}
innerValue interface{}
}
func (i innerJsonObject) Contains(key string) bool {
_, contains := i.innerMap[key]
return contains
}
func (i innerJsonObject) Int(key string) (int, error) {
if !i.Contains(key) {
return 0, DoesNotContainError
}
value := i.innerMap[key]
switch value.(type) {
case int:
return value.(int), nil
default:
return 0, InvalidTypeError
}
}
func (i innerJsonObject) Bool(key string) (bool, error) {
if !i.Contains(key) {
return false, DoesNotContainError
}
value := i.innerMap[key]
switch value.(type) {
case bool:
return value.(bool), nil
default:
return false, InvalidTypeError
}
}
func (i innerJsonObject) String(key string) (string, error) {
if !i.Contains(key) {
return "", DoesNotContainError
}
value := i.innerMap[key]
switch value.(type) {
case string:
return value.(string), nil
default:
return "", InvalidTypeError
}
}
func (i innerJsonObject) JsonArray(key string) ([]JsonObject, error) {
if !i.Contains(key) {
return nil, DoesNotContainError
}
value := i.innerMap[key]
switch value.(type) {
case []interface{}:
fmt.Println("gets jsonarray here:", value)
bits, err := json.Marshal(value)
if err != nil {
return nil, err
}
fmt.Println("value", string(bits))
return JsonArrayFromBytes(bits)
default:
return nil, InvalidTypeError
}
}
// JsonObject returns the value stored under key when it is already a
// JsonObject. It returns DoesNotContainError when the key is absent
// and InvalidTypeError otherwise.
//
// NOTE(review): values decoded from raw JSON arrive as
// map[string]interface{}, not JsonObject, so this only matches values
// inserted as JsonObject explicitly — confirm this is intended.
func (i innerJsonObject) JsonObject(key string) (JsonObject, error) {
	if !i.Contains(key) {
		return nil, DoesNotContainError
	}
	// Comma-ok assertion replaces the type switch + second assertion.
	if v, ok := i.innerMap[key].(JsonObject); ok {
		return v, nil
	}
	return nil, InvalidTypeError
}
// Value returns the wrapped scalar value, or MissingValueError when
// this node is an object rather than a value.
func (i innerJsonObject) Value() (interface{}, error) {
	if i.innerValue != nil {
		return i.innerValue, nil
	}
	return nil, MissingValueError
}
// IsValue reports whether this node wraps a scalar value (as opposed
// to a JSON object).
func (i innerJsonObject) IsValue() bool {
	return i.innerValue != nil
}
// JsonString renders the underlying map as a JSON document.
// NOTE: for value-form nodes this serializes the (nil) map — the
// wrapped scalar is not included.
func (i innerJsonObject) JsonString() (string, error) {
	encoded, err := json.Marshal(i.innerMap)
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}
func (i innerJsonObject) Keys() ([]string, error) {
if i.innerValue != nil {
return nil, ValuePresentError
}
keys := []string{}
for key, _ := range i.innerMap {
keys = append(keys, key)
}
return keys, nil
} |
package model
type (
	// Response is a generic operation result: Result carries the
	// textual outcome and Error any failure.
	// NOTE(review): an `error` interface field does not round-trip
	// through JSON — confirm this type is not serialized.
	Response struct {
		Result string
		Error  error
	}
)
|
package common
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"strconv"
"strings"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/azure/auth"
"github.com/golang-jwt/jwt/v4"
)
// List of management information
const (
	// AzureDatabricksResourceID is the well-known AAD application ID of
	// the Azure Databricks service; data-plane tokens are requested
	// against this resource.
	AzureDatabricksResourceID string = "2ff814a6-3304-4ab8-85cb-cd0e6f879c1d"
)
// tokenRequest is the payload for the Databricks token-create API.
type tokenRequest struct {
	LifetimeSeconds int64  `json:"lifetime_seconds,omitempty"` // PAT lifetime in seconds
	Comment         string `json:"comment,omitempty"`          // human-readable token description
}
// tokenResponse is the token-create API response: the secret token
// value plus its metadata.
type tokenResponse struct {
	TokenValue string     `json:"token_value,omitempty"`
	TokenInfo  *tokenInfo `json:"token_info,omitempty"`
}
// tokenInfo is a struct that contains metadata about a given token
type tokenInfo struct {
	TokenID      string `json:"token_id,omitempty"`
	CreationTime int64  `json:"creation_time,omitempty"` // epoch millis per Databricks API
	ExpiryTime   int64  `json:"expiry_time,omitempty"`   // epoch millis; -1 means no expiry
	Comment      string `json:"comment,omitempty"`
}
// GetAzureJwtProperty extracts the claim named key from the AAD JWT
// used to authenticate against the workspace. As a shortcut, "tid" is
// served straight from AzureTenantID when configured, avoiding a token
// round-trip. It fails outside Azure, when no bearer token can be
// obtained, or when the token is a Databricks PAT (not a JWT).
func (aa *DatabricksClient) GetAzureJwtProperty(key string) (interface{}, error) {
	if !aa.IsAzure() {
		// Return nil (not "") on error, consistent with every other
		// error path of this method.
		return nil, fmt.Errorf("can't get Azure JWT token in non-Azure environment")
	}
	if key == "tid" && aa.AzureTenantID != "" {
		return aa.AzureTenantID, nil
	}
	err := aa.Authenticate(context.TODO())
	if err != nil {
		return nil, err
	}
	// Build a throwaway request so authVisitor populates the
	// Authorization header with the current bearer token.
	request, err := http.NewRequest("GET", aa.Host, nil)
	if err != nil {
		return nil, err
	}
	if err = aa.authVisitor(request); err != nil {
		return nil, err
	}
	header := request.Header.Get("Authorization")
	var stoken string
	// header is already a string; the original wrapped it in redundant
	// string() conversions. HasPrefix also handles the empty string.
	if strings.HasPrefix(header, "Bearer ") {
		log.Printf("[DEBUG] Got Bearer token")
		stoken = strings.TrimSpace(strings.TrimPrefix(header, "Bearer "))
	}
	if stoken == "" {
		return nil, fmt.Errorf("can't obtain Azure JWT token")
	}
	if strings.HasPrefix(stoken, "dapi") {
		// "dapi" prefix marks a Databricks personal access token.
		return nil, fmt.Errorf("can't use Databricks PAT")
	}
	// Parse without verifying the signature: we only introspect a token
	// we ourselves just obtained.
	parser := jwt.Parser{SkipClaimsValidation: true}
	token, _, err := parser.ParseUnverified(stoken, jwt.MapClaims{})
	if err != nil {
		return nil, err
	}
	claims := token.Claims.(jwt.MapClaims)
	v, ok := claims[key]
	if !ok {
		return nil, fmt.Errorf("can't find field '%s' in parsed JWT", key)
	}
	return v, nil
}
// getAzureEnvironment resolves which Azure cloud (public, China,
// German, US Gov, ...) this client targets. An explicitly injected
// AzureEnvironment wins (used in tests); an empty AzurermEnvironment
// name falls back to the public cloud.
func (aa *DatabricksClient) getAzureEnvironment() (azure.Environment, error) {
	switch {
	case aa.AzureEnvironment != nil:
		// used for testing purposes
		return *aa.AzureEnvironment, nil
	case aa.AzurermEnvironment == "":
		return azure.PublicCloud, nil
	}
	name := fmt.Sprintf("AZURE%sCLOUD", strings.ToUpper(aa.AzurermEnvironment))
	return azure.EnvironmentFromName(name)
}
// resourceID returns the ARM resource ID of the Databricks workspace,
// deriving it from — and caching it back into — the client's fields.
// It returns "" when not enough information is configured.
// NOTE: this method mutates aa (back-fills subscription/resource
// group/workspace name or the full resource ID) and is not
// synchronized — confirm callers serialize access.
func (aa *DatabricksClient) resourceID() string {
	if aa.AzureDatabricksResourceID != "" {
		if aa.AzureSubscriptionID == "" || aa.AzureResourceGroup == "" {
			// Back-fill the individual components from the full ID.
			res, err := azure.ParseResourceID(aa.AzureDatabricksResourceID)
			if err != nil {
				// Parse failures are only logged; the caller sees "".
				log.Printf("[ERROR] %s", err)
				return ""
			}
			aa.AzureSubscriptionID = res.SubscriptionID
			aa.AzureResourceGroup = res.ResourceGroup
			aa.AzureWorkspaceName = res.ResourceName
		}
		return aa.AzureDatabricksResourceID
	}
	// No full ID configured: build it from components, all of which
	// must be present.
	if aa.AzureSubscriptionID == "" || aa.AzureResourceGroup == "" || aa.AzureWorkspaceName == "" {
		return ""
	}
	r := azure.Resource{
		SubscriptionID: aa.AzureSubscriptionID,
		ResourceGroup:  aa.AzureResourceGroup,
		Provider:       "Microsoft.Databricks",
		ResourceType:   "workspaces",
		ResourceName:   aa.AzureWorkspaceName,
	}
	aa.AzureDatabricksResourceID = r.String()
	return aa.AzureDatabricksResourceID
}
// IsAzureClientSecretSet returns true if client id/secret and tenant id are supplied
func (aa *DatabricksClient) IsAzureClientSecretSet() bool {
	return aa.AzureClientID != "" && aa.AzureClientSecret != "" && aa.AzureTenantID != ""
}
// configureWithAzureClientSecret returns a request visitor that
// authenticates via an Azure Service Principal's client secret, or
// (nil, nil) when this auth method does not apply (non-Azure host or
// incomplete SPN credentials). With AzureUsePATForSPN set, each request
// lazily acquires (and caches, see acquirePAT) a workspace PAT;
// otherwise requests carry an AAD token directly.
func (aa *DatabricksClient) configureWithAzureClientSecret(ctx context.Context) (func(*http.Request) error, error) {
	if !aa.IsAzure() {
		return nil, nil
	}
	if !aa.IsAzureClientSecretSet() {
		return nil, nil
	}
	log.Printf("[INFO] Using Azure Service Principal client secret authentication")
	if aa.AzureUsePATForSPN {
		log.Printf("[INFO] Generating PAT token Azure Service Principal client secret authentication")
		return func(r *http.Request) error {
			// PAT acquisition happens per request so the first call can
			// resolve the workspace URL; acquirePAT caches the token.
			pat, err := aa.acquirePAT(r.Context(), aa.getClientSecretAuthorizer, aa.addSpManagementTokenVisitor)
			if err != nil {
				return fmt.Errorf("cannot acquire PAT: %w", err)
			}
			r.Header.Set("Authorization", fmt.Sprintf("Bearer %s", pat.TokenValue))
			return nil
		}, nil
	}
	log.Printf("[INFO] Generating AAD token for Azure Service Principal")
	return aa.simpleAADRequestVisitor(ctx, aa.getClientSecretAuthorizer, aa.addSpManagementTokenVisitor)
}
// configureWithAzureManagedIdentity returns a request visitor that
// authenticates via Azure Managed Identity (MSI), or (nil, nil) when
// MSI auth is not requested or the host is not Azure. It fails fast
// when the MSI endpoint is unreachable from this environment.
func (aa *DatabricksClient) configureWithAzureManagedIdentity(ctx context.Context) (func(*http.Request) error, error) {
	if !aa.IsAzure() {
		return nil, nil
	}
	if !aa.AzureUseMSI {
		return nil, nil
	}
	if !adal.MSIAvailable(ctx, aa.httpClient.HTTPClient) {
		return nil, fmt.Errorf("managed identity is not available")
	}
	log.Printf("[INFO] Using Azure Managed Identity authentication")
	// The factory is invoked once per resource (management endpoint and
	// the Databricks application) by simpleAADRequestVisitor.
	return aa.simpleAADRequestVisitor(ctx, func(resource string) (autorest.Authorizer, error) {
		return auth.MSIConfig{
			Resource: resource,
		}.Authorizer()
	}, aa.addSpManagementTokenVisitor)
}
// addSpManagementTokenVisitor copies the management-plane AAD access
// token into the X-Databricks-Azure-SP-Management-Token header, which
// Databricks requires alongside the data-plane token for SPN auth. The
// token is refreshed first when the authorizer supports it.
func (aa *DatabricksClient) addSpManagementTokenVisitor(r *http.Request, management autorest.Authorizer) error {
	log.Printf("[DEBUG] Setting 'X-Databricks-Azure-SP-Management-Token' header")
	ba, ok := management.(*autorest.BearerAuthorizer)
	if !ok {
		return fmt.Errorf("supposed to get BearerAuthorizer, but got %#v", management)
	}
	tokenProvider := ba.TokenProvider()
	if tokenProvider == nil {
		return fmt.Errorf("token provider is nil")
	}
	// Prefer the context-aware refresher; fall back to the plain one.
	// Providers implementing neither are used as-is.
	var err error
	switch rf := tokenProvider.(type) {
	case adal.RefresherWithContext:
		err = rf.EnsureFreshWithContext(r.Context())
	case adal.Refresher:
		err = rf.EnsureFresh()
	}
	if err != nil {
		return fmt.Errorf("cannot refresh AAD token: %w", err)
	}
	accessToken := tokenProvider.OAuthToken()
	r.Header.Set("X-Databricks-Azure-SP-Management-Token", accessToken)
	return nil
}
// simpleAADRequestVisitor builds the per-request auth visitor for AAD
// token authentication: it authorizes the management plane, resolves
// the workspace URL if needed, authorizes the Databricks data plane,
// and returns a closure that decorates each outgoing request with the
// workspace resource ID header, any extra visitor, and the bearer
// token.
func (aa *DatabricksClient) simpleAADRequestVisitor(
	ctx context.Context,
	authorizerFactory func(resource string) (autorest.Authorizer, error),
	visitors ...func(r *http.Request, ma autorest.Authorizer) error) (func(r *http.Request) error, error) {
	managementAuthorizer, err := authorizerFactory(aa.AzureEnvironment.ServiceManagementEndpoint)
	if err != nil {
		return nil, fmt.Errorf("cannot authorize management: %w", err)
	}
	err = aa.ensureWorkspaceURL(ctx, managementAuthorizer)
	if err != nil {
		return nil, fmt.Errorf("cannot get workspace: %w", err)
	}
	platformAuthorizer, err := authorizerFactory(AzureDatabricksResourceID)
	if err != nil {
		return nil, fmt.Errorf("cannot authorize databricks: %w", err)
	}
	return func(r *http.Request) error {
		// Use request-local error variables: the original closure wrote
		// into the captured outer `err`, which is shared by every
		// request using this visitor and therefore a data race under
		// concurrent use.
		if len(visitors) > 0 {
			if err := visitors[0](r, managementAuthorizer); err != nil {
				return err
			}
		}
		if resourceID := aa.resourceID(); resourceID != "" {
			r.Header.Set("X-Databricks-Azure-Workspace-Resource-Id", resourceID)
		}
		if _, err := autorest.Prepare(r, platformAuthorizer.WithAuthorization()); err != nil {
			return fmt.Errorf("cannot prepare request: %w", err)
		}
		return nil
	}, nil
}
// acquirePAT returns the cached workspace PAT or creates a new one:
// it authorizes the management plane, resolves the workspace URL, and
// calls the token-create API authorized against the Databricks
// application. The result is memoized in aa.temporaryPat.
// NOTE(review): the cache check/store is not synchronized and expiry
// is not checked (see tokenInfo.ExpiryTime) — confirm single-threaded
// use or add locking.
func (aa *DatabricksClient) acquirePAT(
	ctx context.Context,
	factory func(resource string) (autorest.Authorizer, error),
	visitors ...func(r *http.Request, ma autorest.Authorizer) error) (*tokenResponse, error) {
	// The original performed this nil check twice back to back; once
	// is enough.
	if aa.temporaryPat != nil {
		// todo: add IsExpired
		return aa.temporaryPat, nil
	}
	management, err := factory(aa.AzureEnvironment.ServiceManagementEndpoint)
	if err != nil {
		return nil, err
	}
	err = aa.ensureWorkspaceURL(ctx, management)
	if err != nil {
		return nil, err
	}
	token, err := aa.createPAT(ctx, func(r *http.Request) error {
		if len(visitors) > 0 {
			// Local error variable: avoid writing to the captured
			// outer `err` from inside the interceptor.
			if err := visitors[0](r, management); err != nil {
				return err
			}
		}
		platform, err := factory(AzureDatabricksResourceID)
		if err != nil {
			return err
		}
		if resourceID := aa.resourceID(); resourceID != "" {
			r.Header.Set("X-Databricks-Azure-Workspace-Resource-Id", resourceID)
		}
		if _, err := autorest.Prepare(r, platform.WithAuthorization()); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	aa.temporaryPat = &token
	return aa.temporaryPat, nil
}
// patRequest builds the token-create payload. The lifetime comes from
// AzurePATTokenDurationSeconds, falling back to one hour when the
// setting is absent or not a valid integer.
func (aa *DatabricksClient) patRequest() tokenRequest {
	const defaultLifetimeSeconds = int64(60 * 60) // one hour
	lifetime, err := strconv.ParseInt(aa.AzurePATTokenDurationSeconds, 10, 64)
	if err != nil {
		lifetime = defaultLifetimeSeconds
	}
	return tokenRequest{
		LifetimeSeconds: lifetime,
		Comment:         "Secret made via Terraform",
	}
}
// maybeExtendAuthzError decorates Azure authorization failures (HTTP
// 403 APIErrors, or messages mentioning missing authorization) with a
// hint that the SPN may lack Contributor access; other errors pass
// through unchanged.
func maybeExtendAuthzError(err error) error {
	const fmtString = "Azure authorization error. Does your SPN have Contributor access to Databricks workspace? %v"
	apiErr, isAPIErr := err.(APIError)
	switch {
	case isAPIErr && apiErr.StatusCode == 403:
		return fmt.Errorf(fmtString, err)
	case strings.Contains(err.Error(), "does not have authorization to perform action"):
		return fmt.Errorf(fmtString, err)
	}
	return err
}
// ensureWorkspaceURL populates aa.Host by querying the workspace's ARM
// resource with a management-plane token. It is a no-op when Host is
// already configured.
func (aa *DatabricksClient) ensureWorkspaceURL(ctx context.Context,
	managementAuthorizer autorest.Authorizer) error {
	if aa.Host != "" {
		return nil
	}
	resourceID := aa.resourceID()
	if resourceID == "" {
		return fmt.Errorf("somehow resource id is not set")
	}
	log.Println("[DEBUG] Getting Workspace ID via management token.")
	// All azure endpoints typically end with a trailing slash removing it because resourceID starts with slash
	managementResourceURL := strings.TrimSuffix(aa.AzureEnvironment.ResourceManagerEndpoint, "/") + resourceID
	var workspace azureDatabricksWorkspace
	resp, err := aa.genericQuery(ctx, http.MethodGet,
		managementResourceURL,
		map[string]string{
			"api-version": "2018-04-01",
		}, func(r *http.Request) error {
			// Authorize the ARM call; 403s are decorated with the SPN
			// Contributor-access hint.
			_, err := autorest.Prepare(r, managementAuthorizer.WithAuthorization())
			if err != nil {
				return maybeExtendAuthzError(err)
			}
			return nil
		})
	if err != nil {
		return maybeExtendAuthzError(err)
	}
	err = json.Unmarshal(resp, &workspace)
	if err != nil {
		return err
	}
	// Cache the per-workspace URL (e.g. adb-123.azuredatabricks.net).
	aa.Host = fmt.Sprintf("https://%s/", workspace.Properties.WorkspaceURL)
	return nil
}
// createPAT calls the Databricks token-create API on the configured
// workspace host; interceptor is responsible for authenticating the
// request. The decoded token response is returned.
func (aa *DatabricksClient) createPAT(ctx context.Context,
	interceptor func(r *http.Request) error) (tr tokenResponse, err error) {
	log.Println("[DEBUG] Creating workspace token")
	// aa.Host ends with "/" (see ensureWorkspaceURL), so no separator
	// is added here.
	url := fmt.Sprintf("%sapi/2.0/token/create", aa.Host)
	body, err := aa.genericQuery(ctx,
		http.MethodPost, url, aa.patRequest(), interceptor)
	if err != nil {
		return
	}
	err = aa.unmarshall("/api/2.0/token/create", body, &tr)
	return
}
// getClientSecretAuthorizer builds an AAD authorizer for the given
// resource using the configured SPN client id/secret. An injected
// azureAuthorizer (tests) takes precedence. Management-plane resources
// use the environment-settings path; the Databricks data plane uses an
// explicit OAuth config without an API version qualifier.
func (aa *DatabricksClient) getClientSecretAuthorizer(resource string) (autorest.Authorizer, error) {
	if aa.azureAuthorizer != nil {
		return aa.azureAuthorizer, nil
	}
	if resource != AzureDatabricksResourceID {
		// Management plane (ARM) token.
		es := auth.EnvironmentSettings{
			Values: map[string]string{
				auth.ClientID:     aa.AzureClientID,
				auth.ClientSecret: aa.AzureClientSecret,
				auth.TenantID:     aa.AzureTenantID,
				auth.Resource:     resource,
			},
			Environment: *aa.AzureEnvironment,
		}
		return es.GetAuthorizer()
	}
	// Data plane: token for the Databricks AAD application itself.
	platformTokenOAuthCfg, err := adal.NewOAuthConfigWithAPIVersion(
		aa.AzureEnvironment.ActiveDirectoryEndpoint,
		aa.AzureTenantID,
		nil)
	if err != nil {
		return nil, maybeExtendAuthzError(err)
	}
	spt, err := adal.NewServicePrincipalToken(
		*platformTokenOAuthCfg,
		aa.AzureClientID,
		aa.AzureClientSecret,
		AzureDatabricksResourceID)
	if err != nil {
		return nil, maybeExtendAuthzError(err)
	}
	return autorest.NewBearerAuthorizer(spt), nil
}
// azureDatabricksWorkspace mirrors the ARM representation of a
// Databricks workspace (api-version 2018-04-01); only
// Properties.WorkspaceURL is consumed here (see ensureWorkspaceURL).
type azureDatabricksWorkspace struct {
	Name string `json:"name"`
	ID   string `json:"id"`
	Type string `json:"type"`
	Sku  struct {
		Name string `json:"name"`
	} `json:"sku"`
	Location   string `json:"location"`
	Properties struct {
		ManagedResourceGroupID string      `json:"managedResourceGroupId"`
		Parameters             interface{} `json:"parameters"`
		ProvisioningState      string      `json:"provisioningState"`
		UIDefinitionURI        string      `json:"uiDefinitionUri"`
		Authorizations         []struct {
			PrincipalID      string `json:"principalId"`
			RoleDefinitionID string `json:"roleDefinitionId"`
		} `json:"authorizations"`
		CreatedBy struct {
			Oid           string `json:"oid"`
			Puid          string `json:"puid"`
			ApplicationID string `json:"applicationId"`
		} `json:"createdBy"`
		UpdatedBy struct {
			Oid           string `json:"oid"`
			Puid          string `json:"puid"`
			ApplicationID string `json:"applicationId"`
		} `json:"updatedBy"`
		CreatedDateTime time.Time `json:"createdDateTime"`
		WorkspaceID     string    `json:"workspaceId"`
		// WorkspaceURL is the per-workspace hostname used to build aa.Host.
		WorkspaceURL string `json:"workspaceUrl"`
	} `json:"properties"`
}
|
/*
* Copyright 2020 Kaiserpfalz EDV-Service, Roland T. Lichti.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package aws_provider_test
import (
"errors"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"net"
"strconv"
)
// MoveIP suite: verifies that a secondary private IP can be moved
// between EC2 instances via AssignPrivateIpAddresses (with
// reassignment allowed) and that failures are propagated.
var _ = Describe("MoveIP", func() {
	BeforeEach(func() {
		initMock()
	})
	AfterEach(func() {
		mockCtrl.Finish()
	})
	It("should move the IP from old instance to new instance", func() {
		// Source instance carries MaxIPsPerInstance-1 secondary IPs
		// (10.0.1.20, 10.0.1.21, ...).
		secondaryIPs := make([]*net.IP, sut.MaxIPsPerInstance-1)
		for i := 0; i < sut.MaxIPsPerInstance-1; i++ {
			s := strconv.Itoa(20 + i)
			ip := net.ParseIP("10.0.1." + s)
			secondaryIPs[i] = &ip
		}
		awsDirect.
			EXPECT().DescribeInstances(createDescribeInstancesInput(hostName)).
			Return(
				createDescribeInstancesOutput(hostName, hostId, networkInterfaceId, mainIP, secondaryIPs),
				nil,
			)
		// Target instance with two secondary IPs of its own.
		targetHostName := "target"
		targetMainIP := net.ParseIP("10.2.1.33")
		targetSecondaryIPs := make([]*net.IP, 2)
		for i := 0; i < 2; i++ {
			s := strconv.Itoa(50 + i)
			ip := net.ParseIP("10.243.1." + s)
			targetSecondaryIPs[i] = &ip
		}
		awsDirect.
			EXPECT().DescribeInstances(createDescribeInstancesInput(targetHostName)).
			Return(
				createDescribeInstancesOutput(targetHostName, "vm-2", "eni-2", &targetMainIP, targetSecondaryIPs),
				nil,
			)
		// The move is expected to reassign the IP onto the target ENI.
		allowReassignement := true
		awsDirect.
			EXPECT().AssignPrivateIpAddresses(&ec2.AssignPrivateIpAddressesInput{
				AllowReassignment:  &allowReassignement,
				NetworkInterfaceId: aws.String("eni-2"),
				PrivateIpAddresses: aws.StringSlice([]string{secondaryIPs[1].String()}),
			}).
			Return(
				&ec2.AssignPrivateIpAddressesOutput{
					AssignedPrivateIpAddresses: []*ec2.AssignedPrivateIpAddress{
						{
							PrivateIpAddress: aws.String(secondaryIPs[1].String()),
						},
					},
					NetworkInterfaceId: aws.String("eni-2"),
				},
				nil,
			)
		err := sut.MoveIP(secondaryIPs[1], hostName, targetHostName)
		Expect(err).To(BeNil())
	})
	It("should throw an error when the moving of the IP failed", func() {
		expectedErr := errors.New("can not move IP to target host")
		secondaryIPs := make([]*net.IP, sut.MaxIPsPerInstance-1)
		for i := 0; i < sut.MaxIPsPerInstance-1; i++ {
			s := strconv.Itoa(20 + i)
			ip := net.ParseIP("10.0.1." + s)
			secondaryIPs[i] = &ip
		}
		awsDirect.
			EXPECT().DescribeInstances(createDescribeInstancesInput(hostName)).
			Return(
				createDescribeInstancesOutput(hostName, hostId, networkInterfaceId, mainIP, secondaryIPs),
				nil,
			)
		targetHostName := "target"
		targetMainIP := net.ParseIP("10.2.1.33")
		targetSecondaryIPs := make([]*net.IP, 2)
		for i := 0; i < 2; i++ {
			s := strconv.Itoa(50 + i)
			ip := net.ParseIP("10.243.1." + s)
			targetSecondaryIPs[i] = &ip
		}
		awsDirect.
			EXPECT().DescribeInstances(createDescribeInstancesInput(targetHostName)).
			Return(
				createDescribeInstancesOutput(targetHostName, "vm-2", "eni-2", &targetMainIP, targetSecondaryIPs),
				nil,
			)
		// AWS call fails; MoveIP must surface the same error.
		allowReassignement := true
		awsDirect.
			EXPECT().AssignPrivateIpAddresses(&ec2.AssignPrivateIpAddressesInput{
				AllowReassignment:  &allowReassignement,
				NetworkInterfaceId: aws.String("eni-2"),
				PrivateIpAddresses: aws.StringSlice([]string{secondaryIPs[1].String()}),
			}).
			Return(
				nil,
				errors.New("can not move IP to target host"),
			)
		err := sut.MoveIP(secondaryIPs[1], hostName, targetHostName)
		Expect(err).To(MatchError(expectedErr))
	})
	It("should throw an error when the IP is not assigned to the specified host", func() {
		// NOTE(review): this spec exercises sut.CheckIP although the
		// Describe block is "MoveIP" — confirm it is in the right suite.
		unassignedIP := net.ParseIP("10.1.2.3")
		expectedErr := fmt.Errorf(
			"ip '%v' is not assigned to instance '%v'",
			unassignedIP.String(), hostId,
		)
		awsDirect.
			EXPECT().DescribeInstances(createDescribeInstancesInput(hostName)).
			Return(
				createDescribeInstancesOutput(hostName, hostId, networkInterfaceId, mainIP, []*net.IP{}),
				nil,
			)
		err := sut.CheckIP(&unassignedIP, hostName)
		Expect(err).To(MatchError(expectedErr))
	})
})
|
package main
import (
"os"
cli "github.com/openshift/oc-mirror/v2/pkg/cli"
clog "github.com/openshift/oc-mirror/v2/pkg/log"
)
// main wires the pluggable logger (see PluggableLoggerInterface in
// pkg/log/logger.go — feel free to plug in your own implementation)
// into the mirror command and runs it, exiting non-zero on failure.
func main() {
	logger := clog.New("info")
	if err := cli.NewMirrorCmd(logger).Execute(); err != nil {
		logger.Error("[Executor] %v ", err)
		os.Exit(1)
	}
}
|
// Sample program to show how to unmarshal a JSON document into
// a user defined struct type.
package main
import (
"encoding/json"
"fmt"
)
// document contains a JSON document: a "workshop" object with venue
// details and a "user" object with the attendee's details. It is the
// fixture decoded by main.
var document = `{
	"workshop": {
		"college_name": "Maharaja Institute of Technology Mysore",
		"company_name": "Qwinix"
	},
	"user":{
		"user_id": "4VV10IS045",
		"user_name": "praveen menon",
		"branch": "Information_science"
	}
}`
// Fields to be encoded/decoded must be exported else the
// json encoding functions can't see the fields.
type (
	// User_details contains information about the user.
	// NOTE(review): Go naming would be UserDetails (MixedCaps, no
	// underscores); renaming would change the exported API, so it is
	// only flagged here.
	User_details struct {
		UserId   string `json:"user_id"`
		UserName string `json:"user_name"`
		Branch   string `json:"branch"`
	}
	// Workshop contains information for the Workshop for a User; it is
	// the top-level shape of `document`.
	Workshop struct {
		WorkshopDetails struct {
			CollegeName string `json:"college_name"`
			CompanyName string `json:"company_name"`
		} `json:"workshop"`
		UserDetails User_details `json:"user"`
	}
)
// main is the entry point for the application: it decodes the
// package-level JSON document into a Workshop and prints the decoded
// struct plus selected fields.
func main() {
	var ws Workshop
	if err := json.Unmarshal([]byte(document), &ws); err != nil {
		fmt.Println(err)
		return
	}
	// Whole struct first, then the individual fields of interest.
	fmt.Printf("%+v\n\n", ws)
	fmt.Println("College Name:", ws.WorkshopDetails.CollegeName)
	fmt.Println("Company Name:", ws.WorkshopDetails.CompanyName)
	fmt.Println("User Name:", ws.UserDetails.UserName)
	fmt.Println("User Id:", ws.UserDetails.UserId)
}
|
package flog_test
import (
"testing"
. "github.com/coder/flog"
)
// TestLogger smoke-tests the dot-imported short-hand helpers; it only
// verifies they do not panic (output is not captured or asserted).
func TestLogger(t *testing.T) {
	// Short-hand
	Infof("something happened")
	Errorf("something bad happened")
	Successf("something good happened")
}
|
package main
import "fmt"
// buildSlice constructs the demo slice: length 5, capacity 10, with
// the element at index 2 set to 2.
func buildSlice() []int {
	s := make([]int, 5, 10)
	s[2] = 2
	return s
}

// main prints the demo slice: [0 0 2 0 0].
func main() {
	fmt.Println(buildSlice())
}
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package debug
import (
"net/http"
"net/http/pprof"
)
// Options used to provide configuration options
type Options struct {
CmdLine bool
Profile bool
Symbol bool
Trace bool
}
// DefaultOptions returns default options configuration
func DefaultOptions() *Options {
return &Options{
CmdLine: true,
Profile: true,
Symbol: true,
Trace: true,
}
}
// RegisterEndpoint used to register the different debug endpoints
func RegisterEndpoint(register func(string, http.Handler) error, options *Options) error {
err := register("/debug/pprof", http.HandlerFunc(pprof.Index))
if err != nil {
return err
}
if options == nil {
options = DefaultOptions()
}
if options.CmdLine {
err := register("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
if err != nil {
return err
}
}
if options.Profile {
err := register("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
if err != nil {
return err
}
}
if options.Symbol {
err := register("/debug/pprof/symobol", http.HandlerFunc(pprof.Symbol))
if err != nil {
return err
}
}
if options.Trace {
err := register("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
if err != nil {
return err
}
}
return nil
}
|
package generator
import (
"fmt"
"html/template"
"io/ioutil"
"os"
"strings"
"github.com/RomanosTrechlis/blog-generator/config"
"github.com/RomanosTrechlis/blog-generator/util/fs"
)
// staticsGenerator object: copies fixed static assets
// (fileToDestination: source path -> output path) and renders HTML
// template files (templateToFile) into the generated site.
type staticsGenerator struct {
	fileToDestination map[string]string
	templateToFile    map[string]string
	template          *template.Template
	siteInfo          *config.SiteInformation
}
// Generate creates the static pages: it first copies plain files to
// their destinations, then renders the configured template files.
func (g *staticsGenerator) Generate() error {
	fmt.Println("\tCopying Statics...")
	if err := g.resolveFileToDestination(); err != nil {
		return err
	}
	if err := g.resolveTemplateToFile(); err != nil {
		return err
	}
	fmt.Println("\tFinished copying statics...")
	return nil
}
// resolveFileToDestination copies every configured static file to its
// destination, creating each destination folder on demand. Ranging
// over an empty (or nil) map is a no-op, so no explicit length check
// is needed.
func (g *staticsGenerator) resolveFileToDestination() error {
	for src, dst := range g.fileToDestination {
		if err := createFolderIfNotExist(getFolder(dst)); err != nil {
			return err
		}
		if err := fs.CopyFile(src, dst); err != nil {
			return err
		}
	}
	return nil
}
// resolveTemplateToFile renders each configured template file into its
// destination folder as a standalone (non-post) HTML page titled after
// the source file name.
func (g *staticsGenerator) resolveTemplateToFile() error {
	if len(g.templateToFile) == 0 {
		return nil
	}
	for k, v := range g.templateToFile {
		err := createFolderIfNotExist(getFolder(v))
		if err != nil {
			return err
		}
		// The raw file content is embedded as-is into the page template.
		content, err := ioutil.ReadFile(k)
		if err != nil {
			return fmt.Errorf("error reading file %s: %v", k, err)
		}
		c := htmlConfig{
			path:       getFolder(v),
			pageTitle:  getTitle(k),
			pageNum:    0,
			maxPageNum: 0,
			isPost:     false,
			temp:       g.template,
			content:    template.HTML(content),
			siteInfo:   g.siteInfo,
		}
		err = c.writeHTML()
		if err != nil {
			return err
		}
	}
	return nil
}
func createFolderIfNotExist(path string) (err error) {
_, err = os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
err = os.Mkdir(path, os.ModePerm)
if err != nil {
return fmt.Errorf("error creating directory %s: %v", path, err)
}
} else {
return fmt.Errorf("error accessing directory %s: %v", path, err)
}
}
return nil
}
// getTitle derives a page title from a file path: the base name
// without its extension, with the first letter upper-cased
// (e.g. "static/about.html" -> "About").
func getTitle(path string) (title string) {
	// The original sliced with raw LastIndex results and panicked on
	// paths without a '.', on a '.' preceding the last '/', and on
	// empty base names.
	fileName := path
	if i := strings.LastIndex(fileName, "/"); i >= 0 {
		fileName = fileName[i+1:]
	}
	if i := strings.LastIndex(fileName, "."); i >= 0 {
		fileName = fileName[:i]
	}
	if fileName == "" {
		return ""
	}
	return strings.ToUpper(fileName[:1]) + fileName[1:]
}
|
package main
import "sync"
// relayAccessor abstracts relay lookup/lifecycle so callers can be
// tested against a fake store.
type relayAccessor interface {
	getRelay(id string) *streamRelay    // nil when absent
	deleteRelay(id string)              // no-op when absent
	createRelay(id string) *streamRelay // nil when construction fails
}
// relayManager is a concurrency-safe relay registry keyed by id,
// backed by sync.Map.
type relayManager struct {
	store sync.Map // id (string) -> *streamRelay
}
// newRelayManager returns an empty registry; the zero-value sync.Map
// is ready to use.
func newRelayManager() *relayManager {
	return &relayManager{}
}
// getRelay returns the relay registered under id, or nil when none
// exists.
func (m *relayManager) getRelay(id string) *streamRelay {
	v, ok := m.store.Load(id)
	if !ok {
		return nil
	}
	return v.(*streamRelay)
}
// createRelay constructs a relay for id and registers it. On
// construction failure it logs and returns nil (callers must check).
// Note: an existing relay under the same id is silently replaced.
func (m *relayManager) createRelay(id string) *streamRelay {
	relay, err := newStreamRelay(id)
	if err != nil {
		logger.Errorf("failed to create relay, %v\n", err)
		return nil
	}
	m.store.Store(id, relay)
	return relay
}
// deleteRelay removes id from the registry; missing ids are a no-op.
func (m *relayManager) deleteRelay(id string) {
	m.store.Delete(id)
}
|
package main
import (
"bufio"
"fmt"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/AcalephStorage/go_check/Godeps/_workspace/src/github.com/newrelic/go_nagios"
"github.com/AcalephStorage/go_check/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v1"
)
// Thresholds (percent of total CPU usage) for the Nagios WARNING and
// CRITICAL states, overridable via --warn-level / --crit-level.
var (
	warnLevel = kingpin.Flag("warn-level", "warn level").Default("80").Float()
	critLevel = kingpin.Flag("crit-level", "critical level").Default("90").Float()
)
// main parses flags and runs the CPU check; checkCpu exits the process
// with the appropriate Nagios status code.
func main() {
	kingpin.Version("1.0.0")
	kingpin.Parse()
	checkCpu(*warnLevel, *critLevel)
}
// checkCpu samples /proc/stat twice, one second apart, derives CPU
// usage percentages over that interval, and exits with a Nagios status
// chosen by the thresholds.
func checkCpu(warnLevel, critLevel float64) {
	before := readCpuStat()
	if before == nil {
		// NOTE(review): presumably nagios.Unknown exits the process; if
		// it merely prints, execution continues with a nil `before` —
		// confirm against the go_nagios package.
		nagios.Unknown("Unable to check CPU status")
	}
	time.Sleep(1 * time.Second)
	after := readCpuStat()
	total, _, each := compute(before, after)
	status := &nagios.NagiosStatus{}
	switch {
	case total >= critLevel:
		status.Value = nagios.NAGIOS_CRITICAL
	case total >= warnLevel:
		status.Value = nagios.NAGIOS_WARNING
	default:
		status.Value = nagios.NAGIOS_OK
	}
	// Assumes the kernel exposes all ten cpu fields (through
	// guest_nice); indexing each[0..9] panics otherwise — TODO confirm
	// minimum supported kernel.
	status.Message = fmt.Sprintf("total=%0.2f user=%0.2f nice=%0.2f system=%0.2f idle=%0.2f iowait=%0.2f irq=%0.2f softirq=%0.2f steal=%0.2f guest=%0.2f guest_nice=%0.2f", total, each[0], each[1], each[2], each[3], each[4], each[5], each[6], each[7], each[8], each[9])
	nagios.ExitWithStatus(status)
}
// compute derives CPU usage percentages from two cumulative /proc/stat
// samples: total is the overall busy percentage, free the idle
// percentage (field 3), and each the per-field percentages in
// /proc/stat order. Mismatched sample lengths and a zero elapsed
// interval are handled gracefully.
func compute(before []int64, after []int64) (total, free float64, each []float64) {
	// Guard against mismatched lengths (e.g. a failed first read): the
	// original indexed `before` unchecked and could panic.
	n := len(after)
	if len(before) < n {
		n = len(before)
	}
	diff := make([]int64, n)
	totalDiff := int64(0)
	for i := 0; i < n; i++ {
		diff[i] = after[i] - before[i]
		totalDiff += diff[i]
	}
	each = make([]float64, len(after))
	// With a zero interval the original divided by zero, making every
	// percentage NaN; leave the fields at zero instead.
	if totalDiff != 0 {
		for i, d := range diff {
			each[i] = 100 * (float64(d) / float64(totalDiff))
		}
	}
	// Field 3 is "idle". Degenerate inputs report fully idle rather
	// than NaN.
	if totalDiff == 0 || len(each) <= 3 {
		free = 100.0
	} else {
		free = each[3]
	}
	total = 100.0 - free
	return total, free, each
}
// readCpuStat reads the aggregate "cpu " line from /proc/stat and
// returns its counters as
// [user, nice, system, idle, iowait, irq, softirq, steal, guest, guest_nice],
// or nil when the file cannot be opened or the line is missing.
func readCpuStat() []int64 {
	file, err := os.Open("/proc/stat")
	if err != nil {
		// The original ignored this error and scanned a nil file.
		return nil
	}
	// The original never closed the file, leaking a descriptor per call.
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := scanner.Text()
		// Match only the aggregate line ("cpu "), not per-core "cpuN".
		if stat, _ := regexp.MatchString("^cpu ", line); stat {
			arr := strings.Fields(line)
			return toIntArray(arr[1:])
		}
	}
	return nil
}
// toIntArray parses each decimal string in arr into an int64.
// Unparseable entries become 0 (ParseInt's zero value); errors are
// deliberately ignored to keep /proc/stat parsing lenient.
func toIntArray(arr []string) []int64 {
	out := make([]int64, len(arr))
	for idx, s := range arr {
		n, _ := strconv.ParseInt(s, 10, 64)
		out[idx] = n
	}
	return out
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package dlp
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"time"
"chromiumos/tast/common/fixture"
"chromiumos/tast/common/policy"
"chromiumos/tast/common/policy/fakedms"
"chromiumos/tast/common/testexec"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/policyutil"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the test with the Tast framework, gated on hardware
// with a privacy screen; the Param value selects the browser type
// (ash vs. lacros) consumed via s.Param() in the test body.
func init() {
	testing.AddTest(&testing.Test{
		Func:         DataLeakPreventionRulesListPrivacyScreen,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Test behavior of DataLeakPreventionRulesList policy with privacy screen blocked restriction",
		Contacts: []string{
			"ayaelattar@google.com",
			"chromeos-dlp@google.com",
		},
		SoftwareDeps: []string{"chrome"},
		HardwareDeps: hwdep.D(hwdep.PrivacyScreen()),
		Attr:         []string{"group:mainline", "informational"},
		Params: []testing.Param{{
			Fixture: fixture.ChromePolicyLoggedIn,
			Val:     browser.TypeAsh,
		}, {
			Name:              "lacros",
			ExtraAttr:         []string{"informational"},
			ExtraSoftwareDeps: []string{"lacros"},
			Fixture:           fixture.LacrosPolicyLoggedIn,
			Val:               browser.TypeLacros,
		}},
	})
}
// DataLeakPreventionRulesListPrivacyScreen serves a DLP policy whose
// PRIVACY_SCREEN/BLOCK rule matches a local test server's /blocked
// URL, then verifies — via the tray bubble and the modetest
// privacy-screen connector property — that the privacy screen turns on
// for restricted pages and stays off for allowed ones.
func DataLeakPreventionRulesListPrivacyScreen(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	fakeDMS := s.FixtValue().(fakedms.HasFakeDMS).FakeDMS()
	// Keep the unshortened context for cleanup actions.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "Hello DLP client you navigated to ", r.URL.Path)
	}))
	defer server.Close()
	// DLP policy with privacy screen blocked restriction.
	policyDLP := []policy.Policy{&policy.DataLeakPreventionRulesList{
		Val: []*policy.DataLeakPreventionRulesListValue{
			{
				Name:        "Enable privacy screen for confidential content in restricted source",
				Description: "Privacy screen should be enabled when on restricted site",
				Sources: &policy.DataLeakPreventionRulesListValueSources{
					Urls: []string{
						server.URL + "/blocked",
					},
				},
				Restrictions: []*policy.DataLeakPreventionRulesListValueRestrictions{
					{
						Class: "PRIVACY_SCREEN",
						Level: "BLOCK",
					},
				},
			},
		},
	},
	}
	// Update the policy blob.
	pb := policy.NewBlob()
	pb.AddPolicies(policyDLP)
	// Update policy.
	if err := policyutil.ServeBlobAndRefresh(ctx, fakeDMS, cr, pb); err != nil {
		s.Fatal("Failed to serve and refresh: ", err)
	}
	// Connect to Test API.
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to connect to test API: ", err)
	}
	// Subtests: one restricted URL, one unrestricted.
	for _, param := range []struct {
		name        string
		wantAllowed bool
		url         string
	}{
		{
			name:        "blocked",
			wantAllowed: false,
			url:         server.URL + "/blocked",
		},
		{
			name:        "allowed",
			wantAllowed: true,
			url:         server.URL + "/allowed",
		},
	} {
		s.Run(ctx, param.name, func(ctx context.Context, s *testing.State) {
			br, closeBrowser, err := browserfixt.SetUp(ctx, cr, s.Param().(browser.Type))
			if err != nil {
				s.Fatal("Failed to open the browser: ", err)
			}
			defer closeBrowser(cleanupCtx)
			ui := uiauto.New(tconn)
			conn, err := br.NewConn(ctx, param.url)
			if err != nil {
				s.Fatal("Failed to open page: ", err)
			}
			defer conn.Close()
			defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "ui_tree_"+param.name)
			// The "on" bubble must appear exactly when the page is restricted.
			if err := checkPrivacyScreenOnBubble(ctx, ui, param.wantAllowed); err != nil {
				s.Error("Couldn't check for notification: ", err)
			}
			// Cross-check the DRM connector property.
			value, err := privacyScreenValue(ctx)
			if err != nil {
				s.Fatal("Couldn't check value for privacy screen prop: ", err)
			}
			if !param.wantAllowed && !value {
				s.Errorf("Privacy screen prop value: got %v; want true", value)
			}
			if param.wantAllowed && value {
				s.Errorf("Privacy screen prop value: got %v; want false", value)
			}
			// Navigate away from the restricted content.
			if _, err := br.NewConn(ctx, "https://www.google.com"); err != nil {
				s.Error("Failed to open page: ", err)
			}
			if err := checkPrivacyScreenOffBubble(ctx, ui, param.wantAllowed); err != nil {
				s.Error("Couldn't check for notification: ", err)
			}
			// Wait for privacy screen to be disabled.
			if err := testing.Sleep(ctx, time.Second); err != nil {
				s.Fatal("Failed to sleep: ", err)
			}
			value, err = privacyScreenValue(ctx)
			// Privacy screen should be disabled.
			if value {
				s.Errorf("Privacy screen prop value: got %v; want false", value)
			}
		})
	}
}
// checkPrivacyScreenOnBubble verifies the "privacy screen is on"
// bubble: it must appear on restricted pages (wantAllowed == false)
// and must be absent on allowed pages (wantAllowed == true).
// Message name - IDS_ASH_STATUS_TRAY_PRIVACY_SCREEN_TOAST_ACCESSIBILITY_TEXT
func checkPrivacyScreenOnBubble(ctx context.Context, ui *uiauto.Context, wantAllowed bool) error {
	bubble := nodewith.NameContaining("Privacy screen is on. Enforced by your administrator").First()
	waitErr := ui.WaitUntilExists(bubble)(ctx)
	switch {
	case waitErr != nil && !wantAllowed:
		return errors.Wrap(waitErr, "failed to check for privacy screen on bubble")
	case waitErr == nil && wantAllowed:
		return errors.New("Privacy screen on bubble found expected none")
	}
	return nil
}
// checkPrivacyScreenOffBubble verifies the "privacy screen is off"
// bubble: it must appear after leaving a restricted page
// (wantAllowed == false) and must be absent otherwise.
func checkPrivacyScreenOffBubble(ctx context.Context, ui *uiauto.Context, wantAllowed bool) error {
	// Message name - IDS_ASH_STATUS_TRAY_PRIVACY_SCREEN_OFF_STATE
	bubbleMessage := nodewith.NameContaining("Privacy screen is off").First()
	err := ui.WaitUntilExists(bubbleMessage)(ctx)
	if err != nil && !wantAllowed {
		// Fixed the duplicated word ("bubble bubble") in the original
		// error message.
		return errors.Wrap(err, "failed to check for privacy screen off bubble existence")
	}
	if err == nil && wantAllowed {
		return errors.New("Privacy screen off bubble found expected none")
	}
	return nil
}
// privacyScreenValue retrieves the value of the DRM privacy-screen
// connector property by parsing `modetest -c` output. It returns an
// error when the connector or its value line cannot be found.
func privacyScreenValue(ctx context.Context) (bool, error) {
	// modetest -c lists connectors and their properties.
	output, err := testexec.CommandContext(ctx, "modetest", "-c").Output()
	if err != nil {
		return false, err
	}
	// Everything after "privacy-screen:" belongs to that property.
	outputSlice := strings.Split(string(output), "privacy-screen:")
	if len(outputSlice) <= 1 {
		return false, errors.New("failed to find privacy screen prop")
	}
	for _, line := range strings.Split(outputSlice[1], "\n") {
		idx := strings.Index(line, "value:")
		if idx < 0 {
			continue
		}
		// Parse the token following "value:"; the original searched the
		// whole line for "1"/"0", which also matched digits appearing
		// elsewhere (e.g. in hex addresses or multi-digit values).
		fields := strings.Fields(line[idx+len("value:"):])
		if len(fields) > 0 {
			switch fields[0] {
			case "1":
				return true, nil
			case "0":
				return false, nil
			}
		}
		// Need to check for prop value only once.
		return false, errors.New("failed to find value for privacy screen prop")
	}
	return false, nil
}
|
package function
import (
"encoding/json"
"github.com/hecatoncheir/Storage"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
)
// TestFAASFunctions_ReadCitiesByName verifies that ReadCitiesByName forwards
// the expected payload (Language, CityName, DatabaseGateway) to the functions
// gateway and decodes the cities from the gateway's response.
func TestFAASFunctions_ReadCitiesByName(t *testing.T) {
	LanguageForTest := "ru"
	CityNameForTest := "TestCityName"
	DatabaseGatewayForTest := "http://TestDatabaseGateway"

	// Fake gateway: validate the forwarded request, then reply with two
	// cities wrapped in the Response envelope.
	testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		encodedBody, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Errorf("Read body of request error: %v", err)
		}

		var responseBodyEncoded map[string]string
		err = json.Unmarshal(encodedBody, &responseBodyEncoded)
		if err != nil {
			t.Errorf("Unmarshal body of request error: %v", err)
		}

		if responseBodyEncoded["Language"] != LanguageForTest {
			t.Fatalf("Expected: \"%v\", but got: %v", LanguageForTest, responseBodyEncoded["Language"])
		}

		if responseBodyEncoded["CityName"] != CityNameForTest {
			t.Fatalf("Expected: \"%v\", but got: %v", CityNameForTest, responseBodyEncoded["CityName"])
		}

		if responseBodyEncoded["DatabaseGateway"] != DatabaseGatewayForTest {
			t.Fatalf(
				"Expected: \"%v\", but got: %v", DatabaseGatewayForTest, responseBodyEncoded["DatabaseGateway"])
		}

		existedCitiesInStorage := []storage.City{
			{
				ID:       "0x12",
				Name:     "Test city name",
				IsActive: true},
			{
				ID:       "0x13",
				Name:     "Other test city name",
				IsActive: true}}

		encodedExistedCitiesInStorage, err := json.Marshal(existedCitiesInStorage)
		if err != nil {
			t.Error(err.Error())
		}

		response := Response{Data: string(encodedExistedCitiesInStorage)}

		encodedResponse, err := json.Marshal(response)
		if err != nil {
			t.Error(err.Error())
		}

		_, err = io.WriteString(w, string(encodedResponse))
		if err != nil {
			t.Error(err.Error())
		}
	})

	testServer := httptest.NewServer(testHandler)
	defer testServer.Close()

	faas := &FAASFunctions{FunctionsGateway: testServer.URL, DatabaseGateway: DatabaseGatewayForTest}
	cities := faas.ReadCitiesByName(CityNameForTest, LanguageForTest)

	// Fixed garbled failure message (previously "Expect more cities that 1").
	if len(cities) < 1 {
		t.Fatalf("Expected at least 1 city, but got: %v", len(cities))
	}
}
// TestFAASFunctions_ReadCompanyByID verifies that ReadCityByID forwards the
// expected payload (Language, CityID, DatabaseGateway) to the functions
// gateway and decodes the returned city.
// NOTE(review): the test name says "ReadCompanyByID" but it exercises
// ReadCityByID with city fixtures - likely a copy/paste misnomer; confirm
// and rename.
func TestFAASFunctions_ReadCompanyByID(t *testing.T) {
	LanguageForTest := "ru"
	CityIDForTest := "0x12"
	DatabaseGatewayForTest := "http://TestDatabaseGateway"

	// Fake gateway: validate the forwarded request, then reply with one city
	// wrapped in the Response envelope.
	testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		encodedBody, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Errorf("Read body of request error: %v", err)
		}

		var responseBodyEncoded map[string]string
		err = json.Unmarshal(encodedBody, &responseBodyEncoded)
		if err != nil {
			t.Errorf("Unmarshal body of request error: %v", err)
		}

		if responseBodyEncoded["Language"] != LanguageForTest {
			t.Fatalf("Expected: \"%v\", but got: %v", LanguageForTest, responseBodyEncoded["Language"])
		}

		if responseBodyEncoded["CityID"] != CityIDForTest {
			t.Fatalf("Expected: \"%v\", but got: %v", CityIDForTest, responseBodyEncoded["CityID"])
		}

		if responseBodyEncoded["DatabaseGateway"] != DatabaseGatewayForTest {
			t.Fatalf(
				"Expected: \"%v\", but got: %v", DatabaseGatewayForTest, responseBodyEncoded["DatabaseGateway"])
		}

		existedCityInStorage := storage.City{
			ID:       "0x12",
			Name:     "Test city name",
			IsActive: true}

		encodedExistedCityInStorage, err := json.Marshal(existedCityInStorage)
		if err != nil {
			t.Error(err.Error())
		}

		response := Response{Data: string(encodedExistedCityInStorage)}

		encodedResponse, err := json.Marshal(response)
		if err != nil {
			t.Error(err.Error())
		}

		_, err = io.WriteString(w, string(encodedResponse))
		if err != nil {
			t.Error(err.Error())
		}
	})

	testServer := httptest.NewServer(testHandler)
	defer testServer.Close()

	faas := &FAASFunctions{FunctionsGateway: testServer.URL, DatabaseGateway: DatabaseGatewayForTest}
	city := faas.ReadCityByID(CityIDForTest, LanguageForTest)

	if city.ID != "0x12" {
		t.Fatalf("Expect city id: %v, but got: %v", CityIDForTest, city.ID)
	}
}
|
// Copyright (c) KwanJunWen
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
package estemplate
import "encoding/json"
// MetaFieldMeta Other Meta-Field which sets application specific metadata.
// A mapping type can have custom meta data associated with it. THese are not
// used at all by Elasticsearch, but can be used to store application-specific
// metadata, such as the class that a document belongs to.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/mapping-meta-field.html
// for details.
type MetaFieldMeta struct {
	value   interface{}
	rawJSON string
}

// NewMetaFieldMeta initializes a new MetaFieldMeta.
func NewMetaFieldMeta() *MetaFieldMeta {
	return new(MetaFieldMeta)
}

// Value sets a value (interface{}) for the meta data; it is marshalled to
// JSON when Source is called.
func (m *MetaFieldMeta) Value(v interface{}) *MetaFieldMeta {
	m.value = v
	return m
}

// RawJSON sets the raw JSON string for the meta data. When both Value and
// RawJSON are set, RawJSON takes precedence in Source.
func (m *MetaFieldMeta) RawJSON(raw string) *MetaFieldMeta {
	m.rawJSON = raw
	return m
}

// Validate validates MetaFieldMeta. There are currently no constraints, so
// it always succeeds.
func (m *MetaFieldMeta) Validate() error {
	return nil
}

// Source returns the serializable JSON for the source builder, e.g.:
//
//	{
//	  "_meta": {
//	    "class": "MyApp::User",
//	    "version": {
//	      "min": "1.0",
//	      "max": "1.3"
//	    }
//	  }
//	}
func (m *MetaFieldMeta) Source(includeName bool) (interface{}, error) {
	options := make(map[string]interface{})

	// Serialize the structured value first, then let rawJSON override it.
	var raw []byte
	if m.value != nil {
		encoded, err := json.Marshal(m.value)
		if err != nil {
			return nil, err
		}
		raw = encoded
	}
	if m.rawJSON != "" {
		raw = []byte(m.rawJSON)
	}
	if raw != nil {
		if err := json.Unmarshal(raw, &options); err != nil {
			return nil, err
		}
	}

	if !includeName {
		return options, nil
	}
	return map[string]interface{}{"_meta": options}, nil
}
|
// Copyright (c) 2020 - for information on the respective copyright owner
// see the NOTICE file and/or the repository at
// https://github.com/hyperledger-labs/perun-node
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package session
import (
"path/filepath"
"time"
"github.com/spf13/viper"
)
// WalletConfig defines the parameters required to configure a wallet.
type WalletConfig struct {
	KeystorePath string // path to the keystore holding the wallet's keys
	Password string // password for decrypting keys in the keystore
}
// UserConfig defines the parameters required to configure a user.
// Address strings should be parsed using the wallet backend.
type UserConfig struct {
	Alias string // human-readable identifier for this user

	OnChainAddr string // address of the on-chain account
	OnChainWallet WalletConfig // wallet holding the on-chain account key

	// PartAddrs - presumably addresses of channel participant accounts;
	// confirm against the consumer of this config.
	PartAddrs []string
	OffChainAddr string // address of the off-chain account
	OffChainWallet WalletConfig // wallet holding the off-chain account key

	CommAddr string // address for off-chain communication
	CommType string // type/protocol of off-chain communication
}
// Config defines the parameters required to configure a session.
type Config struct {
	User UserConfig // the user this session acts on behalf of

	IDProviderType string // Type of ID provider.
	IDProviderURL string // URL for accessing the ID provider.

	ChainURL string // URL of the blockchain node.
	Asset, Adjudicator string // Address of the Asset and Adjudicator contracts.

	ChainConnTimeout time.Duration // Timeout for connecting to blockchain node.
	OnChainTxTimeout time.Duration // Timeout to wait for confirmation of on-chain tx.
	ResponseTimeout time.Duration // Timeout to wait for a response from the peer / user.

	DatabaseDir string // Path to directory containing persistence database.
	// Timeout for re-establishing all open channels (if any) that was persisted during the
	// previous running instance of the node.
	PeerReconnTimeout time.Duration
}
// ParseConfig parses the session configuration from a file. The file format
// is inferred by viper from the file's extension.
func ParseConfig(configFile string) (Config, error) {
	v := viper.New()
	v.SetConfigFile(filepath.Clean(configFile))

	if err := v.ReadInConfig(); err != nil {
		return Config{}, err
	}

	var cfg Config
	err := v.Unmarshal(&cfg)
	return cfg, err
}
|
package problem
import "fmt"
// LinkNode is a node of a singly linked list holding an int value.
type LinkNode struct {
	Value int
	Next *LinkNode
}
// LinkSort sorts the linked list starting at head in place using a
// quicksort-style partition over node values (see linkSort). Lists of zero
// or one element are returned unchanged.
func LinkSort(head *LinkNode) {
	if head == nil || head.Next == nil {
		return
	}
	// Locate the final node so linkSort can operate on [head, tail].
	tail := head.Next
	for tail.Next != nil {
		tail = tail.Next
	}
	linkSort(head, tail)
}
// linkSort recursively quicksorts the sublist [head, end] in place by
// swapping node Values (nodes themselves are never relinked); head serves as
// the pivot. NOTE(review): the names look inverted - "great" advances over
// values smaller than the pivot and "less" over larger ones; confirm the
// intended ordering before renaming.
func linkSort(head, end *LinkNode) {
	if head == nil || head == end {
		return
	}
	great := head
	less := head.Next
	for {
		// Advance great past the run of values already below the pivot.
		for {
			if great.Next.Value < head.Value {
				great = great.Next
				if great.Next == end.Next {
					break
				}
			} else {
				break
			}
		}
		// Find the next node past great whose value is not above the pivot.
		less = great.Next
		for {
			if less == end.Next {
				break
			}
			if less.Value > head.Value {
				less = less.Next
			} else {
				break
			}
		}
		// Swap it into the low partition, or stop when none remains.
		if less != end.Next {
			swapNode(great.Next, less)
			great = great.Next
		} else {
			break
		}
	}
	// Move the pivot to its final position, then sort both halves.
	if great != head {
		swapNode(head, great)
	}
	linkSort(head, great)
	linkSort(great.Next, end)
}
// swapNode exchanges the Value fields of the two nodes; links are untouched.
func swapNode(a, b *LinkNode) {
	a.Value, b.Value = b.Value, a.Value
}
// PrintLinkNode writes each node's value to stdout, one per line, walking
// the list from head to the end.
func PrintLinkNode(head *LinkNode) {
	node := head
	for node != nil {
		fmt.Println(node.Value)
		node = node.Next
	}
}
|
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package tenant
import (
"sigs.k8s.io/controller-runtime/pkg/client"
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)
// IngressHostnames indexes Tenants by their exact ingress hostnames.
type IngressHostnames struct {
}

// Object returns the object type this indexer operates on.
func (IngressHostnames) Object() client.Object {
	return &capsulev1beta1.Tenant{}
}

// Field returns the field path registered for this index.
func (IngressHostnames) Field() string {
	return ".spec.ingressHostnames"
}

// Func extracts the exact ingress hostnames declared by a Tenant; tenants
// without ingress hostnames contribute no index entries.
func (IngressHostnames) Func() client.IndexerFunc {
	return func(object client.Object) (out []string) {
		if hostnames := object.(*capsulev1beta1.Tenant).Spec.IngressHostnames; hostnames != nil {
			out = append(out, hostnames.Exact...)
		}
		return
	}
}
|
package frontend
import (
"net/http"
"time"
"github.com/gorilla/mux"
"services"
"models"
"controllers"
"repository"
"controllers/viewmodels"
)
// ArticlesController serves the frontend article listing and detail pages.
type ArticlesController struct {
	r *mux.Router // subrouter mounted under /articles
	service *services.ArticleService // backing article service
}
// Author describes an article author.
// NOTE(review): neither Author nor Article below is referenced elsewhere in
// this file; confirm they are used by templates or siblings before removing.
type Author struct {
	FullName string
	Website string
	Bio string
}

// Article holds an article's content and metadata.
type Article struct {
	Title string
	Slug string
	Content string
	Metadata models.Metadata
	UpdatedAt time.Time
}
// NewArticlesController mounts a new controller on a subrouter under the
// /articles prefix of the given router.
func NewArticlesController(r *mux.Router) *ArticlesController {
	sub := r.StrictSlash(true).PathPrefix("/articles").Subrouter()
	return &ArticlesController{
		r: sub,
		service: services.NewArticleService(),
	}
}
// RegisterEndpoints wires the article routes onto the controller's
// subrouter. Receiver renamed from "ac" to "c" for consistency with the
// other ArticlesController methods.
func (c *ArticlesController) RegisterEndpoints() {
	c.r.Path("/").
		Methods(http.MethodGet).
		HandlerFunc(c.ShowArticles)

	c.r.Path("/{slug}").
		Methods(http.MethodGet).
		HandlerFunc(c.ShowArticle)
}
// ShowArticles renders the article index page. On a service failure it
// renders the error template with a 500 status.
func (c *ArticlesController) ShowArticles(w http.ResponseWriter, r *http.Request) {
	data := struct {
		Title string
		Articles []*viewmodels.Article
		Error string
	}{
		Title: "Articles",
	}

	articles, err := c.service.GetArticlesByQuery(repository.NewDefaultQuery())
	if err != nil {
		data.Title = http.StatusText(http.StatusInternalServerError)
		data.Error = err.Error()
		// Fixed: previously replied 200 OK while rendering the error page
		// for an internal failure, misleading clients and crawlers.
		controllers.Renderer.HTML(w, http.StatusInternalServerError, "error", &data)
		return
	}

	data.Articles = articles
	controllers.Renderer.HTML(w, http.StatusOK, "articles", &data)
}
// ShowArticle renders a single article located by its slug; an unknown slug
// renders the 404 template with a 404 status.
func (c *ArticlesController) ShowArticle(w http.ResponseWriter, r *http.Request) {
	var data struct {
		Title string
		Article *viewmodels.Article
		Error string
	}

	slug := mux.Vars(r)["slug"]
	article, err := c.service.GetArticleBySlug(slug)
	if err != nil {
		data.Title = http.StatusText(http.StatusNotFound)
		data.Error = err.Error()
		controllers.Renderer.HTML(w, http.StatusNotFound, "404", &data)
		return
	}

	data.Title = article.Title
	data.Article = article
	controllers.Renderer.HTML(w, http.StatusOK, "article", &data)
}
|
package monitor
import (
"yunion.io/x/jsonutils"
"yunion.io/x/onecloud/pkg/mcclient/options"
)
// CommonAlertListOptions are the query options for listing common alerts.
type CommonAlertListOptions struct {
	options.BaseListOptions
	// Alert type.
	AlertType string `help:"common alert type" choices:"normal|system"`
	Level string `help:"common alert notify level" choices:"normal|important|fatal"`
}

// Params converts the list options into request parameters.
func (o *CommonAlertListOptions) Params() (jsonutils.JSONObject, error) {
	return options.ListStructToParams(o)
}
// CommonAlertShowOptions identifies a single alert to show.
type CommonAlertShowOptions struct {
	// Fixed typo in the help text ("alart ").
	ID string `help:"ID of alert" json:"-"`
}

// Params converts the show options into request parameters.
func (o *CommonAlertShowOptions) Params() (jsonutils.JSONObject, error) {
	return options.StructToParams(o)
}

// GetId returns the alert ID.
func (o *CommonAlertShowOptions) GetId() string {
	return o.ID
}
// CommonAlertUpdateOptions carries the mutable fields of an alert update.
type CommonAlertUpdateOptions struct {
	// Fixed typo in the help text ("alart ").
	ID string `help:"ID of alert" json:"-"`
	Period string `help:"exec period of alert" json:"period"`
	Comparator string `help:"Alarm policy threshold comparison method" json:"comparator"`
	Threshold string `help:"Alarm policy threshold" json:"threshold"`
}

// Params converts the update options into request parameters.
func (o *CommonAlertUpdateOptions) Params() (jsonutils.JSONObject, error) {
	return options.StructToParams(o)
}

// GetId returns the alert ID.
func (o *CommonAlertUpdateOptions) GetId() string {
	return o.ID
}
// CommonAlertDeleteOptions identifies one or more alerts to delete.
type CommonAlertDeleteOptions struct {
	// Fixed typo in the help text ("alart").
	ID []string `help:"ID of alert"`
	Force bool `help:"force to delete alert"`
}

// GetIds returns the IDs of the alerts to delete.
func (o *CommonAlertDeleteOptions) GetIds() []string {
	return o.ID
}

// Params converts the delete options into request parameters.
func (o *CommonAlertDeleteOptions) Params() (jsonutils.JSONObject, error) {
	return options.StructToParams(o)
}
|
package batch
import "time"
// Config holds the tuning parameters for a batch.
// Field meanings below are inferred from the names - confirm against the
// batch implementation that consumes this struct.
type Config struct {
	maxItems int // presumably the max number of items per batch
	maxAge time.Duration // presumably the max time a batch may accumulate
	consumers int // presumably the number of concurrent consumers
}
|
package main
import (
"net/http"
)
// main registers the HTTP handlers and starts the server on :8080.
func main() {
	objectlist.MakeList()

	// Register login/registration/signing handlers.
	http.HandleFunc("/hello", handler)
	http.HandleFunc("/regis", regis)
	http.HandleFunc("/login", login)
	http.HandleFunc("/sign", sign)

	// Fixed: the ListenAndServe error was silently dropped, so a failed
	// bind (e.g. port already in use) exited without any diagnostics.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		panic(err)
	}
}
|
package errors
import (
"bytes"
"errors"
"fmt"
"runtime"
"strconv"
"strings"
)
// prefix is prepended to every continuation line ("+ ") produced by
// erx.Error; see SetPrefix.
var prefix string

// erx is an error enriched with the recording call site's program counter
// and an optional parent, forming a chain from newest (this node) to oldest.
type erx struct {
	pc uintptr
	error error
	parent *erx
}
// Error implements the error interface. The chain is rendered oldest-first:
// the root entry is prefixed with "E ", each later entry starts on a new
// line with the configured prefix and "+ ". Each entry shows the recorded
// call site as trimmed file:line, followed by its message if any.
func (e erx) Error() string {
	// Flatten the parent chain: flat[0] is this (newest) node, the last
	// element is the root.
	x, flat := &e, make([]*erx, 0, 16)
	for {
		flat = append(flat, x)
		if x.parent == nil {
			break
		}
		x = x.parent
	}
	max := len(flat) - 1
	var buf bytes.Buffer
	// Walk the flattened chain in reverse so the oldest error prints first.
	for i := 0; i <= max; i++ {
		x = flat[max-i]
		if i == 0 {
			buf.WriteString("E ")
		} else {
			buf.WriteString("\n")
			buf.WriteString(prefix)
			buf.WriteString("+ ")
		}
		// Resolve the recorded program counter to a file:line location.
		fn := runtime.FuncForPC(x.pc)
		if fn == nil {
			buf.WriteString("unknown")
		} else {
			file, line := fn.FileLine(x.pc)
			buf.WriteString(trimSourcePath(fn.Name(), file))
			buf.WriteRune(':')
			buf.WriteString(strconv.Itoa(line))
		}
		if x.error != nil {
			buf.WriteRune(' ')
			buf.WriteString(x.error.Error())
		}
	}
	return buf.String()
}
// New returns a new error with the given printf-formatted error message.
// The caller's program counter is recorded for the call-site annotation
// produced by Error().
func New(text string, a ...interface{}) error {
	x := &erx{
		pc: getPC(3),
	}
	// Fixed: the message was previously built twice (an unconditional
	// errors.New in the literal followed by this branch), wasting an
	// allocation on the formatted path.
	if len(a) == 0 {
		x.error = errors.New(text)
	} else {
		x.error = fmt.Errorf(text, a...)
	}
	return x
}
// Append returns a new error with the parent error
// and given printf-formatted error message.
//
// A nil parent yields a single-entry chain; a non-erx parent is first
// wrapped (sharing this call site's pc); an existing *erx parent is chained
// as-is.
func Append(parent error, text string, a ...interface{}) error {
	pc := getPC(3)
	// Normalize the parent into an *erx (no-op when it already is one).
	p := extend(parent, pc)
	var err error
	if len(a) == 0 {
		err = errors.New(text)
	} else {
		err = fmt.Errorf(text, a...)
	}
	// With no parent, reuse the freshly created node for the message itself.
	if parent == nil {
		p.error = err
		return p
	}
	e := &erx{
		pc: pc,
		error: err,
		parent: p,
	}
	return e
}
// Wrap returns wrapped one or more errors.
//
// Nil entries are skipped; the remaining errors are chained in argument
// order by attaching each previous error as the parent of the next chain's
// root. Returns nil when every argument is nil.
func Wrap(errs ...error) error {
	if len(errs) == 0 {
		return nil
	}
	pc := getPC(3)
	var x, parent *erx
loop:
	for _, err := range errs {
		if err == nil {
			continue
		}
		parent = x
		x = extend(err, pc)
		if parent == nil {
			continue
		}
		// Scan x's existing chain: if parent already appears in it, keep
		// that chain instead of linking a duplicate.
		// NOTE(review): on a hit x is reset to parent - confirm this
		// (rather than leaving x unchanged) is the intended result.
		e := x
		for e.parent != nil {
			if e == parent {
				x = parent
				continue loop
			}
			e = e.parent
		}
		e.parent = parent
	}
	if x == nil {
		return nil
	}
	return x
}
// SetPrefix sets the prefix emitted before each continuation line ("+ ") of
// a formatted error chain; see erx.Error.
func SetPrefix(s string) {
	prefix = s
}
// getPC returns the program counter of the frame calldepth levels up the
// stack (per runtime.Callers semantics), or 0 when the stack is not that
// deep.
func getPC(calldepth int) uintptr {
	pcs := make([]uintptr, 1)
	runtime.Callers(calldepth, pcs)
	return pcs[0]
}
// extend returns err as an *erx, wrapping it with the given pc when it is
// not one already.
func extend(err error, pc uintptr) *erx {
	if e, ok := err.(*erx); ok {
		return e
	}
	return &erx{pc: pc, error: err}
}
// trimSourcePath combines a function's package-qualified name with its
// source path, keeping the package portion of the name plus the trailing
// path components: the last two when name contains a package separator,
// otherwise just the file name.
func trimSourcePath(name, path string) string {
	const sep = '/'
	nameSlash := strings.LastIndexByte(name, sep)
	// Keep two trailing path segments when name is package-qualified,
	// otherwise only the file itself.
	remaining := 2
	if nameSlash == -1 {
		remaining = 1
	}
	pathIdx := strings.LastIndexFunc(path, func(r rune) bool {
		if r == sep {
			remaining--
		}
		return remaining == 0
	})
	if nameSlash == -1 {
		return path[pathIdx+1:]
	}
	return name[:nameSlash] + path[pathIdx:]
}
|
package app
import (
"github.com/alidevjimmy/go-echo-train/controllers"
"github.com/alidevjimmy/go-echo-train/middlewares"
)
// router registers the application's HTTP routes.
// NOTE(review): "e" is a package-level server instance (presumably echo)
// defined elsewhere in this package - confirm it is initialized before
// router() runs.
func router() {
	e.GET("/", controllers.Index)
	// Product detail is restricted to users via the OnlyUsers middleware.
	e.GET("/product/:id", controllers.GetProductById, middlewares.OnlyUsers)
}
package main
import (
"test-spider/engine"
"test-spider/parse"
"test-spider/scheduler"
)
// main crawls book.douban.com tags using the concurrent engine with two
// workers. The commented-out blocks preserve alternative entry points
// (zhenai crawling, single-channel mode) for manual experimentation.
func main(){
	// Below is the multi-channel (concurrent) mode.
	e:= engine.ConcurrentEngine{
		&scheduler.QueueScheduler{},
		2,
	}
	e.Run(engine.Request_q{
		Url: "https://book.douban.com",
		//Url:"http://www.zhenai.com/zhenghun",
		//ParseFunc:zhengai.ParseCity,
		ParseFunc:parse.ParseTag,
	})
	/*e:= engine.ConcurrentEngine{
		&scheduler.QueueScheduler{},
		2,
	}
	e.Run(engine.Request_q{
		Url: "https://book.douban.com",
		//Url:"http://www.zhenai.com/zhenghun",
		//ParseFunc:zhengai.ParseCity,
		ParseFunc:parse.ParseTag,
	})*/
	// Below is the single-channel mode.
	/*engine.Run(engine.Request_q{
		Url: "https://book.douban.com",
		//Url:"http://www.zhenai.com/zhenghun",
		ParseFunc: parse.ParseTag,
		//ParseFunc: zhengai.ParseCityList,
	})*/
	/*engine.Run(engine.Request_q{
		Url: "https://book.douban.com",
		ParseFunc: parse.ParseTag,
	})*/
}
|
package utils
// AddStringBytes concatenates the given strings and returns the result as a
// byte slice, allocated in a single pass (equivalent to bytes.Join with no
// separator).
func AddStringBytes(s ...string) []byte {
	if len(s) == 0 {
		return []byte{}
	}
	if len(s) == 1 {
		return []byte(s[0])
	}

	total := 0
	for _, part := range s {
		total += len(part)
	}

	buf := make([]byte, 0, total)
	for _, part := range s {
		buf = append(buf, part...)
	}
	return buf
}
// AddString concatenates the given strings into a single string.
func AddString(s ...string) string {
	if len(s) == 0 {
		return ""
	}
	if len(s) == 1 {
		return s[0]
	}
	return B2S(AddStringBytes(s...))
}
|
package main
import (
"context"
"fmt"
"net"
"net/http"
"reflect"
"strings"
"github.com/bradleyfalzon/ghinstallation"
v1 "github.com/csweichel/werft/pkg/api/v1"
plugin "github.com/csweichel/werft/pkg/plugin/client"
"github.com/google/go-github/v35/github"
log "github.com/sirupsen/logrus"
)
var (
werftGithubContextPrefix = "ci/werft"
werftResultChannelPrefix = "github-check-"
// annotationStatusUpdate is set on jobs whoose status needs to be updated on GitHub.
// This is set only on jobs created through GitHub events.
annotationStatusUpdate = "updateGitHubStatus"
defaultGitHubHost = "github.com"
commandHelp = `You can interact with werft using: ` + "`" + `/werft command <args>` + "`" + `.
Available commands are:
- ` + "`" + `/werft run [annotation=value]` + "`" + ` which starts a new werft job from this context.
You can optionally pass multiple whitespace-separated annotations.
- ` + "`" + `/werft help` + "`" + ` displays this help
`
)
// Config configures this plugin.
type Config struct {
	BaseURL string `yaml:"baseURL"` // public werft base URL used in status/target links
	WebhookSecret string `yaml:"webhookSecret"` // shared secret validating webhook payloads
	PrivateKeyPath string `yaml:"privateKeyPath"` // GitHub app private key file
	InstallationID int64 `yaml:"installationID,omitempty"`
	AppID int64 `yaml:"appID"`
	// PRComments controls the "/werft ..." pull-request comment commands.
	PRComments struct {
		Enabled bool `yaml:"enabled"`
		// If this is a non-empty list, the commenting user needs to be in at least one
		// of the organisations listed here for the build to start.
		RequiresOrganisation []string `yaml:"requiresOrg"`
		// If true, we'll update the comment to give feedback about what werft understood.
		UpdateComment bool `yaml:"updateComment"`
	} `yaml:"pullRequestComments"`
}
// main registers the GitHub trigger plugin with the werft plugin host as
// both an integration plugin and a webhook proxy target.
func main() {
	trigger := &githubTriggerPlugin{}
	plugin.Serve(&Config{},
		plugin.WithIntegrationPlugin(trigger),
		plugin.WithProxyPass(trigger),
	)
}
// githubTriggerPlugin bridges werft and GitHub: it starts werft jobs from
// GitHub webhook events and reports job status back as commit statuses.
// Fields are populated in Run before any webhook is processed.
type githubTriggerPlugin struct {
	Config *Config
	Werft v1.WerftServiceClient
	Github *github.Client
}
// Run implements the integration plugin entry point: it authenticates
// against GitHub with the configured app credentials, subscribes to werft
// job updates, and forwards each update to GitHub until the subscription
// fails or ctx is cancelled.
func (p *githubTriggerPlugin) Run(ctx context.Context, config interface{}, srv v1.WerftServiceClient) error {
	cfg, ok := config.(*Config)
	if !ok {
		return fmt.Errorf("config has wrong type %s", reflect.TypeOf(config))
	}

	// Authenticate as a GitHub app installation.
	ghtr, err := ghinstallation.NewKeyFromFile(http.DefaultTransport, cfg.AppID, cfg.InstallationID, cfg.PrivateKeyPath)
	if err != nil {
		return err
	}
	ghClient := github.NewClient(&http.Client{Transport: ghtr})

	p.Config = cfg
	p.Werft = srv
	p.Github = ghClient

	errchan := make(chan error)
	sub, err := srv.Subscribe(ctx, &v1.SubscribeRequest{})
	if err != nil {
		return fmt.Errorf("cannot subscribe for notification: %w", err)
	}
	log.Infof("status updates for GitHub set up")
	// Pump job status updates from the subscription to GitHub; the
	// goroutine exits (reporting via errchan) when the stream breaks.
	go func() {
		for {
			inc, err := sub.Recv()
			if err != nil {
				errchan <- err
				return
			}

			err = p.updateGitHubStatus(inc.Result)
			if err != nil {
				log.WithError(err).Error("cannot update GitHub status")
			}
		}
	}()

	select {
	case err := <-errchan:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
// updateGitHubStatus mirrors a werft job status onto GitHub commit statuses.
// Jobs opt in via the annotationStatusUpdate annotation, whose value names
// the "owner/repo" the status is posted to (falling back to the job's own
// repository metadata). One status is posted for the job itself plus one per
// job result routed to a "github" or "github-check-*" channel.
func (p *githubTriggerPlugin) updateGitHubStatus(job *v1.JobStatus) error {
	var (
		wantsUpdate bool
		statusDstRepo string
	)
	for _, a := range job.Metadata.Annotations {
		if a.Key == annotationStatusUpdate {
			wantsUpdate = true
			statusDstRepo = a.Value
			break
		}
	}
	if !wantsUpdate {
		return nil
	}

	// Map the werft job phase onto GitHub's pending/success/failure states.
	var (
		state string
		desc string
	)
	switch job.Phase {
	case v1.JobPhase_PHASE_PREPARING, v1.JobPhase_PHASE_STARTING, v1.JobPhase_PHASE_RUNNING:
		state = "pending"
		desc = "build is " + strings.TrimPrefix(strings.ToLower(job.Phase.String()), "phase_")
	default:
		if job.Conditions.Success {
			state = "success"
			desc = "The build succeeded!"
		} else {
			state = "failure"
			desc = "The build failed!"
		}
	}
	url := fmt.Sprintf("%s/job/%s", p.Config.BaseURL, job.Name)
	jobGHctx := werftGithubContextPrefix + "/" + job.Metadata.JobSpecName
	ghstatus := &github.RepoStatus{
		State: &state,
		Description: &desc,
		Context: &jobGHctx,
		TargetURL: &url,
	}

	// The annotation value ("owner/repo") wins over the job's repository.
	var (
		segs = strings.Split(statusDstRepo, "/")
		owner string
		repo string
	)
	if len(segs) == 2 {
		owner, repo = segs[0], segs[1]
	} else {
		owner, repo = job.Metadata.Owner, job.Metadata.Repository.Repo
	}

	log.WithField("status", ghstatus).Debugf("updating GitHub status for %s", job.Name)
	ctx := context.Background()
	_, _, err := p.Github.Repositories.CreateStatus(ctx, owner, repo, job.Metadata.Repository.Revision, ghstatus)
	if err != nil {
		return err
	}

	// update all result statuses
	var idx int
	for _, r := range job.Results {
		var (
			ok bool
			ghctx string
		)
		for _, c := range r.Channels {
			// Results on the generic "github" channel get sequentially
			// numbered contexts; "github-check-<name>" channels carry
			// their own context name instead.
			if c == "github" {
				ok = true
				ghctx = fmt.Sprintf("%s/results/%03d", jobGHctx, idx)
				idx++
				break
			}
			if strings.HasPrefix(c, werftResultChannelPrefix) {
				ok = true
				ghctx = fmt.Sprintf("%s/results/%s", jobGHctx, strings.TrimPrefix(c, werftResultChannelPrefix))
				break
			}
		}
		if !ok {
			continue
		}

		// "url" results link to their payload; "conclusion" results carry
		// the GitHub state string directly in the payload.
		resultURL := url
		if r.Type == "url" {
			resultURL = r.Payload
		}
		success := "success"
		if r.Type == "conclusion" {
			success = r.Payload
		}
		_, _, err := p.Github.Repositories.CreateStatus(ctx,
			owner,
			repo,
			job.Metadata.Repository.Revision,
			&github.RepoStatus{
				State: &success,
				TargetURL: &resultURL,
				Description: &r.Description,
				Context: &ghctx,
			},
		)
		if err != nil {
			// Result-status failures are logged but do not abort the loop.
			log.WithError(err).WithField("job", job.Name).Warn("cannot update result status")
		}
	}

	return nil
}
// Serve runs the webhook HTTP endpoint on the given listener until the
// server fails or ctx is cancelled.
// Fixed: http.Serve blocks, so the previous "<-ctx.Done()" after it was
// dead code and the serve error was silently discarded; the server now runs
// in a goroutine and its error is returned.
func (p *githubTriggerPlugin) Serve(ctx context.Context, l net.Listener) error {
	mux := http.NewServeMux()
	mux.HandleFunc("/", p.HandleGithubWebhook)

	errchan := make(chan error, 1)
	go func() {
		errchan <- http.Serve(l, mux)
	}()

	select {
	case err := <-errchan:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
// HandleGithubWebhook handles incoming Github events: push, installation and
// issue-comment events are dispatched to their processors, anything else is
// reported as unhandled. GET requests are redirected to the /github route.
func (p *githubTriggerPlugin) HandleGithubWebhook(w http.ResponseWriter, r *http.Request) {
	var err error
	// Report any error collected below exactly once, as a 500 response.
	defer func(err *error) {
		if *err == nil {
			return
		}

		log.WithError(*err).Warn("GitHub webhook error")
		http.Error(w, (*err).Error(), http.StatusInternalServerError)
	}(&err)

	if r.Method == "GET" {
		http.Redirect(w, r, "/github?"+r.URL.Query().Encode(), 301)
		return
	}

	payload, err := github.ValidatePayload(r, []byte(p.Config.WebhookSecret))
	// Unknown event types are deliberately ignored rather than failed.
	if err != nil && strings.Contains(err.Error(), "unknown X-Github-Event") {
		err = nil
		return
	}
	if err != nil {
		return
	}
	event, err := github.ParseWebHook(github.WebHookType(r), payload)
	if err != nil {
		return
	}
	switch event := event.(type) {
	case *github.PushEvent:
		p.processPushEvent(event)
	case *github.InstallationEvent:
		p.processInstallationEvent(event)
	case *github.IssueCommentEvent:
		p.processIssueCommentEvent(r.Context(), event)
	default:
		log.WithField("event", event).Debug("unhandled GitHub event")
		http.Error(w, "unhandled event", http.StatusInternalServerError)
	}
}
// processPushEvent starts a werft job for a push (or branch deletion) event
// and annotates it so its status is reported back to the pushed-to repo.
// NOTE(review): event.After and event.Pusher.Name are dereferenced without
// nil checks - a malformed payload would panic; confirm upstream validation.
func (p *githubTriggerPlugin) processPushEvent(event *github.PushEvent) {
	ctx := context.Background()

	rev := *event.After
	trigger := v1.JobTrigger_TRIGGER_PUSH
	if event.Deleted != nil && *event.Deleted {
		trigger = v1.JobTrigger_TRIGGER_DELETED
	}
	metadata := v1.JobMetadata{
		Owner: *event.Pusher.Name,
		Repository: &v1.Repository{
			Host: defaultGitHubHost,
			Owner: event.Repo.Owner.GetName(),
			Repo: event.Repo.GetName(),
			Ref: event.GetRef(),
			Revision: rev,
		},
		Trigger: trigger,
		Annotations: []*v1.Annotation{
			{
				Key: annotationStatusUpdate,
				Value: event.Repo.Owner.GetName() + "/" + event.Repo.GetName(),
			},
		},
	}

	_, err := p.Werft.StartGitHubJob(ctx, &v1.StartGitHubJobRequest{
		Metadata: &metadata,
	})
	if err != nil {
		log.WithError(err).Warn("GitHub webhook error")
	}
}
// processInstallationEvent logs newly created GitHub app installations; all
// other installation actions are ignored.
// NOTE(review): Action, Installation.ID and Installation.AppID are
// dereferenced without nil checks - confirm payloads always carry them.
func (p *githubTriggerPlugin) processInstallationEvent(event *github.InstallationEvent) {
	if *event.Action != "created" {
		return
	}

	log.WithFields(log.Fields{
		"action": *event.Action,
		"sender": event.Sender.Name,
		"installationID": *event.Installation.ID,
		"appID": *event.Installation.AppID,
	}).Info("someone just installed a GitHub app for this webhook")
}
// processIssueCommentEvent reacts to "/werft ..." commands in PR comments:
// it authorizes the commenting user (optional organisation membership plus
// write/admin repo permission), parses each command line, and runs the
// requested action. When configured, the original comment is edited to
// append feedback (icon + message) under each command line.
func (p *githubTriggerPlugin) processIssueCommentEvent(ctx context.Context, event *github.IssueCommentEvent) {
	if !p.Config.PRComments.Enabled {
		return
	}
	if event.GetAction() != "created" {
		return
	}
	if !event.GetIssue().IsPullRequest() {
		return
	}

	var (
		segs = strings.Split(event.GetRepo().GetFullName(), "/")
		prDstOwner = segs[0]
		prDstRepo = segs[1]
	)

	var feedback struct {
		Success bool
		Message string
	}
	defer func() {
		if !p.Config.PRComments.UpdateComment {
			return
		}

		icon := ":+1:"
		if !feedback.Success {
			icon = ":-1:"
		}

		comment := event.GetComment()
		lines := strings.Split(comment.GetBody(), "\n")
		newlines := make([]string, 0, len(lines)+2)
		for _, l := range lines {
			newlines = append(newlines, l)
			if strings.HasPrefix(strings.TrimSpace(l), "/werft ") {
				newlines = append(newlines, "", fmt.Sprintf("%s %s", icon, feedback.Message))
			}
		}
		// Fixed: the body was previously joined and assigned inside the
		// loop above on every iteration (misplaced closing brace);
		// assemble it once after the loop.
		body := strings.Join(newlines, "\n")
		comment.Body = &body
		p.Github.Issues.EditComment(ctx, prDstOwner, prDstRepo, event.GetComment().GetID(), comment)
	}()

	pr, _, err := p.Github.PullRequests.Get(ctx, prDstOwner, prDstRepo, event.GetIssue().GetNumber())
	if err != nil {
		log.WithError(err).Warn("GitHub webhook error")
		feedback.Success = false
		feedback.Message = "cannot find corresponding PR"
		return
	}

	// Authorization: organisation membership (when configured) AND
	// write/admin permission on the destination repository.
	var (
		sender = event.GetSender().GetLogin()
		allowed = true
	)
	if len(p.Config.PRComments.RequiresOrganisation) > 0 {
		allowed = false
		for _, org := range p.Config.PRComments.RequiresOrganisation {
			ok, _, err := p.Github.Organizations.IsMember(ctx, org, sender)
			if err != nil {
				log.WithError(err).WithField("org", org).WithField("user", sender).Warn("cannot check organisation membership")
			}
			if ok {
				allowed = true
				break
			}
		}
	}
	permissions, _, err := p.Github.Repositories.GetPermissionLevel(ctx, prDstOwner, prDstRepo, sender)
	if err != nil {
		log.WithError(err).WithField("repo", fmt.Sprintf("%s/%s", prDstOwner, prDstRepo)).WithField("user", sender).Warn("cannot get permission level")
	}
	switch permissions.GetPermission() {
	case "admin", "write":
		// leave allowed as it stands
	default:
		allowed = false
	}
	if !allowed {
		feedback.Success = false
		feedback.Message = "not authorized"
		return
	}

	// Execute every command line in the comment; the feedback reflects the
	// last command processed (errors abort the loop).
	lines := strings.Split(event.GetComment().GetBody(), "\n")
	for _, l := range lines {
		cmd, args, err := parseCommand(l)
		if err != nil {
			feedback.Success = false
			feedback.Message = fmt.Sprintf("cannot parse %s: %v", l, err)
			continue
		}
		if cmd == "" {
			continue
		}

		var resp string
		switch cmd {
		case "run":
			resp, err = p.handleCommandRun(ctx, event, pr, args)
		case "help":
			resp = commandHelp
		default:
			err = fmt.Errorf("unknown command: %s\nUse `/werft help` to list the available commands", cmd)
		}
		if err != nil {
			log.WithError(err).Warn("GitHub webhook error")
			feedback.Success = false
			feedback.Message = err.Error()
			return
		}

		feedback.Success = true
		feedback.Message = resp
	}
}
// handleCommandRun implements the "/werft run [k=v ...]" comment command: it
// starts a werft job for the PR's head (source repo/ref/SHA), attaching the
// given annotations plus annotationStatusUpdate pointing at the destination
// repo so the job reports status back to the PR. Returns the markdown
// feedback message on success.
func (p *githubTriggerPlugin) handleCommandRun(ctx context.Context, event *github.IssueCommentEvent, pr *github.PullRequest, args []string) (msg string, err error) {
	segs := strings.Split(pr.GetHead().GetRepo().GetFullName(), "/")
	var (
		prSrcOwner = segs[0]
		prSrcRepo = segs[1]
	)
	segs = strings.Split(event.GetRepo().GetFullName(), "/")
	var (
		prDstOwner = segs[0]
		prDstRepo = segs[1]
	)

	// Parse "key=value" args; a bare "key" maps to the empty string, and
	// values may themselves contain '=' (everything after the first '=').
	argm := make(map[string]string)
	for _, arg := range args {
		var key, value string
		if segs := strings.Split(arg, "="); len(segs) == 1 {
			key = arg
		} else {
			key, value = segs[0], strings.Join(segs[1:], "=")
		}

		argm[key] = value
	}
	argm[annotationStatusUpdate] = prDstOwner + "/" + prDstRepo

	annotations := make([]*v1.Annotation, 0, len(argm))
	for k, v := range argm {
		annotations = append(annotations, &v1.Annotation{
			Key: k,
			Value: v,
		})
	}

	ref := pr.GetHead().GetRef()
	if !strings.HasPrefix(ref, "refs/") {
		// we assume this is a branch
		ref = "refs/heads/" + ref
	}

	metadata := v1.JobMetadata{
		Owner: event.GetSender().GetLogin(),
		Repository: &v1.Repository{
			Host: defaultGitHubHost,
			Owner: prSrcOwner,
			Repo: prSrcRepo,
			Ref: ref,
			Revision: pr.GetHead().GetSHA(),
		},
		Trigger: v1.JobTrigger_TRIGGER_MANUAL,
		Annotations: annotations,
	}

	// Jobs started from a fork get a distinguishing name suffix.
	var nameSuffix string
	if prDstOwner != prSrcOwner {
		nameSuffix = "fork"
	}

	resp, err := p.Werft.StartGitHubJob(ctx, &v1.StartGitHubJobRequest{
		Metadata: &metadata,
		NameSuffix: nameSuffix,
	})
	if err != nil {
		log.WithError(err).Warn("GitHub webhook error")
		return "", fmt.Errorf("cannot start job - please talk to whoever's in charge of your Werft installation")
	}

	return fmt.Sprintf("started the job as [%s](%s/job/%s)", resp.Status.Name, p.Config.BaseURL, resp.Status.Name), nil
}
// parseCommand extracts a "/werft <cmd> <args...>" command from a comment
// line. Lines not starting with "/werft" yield empty results without error;
// a bare "/werft" with no command is an error.
func parseCommand(l string) (cmd string, args []string, err error) {
	trimmed := strings.TrimSpace(l)
	if !strings.HasPrefix(trimmed, "/werft") {
		return "", nil, nil
	}

	rest := strings.TrimSpace(strings.TrimPrefix(trimmed, "/werft"))
	fields := strings.Fields(rest)
	if len(fields) == 0 {
		return "", nil, fmt.Errorf("missing command")
	}

	return fields[0], fields[1:], nil
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"context"
"fmt"
"os"
"path/filepath"
"github.com/google/subcommands"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/state/statefile"
"gvisor.dev/gvisor/runsc/cmd/util"
"gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
"gvisor.dev/gvisor/runsc/specutils"
)
// File containing the container's saved image/state within the given image-path's directory.
const checkpointFileName = "checkpoint.img"

// Checkpoint implements subcommands.Command for the "checkpoint" command.
type Checkpoint struct {
	imagePath string // directory that receives checkpoint.img (-image-path)
	leaveRunning bool // restart the container after checkpointing (-leave-running)
	compression CheckpointCompression // on-disk image compression (-compression)
}

// Name implements subcommands.Command.Name.
func (*Checkpoint) Name() string {
	return "checkpoint"
}

// Synopsis implements subcommands.Command.Synopsis.
func (*Checkpoint) Synopsis() string {
	return "checkpoint current state of container (experimental)"
}

// Usage implements subcommands.Command.Usage.
func (*Checkpoint) Usage() string {
	return `checkpoint [flags] <container id> - save current state of container.
`
}

// SetFlags implements subcommands.Command.SetFlags.
func (c *Checkpoint) SetFlags(f *flag.FlagSet) {
	f.StringVar(&c.imagePath, "image-path", "", "directory path to saved container image")
	f.BoolVar(&c.leaveRunning, "leave-running", false, "restart the container after checkpointing")
	f.Var(newCheckpointCompressionValue(statefile.CompressionLevelFlateBestSpeed, &c.compression), "compression", "compress checkpoint image on disk. Values: none|flate-best-speed.")

	// Unimplemented flags necessary for compatibility with docker.
	var wp string
	f.StringVar(&wp, "work-path", "", "ignored")
}
// Execute implements subcommands.Command.Execute.
//
// It saves the container's state to <image-path>/checkpoint.img. When
// -leave-running is set, the container is then destroyed and re-created from
// the saved image so it keeps running after the checkpoint (see the TODO
// below for why an in-place restore is not yet possible).
func (c *Checkpoint) Execute(_ context.Context, f *flag.FlagSet, args ...any) subcommands.ExitStatus {
	if f.NArg() != 1 {
		f.Usage()
		return subcommands.ExitUsageError
	}

	id := f.Arg(0)
	conf := args[0].(*config.Config)
	waitStatus := args[1].(*unix.WaitStatus)

	cont, err := container.Load(conf.RootDir, container.FullID{ContainerID: id}, container.LoadOpts{})
	if err != nil {
		util.Fatalf("loading container: %v", err)
	}

	if c.imagePath == "" {
		util.Fatalf("image-path flag must be provided")
	}

	if err := os.MkdirAll(c.imagePath, 0755); err != nil {
		util.Fatalf("making directories at path provided: %v", err)
	}

	fullImagePath := filepath.Join(c.imagePath, checkpointFileName)

	// Create the image file and open for writing.
	// O_EXCL: refuse to clobber an existing checkpoint image.
	file, err := os.OpenFile(fullImagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644)
	if err != nil {
		util.Fatalf("os.OpenFile(%q) failed: %v", fullImagePath, err)
	}
	defer file.Close()

	if err := cont.Checkpoint(file, statefile.Options{Compression: c.compression.Level()}); err != nil {
		util.Fatalf("checkpoint failed: %v", err)
	}

	// Checkpointing stopped the container; we are done unless the user asked
	// for it to keep running.
	if !c.leaveRunning {
		return subcommands.ExitSuccess
	}

	// TODO(b/110843694): Make it possible to restore into same container.
	// For now, we can fake it by destroying the container and making a
	// new container with the same ID. This hack does not work with docker
	// which uses the container pid to ensure that the restore-container is
	// actually the same as the checkpoint-container. By restoring into
	// the same container, we will solve the docker incompatibility.

	// Restore into new container with same ID.
	bundleDir := cont.BundleDir
	if bundleDir == "" {
		util.Fatalf("setting bundleDir")
	}

	spec, err := specutils.ReadSpec(bundleDir, conf)
	if err != nil {
		util.Fatalf("reading spec: %v", err)
	}

	specutils.LogSpecDebug(spec, conf.OCISeccomp)

	if cont.ConsoleSocket != "" {
		log.Warningf("ignoring console socket since it cannot be restored")
	}

	if err := cont.Destroy(); err != nil {
		util.Fatalf("destroying container: %v", err)
	}

	contArgs := container.Args{
		ID:        id,
		Spec:      spec,
		BundleDir: bundleDir,
	}
	cont, err = container.New(conf, contArgs)
	if err != nil {
		util.Fatalf("restoring container: %v", err)
	}
	defer cont.Destroy()

	if err := cont.Restore(conf, fullImagePath); err != nil {
		util.Fatalf("starting container: %v", err)
	}

	ws, err := cont.Wait()
	if err != nil {
		util.Fatalf("Error waiting for container: %v", err)
	}
	*waitStatus = ws
	return subcommands.ExitSuccess
}
// CheckpointCompression represents checkpoint image writer behavior. The
// default behavior is to compress because the default behavior used to be to
// always compress. It implements flag.Getter so it can be used directly as a
// command-line flag value (see Set/Get/String below).
type CheckpointCompression statefile.CompressionLevel
// newCheckpointCompressionValue initializes *p with the given default level
// and returns p for use with flag.FlagSet.Var.
func newCheckpointCompressionValue(val statefile.CompressionLevel, p *CheckpointCompression) *CheckpointCompression {
	*p = CheckpointCompression(val)
	// p already has the right type; no conversion needed on return.
	return p
}
// Set implements flag.Value.
// It parses v into a statefile compression level, rejecting unknown names.
func (g *CheckpointCompression) Set(v string) error {
	level, err := statefile.CompressionLevelFromString(v)
	if err != nil {
		return fmt.Errorf("invalid checkpoint compression type %q", v)
	}
	*g = CheckpointCompression(level)
	return nil
}
// Get implements flag.Getter.
// It returns the current value so generic flag code can read it back.
func (g *CheckpointCompression) Get() any {
	return *g
}

// String implements flag.Value.
// Note: CheckpointCompression's underlying type is a string-like
// statefile.CompressionLevel, so this is a direct conversion.
func (g CheckpointCompression) String() string {
	return string(g)
}

// Level returns corresponding statefile.CompressionLevel value.
func (g CheckpointCompression) Level() statefile.CompressionLevel {
	return statefile.CompressionLevel(g)
}
|
/*
B1 Yönetim Sistemleri Yazılım ve Danışmanlık Ltd. Şti.
User : ICI
Name : Ibrahim ÇOBANİ
Date : 25.07.2019 11:47
Notes :
*/
package models
// Sources describes a data source record exchanged as JSON.
// All fields are optional on the wire (omitempty). Field semantics beyond the
// JSON names are not visible here — confirm against the consuming API.
type Sources struct {
	DataType string `json:"DataType,omitempty"` // kind/type tag of the source's data
	Source   string `json:"Source,omitempty"`   // source name
	SourceId int32  `json:"SourceId,omitempty"` // numeric source identifier
}
|
package cmd
import (
e "github.com/cloudposse/atmos/internal/exec"
u "github.com/cloudposse/atmos/pkg/utils"
"github.com/spf13/cobra"
)
// awsEksCmdUpdateKubeconfigCmd executes 'aws eks update-kubeconfig' command
var awsEksCmdUpdateKubeconfigCmd = &cobra.Command{
	Use:   "update-kubeconfig",
	Short: "Execute 'aws eks update-kubeconfig' command",
	Long: `This command executes 'aws eks update-kubeconfig' to download 'kubeconfig' from an EKS cluster and saves it to a file. The command executes 'aws eks update-kubeconfig' in three different ways:

1. If all the required parameters (cluster name and AWS profile/role) are provided on the command-line,
then 'atmos' executes the command without requiring the 'atmos.yaml' CLI config and context.
For example: atmos aws eks update-kubeconfig --profile=<profile> --name=<cluster_name>

2. If 'component' and 'stack' are provided on the command-line,
then 'atmos' executes the command using the 'atmos.yaml' CLI config and stack's context by searching for the following settings:
- 'components.helmfile.cluster_name_pattern' in the 'atmos.yaml' CLI config (and calculates the '--name' parameter using the pattern)
- 'components.helmfile.helm_aws_profile_pattern' in the 'atmos.yaml' CLI config (and calculates the '--profile' parameter using the pattern)
- 'components.helmfile.kubeconfig_path' in the 'atmos.yaml' CLI config
- the variables for the component in the provided stack
- 'region' from the variables for the component in the stack
For example: atmos aws eks update-kubeconfig <component> -s <stack>

3. Combination of the above. Provide a component and a stack, and override other parameters on the command line.
For example: atmos aws eks update-kubeconfig <component> -s <stack> --kubeconfig=<path_to_kubeconfig> --region=<region>

See https://docs.aws.amazon.com/cli/latest/reference/eks/update-kubeconfig.html for more information.`,
	// Unknown flags are rejected here (not forwarded verbatim to the AWS CLI).
	FParseErrWhitelist: struct{ UnknownFlags bool }{UnknownFlags: false},
	Run: func(cmd *cobra.Command, args []string) {
		// Delegate to the shared exec implementation; any error aborts the process.
		err := e.ExecuteAwsEksUpdateKubeconfigCommand(cmd, args)
		if err != nil {
			u.LogErrorAndExit(err)
		}
	},
}
// https://docs.aws.amazon.com/cli/latest/reference/eks/update-kubeconfig.html
//
// init registers the flags mirroring the 'aws eks update-kubeconfig' options
// and attaches this subcommand to the parent 'aws eks' command.
func init() {
	// Flags are parsed by cobra rather than passed through untouched.
	awsEksCmdUpdateKubeconfigCmd.DisableFlagParsing = false

	awsEksCmdUpdateKubeconfigCmd.PersistentFlags().StringP("stack", "s", "", "atmos aws eks update-kubeconfig <component> -s <stack>")
	awsEksCmdUpdateKubeconfigCmd.PersistentFlags().String("profile", "", "atmos aws eks update-kubeconfig --profile <profile>")
	awsEksCmdUpdateKubeconfigCmd.PersistentFlags().String("name", "", "atmos aws eks update-kubeconfig --name <cluster name>")
	awsEksCmdUpdateKubeconfigCmd.PersistentFlags().String("region", "", "atmos aws eks update-kubeconfig --region <region>")
	awsEksCmdUpdateKubeconfigCmd.PersistentFlags().String("kubeconfig", "", "atmos aws eks update-kubeconfig --kubeconfig <path_to_kubeconfig>")
	awsEksCmdUpdateKubeconfigCmd.PersistentFlags().String("role-arn", "", "atmos aws eks update-kubeconfig --role-arn <ARN>")
	awsEksCmdUpdateKubeconfigCmd.PersistentFlags().Bool("dry-run", false, "atmos aws eks update-kubeconfig --dry-run=true")
	awsEksCmdUpdateKubeconfigCmd.PersistentFlags().Bool("verbose", false, "atmos aws eks update-kubeconfig --verbose=true")
	awsEksCmdUpdateKubeconfigCmd.PersistentFlags().String("alias", "", "atmos aws eks update-kubeconfig --alias <alias for the cluster context name>")

	awsEksCmd.AddCommand(awsEksCmdUpdateKubeconfigCmd)
}
|
package main
// Group pairs a group identifier with its display name.
// JSON field names ("groupid", "name") follow the upstream API's schema.
type Group struct {
	ID   string `json:"groupid"`
	Name string `json:"name"`
}
|
package query
import (
"reflect"
"testing"
"github.com/golang/protobuf/descriptor"
test "github.com/osechet/go-datastore/_proto/osechet/test"
datastore "google.golang.org/genproto/googleapis/datastore/v1"
)
// TestApply checks Apply over a small in-memory storage:
//   - empty storage,
//   - pass-through (no order, no filter),
//   - ascending ordering on int32_value,
//   - a composite range filter on int64_value (43 <= v < 50).
//
// Idiom fix: redundant element types inside typed slice composite literals
// removed (gofmt -s style); behavior is unchanged.
func TestApply(t *testing.T) {
	data := []descriptor.Message{
		&test.Tested{Int32Value: 55, Int64Value: 35},
		&test.Tested{Int32Value: 68, Int64Value: 43},
		&test.Tested{Int32Value: 42, Int64Value: 56},
		&test.Tested{Int32Value: 47, Int64Value: 43},
	}
	type args struct {
		storage Storage
		query   datastore.Query
		t       reflect.Type
		results ResultSet
	}
	tests := []struct {
		name string
		args args
		want []descriptor.Message
	}{
		{
			"empty storage",
			args{
				SliceStorage{},
				datastore.Query{},
				reflect.TypeOf(test.Tested{}),
				NewSliceResultSet(),
			},
			[]descriptor.Message{},
		},
		{
			"no order - no filter",
			args{
				SliceStorage{data},
				datastore.Query{},
				reflect.TypeOf(test.Tested{}),
				NewSliceResultSet(),
			},
			[]descriptor.Message{
				&test.Tested{Int32Value: 55, Int64Value: 35},
				&test.Tested{Int32Value: 68, Int64Value: 43},
				&test.Tested{Int32Value: 42, Int64Value: 56},
				&test.Tested{Int32Value: 47, Int64Value: 43},
			},
		},
		{
			"order on ascending int32_value - no filter",
			args{
				SliceStorage{data},
				datastore.Query{
					Order: []*datastore.PropertyOrder{
						{
							Property:  &datastore.PropertyReference{Name: "int32_value"},
							Direction: datastore.PropertyOrder_ASCENDING,
						},
					},
				},
				reflect.TypeOf(test.Tested{}),
				NewSliceResultSet(),
			},
			[]descriptor.Message{
				&test.Tested{Int32Value: 42, Int64Value: 56},
				&test.Tested{Int32Value: 47, Int64Value: 43},
				&test.Tested{Int32Value: 55, Int64Value: 35},
				&test.Tested{Int32Value: 68, Int64Value: 43},
			},
		},
		{
			"no order - filter on int64_value",
			args{
				SliceStorage{data},
				datastore.Query{
					Filter: &datastore.Filter{
						FilterType: &datastore.Filter_CompositeFilter{
							CompositeFilter: &datastore.CompositeFilter{
								Filters: []*datastore.Filter{
									{
										FilterType: &datastore.Filter_PropertyFilter{
											PropertyFilter: &datastore.PropertyFilter{
												Property: &datastore.PropertyReference{Name: "int64_value"},
												Op:       datastore.PropertyFilter_GREATER_THAN_OR_EQUAL,
												Value: &datastore.Value{
													ValueType: &datastore.Value_IntegerValue{IntegerValue: 43},
												},
											},
										},
									},
									{
										FilterType: &datastore.Filter_PropertyFilter{
											PropertyFilter: &datastore.PropertyFilter{
												Property: &datastore.PropertyReference{Name: "int64_value"},
												Op:       datastore.PropertyFilter_LESS_THAN,
												Value: &datastore.Value{
													ValueType: &datastore.Value_IntegerValue{IntegerValue: 50},
												},
											},
										},
									},
								},
							},
						},
					},
				},
				reflect.TypeOf(test.Tested{}),
				NewSliceResultSet(),
			},
			[]descriptor.Message{
				&test.Tested{Int32Value: 68, Int64Value: 43},
				&test.Tested{Int32Value: 47, Int64Value: 43},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			Apply(tt.args.storage, tt.args.query, tt.args.t, tt.args.results)
			if got := tt.args.results.(*SliceResultSet).Items; !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Apply() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package gbytes_test
import (
"os/exec"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"github.com/pivotal-cf/on-demand-service-broker/system_tests/test_helpers/gbytes"
)
// This suite exercises the custom gbytes.AnySay matcher, which succeeds when
// the expected pattern appears on EITHER stdout or stderr of a gexec.Session.
var _ = Describe("gbytes.AnySay", func() {
	var (
		session *gexec.Session
	)

	Context("passing something other than a gexec.Session", func() {
		It("should return an error", func() {
			_, err := gbytes.AnySay("foo").Match("foo")
			Expect(err).To(MatchError("expected to match on a session"))
		})
	})

	Context("when stdout matches", func() {
		BeforeEach(func() {
			cmd := exec.Command("/bin/bash", "-c", "echo foo")
			var err error
			session, err = gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
			Expect(err).NotTo(HaveOccurred())
			Eventually(session).Should(gexec.Exit())
		})

		It("returns true", func() {
			res, err := gbytes.AnySay("foo").Match(session)
			Expect(err).NotTo(HaveOccurred())
			Expect(res).To(BeTrue())
		})
	})

	Context("when neither matches", func() {
		BeforeEach(func() {
			cmd := exec.Command("/bin/bash", "-c", "echo bar; echo sha >& 2")
			var err error
			session, err = gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
			Expect(err).NotTo(HaveOccurred())
			Eventually(session).Should(gexec.Exit())
		})

		It("returns false", func() {
			matcher := gbytes.AnySay("foo")
			res, err := matcher.Match(session)
			Expect(err).NotTo(HaveOccurred())
			Expect(res).To(BeFalse())

			message := matcher.FailureMessage(session)
			expectedMessage := `Expected to match on STDOUT or STDERR.
STDOUT:
Got stuck at:
bar
Waiting for:
foo
STDERR:
Got stuck at:
sha
Waiting for:
foo`
			Expect(message).To(ContainSubstring(expectedMessage))
		})
	})

	Context("when stderr matches", func() {
		BeforeEach(func() {
			cmd := exec.Command("/bin/bash", "-c", "echo foo >& 2")
			var err error
			session, err = gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
			Expect(err).NotTo(HaveOccurred())
			Eventually(session).Should(gexec.Exit())
		})

		It("returns true", func() {
			res, err := gbytes.AnySay("foo").Match(session)
			Expect(err).NotTo(HaveOccurred())
			Expect(res).To(BeTrue())
		})
	})

	Context("negated failure messages", func() {
		BeforeEach(func() {
			cmd := exec.Command("/bin/bash", "-c", "echo foo ; echo bar >& 2")
			var err error
			session, err = gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
			Expect(err).NotTo(HaveOccurred())
			Eventually(session).Should(gexec.Exit())
		})

		It("stdout unexpected match returns correct message", func() {
			matcher := gbytes.AnySay("foo")
			_, err := matcher.Match(session)
			Expect(err).NotTo(HaveOccurred())
			expectedMessage := `Expected to not match on STDOUT or STDERR.
STDOUT:
Saw:
foo
Which matches the unexpected:
foo
STDERR:
`
			Expect(matcher.NegatedFailureMessage(session)).To(ContainSubstring(expectedMessage))
		})

		// Fix: this spec exercises the STDERR side but its description was a
		// copy-paste of the stdout spec above ("stdout unexpected match ...").
		It("stderr unexpected match returns correct message", func() {
			matcher := gbytes.AnySay("bar")
			_, err := matcher.Match(session)
			Expect(err).NotTo(HaveOccurred())
			expectedMessage := `Expected to not match on STDOUT or STDERR.
STDOUT:
STDERR:
Saw:
bar
Which matches the unexpected:
bar
`
			Expect(matcher.NegatedFailureMessage(session)).To(ContainSubstring(expectedMessage))
		})
	})
})
|
package main
import (
"encoding/json"
"errors"
"fmt"
"github.com/fatih/color"
"github.com/go-ini/ini"
"github.com/urfave/cli"
"io/ioutil"
"math"
"net/http"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"time"
)
// Application name
var APPNAME = "gitlab-ci-linter"

// Version of the program ("0.0.0-dev" unless replaced at build time —
// presumably via -ldflags; not shown here)
var VERSION = "0.0.0-dev"

// Revision of the program
var REVISION = "HEAD"

// Build date and time of the program
var BUILDTIME = ""

// Name of the git repo directory
const gitRepoDirectory = ".git"

// Name of the git repo config file in a git repo directory
const gitRepoConfigFilename = "config"

// Filename of a gitlab-ci file. Used to find the gitlab-ci file if no path is given at call time
const gitlabCiFileName = ".gitlab-ci.yml"

// Default Gitlab instance URL to use
const defaultGitlabRootUrl = "https://gitlab.com"

// Path of the Gitlab CI lint API, appended to the root url
const gitlabApiCiLintPath = "/api/v4/ci/lint"

// The Gitlab instance root URL to use.
var gitlabRootUrl string

// The full path of the gitlab-ci file to check, if given at call time.
// If no path is given, this stays empty and the program searches for the file
// using gitlabCiFileName: the search starts at directoryRoot and goes up the
// directory hierarchy until a file is found or the filesystem root is reached.
var gitlabCiFilePath string

// Directory to start searching for gitlab-ci file and git repository
var directoryRoot string

// Timeout in seconds for HTTP requests to the Gitlab API.
// A request fails if it lasts longer than this timeout.
var httpRequestTimeout uint = 5

// Tells if output should be colorized or not
var colorMode = true

// Tells if verbose mode is on or off
var verboseMode = false

// GitlabAPILintRequest is the JSON body POSTed to the CI lint API.
type GitlabAPILintRequest struct {
	Content string `json:"content"`
}

// GitlabAPILintResponse is the JSON answer of the CI lint API.
// Status is "valid" or "invalid"; Errors carries lint messages when invalid;
// Error is set when the API itself failed.
type GitlabAPILintResponse struct {
	Status string   `json:"status,omitempty"`
	Error  string   `json:"error,omitempty"`
	Errors []string `json:"errors,omitempty"`
}

// Outcome codes returned by the git hook helpers
// (createGitHookLink / deleteGitHookLink).
const (
	HookError = iota
	HookCreated
	HookAlreadyCreated
	HookAlreadyExists
	HookDeleted
	HookNotExisting
	HookNotMatching
)
// findGitRepo searches, starting at the given directory, for a git repository
// directory. It goes up the filesystem hierarchy until a repository is found
// or the root is reached.
// A git repository directory is a '.git' folder (gitRepoDirectory constant)
// containing a 'config' file (gitRepoConfigFilename constant).
//
// Fix: stat results are only used when err == nil. Previously a stat error
// other than "not exist" (e.g. a permission error) left fileInfo nil and the
// subsequent fileInfo.IsDir() call panicked.
func findGitRepo(directory string) (string, error) {
	candidate := path.Join(directory, gitRepoDirectory)
	fileInfo, err := os.Stat(candidate)
	if err == nil && fileInfo.IsDir() {
		// Found a git directory, check if it has a config file
		fileInfo, err = os.Stat(path.Join(candidate, gitRepoConfigFilename))
		if err == nil && !fileInfo.IsDir() {
			return candidate, nil
		}
	}
	// If we are at the root of the filesystem, no repository was found
	if directory[len(directory)-1] == filepath.Separator {
		return "", errors.New("not found")
	}
	// Otherwise check the parent directory
	return findGitRepo(filepath.Dir(directory))
}
// findGitlabCiFile searches, starting at the given directory, for a gitlab-ci
// file (gitlabCiFileName). It goes up the filesystem hierarchy until a file
// is found or the root is reached.
//
// Fix: the stat result is only used when err == nil. Previously a stat error
// other than "not exist" left fileInfo nil and fileInfo.IsDir() panicked.
func findGitlabCiFile(directory string) (string, error) {
	candidate := path.Join(directory, gitlabCiFileName)
	fileInfo, err := os.Stat(candidate)
	if err == nil && !fileInfo.IsDir() {
		return candidate, nil
	}
	// If we are at the root of the filesystem, no gitlab-ci file was found
	if directory[len(directory)-1] == filepath.Separator {
		return "", errors.New("not found")
	}
	// Otherwise check the parent directory
	return findGitlabCiFile(filepath.Dir(directory))
}
// getGitOriginRemoteUrl extracts the 'origin' remote url from a git repo
// directory by parsing its INI config file.
// It returns an empty string (and no error) when the repository has no
// 'origin' remote or the remote has no url.
//
// Fix: the error is now returned without also being printed (the previous
// fmt.Println was a log-and-return; callers already report the error).
func getGitOriginRemoteUrl(gitDirectory string) (string, error) {
	cfg, err := ini.Load(path.Join(gitDirectory, gitRepoConfigFilename))
	if err != nil {
		return "", err
	}
	remote, err := cfg.GetSection("remote \"origin\"")
	if err == nil && remote.HasKey("url") {
		return remote.Key("url").String(), nil
	}
	return "", nil
}
// Pre-compiled patterns recognizing the two git remote URL forms.
// Compiled once at package scope instead of on every call.
var (
	httpRemoteUrlRe = regexp.MustCompile(`^(https?://[^/]*).*$`)
	sshRemoteUrlRe  = regexp.MustCompile(`^([^@]*@)?([^:]+)`)
)

// httpiseRemoteUrl transforms a git remote url, which can be a full http or
// ssh url, into a simple http root URL (scheme + host).
// It returns "" when the remote url matches neither form.
func httpiseRemoteUrl(remoteUrl string) string {
	// http(s) remote: keep scheme and host, drop the path
	if matches := httpRemoteUrlRe.FindStringSubmatch(remoteUrl); len(matches) >= 2 {
		return matches[1]
	}
	// ssh remote ([user@]host:path): keep the host, force http scheme
	if matches := sshRemoteUrlRe.FindStringSubmatch(remoteUrl); len(matches) >= 3 {
		return "http://" + matches[2]
	}
	return ""
}
// initGitlabHttpClientRequest builds an HTTP client (with the configured
// timeout) and a request for the given method/url with content as body.
// Standard headers (Accept, Content-Type, User-Agent) are set on the request.
//
// Fix: the error from http.NewRequest is now checked before touching req.
// Previously the headers were added to a possibly-nil request, which would
// panic on a malformed url.
func initGitlabHttpClientRequest(method string, url string, content string) (*http.Client, *http.Request, error) {
	httpClient := &http.Client{
		Timeout: time.Second * time.Duration(httpRequestTimeout),
	}

	req, err := http.NewRequest(method, url, strings.NewReader(content))
	if err != nil {
		return httpClient, nil, err
	}
	req.Header.Add("Accept", "*/*")
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("User-Agent", fmt.Sprintf("%s/%s", APPNAME, VERSION))
	return httpClient, req, nil
}
// Check if we can get a response with the rootUrl on the API CI Lint endpoint,
// and whether a redirection occurs. If a redirection is detected, the
// redirected root URL is returned instead of rootUrl.
// The probe is done with a GET because redirect responses are only emitted for
// GET requests, while the lint API itself must ultimately be called with POST.
func checkGitlabAPIUrl(rootUrl string) (string, error) {
	newRootUrl := rootUrl
	lintURL := rootUrl + gitlabApiCiLintPath
	if verboseMode {
		fmt.Printf("Checking '%s' (using '%s')...\n", rootUrl, lintURL)
	}
	httpClient, req, err := initGitlabHttpClientRequest("GET", lintURL, "")
	if err != nil {
		return newRootUrl, err
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		return newRootUrl, err
	}
	defer resp.Body.Close()

	// Getting the full URL used for the last query, after following potential redirection
	lastUrl := resp.Request.URL.String()
	// Try to get the redirected root URL by removing the gitlab API path from the last used URL
	lastRootUrl := strings.TrimSuffix(lastUrl, gitlabApiCiLintPath)
	// If trimming produced a non-empty, different string, the final URL ended
	// with the API path: adopt its root as the (possibly redirected) root URL
	if lastRootUrl != "" && lastRootUrl != lastUrl {
		newRootUrl = lastRootUrl
	}
	if verboseMode {
		fmt.Printf("Url '%s' validated\n", newRootUrl)
	}
	return newRootUrl, nil
}
// lintGitlabCIUsingAPI sends the content of a gitlab-ci file to a Gitlab
// instance lint API to check its validity.
// status reports whether the content is valid; when invalid, the API's lint
// messages are returned in msgs. err is non-nil on transport or decoding
// problems, or when the API answered with an error field.
//
// Fix: the error from initGitlabHttpClientRequest is now checked before use
// (it was previously ignored, so a bad URL could lead to calling Do with a
// nil request). Also dropped a redundant []byte conversion of body.
func lintGitlabCIUsingAPI(rootUrl string, ciFileContent string) (status bool, msgs []string, err error) {
	msgs = []string{}
	status = false

	// Prepare the JSON content of the POST request:
	// {
	//   "content": "<ESCAPED CONTENT OF THE GITLAB-CI FILE>"
	// }
	var reqParams = GitlabAPILintRequest{Content: ciFileContent}
	reqBody, _ := json.Marshal(reqParams)

	// Prepare requesting the API
	lintURL := rootUrl + gitlabApiCiLintPath
	if verboseMode {
		fmt.Printf("Querying %s...\n", lintURL)
	}
	httpClient, req, err := initGitlabHttpClientRequest("POST", lintURL, string(reqBody))
	if err != nil {
		return
	}

	// Make the request to the API
	resp, err := httpClient.Do(req)
	if err != nil {
		return
	}
	defer resp.Body.Close()

	// Get the results
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return
	}
	var result GitlabAPILintResponse
	err = json.Unmarshal(body, &result)
	if err != nil {
		return
	}

	// Analyse the results
	if result.Status == "valid" {
		status = true
		return
	}
	if result.Status == "invalid" {
		msgs = result.Errors
	}
	if result.Error != "" {
		err = errors.New(result.Error)
	}
	return
}
// processPathArgument analyses a PATH argument, which can be a directory or a
// file, and stores it either as the gitlab-ci file to check (file) or as the
// directory from which to start searching (directory). Non-existing or
// unreadable paths are silently ignored (the later path validation in
// app.Before reports them).
//
// Fix: the stat result is only used when err == nil. Previously a stat error
// other than "not exist" left fileInfo nil and fileInfo.IsDir() panicked.
func processPathArgument(path string) {
	fileInfo, err := os.Stat(path)
	if err != nil {
		return
	}
	if fileInfo.IsDir() {
		directoryRoot, _ = filepath.Abs(path)
	} else {
		gitlabCiFilePath, _ = filepath.Abs(path)
	}
}
// guessGitlabAPIFromGitRepo guesses the Gitlab API root URL from the 'origin'
// remote of the given git repository and validates that it responds
// (following any redirect via checkGitlabAPIUrl).
// When reading the repository config fails, that error is returned via the
// named return values (apiRootUrl stays empty).
func guessGitlabAPIFromGitRepo(gitRepoPath string) (apiRootUrl string, err error) {
	remoteUrl, err := getGitOriginRemoteUrl(gitRepoPath)
	if err == nil {
		// NOTE(review): this ':=' deliberately shadows the named return err;
		// failures in this block are returned explicitly below.
		httpRemoteUrl, err := checkGitlabAPIUrl(httpiseRemoteUrl(remoteUrl))
		if err != nil {
			return "", err
		}
		if httpRemoteUrl != "" {
			apiRootUrl = httpRemoteUrl
			if verboseMode {
				fmt.Printf("API url found: %s\n", httpRemoteUrl)
			}
		} else {
			return "", errors.New("Unknown error occurs")
		}
	}
	return
}
// commandCheck implements the 'check' command, the program's main action.
// It validates the syntax of a .gitlab-ci.yml file using the CI Lint API of a
// Gitlab instance:
//  1. locate the gitlab-ci file (unless one was given),
//  2. locate the enclosing .git repository,
//  3. guess the Gitlab root URL from the repository's origin remote
//     (falling back to the default URL when no repository is found),
//  4. POST the file content to the lint API; on invalid content the API's
//     error messages are printed and the program exits with an error code.
//
// Fix: the fallback repository search from directoryRoot now runs when the
// first search FAILED (err != nil). The condition was inverted, which threw
// away a successful first search and never retried a failed one.
func commandCheck(c *cli.Context) error {
	if c.Args().Present() && c.Args().Get(0) != "" {
		processPathArgument(c.Args().Get(0))
	}

	if verboseMode {
		fmt.Printf("Settings:\n directoryRoot: %s\n gitlabCiFilePath: %s\n", directoryRoot, gitlabCiFilePath)
	}

	// Find gitlab-ci file, if not given
	if gitlabCiFilePath == "" {
		file, err := findGitlabCiFile(directoryRoot)
		if err != nil {
			fmt.Println("No gitlab-ci file found")
			return nil
		}
		gitlabCiFilePath = file
	}

	cwd, _ := os.Getwd()
	relativeGitlabCiFilePath, _ := filepath.Rel(cwd, gitlabCiFilePath)

	// Find git repository. First, start from gitlab-ci file location
	gitRepoPath, err := findGitRepo(filepath.Dir(gitlabCiFilePath))
	if err != nil {
		// if not found, search from directoryRoot
		gitRepoPath, _ = findGitRepo(directoryRoot)
	}

	// Extract origin remote from repository and guess the gitlab url
	if gitRepoPath != "" {
		gitlabRootUrl, err = guessGitlabAPIFromGitRepo(gitRepoPath)
		if err != nil {
			return cli.NewExitError(fmt.Sprintf("No valid and responding Gitlab API URL found from repository's origin remote: %s", err), 5)
		}
	} else {
		yellow := color.New(color.FgYellow).SprintFunc()
		fmt.Fprintf(color.Output, yellow("No GIT repository found, using default Gitlab API '%s'\n"), gitlabRootUrl)
	}

	fmt.Printf("Validating %s... ", relativeGitlabCiFilePath)
	if verboseMode {
		fmt.Printf("\n")
	}

	// Call the API to validate the gitlab-ci file
	ciFileContent, err := ioutil.ReadFile(gitlabCiFilePath)
	if err != nil {
		return cli.NewExitError(fmt.Sprintf("Error while reading '%s' file content: %s", relativeGitlabCiFilePath, err), 5)
	}
	status, errorMessages, err := lintGitlabCIUsingAPI(gitlabRootUrl, string(ciFileContent))
	if err != nil {
		return cli.NewExitError(fmt.Sprintf("Error querying Gitlab API '%s' for CI lint: %s", gitlabRootUrl, err), 5)
	}

	if !status {
		if verboseMode {
			fmt.Printf("%s ", relativeGitlabCiFilePath)
		}
		red := color.New(color.FgRed).SprintFunc()
		fmt.Fprintf(color.Output, "%s\n", red("KO"))
		messages := red(strings.Join(errorMessages, "\n"))
		os.Stderr.WriteString(fmt.Sprintf("%s\n", messages))
		return cli.NewExitError("", 10)
	}

	if verboseMode {
		fmt.Printf("%s ", relativeGitlabCiFilePath)
	}
	green := color.New(color.FgGreen).SprintFunc()
	fmt.Fprintf(color.Output, "%s\n", green("OK"))
	return nil
}
// createGitHookLink installs this binary as the git hook hookName by creating
// a symlink in <gitRepoPath>/hooks. It returns one of the Hook* status codes:
//   - HookCreated:        the symlink was created
//   - HookAlreadyCreated:  an identical symlink already exists
//   - HookAlreadyExists:   an unrelated hook is already installed
//   - HookError:           a filesystem error occurred (err is set)
//
// Fixes: a non-NotExist Lstat error previously dereferenced a nil FileInfo;
// a relative symlink target is now resolved against the hooks directory
// instead of the process working directory (filepath.Abs uses the cwd).
// Else-after-return chains flattened.
func createGitHookLink(gitRepoPath string, hookName string) (int, error) {
	currentExe, err := os.Executable()
	if err != nil {
		return HookError, err
	}

	hooksDir := path.Join(gitRepoPath, "hooks")
	if err = os.MkdirAll(hooksDir, 0755); err != nil {
		return HookError, err
	}
	hookPath := path.Join(hooksDir, hookName)

	fi, err := os.Lstat(hookPath)
	if os.IsNotExist(err) {
		// There is no hook yet: install ourselves.
		if err = os.Symlink(currentExe, hookPath); err != nil {
			return HookError, err
		}
		return HookCreated, nil
	}
	if err != nil {
		return HookError, err
	}

	// Something is already there; maybe it's already ourselves?
	if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
		return HookAlreadyExists, nil
	}
	linkDest, err := os.Readlink(hookPath)
	if err != nil {
		return HookError, err
	}
	if !filepath.IsAbs(linkDest) {
		// Relative link targets are relative to the hooks directory.
		linkDest = filepath.Join(hooksDir, linkDest)
	}
	if linkDest == currentExe {
		return HookAlreadyCreated, nil
	}
	return HookAlreadyExists, nil
}
// deleteGitHookLink removes the git hook hookName from <gitRepoPath>/hooks,
// but only when it is a symlink pointing at this very binary. It returns one
// of the Hook* status codes:
//   - HookDeleted:      our hook symlink was removed
//   - HookNotExisting:  no hook with that name exists
//   - HookNotMatching:  the hook exists but was not installed by us
//   - HookError:        a filesystem error occurred (err is set)
//
// Fixes: a non-NotExist Lstat error previously dereferenced a nil FileInfo;
// a relative symlink target is now resolved against the hooks directory
// instead of the process working directory. Else-after-return flattened.
func deleteGitHookLink(gitRepoPath string, hookName string) (int, error) {
	hooksDir := path.Join(gitRepoPath, "hooks")
	hookPath := path.Join(hooksDir, hookName)

	fi, err := os.Lstat(hookPath)
	if os.IsNotExist(err) {
		return HookNotExisting, nil
	}
	if err != nil {
		return HookError, err
	}

	currentExe, err := os.Executable()
	if err != nil {
		return HookError, err
	}

	// Only remove the hook when it is a symlink pointing at ourselves.
	if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
		return HookNotMatching, nil
	}
	linkDest, err := os.Readlink(hookPath)
	if err != nil {
		return HookError, err
	}
	if !filepath.IsAbs(linkDest) {
		// Relative link targets are relative to the hooks directory.
		linkDest = filepath.Join(hooksDir, linkDest)
	}
	if verboseMode {
		fmt.Println(linkDest)
	}
	if linkDest != currentExe {
		return HookNotMatching, nil
	}
	if err = os.Remove(hookPath); err != nil {
		return HookError, err
	}
	return HookDeleted, nil
}
// commandInstall implements the 'install' command: it symlinks this binary as
// the repository's git pre-commit hook so each commit lints the CI file.
//
// Fixes: the fallback repository search from directoryRoot now runs when the
// first search FAILED (the condition was inverted, as in commandCheck);
// "Unkown" typo corrected; needless fmt.Sprintf wrappers removed (go vet
// flags non-constant format strings).
func commandInstall(c *cli.Context) error {
	if c.Args().Present() && c.Args().Get(0) != "" {
		processPathArgument(c.Args().Get(0))
	}

	// Find git repository. First, start from gitlab-ci file location
	gitRepoPath, err := findGitRepo(filepath.Dir(gitlabCiFilePath))
	if err != nil {
		// if not found, search from directoryRoot
		gitRepoPath, _ = findGitRepo(directoryRoot)
	}
	if gitRepoPath == "" {
		return cli.NewExitError("No GIT repository found, can't install a hook", 5)
	}
	if verboseMode {
		fmt.Printf("Git repository found: %s\n", gitRepoPath)
	}

	// Extract origin remote from repository and guess the gitlab url
	_, err = guessGitlabAPIFromGitRepo(gitRepoPath)
	if err != nil {
		return cli.NewExitError("No valid and responding Gitlab API URL found from repository's origin remote, can't install a hook", 5)
	}

	status, err := createGitHookLink(gitRepoPath, "pre-commit")
	if err != nil {
		return cli.NewExitError(err, 5)
	}
	switch status {
	case HookAlreadyExists:
		yellow := color.New(color.FgYellow).SprintFunc()
		msg := yellow("A pre-commit hook already exists\nPlease install manually by adding a call to me in your pre-commit script.")
		return cli.NewExitError(msg, 4)
	case HookAlreadyCreated:
		cyan := color.New(color.FgCyan).SprintFunc()
		fmt.Fprint(color.Output, cyan("Already installed.\n"))
	case HookCreated:
		green := color.New(color.FgGreen).SprintFunc()
		fmt.Fprintf(color.Output, green("Git pre-commit hook installed in %s\n"), filepath.Dir(gitRepoPath))
	default:
		return cli.NewExitError("Unknown error", 5)
	}

	return nil
}
// commandUninstall implements the 'uninstall' command: it removes the git
// pre-commit hook previously installed by commandInstall (only when the hook
// actually points at this binary).
//
// Fixes: the fallback repository search from directoryRoot now runs when the
// first search FAILED (inverted condition, as in commandCheck); user-facing
// typos fixed ("Unkown" -> "Unknown", "uinstalled" -> "uninstalled"); the
// error message now says "uninstall" instead of "install".
func commandUninstall(c *cli.Context) error {
	if c.Args().Present() && c.Args().Get(0) != "" {
		processPathArgument(c.Args().Get(0))
	}

	// Find git repository. First, start from gitlab-ci file location
	gitRepoPath, err := findGitRepo(filepath.Dir(gitlabCiFilePath))
	if err != nil {
		// if not found, search from directoryRoot
		gitRepoPath, _ = findGitRepo(directoryRoot)
	}
	if gitRepoPath == "" {
		return cli.NewExitError("No GIT repository found, can't uninstall a hook", 5)
	}
	if verboseMode {
		fmt.Printf("Git repository found: %s\n", gitRepoPath)
	}

	status, err := deleteGitHookLink(gitRepoPath, "pre-commit")
	if err != nil {
		return cli.NewExitError(err, 5)
	}
	switch status {
	case HookNotMatching:
		red := color.New(color.FgRed).SprintFunc()
		msg := red("Unknown pre-commit hook\nPlease uninstall manually.")
		return cli.NewExitError(msg, 4)
	case HookNotExisting:
		yellow := color.New(color.FgYellow).SprintFunc()
		fmt.Fprint(color.Output, yellow("No pre-commit hook found.\n"))
	case HookDeleted:
		green := color.New(color.FgGreen).SprintFunc()
		fmt.Fprint(color.Output, green("Git pre-commit hook uninstalled.\n"))
	default:
		return cli.NewExitError("Unknown error", 5)
	}

	return nil
}
// main wires up the urfave/cli application: global flags, the
// check/install/uninstall/version commands, pre-run path validation, and the
// default action (check). It then runs the app against os.Args.
func main() {
	// Custom printer used by both the -V flag and the 'version' command.
	cli.VersionPrinter = func(c *cli.Context) {
		fmt.Printf("version=%s revision=%s built on=%s\n", VERSION, REVISION, BUILDTIME)
	}

	cli.AppHelpTemplate = `{{.Name}} - {{.Usage}}
version {{if .Version}}{{.Version}}{{end}}
{{if len .Authors}}{{range .Authors}}{{ . }}{{end}}{{end}} - https://gitlab.com/orobardet/gitlab-ci-linter
Usage:
{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} [command [command options]]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}
{{if .VisibleFlags}}Global options:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
{{if .Description}}Arguments:
{{.Description}}{{end}}
{{if .Commands}}Commands:
{{range .Commands}}{{if not .HideHelp}} {{join .Names ", "}}{{ "\t"}}{{.Usage}}{{ "\n" }}{{end}}{{end}}
If no command is given, 'check 'is used by default
{{end}}`

	app := cli.NewApp()
	app.Name = APPNAME
	// Truncate the revision to at most 7 characters (short git hash style).
	app.Version = fmt.Sprintf("%s (%s)", VERSION, REVISION[:int(math.Min(float64(len(REVISION)), 7))])
	app.Authors = []cli.Author{
		{Name: "Olivier ROBARDET"},
	}
	app.Usage = "lint your .gitlab-ci.yml using the Gitlab lint API"
	app.EnableBashCompletion = true

	pathArgumentDescription := `If PATH if given, it will depending of its type on filesystem:
- if a file, it will be used as the gitlab-ci file to check (similar to global --ci-file option)
- if a directory, it will be used as the folder from where to search for a ci file and a git repository (similar to global --directory option)
PATH have precedence over --ci-file and --directory options.`

	app.ArgsUsage = "[PATH]"
	app.Description = pathArgumentDescription

	// Global flags; most write straight into the package-level settings vars.
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:        "gitlab-url,u",
			Value:       defaultGitlabRootUrl,
			Usage:       "root `URL` of the Gitlab instance to use API",
			EnvVar:      "GCL_GITLAB_URL",
			Destination: &gitlabRootUrl,
		},
		cli.StringFlag{
			Name:        "ci-file,f",
			Usage:       "`FILE` is the relative or absolute path to the gitlab-ci file",
			EnvVar:      "GCL_GITLAB_CI_FILE",
			Destination: &gitlabCiFilePath,
		},
		cli.StringFlag{
			Name:        "directory,d",
			Value:       ".",
			Usage:       "`DIR` is the directory from where to search for gitlab-ci file and git repository",
			EnvVar:      "GCL_DIRECTORY",
			Destination: &directoryRoot,
		},
		cli.UintFlag{
			Name:        "timeout,t",
			Value:       httpRequestTimeout,
			Usage:       "timeout in second after which http request to Gitlab API will timeout (and the program will fails)",
			EnvVar:      "GCL_TIMEOUT",
			Destination: &httpRequestTimeout,
		},
		cli.BoolFlag{
			Name:   "no-color,n",
			Usage:  "don't color output. By defaults the output is colorized if a compatible terminal is detected.",
			EnvVar: "GCL_NOCOLOR",
		},
		cli.BoolFlag{
			Name:        "verbose,v",
			Usage:       "verbose mode",
			EnvVar:      "GCL_VERBOSE",
			Destination: &verboseMode,
		},
	}
	cli.VersionFlag = cli.BoolFlag{
		Name:  "version, V",
		Usage: "print the version information",
	}
	app.Commands = []cli.Command{
		{
			Name:        "check",
			Aliases:     []string{"c"},
			Usage:       "Check the .gitlab-ci.yml (default command if none is given)",
			Action:      commandCheck,
			ArgsUsage:   "[PATH]",
			Description: pathArgumentDescription,
		},
		{
			Name:        "install",
			Aliases:     []string{"i"},
			Usage:       "install as git pre-commit hook",
			Action:      commandInstall,
			ArgsUsage:   "[PATH]",
			Description: pathArgumentDescription,
		},
		{
			Name:        "uninstall",
			Aliases:     []string{"u"},
			Usage:       "uninstall the git pre-commit hook",
			Action:      commandUninstall,
			ArgsUsage:   "[PATH]",
			Description: pathArgumentDescription,
		},
		{
			Name:    "version",
			Aliases: []string{"v"},
			Usage:   "Print the version information",
			Action: func(c *cli.Context) {
				cli.ShowVersion(c)
			},
		},
	}

	// Validate global options before any command action runs.
	app.Before = func(c *cli.Context) error {
		if c.Bool("no-color") {
			colorMode = false
		}
		if !colorMode {
			color.NoColor = true
		}

		// Check if the given directory path exists
		if directoryRoot != "" {
			directoryRoot, _ = filepath.Abs(directoryRoot)
			fileInfo, err := os.Stat(directoryRoot)
			if os.IsNotExist(err) {
				return cli.NewExitError(fmt.Sprintf("'%s' does not exists", directoryRoot), 1)
			}
			if !fileInfo.IsDir() {
				return cli.NewExitError(fmt.Sprintf("'%s' is not a directory", directoryRoot), 1)
			}
		}

		// Check if the given gitlab-ci file path exists
		if gitlabCiFilePath != "" {
			gitlabCiFilePath, _ = filepath.Abs(gitlabCiFilePath)
			fileInfo, err := os.Stat(gitlabCiFilePath)
			if os.IsNotExist(err) {
				return cli.NewExitError(fmt.Sprintf("'%s' does not exists", gitlabCiFilePath), 1)
			}
			if fileInfo.IsDir() {
				return cli.NewExitError(fmt.Sprintf("'%s' is a directory, not a file", gitlabCiFilePath), 1)
			}
		}

		return nil
	}

	// Default action when no command is given: run 'check'.
	app.Action = func(c *cli.Context) error {
		return commandCheck(c)
	}

	app.Run(os.Args)
}
|
package main

// main drives the game loop: beginning at StartState, it repeatedly executes
// the current state until a state reports that the game is over.
func main() {
	initial := StartState{}
	ctx := GameContext{Next: &initial}
	for ctx.Next.executeState(&ctx) {
	}
}
|
// nodes collects node values per tree level: nodes[i] holds the values seen
// at depth i during traverseTree. It is re-allocated by isSymmetric before
// each traversal.
var nodes [][] int
// reverse returns a new slice holding the elements of a in reverse order;
// the input slice is left untouched.
func reverse(a []int) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[len(a)-1-i] = v
	}
	return out
}
// max returns the largest of the given ints, or 0 when called with no
// arguments (preserving the previous empty-case behaviour).
//
// Fix: the old version started the accumulator at 0, so an all-negative
// input wrongly returned 0 instead of the true maximum. Seeding with the
// first element corrects that while leaving non-negative inputs unchanged.
func max(vals ...int) int {
	if len(vals) == 0 {
		return 0
	}
	m := vals[0]
	for _, v := range vals[1:] {
		if v > m {
			m = v
		}
	}
	return m
}
// traverseTree walks the subtree rooted at node, appending each value to the
// package-level nodes slice at its depth, and returns the deepest level
// reached (levels are 0-based; a nil node reports its own level).
func traverseTree(node *TreeNode, level int) int {
	if node == nil {
		return level
	}
	nodes[level] = append(nodes[level], node.Val)
	var leftDepth, rightDepth int
	if node.Left != nil {
		leftDepth = traverseTree(node.Left, level+1)
	}
	if node.Right != nil {
		rightDepth = traverseTree(node.Right, level+1)
	}
	// Report the deepest level seen in either subtree.
	return max(level, leftDepth, rightDepth)
}
// sliceEqual reports whether a and b contain the same elements in the same
// order. A nil slice and a non-nil empty slice are considered different.
func sliceEqual(a, b []int) bool {
	if len(a) != len(b) || (a == nil) != (b == nil) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
func isSymmetric(root *TreeNode) bool {
nodes = make([][]int, 1005)
max_level := traverseTree(root, 0)
for i:= 0; i<= max_level; i++ {
arr := nodes[i]
_arr := reverse(arr)
if !sliceEqual(arr, _arr){
return false
}
}
return true
} |
package offheapstorage
import (
"bytes"
"testing"
)
// BenchmarkOffheapAPI measures a Put/Get/Delete round trip for payload sizes
// straddling the 32 KiB boundary (32767/32768/32769 bytes) plus one larger
// 123456-byte payload, verifying on every iteration that the data read back
// matches what was written.
func BenchmarkOffheapAPI(b *testing.B) {
	// newPayload builds a deterministic byte pattern of the given size,
	// replacing four copy-pasted init loops in the old version.
	newPayload := func(size int) []byte {
		data := make([]byte, size)
		for i := range data {
			data[i] = byte(i % 256)
		}
		return data
	}
	data1 := newPayload(32767)
	data2 := newPayload(32768)
	data3 := newPayload(32769)
	data4 := newPayload(123456)
	s := NewOffHeapStorage()
	b.ResetTimer()
	for x := 0; x < b.N; x++ {
		addr1, _ := s.Put(data1)
		addr2, _ := s.Put(data2)
		addr3, _ := s.Put(data3)
		addr4, _ := s.Put(data4)
		read1, _ := s.Get(addr1)
		read2, _ := s.Get(addr2)
		read3, _ := s.Get(addr3)
		read4, _ := s.Get(addr4)
		// b.Fatal (not panic) is the idiomatic way to abort a benchmark.
		if !bytes.Equal(data1, read1) {
			b.Fatal("Data1 not match")
		}
		if !bytes.Equal(data2, read2) {
			b.Fatal("Data2 not match")
		}
		if !bytes.Equal(data3, read3) {
			b.Fatal("Data3 not match")
		}
		if !bytes.Equal(data4, read4) {
			b.Fatal("Data4 not match")
		}
		s.Delete(addr1)
		s.Delete(addr2)
		s.Delete(addr3)
		s.Delete(addr4)
	}
}
|
package users
import (
"fmt"
"reflect"
"regexp"
"testing"
"github.com/DATA-DOG/go-sqlmock"
)
// TestGetByID is a test function for the Mysqlstore's GetByID
func TestGetByID(t *testing.T) {
	// Create a slice of test cases
	cases := []struct {
		name         string
		expectedUser *User
		idToGet      int64
		expectError  bool
	}{
		{
			"User Found",
			&User{
				1,
				"test@test.com",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			1,
			false,
		},
		{
			"User Not Found",
			&User{},
			2,
			true,
		},
		{
			"User With Large ID Found",
			&User{
				1234567890,
				"test@test.com",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			1234567890,
			false,
		},
	}
	for _, c := range cases {
		// Create a new mock database for each case
		db, mock, err := sqlmock.New()
		if err != nil {
			t.Fatalf("There was a problem opening a database connection: [%v]", err)
		}
		mainSQLStore := NewMysqlStore(db)
		// Create an expected row to the mock DB
		row := mock.NewRows([]string{
			"ID",
			"Email",
			"PassHash",
			"UserName",
			"FirstName",
			"LastName",
			"PhotoURL"},
		).AddRow(
			c.expectedUser.ID,
			c.expectedUser.Email,
			c.expectedUser.PassHash,
			c.expectedUser.UserName,
			c.expectedUser.FirstName,
			c.expectedUser.LastName,
			c.expectedUser.PhotoURL,
		)
		query := regexp.QuoteMeta("SELECT id, email, pass_hash, usr_name, first_name, last_name, photo_url FROM Users WHERE id=?")
		if c.expectError {
			// Set up expected query that will expect an error
			mock.ExpectQuery(query).WithArgs(c.idToGet).WillReturnError(ErrUserNotFound)
			// Test GetByID()
			user, err := mainSQLStore.GetByID(c.idToGet)
			if user != nil || err == nil {
				t.Errorf("Expected error [%v] but got [%v] instead", ErrUserNotFound, err)
			}
		} else {
			// Set up an expected query with the expected row from the mock DB
			mock.ExpectQuery(query).WithArgs(c.idToGet).WillReturnRows(row)
			// Test GetByID()
			user, err := mainSQLStore.GetByID(c.idToGet)
			if err != nil {
				t.Errorf("Unexpected error on successful test [%s]: %v", c.name, err)
			}
			if !reflect.DeepEqual(user, c.expectedUser) {
				t.Errorf("Error, invalid match in test [%s]", c.name)
			}
		}
		if err := mock.ExpectationsWereMet(); err != nil {
			t.Errorf("There were unfulfilled expectations: %s", err)
		}
		// Close this case's connection now. The old `defer db.Close()` inside
		// the loop kept every mock connection open until the whole test
		// function returned.
		db.Close()
	}
}
// TestGetByEmail is a test function for the Mysqlstore's GetByEmail
func TestGetByEmail(t *testing.T) {
	// Create a slice of test cases
	cases := []struct {
		name         string
		expectedUser *User
		emailToGet   string
		expectError  bool
	}{
		{
			"User Found",
			&User{
				1,
				"test@test.com",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			"test@test.com",
			false,
		},
		{
			"Invalid Email without at symbol",
			&User{
				22,
				"testtesting.com",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			"testtesting.com",
			true,
		},
		{
			"Invalid Email with multiple at symbol",
			&User{
				23,
				"A@b@c@example.com",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			"A@b@c@example.com",
			true,
		},
		{
			"User Not Found",
			&User{},
			"test@test.com",
			true,
		},
		{
			"User with long but valid email Found",
			&User{
				1234567890,
				"disposable.style.email.with+symbol@example.com",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			"disposable.style.email.with+symbol@example.com",
			false,
		},
		{
			"Check if SQL injection attack is prevented",
			&User{
				1234567890,
				"test@test.com",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			"; drop table users; select ",
			true,
		},
		{
			"Emails with strange character and TLD",
			&User{
				1234567890,
				"' '@s.example",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			"' '@s.example",
			false,
		},
	}
	for _, c := range cases {
		// Create a new mock database for each case
		db, mock, err := sqlmock.New()
		if err != nil {
			t.Fatalf("There was a problem opening a database connection: [%v]", err)
		}
		mainSQLStore := NewMysqlStore(db)
		// Create an expected row to the mock DB
		row := mock.NewRows([]string{
			"ID",
			"Email",
			"PassHash",
			"UserName",
			"FirstName",
			"LastName",
			"PhotoURL"},
		).AddRow(
			c.expectedUser.ID,
			c.expectedUser.Email,
			c.expectedUser.PassHash,
			c.expectedUser.UserName,
			c.expectedUser.FirstName,
			c.expectedUser.LastName,
			c.expectedUser.PhotoURL,
		)
		query := regexp.QuoteMeta("SELECT id, email, pass_hash, usr_name, first_name, last_name, photo_url FROM Users WHERE email=?")
		if c.expectError {
			// Set up expected query that will expect an error
			mock.ExpectQuery(query).WithArgs(c.emailToGet).WillReturnError(ErrUserNotFound)
			// Test GetByEmail()
			user, err := mainSQLStore.GetByEmail(c.emailToGet)
			if user != nil || err == nil {
				t.Errorf("Expected error [%v] but got [%v] instead", ErrUserNotFound, err)
			}
		} else {
			// Set up an expected query with the expected row from the mock DB
			mock.ExpectQuery(query).WithArgs(c.emailToGet).WillReturnRows(row)
			// Test GetByEmail()
			user, err := mainSQLStore.GetByEmail(c.emailToGet)
			if err != nil {
				t.Errorf("Unexpected error on successful test [%s]: %v", c.name, err)
			}
			if !reflect.DeepEqual(user, c.expectedUser) {
				t.Errorf("Error, invalid match in test [%s]", c.name)
			}
		}
		if err := mock.ExpectationsWereMet(); err != nil {
			t.Errorf("There were unfulfilled expectations: %s", err)
		}
		// Close this case's connection now. The old `defer db.Close()` inside
		// the loop kept every mock connection open until the whole test
		// function returned.
		db.Close()
	}
}
// TestGetByUserName is a test function for the Mysqlstore's GetByUserName
func TestGetByUserName(t *testing.T) {
	// Create a slice of test cases
	cases := []struct {
		name          string
		expectedUser  *User
		userNameToGet string
		expectError   bool
	}{
		{
			"User Found",
			&User{
				1,
				"test@test.com",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			"username",
			false,
		},
		{
			"User not found, empty userName",
			&User{
				1,
				"test@test.com",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			"",
			true,
		},
		{
			"User not found, userName invalid, contain spaces",
			&User{
				1,
				"test@test.com",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			"user name",
			true,
		},
		{
			"User not found, userName invalid, contain multiple spaces",
			&User{
				1,
				"test@test.com",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			"   ",
			true,
		},
		{
			"User Not Found",
			&User{},
			"username",
			true,
		},
		{
			"User with long but valid email Found",
			&User{
				1234567890,
				"disposable.style.email.with+symbol@example.com",
				[]byte("passhash123"),
				"ThisIsToTestTheNumberOfCharactersThatTheSQLSchemaCanSupportICanKeepThisGoingForeverWhyNotTrySomeSpecialCharacters?!#%$.,p''",
				"firstname",
				"lastname",
				"photourl",
			},
			"ThisIsToTestTheNumberOfCharactersThatTheSQLSchemaCanSupportICanKeepThisGoingForeverWhyNotTrySomeSpecialCharacters?!#%$.,p''",
			false,
		},
		{
			"Check if SQL injection attack is prevented",
			&User{
				1234567890,
				"test@test.com",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			"; drop table users; select ",
			true,
		},
	}
	for _, c := range cases {
		// Create a new mock database for each case
		db, mock, err := sqlmock.New()
		if err != nil {
			t.Fatalf("There was a problem opening a database connection: [%v]", err)
		}
		mainSQLStore := NewMysqlStore(db)
		// Create an expected row to the mock DB
		row := mock.NewRows([]string{
			"ID",
			"Email",
			"PassHash",
			"UserName",
			"FirstName",
			"LastName",
			"PhotoURL"},
		).AddRow(
			c.expectedUser.ID,
			c.expectedUser.Email,
			c.expectedUser.PassHash,
			c.expectedUser.UserName,
			c.expectedUser.FirstName,
			c.expectedUser.LastName,
			c.expectedUser.PhotoURL,
		)
		query := regexp.QuoteMeta("SELECT id, email, pass_hash, usr_name, first_name, last_name, photo_url FROM Users WHERE usr_name=?")
		if c.expectError {
			// Set up expected query that will expect an error
			mock.ExpectQuery(query).WithArgs(c.userNameToGet).WillReturnError(ErrUserNotFound)
			// Test GetByUserName()
			user, err := mainSQLStore.GetByUserName(c.userNameToGet)
			if user != nil || err == nil {
				t.Errorf("Expected error [%v] but got [%v] instead", ErrUserNotFound, err)
			}
		} else {
			// Set up an expected query with the expected row from the mock DB
			mock.ExpectQuery(query).WithArgs(c.userNameToGet).WillReturnRows(row)
			// Test GetByUserName()
			user, err := mainSQLStore.GetByUserName(c.userNameToGet)
			if err != nil {
				t.Errorf("Unexpected error on successful test [%s]: %v", c.name, err)
			}
			if !reflect.DeepEqual(user, c.expectedUser) {
				t.Errorf("Error, invalid match in test [%s]", c.name)
			}
		}
		if err := mock.ExpectationsWereMet(); err != nil {
			t.Errorf("There were unfulfilled expectations: %s", err)
		}
		// Close this case's connection now. The old `defer db.Close()` inside
		// the loop kept every mock connection open until the whole test
		// function returned.
		db.Close()
	}
}
// TestInsert is a test function for the Mysqlstore's Insert
func TestInsert(t *testing.T) {
	// Create a slice of test cases
	cases := []struct {
		name         string
		expectedUser *User
		expectedID   int64
		expectError  bool
	}{
		{
			"A valid user",
			&User{
				123,
				"test@test.com",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			1,
			false,
		},
		{
			"Another valid user",
			&User{
				1,
				"John.Smith@testing.com",
				[]byte("moreThanAdaquate"),
				"Adaquate",
				"John",
				"Smith",
				"https://www.gravatar.com/avatar/cb5b989399d41e648ce246caa906b458",
			},
			2,
			false,
		},
		{
			"User not found, empty userName",
			&User{
				1,
				"test@test.com",
				[]byte("passhash123"),
				"",
				"firstname",
				"lastname",
				"photourl",
			},
			-1,
			true,
		},
		{
			"User not found, userName invalid, contain spaces",
			&User{
				1,
				"test@test.com",
				[]byte("passhash123"),
				"user name",
				"firstname",
				"lastname",
				"photourl",
			},
			-1,
			true,
		},
		{
			"User not found, userName invalid, contain multiple spaces",
			&User{
				1,
				"test@test.com",
				[]byte("passhash123"),
				"   ",
				"firstname",
				"lastname",
				"photourl",
			},
			-1,
			true,
		},
		{
			"User Not Found",
			&User{},
			-1,
			true,
		},
		{
			"User with long but valid email and username",
			&User{
				1234567890,
				"disposable.style.email.with+symbol@example.com",
				[]byte("passhash123"),
				"ThisIsToTestTheNumberOfCharactersThatTheSQLSchemaCanSupportICanKeepThisGoingForeverWhyNotTrySomeSpecialCharacters?!#%$.,p''",
				"firstname",
				"lastname",
				"photourl",
			},
			3,
			false,
		},
		{
			"Check if SQL injection attack is prevented",
			&User{
				1234567890,
				"test@test.com",
				[]byte("passhash123"),
				"; drop table users; select ",
				"firstname",
				"lastname",
				"photourl",
			},
			-1,
			true,
		},
		{
			"Invalid Email without at symbol",
			&User{
				22,
				"testtesting.com",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			-1,
			true,
		},
		{
			"Invalid Email with multiple at symbol",
			&User{
				23,
				"A@b@c@example.com",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			-1,
			true,
		},
		{
			"valid Email with strange symbols",
			&User{
				23,
				"' '@s.example",
				[]byte("passhash123"),
				"username",
				"firstname",
				"lastname",
				"photourl",
			},
			4,
			false,
		},
	}
	for _, c := range cases {
		// Create a new mock database for each case
		db, mock, err := sqlmock.New()
		if err != nil {
			t.Fatalf("There was a problem opening a database connection: [%v]", err)
		}
		mainSQLStore := NewMysqlStore(db)
		query := regexp.QuoteMeta("INSERT INTO Users(email, pass_hash, usr_name, first_name, last_name, photo_url) VALUES (?,?,?,?,?,?)")
		if c.expectError {
			// Set up expected query that will expect an error
			mock.ExpectExec(query).WithArgs(
				c.expectedUser.Email,
				c.expectedUser.PassHash,
				c.expectedUser.UserName,
				c.expectedUser.FirstName,
				c.expectedUser.LastName,
				c.expectedUser.PhotoURL).WillReturnError(fmt.Errorf("Some database error"))
			// Test Inserting()
			user, err := mainSQLStore.Insert(c.expectedUser)
			if user != nil || err == nil {
				t.Errorf("Expected an error here, test case is: %s", c.name)
			}
		} else {
			// Set up an expected query with the expected row from the mock DB
			mock.ExpectExec(query).WithArgs(
				c.expectedUser.Email,
				c.expectedUser.PassHash,
				c.expectedUser.UserName,
				c.expectedUser.FirstName,
				c.expectedUser.LastName,
				c.expectedUser.PhotoURL).WillReturnResult(sqlmock.NewResult(c.expectedID, 1))
			// Test Inserting()
			user, err := mainSQLStore.Insert(c.expectedUser)
			if err != nil {
				t.Errorf("Unexpected error on successful test [%s]: %v", c.name, err)
			}
			if !reflect.DeepEqual(user, c.expectedUser) {
				t.Errorf("Error, invalid match in test [%s]", c.name)
			}
		}
		if err := mock.ExpectationsWereMet(); err != nil {
			t.Errorf("There were unfulfilled expectations: %s", err)
		}
		// Close this case's connection now. The old `defer db.Close()` inside
		// the loop kept every mock connection open until the whole test
		// function returned.
		db.Close()
	}
}
// TestUpdate is a test function for the Mysqlstore's Update
func TestUpdate(t *testing.T) {
	// Create a slice of test cases
	cases := []struct {
		name         string
		expectedUser *User
		update       *Updates
	}{
		{
			"Update both names",
			&User{
				1,
				"John.Smith@testing.com",
				[]byte("moreThanAdaquate"),
				"Adaquate",
				"Johnny",
				"Depp",
				"https://www.gravatar.com/avatar/cb5b989399d41e648ce246caa906b458",
			},
			&Updates{
				FirstName: "Johnny",
				LastName:  "Depp",
			},
		},
		{
			"Empty update",
			&User{
				1,
				"John.Smith@testing.com",
				[]byte("moreThanAdaquate"),
				"Adaquate",
				"",
				"",
				"https://www.gravatar.com/avatar/cb5b989399d41e648ce246caa906b458",
			},
			&Updates{
				FirstName: "",
				LastName:  "",
			},
		},
		{
			"Make first name empty",
			&User{
				1,
				"John.Smith@testing.com",
				[]byte("moreThanAdaquate"),
				"Adaquate",
				"",
				"Smith",
				"https://www.gravatar.com/avatar/cb5b989399d41e648ce246caa906b458",
			},
			&Updates{
				FirstName: "",
				LastName:  "Smith",
			},
		},
		{
			"Make last name empty",
			&User{
				1,
				"John.Smith@testing.com",
				[]byte("moreThanAdaquate"),
				"Adaquate",
				"John",
				"",
				"https://www.gravatar.com/avatar/cb5b989399d41e648ce246caa906b458",
			},
			&Updates{
				FirstName: "John",
				LastName:  "",
			},
		},
		{
			"Empty struct",
			&User{
				1,
				"John.Smith@testing.com",
				[]byte("moreThanAdaquate"),
				"Adaquate",
				"",
				"",
				"https://www.gravatar.com/avatar/cb5b989399d41e648ce246caa906b458",
			},
			&Updates{},
		},
		{
			"Update first name only",
			&User{
				1,
				"John.Smith@testing.com",
				[]byte("moreThanAdaquate"),
				"Adaquate",
				"Johnny",
				"",
				"https://www.gravatar.com/avatar/cb5b989399d41e648ce246caa906b458",
			},
			&Updates{
				FirstName: "Johnny",
			},
		},
		{
			"Update first name only",
			&User{
				1,
				"John.Smith@testing.com",
				[]byte("moreThanAdaquate"),
				"Adaquate",
				"",
				"Depp",
				"https://www.gravatar.com/avatar/cb5b989399d41e648ce246caa906b458",
			},
			&Updates{
				LastName: "Depp",
			},
		},
		{
			"Invalid update",
			&User{
				1,
				"John.Smith@testing.com",
				[]byte("moreThanAdaquate"),
				"Adaquate",
				"John",
				"Smith",
				"https://www.gravatar.com/avatar/cb5b989399d41e648ce246caa906b458",
			},
			&Updates{
				FirstName: "John",
				LastName:  "Smith",
			},
		},
		{
			"Wrong Client Update ID",
			&User{
				1,
				"John.Smith@testing.com",
				[]byte("moreThanAdaquate"),
				"Adaquate",
				"Johnny",
				"Depp",
				"https://www.gravatar.com/avatar/cb5b989399d41e648ce246caa906b458",
			},
			&Updates{
				FirstName: "Johnny",
				LastName:  "Depp",
			},
		},
		{
			"Update is nil",
			&User{
				1,
				"John.Smith@testing.com",
				[]byte("moreThanAdaquate"),
				"Adaquate",
				"Johnny",
				"Depp",
				"https://www.gravatar.com/avatar/cb5b989399d41e648ce246caa906b458",
			},
			nil,
		},
	}
	for _, c := range cases {
		// Create a new mock database for each case
		db, mock, err := sqlmock.New()
		if err != nil {
			t.Fatalf("There was a problem opening a database connection: [%v]", err)
		}
		mainSQLStore := NewMysqlStore(db)
		// Create an expected row to the mock DB
		row := mock.NewRows([]string{
			"ID",
			"Email",
			"PassHash",
			"UserName",
			"FirstName",
			"LastName",
			"PhotoURL"},
		).AddRow(
			1,
			"John.Smith@testing.com",
			[]byte("moreThanAdaquate"),
			"Adaquate",
			"John",
			"Smith",
			"https://www.gravatar.com/avatar/cb5b989399d41e648ce246caa906b458",
		)
		selectQuery := regexp.QuoteMeta("SELECT id, email, pass_hash, usr_name, first_name, last_name, photo_url FROM Users WHERE id=?")
		updateQuery := regexp.QuoteMeta("UPDATE Users SET first_name = ?, last_name = ? WHERE id = ?")
		// Set up an expected query with the expected row from the mock DB
		// (skipped for the nil-update case, which must fail before querying).
		if c.name != "Update is nil" {
			mock.ExpectQuery(selectQuery).
				WithArgs(1).WillReturnRows(row)
			mock.ExpectExec(updateQuery).
				WithArgs(c.update.FirstName, c.update.LastName, 1).
				WillReturnResult(sqlmock.NewResult(1, 1))
		}
		// Test Update()
		if c.name == "Wrong Client Update ID" {
			_, err2 := mainSQLStore.Update(2, c.update)
			if err2 == nil {
				t.Errorf("Expected an error here.")
			}
		}
		user, err2 := mainSQLStore.Update(1, c.update)
		if c.name == "Update is nil" && err2 == nil {
			t.Errorf("Expected an error here.")
		} else if err2 != nil && c.name != "Update is nil" {
			t.Errorf("Unexpected error on successful test [%s]: %v", c.name, err)
		}
		if err2 == nil && !reflect.DeepEqual(user, c.expectedUser) {
			t.Errorf("Error, invalid match in test [%s]", c.name)
		}
		if err := mock.ExpectationsWereMet(); err2 == nil && err != nil {
			t.Errorf("There were unfulfilled expectations: %s", err)
		}
		// Close this case's connection now. The old `defer db.Close()` inside
		// the loop kept every mock connection open until the whole test
		// function returned.
		db.Close()
	}
}
// TestDelete is a test function for the Mysqlstore's Delete
func TestDelete(t *testing.T) {
	cases := []struct {
		name                string
		idToDelete          int64
		expectDeletionError bool
	}{
		{
			"0",
			0,
			true,
		},
		{
			"Negative number",
			-1,
			true,
		},
		{
			"Super large number",
			92493849820948902,
			true,
		},
		{
			"1",
			1,
			false,
		},
		{
			"4",
			1,
			false,
		},
		{
			"3",
			1,
			false,
		},
	}
	for _, c := range cases {
		// Create a new mock database for each case.
		// Fix: this failure used to be t.Errorf, which let the loop continue
		// and dereference a nil db; Fatalf aborts like the sibling tests do.
		db, mock, err := sqlmock.New()
		if err != nil {
			t.Fatalf("There was a problem opening a database connection: [%v]", err)
		}
		mainSQLStore := NewMysqlStore(db)
		// Seed rows in the mock (the result is not wired to any expectation;
		// kept for parity with the original fixture data).
		mock.NewRows([]string{
			"ID",
			"Email",
			"PassHash",
			"UserName",
			"FirstName",
			"LastName",
			"PhotoURL"},
		).AddRow(
			1,
			"John.Smith@testing.com",
			[]byte("moreThanAdaquate"),
			"Adaquate",
			"John",
			"Smith",
			"https://www.gravatar.com/avatar/cb5b989399d41e648ce246caa906b458",
		).AddRow(
			2,
			"' '@s.example",
			[]byte("SomeExamplePassword123"),
			"username",
			"",
			"",
			"FakeURL",
		).AddRow(
			3,
			"disposable.style.email.with+symbol@example.com",
			[]byte("passhash123"),
			"ThisIsToTestTheNumberOfCharactersThatTheSQLSchemaCanSupportICanKeepThisGoingForeverWhyNotTrySomeSpecialCharacters?!#%$.,p''",
			"firstname",
			"lastname",
			"photourl",
		).AddRow(
			4,
			"welldone+@adomain",
			[]byte(" "),
			"TechnicallyPossible",
			"Have A",
			"Nice Day",
			"https://www.gravatar.com/avatar/cb5b801239d41e648ce246ca1206b458",
		)
		deleteQuery := regexp.QuoteMeta("DELETE FROM Users WHERE id=?")
		selectQuery := regexp.QuoteMeta("SELECT id, email, pass_hash, usr_name, first_name, last_name, photo_url FROM Users WHERE id=?")
		// Set up expected query that will expect an error
		if c.expectDeletionError {
			mock.
				ExpectExec(deleteQuery).WithArgs(c.idToDelete).
				WillReturnError(fmt.Errorf("Some database error"))
			err = mainSQLStore.Delete(c.idToDelete)
			if err == nil {
				t.Errorf("Expected error on invalid delete, id is: %d", c.idToDelete)
			}
		} else {
			mock.ExpectExec(deleteQuery).WithArgs(c.idToDelete).WillReturnResult(sqlmock.NewResult(4, 1))
			err = mainSQLStore.Delete(c.idToDelete)
			if err != nil {
				t.Errorf("Unexpected error on successful delete: %v", err)
			}
			mock.ExpectQuery(selectQuery).
				WithArgs(c.idToDelete).WillReturnError(ErrUserNotFound)
			_, err = mainSQLStore.GetByID(c.idToDelete)
			if err == nil {
				t.Errorf("Expected a UserNotFoundError, but got no error instead.")
			}
		}
		if err := mock.ExpectationsWereMet(); err != nil {
			t.Errorf("There were unfulfilled expectations: %s", err)
		}
		// Close this case's connection now. The old `defer db.Close()` inside
		// the loop kept every mock connection open until the whole test
		// function returned.
		db.Close()
	}
}
|
/*
* Copyright 2018 The NATS Authors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"fmt"
"net/http"
"net/http/httptest"
"path/filepath"
"testing"
"github.com/nats-io/jwt"
"github.com/stretchr/testify/require"
)
// Test_AddImport exercises the basic add-import flows: missing account,
// missing token, and a successful stream import from a token file.
func Test_AddImport(t *testing.T) {
	ts := NewTestStore(t, "test")
	defer ts.Done(t)

	// Export a stream from A and generate an activation for B.
	ts.AddAccount(t, "A")
	ts.AddExport(t, "A", jwt.Stream, "foobar.>", false)
	ts.AddAccount(t, "B")
	activation := ts.GenerateActivation(t, "A", "foobar.>", "B")
	tokenFile := filepath.Join(ts.Dir, "token.jwt")
	require.NoError(t, Write(tokenFile, []byte(activation)))

	tests := CmdTests{
		{createAddImportCmd(), []string{"add", "import"}, nil, []string{"an account is required"}, true},
		{createAddImportCmd(), []string{"add", "import", "--account", "B"}, nil, []string{"token is required"}, true},
		{createAddImportCmd(), []string{"add", "import", "--account", "B", "--token", tokenFile}, nil, []string{"added stream import"}, false},
	}
	tests.Run(t, "root", "add")
}
// Test_AddImportSelfImportsRejected verifies that an account cannot import
// an activation that it issued for itself.
func Test_AddImportSelfImportsRejected(t *testing.T) {
	ts := NewTestStore(t, "test")
	defer ts.Done(t)

	ts.AddAccount(t, "A")
	ts.AddExport(t, "A", jwt.Stream, "foobar.>", false)

	// Activation issued by A *for* A — importing it must be rejected.
	selfActivation := ts.GenerateActivation(t, "A", "foobar.>", "A")
	tokenFile := filepath.Join(ts.Dir, "token.jwt")
	require.NoError(t, Write(tokenFile, []byte(selfActivation)))

	_, _, err := ExecuteCmd(createAddImportCmd(), "--token", tokenFile)
	require.Error(t, err)
	require.Equal(t, "activation issuer is this account", err.Error())
}
// Test_AddImportFromURL verifies that --token accepts a URL and that the
// resulting import records the URL itself rather than the token body.
func Test_AddImportFromURL(t *testing.T) {
	ts := NewTestStore(t, "test")
	defer ts.Done(t)

	ts.AddAccount(t, "A")
	ts.AddExport(t, "A", jwt.Stream, "foobar.>", false)
	ts.AddAccount(t, "B")
	activation := ts.GenerateActivation(t, "A", "foobar.>", "B")

	// Serve the activation token over HTTP.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, activation)
	}))
	defer srv.Close()

	_, _, err := ExecuteCmd(createAddImportCmd(), "--account", "B", "--token", srv.URL)
	require.NoError(t, err)

	ac, err := ts.Store.ReadAccountClaim("B")
	require.NoError(t, err)
	require.NotNil(t, ac)
	require.Len(t, ac.Imports, 1)
	require.Equal(t, ac.Imports[0].Token, srv.URL)
}
// Test_AddImportInteractive drives the add-import command in interactive
// mode and verifies every field of the resulting import.
func Test_AddImportInteractive(t *testing.T) {
	ts := NewTestStore(t, "test")
	defer ts.Done(t)
	ts.AddAccount(t, "A")
	ts.AddExport(t, "A", jwt.Stream, "foobar.>", false)
	akp, err := ts.KeyStore.GetAccountKey("A")
	require.NoError(t, err)
	require.NotNil(t, akp)
	apub, err := akp.PublicKey()
	// Fix: this error was previously ignored (err was silently overwritten
	// further down); fail fast if the public key cannot be derived.
	require.NoError(t, err)
	ts.AddAccount(t, "B")
	token := ts.GenerateActivation(t, "A", "foobar.>", "B")
	fp := filepath.Join(ts.Dir, "token.jwt")
	require.NoError(t, Write(fp, []byte(token)))
	cmd := createAddImportCmd()
	HoistRootFlags(cmd)
	// Interactive answers: pick entry 1, token path, import name, local subject.
	input := []interface{}{1, fp, "my import", "barfoo.>"}
	_, _, err = ExecuteInteractiveCmd(cmd, input, "-i")
	require.NoError(t, err)
	ac, err := ts.Store.ReadAccountClaim("B")
	require.NoError(t, err)
	require.Len(t, ac.Imports, 1)
	require.Equal(t, "my import", ac.Imports[0].Name)
	require.Equal(t, "barfoo.>", string(ac.Imports[0].To))
	require.Equal(t, "foobar.>", string(ac.Imports[0].Subject))
	require.Equal(t, apub, ac.Imports[0].Account)
}
|
package apir
import (
"encoding/json"
"fmt"
"net/http"
)
// Result bundles an HTTP response writer with its originating request and
// the message that will be written out when the result is finalized.
type Result struct {
	w       http.ResponseWriter
	r       *http.Request
	message string
}

// New builds a Result for the given writer/request pair with the default
// "OK" message.
func New(w http.ResponseWriter, r *http.Request) Result {
	return Result{w, r, "OK"}
}

// Throw logs the error message, writes the given HTTP status code, and
// writes the message to the response body.
func (r *Result) Throw(httpCode int, err string) {
	r.message = err
	fmt.Println(r.message)
	r.w.WriteHeader(httpCode)
	r.Finalize()
}

// ToJSON writes `data` to the response encoded as JSON.
func (r *Result) ToJSON(data interface{}) {
	json.NewEncoder(r.w).Encode(data)
}

// Finalize writes the current message to the response.
// Fix: a stray `json.NewEncoder(r.w)` call was removed here — it built an
// encoder and discarded it without encoding anything, so it had no effect.
func (r *Result) Finalize() {
	r.w.Write([]byte(r.message))
}
|
package main
import (
"os"
"fmt"
"log"
"bufio"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sqs"
)
// main creates a temporary SQS queue, sends each line typed on stdin as a
// message until the user enters "q", then polls the queue to echo the
// messages back, deleting the queue on exit.
func main() {
	// Renamed from `session` to avoid shadowing the aws session package.
	sess, err := session.NewSession(&aws.Config{
		Region: aws.String("us-west-2"),
	})
	if err != nil {
		log.Fatal(err)
	}
	service := sqs.New(sess)
	reader := bufio.NewReader(os.Stdin)
	queueName := "ExampleQueue"
	count := 0
	println("Creating queue...")
	queue, err := service.CreateQueue(&sqs.CreateQueueInput{
		QueueName: &queueName,
	})
	if err != nil {
		log.Fatal(err)
	}
	print("Queue URL: ")
	println(*queue.QueueUrl)
	defer (func() {
		_, err = service.DeleteQueue(&sqs.DeleteQueueInput{
			QueueUrl: queue.QueueUrl,
		})
		if err != nil {
			log.Fatal(err)
		}
	})()
	println("type and send messages to SQS by hitting enter (type 'q' to quit):")
	for {
		fmt.Print("> ")
		text, _ := reader.ReadString('\n')
		message := strings.TrimRight(text, "\n")
		if message == "q" {
			break
		}
		_, err = service.SendMessage(&sqs.SendMessageInput{
			MessageBody: &message,
			QueueUrl:    queue.QueueUrl,
		})
		if err != nil {
			log.Fatal(err)
		}
		// Fix: count only messages actually sent. The old code incremented
		// before the quit check, so the receive loop below polled once more
		// than the number of messages sent.
		count++
	}
	for ; count != 0; count-- {
		messages, err := service.ReceiveMessage(&sqs.ReceiveMessageInput{
			QueueUrl: queue.QueueUrl,
		})
		if err != nil {
			log.Fatal(err)
		}
		// NOTE(review): ReceiveMessage may return zero or several messages
		// per call, so one poll per sent message is approximate — confirm
		// whether exact accounting is needed here.
		for _, msg := range messages.Messages {
			println(*msg.Body)
		}
	}
}
|
package individualparsers
import (
"encoding/base64"
)
// ReverseBase64MZHeader detects payloads that were base64 encoded and then
// reversed, by checking for known encodings of a PE ("MZ") header.
type ReverseBase64MZHeader struct{}

// Match reports whether content, once reversed, begins with one of the
// known base64 prefixes of a PE header.
func (b ReverseBase64MZHeader) Match(content []byte) (bool, error) {
	normalizedContent := reverse(string(content))
	if len(normalizedContent) < 4 {
		return false, nil
	}
	switch normalizedContent[:4] {
	case "TVpQ", "TVoA", "TVpB", "TVqA", "TVqQ", "TVro":
		return true, nil
	}
	return false, nil
}
// Normalize reverses content and base64-decodes it, returning the decoded
// bytes tagged as a raw executable.
func (b ReverseBase64MZHeader) Normalize(content []byte) (int, []byte, error) {
	decoded, err := base64.StdEncoding.DecodeString(reverse(string(content)))
	if err != nil {
		return 0, nil, err
	}
	return KeyRawExecutable, decoded, nil
}
|
// Copyright © 2018 moooofly <centos.sf@gmail.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cmd
import (
"encoding/json"
"fmt"
"os"
"strconv"
"time"
"github.com/moooofly/harborctl/utils"
"github.com/spf13/cobra"
)
// replicationURL is the base URL of the '/api/replications' endpoint,
// populated by replicationCmd's PersistentPreRun before any subcommand runs.
var replicationURL string

// jobURL is the base URL of the '/api/jobs' endpoint, populated by
// replicationCmd's PersistentPreRun before any subcommand runs.
var jobURL string
// replicationCmd represents the replication command; it is the parent of the
// '/replications' subcommand hierarchy and only prints usage when invoked
// directly.
var replicationCmd = &cobra.Command{
	Use:   "replication",
	Short: "'/replications' API",
	// Fix: "hierachy" -> "hierarchy" in user-facing help text.
	Long: `The subcommand of '/replications' hierarchy.`,
	// Resolve the API base URLs before any subcommand runs.
	PersistentPreRun: func(cmd *cobra.Command, args []string) {
		replicationURL = utils.URLGen("/api/replications")
		jobURL = utils.URLGen("/api/jobs")
	},
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("Use \"harborctl replication --help\" for more information about this command.")
	},
}
// init registers the replication command on the root command and wires up
// its list/update/delete subcommands.
func init() {
	rootCmd.AddCommand(replicationCmd)
	initJobReplicationList()
	initJobStatusUpdate()
	initJobReplicationDelete()
}
// jobReplicationListCmd represents the list command
var jobReplicationListCmd = &cobra.Command{
	Use:   "list",
	Short: "List filters jobs according to the policy and repository.",
	Long: `This endpoint let user list jobs according to policy_id, and filters output by repository/status/start_time/end_time.

NOTE: if 'start_time' and 'end_time' are both null, list jobs of last 10 days`,
	// Delegate to listReplicationJob, which reads the flag values collected
	// in the jobReplicationList struct.
	Run: func(cmd *cobra.Command, args []string) {
		listReplicationJob()
	},
}
// jobReplicationList holds the filter flag values for the 'replication list'
// subcommand; it is populated by initJobReplicationList.
var jobReplicationList struct {
	policyID   int64  // (required) ID of the policy that triggered the jobs
	num        int64  // return list length number
	endTime    string // end of the time window (yyyymmdd)
	startTime  string // start of the time window (yyyymmdd)
	repository string // filter by repository name
	status     string // filter by job status
	page       int64  // page number
	pageSize   int64  // page size (max 100)
}
// initJobReplicationList registers the 'list' subcommand and binds its
// filter flags to the jobReplicationList struct.
func initJobReplicationList() {
	replicationCmd.AddCommand(jobReplicationListCmd)
	jobReplicationListCmd.Flags().Int64VarP(&jobReplicationList.policyID,
		"policy_id",
		"i", 0,
		"(REQUIRED) The ID of the policy that triggered this job.")
	jobReplicationListCmd.MarkFlagRequired("policy_id")
	jobReplicationListCmd.Flags().Int64VarP(&jobReplicationList.num,
		"num",
		"n", 1,
		"The return list length number. (NOTE: not sure what it is for)")
	jobReplicationListCmd.Flags().StringVarP(&jobReplicationList.endTime,
		"endTime",
		"", "",
		"The end time of jobs done to filter. (Format: yyyymmdd)")
	jobReplicationListCmd.Flags().StringVarP(&jobReplicationList.startTime,
		"startTime",
		"", "",
		"The start time of jobs to filter. (Format: yyyymmdd)")
	jobReplicationListCmd.Flags().StringVarP(&jobReplicationList.repository,
		"repository",
		"r", "",
		"The returned jobs list filtered by repository name.")
	jobReplicationListCmd.Flags().StringVarP(&jobReplicationList.status,
		"status",
		"", "",
		"The returned jobs list filtered by status (one of [running|error|pending|retrying|stopped|finished|canceled])")
	jobReplicationListCmd.Flags().Int64VarP(&jobReplicationList.page,
		"page",
		"p", 1,
		// Fix: "nubmer" -> "number" in user-facing flag help.
		"The page number, default is 1.")
	jobReplicationListCmd.Flags().Int64VarP(&jobReplicationList.pageSize,
		"page_size",
		"s", 10,
		"The size of per page, default is 10, maximum is 100.")
}
// listReplicationJob lists the replication jobs of a policy via the
// '/api/jobs/replication' endpoint, applying the filters gathered from the
// 'list' subcommand flags, and prints the API response.
func listReplicationJob() {
	// The API lists jobs of the last 10 days when start_time and end_time
	// are both null. Fix: the old code used `||` and reset BOTH bounds
	// whenever EITHER was empty, silently discarding a user-provided bound;
	// default each bound independently instead.
	now := time.Now()
	if jobReplicationList.startTime == "" {
		jobReplicationList.startTime = now.AddDate(0, 0, -10).Format("20060102")
	}
	if jobReplicationList.endTime == "" {
		jobReplicationList.endTime = now.Format("20060102")
	}
	st, err := time.Parse("20060102", jobReplicationList.startTime)
	if err != nil {
		fmt.Println("error:", err)
		os.Exit(1)
	}
	et, err := time.Parse("20060102", jobReplicationList.endTime)
	if err != nil {
		fmt.Println("error:", err)
		os.Exit(1)
	}
	// Validate the status filter against the set the API accepts.
	switch jobReplicationList.status {
	case "", "running", "error", "pending", "retrying", "stopped", "finished", "canceled":
		// valid
	default:
		fmt.Println("error: status must be one of [running|error|pending|retrying|stopped|finished|canceled].")
		os.Exit(1)
	}
	// NOTE(review): query values are not URL-escaped; a repository name
	// containing '&' or '=' would corrupt the query string — confirm
	// whether utils.Get expects pre-escaped input.
	targetURL := jobURL + "/replication?policy_id=" + strconv.FormatInt(jobReplicationList.policyID, 10) +
		"&page=" + strconv.FormatInt(jobReplicationList.page, 10) +
		"&page_size=" + strconv.FormatInt(jobReplicationList.pageSize, 10) +
		"&status=" + jobReplicationList.status +
		"&start_time=" + strconv.FormatInt(st.Unix(), 10) +
		"&end_time=" + strconv.FormatInt(et.Unix(), 10) +
		"&repository=" + jobReplicationList.repository +
		"&num=" + strconv.FormatInt(jobReplicationList.num, 10)
	utils.Get(targetURL)
}
// jobReplicationUpdateCmd represents the update command
//
// It changes the status of a policy's replication jobs; the backing API
// currently only supports stopping them.
var jobReplicationUpdateCmd = &cobra.Command{
	Use:   "update",
	Short: "Update status of jobs. Only 'stop' is supported for now.",
	Long:  `The endpoint is used to stop the replication jobs of a policy.`,
	Run: func(cmd *cobra.Command, args []string) {
		updateJobStatus()
	},
}
// jobStatusUpdate is the JSON payload PUT to the jobs API to change the
// status of the replication jobs of one policy.
var jobStatusUpdate struct {
	PolicyID int64  `json:"policy_id"` // policy whose jobs are updated
	Status   string `json:"status"`    // desired status; only "stop" is valid for now
}
// initJobStatusUpdate registers the "update" subcommand and binds its flags
// to jobStatusUpdate.
func initJobStatusUpdate() {
	replicationCmd.AddCommand(jobReplicationUpdateCmd)

	flags := jobReplicationUpdateCmd.Flags()
	flags.Int64VarP(&jobStatusUpdate.PolicyID,
		"policy_id",
		"i", 0,
		"(REQUIRED) The ID of the policy that triggered this job.")
	jobReplicationUpdateCmd.MarkFlagRequired("policy_id")
	flags.StringVarP(&jobStatusUpdate.Status,
		"status",
		"s", "stop",
		"The status of jobs. NOTE: The only valid value is 'stop' for now.")
	jobReplicationUpdateCmd.MarkFlagRequired("status")
}
func updateJobStatus() {
targetURL := jobURL + "/replication"
p, err := json.Marshal(&jobStatusUpdate)
if err != nil {
fmt.Println("error:", err)
return
}
utils.Put(targetURL, string(p))
}
// jobReplicationDeleteCmd represents the delete command
//
// It removes a single replication job, addressed by ID, from the job service.
var jobReplicationDeleteCmd = &cobra.Command{
	Use:   "delete",
	Short: "Delete a replication job by ID.",
	Long:  `This endpoint is aimed to remove a specific job from jobservice.`,
	Run: func(cmd *cobra.Command, args []string) {
		deleteReplicationJob()
	},
}
// jobReplicationDelete holds the parsed flag values for the "delete"
// subcommand.
var jobReplicationDelete struct {
	ID int64 // ID of the job to remove
}
// initJobReplicationDelete registers the "delete" subcommand and binds its
// single required flag.
func initJobReplicationDelete() {
	replicationCmd.AddCommand(jobReplicationDeleteCmd)

	flags := jobReplicationDeleteCmd.Flags()
	flags.Int64VarP(&jobReplicationDelete.ID,
		"id",
		"i", 0,
		"(REQUIRED) The ID of the job to delete.")
	jobReplicationDeleteCmd.MarkFlagRequired("id")
}
func deleteReplicationJob() {
targetURL := jobURL + "/replication/" + strconv.FormatInt(jobReplicationDelete.ID, 10)
utils.Delete(targetURL)
}
|
package moxings
// Yinpinmimajius is the GORM model for an audio-password record
// (pinyin: "yinpin mima" ≈ audio password — TODO confirm with the schema
// owner).
type Yinpinmimajius struct {
	Id         int
	Xuliehao   string `gorm:"not null;DEFAULT:0"` // serial number; NOTE(review): DEFAULT:0 on a string column looks suspect — confirm schema
	Yinpinmima string `gorm:"not null;DEFAULT:0"` // the audio password value
}

// TableName tells GORM which table backs this model.
func (Yinpinmimajius) TableName() string {
	const table = "Yinpinmimajius"
	return table
}
|
//
// cloud@txthinking.com
package main
import (
	"bufio"
	"crypto/tls"
	"fmt"
	"io"
	"net"
	"os/exec"
	"regexp"
	"strconv"
	"strings"
)
func main(){
var userName string = ""
var password string = ""
var i int = 0;
var t string = "TX"
var tag string
var nc net.Conn //interface type
var err error
var in string
var out string
var ok bool
/*nc, err = net.Dial("tcp", "imap.163.com:143")*/
nc, err = tls.Dial("tcp", "imap.163.com:993", nil)
if err != nil {
fmt.Printf("E")
return
}else{
defer nc.Close()
}
i++
tag = t + strconv.Itoa(i)
in = tag + " CAPABILITY\r\n"
if _, ok = getCode(nc, in, tag); !ok{
return
}
i++
tag = t + strconv.Itoa(i)
in = tag + " LOGIN " + userName + " " +password+"\r\n"
if _, ok = getCode(nc, in, tag); !ok {
return
}
i++
tag = t + strconv.Itoa(i)
in = tag + " STATUS inbox (UNSEEN)\r\n"
if out, ok = getCode(nc, in, tag); !ok {
return
}
i++
tag = t + strconv.Itoa(i)
in = tag + " LOGOUT\r\n"
if _, ok = getCode(nc, in, tag); !ok {
return
}
var r string = `\(UNSEEN (\d+)\)`
rr, _ := regexp.Compile(r)
var ss [][]string = rr.FindAllStringSubmatch(out, 1)
var unseen string = ss[0][1]
unseenNumber, _ := strconv.Atoi(unseen)
if unseenNumber > 0{
//getmail
/*cmd := exec.Command("getmail")*/
/*cmd.Run()*/
//sound
cmd := exec.Command("aplay", "/home/tx/Music/gmail-sound.wav", "2>/dev/null")
cmd.Run()
}
}
func getCode(nc net.Conn, in string, tag string) (string, bool){
var out string = ""
yn := false
fmt.Fprintf(nc, in)
br := bufio.NewReader(nc)
for {
line, err := br.ReadString('\n')
if err==io.EOF {
break
}
out += line
if strings.Contains(line, tag + " OK"){
yn = true
break
}
if strings.Contains(line, tag + " NO"){
yn = false
break
}
if strings.Contains(line, tag + " BAD"){
yn = false
break
}
}
return out, yn
}
|
package bindings
import (
"bytes"
"path/filepath"
"reflect"
"os"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
)
// PullImage pulls the given image and returns the daemon's streamed pull
// output plus the name of the client implementation that performed it.
func (d *DockerClient) PullImage(nameOrID string) (*PullImageReport, error) {
	reader, err := d.Client.ImagePull(d.Context, nameOrID, types.ImagePullOptions{})
	if err != nil {
		return nil, err
	}
	defer reader.Close()

	buffer := new(bytes.Buffer)
	// Drain the progress stream. A pull can fail mid-stream, so the read
	// error must not be ignored (previously it was discarded).
	if _, err := buffer.ReadFrom(reader); err != nil {
		return nil, err
	}
	return &PullImageReport{
		Output: buffer.String(),
		Tool:   reflect.TypeOf(d).String(),
	}, nil
}
// TODO: allow user to change directory to store archives via env variable.
// Also, add "/tmp" as default image path
// SaveImage exports the given image to a tarball on disk and reports the
// file name, directory and absolute path it was written to.
func (d *DockerClient) SaveImage(nameOrID string) (*SaveImageReport, error) {
	response := &SaveImageReport{
		Filename:  getTarballName(nameOrID),
		Directory: imagePullPath(),
	}
	response.AbsPath = filepath.Join(response.Directory, response.Filename)

	// Ask the daemon for the image stream before creating the output file,
	// so a failed save does not leave an empty tarball behind (the old
	// order created the file first).
	reader, err := d.Client.ImageSave(d.Context, []string{nameOrID})
	if err != nil {
		return nil, err
	}
	defer reader.Close()

	outfile, err := os.Create(response.AbsPath)
	if err != nil {
		return nil, err
	}
	defer outfile.Close()

	// Stream the tarball to disk; a truncated copy must surface as an
	// error rather than being silently ignored.
	if _, err := outfile.ReadFrom(reader); err != nil {
		return nil, err
	}
	return response, nil
}
// InspectImage pulls the image (ensuring it exists locally) and returns the
// daemon's inspect data for it.
func (d *DockerClient) InspectImage(nameOrID string) (*InspectImageReport, error) {
	if _, err := d.PullImage(nameOrID); err != nil {
		return nil, err
	}
	inspectData, _, err := d.Client.ImageInspectWithRaw(d.Context, nameOrID)
	if err != nil {
		return nil, err
	}
	return dockerInspectImageReport(inspectData), nil
}
// ListImages returns a report describing every image known to the daemon.
func (d *DockerClient) ListImages() (*ListImageReport, error) {
	summaries, err := d.Client.ImageList(d.Context, types.ImageListOptions{})
	if err != nil {
		return nil, err
	}
	return dockerListImageReport(summaries), nil
}
// RemoveImage removes the given image and returns the IDs of the layers the
// daemon reports as deleted.
func (d *DockerClient) RemoveImage(nameOrID string) (*RemoveImageReport, error) {
	responses, err := d.Client.ImageRemove(d.Context, nameOrID, types.ImageRemoveOptions{})
	if err != nil {
		return nil, err
	}
	var deleted []string
	for _, response := range responses {
		// Untagged-only entries carry an empty Deleted field; skip them so
		// the report holds only real IDs (previously "" entries leaked in).
		if response.Deleted != "" {
			deleted = append(deleted, response.Deleted)
		}
	}
	return &RemoveImageReport{
		IDs: deleted,
	}, nil
}
// RunContainer pulls the image, creates a container from it (applying any
// tty/entrypoint/cmd overrides in options) and starts it, returning the new
// container's ID.
func (d *DockerClient) RunContainer(nameOrID string, options RunOptions) (*RunContainerReport, error) {
	if _, err := d.PullImage(nameOrID); err != nil {
		return nil, err
	}

	spec := &container.Config{Image: nameOrID}
	if !reflect.DeepEqual(options, RunOptions{}) {
		spec.Tty = options.Tty
		if len(options.Entrypoint) > 0 {
			spec.Entrypoint = options.Entrypoint
		}
		if len(options.Cmd) > 0 {
			spec.Cmd = options.Cmd
		}
	}

	created, err := d.Client.ContainerCreate(d.Context, spec, nil, nil, nil, "")
	if err != nil {
		return nil, err
	}
	if err := d.Client.ContainerStart(d.Context, created.ID, types.ContainerStartOptions{}); err != nil {
		return nil, err
	}
	return &RunContainerReport{ID: created.ID}, nil
}
// InspectContainer returns the daemon's inspect data for the container.
func (d *DockerClient) InspectContainer(nameOrID string) (*InspectContainerReport, error) {
	inspectData, err := d.Client.ContainerInspect(d.Context, nameOrID)
	if err != nil {
		return nil, err
	}
	return dockerInspectContainerReport(inspectData), nil
}
// ListContainers returns a report of the containers the daemon knows about.
func (d *DockerClient) ListContainers() (*ListContainerReport, error) {
	summaries, err := d.Client.ContainerList(d.Context, types.ContainerListOptions{})
	if err != nil {
		return nil, err
	}
	return dockerListContainerReport(summaries), nil
}
// RemoveContainer force-removes the given container.
func (d *DockerClient) RemoveContainer(nameOrID string) error {
	opts := types.ContainerRemoveOptions{Force: true}
	return d.Client.ContainerRemove(d.Context, nameOrID, opts)
}
|
// Copyright 2014 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package batcheval
import (
"context"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/spanset"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
)
func init() {
	// ResolveIntent is a read-write command; declareKeysResolveIntent
	// declares the latches it needs before evaluation.
	RegisterReadWriteCommand(roachpb.ResolveIntent, declareKeysResolveIntent, ResolveIntent)
}
// declareKeysResolveIntentCombined declares the latch spans required to
// resolve intents, shared between the point (ResolveIntentRequest) and
// ranged (ResolveIntentRangeRequest) variants.
func declareKeysResolveIntentCombined(
	rs ImmutableRangeState, req roachpb.Request, latchSpans *spanset.SpanSet,
) {
	var status roachpb.TransactionStatus
	var txnID uuid.UUID
	var minTxnTS hlc.Timestamp
	// Both request variants carry the same intent-transaction fields; pull
	// out the ones the latch declaration depends on.
	switch t := req.(type) {
	case *roachpb.ResolveIntentRequest:
		status = t.Status
		txnID = t.IntentTxn.ID
		minTxnTS = t.IntentTxn.MinTimestamp
	case *roachpb.ResolveIntentRangeRequest:
		status = t.Status
		txnID = t.IntentTxn.ID
		minTxnTS = t.IntentTxn.MinTimestamp
	}
	// Latch the request's span read-write at the transaction's minimum
	// timestamp.
	latchSpans.AddMVCC(spanset.SpanReadWrite, req.Header().Span(), minTxnTS)
	if status == roachpb.ABORTED {
		// We don't always write to the abort span when resolving an ABORTED
		// intent, but we can't tell whether we will or not ahead of time.
		latchSpans.AddNonMVCC(spanset.SpanReadWrite, roachpb.Span{Key: keys.AbortSpanKey(rs.GetRangeID(), txnID)})
	}
}
// declareKeysResolveIntent is the latch declaration for ResolveIntent; it
// simply defers to the combined point/range helper.
func declareKeysResolveIntent(
	rs ImmutableRangeState, _ roachpb.Header, req roachpb.Request, latchSpans, _ *spanset.SpanSet,
) {
	declareKeysResolveIntentCombined(rs, req, latchSpans)
}
// resolveToMetricType maps an intent-resolution outcome onto the metrics
// counter it should bump: poison or abort for ABORTED transactions, commit
// for everything else.
func resolveToMetricType(status roachpb.TransactionStatus, poison bool) *result.Metrics {
	var typ result.Metrics
	switch {
	case status == roachpb.ABORTED && poison:
		typ.ResolvePoison = 1
	case status == roachpb.ABORTED:
		typ.ResolveAbort = 1
	default:
		typ.ResolveCommit = 1
	}
	return &typ
}
// ResolveIntent resolves a write intent from the specified key
// according to the status of the transaction which created it.
func ResolveIntent(
	ctx context.Context, readWriter storage.ReadWriter, cArgs CommandArgs, resp roachpb.Response,
) (result.Result, error) {
	args := cArgs.Args.(*roachpb.ResolveIntentRequest)
	h := cArgs.Header
	ms := cArgs.Stats

	// Intent resolution must not itself run inside a transaction.
	if h.Txn != nil {
		return result.Result{}, ErrTransactionUnsupported
	}

	update := args.AsLockUpdate()
	ok, err := storage.MVCCResolveWriteIntent(ctx, readWriter, ms, update)
	if err != nil {
		return result.Result{}, err
	}

	var res result.Result
	res.Local.ResolvedLocks = []roachpb.LockUpdate{update}
	// Record which flavor of resolution this was (commit/abort/poison).
	res.Local.Metrics = resolveToMetricType(args.Status, args.Poison)

	if WriteAbortSpanOnResolve(args.Status, args.Poison, ok) {
		if err := UpdateAbortSpan(ctx, cArgs.EvalCtx, readWriter, ms, args.IntentTxn, args.Poison); err != nil {
			return result.Result{}, err
		}
	}
	return res, nil
}
|
package http
import (
"bytes"
"encoding/json"
"io"
"log"
"net/http"
"net/url"
"github.com/BestPrice/backend/bp"
"github.com/gorilla/mux"
)
type handlerFunc func(rw http.ResponseWriter, req *http.Request) error
type statusError struct {
error
status int
}
type errorHandler handlerFunc
func (h errorHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
if err := h(rw, req); err != nil {
log.Println(err)
switch err.(type) {
case statusError:
default:
code := http.StatusInternalServerError
http.Error(rw, http.StatusText(code), code)
}
}
}
type accessControlHandler struct {
http.Handler
}
func (h accessControlHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
if origin := req.Header.Get("Origin"); origin != "" {
rw.Header().Set("Access-Control-Allow-Origin", origin)
rw.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
rw.Header().Set("Access-Control-Allow-Headers",
"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
}
// Stop here if its Preflighted OPTIONS request
if req.Method == "OPTIONS" {
return
}
h.Handler.ServeHTTP(rw, req)
}
// Handler is the BestPrice API router: a gorilla/mux router whose endpoint
// handlers delegate to the injected bp.Service.
type Handler struct {
	*mux.Router
	Service bp.Service
}
// NewHandler wires the BestPrice REST routes onto a fresh router and wraps
// the result in the CORS/preflight middleware.
func NewHandler(service bp.Service) http.Handler {
	h := &Handler{
		Router:  mux.NewRouter(),
		Service: service,
	}
	h.Handle("/categories", errorHandler(h.categories)).Methods(http.MethodGet)
	h.Handle("/chainstores", errorHandler(h.chainstores)).Methods(http.MethodGet)
	h.Handle("/products", errorHandler(h.products)).Methods(http.MethodGet)
	h.Handle("/stores", errorHandler(h.stores)).Methods(http.MethodGet)
	h.Handle("/shop", errorHandler(h.shop)).Methods(http.MethodPost)
	h.Handle("/api", errorHandler(h.api)).Methods(http.MethodGet)
	return &accessControlHandler{h}
}
func encodeJSON(w io.Writer, v interface{}) error {
e := json.NewEncoder(w)
e.SetIndent("", "\t")
return e.Encode(v)
}
// categories writes the category tree as indented JSON.
func (h Handler) categories(w http.ResponseWriter, r *http.Request) error {
	cats, err := h.Service.Categories()
	if err != nil {
		return err
	}
	return encodeJSON(w, cats)
}
// chainstores writes all chain stores as indented JSON.
func (h Handler) chainstores(w http.ResponseWriter, r *http.Request) error {
	v, err := h.Service.Chainstores()
	if err != nil {
		// Propagate instead of merely logging: every sibling handler returns
		// its service error, and encoding a nil result here hid failures
		// behind a 200 response.
		return err
	}
	return encodeJSON(w, v)
}
// products writes the products matching the optional "category" and
// "search" query parameters as indented JSON.
func (h Handler) products(w http.ResponseWriter, r *http.Request) error {
	query := r.URL.Query()
	phrase, err := url.QueryUnescape(query.Get("search"))
	if err != nil {
		return err
	}
	// An unparsable category degrades to the zero ID (error ignored, as
	// before).
	category, _ := bp.NewID(query.Get("category"))
	items, err := h.Service.Products(category, phrase)
	if err != nil {
		return err
	}
	return encodeJSON(w, items)
}
// stores writes all stores as indented JSON.
func (h Handler) stores(w http.ResponseWriter, r *http.Request) error {
	list, err := h.Service.Stores()
	if err != nil {
		return err
	}
	return encodeJSON(w, list)
}
// shop decodes a shopping request, validates it and writes the computed
// shop result. Validation and service failures are reported inside the JSON
// body (with a 200 status) rather than as transport errors.
func (h Handler) shop(w http.ResponseWriter, r *http.Request) error {
	var req bp.ShopRequest
	defer r.Body.Close()
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		return err
	}
	if err := req.Valid(); err != nil {
		return encodeJSON(w, bp.Shop{Error: err.Error()})
	}
	shop, err := h.Service.Shop(&req)
	if err != nil {
		return encodeJSON(w, bp.Shop{Error: err.Error()})
	}
	return encodeJSON(w, shop)
}
// api writes a plain-text cheat sheet of the available endpoints, each
// followed by an example JSON payload built from zero values.
func (h Handler) api(w http.ResponseWriter, r *http.Request) error {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetIndent("", "\t")

	buf.WriteString("GET /categories\n")
	enc.Encode([]bp.Category{
		{Subcategories: []bp.Category{{}}},
		{},
	})

	buf.WriteString("\n\nGET /chainstores\n")
	enc.Encode([]bp.Chainstore{{}, {}})

	buf.WriteString("\n\nGET /products?category=uuid;search=string\n")
	enc.Encode([]bp.Product{{}, {}})

	buf.WriteString("\n\nGET /stores\n")
	enc.Encode([]bp.Store{{}, {}})

	buf.WriteString("\n\nPOST /shop\n")
	enc.Encode(bp.ShopRequest{
		Products: []bp.ShopRequestProduct{
			{ID: bp.RandID(), Count: 1},
			{ID: bp.RandID(), Count: 1},
		},
		UserPreference: bp.UserPreference{
			IDs:       []bp.ID{bp.RandID(), bp.RandID()},
			MaxStores: 3,
		},
	})

	_, err := buf.WriteTo(w)
	return err
}
|
package main
import (
"errors"
"fmt"
"os"
"path/filepath"
"github.com/768bit/promethium/lib/config"
"github.com/768bit/vutils"
"github.com/urfave/cli/v2"
)
// InstallCommand installs promethium onto the host: it optionally generates
// the daemon config, copies the running binary to /usr/bin/promethium and
// installs a systemd service unit. It must be run as root.
//
// NOTE(review): the "ls" alias is an odd choice for an install command —
// confirm it is not a copy-paste from a list command.
var InstallCommand = cli.Command{
	Name:    "install",
	Aliases: []string{"ls"},
	Usage:   "Install promethium to your system",
	Flags: []cli.Flag{
		&cli.BoolFlag{
			Name: "binary-only",
		},
		&cli.StringFlag{
			Name:    "install-directory",
			Aliases: []string{"d"},
			Value:   "/opt/promethium",
		},
		&cli.StringFlag{
			Name:    "config-directory",
			Aliases: []string{"c"},
			Value:   config.PROMETHIUM_CONFIG_DIR,
		},
		&cli.StringFlag{
			Name:    "socket-path",
			Aliases: []string{"s"},
			Value:   config.PROMETHIUM_SOCKET_PATH,
		},
		&cli.StringFlag{
			Name:    "daemon-user",
			Aliases: []string{"u"},
			Value:   "promethium",
		},
		&cli.StringFlag{
			Name:    "daemon-group",
			Aliases: []string{"g"},
			Value:   "promethium",
		},
		&cli.StringFlag{
			Name:    "jail-user",
			Aliases: []string{"j"},
			Value:   "promethium_jail",
		},
		&cli.StringFlag{
			Name:    "jail-group",
			Aliases: []string{"r"},
			Value:   "promethium_jail",
		},
		&cli.BoolFlag{
			Name:  "enable-http",
			Value: false,
			Usage: "Set this flag to enable setting --http-bind-address, -b and --http-bind-port, -p",
		},
		&cli.StringFlag{
			Name:    "http-bind-address, b",
			Aliases: []string{"b"},
			Value:   "0.0.0.0",
			Usage:   "Can only be set when --enable-http flag is set",
		},
		&cli.IntFlag{
			Name:    "http-bind-port, p",
			Aliases: []string{"p"},
			Value:   8921,
			Usage:   "Can only be set when --enable-http flag is set",
		},
	},
	Action: func(c *cli.Context) error {
		if os.Getuid() != 0 {
			return errors.New("Unable to run installation - this needs to be run as root using sudo or from a root shell.")
		}
		configPath := c.String("config-directory")
		installPath := c.String("install-directory")
		socketPath := c.String("socket-path")
		daemonUser := c.String("daemon-user")
		// Bug fixes: daemon-group previously read the "daemon-user" flag and
		// jail-user previously read the "jail-group" flag, so custom values
		// for those flags were silently ignored.
		daemonGroup := c.String("daemon-group")
		jailUser := c.String("jail-user")
		jailGroup := c.String("jail-group")
		enableHttp := c.Bool("enable-http")
		httpAddr := c.String("http-bind-address")
		httpPort := c.Int("http-bind-port")
		fullDaemonConfigPath := filepath.Join(configPath, "daemon.json")
		if !c.Bool("binary-only") {
			//install everything needed for the service to run...
			//this will need to be run with appropriate privs and will install the service, the config etc...
			//will also check group membership for certain elements...
			//first of all gen the config...
			if vutils.Files.CheckPathExists(fullDaemonConfigPath) {
				fmt.Println("Unable to generate Promethium daemon config as it already exists")
			} else {
				//gen and save config...
				err := config.NewPromethiumDaemonConfig(configPath, installPath, socketPath, daemonUser, daemonGroup, jailUser, jailGroup, enableHttp, uint(httpPort), httpAddr, false, 0, "", "", "", "")
				if err != nil {
					return err
				}
			}
		}
		//now load the config...
		conf, err := config.LoadPromethiumDaemonConfigAtPath(configPath)
		if err != nil {
			return err
		}
		destExecPath := "/usr/bin/promethium"
		//install *this* binary on the path /usr/bin/promethium
		ex, err := os.Executable()
		if err != nil {
			return err
		}
		// NOTE(review): vutils.Files.Copy's error is still unchecked here —
		// confirm its signature and handle the failure.
		vutils.Files.Copy(ex, destExecPath)
		if err := os.Chmod(destExecPath, 0755); err != nil {
			return err
		}
		//now we have valid config and binary in path install service...
		return config.InstallServiceUnit("/etc/systemd/system", "promethium.service", destExecPath, configPath, conf.User, conf.Group, false, false)
	},
}
|
package solutions
import (
"strings"
)
// wordPattern reports whether str follows pattern: there must be a
// bijection between the letters of pattern and the space-separated words of
// str (LeetCode 290).
func wordPattern(pattern string, str string) bool {
	words := strings.Split(str, " ")
	if len(pattern) != len(words) {
		return false
	}
	charToWord := make(map[byte]string, len(pattern))
	wordToChar := make(map[string]byte, len(words))
	for i := 0; i < len(pattern); i++ {
		ch, word := pattern[i], words[i]
		// Both directions of the mapping must stay consistent.
		if prev, seen := charToWord[ch]; seen && prev != word {
			return false
		}
		if prev, seen := wordToChar[word]; seen && prev != ch {
			return false
		}
		charToWord[ch] = word
		wordToChar[word] = ch
	}
	return true
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package feedback
import (
"context"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/feedbackapp"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/testing"
)
// defaultEmailName is the email value this test expects to be displayed and
// selected by default in the Feedback app's email dropdown for the fixture.
const defaultEmailName = "user email"
func init() {
	// Register the test with Tast; the fixture supplies a logged-in Chrome
	// session with the OS Feedback app available.
	testing.AddTest(&testing.Test{
		Func:         VerifyUserEmailIsDisplayedAndSelectedByDefault,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Verify user email is displayed and selected by default",
		Contacts: []string{
			"wangdanny@google.com",
			"zhangwenyu@google.com",
			"xiangdongkong@google.com",
			"cros-feedback-app@google.com",
		},
		Fixture:      "chromeLoggedInWithOsFeedback",
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Timeout:      3 * time.Minute,
	})
}
// VerifyUserEmailIsDisplayedAndSelectedByDefault verifies user email is
// displayed and selected by default.
func VerifyUserEmailIsDisplayedAndSelectedByDefault(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(*chrome.Chrome)

	// Reserve a few seconds on the original context so the faillog dump can
	// still run if the test body exhausts its deadline.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()

	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to connect to Test API: ", err)
	}
	defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr,
		"ui_dump")

	ui := uiauto.New(tconn).WithTimeout(20 * time.Second)

	// Launch feedback app and go to share data page.
	feedbackRootNode, err := feedbackapp.LaunchAndGoToShareDataPage(ctx, tconn)
	if err != nil {
		s.Fatal("Failed to launch feedback app and go to share data page: ", err)
	}

	// Verify user email is displayed by default.
	emailDropdown := nodewith.Name("Select email").Role(
		role.ListBox).Ancestor(feedbackRootNode)
	emailDropdownInfo, err := ui.Info(ctx, emailDropdown)
	if err != nil {
		s.Fatal("Failed to get email dropdown info: ", err)
	}
	if emailDropdownInfo.Value != defaultEmailName {
		s.Fatal("Failed to verify user email is displayed by default")
	}

	// Verify user email is selected by default. Clicking the dropdown makes
	// its options appear in the UI tree.
	userEmail := nodewith.Name(defaultEmailName).Role(role.ListBoxOption)
	if err := ui.LeftClickUntil(emailDropdown, ui.WithTimeout(
		2*time.Second).WaitUntilExists(userEmail))(ctx); err != nil {
		s.Fatal("Failed to get user email: ", err)
	}
	userEmailInfo, err := ui.Info(ctx, userEmail)
	if err != nil {
		s.Fatal("Failed to get user email info: ", err)
	}
	if !userEmailInfo.Selected {
		s.Fatal("Failed to verify user email is selected by default")
	}
}
|
package leetcode
// findClosest returns the smallest index distance between any occurrence of
// word1 and any occurrence of word2 in words.
//
// It tracks the most recent index of each word; the ±666666 sentinels keep
// the initial distance larger than any real one (the problem constrains
// len(words) far below that).
func findClosest(words []string, word1, word2 string) int {
	const max = 666666
	ans, idx1, idx2 := max, max, -max
	for idx, word := range words {
		if word == word1 {
			idx1 = idx
		} else if word == word2 {
			idx2 = idx
		}
		ans = min(ans, abs(idx1-idx2))
	}
	return ans
}

// abs returns |a| via the branch-free two's-complement identity
// (a XOR sign) - sign, where sign = a>>63 is all ones iff a < 0.
func abs(a int) int {
	b := a >> 63
	return (a ^ b) - b
}

// min returns the smaller of a and b.
//
// The previous branch-free form `a + (b-a)>>31&(b-a)` implicitly assumed
// |b-a| < 2^31; on 64-bit ints it returns wrong results for larger gaps, so
// a plain comparison is used instead.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package geomfn
import (
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/geo"
"github.com/cockroachdb/cockroach/pkg/geo/geopb"
"github.com/stretchr/testify/require"
)
// TestCollect exercises Collect over pairs of geometries: same-type inputs
// should collapse into the matching MULTI* type, while mixed or already
// MULTI*/collection inputs should produce a GEOMETRYCOLLECTION. It also
// checks the inputs' SRID is carried through to the result.
func TestCollect(t *testing.T) {
	testCases := []struct {
		wkt1     string
		wkt2     string
		expected string
	}{
		{"POINT EMPTY", "POINT EMPTY", "MULTIPOINT (EMPTY, EMPTY)"},
		{"POINT (1 1)", "POINT EMPTY", "MULTIPOINT (1 1, EMPTY)"},
		{"POINT EMPTY", "POINT (1 1)", "MULTIPOINT (EMPTY, 1 1)"},
		{"POINT (1 1)", "POINT (1 1)", "MULTIPOINT (1 1, 1 1)"},
		{"POINT (1 1)", "POINT (2 2)", "MULTIPOINT (1 1, 2 2)"},
		{"MULTIPOINT EMPTY", "MULTIPOINT EMPTY", "GEOMETRYCOLLECTION (MULTIPOINT EMPTY, MULTIPOINT EMPTY)"},
		{
			"MULTIPOINT (1 1, 2 2)", "MULTIPOINT (3 3, 4 4)",
			"GEOMETRYCOLLECTION (MULTIPOINT (1 1, 2 2), MULTIPOINT (3 3, 4 4))",
		},
		{"LINESTRING EMPTY", "LINESTRING EMPTY", "MULTILINESTRING (EMPTY, EMPTY)"},
		{"LINESTRING (1 1, 2 2)", "LINESTRING (3 3, 4 4)", "MULTILINESTRING ((1 1, 2 2), (3 3, 4 4))"},
		{
			"MULTILINESTRING EMPTY", "MULTILINESTRING EMPTY",
			"GEOMETRYCOLLECTION (MULTILINESTRING EMPTY, MULTILINESTRING EMPTY)",
		},
		{
			"MULTILINESTRING ((1 1, 2 2), (3 3, 4 4))", "MULTILINESTRING ((5 5, 6 6), (7 7, 8 8))",
			"GEOMETRYCOLLECTION (MULTILINESTRING ((1 1, 2 2), (3 3, 4 4)), MULTILINESTRING ((5 5, 6 6), (7 7, 8 8)))",
		},
		{"POLYGON EMPTY", "POLYGON EMPTY", "MULTIPOLYGON (EMPTY, EMPTY)"},
		{
			"POLYGON ((1 2, 2 3, 3 4, 1 2))", "POLYGON ((4 5, 5 6, 6 7, 4 5))",
			"MULTIPOLYGON (((1 2, 2 3, 3 4, 1 2)), ((4 5, 5 6, 6 7, 4 5)))",
		},
		{
			"MULTIPOLYGON EMPTY", "MULTIPOLYGON EMPTY",
			"GEOMETRYCOLLECTION (MULTIPOLYGON EMPTY, MULTIPOLYGON EMPTY)",
		},
		{
			"MULTIPOLYGON (((1 2, 2 3, 3 4, 1 2)), ((2 3, 3 4, 4 5, 2 3)))",
			"MULTIPOLYGON (((3 4, 4 5, 5 6, 3 4)), ((4 5, 5 6, 6 7, 4 5)))",
			"GEOMETRYCOLLECTION (MULTIPOLYGON (((1 2, 2 3, 3 4, 1 2)), ((2 3, 3 4, 4 5, 2 3))), MULTIPOLYGON (((3 4, 4 5, 5 6, 3 4)), ((4 5, 5 6, 6 7, 4 5))))",
		},
		{"POINT (1 1)", "LINESTRING (2 2, 3 3)", "GEOMETRYCOLLECTION (POINT (1 1), LINESTRING (2 2, 3 3))"},
		{"LINESTRING (1 1, 2 2)", "POLYGON ((1 2, 2 3, 3 4, 1 2))", "GEOMETRYCOLLECTION (LINESTRING (1 1, 2 2), POLYGON ((1 2, 2 3, 3 4, 1 2)))"},
		{
			"GEOMETRYCOLLECTION EMPTY", "GEOMETRYCOLLECTION EMPTY",
			"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION EMPTY, GEOMETRYCOLLECTION EMPTY)",
		},
		{
			"GEOMETRYCOLLECTION (POINT (1 1))", "GEOMETRYCOLLECTION (POINT (2 2))",
			"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (1 1)), GEOMETRYCOLLECTION (POINT (2 2)))",
		},
	}
	for _, tc := range testCases {
		t.Run(fmt.Sprintf("%v %v", tc.wkt1, tc.wkt2), func(t *testing.T) {
			// Parse both inputs with a non-default SRID so SRID propagation
			// is actually exercised.
			srid := geopb.SRID(4000)
			g1, err := geo.ParseGeometryFromEWKT(geopb.EWKT(tc.wkt1), srid, true)
			require.NoError(t, err)
			g2, err := geo.ParseGeometryFromEWKT(geopb.EWKT(tc.wkt2), srid, true)
			require.NoError(t, err)
			result, err := Collect(g1, g2)
			require.NoError(t, err)
			wkt, err := geo.SpatialObjectToWKT(result.SpatialObject(), 0)
			require.NoError(t, err)
			require.EqualValues(t, tc.expected, wkt)
			require.EqualValues(t, srid, result.SRID())
		})
	}
}
// TestCollectionExtract checks that CollectionExtract pulls out only the
// requested shape type (point/linestring/polygon), flattening nested
// collections and preserving duplicates, and that it rejects shape types
// that are not one of the three extractable kinds.
func TestCollectionExtract(t *testing.T) {
	// A nested collection containing every extractable shape type, with
	// duplicated coordinates, to verify recursive flattening keeps dupes.
	mixedWithDupes := `GEOMETRYCOLLECTION(
		POINT (1 1),
		MULTIPOINT (2 2, 3 3),
		LINESTRING (1 1, 2 2),
		MULTILINESTRING ((3 3, 4 4), (5 5, 6 6)),
		POLYGON ((1 2, 3 4, 5 6, 1 2)),
		GEOMETRYCOLLECTION(
			POINT (3 3),
			MULTIPOINT (4 4, 5 5),
			LINESTRING (3 3, 4 4),
			MULTILINESTRING ((7 7, 8 8), (9 9, 0 0)),
			MULTIPOLYGON (((1 2, 3 4, 5 6, 1 2)), ((3 4, 4 5, 5 6, 3 4)))
		)
	)`
	testCases := []struct {
		wkt       string
		shapeType geopb.ShapeType
		expected  string
	}{
		{"POINT EMPTY", geopb.ShapeType_Point, "POINT EMPTY"},
		{"POINT EMPTY", geopb.ShapeType_LineString, "LINESTRING EMPTY"},
		{"POINT EMPTY", geopb.ShapeType_Polygon, "POLYGON EMPTY"},
		{"POINT (1 2)", geopb.ShapeType_Point, "POINT (1 2)"},
		{"POINT (1 2)", geopb.ShapeType_LineString, "LINESTRING EMPTY"},
		{"POINT (1 2)", geopb.ShapeType_Polygon, "POLYGON EMPTY"},
		{"LINESTRING EMPTY", geopb.ShapeType_Point, "POINT EMPTY"},
		{"LINESTRING EMPTY", geopb.ShapeType_LineString, "LINESTRING EMPTY"},
		{"LINESTRING EMPTY", geopb.ShapeType_Polygon, "POLYGON EMPTY"},
		{"LINESTRING (1 2, 3 4)", geopb.ShapeType_Point, "POINT EMPTY"},
		{"LINESTRING (1 2, 3 4)", geopb.ShapeType_LineString, "LINESTRING (1 2, 3 4)"},
		{"LINESTRING (1 2, 3 4)", geopb.ShapeType_Polygon, "POLYGON EMPTY"},
		{"POLYGON EMPTY", geopb.ShapeType_Point, "POINT EMPTY"},
		{"POLYGON EMPTY", geopb.ShapeType_LineString, "LINESTRING EMPTY"},
		{"POLYGON EMPTY", geopb.ShapeType_Polygon, "POLYGON EMPTY"},
		{"POLYGON ((1 2, 3 4, 5 6, 1 2))", geopb.ShapeType_Point, "POINT EMPTY"},
		{"POLYGON ((1 2, 3 4, 5 6, 1 2))", geopb.ShapeType_LineString, "LINESTRING EMPTY"},
		{"POLYGON ((1 2, 3 4, 5 6, 1 2))", geopb.ShapeType_Polygon, "POLYGON ((1 2, 3 4, 5 6, 1 2))"},
		{"MULTIPOINT EMPTY", geopb.ShapeType_Point, "MULTIPOINT EMPTY"},
		{"MULTIPOINT EMPTY", geopb.ShapeType_LineString, "MULTILINESTRING EMPTY"},
		{"MULTIPOINT EMPTY", geopb.ShapeType_Polygon, "MULTIPOLYGON EMPTY"},
		{"MULTIPOINT (1 2, 3 4)", geopb.ShapeType_Point, "MULTIPOINT (1 2, 3 4)"},
		{"MULTIPOINT (1 2, 3 4)", geopb.ShapeType_LineString, "MULTILINESTRING EMPTY"},
		{"MULTIPOINT (1 2, 3 4)", geopb.ShapeType_Polygon, "MULTIPOLYGON EMPTY"},
		{"MULTILINESTRING EMPTY", geopb.ShapeType_Point, "MULTIPOINT EMPTY"},
		{"MULTILINESTRING EMPTY", geopb.ShapeType_LineString, "MULTILINESTRING EMPTY"},
		{"MULTILINESTRING EMPTY", geopb.ShapeType_Polygon, "MULTIPOLYGON EMPTY"},
		{"MULTILINESTRING ((1 2, 3 4), (5 6, 7 8))", geopb.ShapeType_Point, "MULTIPOINT EMPTY"},
		{"MULTILINESTRING ((1 2, 3 4), (5 6, 7 8))", geopb.ShapeType_LineString, "MULTILINESTRING ((1 2, 3 4), (5 6, 7 8))"},
		{"MULTILINESTRING ((1 2, 3 4), (5 6, 7 8))", geopb.ShapeType_Polygon, "MULTIPOLYGON EMPTY"},
		{"MULTIPOLYGON EMPTY", geopb.ShapeType_Point, "MULTIPOINT EMPTY"},
		{"MULTIPOLYGON EMPTY", geopb.ShapeType_LineString, "MULTILINESTRING EMPTY"},
		{"MULTIPOLYGON EMPTY", geopb.ShapeType_Polygon, "MULTIPOLYGON EMPTY"},
		{"MULTIPOLYGON (((0 1, 2 3, 4 5, 0 1)), ((5 6, 7 8, 9 0, 5 6)))", geopb.ShapeType_Point, "MULTIPOINT EMPTY"},
		{"MULTIPOLYGON (((0 1, 2 3, 4 5, 0 1)), ((5 6, 7 8, 9 0, 5 6)))", geopb.ShapeType_LineString, "MULTILINESTRING EMPTY"},
		{"MULTIPOLYGON (((0 1, 2 3, 4 5, 0 1)), ((5 6, 7 8, 9 0, 5 6)))", geopb.ShapeType_Polygon, "MULTIPOLYGON (((0 1, 2 3, 4 5, 0 1)), ((5 6, 7 8, 9 0, 5 6)))"},
		{"GEOMETRYCOLLECTION EMPTY", geopb.ShapeType_Point, "MULTIPOINT EMPTY"},
		{"GEOMETRYCOLLECTION EMPTY", geopb.ShapeType_LineString, "MULTILINESTRING EMPTY"},
		{"GEOMETRYCOLLECTION EMPTY", geopb.ShapeType_Polygon, "MULTIPOLYGON EMPTY"},
		{"GEOMETRYCOLLECTION(GEOMETRYCOLLECTION EMPTY)", geopb.ShapeType_Point, "MULTIPOINT EMPTY"},
		{"GEOMETRYCOLLECTION(GEOMETRYCOLLECTION EMPTY)", geopb.ShapeType_LineString, "MULTILINESTRING EMPTY"},
		{"GEOMETRYCOLLECTION(GEOMETRYCOLLECTION EMPTY)", geopb.ShapeType_Polygon, "MULTIPOLYGON EMPTY"},
		{mixedWithDupes, geopb.ShapeType_Point, "MULTIPOINT (1 1, 2 2, 3 3, 3 3, 4 4, 5 5)"},
		{mixedWithDupes, geopb.ShapeType_LineString, "MULTILINESTRING ((1 1, 2 2), (3 3, 4 4), (5 5, 6 6), (3 3, 4 4), (7 7, 8 8), (9 9, 0 0))"},
		{mixedWithDupes, geopb.ShapeType_Polygon, "MULTIPOLYGON (((1 2, 3 4, 5 6, 1 2)), ((1 2, 3 4, 5 6, 1 2)), ((3 4, 4 5, 5 6, 3 4)))"},
	}
	// Only the three basic shape types are valid extraction targets;
	// everything else must error.
	errorTestCases := []struct {
		shapeType geopb.ShapeType
	}{
		{geopb.ShapeType_Unset},
		{geopb.ShapeType_MultiPoint},
		{geopb.ShapeType_MultiLineString},
		{geopb.ShapeType_MultiPolygon},
		{geopb.ShapeType_Geometry},
		{geopb.ShapeType_GeometryCollection},
	}
	for _, tc := range testCases {
		t.Run(tc.wkt, func(t *testing.T) {
			// Non-default SRID so SRID propagation is exercised too.
			srid := geopb.SRID(4000)
			g, err := geo.ParseGeometryFromEWKT(geopb.EWKT(tc.wkt), srid, true)
			require.NoError(t, err)
			multi, err := CollectionExtract(g, tc.shapeType)
			require.NoError(t, err)
			wkt, err := geo.SpatialObjectToWKT(multi.SpatialObject(), 0)
			require.NoError(t, err)
			require.EqualValues(t, tc.expected, wkt)
			require.EqualValues(t, srid, multi.SRID())
		})
	}
	t.Run("errors on wrong shape type", func(t *testing.T) {
		for _, tc := range errorTestCases {
			t.Run(tc.shapeType.String(), func(t *testing.T) {
				g, err := geo.ParseGeometry("POINT EMPTY")
				require.NoError(t, err)
				_, err = CollectionExtract(g, tc.shapeType)
				require.Error(t, err)
			})
		}
	})
}
// TestCollectionHomogenize verifies CollectionHomogenize: single-element
// multi-geometries collapse to their scalar type, nested and empty
// collections are flattened, mixed collections are regrouped by type,
// and the SRID of the input is preserved on the result.
func TestCollectionHomogenize(t *testing.T) {
	testCases := []struct {
		wkt      string // input geometry as WKT
		expected string // expected homogenized output as WKT
	}{
		{"POINT EMPTY", "POINT EMPTY"},
		{"POINT (1 2)", "POINT (1 2)"},
		{"MULTIPOINT EMPTY", "MULTIPOINT EMPTY"},
		{"MULTIPOINT (1 2)", "POINT (1 2)"},
		{"MULTIPOINT (1 2, 3 4)", "MULTIPOINT (1 2, 3 4)"},
		{"LINESTRING EMPTY", "LINESTRING EMPTY"},
		{"LINESTRING (1 2, 3 4)", "LINESTRING (1 2, 3 4)"},
		{"MULTILINESTRING EMPTY", "MULTILINESTRING EMPTY"},
		{"MULTILINESTRING ((1 2, 3 4))", "LINESTRING (1 2, 3 4)"},
		{"MULTILINESTRING ((1 2, 3 4), (5 6, 7 8))", "MULTILINESTRING ((1 2, 3 4), (5 6, 7 8))"},
		{"POLYGON EMPTY", "POLYGON EMPTY"},
		{"POLYGON ((1 2, 3 4, 5 6, 1 2))", "POLYGON ((1 2, 3 4, 5 6, 1 2))"},
		{"MULTIPOLYGON EMPTY", "MULTIPOLYGON EMPTY"},
		{"MULTIPOLYGON (((1 2, 3 4, 5 6, 1 2)))", "POLYGON ((1 2, 3 4, 5 6, 1 2))"},
		{"MULTIPOLYGON (((1 2, 3 4, 5 6, 1 2)), ((7 8, 9 0, 1 2, 7 8)))", "MULTIPOLYGON (((1 2, 3 4, 5 6, 1 2)), ((7 8, 9 0, 1 2, 7 8)))"},
		{"GEOMETRYCOLLECTION EMPTY", "GEOMETRYCOLLECTION EMPTY"},
		{"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION EMPTY)", "GEOMETRYCOLLECTION EMPTY"},
		{"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION(MULTIPOINT (1 1)))", "POINT (1 1)"},
		// Deeply nested mixed collection: elements are grouped by type.
		{
			`GEOMETRYCOLLECTION (
				LINESTRING(1 1, 2 2),
				POINT(1 1),
				GEOMETRYCOLLECTION(
					MULTIPOINT(2 2, 3 3),
					GEOMETRYCOLLECTION EMPTY,
					POINT(1 1),
					MULTIPOLYGON(((1 2, 2 3, 3 4, 1 2))),
					LINESTRING(3 3, 4 4),
					GEOMETRYCOLLECTION(
						POINT(4 4),
						POINT(5 5)
					)
				),
				MULTIPOINT EMPTY
			)`,
			"GEOMETRYCOLLECTION (MULTIPOINT (1 1, 2 2, 3 3, 1 1, 4 4, 5 5), MULTILINESTRING ((1 1, 2 2), (3 3, 4 4)), POLYGON ((1 2, 2 3, 3 4, 1 2)))",
		},
	}
	for _, tc := range testCases {
		t.Run(tc.wkt, func(t *testing.T) {
			// Non-default SRID so preservation is actually exercised.
			srid := geopb.SRID(4000)
			g, err := geo.ParseGeometryFromEWKT(geopb.EWKT(tc.wkt), srid, true)
			require.NoError(t, err)
			result, err := CollectionHomogenize(g)
			require.NoError(t, err)
			wkt, err := geo.SpatialObjectToWKT(result.SpatialObject(), 0)
			require.NoError(t, err)
			require.EqualValues(t, tc.expected, wkt)
			require.EqualValues(t, srid, result.SRID())
		})
	}
}
// TestForceCollection verifies ForceCollection: every geometry is
// wrapped in a GEOMETRYCOLLECTION, multi-types are exploded into their
// component geometries (empty multi-types become an empty collection),
// existing collections pass through, and the SRID is preserved.
func TestForceCollection(t *testing.T) {
	testCases := []struct {
		wkt      string // input geometry as WKT
		expected string // expected collection output as WKT
	}{
		{"POINT EMPTY", "GEOMETRYCOLLECTION (POINT EMPTY)"},
		{"POINT (1 2)", "GEOMETRYCOLLECTION (POINT (1 2))"},
		{"MULTIPOINT EMPTY", "GEOMETRYCOLLECTION EMPTY"},
		{"MULTIPOINT (1 2)", "GEOMETRYCOLLECTION (POINT (1 2))"},
		{"MULTIPOINT (1 2, 3 4)", "GEOMETRYCOLLECTION (POINT (1 2), POINT (3 4))"},
		{"LINESTRING EMPTY", "GEOMETRYCOLLECTION (LINESTRING EMPTY)"},
		{"LINESTRING (1 2, 3 4)", "GEOMETRYCOLLECTION (LINESTRING (1 2, 3 4))"},
		{"MULTILINESTRING EMPTY", "GEOMETRYCOLLECTION EMPTY"},
		{"MULTILINESTRING ((1 2, 3 4))", "GEOMETRYCOLLECTION (LINESTRING (1 2, 3 4))"},
		{
			"MULTILINESTRING ((1 2, 3 4), (5 6, 7 8))",
			"GEOMETRYCOLLECTION (LINESTRING (1 2, 3 4), LINESTRING (5 6, 7 8))",
		},
		{"POLYGON EMPTY", "GEOMETRYCOLLECTION (POLYGON EMPTY)"},
		{"POLYGON ((1 2, 3 4, 5 6, 1 2))", "GEOMETRYCOLLECTION (POLYGON ((1 2, 3 4, 5 6, 1 2)))"},
		{"MULTIPOLYGON EMPTY", "GEOMETRYCOLLECTION EMPTY"},
		{"MULTIPOLYGON (((1 2, 3 4, 5 6, 1 2)))", "GEOMETRYCOLLECTION (POLYGON ((1 2, 3 4, 5 6, 1 2)))"},
		{
			"MULTIPOLYGON (((1 2, 3 4, 5 6, 1 2)), ((7 8, 9 0, 1 2, 7 8)))",
			"GEOMETRYCOLLECTION (POLYGON ((1 2, 3 4, 5 6, 1 2)), POLYGON ((7 8, 9 0, 1 2, 7 8)))",
		},
		{"GEOMETRYCOLLECTION EMPTY", "GEOMETRYCOLLECTION EMPTY"},
		{"GEOMETRYCOLLECTION (MULTIPOINT (1 1, 2 2))", "GEOMETRYCOLLECTION (MULTIPOINT (1 1, 2 2))"},
		{"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION EMPTY)", "GEOMETRYCOLLECTION (GEOMETRYCOLLECTION EMPTY)"},
		{
			"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION(MULTIPOINT (1 1)))",
			"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (MULTIPOINT (1 1)))",
		},
	}
	for _, tc := range testCases {
		t.Run(tc.wkt, func(t *testing.T) {
			// Non-default SRID so preservation is actually exercised.
			srid := geopb.SRID(4000)
			g, err := geo.ParseGeometryFromEWKT(geopb.EWKT(tc.wkt), srid, true)
			require.NoError(t, err)
			result, err := ForceCollection(g)
			require.NoError(t, err)
			wkt, err := geo.SpatialObjectToWKT(result.SpatialObject(), 0)
			require.NoError(t, err)
			require.EqualValues(t, tc.expected, wkt)
			require.EqualValues(t, srid, result.SRID())
		})
	}
}
// TestMulti verifies Multi: scalar geometries are promoted to their
// multi-type counterpart (POINT -> MULTIPOINT etc.), existing
// multi-types and geometry collections pass through unchanged, and the
// SRID is preserved.
func TestMulti(t *testing.T) {
	testCases := []struct {
		wkt      string // input geometry as WKT
		expected string // expected multi-type output as WKT
	}{
		{"POINT EMPTY", "MULTIPOINT EMPTY"},
		{"POINT (1 2)", "MULTIPOINT (1 2)"},
		{"MULTIPOINT EMPTY", "MULTIPOINT EMPTY"},
		{"MULTIPOINT (1 2, 3 4)", "MULTIPOINT (1 2, 3 4)"},
		{"LINESTRING EMPTY", "MULTILINESTRING EMPTY"},
		{"LINESTRING (1 2, 3 4, 5 6)", "MULTILINESTRING ((1 2, 3 4, 5 6))"},
		{"MULTILINESTRING EMPTY", "MULTILINESTRING EMPTY"},
		{"MULTILINESTRING ((1 2, 3 4), (5 6, 7 8))", "MULTILINESTRING ((1 2, 3 4), (5 6, 7 8))"},
		{"POLYGON EMPTY", "MULTIPOLYGON EMPTY"},
		{"POLYGON ((1 2, 3 4, 5 6, 1 2))", "MULTIPOLYGON (((1 2, 3 4, 5 6, 1 2)))"},
		{"MULTIPOLYGON (((1 2, 3 4, 5 6, 1 2)), ((0 0, 1 1, 2 2, 0 0)))", "MULTIPOLYGON (((1 2, 3 4, 5 6, 1 2)), ((0 0, 1 1, 2 2, 0 0)))"},
		{"GEOMETRYCOLLECTION EMPTY", "GEOMETRYCOLLECTION EMPTY"},
		{"GEOMETRYCOLLECTION (POINT (1 2), LINESTRING (1 2, 3 4))", "GEOMETRYCOLLECTION (POINT (1 2), LINESTRING (1 2, 3 4))"},
		{"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (1 2), LINESTRING (1 2, 3 4)))", "GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (1 2), LINESTRING (1 2, 3 4)))"},
	}
	for _, tc := range testCases {
		t.Run(tc.wkt, func(t *testing.T) {
			// Non-default SRID so preservation is actually exercised.
			srid := geopb.SRID(4000)
			g, err := geo.ParseGeometryFromEWKT(geopb.EWKT(tc.wkt), srid, true)
			require.NoError(t, err)
			multi, err := Multi(g)
			require.NoError(t, err)
			wkt, err := geo.SpatialObjectToWKT(multi.SpatialObject(), 0)
			require.NoError(t, err)
			require.EqualValues(t, tc.expected, wkt)
			require.EqualValues(t, srid, multi.SRID())
		})
	}
}
|
package main
import (
"fmt"
"strings"
)
// StringHandler is one link in a chain-of-responsibility string
// pipeline: each handler transforms its input and hands the result to
// the next handler, if one has been installed.
type StringHandler interface {
	SetNext(h StringHandler)
	Process(s string) string
}

// LowerCaseHandler lower-cases its input before delegating downstream.
type LowerCaseHandler struct {
	next StringHandler
}

// SetNext installs the handler that runs after this one.
func (l *LowerCaseHandler) SetNext(h StringHandler) {
	l.next = h
}

// Process converts s to lower case and, when a next handler is set,
// forwards the lowered string to it.
func (l *LowerCaseHandler) Process(s string) string {
	lowered := strings.ToLower(s)
	if l.next == nil {
		return lowered
	}
	return l.next.Process(lowered)
}
// SpaceRemoval is a chain link that strips every space character from
// its input before delegating to the next handler, if any.
type SpaceRemoval struct {
	next StringHandler
}

// SetNext installs the handler that runs after this one.
func (s *SpaceRemoval) SetNext(h StringHandler) {
	s.next = h
}

// Process removes all spaces from st and forwards the result downstream.
// strings.ReplaceAll replaces the legacy strings.Replace(..., -1) form
// (idiomatic since Go 1.12; behavior is identical).
func (s *SpaceRemoval) Process(st string) string {
	st = strings.ReplaceAll(st, " ", "")
	if s.next != nil {
		st = s.next.Process(st)
	}
	return st
}
// main demonstrates the chain: lower-case first, then strip spaces,
// printing "thetitanic".
func main() {
	remover := &SpaceRemoval{}
	lower := &LowerCaseHandler{}
	lower.SetNext(remover)
	fmt.Println(lower.Process("THE titanic"))
}
|
package utils
//
// crypto.go
// Copyright (C) 2020 light <light@1870499383@qq.com>
//
// Distributed under terms of the MIT license.
//
import (
"bytes"
"crypto"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"errors"
)
// PKCS5Padding 密文填充
// PKCS5Padding pads text up to the next multiple of blockSize by
// appending N bytes, each holding the value N (PKCS#5/PKCS#7 scheme).
// When len(text) is already a multiple of blockSize a full extra block
// of padding is appended.
func PKCS5Padding(text []byte, blockSize int) []byte {
	padLen := blockSize - len(text)%blockSize
	pad := bytes.Repeat([]byte{byte(padLen)}, padLen)
	return append(text, pad...)
}
// PKCS5UnPadding 取消填充
func PKCS5UnPadding(origData []byte) ([]byte, bool) {
length := len(origData)
padding := int(origData[length-1])
if padding >= length {
return nil, false
}
return origData[:(length - padding)], true
}
// AesEncrypt aes 加密
// key: 16, 24, or 32 bytes to select
func AesEncrypt(orig string, key []byte) (string, error) {
key = PKCS5Padding(key, 32)[:32]
origData := []byte(orig)
block, err := aes.NewCipher(key)
if err != nil {
return "", err
}
blockSize := block.BlockSize() // 16
origData = PKCS5Padding(origData, blockSize)
blockMode := cipher.NewCBCEncrypter(block, key[:blockSize])
encrypted := make([]byte, len(origData))
blockMode.CryptBlocks(encrypted, origData)
return base64.StdEncoding.EncodeToString(encrypted), nil
}
// AesDecrypt aes解密
// key: 16, 24, or 32 bytes to select
func AesDecrypt(encrypted string, key []byte) (string, error) {
key = PKCS5Padding(key, 32)[:32]
cryptData, err := base64.StdEncoding.DecodeString(encrypted)
if err != nil {
return "", err
}
block, err := aes.NewCipher(key)
if err != nil {
return "", err
}
blockSize := block.BlockSize()
blockMode := cipher.NewCBCDecrypter(block, key[:blockSize])
origData := make([]byte, len(cryptData))
blockMode.CryptBlocks(origData, cryptData)
if origData, isRight := PKCS5UnPadding(origData); isRight {
return string(origData), nil
}
return "", errors.New("invalid key")
}
// rsa
func GetRsaKey(bits int) (public *rsa.PublicKey, private *rsa.PrivateKey, err error) {
private, err = rsa.GenerateKey(rand.Reader, bits)
if err != nil {
return
}
public = &private.PublicKey
return
}
func GetPublicStr(key *rsa.PublicKey) (string, error) {
der, err := x509.MarshalPKIXPublicKey(key)
if err != nil {
return "", err
}
publicBlock := &pem.Block{
Type: "PUBLIC KEY",
Bytes: der,
}
return string(pem.EncodeToMemory(publicBlock)), nil
}
func GetPrivateStr(key *rsa.PrivateKey) (string, error) {
derStream := x509.MarshalPKCS1PrivateKey(key)
priBlock := &pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: derStream,
}
return string(pem.EncodeToMemory(priBlock)), nil
}
func GetPublicFromStr(key string) (*rsa.PublicKey, error) {
//解密pem格式的公钥
block, _ := pem.Decode([]byte(key))
if block == nil {
return nil, errors.New("public key error")
}
// 解析公钥
pubInterface, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
return nil, err
}
// 类型断言
pub := pubInterface.(*rsa.PublicKey)
return pub, nil
}
func GetPrivateFromStr(key string) (*rsa.PrivateKey, error) {
block, _ := pem.Decode([]byte(key))
if block == nil {
return nil, errors.New("private key error")
}
//解析PKCS1格式的私钥
return x509.ParsePKCS1PrivateKey(block.Bytes)
}
// RsaEncode encrypts msg with RSA-OAEP (SHA-256) under the given public
// key and returns the ciphertext base64 encoded.
func RsaEncode(msg string, key *rsa.PublicKey) (string, error) {
	cipherBytes, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, key, []byte(msg), nil)
	if err != nil {
		return "", err
	}
	return ToBase64(cipherBytes), nil
}
// RsaDecode base64-decodes msg and decrypts it with RSA-OAEP (SHA-256)
// under the given private key.
func RsaDecode(msg string, key *rsa.PrivateKey) (string, error) {
	raw, err := FromBase64(msg)
	if err != nil {
		return "", err
	}
	// Decrypt via the PrivateKey.Decrypt interface with OAEP options.
	plain, err := key.Decrypt(nil, raw, &rsa.OAEPOptions{Hash: crypto.SHA256})
	return string(plain), err
}
// RsaSign signs the SHA-256 digest of msg with RSA-PSS and returns the
// signature base64 encoded.
func RsaSign(msg string, key *rsa.PrivateKey) (string, error) {
	digest := HashSha256Byte([]byte(msg))
	sig, err := rsa.SignPSS(rand.Reader, key, crypto.SHA256, digest, nil)
	if err != nil {
		return "", err
	}
	return ToBase64(sig), nil
}
// RsaCheckSign verifies a base64-encoded RSA-PSS signature over the
// SHA-256 digest of msg; it returns nil when the signature is valid.
func RsaCheckSign(msg string, sign string, key *rsa.PublicKey) error {
	raw, err := FromBase64(sign)
	if err != nil {
		return err
	}
	digest := HashSha256Byte([]byte(msg))
	return rsa.VerifyPSS(key, crypto.SHA256, digest, raw, nil)
}
|
package xconnect
import (
"fmt"
"io/ioutil"
"os"
"strings"
"gopkg.in/yaml.v2"
)
const extraPathSeparator = "/"
// XConnect represents the xconnect data section of a YAML document.
// See spec-xconnect.yaml.
// XConnect represents the xconnect data section of a YAML document.
// See spec-xconnect.yaml.
type XConnect struct {
	// Meta holds descriptive properties (name, version, owner, ...).
	Meta MetaProperties `yaml:"meta" json:"meta"`
	// Listen maps a logical name to each endpoint this service exposes.
	Listen map[string]ListenEntry `yaml:"listen" json:"listen"`
	// Connect maps a logical name to each endpoint this service consumes.
	Connect map[string]ConnectEntry `yaml:"connect" json:"connect"`
	// ExtraFields captures any unrecognized keys (YAML inline catch-all).
	ExtraFields map[string]interface{} `yaml:"-,inline"`
}
// find resolves a key path rooted at the xconnect section. The first
// key selects the sub-section ("meta", "listen", "connect", or an extra
// field); the remaining keys are resolved recursively.
//
// Improvement: listen/connect entries are now fetched with a direct map
// lookup instead of iterating every entry to compare keys.
func (x XConnect) find(keys []string) (interface{}, bool) {
	if len(keys) == 0 {
		return nil, false
	}
	switch keys[0] {
	case "meta":
		return x.Meta.find(keys[1:])
	case "listen":
		subkeys := keys[1:]
		if len(subkeys) == 0 {
			return nil, false
		}
		if entry, ok := x.Listen[subkeys[0]]; ok {
			return entry.find(subkeys[1:])
		}
		return nil, false
	case "connect":
		subkeys := keys[1:]
		if len(subkeys) == 0 {
			return nil, false
		}
		if entry, ok := x.Connect[subkeys[0]]; ok {
			return entry.find(subkeys[1:])
		}
		return nil, false
	default:
		return findInMap(keys, x.ExtraFields)
	}
}
// MetaProperties represents the meta element in the xconnect data section.
type MetaProperties struct {
Name string `yaml:"name,omitempty" json:"name,omitempty"`
Version string `yaml:"version,omitempty" json:"version,omitempty"`
// Operational expenditure, or owner
Opex string `yaml:"opex,omitempty" json:"opex,omitempty"`
Labels []string `yaml:"tags,omitempty" json:"tags,omitempty"`
ExtraFields map[string]interface{} `yaml:"-,inline"`
Kind string `yaml:"kind,omitempty" json:"kind,omitempty"`
}
// find resolves a key path within the meta section. The known scalar
// fields are returned directly; any other key falls through to the
// inline ExtraFields map. Labels is not addressable via this path API.
func (m MetaProperties) find(keys []string) (interface{}, bool) {
	if len(keys) == 0 {
		return nil, false
	}
	switch keys[0] {
	case "name":
		return m.Name, true
	case "version":
		return m.Version, true
	case "opex":
		return m.Opex, true
	case "kind":
		return m.Kind, true
	}
	return findInMap(keys, m.ExtraFields)
}
// Document is the root YAML element
type Document struct {
XConnect XConnect `yaml:"xconnect"`
ExtraFields map[string]interface{} `yaml:"-,inline"`
}
// MustString same as FindString but panics if not found. E.g xconnect/connect/db/url .
func (d Document) MustString(path string) string {
if v, err := d.FindString(path); err != nil {
panic(err)
} else {
return v
}
}
// FindString returns a string for a given slash path, e.g xconnect/connect/db/url .
func (d Document) FindString(path string) (string, error) {
return FindString(d, path)
}
// FindString returns a string for a given slash path, e.g xconnect/connect/db/url .
func FindString(f finder, path string) (string, error) {
keys := strings.Split(path, extraPathSeparator)
v, ok := f.find(keys)
if !ok {
return "", fmt.Errorf("unable to find string at [%s]", path)
}
if s, ok := v.(string); !ok {
return "", fmt.Errorf("warn: xconnect, value is not a string, but a %T for path %s\n", v, path)
} else {
return s, nil
}
}
// MustBool same as FindBool but panics if not found. E.g xconnect/listen/api/secure
func (d Document) MustBool(path string) bool {
if v, err := d.FindBool(path); err != nil {
panic(err)
} else {
return v
}
}
// FindBool returns a bool for a given slash path.
func (d Document) FindBool(path string) (bool, error) {
return FindBool(d, path)
}
// FindBool returns a bool for a given slash path.
//
// Fix: the type-mismatch error no longer carries a "warn:" prefix or a
// trailing newline (go vet flags fmt.Errorf messages ending in \n).
func FindBool(f finder, path string) (bool, error) {
	keys := strings.Split(path, extraPathSeparator)
	v, ok := f.find(keys)
	if !ok {
		return false, fmt.Errorf("unable to find bool at [%s]", path)
	}
	b, ok := v.(bool)
	if !ok {
		return false, fmt.Errorf("xconnect: value at [%s] is not a bool but a %T", path, v)
	}
	return b, nil
}
// MustInt same as FindInt but panics if not found. E.g xconnect/listen/api/port
func (d Document) MustInt(path string) int {
if v, err := d.FindInt(path); err != nil {
panic(err)
} else {
return v
}
}
// FindInt returns a integer for a given slash path, e.g xconnect/listen/api/port .
func (d Document) FindInt(path string) (int, error) {
return FindInt(d, path)
}
// FindInt returns a integer for a given slash path.
func FindInt(f finder, path string) (int, error) {
keys := strings.Split(path, extraPathSeparator)
v, ok := f.find(keys)
if !ok {
return 0, fmt.Errorf("unable to find int at [%s]", path)
}
if s, ok := v.(int); !ok {
return 0, fmt.Errorf("warn: xconnect, value is not a int, but a %T for path %s\n", v, path)
} else {
return s, nil
}
}
// find resolves the root of a key path: "xconnect" descends into the
// xconnect section; any other key is looked up in ExtraFields.
func (d Document) find(keys []string) (interface{}, bool) {
	if len(keys) == 0 {
		return nil, false
	}
	if keys[0] == "xconnect" {
		return d.XConnect.find(keys[1:])
	}
	return findInMap(keys, d.ExtraFields)
}
// GetConfig will first check the environment value at {envKey} to find the source of the confguration.
// If the environment value is not available (empty) then try reading the filename to get the configuration.
func GetConfig(envKey string, filename string) (Document, error) {
content := os.Getenv(envKey)
if len(content) == 0 {
return LoadConfig(filename)
}
var doc Document
err := yaml.Unmarshal([]byte(content), &doc)
if err != nil {
return Document{}, fmt.Errorf("unable to unmarshal YAML:%v", err)
}
return doc, nil
}
// LoadConfig returns the document containing the xconnect section.
func LoadConfig(filename string) (Document, error) {
content, err := ioutil.ReadFile(filename)
if err != nil {
return Document{}, fmt.Errorf("unable to read:%v", err)
}
var doc Document
err = yaml.Unmarshal(content, &doc)
if err != nil {
return Document{}, fmt.Errorf("unable to unmarshal YAML:%v", err)
}
return doc, nil
}
|
package migrations
import (
"github.com/jmoiron/sqlx"
)
// CreateAdminTable creates the `admin` table (username primary key plus
// free-form notes) inside the given transaction.
//
// Fix: the previous MustExec panicked on failure while the function
// unconditionally returned nil; the error is now propagated so the
// migration runner can handle it.
func CreateAdminTable(tx *sqlx.Tx) error {
	_, err := tx.Exec("CREATE TABLE `admin` (`username` varchar(255) NOT NULL,`notes` varchar(255), PRIMARY KEY(`username`))")
	return err
}
|
package main
import (
"context"
"echo-crud/internal/config"
"echo-crud/internal/handler/http"
"echo-crud/internal/repository"
"echo-crud/internal/service"
"fmt"
nethttp "net/http"
"os"
"os/signal"
"syscall"
"time"
"github.com/rs/zerolog/log"
"gorm.io/driver/postgres"
"gorm.io/gorm"
)
// main wires configuration, database, handlers and the HTTP server
// together, then blocks until a shutdown signal arrives.
func main() {
	log.Info().Msg("echo-crud-setting starting")
	cfg, err := config.NewConfig(".env")
	checkError(err)
	// tool.ErrorClient = setupErrorReporting(context.Background(), cfg)
	db := openDatabase(cfg)
	defer func() {
		// Fix: the old error branch called log.Fatal().Err(err) without a
		// terminating Msg/Send, so the zerolog event was never emitted,
		// and the following panic made Fatal-then-panic incoherent.
		sqlDB, err := db.DB()
		if err != nil {
			log.Error().Err(err).Msg("unable to access underlying sql.DB on shutdown")
			return
		}
		_ = sqlDB.Close()
	}()
	supplierHandler := buildSupplierHandler(db)
	transaksiHandler := buildTransaksiHandler(db)
	pelangganHandler := buildPelangganHandler(db)
	produkHandler := buildProdukHandler(db)
	pembayaranHandler := buildPembayaranHandler(db)
	detailTransaksiHandler := buildDetailTransaksiHandler(db)
	engine := http.NewGinEngine(supplierHandler, transaksiHandler, pelangganHandler, produkHandler, pembayaranHandler, detailTransaksiHandler, cfg.InternalConfig.Username, cfg.InternalConfig.Password)
	server := &nethttp.Server{
		Addr:    fmt.Sprintf(":%s", cfg.Port),
		Handler: engine,
	}
	// setGinMode(cfg.Env)
	runServer(server)
	waitForShutdown(server)
}
// runServer starts the HTTP server in a background goroutine so the
// graceful-shutdown handling in waitForShutdown is not blocked.
func runServer(srv *nethttp.Server) {
	// Initializing the server in a goroutine so that
	// it won't block the graceful shutdown handling below
	go func() {
		if err := srv.ListenAndServe(); err != nil && err != nethttp.ErrServerClosed {
			// Fix: a zerolog event must be terminated with Msg/Send;
			// the old log.Fatal().Err(err) logged nothing and never exited.
			log.Fatal().Err(err).Msg("http server failed")
		}
	}()
}
// waitForShutdown blocks until SIGINT/SIGTERM, then gives the server
// two seconds to finish in-flight requests before forcing exit.
func waitForShutdown(server *nethttp.Server) {
	// Wait for interrupt signal to gracefully shutdown the server with
	// a timeout of 5 seconds.
	// Fix: signal.Notify requires a buffered channel; with an unbuffered
	// one a signal delivered before the receive is silently dropped
	// (flagged by go vet).
	quit := make(chan os.Signal, 1)
	// kill (no param) default send syscall.SIGTERM
	// kill -2 is syscall.SIGINT
	// kill -9 is syscall.SIGKILL but can't be catch, so don't need add it
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
	<-quit
	log.Info().Msg("shutting down echo-crud")
	// The context is used to inform the server it has 2 seconds to finish
	// the request it is currently handling
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if err := server.Shutdown(ctx); err != nil {
		log.Fatal().Err(err).Msg("codelabs-service forced to shutdown")
	}
	log.Info().Msg("codelabs-service exiting")
}
// openDatabase opens a GORM Postgres connection built from the database
// settings in config; any failure aborts startup via checkError.
func openDatabase(config *config.Config) *gorm.DB {
	dsn := fmt.Sprintf(
		"host=%s port=%s user=%s password=%s dbname=%s sslmode=disable",
		config.Database.Host, config.Database.Port,
		config.Database.Username, config.Database.Password,
		config.Database.Name,
	)
	db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
	checkError(err)
	return db
}
func checkError(err error) {
if err != nil {
panic(err)
}
}
// buildSupplierHandler wires repository -> service -> HTTP handler for suppliers.
func buildSupplierHandler(db *gorm.DB) *http.SupplierHandler {
	repo := repository.NewSupplierRepository(db)
	supplierService := service.NewSupplierService(repo)
	return http.NewSupplierHandler(supplierService)
}

// buildTransaksiHandler wires repository -> service -> HTTP handler for transactions.
func buildTransaksiHandler(db *gorm.DB) *http.TransaksiHandler {
	repo := repository.NewTransaksiRepository(db)
	transaksiService := service.NewTransaksiService(repo)
	return http.NewTransaksiHandler(transaksiService)
}

// buildPelangganHandler wires repository -> service -> HTTP handler for customers.
func buildPelangganHandler(db *gorm.DB) *http.PelangganHandler {
	repo := repository.NewPelangganRepository(db)
	pelangganService := service.NewPelangganService(repo)
	return http.NewPelangganHandler(pelangganService)
}

// buildProdukHandler wires repository -> service -> HTTP handler for products.
func buildProdukHandler(db *gorm.DB) *http.ProdukHandler {
	repo := repository.NewProdukRepository(db)
	produkService := service.NewProdukService(repo)
	return http.NewProdukHandler(produkService)
}

// buildPembayaranHandler wires repository -> service -> HTTP handler for payments.
func buildPembayaranHandler(db *gorm.DB) *http.PembayaranHandler {
	repo := repository.NewPembayaranRepository(db)
	pembayaranService := service.NewPembayaranService(repo)
	return http.NewPembayaranHandler(pembayaranService)
}

// buildDetailTransaksiHandler wires repository -> service -> HTTP handler
// for transaction line items.
func buildDetailTransaksiHandler(db *gorm.DB) *http.DetailTransaksiHandler {
	repo := repository.NewDetailTransaksiRepository(db)
	detailTransaksiService := service.NewDetailTransaksiService(repo)
	return http.NewDetailTransaksiHandler(detailTransaksiService)
}
|
package main
import (
"bytes"
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"net/textproto"
"os"
"strings"
"github.com/sthulb/mime/multipart"
)
// quoteEscaper escapes backslashes and double quotes so a filename can
// be embedded safely inside a quoted MIME header parameter.
var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")

// escapeQuotes returns s with backslashes and double quotes escaped.
func escapeQuotes(s string) string {
	return quoteEscaper.Replace(s)
}
// File is one user-data part: its source path, MIME content type and
// raw contents.
type File struct {
	Path    string
	Type    string
	Content []byte
}

// Configuration mirrors the JSON config file; each cloud_init_parts
// entry is a [path, content-type] pair.
type Configuration struct {
	UserDataFiles [][]string `json:"cloud_init_parts"`
}
// buildUserData renders files as a multipart/mixed MIME document in the
// shape cloud-init expects and returns it as a string. A non-empty
// boundary is used verbatim so identical input yields identical output;
// otherwise the multipart writer picks a random one.
func buildUserData(files []File, boundary string) (string, error) {
	// Let's build the launch config userdata
	w := &bytes.Buffer{}
	mimeWriter := multipart.NewWriter(w)
	if boundary != "" {
		if err := mimeWriter.SetBoundary(boundary); err != nil {
			return "", err
		}
	}
	// Craft a header for our mime type
	fmt.Fprintf(w, "Content-Type: multipart/mixed; boundary=\"%s\"\r\n\r\n", mimeWriter.Boundary())
	for _, file := range files {
		fileParts := strings.Split(file.Path, "/")
		h := textproto.MIMEHeader{}
		h.Set("Content-Type", fmt.Sprintf("%s; charset=\"us-ascii\"", file.Type))
		h.Set("MIME-Version", "1.0")
		h.Set("Content-Transfer-Encoding", "7bit")
		// Only the base name of the path is used as the part's filename.
		h.Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, escapeQuotes(fileParts[len(fileParts)-1])))
		part, err := mimeWriter.CreatePart(h)
		if err != nil {
			return "", fmt.Errorf("Unable to create mime type: %v", err)
		}
		r := bytes.NewReader(file.Content)
		_, err = io.Copy(part, r)
		if err != nil {
			return "", fmt.Errorf("Unable to copy the data part: %v", err)
		}
	}
	mimeWriter.Close()
	return w.String(), nil
}
// main reads a JSON config listing [path, mime-type] pairs, assembles
// them into a multipart/mixed cloud-init user-data document and prints
// it, optionally base64 encoded.
//
// Fixes: the json.Unmarshal error was silently ignored (a malformed
// config produced empty output instead of a diagnostic), and a config
// entry with fewer than two elements would panic on file[1].
func main() {
	var (
		configFile    string
		encode        bool
		fixedBoundary bool
	)
	flag.StringVar(&configFile, "config", "<file>", "Config file containing paths and type of the userdata")
	flag.BoolVar(&encode, "encode", false, "Base64 encode the userdata")
	flag.BoolVar(&fixedBoundary, "fixedBoundary", false, "Use the same boundary so that same input yields consistent output")
	flag.Parse()
	//Do some validation
	if configFile == "<file>" {
		fmt.Println("No config supplied")
		os.Exit(1)
	}
	// Parse configuration
	config, err := ioutil.ReadFile(configFile)
	if err != nil {
		fmt.Printf("Unable to open config file %s: %v\n", configFile, err)
		os.Exit(1)
	}
	configuration := &Configuration{}
	if err := json.Unmarshal(config, configuration); err != nil {
		fmt.Printf("Unable to parse config file %s: %v\n", configFile, err)
		os.Exit(1)
	}
	files := make([]File, 0, len(configuration.UserDataFiles))
	// Construct the files list in config order.
	for _, file := range configuration.UserDataFiles {
		if len(file) < 2 {
			fmt.Printf("Invalid cloud_init_parts entry (want [path, type]): %v\n", file)
			os.Exit(1)
		}
		b, err := ioutil.ReadFile(file[0])
		if err != nil {
			fmt.Printf("Unable to open user data file: %s\n", file[0])
			os.Exit(1)
		}
		files = append(files, File{
			Content: b,
			Path:    file[0],
			Type:    file[1],
		})
	}
	var boundary string
	if fixedBoundary {
		// A fixed boundary keeps output reproducible for identical input.
		boundary = "MIMEBOUNDARY"
	}
	userdata, err := buildUserData(files, boundary)
	if err != nil {
		fmt.Printf("Error building userdata: %v", err)
		os.Exit(1)
	}
	if encode {
		userdata = base64.StdEncoding.EncodeToString([]byte(userdata))
	}
	fmt.Println(userdata)
}
|
package performance
import (
"context"
logrus "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"io/ioutil"
"os"
"testing"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/performance"
"github.com/vmware/govmomi/simulator"
"github.com/vmware/govmomi/view"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
)
// TestPerfCollector_parseConfigFile verifies that parseConfigFile keeps
// only metrics that are both known to the collector (present in
// metricsAvaliableByName) and at or below the configured collection
// level (2): "cpu.outoflevel" is level 3 and "not.considered" is
// unknown, leaving two host metrics and one vm metric.
func TestPerfCollector_parseConfigFile(t *testing.T) {
	c := PerfCollector{
		logger:                 logrus.New(),
		collectionLevel:        2,
		metricsAvaliableByID:   make(map[int32]string),
		metricsAvaliableByName: make(map[string]int32),
	}
	c.metricsAvaliableByName["cpu.coreUtilization.average"] = 1
	c.metricsAvaliableByName["cpu.demand.average"] = 2
	c.metricsAvaliableByName["cpu.outoflevel"] = 3
	content := []byte(`
host:
  level_1:
    - cpu.coreUtilization.average
    - not.considered
  level_2:
    - cpu.demand.average
vm:
  level_1:
    - cpu.demand.average
  level_3:
    - cpu.outoflevel
`)
	tmpfile, err := ioutil.TempFile("", "config")
	require.NoError(t, err)
	defer func() {
		err := os.Remove(tmpfile.Name())
		assert.NoError(t, err)
	}()
	_, err = tmpfile.Write(content)
	require.NoError(t, err)
	err = c.parseConfigFile(tmpfile.Name())
	require.NoError(t, err)
	tmpfile.Close()
	// - cpu.costop.summation is discarded since is not in c.metricsAvaliableByName
	assert.Len(t, c.MetricDefinition.Host, 2)
	assert.Len(t, c.MetricDefinition.VM, 1)
}
// TestPerfCollector_NewCollector exercises the full constructor against
// the vCenter simulator: the config is filtered against the metrics the
// (simulated) server advertises, batch sizes are parsed from their
// string arguments, and a Collect call for a single VM returns data.
func TestPerfCollector_NewCollector(t *testing.T) {
	content := []byte(`
host:
  level_1:
    - cpu.coreUtilization.average
    - metric.not.available
  level_2:
    - cpu.demand.average
vm:
  level_1:
    - metric.not.available1
    - cpu.demand.average
    - metric.not.available2
    #- commented
  level_3:
    - cpu.outoflevel
`)
	tmpfile, err := ioutil.TempFile("", "config")
	require.NoError(t, err)
	defer func() {
		err := os.Remove(tmpfile.Name())
		assert.NoError(t, err)
	}()
	_, err = tmpfile.Write(content)
	require.NoError(t, err)
	_, err, c := startVcSim(t)
	assert.NoError(t, err)
	pc, err := NewCollector(c, logrus.New(), tmpfile.Name(), false, 2, "100", "50")
	assert.NoError(t, err)
	tmpfile.Close()
	assert.Len(t, pc.MetricDefinition.Host, 2)
	assert.Len(t, pc.MetricDefinition.VM, 1)
	assert.Equal(t, 50, pc.batchSizePerfMetrics)
	assert.Equal(t, 100, pc.batchSizePerfEntities)
	// vm-87 is a VM reference known to exist in the simulator inventory.
	ref := types.ManagedObjectReference{Type: "VirtualMachine", Value: "vm-87"}
	metrics := pc.Collect([]types.ManagedObjectReference{ref}, pc.MetricDefinition.VM, RealTimeInterval)
	assert.Equal(t, 1, len(metrics), "we fetched events for 1 vm only")
	assert.Equal(t, 1, len(metrics[ref]), "we expect only one metric since only metrics with id 2 and 6 are defined for vms and only 2 is map in metricsAvaliableByID")
	assert.Greater(t, metrics[ref][0].Value, int64(0), "the value is not static, therefore we assume that a value grater then 0 is there")
}
// TestPerfMetricsEmptyPerfCollector checks that Collect degrades
// gracefully (empty result, no panic) when the collector has nil metric
// definitions and nil availability maps.
func TestPerfMetricsEmptyPerfCollector(t *testing.T) {
	ctx, err, c := startVcSim(t)
	assert.NoError(t, err)
	var vms []mo.VirtualMachine
	m := view.NewManager(c.Client)
	cv, err := m.CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{"VirtualMachine"}, true)
	assert.NoError(t, err)
	err = cv.Retrieve(ctx, []string{"VirtualMachine"}, []string{"name", "overallStatus"}, &vms)
	assert.NoError(t, err)
	var refSlice []types.ManagedObjectReference
	for _, vm := range vms {
		refSlice = append(refSlice, vm.Self)
	}
	p := PerfCollector{
		client:                 c,
		perfManager:            performance.NewManager(c.Client),
		logger:                 logrus.New(),
		MetricDefinition:       nil,
		metricsAvaliableByID:   nil,
		metricsAvaliableByName: nil,
		batchSizePerfEntities:  1,
		batchSizePerfMetrics:   1,
	}
	//no fail SEG/Fault expected
	metrics := p.Collect(refSlice, nil, RealTimeInterval)
	assert.Equal(t, map[types.ManagedObjectReference][]PerfMetric{}, metrics)
	ms := []types.PerfMetricId{{CounterId: 1, Instance: ""}, {CounterId: 2, Instance: ""}, {CounterId: 3, Instance: ""}, {CounterId: 4, Instance: ""}}
	metrics = p.Collect(refSlice, ms, RealTimeInterval)
	assert.Equal(t, map[types.ManagedObjectReference][]PerfMetric{}, metrics)
}
// startVcSim spins up an in-memory vCenter simulator populated with 51
// machines and returns a context, any setup error and a client connected
// to it. The (ctx, error, client) return order is kept for existing
// callers even though Go convention puts the error last.
//
// Fix: the error from govmomi.NewClient was silently discarded, so a
// connection failure surfaced later as a confusing nil-client panic; it
// is now returned to the caller.
func startVcSim(t *testing.T) (context.Context, error, *govmomi.Client) {
	ctx := context.Background()
	//SettingUp Simulator
	model := simulator.VPX()
	model.Machine = 51
	err := model.Create()
	assert.NoError(t, err)
	s := model.Service.NewServer()
	c, err := govmomi.NewClient(ctx, s.URL, true)
	return ctx, err, c
}
// TestPerfMetrics checks Collect against the simulator: with nil
// availability maps the result is empty, while with populated maps every
// requested VM appears in the result and only metrics whose counter IDs
// exist in metricsAvaliableByID are reported.
func TestPerfMetrics(t *testing.T) {
	ctx, err, c := startVcSim(t)
	assert.NoError(t, err)
	var vms []mo.VirtualMachine
	m := view.NewManager(c.Client)
	cv, err := m.CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{"VirtualMachine"}, true)
	assert.NoError(t, err)
	err = cv.Retrieve(ctx, []string{"VirtualMachine"}, []string{"name", "overallStatus"}, &vms)
	assert.NoError(t, err)
	var refSlice []types.ManagedObjectReference
	for _, vm := range vms {
		refSlice = append(refSlice, vm.Self)
	}
	p := PerfCollector{
		client:                 c,
		perfManager:            performance.NewManager(c.Client),
		logger:                 logrus.New(),
		MetricDefinition:       nil,
		metricsAvaliableByID:   nil,
		metricsAvaliableByName: nil,
		batchSizePerfEntities:  1,
		batchSizePerfMetrics:   1,
	}
	//no fail SEG/Fault expected
	metrics := p.Collect(refSlice, nil, RealTimeInterval)
	assert.Equal(t, map[types.ManagedObjectReference][]PerfMetric{}, metrics)
	//Please notice that only value for ID 2 and 6 is defined
	ms := []types.PerfMetricId{{CounterId: 1, Instance: ""}, {CounterId: 2, Instance: ""}, {CounterId: 5, Instance: ""}, {CounterId: 6, Instance: ""}}
	metrics = p.Collect(refSlice, ms, RealTimeInterval)
	assert.Equal(t, map[types.ManagedObjectReference][]PerfMetric{}, metrics)
	p = PerfCollector{
		client:                 c,
		perfManager:            performance.NewManager(c.Client),
		logger:                 logrus.New(),
		MetricDefinition:       nil,
		metricsAvaliableByID:   map[int32]string{1: "test1", 2: "test2", 3: "test3"},
		metricsAvaliableByName: map[string]int32{"test1": 1, "test2": 2, "test3": 3},
		batchSizePerfEntities:  3,
		batchSizePerfMetrics:   3,
	}
	metrics = p.Collect(refSlice, ms, RealTimeInterval)
	assert.Equal(t, len(refSlice), len(metrics), "we have 100 vm, all of them should be present in the map")
	assert.Equal(t, 1, len(metrics[refSlice[0]]), "we expect only one metric since only metrics with id 2 and 6 are defined for vms and only 2 is map in metricsAvaliableByID")
}
// TestMultipleEntities checks processEntityMetrics: nil and empty inputs
// must not panic, multi-instance counter series are combined into one
// value per entity (see testTwoCunters for the expected numbers), and
// processing a second entity leaves the first entity's results intact.
func TestMultipleEntities(t *testing.T) {
	p := PerfCollector{
		logger:                 logrus.New(),
		metricsAvaliableByID:   map[int32]string{99: "SingleInstanceCounter", 100: "MultipleInstanceCounter", 3: "NotUsed"},
		metricsAvaliableByName: map[string]int32{"SingleInstanceCounter": 99, "MultipleInstanceCounter": 100, "NotUsed": 3},
	}
	//No Panic expected if passing nil
	assert.NotPanics(t, func() { p.processEntityMetrics(nil, nil) }, "we expect the function not to panic")
	pem := &types.PerfEntityMetric{}
	perfMetricsByRef := map[types.ManagedObjectReference][]PerfMetric{}
	//No panic expected if passing empty struct
	assert.NotPanics(t, func() { p.processEntityMetrics(pem, perfMetricsByRef) }, "we expect the function not to panic")
	hostEntity := types.ManagedObjectReference{Type: "Host", Value: "Host-155"}
	// Counter 100 has two named instances plus an aggregate ("") sample;
	// counter 99 has only the aggregate.
	pemPopulated := &types.PerfEntityMetric{
		PerfEntityMetricBase: types.PerfEntityMetricBase{
			Entity: hostEntity,
		},
		SampleInfo: []types.PerfSampleInfo{},
		Value: append([]types.BasePerfMetricSeries{},
			returnPerfMetricIntSeries(100, "Instance1", 75),
			returnPerfMetricIntSeries(100, "Instance2", 225),
			returnPerfMetricIntSeries(100, "", 300),
			returnPerfMetricIntSeries(99, "", 15)),
	}
	assert.NotPanics(t, func() { p.processEntityMetrics(pemPopulated, perfMetricsByRef) }, "we expect the function not to panic")
	testTwoCunters(t, perfMetricsByRef, hostEntity)
	// Testing retrieving data regarding a different host, it should not change any previous value
	differentHost := types.ManagedObjectReference{Type: "Host", Value: "Different host"}
	pemPopulated.Entity = differentHost
	assert.NotPanics(t, func() { p.processEntityMetrics(pemPopulated, perfMetricsByRef) }, "we expect the function not to panic")
	testTwoCunters(t, perfMetricsByRef, hostEntity)
	testTwoCunters(t, perfMetricsByRef, differentHost)
}
// returnPerfMetricIntSeries builds a single-sample integer metric series
// for the given counter ID and instance name — a test fixture helper.
func returnPerfMetricIntSeries(counter int32, instanceName string, value int64) *types.PerfMetricIntSeries {
	return &types.PerfMetricIntSeries{
		PerfMetricSeries: types.PerfMetricSeries{
			Id: types.PerfMetricId{
				CounterId: counter,
				Instance:  instanceName,
			},
		},
		Value: []int64{value},
	}
}
// testTwoCunters asserts the expected per-counter values for one entity:
// 200 for MultipleInstanceCounter, 15 for SingleInstanceCounter, and
// NotUsed must be absent. The fixture feeds instances 75 and 225 plus an
// aggregate 300 into counter 100, so 200 is presumably the mean of the
// named instances — confirm against processEntityMetrics before relying
// on this. NOTE(review): "Cunters" is a typo for "Counters"; renaming
// would require touching all call sites.
func testTwoCunters(t *testing.T, perfMetricsByRef map[types.ManagedObjectReference][]PerfMetric, hostEntity types.ManagedObjectReference) {
	for _, val := range perfMetricsByRef[hostEntity] {
		if val.Counter == "MultipleInstanceCounter" {
			assert.Equal(t, int64(200), val.Value)
		}
		if val.Counter == "SingleInstanceCounter" {
			assert.Equal(t, int64(15), val.Value)
		}
		if val.Counter == "NotUsed" {
			assert.Fail(t, "Not used counter should not be present")
		}
	}
}
// TestSanitize checks sanitizeArgs: two positive numeric strings are
// accepted; non-numeric, negative and zero arguments are rejected.
func TestSanitize(t *testing.T) {
	_, _, err := sanitizeArgs("1", "2")
	assert.NoError(t, err)
	_, _, err = sanitizeArgs("pg", "2")
	assert.Error(t, err)
	_, _, err = sanitizeArgs("-1", "2")
	assert.Error(t, err)
	_, _, err = sanitizeArgs("1", "0")
	assert.Error(t, err)
}
|
package aoc2019
import (
"reflect"
"testing"
)
// TestDay16TestPatternForElement checks the repeating base pattern generated
// for the first few output element indices.
func TestDay16TestPatternForElement(t *testing.T) {
	expected := [][]int64{
		{0, 1, 0, -1},
		{0, 0, 1, 1, 0, 0, -1, -1},
		{0, 0, 0, 1, 1, 1, 0, 0, 0, -1, -1, -1},
	}
	for idx, want := range expected {
		got := day16PatternForElement(idx)
		if !reflect.DeepEqual(want, got.pattern) {
			t.Errorf("Unexpected pattern for idx=%d: exp=%+v got=%+v", idx, want, got.pattern)
		}
	}
}
// TestDay16PatternLoop verifies the pattern iterator wraps around and skips
// the leading element on the first pass.
func TestDay16PatternLoop(t *testing.T) {
	pattern := day16PatternForElement(0)
	// The first pattern element is skipped on the first execution, so the
	// expected sequence begins with the second value.
	want := []int64{1, 0, -1, 0, 1, 0, -1, 0, 1, 0, -1}
	for i, expN := range want {
		if got := pattern.get(); got != expN {
			t.Errorf("Unexpected number for pattern p1 at index %d: exp=%d got=%d", i, expN, got)
		}
	}
}
// TestDay16ParseInputSignal checks that a digit string parses into the
// expected slice of int64 digits.
func TestDay16ParseInputSignal(t *testing.T) {
	signal, err := day16ReadInputSignal("12345678")
	if err != nil {
		t.Fatalf("Unable to parse input signal: %s", err)
	}
	want := []int64{1, 2, 3, 4, 5, 6, 7, 8}
	if !reflect.DeepEqual(signal, want) {
		t.Errorf("Unexpected input signal: %+v", signal)
	}
}
// TestDay16ProcessSignal runs the FFT over the example signal for 0..4
// phases and compares each intermediate result.
func TestDay16ProcessSignal(t *testing.T) {
	signal, err := day16ReadInputSignal("12345678")
	if err != nil {
		t.Fatalf("Unable to parse input signal: %s", err)
	}
	phases := [][]int64{
		{1, 2, 3, 4, 5, 6, 7, 8},
		{4, 8, 2, 2, 6, 1, 5, 8},
		{3, 4, 0, 4, 0, 4, 3, 8},
		{0, 3, 4, 1, 5, 5, 1, 8},
		{0, 1, 0, 2, 9, 4, 9, 8},
	}
	for i, expS := range phases {
		got := day16ProcessSignal(signal, i)
		if !reflect.DeepEqual(got, expS) {
			t.Errorf("Unexpected processed signal after %d phases: exp=%+v got=%+v", i, expS, got)
		}
	}
}
// TestDay16ProcessSignal8OfLonger runs 100 FFT phases over longer example
// inputs and checks only the first eight output digits.
func TestDay16ProcessSignal8OfLonger(t *testing.T) {
	cases := map[string][]int64{
		"80871224585914546619083218645595": {2, 4, 1, 7, 6, 1, 7, 6},
		"19617804207202209144916044189917": {7, 3, 7, 4, 5, 4, 1, 8},
		"69317163492948606335995924319873": {5, 2, 4, 3, 2, 1, 3, 3},
	}
	for signal, expPSig8 := range cases {
		parsed, err := day16ReadInputSignal(signal)
		if err != nil {
			t.Fatalf("Unable to parse input signal %q: %s", signal, err)
		}
		got := day16ProcessSignal(parsed, 100)
		if !reflect.DeepEqual(got[:8], expPSig8) {
			t.Errorf("Unexpected processed signal for signal %q: exp=%+v got=%+v", signal, expPSig8, got[:8])
		}
	}
}
// TestCalculateDay16_Part1 runs the part 1 solver against the puzzle input
// file and logs the solution; it fails only if the solver errors.
func TestCalculateDay16_Part1(t *testing.T) {
	res, err := solveDay16Part1("day16_input.txt")
	if err != nil {
		t.Fatalf("Day 16 solver failed: %s", err)
	}
	t.Logf("Solution Day 16 Part 1: %s", res)
}

// TestCalculateDay16_Part2 runs the part 2 solver against the puzzle input
// file and logs the solution; it fails only if the solver errors.
func TestCalculateDay16_Part2(t *testing.T) {
	res, err := solveDay16Part2("day16_input.txt")
	if err != nil {
		t.Fatalf("Day 16 solver failed: %s", err)
	}
	t.Logf("Solution Day 16 Part 2: %s", res)
}
|
package main
import (
"net"
"time"
countdownpb "github.com/Sadham-Hussian/go-gRPC/stream/server-streaming/countDown/proto"
"google.golang.org/grpc"
)
type server struct{}
// main starts a gRPC server on localhost:4000 serving the Countdown service.
func main() {
	const addr = "localhost:4000"
	listener, err := net.Listen("tcp", addr)
	if err != nil {
		panic(err)
	}
	grpcServer := grpc.NewServer()
	countdownpb.RegisterCountdownServer(grpcServer, &server{})
	if err := grpcServer.Serve(listener); err != nil {
		panic(err)
	}
}
// Start streams a countdown from the requested timer value down to 1,
// sending one CountdownResponse per second.
func (s *server) Start(req *countdownpb.CountdownRequest, stream countdownpb.Countdown_StartServer) error {
	timer := req.GetTimer()
	for timer > 0 {
		res := countdownpb.CountdownResponse{Count: timer}
		// Propagate send failures (e.g. client disconnect, cancelled
		// context) instead of silently looping on a dead stream.
		if err := stream.Send(&res); err != nil {
			return err
		}
		timer--
		time.Sleep(time.Second)
	}
	return nil
}
|
package server
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/robinjoseph08/hello/pkg/application"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNew verifies that the server routes /health correctly and returns a
// 404 with a "not found" body for unknown paths.
func TestNew(t *testing.T) {
	app := application.New()
	srv, err := New(app)
	require.NoError(t, err)
	req, err := http.NewRequest("GET", "/health", nil)
	// Fix typo in the failure message ("unexpecetd" -> "unexpected").
	require.Nil(t, err, "unexpected error when making new request")
	w := httptest.NewRecorder()
	srv.Handler.ServeHTTP(w, req)
	assert.Equal(t, http.StatusOK, w.Code, "incorrect status code")
	assert.Equal(t, `{"healthy":true}`, w.Body.String(), "incorrect response")
	req, err = http.NewRequest("GET", "/foo", nil)
	require.Nil(t, err, "unexpected error when making new request")
	w = httptest.NewRecorder()
	srv.Handler.ServeHTTP(w, req)
	assert.Equal(t, http.StatusNotFound, w.Code, "incorrect status code")
	assert.Contains(t, w.Body.String(), "not found", "incorrect response")
}
|
package logs
import (
"encoding/json"
"fmt"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
"lhc.go.game.user/utitls/https"
"net/url"
)
// HttpHook is a logrus hook that forwards every log entry to a remote HTTP
// log collector (see Fire). It is stateless; the target URL is read from
// configuration on each fire.
type HttpHook struct {
}

// NewHttpHook returns a new HttpHook.
func NewHttpHook() *HttpHook {
	return &HttpHook{}
}

// Levels reports that this hook fires for every log level.
func (l *HttpHook) Levels() []logrus.Level {
	return logrus.AllLevels
}
func (l *HttpHook) Fire(entry *logrus.Entry) error {
urls ,err := url.Parse(viper.GetString("log.urls"))
if err != nil {
return err
}
loguUrl := fmt.Sprintf("%s://%s%s",urls.Scheme,urls.Host,urls.Path)
var data = make(map[string]interface{})
data["data"] = entry.Data
logData := make(map[string]interface{})
formData := make(map[string]interface{})
for k,v := range entry.Data {
formData[k] = v
}
logData["index"] = "http"
logData["data"] = formData
if val,ok := formData["postfrom"];ok {
var form = make(map[string]interface{})
err := json.Unmarshal([]byte(val.(string)), &form)
if err !=nil {
return err
}
formData["postfrom"] = form
}
fmt.Printf("data:%#v\n",logData)
bytes, err := https.Post(loguUrl, "application/json", logData)
if err!=nil {
logrus.Println(err)
}
fmt.Println(string(bytes))
return nil
} |
package boot
import (
"github.com/gogf/gf/os/glog"
"golang-coding/app/service"
"time"
)
// bootTime records the application start time on the service package and
// writes it to the log.
func bootTime() {
	now := time.Now().Format("2006-01-02 15:04:05")
	service.BootTime = now
	glog.Info("application boot time :" + now)
}
|
package gowalletsafrica
import (
"net/http"
"time"
)
type (
	// Currency is a currency code string (e.g. "NGN") — exact format
	// depends on the API; confirm against the Wallets Africa docs.
	Currency string
	// TransactionType enumerates transaction kinds as integers.
	TransactionType int
	// base carries the HTTP client, API endpoint, and credentials shared
	// by every API group below via embedding.
	base struct {
		HTTPClient *http.Client
		APIURL string
		secretKey string
		publicKey string
	}
	// self groups account-level ("self") endpoints.
	self struct {
		*base
	}
	// wallets groups wallet management endpoints.
	wallets struct {
		*base
	}
	// payouts groups payout endpoints.
	payouts struct {
		*base
	}
	// airtime groups airtime purchase endpoints.
	airtime struct {
		*base
	}
	// identity groups identity/KYC endpoints.
	identity struct {
		*base
	}
	// WalletsAfrica is the top-level API client, exposing one field per
	// endpoint group.
	WalletsAfrica struct {
		Self *self
		Wallets *wallets
		Payouts *payouts
		Airtime *airtime
		Identity *identity
	}
	// Config holds the settings used to construct a client.
	Config struct {
		Environment string
		PublicKey string
		SecretKey string
		RequestTimeout time.Duration
	}
	// Transaction is a single wallet transaction record.
	Transaction struct {
		Amount float64
		Currency string
		Category string
		Narration string
		DateTransacted string
		PreviousBalance float64
		NewBalance float64
		Type string
	}
	// Wallet describes a user wallet and its owner details.
	Wallet struct {
		Username string
		AccountNumber string
		BVN string
		City string
		Country string
		DateCreated string
		DateOfBirth string
		Email string
		FirstName string
		LastName string
		PhoneNumber string
		DateSignedup string
		AccountName string
		AccountNo string
		AvailableBalance float64
		Bank string
		Password string
	}
	// AirtimeProvider is a mobile network operator code/name pair.
	AirtimeProvider struct {
		Code string
		Name string
	}
	// Bank identifies a bank by code, name, and sort code.
	Bank struct {
		BankCode string
		BankName string
		BankSortCode string
		//PaymentGateway string
	}
	// BankDetail describes a completed bank transfer.
	BankDetail struct {
		Bank string
		AccountNumber string
		DateTransferred string
		Amount float64
		RecipientName string
		SessionId string
		ResponseCode string
		Message string
	}
	// payloadBody and responseBody are free-form JSON request/response maps.
	payloadBody map[string]interface{}
	responseBody map[string]interface{}
	//Endpoint Results
	// CheckBalanceResult is the result of a balance inquiry.
	CheckBalanceResult struct {
		WalletBalance float64
		WalletCurrency string
	}
	// CreditWalletResult is the result of a wallet credit operation.
	CreditWalletResult struct {
		AmountCredited float64
		RecipientWalletBalance float64
		SenderWalletBalance float64
	}
	// Collection aliases for list-returning endpoints.
	Transactions []Transaction
	Wallets []Wallet
	AirtimeProviders []AirtimeProvider
	Banks []Bank
	// ResolveBVN is the result of a BVN identity lookup.
	ResolveBVN struct {
		FirstName string
		LastName string
		MiddleName string
		Email string
		PhoneNumber string
		BVN string
		DateOfBirth string
		EnrollmentBank string
		EnrollmentBranch string
		Gender string
		LevelOfAccount string
		LgaOfOrigin string
		LgaOfResidence string
		MaritalStatus string
		NameOnCard string
		Nationality string
		StateOfOrigin string
		StateOfResidence string
		Title string
		WatchListed string
		Picture string
		ResponseCode string
		Message string
	}
)
|
package e
// Application status codes returned in API responses.
const (
	// Generic statuses, mirroring HTTP semantics.
	SUCCESS = 200
	InvalidParams = 400
	ERROR = 500
	// Authentication/authorization failures (30xxx).
	ErrorAuthCheckTokenFail = 30001
	ErrorAuthCheckTokenTimeout = 30002
	ErrorAuthInsufficientAuthority = 30003
	// Business-level errors (40xxx).
	ErrorUploadFile = 40001
	ErrorFavorExist = 40002
	ErrorLikeExist = 40003
)
package gol
import (
"fmt"
"time"
"github.com/mediaFORGE/gol/fields"
field_severity "github.com/mediaFORGE/gol/fields/severity"
field_timestamp "github.com/mediaFORGE/gol/fields/timestamp"
)
// LogMessage is a log message.
type LogMessage map[string]interface{}
// FieldLength returns the number of fields currently stored in the message.
func (msg LogMessage) FieldLength() (n int) {
	n = len(msg)
	return
}
// Get returns the value of the given logger message field, or an error if
// the field is not present.
func (msg LogMessage) Get(f string) (i interface{}, err error) {
	if v, ok := msg[f]; ok {
		return v, nil
	}
	// Lowercase error string per Go convention (staticcheck ST1005); %q
	// makes empty or whitespace-only field names visible in the message.
	return nil, fmt.Errorf("message does not contain field %q", f)
}
// Severity returns the value of the logger message severity level field,
// or Type(-1) with an error when the field is absent.
func (msg LogMessage) Severity() (lvl field_severity.Type, err error) {
	v, err := msg.Get(fields.Severity)
	if err != nil {
		return field_severity.Type(-1), err
	}
	return v.(field_severity.Type), nil
}

// SetSeverity sets the value of the logger message severity level field,
// rejecting levels that fail validation.
func (msg LogMessage) SetSeverity(lvl field_severity.Type) (err error) {
	if err = lvl.Validate(); err != nil {
		return err
	}
	msg[fields.Severity] = lvl
	return nil
}
// Start returns the value of the logger message start field; on a missing
// field it returns a pointer to the zero time plus the error.
func (msg LogMessage) Start() (s *time.Time, err error) {
	v, err := msg.Get(fields.Start)
	if err != nil {
		return &time.Time{}, err
	}
	return v.(*time.Time), nil
}

// SetStart sets the value of the logger message start field.
func (msg LogMessage) SetStart(s *time.Time) (err error) {
	msg[fields.Start] = s
	return nil
}

// Stop returns the value of the logger message stop field; on a missing
// field it returns a pointer to the zero time plus the error.
func (msg LogMessage) Stop() (s *time.Time, err error) {
	v, err := msg.Get(fields.Stop)
	if err != nil {
		return &time.Time{}, err
	}
	return v.(*time.Time), nil
}

// SetStop sets the value of the logger message stop field.
func (msg LogMessage) SetStop(s *time.Time) (err error) {
	msg[fields.Stop] = s
	return nil
}
// Timestamp returns a pointer to the logger message timestamp field, or nil
// with an error when the field is absent.
func (msg LogMessage) Timestamp() (*field_timestamp.Type, error) {
	v, err := msg.Get(fields.Timestamp)
	if err != nil {
		return nil, err
	}
	vt := v.(field_timestamp.Type)
	return &vt, nil
}
// NewLogMessageFunc is the function signature of LogMessage constructor functions.
type NewLogMessageFunc func(args ...interface{}) *LogMessage

// NewEmergency builds an emergency severity message.
func NewEmergency(args ...interface{}) *LogMessage {
	lvl := field_severity.Type(field_severity.Emergency)
	return NewMessage(lvl, args...)
}

// NewAlert builds an alert severity message.
func NewAlert(args ...interface{}) *LogMessage {
	lvl := field_severity.Type(field_severity.Alert)
	return NewMessage(lvl, args...)
}

// NewCritical builds a critical severity message.
func NewCritical(args ...interface{}) *LogMessage {
	lvl := field_severity.Type(field_severity.Critical)
	return NewMessage(lvl, args...)
}

// NewError builds an error severity message.
func NewError(args ...interface{}) *LogMessage {
	lvl := field_severity.Type(field_severity.Error)
	return NewMessage(lvl, args...)
}

// NewWarning builds a warning severity message.
func NewWarning(args ...interface{}) *LogMessage {
	lvl := field_severity.Type(field_severity.Warning)
	return NewMessage(lvl, args...)
}

// NewNotice builds a notice severity message.
func NewNotice(args ...interface{}) *LogMessage {
	lvl := field_severity.Type(field_severity.Notice)
	return NewMessage(lvl, args...)
}

// NewInfo builds an info severity message.
func NewInfo(args ...interface{}) *LogMessage {
	lvl := field_severity.Type(field_severity.Info)
	return NewMessage(lvl, args...)
}

// NewDebug builds a debug severity message.
func NewDebug(args ...interface{}) *LogMessage {
	lvl := field_severity.Type(field_severity.Debug)
	return NewMessage(lvl, args...)
}
// NewMessage builds a log message with the given severity level. Extra
// arguments are interpreted as alternating field-name (string) / value
// pairs; a trailing name with no value is ignored. Note: a non-string name
// still panics via the type assertion, matching the original contract.
func NewMessage(l field_severity.Type, args ...interface{}) *LogMessage {
	msg := LogMessage{
		fields.Timestamp: field_timestamp.Type{time.Now()},
		fields.Severity:  l,
	}
	// Guard i+1 so an odd number of args no longer panics with an
	// index-out-of-range on args[i+1].
	for i := 0; i+1 < len(args); i += 2 {
		msg[args[i].(string)] = args[i+1]
	}
	return &msg
}
|
//go:build !nostores
// +build !nostores
/*
Copyright 2015 All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"net/url"
"time"
redis "gopkg.in/redis.v4"
)
// redisStore is a token storage implementation backed by a redis server.
type redisStore struct {
	client *redis.Client // underlying redis connection; nil only before construction
}
// newRedisStore creates a new redis store from a redis URL; the optional
// userinfo password, when present, is used for AUTH.
func newRedisStore(location *url.URL) (storage, error) {
	// step: extract any password from the URL userinfo
	var password string
	if location.User != nil {
		password, _ = location.User.Password()
	}
	// step: build the client options from the URL parts
	opts := &redis.Options{
		Addr:     location.Host,
		DB:       0,
		Password: password,
	}
	return redisStore{client: redis.NewClient(opts)}, nil
}
// Set adds a token to the store with no expiration.
func (r redisStore) Set(key, value string) error {
	// Return the command error directly, matching the style of Delete and
	// avoiding the awkward `if err := ...; err.Err() != nil` form.
	return r.client.Set(key, value, 0).Err()
}
// Get retrieves a token from the store.
func (r redisStore) Get(key string) (string, error) {
	result := r.client.Get(key)
	if result.Err() != nil {
		return "", result.Err()
	}
	// Use Val(), not String(): StringCmd.String() renders the whole
	// command ("get <key>: <value>"), not just the stored value.
	return result.Val(), nil
}
// Delete removes the key from the store.
func (r redisStore) Delete(key string) error {
	cmd := r.client.Del(key)
	return cmd.Err()
}

// Close releases the underlying redis client, if one was created.
func (r redisStore) Close() error {
	if r.client == nil {
		return nil
	}
	return r.client.Close()
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package perfutil
import (
"context"
"fmt"
"path/filepath"
"time"
"chromiumos/tast/common/perf"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/metrics"
"chromiumos/tast/testing"
)
// DefaultRuns provides the default number of iteration for a perftest conducts.
const DefaultRuns = 10
// ScenarioFunc is the function to conduct the test operation and returns the
// metric value.
type ScenarioFunc func(context.Context, string) ([]*metrics.Histogram, error)
// RunAndWaitAll creates a ScenarioFunc that runs f and waits (up to one
// minute) for all of the named histograms via metrics.RunAndWaitAll.
func RunAndWaitAll(tconn *chrome.TestConn, f func(ctx context.Context) error, names ...string) ScenarioFunc {
	return func(ctx context.Context, _ string) ([]*metrics.Histogram, error) {
		return metrics.RunAndWaitAll(ctx, tconn, time.Minute, f, names...)
	}
}

// RunAndWaitAny creates a ScenarioFunc that runs f and waits (up to one
// minute) for any of the named histograms via metrics.RunAndWaitAny.
func RunAndWaitAny(tconn *chrome.TestConn, f func(ctx context.Context) error, names ...string) ScenarioFunc {
	return func(ctx context.Context, _ string) ([]*metrics.Histogram, error) {
		return metrics.RunAndWaitAny(ctx, tconn, time.Minute, f, names...)
	}
}
// StoreFunc is a function to be used for RunMultiple.
type StoreFunc func(ctx context.Context, pv *Values, hists []*metrics.Histogram) error
// StoreAllWithHeuristics is a utility function to store all metrics. It
// determines the perf direction (bigger or smaller is better) and unit
// through heuristics on each metric name; suffix, when non-empty, is
// appended to every metric name.
func StoreAllWithHeuristics(suffix string) StoreFunc {
	return func(ctx context.Context, pv *Values, hists []*metrics.Histogram) error {
		for _, hist := range hists {
			mean, err := hist.Mean()
			if err != nil {
				return errors.Wrapf(err, "failed to get mean for histogram %s", hist.Name)
			}
			name := hist.Name
			if suffix != "" {
				name += "." + suffix
			}
			testing.ContextLog(ctx, name, " = ", mean)
			direction, unit := estimateMetricPresenattionType(ctx, name)
			metric := perf.Metric{
				Name:      name,
				Unit:      unit,
				Direction: direction,
			}
			pv.Append(metric, mean)
		}
		return nil
	}
}
// StoreAll returns a StoreFunc that records every histogram's mean with the
// given fixed direction and unit; suffix, when non-empty, is appended to
// each metric name.
func StoreAll(direction perf.Direction, unit, suffix string) StoreFunc {
	return func(ctx context.Context, pv *Values, hists []*metrics.Histogram) error {
		for _, hist := range hists {
			mean, err := hist.Mean()
			if err != nil {
				return errors.Wrapf(err, "failed to get mean for histogram %s", hist.Name)
			}
			name := hist.Name
			if suffix != "" {
				name += "." + suffix
			}
			testing.ContextLog(ctx, name, " = ", mean)
			metric := perf.Metric{
				Name:      name,
				Unit:      unit,
				Direction: direction,
			}
			pv.Append(metric, mean)
		}
		return nil
	}
}
// StoreSmoothness stores animation smoothness metrics (percent, bigger is
// better).
func StoreSmoothness(ctx context.Context, pv *Values, hists []*metrics.Histogram) error {
	store := StoreAll(perf.BiggerIsBetter, "percent", "")
	return store(ctx, pv, hists)
}

// StoreLatency stores input-latency metrics (ms, smaller is better).
func StoreLatency(ctx context.Context, pv *Values, hists []*metrics.Histogram) error {
	store := StoreAll(perf.SmallerIsBetter, "ms", "")
	return store(ctx, pv, hists)
}
// Runner is an entity to manage multiple runs of the test scenario.
type Runner struct {
	br *browser.Browser // browser used for the tracing run; may be nil
	pv *Values          // accumulated metric values across runs
	Runs int            // number of scenario iterations (defaults to DefaultRuns)
	RunTracing bool     // whether to do an extra tracing run after the iterations
}
// NewRunner creates a new instance of Runner. Tracing is enabled only when
// a browser is supplied.
func NewRunner(br *browser.Browser) *Runner {
	return &Runner{br: br, pv: NewValues(), Runs: DefaultRuns, RunTracing: (br != nil)}
}
// Values returns the values in the runner.
func (r *Runner) Values() *Values {
	return r.pv
}
// RunMultiple runs scenario multiple times and stores the data through the
// store function. It invokes scenario+store r.Runs times, and then invokes
// scenario once more with tracing enabled (when RunTracing is set). If one
// of the runs fails, it quits immediately and reports an error. The name
// parameter is used for the prefix of subtest names passed to scenario and
// for the trace data file prefix; it can be empty, in which case default
// prefixes are used. Returns a non-nil error when any step fails.
func (r *Runner) RunMultiple(ctx context.Context, name string, scenario ScenarioFunc, store StoreFunc) error {
	runPrefix := name
	if name == "" {
		runPrefix = "run"
	}
	for i := 0; i < r.Runs; i++ {
		hists, err := scenario(ctx, fmt.Sprintf("%s-%d", runPrefix, i))
		if err != nil {
			return errors.Wrap(err, "failed to run the test scenario")
		}
		if err := store(ctx, r.pv, hists); err != nil {
			return errors.Wrap(err, "failed to store the histogram data")
		}
	}
	if !r.RunTracing {
		return nil
	}
	const traceCleanupDuration = 2 * time.Second
	// time.Until is the idiomatic spelling of deadline.Sub(time.Now()).
	if deadline, ok := ctx.Deadline(); ok && time.Until(deadline) < traceCleanupDuration {
		testing.ContextLog(ctx, "There are no time to conduct a tracing run. Skipping")
		return nil
	}
	defer r.br.StopTracing(ctx)
	sctx, cancel := ctxutil.Shorten(ctx, traceCleanupDuration)
	defer cancel()
	// At this time, systrace causes kernel crash on dedede devices. Because of
	// that and data points from systrace isn't actually helpful to most of
	// UI tests, disable systraces for the time being.
	// TODO(https://crbug.com/1162385, b/177636800): enable it.
	if err := r.br.StartTracing(sctx, []string{"benchmark", "cc", "gpu", "input", "toplevel", "ui", "views", "viz"}, browser.DisableSystrace()); err != nil {
		return errors.Wrap(err, "failed to start tracing")
	}
	if _, err := scenario(sctx, fmt.Sprintf("%s-tracing", runPrefix)); err != nil {
		return errors.Wrap(err, "failed to run the test scenario")
	}
	tr, err := r.br.StopTracing(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to stop tracing")
	}
	if tr == nil || len(tr.Packet) == 0 {
		// Bug fix: err is nil on this path and errors.Wrap(nil, ...)
		// returns nil, so this used to silently report success.
		return errors.New("no trace data is collected")
	}
	filename := "trace.data.gz"
	if name != "" {
		filename = fmt.Sprintf("%s-%s", name, filename)
	}
	outdir, ok := testing.ContextOutDir(ctx)
	if !ok {
		// Same nil-wrap bug as above: create a fresh error.
		return errors.New("failed to get name of the output directory")
	}
	if err := chrome.SaveTraceToFile(ctx, tr, filepath.Join(outdir, filename)); err != nil {
		return errors.Wrap(err, "failed to save trace to file")
	}
	return nil
}
// RunMultipleAndSave creates a new runner, conducts the runs, and saves the
// recorded values into outDir.
func RunMultipleAndSave(ctx context.Context, outDir string, br *browser.Browser, scenario ScenarioFunc, store StoreFunc) error {
	runner := NewRunner(br)
	if err := runner.RunMultiple(ctx, "", scenario, store); err != nil {
		return err
	}
	if err := runner.Values().Save(ctx, outDir); err != nil {
		return errors.Wrap(err, "failed to save results")
	}
	return nil
}
|
package main
import (
"fmt"
"github.com/peterfraedrich/consulmq"
)
// main pushes 101 messages onto a Consul-backed queue and then pops them
// all back off, printing each queue object ID.
func main() {
	mq, err := consulmq.Connect(consulmq.Config{
		Address:    "172.17.0.2:8500",
		Datacenter: "dc1",
		Token:      "",
		MQName:     "cmq",
	})
	if err != nil {
		panic(err)
	}
	// Put an item on the queue, 101 times (0..100 inclusive).
	for i := 0; i <= 100; i++ {
		qo, err := mq.Push([]byte("Hello, is it me you're looking for?"))
		if err != nil {
			panic(err)
		}
		fmt.Println(qo.ID)
	}
	fmt.Println("++++++++++++++++++++++++++++++++++++++++++++++++++++++")
	// Pop each item off the queue again.
	for x := 0; x <= 100; x++ {
		_, qo, err := mq.Pop()
		if err != nil {
			panic(err)
		}
		fmt.Println(qo.ID)
	}
}
|
package operations
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewGetServerEditionNameParams creates a new GetServerEditionNameParams object
// with the default values initialized.
func NewGetServerEditionNameParams() *GetServerEditionNameParams {
	return NewGetServerEditionNameParamsWithTimeout(cr.DefaultTimeout)
}

// NewGetServerEditionNameParamsWithTimeout creates a new GetServerEditionNameParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewGetServerEditionNameParamsWithTimeout(timeout time.Duration) *GetServerEditionNameParams {
	params := GetServerEditionNameParams{
		timeout: timeout,
	}
	return &params
}
/*GetServerEditionNameParams contains all the parameters to send to the API endpoint
for the get server edition name operation typically these are written to a http.Request
*/
type GetServerEditionNameParams struct {
	// timeout is applied to the outgoing request in WriteToRequest.
	timeout time.Duration
}
// WriteToRequest writes these params to a swagger request
func (o *GetServerEditionNameParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
	var res []error
	// Collect the SetTimeout error instead of discarding it, matching the
	// pattern used by go-swagger generated parameter writers.
	if err := r.SetTimeout(o.timeout); err != nil {
		res = append(res, err)
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
|
package mdproc
import (
"bytes"
"regexp"
"github.com/n0x1m/md2gmi/pipe"
)
// FormatHeadings normalizes markdown headings on each stream item: heading
// levels deeper than three are clamped to "###", and a missing space after
// the hash run is inserted. It consumes `in` in a goroutine and closes the
// returned channel when the input is exhausted.
func FormatHeadings(in chan pipe.StreamItem) chan pipe.StreamItem {
	out := make(chan pipe.StreamItem)
	go func() {
		deep := regexp.MustCompile(`^[#]{4,}`)
		noSpace := regexp.MustCompile(`^(#+)[^# ]`)
		for item := range in {
			// clamp headings deeper than three levels
			data := deep.ReplaceAll(item.Payload(), []byte("###"))
			// insert the missing space after the hash run, if any
			if m := noSpace.FindSubmatch(data); len(m) > 0 {
				data = bytes.Replace(data, m[1], append(m[1], []byte(" ")...), 1)
			}
			out <- pipe.NewItem(item.Index(), data)
		}
		close(out)
	}()
	return out
}
|
package main
//
// Reference
//
// https://echo.labstack.com/recipes/websocket
//
//
import (
"github.com/labstack/echo"
"fmt"
"net/http"
"io"
"html/template"
"github.com/labstack/echo/middleware"
)
// EchoRenderer renders templates stored as bindata assets.
type EchoRenderer struct {
}

// Render loads the named asset, parses it as a template, and executes it
// with the given data. Errors are returned to echo rather than panicking,
// so the framework can produce a proper error response.
func (r *EchoRenderer) Render(w io.Writer, name string, data interface{}, ctx echo.Context) error {
	a, err := Asset(name)
	if err != nil {
		return err
	}
	t, err := template.New(name).Parse(string(a))
	if err != nil {
		return err
	}
	return t.ExecuteTemplate(w, name, data)
}
// RenderStatic serves assets embedded under static/ as plain strings.
// A missing asset now returns the error (echo renders an error response)
// instead of panicking the handler goroutine.
func RenderStatic(c echo.Context) error {
	a, err := Asset(fmt.Sprintf("static%s", c.P(0)))
	if err != nil {
		return err
	}
	return c.String(http.StatusOK, string(a))
}
// InitializeRenderers wires up the bindata-backed template renderer and the
// static asset route on the given echo instance.
func InitializeRenderers(e *echo.Echo) error {
	//e.Use(middleware.Logger())
	e.Use(middleware.Recover())
	renderer := &EchoRenderer{}
	e.SetRenderer(renderer)
	e.GET("/static*", RenderStatic)
	return nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.